| repo_name (string, 5–92 chars) | path (string, 4–221 chars) | copies (19 classes) | size (string, 4–6 chars) | content (string, 766–896k chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
abhishekraok/GraphMap | graphmap/tree_viewer.py | 1 | 3451 |
import imagetree
import serializer
from utilities import quadkey_to_xyz, xyz_to_quadkey, is_valid_quadkey
from serializer import create_tree_from_jpg_url
import commander
import constants
import sys
valid_commands = ['+', '-', 'l', 'r', 'u', 'd', 's']
def tree_viewer_valid_input(input_command):
return input_command in valid_commands or \
is_valid_quadkey(input_command) or \
input_command.startswith('c') or \
input_command.startswith('http')
def tree_viewer(tree):
"""
Interactively display the tree.
:type tree: imagetree.ImageTree
"""
import matplotlib.pyplot as plt
x, y, z = 0, 0, 0
im_array = tree.get_pil_image_at_xyz(x, y, z, constants.default_tile_resolution)
plt.imshow(im_array)
plt.draw()
plt.pause(1)
print('You can move around, connect to an existing image, or insert an image from a url')
print('To move around please enter one of ' + str(valid_commands))
print('To connect enter c <node_link> e.g. >c line@https://azurewebsite.line.tsv.gz')
print('To insert another image please input <image_url> <node_name> e.g. >http://imgur.com/haha.jpg smile')
input_command = raw_input('>')
while tree_viewer_valid_input(input_command):
if input_command == '+':
x *= 2
y *= 2
z += 1
if input_command == '-':
x /= 2
y /= 2
z -= 1
if input_command == 'l':
x -= 1
if input_command == 'r':
x += 1
if input_command == 'd':
y += 1
if input_command == 'u':
y -= 1
if input_command == 's':
quad_key = xyz_to_quadkey(x, y, z)
print('Saving at current location of ', quad_key, ' where filename is ', tree.filename)
serializer.save_tree(tree)
if is_valid_quadkey(input_command):
x, y, z = quadkey_to_xyz(quadkey=input_command)
current_quadkey = xyz_to_quadkey(x, y, z)
if input_command.startswith('c'):
_, node_link = input_command.split(' ')
quad_key = current_quadkey
print('connecting node link', node_link, ' at quadkey ', quad_key)
commander.insert_tree_root(root_tree=tree, child_link=node_link, quad_key=quad_key, save=False)
if input_command.startswith('http'):
url, node_name = input_command.split(' ')
blob_url = 'https://artmapstore.blob.core.windows.net/firstnodes/' + node_name + '.tsv.gz'
quad_key = current_quadkey
print('Inserting image ', url, ' at quadkey ', quad_key)
another_tree = create_tree_from_jpg_url(url=url, name=tree.name, filename=blob_url,
max_resolution=1024)
tree.insert(another_tree, quad_key)
print(
'xyz=', x, y, z, '. quadkey=', current_quadkey, 'node link=', tree.get_descendant(current_quadkey).get_link())
im_array = tree.get_pil_image_at_xyz(x, y, z, constants.default_tile_resolution)
plt.imshow(im_array)
plt.draw()
plt.pause(1)
input_command = raw_input('>')
if __name__ == '__main__':
print('Tree viewer with options ', sys.argv)
if len(sys.argv) > 1:
node_link = sys.argv[1]
else:
node_link = constants.ROOT_LINK
tree = serializer.load_link_new_serializer(node_link)
tree_viewer(tree)
| apache-2.0 | -6,180,173,991,250,044,000 | 37.775281 | 118 | 0.578093 | false | 3.503553 | false | false | false |
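The viewer above moves around a tiled image by (x, y, zoom) coordinates and quadkeys. As a point of reference, here is a minimal sketch of the standard tile-map quadkey encoding that `xyz_to_quadkey`/`quadkey_to_xyz` presumably implement; the actual `graphmap.utilities` helpers may differ in detail.

```python
def xyz_to_quadkey(x, y, z):
    """Encode tile coordinates (x, y) at zoom z as a quadkey string."""
    digits = []
    for i in range(z, 0, -1):
        digit = 0
        mask = 1 << (i - 1)
        if x & mask:
            digit += 1
        if y & mask:
            digit += 2
        digits.append(str(digit))
    return ''.join(digits)


def quadkey_to_xyz(quadkey):
    """Decode a quadkey back into (x, y, z) tile coordinates."""
    x = y = 0
    z = len(quadkey)
    for i, ch in enumerate(quadkey):
        mask = 1 << (z - i - 1)
        digit = int(ch)
        if digit & 1:
            x |= mask
        if digit & 2:
            y |= mask
    return x, y, z


# Round trip: tile (3, 5) at zoom 3 encodes to '213' and decodes back.
assert quadkey_to_xyz(xyz_to_quadkey(3, 5, 3)) == (3, 5, 3)
```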
Denvi/FlatCAM | FlatCAMWorker.py | 1 | 1587 |
from PyQt4 import QtCore
class Worker(QtCore.QObject):
"""
Implements a queue of tasks to be carried out in order
in a single independent thread.
"""
# avoid multiple tests for debug availability
pydevd_failed = False
task_completed = QtCore.pyqtSignal(str)
def __init__(self, app, name=None):
super(Worker, self).__init__()
self.app = app
self.name = name
def allow_debug(self):
"""
Allow debugging/breakpoints in this thread.
Should work from PyCharm and PyDev.
:return:
"""
if not self.pydevd_failed:
try:
import pydevd
pydevd.settrace(suspend=False, trace_only_current_thread=True)
except ImportError:
self.pydevd_failed=True
def run(self):
# self.app.log.debug("Worker Started!")
self.allow_debug()
# Tasks are queued in the event listener.
self.app.worker_task.connect(self.do_worker_task)
def do_worker_task(self, task):
# self.app.log.debug("Running task: %s" % str(task))
self.allow_debug()
if ('worker_name' in task and task['worker_name'] == self.name) or \
('worker_name' not in task and self.name is None):
try:
task['fcn'](*task['params'])
except Exception as e:
self.app.thread_exception.emit(e)
raise e
finally:
self.task_completed.emit(self.name)
# self.app.log.debug("Task ignored.")
| mit | 5,585,728,055,401,166,000 | 25.898305 | 78 | 0.559546 | false | 4.100775 | false | false | false |
engeens/pangolin | routes.py | 1 | 1523 |
# -*- coding: utf-8 -*-
# This is an app-specific example router
#
# This simple router is used for setting languages from app/languages directory
# as a part of the application path: app/<lang>/controller/function
# Language from default.py or 'en' (if the file is not found) is used as
# a default_language
#
# See <web2py-root-dir>/examples/routes.parametric.example.py for parameter's detail
#-------------------------------------------------------------------------------------
# To enable this route file you must do the steps:
#
# 1. rename <web2py-root-dir>/examples/routes.parametric.example.py to routes.py
# 2. rename this APP/routes.example.py to APP/routes.py
# (where APP - is your application directory)
# 3. restart web2py (or reload routes in the web2py admin interface)
#
# YOU CAN COPY THIS FILE TO ANY APPLICATION'S ROOT DIRECTORY WITHOUT CHANGES!
from fileutils import abspath
from languages import read_possible_languages
possible_languages = read_possible_languages(abspath('applications', app, 'languages'))
#NOTE! app - is an application based router's parameter with name of an
# application. E.g.'welcome'
routers = {
app: dict(
default_language = possible_languages['default'][0],
languages = [lang for lang in possible_languages
if lang != 'default']
)
}
#NOTE! To change language in your application using these rules add this line
#in one of your models files:
# if request.uri_language: T.force(request.uri_language)
| gpl-3.0 | -6,541,904,693,397,986,000 | 39.078947 | 87 | 0.676297 | false | 3.855696 | false | false | false |
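For reference, a hedged illustration of what the router above ends up containing for a hypothetical app named `welcome`; the `possible_languages` values are made up for the example and would normally come from `read_possible_languages`.

```python
# Made-up stand-in for what read_possible_languages() might return for an app
# whose languages/ directory holds en.py, it.py and de.py, with 'en' as default.
possible_languages = {'default': ('en', 'English'),
                      'en': ('en', 'English'),
                      'it': ('it', 'Italiano'),
                      'de': ('de', 'Deutsch')}

routers = {
    'welcome': dict(
        default_language=possible_languages['default'][0],  # 'en'
        languages=[lang for lang in possible_languages
                   if lang != 'default'],                   # e.g. ['en', 'it', 'de']
    )
}
```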
demin-dmitriy/almost-haskell | src/AHLexerTest.py | 1 | 10390 |
from unittest import TestCase
from AHLexer import *
# Tests and helper methods that are common for PreLexer and AHLexer
class CommonLexerTestCase:
class BasicErrorListener:
def syntaxError(self, recogn, sym, line, column, msg, exc):
raise LexerError("some error %s" % msg, line, column, None)
def setLexer(self, LexerClass, TokenClass):
self._LexerClass = LexerClass
self._TokenClass = TokenClass
def lex(self, str):
from antlr4 import InputStream
lexer = self._LexerClass(InputStream(str))
lexer.removeErrorListeners()
lexer.addErrorListener(CommonLexerTestCase.BasicErrorListener())
tokens = []
for token in iterUntil(lexer.nextToken,
lambda token: token.type == Token.EOF):
tokens.append(token)
return tokens
def checkOutput(self, outTokens, correctTokens):
self.assertEqual(len(outTokens), len(correctTokens))
for outToken, correctToken in zip(outTokens, correctTokens):
correctType, correctText = correctToken
if correctType is not None:
self.assertEqual(outToken.type, correctType)
if correctText is not None:
self.assertEqual(outToken.text, correctText)
# For debug purposes
def printOutput(self, outTokens):
for token in outTokens:
print(repr(token.text), '(' + str(token.type) + ')')
def testID(self):
test1 = self.lex(""" abc ->=∀x_:⊥]{-→2₂-- a"{d} b{--}e data1
""")
self.checkOutput(test1, [
(self._TokenClass.ID, 'abc'),
(self._TokenClass.ID, '->=∀x_:⊥]{-→2₂--'),
(self._TokenClass.ID, 'a"{d}'),
(self._TokenClass.ID, 'b{--}e'),
(self._TokenClass.ID, 'data1'),
(self._TokenClass.NEWLINE, '\n')
])
def testQualifiedName(self):
test1 = self.lex("a.b.c\n")
self.checkOutput(test1, [
(self._TokenClass.ID, 'a'),
(self._TokenClass.Dot, '.'),
(self._TokenClass.ID, 'b'),
(self._TokenClass.Dot, '.'),
(self._TokenClass.ID, 'c'),
(self._TokenClass.NEWLINE, '\n')
])
test2 = self.lex("cba . abc. de .f \n")
self.checkOutput(test2, [
(self._TokenClass.ID, 'cba'),
(self._TokenClass.Dot, '.'),
(self._TokenClass.ID, 'abc'),
(self._TokenClass.Dot, '.'),
(self._TokenClass.ID, 'de'),
(self._TokenClass.Dot, '.'),
(self._TokenClass.ID, 'f'),
(self._TokenClass.NEWLINE, '\n')
])
def testComments(self):
test1 = self.lex(
"""1 {- This
is
comment {- it
can be nested, if necessary
-}
-}
2 -- Only two newlines should have been generated -} {-
one{--}token no-comment--here
""")
self.checkOutput(test1, [
(self._TokenClass.ID, '1'),
(self._TokenClass.NEWLINE, '\n'),
(self._TokenClass.ID, '2'),
(self._TokenClass.NEWLINE, '\n'),
(self._TokenClass.ID, 'one{--}token'),
(self._TokenClass.ID, 'no-comment--here'),
(self._TokenClass.NEWLINE, '\n')
])
# 'where' is special because it affects indentation rules
def testCommonKeywords(self):
test1 = self.lex("(()data) module from import -> → => ⇒ = _ : {= :--\n")
self.checkOutput(test1, [
(self._TokenClass.LParen, '('),
(self._TokenClass.LParen, '('),
(self._TokenClass.RParen, ')'),
(self._TokenClass.Data, 'data'),
(self._TokenClass.RParen, ')'),
(self._TokenClass.Module, 'module'),
(self._TokenClass.From, 'from'),
(self._TokenClass.Import, 'import'),
(self._TokenClass.RArrow, '->'),
(self._TokenClass.RArrow, '→'),
(self._TokenClass.RDoubleArrow, '=>'),
(self._TokenClass.RDoubleArrow, '⇒'),
(self._TokenClass.Equal, '='),
(self._TokenClass.Underscore, '_'),
(self._TokenClass.Colon, ':'),
(self._TokenClass.ID, '{='),
(self._TokenClass.ID, ':--'),
(self._TokenClass.NEWLINE, '\n')
])
def testIllegalTabs(self):
for test in ("\t", "\v", "\f"):
self.assertRaises(LexerError, self.lex, test)
def testInvalidComment(self):
self.assertRaises(LexerError, self.lex, "abc {- ups!\n \n ")
self.assertRaises(LexerError, self.lex, "abc {- {- ouch!")
self.assertRaises(LexerError, self.lex,
"a where {- -} -- ...\n {- {--} oh, not again ")
class PreLexerTest(TestCase, CommonLexerTestCase):
def setUp(self):
CommonLexerTestCase.setLexer(self, PreLexer, PreLexer)
def testTokenWhere(self):
self.checkOutput(self.lex("where"), [(PreLexer.Where, 'where')])
def testNewlines(self):
test1 = self.lex(""" a
abc
{-
-}
def -- one
-- two
-- three
ghi""")
self.checkOutput(test1, [
(PreLexer.ID, 'a'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.ID, 'abc'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.ID, 'def'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.ID, 'ghi')
])
test2 = self.lex("a \r\n b \r c \n d")
self.checkOutput(test2, [
(PreLexer.ID, 'a'),
(PreLexer.NEWLINE, '\r'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.ID, 'b'),
(PreLexer.NEWLINE, '\r'),
(PreLexer.ID, 'c'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.ID, 'd')
])
class AHLexerTest(TestCase, CommonLexerTestCase):
def setUp(self):
CommonLexerTestCase.setLexer(self, AHLexer, AHToken)
def testTokenWhere(self):
self.checkOutput(self.lex("where"), [
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.EndBlock, None)
])
test2 = self.lex("""
data ℕ₀ where
Zero : ℕ₀
Succ : ℕ₀ → ℕ₀
""")
self.checkOutput(test2, [
(AHToken.Data, 'data'),
(AHToken.ID, 'ℕ₀'),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.ID, 'Zero'),
(AHToken.Colon, ':'),
(AHToken.ID, 'ℕ₀'),
(AHToken.NEWLINE, '\n'),
(AHToken.ID, 'Succ'),
(AHToken.Colon, ':'),
(AHToken.ID, 'ℕ₀'),
(AHToken.RArrow, '→'),
(AHToken.ID, 'ℕ₀'),
(AHToken.NEWLINE, '\n'),
(AHToken.EndBlock, None)
])
test3 = self.lex("""
module where
a
where
b
c
d""")
self.checkOutput(test3, [
(AHToken.Module, 'module'),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.ID, 'a'),
(AHToken.NEWLINE, '\n'),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.ID, 'b'),
(AHToken.ID, 'c'),
(AHToken.NEWLINE, '\n'),
(AHToken.EndBlock, None),
(AHToken.EndBlock, None),
(AHToken.ID, 'd'),
(AHToken.NEWLINE, None)
])
test4 = self.lex("""
where where
a
b where
c
""")
self.checkOutput(test4, [
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.ID, 'a'),
(AHToken.NEWLINE, '\n'),
(AHToken.EndBlock, None),
(AHToken.ID, 'b'),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.EndBlock, None),
(AHToken.EndBlock, None),
(AHToken.ID, 'c'),
(AHToken.NEWLINE, '\n')
])
test5 = self.lex("where where where")
self.checkOutput(test5, [
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.EndBlock, None),
(AHToken.EndBlock, None),
(AHToken.EndBlock, None)
])
test6 = self.lex("where where where \n \n\n ")
self.checkOutput(test6, [
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.EndBlock, None),
(AHToken.EndBlock, None),
(AHToken.EndBlock, None)
])
def testNewlines(self):
test1 = self.lex("a\n\n\n b")
self.checkOutput(test1, [
(AHToken.ID, 'a'),
(AHToken.ID, 'b'),
(AHToken.NEWLINE, None)])
test2 = self.lex("a\n\n\nb\n c\n\n d")
self.checkOutput(test2, [
(AHToken.ID, 'a'),
(AHToken.NEWLINE, '\n'),
(AHToken.ID, 'b'),
(AHToken.ID, 'c'),
(AHToken.ID, 'd'),
(AHToken.NEWLINE, None)
])
def testBadIndentation(self):
testBelowLowest1 = """
firstToken
badToken"""
self.assertRaises(LexerError, self.lex, testBelowLowest1)
testBelowLowest2 = """
firstToken where
abc where
d
badToken -- Oh no"""
self.assertRaises(LexerError, self.lex, testBelowLowest2)
testBadIndent1 = """
a where
blockContent
badToken
"""
self.assertRaises(LexerError, self.lex, testBadIndent1)
| mit | 3,729,728,006,217,532,000 | 31.28125 | 80 | 0.488771 | false | 3.549828 | true | false | false |
gizmachi/ct_tools | gaol/gaol_lib.py | 1 | 1947 |
import json
import urllib
import urllib2
import ssl
import base64
from lib import *
class sslparameters:
sslcontext = None
def get_opener():
try:
opener = urllib2.build_opener(urllib2.HTTPSHandler(context=sslparameters.sslcontext))
except TypeError:
opener = urllib2.build_opener(urllib2.HTTPSHandler())
return opener
def urlopen(url, data=None):
return get_opener().open(url, data)
def get_sth(baseurl):
result = urlopen(baseurl + "gaol/v1/get-sth").read()
return json.loads(result)
def get_entries(baseurl, start, end):
params = urllib.urlencode({"start":start, "end":end})
# try:
result = urlopen(baseurl + "gaol/v1/get-entries?" + params).read()
return json.loads(result)
def get_consistency_proof(baseurl, tree_size1, tree_size2):
# try:
params = urllib.urlencode({"first":tree_size1,
"second":tree_size2})
result = \
urlopen(baseurl + "gaol/v1/get-sth-consistency?" + params).read()
return json.loads(result)["consistency"]
# except urllib2.HTTPError, e:
# print "ERROR:", e.read()
# sys.exit(1)
def extract_original_entry(entry):
leaf_input = base64.decodestring(entry["leaf_input"])
(data_blob, timestamp, issuer_key_hash) = unpack_mtl(leaf_input)
return (data_blob, timestamp)
def make_blob(data):
return base64.b64encode(data)
def add_blob(baseurl, blob):
try:
result = urlopen(baseurl + "gaol/v1/add-blob", json.dumps({"blob" : blob})).read()
return json.loads(result)
except urllib2.HTTPError, e:
return "ERROR " + str(e.code) + " : " + e.read()
# if e.code == 400:
return None
# sys.exit(1)
except ValueError, e:
print "==== FAILED REQUEST ===="
print submission
print "======= RESPONSE ======="
print result
print "========================"
raise e
| bsd-3-clause | 5,098,226,471,701,478,000 | 28.515152 | 93 | 0.608115 | false | 3.520796 | false | false | false |
CCallahanIV/data-structures | src/test_priorityq.py | 1 | 4519 |
"""This Module contains testing for the Priority Q."""
import pytest
TEST_SET = [
[(17, 1), (99, 2), (15, 1), (99, 3), (1, 2), (9, 3)]
]
BAD_PRIO = [True, False, [1, 2], (), {"oops": "This is bad"}, "No more, please!"]
BAD_INIT = [[(1, 2), (1, 2, 3)], True, False, "whoops"]
@pytest.fixture
def empty_priority_q():
"""Thie fixture creates and empty priority queue."""
from priorityq import PriorityQ
new_pq = PriorityQ()
return new_pq
@pytest.fixture
def filled_priority_q():
"""The fixture creates a filled priority queue."""
from priorityq import PriorityQ
new_pq = PriorityQ(TEST_SET[0])
return new_pq
def test_creation_of_empty_priority_q(empty_priority_q):
"""The creates an empty queue and tests the size."""
assert len(empty_priority_q) == 0
assert empty_priority_q._high_p is None
assert len(empty_priority_q._pdict) == 0
def test_initialize_with_single_tuple():
"""The test initializes priority q with a single tuple."""
from priorityq import PriorityQ
new_pq = PriorityQ((3, 2))
assert len(new_pq) == 1
assert new_pq._high_p == 2
assert new_pq.peek() == 3
def test_intitalize_with_single_digit():
"""The test initialized a prio q with a single digit."""
from priorityq import PriorityQ
with pytest.raises(TypeError):
PriorityQ(3)
# def test_intialize_with_bad_format_raises_type_error():
# """Test initializing with badly formatted arguments."""
# from priorityq import PriorityQ
# for item in BAD_INIT:
# with pytest.raises(TypeError):
# PriorityQ(item)
def test_insert_empty_with_val_and_no_prio(empty_priority_q):
"""The test inserts val w/o prio to empty list."""
empty_priority_q.insert(4)
assert empty_priority_q._high_p == 0
assert empty_priority_q._pdict[0].peek() == 4
def test_insert_filled_with_val_and_prio_where_prio_not_already_there(filled_priority_q):
"""The test inserts with val and prio, where prio not already there."""
filled_priority_q.insert(7, 4)
assert filled_priority_q._pdict[4].peek() == 7
def insert_val_into_empty_priorty_q(empty_priority_q):
"""The tests inserting into an empty priority queue."""
new_prq = empty_priority_q
new_prq.insert(3, 1)
assert len(empty_priority_q) == 1
assert empty_priority_q._high_p[0] == 3
assert empty_priority_q._pdict[0] == 3
def test_insert_into_full_prio_already_there(filled_priority_q):
"""Test inserting into a filled priority q, with priority already present."""
old_len = len(filled_priority_q)
filled_priority_q.insert("something", 1)
assert len(filled_priority_q) == old_len + 1
assert filled_priority_q.peek() == 17
def test_insert_into_full_with_an_iterable(filled_priority_q):
"""Test attempting to insert into a priority q with an iterable."""
with pytest.raises(TypeError):
filled_priority_q.insert([1, 2, 3])
def test_insert_weird_cases_for_priority(empty_priority_q):
"""Test that priorities can only be int."""
for item in BAD_PRIO:
with pytest.raises(TypeError):
empty_priority_q.insert("anything", item)
def pop_filled_priorty_q(filled_priority_q):
"""The tests inserting into a filled priority queue."""
new_fprq = filled_priority_q
val = new_fprq.pop()
assert len(filled_priority_q) == 5
assert filled_priority_q._high_p[0] == 1
assert val == 17
assert filled_priority_q.peek() == 15
def test_pop_on_empty_priority_q(empty_priority_q):
"""Test popping on an empty priority q."""
with pytest.raises(IndexError):
empty_priority_q.pop()
def test_pop_on_filled_until_empty(filled_priority_q):
"""Test pop on filled Priority Q until empty."""
expected = [17, 15, 99, 1, 99, 9]
for i in range(len(filled_priority_q)):
assert filled_priority_q.pop() == expected[i]
assert len(filled_priority_q) == 0
assert filled_priority_q._high_p is None
def test_peek_on_empty(empty_priority_q):
"""Test peek() on an empty priority Q, should return None."""
assert empty_priority_q.peek() is None
def test_peek_on_filled(filled_priority_q):
"""Test peek() on a filled priorityq."""
assert filled_priority_q.peek() == 17
def test_len_on_filled(filled_priority_q):
"""Test len method on full PQ."""
assert len(filled_priority_q) == len(TEST_SET[0])
def test_len_on_empty(empty_priority_q):
"""Test len method on empty PQ."""
assert len(empty_priority_q) == 0
| mit | -5,064,511,433,917,170,000 | 30.381944 | 89 | 0.658553 | false | 3.262816 | true | false | false |
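The tests above pin down the PriorityQ behaviour fairly precisely: lower integer priorities are served first, ties are FIFO, `_high_p` tracks the smallest priority present, and `_pdict` maps each priority to a bucket that has its own `peek()`. Below is a hedged sketch of an implementation that would satisfy those assertions; it is reconstructed from the tests only, and the real `priorityq` module may be organised differently.

```python
from collections import deque


class _FifoQ(object):
    """Minimal FIFO bucket exposing the peek() the tests poke at."""
    def __init__(self):
        self._items = deque()

    def push(self, val):
        self._items.append(val)

    def pop(self):
        return self._items.popleft()

    def peek(self):
        return self._items[0] if self._items else None

    def __len__(self):
        return len(self._items)


class PriorityQ(object):
    """Lower integer priority is served first; ties are served FIFO."""
    def __init__(self, iterable=None):
        self._pdict = {}      # priority -> _FifoQ of values
        self._high_p = None   # smallest priority currently present
        self._size = 0
        if iterable is not None:
            if isinstance(iterable, tuple):
                iterable = [iterable]
            for val, prio in iterable:
                self.insert(val, prio)

    def insert(self, val, prio=0):
        if isinstance(val, (list, tuple, dict, set)):
            raise TypeError("iterable values are not allowed")
        if not isinstance(prio, int) or isinstance(prio, bool):
            raise TypeError("priority must be an int")
        self._pdict.setdefault(prio, _FifoQ()).push(val)
        self._high_p = prio if self._high_p is None else min(self._high_p, prio)
        self._size += 1

    def pop(self):
        if not self._size:
            raise IndexError("pop from an empty PriorityQ")
        bucket = self._pdict[self._high_p]
        val = bucket.pop()
        if not len(bucket):
            del self._pdict[self._high_p]
            self._high_p = min(self._pdict) if self._pdict else None
        self._size -= 1
        return val

    def peek(self):
        return self._pdict[self._high_p].peek() if self._size else None

    def __len__(self):
        return self._size
```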
platformio/platformio-core | platformio/commands/update.py | 1 | 2091 |
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from platformio.cache import cleanup_content_cache
from platformio.commands.lib.command import CTX_META_STORAGE_DIRS_KEY
from platformio.commands.lib.command import lib_update as cmd_lib_update
from platformio.commands.platform import platform_update as cmd_platform_update
from platformio.package.manager.core import update_core_packages
from platformio.package.manager.library import LibraryPackageManager
@click.command(
"update", short_help="Update installed platforms, packages and libraries"
)
@click.option("--core-packages", is_flag=True, help="Update only the core packages")
@click.option(
"-c",
"--only-check",
is_flag=True,
help="DEPRECATED. Please use `--dry-run` instead",
)
@click.option(
"--dry-run", is_flag=True, help="Do not update, only check for the new versions"
)
@click.pass_context
def cli(ctx, core_packages, only_check, dry_run):
# cleanup lib search results, cached board and platform lists
cleanup_content_cache("http")
only_check = dry_run or only_check
update_core_packages(only_check)
if core_packages:
return
click.echo()
click.echo("Platform Manager")
click.echo("================")
ctx.invoke(cmd_platform_update, only_check=only_check)
click.echo()
click.echo("Library Manager")
click.echo("===============")
ctx.meta[CTX_META_STORAGE_DIRS_KEY] = [LibraryPackageManager().package_dir]
ctx.invoke(cmd_lib_update, only_check=only_check)
| apache-2.0 | -3,055,954,892,819,605,500 | 34.440678 | 84 | 0.726925 | false | 3.747312 | false | false | false |
fridayy/movie-trailer-uc | main/fresh_tomatoes.py | 1 | 6763 |
import webbrowser
import os
import re
# Styles and scripting for the page
main_page_head = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Fresh Tomatoes!</title>
<!-- Bootstrap 3 -->
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap.min.css">
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap-theme.min.css">
<script src="http://code.jquery.com/jquery-1.10.1.min.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/js/bootstrap.min.js"></script>
<link href='https://fonts.googleapis.com/css?family=Droid+Sans:400,700' rel='stylesheet' type='text/css'>
<style type="text/css" media="screen">
body {
padding-top: 80px;
font-family: 'Droid Sans', sans-serif;
}
#trailer .modal-dialog {
margin-top: 200px;
width: 640px;
height: 480px;
}
.hanging-close {
position: absolute;
top: -12px;
right: -12px;
z-index: 9001;
}
#trailer-video {
width: 100%;
height: 100%;
}
.movie-tile {
margin-bottom: 20px;
padding-top: 20px;
cursor: default;
}
.movie-tile:hover {
background-color: #EEE;
cursor: pointer;
}
.scale-media {
padding-bottom: 56.25%;
position: relative;
}
.scale-media iframe {
border: none;
height: 100%;
position: absolute;
width: 100%;
left: 0;
top: 0;
background-color: white;
}
.panel-heading {
background: #212121 !important;
text-align: left;
}
.panel-body {
text-align: left;
}
.panel-title {
color: white;
}
</style>
<script type="text/javascript" charset="utf-8">
//initialize tooltips
$(function () {
$('[data-toggle="tooltip"]').tooltip()
})
// Pause the video when the modal is closed
$(document).on('click', '.hanging-close, .modal-backdrop, .modal', function (event) {
// Remove the src so the player itself gets removed, as this is the only
// reliable way to ensure the video stops playing in IE
$("#trailer-video-container").empty();
});
// Start playing the video whenever the trailer modal is opened
$(document).on('click', '.movie-tile', function (event) {
var trailerYouTubeId = $(this).attr('data-trailer-youtube-id')
var sourceUrl = 'http://www.youtube.com/embed/' + trailerYouTubeId + '?autoplay=1&html5=1';
$("#trailer-video-container").empty().append($("<iframe></iframe>", {
'id': 'trailer-video',
'type': 'text-html',
'src': sourceUrl,
'frameborder': 0
}));
});
// Animate in the movies when the page loads
$(document).ready(function () {
$('.movie-tile').hide().first().show("fast", function showNext() {
$(this).next("div").show("fast", showNext);
});
});
</script>
</head>
'''
# The main page layout and title bar
main_page_content = '''
<body>
<!-- Trailer Video Modal -->
<div class="modal" id="trailer">
<div class="modal-dialog">
<div class="modal-content">
<a href="#" class="hanging-close" data-dismiss="modal" aria-hidden="true">
<img src="https://lh5.ggpht.com/v4-628SilF0HtHuHdu5EzxD7WRqOrrTIDi_MhEG6_qkNtUK5Wg7KPkofp_VJoF7RS2LhxwEFCO1ICHZlc-o_=s0#w=24&h=24"/>
</a>
<div class="scale-media" id="trailer-video-container">
</div>
</div>
</div>
</div>
<!-- Main Page Content -->
<div class="container">
<div class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<a class="navbar-brand" href="#">Fresh Tomatoes Movie Trailers</a>
</div>
</div>
</div>
</div>
<div class="container">
{movie_tiles}
</div>
</body>
</html>
'''
# A single movie entry html template
movie_tile_content = '''
<div class="col-md-6 col-lg-4 movie-tile text-center" data-trailer-youtube-id="{trailer_youtube_id}" data-toggle="modal" data-target="#trailer">
<img src="{poster_image_url}" width="220" height="342">
<h2 data-toggle="tooltip" data-placement="right" title="Click to open trailer...">{movie_title}</h2>
<div class="panel panel-default">
<div class="panel-heading">
<h3 class="panel-title">Information</h3>
</div>
<div class="panel-body">
<p>Release Date: {movie_year}</p>
<p>Runtime: {movie_runtime}</p>
<p>Country: {movie_country}</p>
<p>Actors: {movie_actors}</p>
<p>{movie_plot}</p>
</div>
</div>
</div>
'''
def create_movie_tiles_content(movies):
# The HTML content for this section of the page
content = ''
for movie in movies:
# Extract the youtube ID from the url
youtube_id_match = re.search(
r'(?<=v=)[^&#]+', movie.trailer_youtube_url)
youtube_id_match = youtube_id_match or re.search(
r'(?<=be/)[^&#]+', movie.trailer_youtube_url)
trailer_youtube_id = (youtube_id_match.group(0) if youtube_id_match
else None)
# Append the tile for the movie with its content filled in
content += movie_tile_content.format(
movie_title=movie.title,
movie_year=movie.year,
movie_runtime=movie.runtime,
movie_country=movie.country,
movie_actors=movie.actors,
movie_genre=movie.genre,
movie_plot=movie.plot,
poster_image_url=movie.poster_image_url,
trailer_youtube_id=trailer_youtube_id
)
return content
def open_movies_page(movies):
# Create or overwrite the output file
output_file = open('fresh_tomatoes.html', 'w')
# Replace the movie tiles placeholder generated content
rendered_content = main_page_content.format(
movie_tiles=create_movie_tiles_content(movies))
# Output the file
output_file.write(main_page_head + rendered_content)
output_file.close()
# open the output file in the browser (in a new tab, if possible)
url = os.path.abspath(output_file.name)
webbrowser.open('file://' + url, new=2)
| gpl-3.0 | -8,687,864,254,606,987,000 | 32.815 | 144 | 0.554488 | false | 3.632116 | false | false | false |
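A small, hedged illustration of the trailer-URL parsing in `create_movie_tiles_content` above: the two regexes cover the `watch?v=` and `youtu.be/` URL shapes. The example URLs below are made up for the demonstration.

```python
import re

for url in ("https://www.youtube.com/watch?v=dQw4w9WgXcQ&t=1s",
            "https://youtu.be/dQw4w9WgXcQ"):
    # First try the watch?v=<id> form, then the youtu.be/<id> short form.
    match = (re.search(r'(?<=v=)[^&#]+', url)
             or re.search(r'(?<=be/)[^&#]+', url))
    print(match.group(0))  # dQw4w9WgXcQ in both cases
```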
shiyifuchen/PyFem | pyfem/pyfem/fem/Assembly.py | 1 | 4289 |
# -*- coding: utf-8 -*-
from numpy import zeros, ones, ix_
from pyfem.util.dataStructures import Properties
from pyfem.util.dataStructures import elementData
from scipy.sparse import lil_matrix
import time
#######################################
# General array assembly routine for: #
# * assembleInternalForce #
# * assembleTangentStiffness #
#######################################
def assembleArray ( props, globdat, rank, action ):
t0=time.time()
#Initialize the global array A with rank 2
A = lil_matrix((len(globdat.dofs),len(globdat.dofs)))
B = zeros( len(globdat.dofs) * ones(1,dtype=int) )
globdat.resetNodalOutput()
outlabel=[]
if hasattr(props,'outlabel'):
outlabel = getattr(props,'outlabel')
#Loop over the element groups
for elementGroup in globdat.elements.iterGroupNames():
#Get the properties corresponding to the elementGroup
el_props = getattr( props, elementGroup )
#Loop over the elements in the elementGroup
for element in globdat.elements.iterElementGroup( elementGroup ):
#Get the element nodes
el_nodes = element.getNodes()
#Get the element coordinates
el_coords = globdat.nodes.getNodeCoords( el_nodes )
#Get the element degrees of freedom
el_dofs = globdat.dofs.get( el_nodes )
#Get the element state
el_a = globdat.state [el_dofs].copy()
el_Da = globdat.Dstate[el_dofs].copy()
factor1 = 1.0
factor2 = 1.0
if elementGroup in props.kill:
el_a = zeros(el_a.shape)
el_Da = zeros(el_Da.shape)
factor1 = 0.0
factor2 = 1e-6
if hasattr(element,"mat"):
element.mat.clearHistory()
# if elementGroup == 'Elem1':
# el_a = zeros(el_a.shape)
# el_Da = zeros(el_Da.shape)
#Create an element state to pass through to the element
#el_state = Properties( { 'state' : el_a, 'Dstate' : el_Da } )
elemdat = elementData( el_a , el_Da )
elemdat.coords = el_coords
elemdat.nodes = el_nodes
elemdat.props = el_props
elemdat.outlabel = outlabel
if hasattr( element , "matProps" ):
elemdat.matprops = element.matProps
if hasattr( element , "mat" ):
element.mat.reset()
#Get the element contribution by calling the specified action
getattr( element, action )( elemdat )
# for label in elemdat.outlabel:
# element.appendNodalOutput( label , globdat , elemdat.outdata )
if rank == 0:
if elementGroup in props.kill:
continue
for i,label in enumerate(elemdat.outlabel):
element.appendNodalOutput( label , globdat , elemdat.outdata[i] )
elif rank == 1:
B[el_dofs] += elemdat.fint*factor1
elif rank == 2 and action == "getTangentStiffness":
A[ix_(el_dofs,el_dofs)] += elemdat.stiff*factor2
B[el_dofs] += elemdat.fint*factor1
elif rank == 2 and action == "getMassMatrix":
A[ix_(el_dofs,el_dofs)] += elemdat.mass*factor1
B[el_dofs] += elemdat.lumped*factor1
else:
raise NotImplementedError('assembleArray is only implemented for vectors and matrices.')
# A=A.tocsr()
t1=time.time()
print "Time Elapse for Assembly: ",t1-t0
if rank == 1:
return B
elif rank == 2:
return A.tocsr(),B
##########################################
# Internal force vector assembly routine #
##########################################
def assembleInternalForce ( props, globdat ):
return assembleArray( props, globdat, rank = 1, action = 'getInternalForce' )
#############################################
# Tangent stiffness matrix assembly routine #
#############################################
def assembleTangentStiffness ( props, globdat ):
return assembleArray( props, globdat, rank = 2, action = 'getTangentStiffness' )
#############################################
# Mass matrix assembly routine #
#############################################
def assembleMassMatrix ( props, globdat ):
return assembleArray( props, globdat, rank = 2, action = 'getMassMatrix' )
def assembleOutputData ( props, globdat ):
return assembleArray( props, globdat, rank = 0, action = 'getOutputData' )
| gpl-3.0 | 7,672,155,459,087,517,000 | 31.5 | 95 | 0.592213 | false | 3.653322 | false | false | false |
3dfxsoftware/cbss-addons | project_conf/model/project.py | 1 | 4367 |
#
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_task(osv.osv):
_inherit = 'project.task'
def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=['user_id'], context=None):
res = super(project_task, self)._message_get_auto_subscribe_fields(cr, uid, updated_fields, auto_follow_fields=auto_follow_fields, context=context)
res.append('project_leader_id')
return res
def send_mail_task_new_test(self, cr, uid, ids, context=None):
'''
Automatically send mail when a task moves to the Backlog or Testing Leader stage.
'''
context = context or {}
#Don't pass context, so read() returns the stage name untranslated (not in the user's language)
if ids.get('stage_id'):
type = self.pool.get('project.task.type').read(cr, uid, ids['stage_id'][0], ['name'])
if type.get('name', False) == 'Backlog':
self.send_mail_task(cr,uid,ids,'template_send_email_task_new',context)
elif type.get('name', False) == 'Testing Leader':
self.send_mail_task(cr,uid,ids,'template_send_email_task_end',context)
def send_mail_task(self,cr,uid,ids,template,context=None):
imd_obj = self.pool.get('ir.model.data')
template_ids = imd_obj.search(
cr, uid, [('model', '=', 'email.template'), ('name', '=', template)])
if template_ids:
res_id = imd_obj.read(
cr, uid, template_ids, ['res_id'])[0]['res_id']
followers = self.read(cr, uid, ids.get('id'), [
'message_follower_ids'])['message_follower_ids']
ids = [ids.get('id')]
body_html = self.pool.get('email.template').read(
cr, uid, res_id, ['body_html']).get('body_html')
context.update({'default_template_id': res_id,
'default_body': body_html,
'default_use_template': True,
'default_composition_mode': 'comment',
'active_model': 'project.task',
'default_partner_ids': followers,
'mail_post_autofollow_partner_ids': followers,
'active_id': ids and type(ids) is list and
ids[0] or ids,
'active_ids': ids and type(ids) is list and
ids or [ids],
})
mail_obj = self.pool.get('mail.compose.message')
fields = mail_obj.fields_get(cr, uid)
mail_ids = mail_obj.default_get(
cr, uid, fields.keys(), context=context)
mail_ids.update(
{'model': 'project.task', 'body': body_html, 'composition_mode': 'mass_mail', 'partner_ids': [(6, 0, followers)]})
mail_ids = mail_obj.create(cr, uid, mail_ids, context=context)
mail_obj.send_mail(cr, uid, [mail_ids], context=context)
return False
_track = {'stage_id': {'project.mt_task_stage': send_mail_task_new_test, }}
_columns = {
'project_leader_id': fields.many2one('res.users','Project Leader',help="""Person responsible of task review, when is in Testing Leader state. The person should review: Work Summary, Branch and Make Functional Tests. When everything works this person should change task to done."""),
}
_defaults = {
'project_leader_id': lambda obj,cr,uid,context: uid,
}
| gpl-2.0 | -1,541,785,040,471,365,400 | 45.956989 | 294 | 0.580948 | false | 3.87833 | true | false | false |
stephrdev/django-formwizard | formwizard/storage/cookie.py | 1 | 1712 |
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils import simplejson as json
from django.utils.hashcompat import sha_constructor
import hmac
from formwizard import storage
sha_hmac = sha_constructor
class CookieStorage(storage.BaseStorage):
encoder = json.JSONEncoder(separators=(',', ':'))
def __init__(self, *args, **kwargs):
super(CookieStorage, self).__init__(*args, **kwargs)
self.data = self.load_data()
if self.data is None:
self.init_data()
def unsign_cookie_data(self, data):
if data is None:
return None
bits = data.split('$', 1)
if len(bits) == 2:
if bits[0] == self.get_cookie_hash(bits[1]):
return bits[1]
raise SuspiciousOperation('FormWizard cookie manipulated')
def load_data(self):
data = self.request.COOKIES.get(self.prefix, None)
cookie_data = self.unsign_cookie_data(data)
if cookie_data is None:
return None
return json.loads(cookie_data, cls=json.JSONDecoder)
def update_response(self, response):
if self.data:
response.set_cookie(self.prefix, self.create_cookie_data(self.data))
else:
response.delete_cookie(self.prefix)
return response
def create_cookie_data(self, data):
encoded_data = self.encoder.encode(self.data)
cookie_data = '%s$%s' % (self.get_cookie_hash(encoded_data),
encoded_data)
return cookie_data
def get_cookie_hash(self, data):
return hmac.new('%s$%s' % (settings.SECRET_KEY, self.prefix),
data, sha_hmac).hexdigest()
| bsd-3-clause | -7,821,872,325,361,776,000 | 30.127273 | 80 | 0.625584 | false | 3.855856 | false | false | false |
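For reference, a hedged sketch of the cookie wire format `CookieStorage` produces: the compact JSON payload prefixed by its HMAC-SHA1 digest and a `$` separator. The secret key and payload below are made up; this mirrors `create_cookie_data`/`get_cookie_hash` under Python 2, which the module targets.

```python
import hmac
import json
from hashlib import sha1

SECRET_KEY, prefix = 'not-a-real-secret', 'formwizard'
payload = json.JSONEncoder(separators=(',', ':')).encode({'step': '1'})
signature = hmac.new('%s$%s' % (SECRET_KEY, prefix), payload, sha1).hexdigest()
cookie_value = '%s$%s' % (signature, payload)  # e.g. 'a94c...${"step":"1"}'
# unsign_cookie_data() splits on the first '$', recomputes the digest over the
# payload, and raises SuspiciousOperation on any mismatch.
```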
MalloyDelacroix/DownloaderForReddit | DownloaderForReddit/viewmodels/add_reddit_object_list_model.py | 1 | 5090 |
"""
Downloader for Reddit takes a list of reddit users and subreddits and downloads content posted to reddit either by the
users or on the subreddits.
Copyright (C) 2017, Kyle Hickey
This file is part of the Downloader for Reddit.
Downloader for Reddit is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Downloader for Reddit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Downloader for Reddit. If not, see <http://www.gnu.org/licenses/>.
"""
from PyQt5.QtCore import QAbstractListModel, QModelIndex, Qt, QThread, pyqtSignal
from PyQt5.QtGui import QPixmap
import os
from queue import Queue
from ..utils.reddit_utils import NameChecker
class AddRedditObjectListModel(QAbstractListModel):
"""
A list model that handles the list view for the AddRedditObjectDialog.
"""
name_list_updated = pyqtSignal()
def __init__(self, object_type, parent=None):
super().__init__()
self.parent = parent
self.object_type = object_type
self.queue = Queue()
self.name_list = []
self.validation_dict = {}
self.complete_reddit_object_list = []
self.name_checker = None
self.start_name_check_thread()
self.checker_running = True
valid_path = os.path.abspath('Resources/Images/valid_checkmark.png')
non_valid_path = os.path.abspath('Resources/Images/non_valid_x.png')
self.valid_img = QPixmap(valid_path)
self.non_valid_img = QPixmap(non_valid_path)
def rowCount(self, parent=None, *args, **kwargs):
return len(self.name_list)
def insertRow(self, name, parent=QModelIndex(), *args, **kwargs):
self.beginInsertRows(parent, self.rowCount() - 1, self.rowCount())
self.name_list.append(name)
self.validation_dict[name] = None
self.queue.put(name)
self.name_list_updated.emit()
self.endInsertRows()
return True
def removeRows(self, pos, rows, parent=QModelIndex(), *args, **kwargs):
self.beginRemoveRows(parent, pos, pos + rows - 1)
for x in range(rows):
name = self.name_list[pos]
self.name_list.remove(name)
del self.validation_dict[name]
try:
del self.complete_reddit_object_list[pos]
except IndexError:
pass
self.name_list_updated.emit()
self.endRemoveRows()
return True
def clear_non_valid(self):
"""Removes all non-valid names from the name list."""
name_list = []
for key, value in self.validation_dict.items():
if not value:
name_list.append(key)
for name in name_list:
self.removeRows(self.name_list.index(name), 1)
def data(self, index, role=Qt.DisplayRole):
if role == Qt.DisplayRole:
return self.name_list[index.row()]
elif role == Qt.DecorationRole:
name = self.name_list[index.row()]
if self.validation_dict[name] is None:
return None
if self.validation_dict[name]:
return self.valid_img
else:
return self.non_valid_img
def add_complete_object(self, reddit_object):
"""
Adds a complete reddit object to the complete reddit object list, and inserts its name into
the data view so it can be checked by the name checker.
:param reddit_object: A complete reddit object that has been imported from a file.
"""
self.complete_reddit_object_list.append(reddit_object)
self.insertRow(reddit_object.name)
def start_name_check_thread(self):
"""Initializes a NameChecker object, then runs it in another thread."""
self.name_checker = NameChecker(self.object_type, self.queue)
self.thread = QThread(self)
self.name_checker.moveToThread(self.thread)
self.name_checker.name_validation.connect(self.validate_name)
self.name_checker.finished.connect(self.thread.quit)
self.name_checker.finished.connect(self.name_checker.deleteLater)
self.thread.finished.connect(self.thread.deleteLater)
self.thread.started.connect(self.name_checker.run)
self.thread.start()
def stop_name_checker(self):
if self.name_checker:
self.name_checker.stop_run()
def validate_name(self, name_tup):
try:
self.validation_dict[name_tup[0]] = name_tup[1]
index = self.createIndex(self.name_list.index(name_tup[0]), 0)
self.dataChanged.emit(index, index)
except ValueError:
self.stop_name_checker()
| gpl-3.0 | 6,999,803,051,143,106,000 | 36.426471 | 118 | 0.651277 | false | 3.885496 | false | false | false |
binarybana/samcnet | exps/priorstrength.py | 1 | 2301 |
import sys, os, random
import zlib, cPickle
############### SAMC Setup ###############
import numpy as np
import scipy as sp
import networkx as nx
from samcnet.samc import SAMCRun
from samcnet.bayesnetcpd import BayesNetSampler, BayesNetCPD
from samcnet import utils
from samcnet.generator import *
if 'WORKHASH' in os.environ:
try:
redis_server = os.environ['REDIS']
import redis
r = redis.StrictRedis(redis_server)
except:
sys.exit("ERROR in worker: Need REDIS environment variable defined.")
############### /SAMC Setup ###############
N = 9
iters = 3e5
numdata = 0 #NEED TO ADD NOISE FIRST
temperature = 1.0
burn = 1000
stepscale = 10000
thin = 10
refden = 0.0
random.seed(12345)
np.random.seed(12345)
groundgraph = generateHourGlassGraph(nodes=N)
#joint, states = generateJoint(groundgraph, method='dirichlet')
joint, states = generateJoint(groundgraph, method='noisylogic')
data = generateData(groundgraph, joint, numdata)
groundbnet = BayesNetCPD(states, data, limparent=3)
groundbnet.set_cpds(joint)
if 'WORKHASH' in os.environ:
jobhash = os.environ['WORKHASH']
if not r.hexists('jobs:grounds', jobhash):
r.hset('jobs:grounds', jobhash, zlib.compress(cPickle.dumps(groundgraph)))
random.seed()
np.random.seed()
#p_struct = float(sys.argv[1])
p_struct = 30.0
for numtemplate in [4,8]:
for cpd in [True, False]:
if cpd:
p_cpd = p_struct
else:
p_cpd = 0.0
random.seed(12345)
np.random.seed(12345)
obj = BayesNetCPD(states, data, limparent=3)
template = sampleTemplate(groundgraph, numtemplate)
random.seed()
np.random.seed()
b = BayesNetSampler(obj,
template,
groundbnet,
p_struct=p_struct,
p_cpd=p_cpd)
s = SAMCRun(b,burn,stepscale,refden,thin)
s.sample(iters, temperature)
s.compute_means(cummeans=False)
if 'WORKHASH' in os.environ:
r.lpush('jobs:done:' + jobhash, s.read_db())
r.lpush('custom:%s:p_struct=%d:ntemplate=%d:p_cpd=%d' %
(jobhash, int(p_struct*10), numtemplate, int(p_cpd*10)),
s.db.root.computed.means._v_attrs['kld'] )
s.db.close()
| mit | 5,560,503,565,455,464,000 | 26.722892 | 82 | 0.616688 | false | 3.178177 | false | false | false |
cordery/django-countries-plus | countries_plus/utils.py | 1 | 7136 |
# coding=utf-8
import re
import requests
import six
from django.core.exceptions import ValidationError
from .models import Country
DATA_HEADERS_ORDERED = [
'ISO', 'ISO3', 'ISO-Numeric', 'fips', 'Country', 'Capital', 'Area(in sq km)',
'Population', 'Continent', 'tld', 'CurrencyCode', 'CurrencyName', 'Phone',
'Postal Code Format', 'Postal Code Regex', 'Languages', 'geonameid', 'neighbours',
'EquivalentFipsCode'
]
DATA_HEADERS_MAP = {
'ISO': 'iso',
'ISO3': 'iso3',
'ISO-Numeric': 'iso_numeric',
'fips': 'fips',
'Country': 'name',
'Capital': 'capital',
'Area(in sq km)': 'area',
'Population': 'population',
'Continent': 'continent',
'tld': 'tld',
'CurrencyCode': 'currency_code',
'CurrencyName': 'currency_name',
'Phone': 'phone',
'Postal Code Format': 'postal_code_format',
'Postal Code Regex': 'postal_code_regex',
'Languages': 'languages',
'geonameid': 'geonameid',
'neighbours': 'neighbours',
'EquivalentFipsCode': 'equivalent_fips_code'
}
CURRENCY_SYMBOLS = {
"AED": "د.إ",
"AFN": "؋",
"ALL": "L",
"AMD": "դր.",
"ANG": "ƒ",
"AOA": "Kz",
"ARS": "$",
"AUD": "$",
"AWG": "ƒ",
"AZN": "m",
"BAM": "KM",
"BBD": "$",
"BDT": "৳",
"BGN": "лв",
"BHD": "ب.د",
"BIF": "Fr",
"BMD": "$",
"BND": "$",
"BOB": "Bs.",
"BRL": "R$",
"BSD": "$",
"BTN": "Nu",
"BWP": "P",
"BYR": "Br",
"BZD": "$",
"CAD": "$",
"CDF": "Fr",
"CHF": "Fr",
"CLP": "$",
"CNY": "¥",
"COP": "$",
"CRC": "₡",
"CUP": "$",
"CVE": "$, Esc",
"CZK": "Kč",
"DJF": "Fr",
"DKK": "kr",
"DOP": "$",
"DZD": "د.ج",
"EEK": "KR",
"EGP": "£,ج.م",
"ERN": "Nfk",
"ETB": "Br",
"EUR": "€",
"FJD": "$",
"FKP": "£",
"GBP": "£",
"GEL": "ლ",
"GHS": "₵",
"GIP": "£",
"GMD": "D",
"GNF": "Fr",
"GTQ": "Q",
"GYD": "$",
"HKD": "$",
"HNL": "L",
"HRK": "kn",
"HTG": "G",
"HUF": "Ft",
"IDR": "Rp",
"ILS": "₪",
"INR": "₨",
"IQD": "ع.د",
"IRR": "﷼",
"ISK": "kr",
"JMD": "$",
"JOD": "د.ا",
"JPY": "¥",
"KES": "Sh",
"KGS": "лв",
"KHR": "៛",
"KMF": "Fr",
"KPW": "₩",
"KRW": "₩",
"KWD": "د.ك",
"KYD": "$",
"KZT": "Т",
"LAK": "₭",
"LBP": "ل.ل",
"LKR": "ரூ",
"LRD": "$",
"LSL": "L",
"LTL": "Lt",
"LVL": "Ls",
"LYD": "ل.د",
"MAD": "د.م.",
"MDL": "L",
"MGA": "Ar",
"MKD": "ден",
"MMK": "K",
"MNT": "₮",
"MOP": "P",
"MRO": "UM",
"MUR": "₨",
"MVR": "ރ.",
"MWK": "MK",
"MXN": "$",
"MYR": "RM",
"MZN": "MT",
"NAD": "$",
"NGN": "₦",
"NIO": "C$",
"NOK": "kr",
"NPR": "₨",
"NZD": "$",
"OMR": "ر.ع.",
"PAB": "B/.",
"PEN": "S/.",
"PGK": "K",
"PHP": "₱",
"PKR": "₨",
"PLN": "zł",
"PYG": "₲",
"QAR": "ر.ق",
"RON": "RON",
"RSD": "RSD",
"RUB": "р.",
"RWF": "Fr",
"SAR": "ر.س",
"SBD": "$",
"SCR": "₨",
"SDG": "S$",
"SEK": "kr",
"SGD": "$",
"SHP": "£",
"SLL": "Le",
"SOS": "Sh",
"SRD": "$",
"STD": "Db",
"SYP": "£, ل.س",
"SZL": "L",
"THB": "฿",
"TJS": "ЅМ",
"TMT": "m",
"TND": "د.ت",
"TOP": "T$",
"TRY": "₤",
"TTD": "$",
"TWD": "$",
"TZS": "Sh",
"UAH": "₴",
"UGX": "Sh",
"USD": "$",
"UYU": "$",
"UZS": "лв",
"VEF": "Bs",
"VND": "₫",
"VUV": "Vt",
"WST": "T",
"XAF": "Fr",
"XCD": "$",
"XOF": "Fr",
"XPF": "Fr",
"YER": "﷼",
"ZAR": "R",
"ZMK": "ZK",
"ZWL": "$"
}
class GeonamesParseError(Exception):
def __init__(self, message=None):
message = "I couldn't parse the Geonames file (" \
"http://download.geonames.org/export/dump/countryInfo.txt). " \
"The format may have changed. An updated version of this software may be " \
"required, " \
"please check for updates and/or raise an issue on github. Specific error: " \
"%s" % message
super(GeonamesParseError, self).__init__(message)
def update_geonames_data():
"""
Requests the countries table from geonames.org, and then calls parse_geonames_data to parse it.
:return: num_updated, num_created
:raise GeonamesParseError:
"""
r = requests.get('http://download.geonames.org/export/dump/countryInfo.txt', stream=True)
return parse_geonames_data(r.iter_lines())
def parse_geonames_data(lines_iterator):
"""
Parses countries table data from geonames.org, updating or adding records as needed.
currency_symbol is not part of the countries table and is supplemented using the data
obtained from the link provided in the countries table.
:type lines_iterator: collections.iterable
:return: num_updated: int, num_created: int
:raise GeonamesParseError:
"""
data_headers = []
num_created = 0
num_updated = 0
for line in lines_iterator:
line = line.decode()
if line[0] == "#":
if line[0:4] == "#ISO":
data_headers = line.strip('# ').split('\t')
if data_headers != DATA_HEADERS_ORDERED:
raise GeonamesParseError(
"The table headers do not match the expected headers.")
continue
if not data_headers:
raise GeonamesParseError("No table headers found.")
bits = line.split('\t')
data = {DATA_HEADERS_MAP[DATA_HEADERS_ORDERED[x]]: bits[x] for x in range(0, len(bits))}
if 'currency_code' in data and data['currency_code']:
data['currency_symbol'] = CURRENCY_SYMBOLS.get(data['currency_code'])
# Remove empty items
clean_data = {x: y for x, y in data.items() if y}
# Puerto Rico and the Dominican Republic have two phone prefixes in the format "123 and
# 456"
if 'phone' in clean_data:
if 'and' in clean_data['phone']:
clean_data['phone'] = ",".join(re.split(r'\s*and\s*', clean_data['phone']))
# Avoiding update_or_create to maintain compatibility with Django 1.5
try:
country = Country.objects.get(iso=clean_data['iso'])
created = False
except Country.DoesNotExist:
try:
country = Country.objects.create(**clean_data)
except ValidationError as e:
raise GeonamesParseError("Unexpected field length: %s" % e.message_dict)
created = True
for k, v in six.iteritems(clean_data):
setattr(country, k, v)
try:
country.save()
except ValidationError as e:
raise GeonamesParseError("Unexpected field length: %s" % e.message_dict)
if created:
num_created += 1
else:
num_updated += 1
return num_updated, num_created
| mit | 7,095,548,300,540,033,000 | 23.904255 | 99 | 0.467749 | false | 2.821615 | false | false | false |
tensorflow/graphics | tensorflow_graphics/image/color_space/tests/srgb_test.py | 1 | 3322 |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for srgb."""
from absl.testing import flagsaver
from absl.testing import parameterized
import numpy as np
from tensorflow_graphics.image.color_space import linear_rgb
from tensorflow_graphics.image.color_space import srgb
from tensorflow_graphics.util import test_case
class SrgbTest(test_case.TestCase):
def test_cycle_linear_rgb_srgb_linear_rgb_for_random_input(self):
"""Tests loop from linear RGB to sRGB and back for random inputs."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
linear_input = np.random.uniform(size=tensor_shape + [3])
srgb_output = srgb.from_linear_rgb(linear_input)
linear_reverse = linear_rgb.from_srgb(srgb_output)
self.assertAllClose(linear_input, linear_reverse)
@parameterized.parameters(
(((0., 0.5, 1.), (0.00312, 0.0031308, 0.00314)),
((0., 0.735357, 1.), (0.04031, 0.04045, 0.040567))),)
def test_from_linear_rgb_preset(self, test_inputs, test_outputs):
"""Tests conversion from linear to sRGB color space for preset inputs."""
self.assert_output_is_correct(srgb.from_linear_rgb, (test_inputs,),
(test_outputs,))
def test_from_linear_rgb_jacobian_random(self):
"""Tests the Jacobian of the from_linear_rgb function for random inputs."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
linear_random_init = np.random.uniform(size=tensor_shape + [3])
self.assert_jacobian_is_correct_fn(srgb.from_linear_rgb,
[linear_random_init])
@parameterized.parameters((np.array((0., 0.001, 0.002)),), (np.array(
(0.004, 0.005, 1.)),), (np.array((0.00312, 0.004, 0.00314)),))
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_from_linear_rgb_jacobian_preset(self, inputs_init):
"""Tests the Jacobian of the from_linear_rgb function for preset inputs."""
self.assert_jacobian_is_correct_fn(srgb.from_linear_rgb, [inputs_init])
@parameterized.parameters(
((3,),),
((None, None, None, 3),),
)
def test_from_linear_rgb_exception_not_raised(self, *shape):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(srgb.from_linear_rgb, shape)
@parameterized.parameters(
("must have a rank greater than 0", ()),
("must have exactly 3 dimensions in axis -1", (2, 3, 4)),
)
def test_from_linear_rgb_exception_raised(self, error_msg, *shape):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(srgb.from_linear_rgb, error_msg, shape)
if __name__ == "__main__":
test_case.main()
| apache-2.0 | 3,111,544,979,320,867,000 | 40.525 | 79 | 0.688742 | false | 3.456816 | true | false | false |
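The preset values in the test above (0.0031308 maps to 0.04045, 0.5 maps to 0.735357) follow the standard piecewise sRGB transfer function; here is a minimal scalar sketch for reference. The library's own vectorised implementation lives in `tensorflow_graphics.image.color_space.srgb`.

```python
def srgb_from_linear(c):
    # Standard piecewise sRGB encoding of a linear value in [0, 1]:
    # a linear segment near zero, a gamma-like curve elsewhere.
    if c <= 0.0031308:
        return 12.92 * c
    return 1.055 * c ** (1.0 / 2.4) - 0.055

print(srgb_from_linear(0.0031308))  # 0.04045...
print(srgb_from_linear(0.5))        # ~0.735357
```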
jiaaro/django-alert | alert/utils.py | 1 | 8238 |
from alert.exceptions import AlertIDAlreadyInUse, AlertBackendIDAlreadyInUse,\
InvalidApplicableUsers
import django
from django.conf import settings
from django.utils import timezone
from django.template.loader import render_to_string, get_template
from django.contrib.sites.models import Site
from django.template import TemplateDoesNotExist
from django.db import models
from itertools import islice
from alert.compat import get_user_model
ALERT_TYPES = {}
ALERT_BACKENDS = {}
ALERT_TYPE_CHOICES = []
ALERT_BACKEND_CHOICES = []
def grouper(n, iterable):
iterable = iter(iterable)
while True:
chunk = tuple(islice(iterable, n))
if not chunk: return
yield chunk
def render_email_to_string(tmpl, cx, alert_type="txt"):
cx['alert_shard_ext'] = alert_type
rendered = render_to_string(tmpl, cx)
return rendered.strip()
class AlertMeta(type):
def __new__(cls, name, bases, attrs):
new_alert = super(AlertMeta, cls).__new__(cls, name, bases, attrs)
# If this isn't a subclass of BaseAlert, don't do anything special.
parents = [b for b in bases if isinstance(b, AlertMeta)]
if not parents:
return new_alert
# allow subclasses to use the auto id feature
id = getattr(new_alert, 'id', name)
for parent in parents:
if getattr(parent, 'id', None) == id:
id = name
break
new_alert.id = id
if new_alert.id in ALERT_TYPES.keys():
raise AlertIDAlreadyInUse("The alert ID, \"%s\" was declared more than once" % new_alert.id)
ALERT_TYPES[new_alert.id] = new_alert()
ALERT_TYPE_CHOICES.append((new_alert.id, new_alert.title))
return new_alert
class BaseAlert(object):
__metaclass__ = AlertMeta
default = False
sender = None
template_filetype = "txt"
def __init__(self):
kwargs = {}
if self.sender:
kwargs['sender'] = self.sender
self.signal.connect(self.signal_handler, **kwargs)
def __repr__(self):
return "<Alert: %s>" % self.id
def __str__(self):
return str(self.id)
def signal_handler(self, **kwargs):
if self.before(**kwargs) is False:
return
from alert.models import AlertPreference
from alert.models import Alert
users = self.get_applicable_users(**kwargs)
if isinstance(users, models.Model):
users = [users]
try:
user_count = users.count()
except:
user_count = len(users)
User = get_user_model()
if user_count and not isinstance(users[0], User):
raise InvalidApplicableUsers("%s.get_applicable_users() returned an invalid value. Acceptable values are a django.contrib.auth.models.User instance OR an iterable containing 0 or more User instances" % (self.id))
site = Site.objects.get_current()
def mk_alert(user, backend):
context = self.get_template_context(BACKEND=backend, USER=user, SITE=site, ALERT=self, **kwargs)
template_kwargs = {'backend': backend, 'context': context }
return Alert(
user=user,
backend=backend.id,
alert_type=self.id,
when=self.get_send_time(**kwargs),
title=self.get_title(**template_kwargs),
body=self.get_body(**template_kwargs)
)
alerts = (mk_alert(user, backend) for (user, backend) in AlertPreference.objects.get_recipients_for_notice(self.id, users))
# bulk create is much faster so use it when available
if django.VERSION >= (1, 4) and getattr(settings, 'ALERT_USE_BULK_CREATE', True):
created = 0
for alerts_group in grouper(100, alerts):
# break bulk create into groups of 100 to avoid the dreaded
# OperationalError: (2006, 'MySQL server has gone away')
Alert.objects.bulk_create(alerts_group)
created += 100
else:
for alert in alerts: alert.save()
def before(self, **kwargs):
pass
def get_send_time(self, **kwargs):
return timezone.now()
def get_applicable_users(self, instance, **kwargs):
return [instance.user]
def get_template_context(self, **kwargs):
return kwargs
def _get_template(self, backend, part, filetype='txt'):
template = "alerts/%s/%s/%s.%s" % (self.id, backend.id, part, filetype)
try:
get_template(template)
return template
except TemplateDoesNotExist:
pass
template = "alerts/%s/%s.%s" % (self.id, part, filetype)
get_template(template)
return template
def get_title_template(self, backend, context):
return self._get_template(backend, 'title', self.template_filetype)
def get_body_template(self, backend, context):
return self._get_template(backend, 'body', self.template_filetype)
def get_title(self, backend, context):
template = self.get_title_template(backend, context)
return render_to_string(template, context)
def get_body(self, backend, context):
template = self.get_body_template(backend, context)
return render_to_string(template, context)
def get_default(self, backend):
if isinstance(self.default, bool):
return self.default
return self.default[backend]
class AlertBackendMeta(type):
def __new__(cls, name, bases, attrs):
new_alert_backend = super(AlertBackendMeta, cls).__new__(cls, name, bases, attrs)
        # If this isn't a subclass of BaseAlertBackend, don't do anything special.
parents = [b for b in bases if isinstance(b, AlertBackendMeta)]
if not parents:
return new_alert_backend
new_alert_backend.id = getattr(new_alert_backend, 'id', name)
if new_alert_backend.id in ALERT_BACKENDS.keys():
            raise AlertBackendIDAlreadyInUse("The alert backend ID, \"%s\" was declared more than once" % new_alert_backend.id)
ALERT_BACKENDS[new_alert_backend.id] = new_alert_backend()
ALERT_BACKEND_CHOICES.append((new_alert_backend.id, new_alert_backend.title))
return new_alert_backend
class BaseAlertBackend(object):
__metaclass__ = AlertBackendMeta
def __repr__(self):
return "<AlertBackend: %s>" % self.id
def __str__(self):
return str(self.id)
def mass_send(self, alerts):
from .models import Alert
if isinstance(alerts, Alert):
self.send(alerts)
else:
[self.send(alert) for alert in alerts]
def super_accepter(arg, lookup_dict):
"""
for the alerts and backends keyword arguments...
      - provides reasonable defaults
- accept a single alert/backend or a list of them
      - accept an alert/backend class or a string containing the alert/backend id
"""
# reasonable default
if arg is None: return lookup_dict.values()
# single item or a list
if not isinstance(arg, (tuple, list)):
arg = [arg]
# normalize the arguments
ids = ((a if isinstance(a, basestring) else a.id) for a in arg)
# remove duplicates
_set = {}
ids = (_set.setdefault(id,id) for id in ids if id not in _set)
# lookup the objects
return [lookup_dict[id] for id in ids]
def unsubscribe_user(user, alerts=None, backends=None):
from .forms import UnsubscribeForm
form = UnsubscribeForm(user=user, alerts=alerts, backends=backends)
data = dict((field, False) for field in form.fields.keys())
form = UnsubscribeForm(data, user=user, alerts=alerts, backends=backends)
assert(form.is_valid())
form.save()
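# A minimal sketch of how an alert is typically declared against this module,
# assuming a post_save-style signal and a hypothetical ``Comment`` model; the
# model, field and template names below are illustrative, not part of this
# package.
#
#     from django.db.models.signals import post_save
#     from alert.utils import BaseAlert
#     from myapp.models import Comment              # hypothetical model
#
#     class NewCommentAlert(BaseAlert):
#         title = 'New comment on your post'
#         signal = post_save
#         sender = Comment
#
#         def before(self, created=False, **kwargs):
#             return created                         # returning False skips the alert
#
#         def get_applicable_users(self, instance, **kwargs):
#             return [instance.post.author]
#
# Simply subclassing BaseAlert registers the alert: AlertMeta records it in
# ALERT_TYPES / ALERT_TYPE_CHOICES and __init__ connects signal_handler to the
# declared signal. Titles and bodies are rendered from templates resolved by
# _get_template, e.g. alerts/NewCommentAlert/title.txt and body.txt.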
|
mit
| -3,880,105,067,049,292,300
| 30.930233
| 224
| 0.59092
| false
| 4.158506
| false
| false
| false
|
Forage/Gramps
|
gramps/gen/merge/mergerepositoryquery.py
|
1
|
2677
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Michiel D. Nauta
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Provide merge capabilities for repositories.
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..lib import Source
from ..db import DbTxn
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().sgettext
from ..errors import MergeError
#-------------------------------------------------------------------------
#
# MergeRepoQuery
#
#-------------------------------------------------------------------------
class MergeRepositoryQuery(object):
"""
Create database query to merge two repositories.
"""
def __init__(self, dbstate, phoenix, titanic):
self.database = dbstate.db
self.phoenix = phoenix
self.titanic = titanic
def execute(self):
"""
Merges two repositories into a single repository.
"""
new_handle = self.phoenix.get_handle()
old_handle = self.titanic.get_handle()
self.phoenix.merge(self.titanic)
with DbTxn(_("Merge Repositories"), self.database) as trans:
self.database.commit_repository(self.phoenix, trans)
for (class_name, handle) in self.database.find_backlink_handles(
old_handle):
if class_name == Source.__name__:
source = self.database.get_source_from_handle(handle)
assert source.has_handle_reference('Repository', old_handle)
source.replace_repo_references(old_handle, new_handle)
self.database.commit_source(source, trans)
else:
raise MergeError("Encounter an object of type %s that has "
"a repository reference." % class_name)
self.database.remove_repository(old_handle, trans)
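# A minimal sketch of how this query is typically driven from the merge UI,
# assuming a DbState and two Repository objects are already at hand (the
# variable names below are illustrative):
#
#     query = MergeRepositoryQuery(dbstate, phoenix=repo_to_keep,
#                                  titanic=repo_to_absorb)
#     query.execute()
#
# ``phoenix`` survives and absorbs ``titanic``; every Source that still points
# at the old handle is rewritten inside the same DbTxn before the old
# repository is removed.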
|
gpl-2.0
| -7,466,816,308,397,948,000
| 35.671233
| 80
| 0.583115
| false
| 4.395731
| false
| false
| false
|
griimick/feature-mlsite
|
app/static/hindi-dependency-parser-2.0/bin/normalize_bojar_lrec_2010.py
|
1
|
1281
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
vowels_to_be_replaced= {}
def replace_null(from_chr_num, to_chr_num):
for x in range(from_chr_num, to_chr_num):
vowels_to_be_replaced[chr(x)]= ""
#replace_null(0x0900, 0x0904)
#replace_null(0x093A, 0x0950)
#replace_null(0x0951, 0x0958)
#replace_null(0x0962, 0x0964)
#replace_null(0x0971, 0x0972)
vowels_to_be_replaced[chr(0x0901)] = chr(0x0902)  # chandrabindu -> anusvara
vowels_to_be_replaced[""]= "न"
vowels_to_be_replaced["ऩ"]= "न"
vowels_to_be_replaced['ऱ']= "र"
vowels_to_be_replaced['ऴ']= "ळ"
vowels_to_be_replaced['क़']= "क"
vowels_to_be_replaced['ख़']= "ख"
vowels_to_be_replaced['ग़']= "ग"
vowels_to_be_replaced['ज़']= "ज"
vowels_to_be_replaced['ड़']= "ड"
vowels_to_be_replaced['ढ़']= "ढ"
vowels_to_be_replaced['फ़']= "फ"
vowels_to_be_replaced['य़']= "य"
vowels_to_be_replaced['ॠ']= "ऋ"
vowels_to_be_replaced['ॡ']= "ऌ"
def normalise(word):
# Word should be unicode encoding
nword=""
for char in word:
if char in vowels_to_be_replaced:
nword+= vowels_to_be_replaced[char]
else:
nword+= char
return nword
if __name__=="__main__":
print((normalise("भागता")))
print((normalise("तृष्णा")))
|
mit
| 5,023,343,353,184,191,000
| 25.777778
| 47
| 0.617427
| false
| 2.260788
| false
| false
| false
|
balint256/ice
|
tlm/ui.py
|
1
|
18027
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ui.py
#
# Copyright 2014 Balint Seeber <balint256@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# FIXME:
# * Update prediction (using detected bitrate from Network)
# * Colour
# * Handle when screen size isn't large enough (curses throws ERR)
import curses, datetime, math
import state
from constants import *
from primitives import *
class Layout():
def __init__(self, name, ui):
self.name = name
self.ui = ui
self.active = False
self.y_offset = 0
def draw(self, y):
pass
def deactivate(self):
self.active = False
def activate(self, y):
self.y_offset = y
self.active = True
class MinorFrameLayout(Layout):
def __init__(self, *args, **kwds):
Layout.__init__(self, *args, **kwds)
self.deframer = self.ui.engine.deframer
self.last_updated_idx = 0
self.changed = False
self.prev_frame_idx = 0
def activate(self, y):
self.ui.engine.register(EVENT_NEW_BYTE, self)
# FIXME: Draw the frame thus far
Layout.activate(self, y)
def deactivate(self):
self.ui.engine.unregister(EVENT_NEW_BYTE, self)
Layout.deactivate(self)
def __call__(self, *args, **kwds):
if not self.active:
self.changed = True
raise Exception("MinorFrameLayout callback while not active")
return
stdscr = self.ui.scr
byte = kwds['byte']
frame = kwds['frame']
if kwds['idx'] is None:
frame_idx = len(frame) - 1
else:
frame_idx = kwds['idx']
width = 16
section_length = 8
y_factor = 2
prev_frame_idx = frame_idx - 1
if prev_frame_idx == -1:
prev_frame_idx = MINOR_FRAME_LEN - 1
#if prev_frame_idx < len(frame):
if True: # FIXME: Being lazy here
y = prev_frame_idx / width
x = prev_frame_idx % width
#stdscr.move(y + y_offset, x * section_length)
#stdscr.addstr("%03d %02x " % (prev_frame_idx, frame[prev_frame_idx]))
stdscr.move(y*y_factor + self.y_offset, x * section_length + 3)
stdscr.addstr(" ")
stdscr.move(y*y_factor + self.y_offset, x * section_length + 3 + 3)
stdscr.addstr(" ")
y = frame_idx / width
x = frame_idx % width
stdscr.move(y*y_factor + self.y_offset, x * section_length)
stdscr.addstr("%03d[%02x]" % (frame_idx, byte))
def draw(self, y):
#if not self.changed:
# return
#self.deframer
pass # Purely event driven at the moment
class SubcomSubLayout():
def __init__(self, key, subcom_tracker, y_offset):
self.key = key
self.subcom_tracker = subcom_tracker
self.last_updated_idx = None
self.y_offset = y_offset
def name(self): return self.key
class SubcomLayout(Layout):
def __init__(self, *args, **kwds):
Layout.__init__(self, *args, **kwds)
self.subcom_trackers = self.ui.engine.subcom_trackers
self.subcom_sublayouts = {}
self.width = 16
self.y_factor = 2
self.max_name_len = 0
y = 0
for subcom_key in self.subcom_trackers.keys():
subcom_tracker = self.subcom_trackers[subcom_key]
sublayout = SubcomSubLayout(subcom_key, subcom_tracker, y)
self.max_name_len = max(self.max_name_len, len(sublayout.name()))
self.subcom_sublayouts[subcom_key] = sublayout
height = int(math.ceil(1.*subcom_tracker.length / self.width)) * self.y_factor - (self.y_factor - 1)
y += (height + 3)
self.x_offset = self.max_name_len + 4 # Additional space
self.changed = False
def draw(self, y):
scr = self.ui.scr
for subcom_key in self.subcom_trackers.keys():
subcom_tracker = self.subcom_trackers[subcom_key]
subcom_sublayout = self.subcom_sublayouts[subcom_key]
scr.move(y + subcom_sublayout.y_offset + 2, 1)
scr.addstr("%03d" % (subcom_tracker.discontinuity_cnt))
def activate(self, y):
for subcom_key in self.subcom_trackers.keys():
self.subcom_trackers[subcom_key].register(EVENT_NEW_BYTE, self)
scr = self.ui.scr
for subcom_key in self.subcom_sublayouts.keys():
subcom_sublayout = self.subcom_sublayouts[subcom_key]
scr.move(y + subcom_sublayout.y_offset, 1)
scr.addstr(subcom_sublayout.name())
# FIXME: Draw the frame thus far
Layout.activate(self, y)
def deactivate(self):
for subcom_key in self.subcom_trackers.keys():
self.subcom_trackers[subcom_key].unregister(EVENT_NEW_BYTE, self)
Layout.deactivate(self)
def __call__(self, event, source, *args, **kwds):
if not self.active:
self.changed = True
raise Exception("SubcomLayout callback while not active")
return
stdscr = self.ui.scr
byte = kwds['byte']
frame = kwds['frame']
frame_idx = len(frame) - 1
sublayout = self.subcom_sublayouts[source.key]
section_length = 8
prev_frame_idx = frame_idx - 1
if prev_frame_idx == -1:
prev_frame_idx = sublayout.subcom_tracker.length - 1
#if prev_frame_idx < len(frame):
if True: # FIXME: Being lazy here
y = prev_frame_idx / self.width
x = prev_frame_idx % self.width
stdscr.move(y*self.y_factor + self.y_offset + sublayout.y_offset, self.x_offset + x * section_length + 3)
stdscr.addstr(" ")
stdscr.move(y*self.y_factor + self.y_offset + sublayout.y_offset, self.x_offset + x * section_length + 3 + 3)
stdscr.addstr(" ")
y = frame_idx / self.width
x = frame_idx % self.width
stdscr.move(self.y_offset + sublayout.y_offset + y*self.y_factor, self.x_offset + x * section_length)
stdscr.addstr("%03d[%02x]" % (frame_idx, byte))
class ElementsLayout(Layout):
def __init__(self, elements, padding=10, *args, **kwds):
Layout.__init__(self, *args, **kwds)
self.elements = elements
self.max_id_len = 0
self.y_offset_map = {}
self.trigger_map = {}
self.padding = padding
self.last_draw_time = {}
self.draw_count = {}
self.draw_time_delta = datetime.timedelta(milliseconds=250)
self.max_value_len = 0
self.full_refresh = False
for element in self.elements:
self.last_draw_time[element.id()] = None
self.draw_count[element.id()] = 0
self.max_id_len = max(self.max_id_len, len(element.id()))
trigger_indices = self.ui.engine.get_element_state(element).get_element().positions().get_trigger_indices(mode=self.ui.engine.options.mode)
for trigger_index in trigger_indices:
if trigger_index not in self.trigger_map.keys(): self.trigger_map[trigger_index] = []
self.trigger_map[trigger_index] += [element]
def activate(self, y):
scr = self.ui.scr
cnt = 0
self.y_offset_map = {}
for element in self.elements:
self.y_offset_map[element.id()] = y+cnt
self.ui.engine.track(element.positions().get_trigger_indices(mode=self.ui.engine.options.mode), self)
scr.move(self.y_offset_map[element.id()], 1)
scr.addstr(element.id())
self.draw_element(element)
cnt += 1
Layout.activate(self, y)
def deactivate(self):
for element in self.elements:
self.ui.engine.untrack(element.positions().get_trigger_indices(mode=self.ui.engine.options.mode), self)
Layout.deactivate(self)
def __call__(self, *args, **kwds):
trigger = kwds['trigger']
res, map_res = trigger.check_map(self.trigger_map)
if not res:
raise Exception("%s not in %s" % (trigger, self.trigger_map.keys()))
triggered_elements = map_res
for element in triggered_elements:
self.draw_element(element)
def draw_element(self, element):
scr = self.ui.scr
element_state = self.ui.engine.get_element_state(element)
scr.move(self.y_offset_map[element.id()], 1 + self.max_id_len + self.padding)
scr.clrtoeol()
if element_state.last_value is None:
return
self.draw_count[element.id()] += 1
count_str = "[%04d]" % element_state.update_count
scr.addstr(count_str)
s = " = "
value_str = element.formatter().format(element_state.last_value)
s += value_str
if element.unit() is not None and len(element.unit()) > 0:
s += " " + element.unit()
if element_state.last_valid is not None:
if element_state.last_valid == True:
s += " (valid)" # FIXME: Green
elif element_state.last_valid == False:
s += " (invalid)" # FIXME: Red
if len(s) > self.max_value_len:
self.max_value_len = len(s)
self.full_refresh = True
scr.addstr(s)
if element_state.previous_value is not None:
scr.move(self.y_offset_map[element.id()], 1 + self.max_id_len + self.padding + self.max_value_len + 10) # MAGIC
s = " (%03d: %s)" % ((self.ui.engine.get_local_time_now() - element_state.previous_value_time).total_seconds(), element.formatter().format(element_state.previous_value))
scr.addstr(s)
time_delta = self.ui.engine.get_local_time_now() - element_state.last_update_time
time_str = "%03d" % time_delta.total_seconds()
scr.move(self.y_offset_map[element.id()], self.ui.max_x - len(time_str))
scr.addstr(time_str)
trigger_str = str(element_state.last_trigger)
scr.move(self.y_offset_map[element.id()], self.ui.max_x - len(time_str) - 3 - len(trigger_str))
scr.addstr(trigger_str)
self.last_draw_time[element.id()] = self.ui.engine.get_local_time_now()
def draw(self, y_offset):
for element in self.elements:
if not self.full_refresh and self.last_draw_time[element.id()] is not None and (self.ui.engine.get_local_time_now() - self.last_draw_time[element.id()]) < self.draw_time_delta:
return
self.draw_element(element)
self.full_refresh = False
class HistoryLayout(Layout):
def __init__(self, width, elements, *args, **kwds):
Layout.__init__(self, *args, **kwds)
self.trigger_map = {}
self.history_map = {}
self.elements = elements
self.history_lengths = {}
self.width = width
for spec in elements:
element, history_length = spec
self.history_lengths[element] = history_length
self.history_map[element] = []
trigger_indices = self.ui.engine.get_element_state(element).get_element().positions().get_trigger_indices(mode=self.ui.engine.options.mode)
self.ui.engine.track(trigger_indices, self)
for trigger_index in trigger_indices:
if trigger_index not in self.trigger_map.keys(): self.trigger_map[trigger_index] = []
self.trigger_map[trigger_index] += [element]
self.changed = False
def __call__(self, *args, **kwds):
self.changed = True
trigger = kwds['trigger']
res, map_res = trigger.check_map(self.trigger_map)
if not res:
raise Exception("%s not in %s" % (trigger, self.trigger_map.keys()))
triggered_elements = map_res
for element in triggered_elements:
element_state = self.ui.engine.get_element_state(element)
if element_state.last_value is None:
return
value_str = element_state.get_element().formatter().format(element_state.last_value)
history = self.history_map[element]
history += [value_str]
diff = len(history) - self.history_lengths[element]
if diff > 0:
self.history_map[element] = history[diff:]
def draw(self, y):
if not self.changed:
return
scr = self.ui.scr
x = 8
n = 0
for spec in self.elements:
element, history_length = spec
history = self.history_map[element]
cnt = 0
scr.move(y + cnt, x)
scr.addstr(element)
cnt += 2
for val in history:
if n == 0:
scr.move(y + cnt, 0)
scr.clrtoeol()
scr.move(y + cnt, x)
scr.addstr(val)
cnt += 1
x += self.width
n += 1
class UserInterface():
def __init__(self, engine, timeout=10):
self.engine = engine
self.timeout = timeout
self.scr = None
self.active_layout = None
self.max_y, self.max_x = 0, 0
self.prev_max_y, self.prev_max_x = 0, 0
self.log_message = ""
self.update_log_message = False
self.last_engine_state = state.STATE_NONE
self.last_active_layout_name = ""
self.element_layout_key_shortcuts = {}
self.element_layouts = []
self.layout_y_offset = 5
def start(self, element_layouts):
self.minor_frame_layout = MinorFrameLayout("raw", self)
self.element_layout_key_shortcuts['`'] = self.minor_frame_layout
self.subcom_layout = SubcomLayout("subcom", self)
self.element_layout_key_shortcuts['~'] = self.subcom_layout
print "Building history layout..."
history_length = 40
self.history_layout = HistoryLayout(name="history", ui=self, width=24, elements=[
('hps_1_temp_supercom', history_length),
('hps_2_temp_supercom', history_length),
('hps_1_tc', history_length),
#('hps_1_tcX', history_length),
('hps_2_tc', history_length),
#('hps_2_tcX', history_length),
('accelerometer', history_length),
]) # MAGIC
self.element_layout_key_shortcuts['h'] = self.history_layout
print "Building layouts..."
for element_layout in element_layouts:
name = element_layout[0]
shortcut = name[0]
if len(element_layout) >= 3:
shortcut = element_layout[2]
elements = []
for element_name in element_layout[1]:
element = self.engine.get_element(element_name, safe=False)
if element is None:
print "The element '%s' was not found for layout '%s'" % (element_name, name)
element = self.engine.get_element(element_name)
elements += [element]
layout = ElementsLayout(elements, name=name, ui=self)
self.element_layouts += [layout]
if shortcut not in self.element_layout_key_shortcuts.keys():
self.element_layout_key_shortcuts[shortcut] = layout
else:
print "ElementLayout '%s' already has shortcut key '%s'" % (self.element_layout_key_shortcuts[shortcut].name, shortcut)
self.scr = curses.initscr()
#curses.start_color() # FIXME
self.scr.timeout(self.timeout) # -1 for blocking
self.scr.keypad(1) # Otherwise app will end when pressing arrow keys
curses.noecho()
#curses.raw()
#curses.cbreak()
#curses.nl / curses.nonl
#self.scr.deleteln()
self.switch_layout(self.minor_frame_layout)
self.update()
#self.scr.refresh() # Done in 'update'
def run(self):
if not self.handle_keys():
return False
self.update()
return True
def log(self, msg):
self.log_message = msg
self.update_log_message = True
def refresh_screen_state(self):
self.max_y, self.max_x = self.scr.getmaxyx()
changed = (self.max_y != self.prev_max_y) or (self.prev_max_x != self.max_x)
self.prev_max_y, self.prev_max_x = self.max_y, self.max_x
return changed
def update(self):
if self.refresh_screen_state():
self.clear()
self.prev_max_y, self.prev_max_x
if self.last_engine_state != self.engine.get_state():
self.scr.move(self.max_y-1, 0)
self.scr.clrtoeol()
self.scr.addstr(state.STATE_TXT[self.engine.get_state()])
self.last_engine_state = self.engine.get_state()
if True:
self.scr.move(0, 0)
#self.scr.clrtoeol() # Don't since current layout name is on RHS
self.scr.addstr("Current time: %s" % (self.engine.get_local_time_now()))
if self.engine.net.last_enqueue_time:
self.scr.move(1, 0)
#self.scr.clrtoeol() # Don't since layout shortcuts are on RHS
self.scr.addstr("Data arrived: %s" % (self.engine.net.last_enqueue_time))
if True:
self.scr.move(2, 0)
self.scr.clrtoeol()
self.scr.addstr("Data lag : %+f" % (self.engine.net.get_time_diff().total_seconds()))
self.scr.move(2, 32)
self.scr.addstr("Data source: %s" % (self.engine.net.get_status_string()))
self.scr.move(3, 0)
self.scr.clrtoeol()
self.scr.addstr("Complete frame count: %d, sync reset count: %d, minor frame discontinuities: %d, minor frame index lock: %s, auto minor frame index: %s" % (
self.engine.deframer.get_complete_frame_count(),
self.engine.deframer.get_sync_reset_count(),
self.engine.frame_tracker.frame_discontinuity_cnt,
self.engine.frame_tracker.ignore_minor_frame_idx,
self.engine.frame_tracker.minor_frame_idx,
))
if self.update_log_message:
self.scr.move(self.max_y-2, 0)
self.scr.clrtoeol()
self.scr.addstr(self.log_message)
self.update_log_message = False
if self.active_layout:
if self.last_active_layout_name != self.active_layout.name:
# Screen should have been cleared when changing layout
self.scr.move(0, self.max_x - len(self.active_layout.name))
self.scr.addstr(self.active_layout.name)
self.last_active_layout_name = self.active_layout.name
self.active_layout.draw(self.layout_y_offset)
self.scr.refresh()
def draw_underlay(self):
shortcuts = "".join(self.element_layout_key_shortcuts.keys())
self.scr.move(1, self.max_x - len(shortcuts))
self.scr.addstr(shortcuts)
def clear(self):
self.scr.erase()
self.last_engine_state = None
self.last_active_layout_name = ""
def switch_layout(self, layout, erase=True):
if self.active_layout:
self.active_layout.deactivate()
if erase:
self.clear()
self.refresh_screen_state()
self.draw_underlay()
self.active_layout = layout
self.active_layout.activate(self.layout_y_offset)
def handle_keys(self):
ch = self.scr.getch()
if ch > -1:
if ch == 27: # ESC (quit)
return False
elif ch >= ord('0') and ch <= ord('9'):
idx = (ch - ord('0') - 1) % 10
if idx < len(self.element_layouts):
self.switch_layout(self.element_layouts[idx])
elif ch >= 0 and ch < 256 and chr(ch) in self.element_layout_key_shortcuts.keys():
self.switch_layout(self.element_layout_key_shortcuts[chr(ch)])
else:
self.scr.move(self.max_y-3, 0)
self.scr.clrtoeol()
self.scr.addstr(str(ch))
return True
def stop(self):
if not self.scr:
return
self.scr.erase()
self.scr.refresh()
curses.nocbreak()
self.scr.keypad(0)
curses.echo()
curses.endwin()
def main():
return 0
if __name__ == '__main__':
main()
|
gpl-3.0
| -1,446,360,939,147,403,300
| 30.460733
| 179
| 0.669107
| false
| 2.851922
| false
| false
| false
|
jwilliamn/handwritten
|
extraction/FormatModel/TestingCornersAlgorithms.py
|
1
|
1349
|
import numpy as np
import cv2
import time
import helloworld
def countNonZero(sum, i_j, x_y = None):
if x_y is None:
i = i_j[0]
j = i_j[1]
if i<0 or j<0:
return 0
return sum[i,j]
else:
i = i_j[0]
j = i_j[1]
x = x_y[0]
y = x_y[1]
T = countNonZero(sum, i_j=x_y)
A = countNonZero(sum, i_j=(i-1,j-1))
P = countNonZero(sum, i_j=(x, j-1))
Q = countNonZero(sum, i_j=(i-1, y))
return T-P-Q+A
def createSum(A):
sum = np.zeros(A.shape)
rows, cols = A.shape
for x in range(rows):
for y in range(cols):
T = countNonZero(sum, i_j=(x-1, y - 1))
P = countNonZero(sum, i_j=(x - 1, y))
Q = countNonZero(sum, i_j=(x, y - 1))
S = P + Q - T
if A[x,y] != 0:
S += 1
sum[x,y] = S
return sum
if __name__ == '__main__':
A = np.zeros((4,3))
A[0, 1] = 1
A[1, 2] = 1
A[3, 2] = 1
A[2, 0] = 1
print(A)
S = createSum(A)
print(S)
start_time = time.time()
A = cv2.imread('/home/vmchura/Documents/handwritten/input/pagina1_1.png', 0)
print("--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
S = createSum(A)
print("--- %s seconds ---" % (time.time() - start_time))
|
gpl-3.0
| -184,997,578,644,377,950
| 24.961538
| 80
| 0.4596
| false
| 2.624514
| false
| false
| false
|
maximeolivier/pyCAF
|
pycaf/importer/importNetwork/importSwitch/cisco_switch.py
|
1
|
3141
|
#| This file is part of pyCAF. |
#| |
#| pyCAF is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| pyCAF is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 6 14:08:02 2014
@author: thierry
"""
import os
from pycaf.importer.importNetwork import functions as nf
from pycaf.architecture.devices.switch import Switch
import pycaf.tools as tools
def Import_cisco_switch_file(filename, config):
""" Create a Server object from an extraction script result archive
"""
import time
logger = tools.create_logger(__name__, config)
switch_to_import = Switch()
startTime = time.time()
if not os.path.isfile(filename):
logger.error("Cisco switch import error, file not foud : " + str(filename))
return False
else:
switch_to_import.name = filename.split('/')[-1]
switch_to_import.manufacturer = "Cisco"
# Open the file and store lines in a list
file_switch = open(filename, 'rb')
file_content_lines = file_switch.readlines()
file_switch.seek(0, 0)
file_content_exclamation = file_switch.read().split('!\n')
file_switch.close()
nf.import_cisco_hostname(switch_to_import, file_content_lines, logger)
nf.import_cisco_osversion(switch_to_import, file_content_lines, logger)
nf.import_cisco_vlan(switch_to_import, file_content_exclamation, logger)
nf.import_cisco_interfaces_and_switchport(switch_to_import, file_content_exclamation, logger)
nf.import_cisco_route(switch_to_import, file_content_lines, logger)
nf.import_cisco_catalyst_acl_table(switch_to_import, file_content_lines, logger)
print switch_to_import
print switch_to_import.acl_table
print switch_to_import.vlan
print switch_to_import.interfaces
print switch_to_import.switchport
print switch_to_import.routes
# import_osname(server_to_import, xtract_dir, logger)
endTime = time.time()
logger.info("Cisco switch successfully imported. Time : {0:.2} secs\n".format(endTime - startTime))
return switch_to_import
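# A minimal usage sketch, assuming a pyCAF config object and a saved Cisco
# running-config dump; the path below is illustrative:
#
#     switch = Import_cisco_switch_file('/tmp/switch01.cfg', config)
#     if switch:
#         print switch.vlan          # parsed VLAN table
#         print switch.interfaces    # parsed interfaces / switchport data
#         print switch.routes        # static routes
#
# The function returns False when the file cannot be found, so the result
# should be checked before use.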
|
gpl-3.0
| 3,787,811,056,711,934,000
| 41.445946
| 107
| 0.584527
| false
| 4.084525
| false
| false
| false
|
evilncrazy/vake
|
vake.py
|
1
|
2076
|
import sys, os
import subprocess
import re
import select
"""
Runs an instance of make, echoing the stdout and storing the stderr
line by line.
"""
def run_make(args):
p = subprocess.Popen(["make"] + args,
stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stderr = []
while p.poll() == None:
reads = [p.stdout.fileno(), p.stderr.fileno()]
ret = select.select(reads, [], [])
for fd in ret[0]:
if fd == p.stdout.fileno():
read = p.stdout.readline()
sys.stdout.write(read)
if fd == p.stderr.fileno():
read = p.stderr.readline()
sys.stderr.write(read)
stderr.append(read)
return stderr
"""
Parse the output of a Make instance.
"""
def parse_output(stderr):
parsed_output = []
for line in stderr:
# Regex to extract file, line, column number and error message
m = re.search(r"(.*?):([0-9]+):([0-9]+):\s(error|warning):\s(.*)", line)
if m:
parsed_output.append(m.groups())
return parsed_output
"""
Get the nth line of a file.
"""
def get_nth_line(file_name, n):
with open(file_name) as f:
for i, line in enumerate(f):
if i == n - 1:
return line
if __name__ == "__main__":
VAKE_HEADER = '\033[41m'
ENDC = '\033[0m'
parsed_output = parse_output(run_make(sys.argv[1:]))
if len(parsed_output) > 0:
# Give the user a choice of running vake or not
choice = raw_input(VAKE_HEADER + 'vake: ' + str(len(parsed_output)) + ' errors or warnings. Run vake? [Y/n]' + ENDC + ' ')
if (choice == "" or choice.lower() == 'y'):
# Print the instructions
print "<Enter> to edit. 'q' to skip."
for output in parsed_output:
# Print out the error message
file_name, line_no, col_no, errwarn, msg = output
print "{0}:{1}:{2} {3}".format(file_name, line_no, col_no, errwarn) + ':', msg
print ' ', get_nth_line(file_name, int(line_no)),
print ' ', ' ' * (int(col_no) - 1), '^'
cmd = raw_input(":")
subprocess.call(['vim', file_name,
'+call cursor({0}, {1})'.format(line_no, col_no), '+{0}'.format(cmd)])
|
mit
| -6,059,614,288,028,511,000
| 27.067568
| 124
| 0.587669
| false
| 2.982759
| false
| false
| false
|
cloudera/recordservice
|
tests/query_test/test_partitioning.py
|
1
|
4640
|
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
import shlex
import time
from tests.common.test_result_verifier import *
from subprocess import call
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.test_vector import *
from tests.common.test_dimensions import ALL_NODES_ONLY
from tests.common.impala_test_suite import *
from tests.common.skip import SkipIfS3, SkipIfIsilon, SkipIfLocal
# Tests to validate HDFS partitioning.
class TestPartitioning(ImpalaTestSuite):
TEST_DBS = ['hdfs_partitioning', 'bool_partitions']
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestPartitioning, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
# There is no reason to run these tests using all dimensions.
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'text' and\
v.get_value('table_format').compression_codec == 'none')
@classmethod
def setup_class(cls):
super(TestPartitioning, cls).setup_class()
map(cls.cleanup_db, cls.TEST_DBS)
cls.hdfs_client.delete_file_dir("test-warehouse/all_insert_partition_col_types/",\
recursive=True)
@classmethod
def teardown_class(cls):
map(cls.cleanup_db, cls.TEST_DBS)
super(TestPartitioning, cls).teardown_class()
@SkipIfS3.insert
@SkipIfLocal.root_path
@pytest.mark.execute_serially
def test_partition_col_types(self, vector):
self.execute_query("create database hdfs_partitioning");
self.run_test_case('QueryTest/partition-col-types', vector,
use_db='hdfs_partitioning')
# Missing Coverage: Impala deals with boolean partitions created by Hive on a non-hdfs
# filesystem.
@SkipIfS3.hive
@SkipIfIsilon.hive
@pytest.mark.execute_serially
@SkipIfS3.insert
def test_boolean_partitions(self, vector):
# This test takes about a minute to complete due to the Hive commands that are
# executed. To cut down on runtime, limit the test to exhaustive exploration
# strategy.
if self.exploration_strategy() != 'exhaustive': pytest.skip()
db_name = 'bool_partitions'
tbl_name = 'tbl'
self.execute_query("create database " + db_name)
self.execute_query("use " + db_name)
self.execute_query("create table %s (i int) partitioned by (b boolean)" % tbl_name)
# Insert some data using Hive. Due to HIVE-6590, Hive may create multiple
# partitions, mapping to the same boolean literal value.
# For example, Hive may create partitions: /b=FALSE and /b=false, etc
call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=false) SELECT 1 from "\
"functional.alltypes limit 1" % (db_name, tbl_name)])
call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=FALSE) SELECT 2 from "\
"functional.alltypes limit 1" % (db_name, tbl_name)])
call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=true) SELECT 10 from "\
"functional.alltypes limit 1" % (db_name, tbl_name)])
# Update the Impala metadata
self.execute_query("refresh " + tbl_name)
# List the partitions. Show table stats returns 1 row for each partition + 1 summary
# row
result = self.execute_query("show table stats %s" % tbl_name)
assert len(result.data) == 3 + 1
# Verify Impala properly merges the results of the bad Hive metadata.
assert '13' == self.execute_scalar("select sum(i) from %s" % tbl_name);
assert '10' == self.execute_scalar("select sum(i) from %s where b=true" % tbl_name)
assert '3' == self.execute_scalar("select sum(i) from %s where b=false" % tbl_name)
# INSERT into a boolean column is disabled in Impala due to this Hive bug.
try:
self.execute_query("insert into %s partition(bool_col=true) select 1" % tbl_name)
except ImpalaBeeswaxException, e:
assert 'AnalysisException: INSERT into table with BOOLEAN partition column (%s) '\
'is not supported: %s.%s' % ('b', db_name, tbl_name) in str(e)
|
apache-2.0
| 1,965,043,666,579,426,000
| 40.428571
| 89
| 0.708405
| false
| 3.523159
| true
| false
| false
|
hickeroar/simplebayes
|
simplebayes/__init__.py
|
1
|
10281
|
# coding: utf-8
"""
The MIT License (MIT)
Copyright (c) 2015 Ryan Vennell
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from simplebayes.categories import BayesCategories
import pickle
import os
class SimpleBayes(object):
"""A memory-based, optional-persistence naïve bayesian text classifier."""
cache_file = '_simplebayes.pickle'
def __init__(self, tokenizer=None, cache_path='/tmp/'):
"""
:param tokenizer: A tokenizer override
:type tokenizer: function (optional)
:param cache_path: path to data storage
:type cache_path: str
"""
self.categories = BayesCategories()
self.tokenizer = tokenizer or SimpleBayes.tokenize_text
self.cache_path = cache_path
self.probabilities = {}
@classmethod
def tokenize_text(cls, text):
"""
Default tokenize method; can be overridden
:param text: the text we want to tokenize
:type text: str
:return: list of tokenized text
:rtype: list
"""
return [w for w in text.split() if len(w) > 2]
@classmethod
def count_token_occurrences(cls, words):
"""
Creates a key/value set of word/count for a given sample of text
:param words: full list of all tokens, non-unique
:type words: list
:return: key/value pairs of words and their counts in the list
:rtype: dict
"""
counts = {}
for word in words:
if word in counts:
counts[word] += 1
else:
counts[word] = 1
return counts
def flush(self):
"""
Deletes all tokens & categories
"""
self.categories = BayesCategories()
def calculate_category_probability(self):
"""
Caches the individual probabilities for each category
"""
total_tally = 0.0
probs = {}
for category, bayes_category in \
self.categories.get_categories().items():
count = bayes_category.get_tally()
total_tally += count
probs[category] = count
# Calculating the probability
for category, count in probs.items():
if total_tally > 0:
probs[category] = float(count)/float(total_tally)
else:
probs[category] = 0.0
for category, probability in probs.items():
self.probabilities[category] = {
# Probability that any given token is of this category
'prc': probability,
# Probability that any given token is not of this category
'prnc': sum(probs.values()) - probability
}
def train(self, category, text):
"""
Trains a category with a sample of text
:param category: the name of the category we want to train
:type category: str
:param text: the text we want to train the category with
:type text: str
"""
try:
bayes_category = self.categories.get_category(category)
except KeyError:
bayes_category = self.categories.add_category(category)
tokens = self.tokenizer(str(text))
occurrence_counts = self.count_token_occurrences(tokens)
for word, count in occurrence_counts.items():
bayes_category.train_token(word, count)
# Updating our per-category overall probabilities
self.calculate_category_probability()
def untrain(self, category, text):
"""
Untrains a category with a sample of text
:param category: the name of the category we want to train
:type category: str
:param text: the text we want to untrain the category with
:type text: str
"""
try:
bayes_category = self.categories.get_category(category)
except KeyError:
return
tokens = self.tokenizer(str(text))
        occurrence_counts = self.count_token_occurrences(tokens)
        for word, count in occurrence_counts.items():
bayes_category.untrain_token(word, count)
# Updating our per-category overall probabilities
self.calculate_category_probability()
def classify(self, text):
"""
Chooses the highest scoring category for a sample of text
:param text: sample text to classify
:type text: str
:return: the "winning" category
:rtype: str
"""
score = self.score(text)
if not score:
return None
return sorted(score.items(), key=lambda v: v[1])[-1][0]
def score(self, text):
"""
Scores a sample of text
:param text: sample text to score
:type text: str
:return: dict of scores per category
:rtype: dict
"""
occurs = self.count_token_occurrences(self.tokenizer(text))
scores = {}
for category in self.categories.get_categories().keys():
scores[category] = 0
categories = self.categories.get_categories().items()
for word, count in occurs.items():
token_scores = {}
# Adding up individual token scores
for category, bayes_category in categories:
token_scores[category] = \
float(bayes_category.get_token_count(word))
# We use this to get token-in-category probabilities
token_tally = sum(token_scores.values())
# If this token isn't found anywhere its probability is 0
if token_tally == 0.0:
continue
# Calculating bayes probabiltity for this token
# http://en.wikipedia.org/wiki/Naive_Bayes_spam_filtering
for category, token_score in token_scores.items():
# Bayes probability * the number of occurances of this token
scores[category] += count * \
self.calculate_bayesian_probability(
category,
token_score,
token_tally
)
# Removing empty categories from the results
final_scores = {}
for category, score in scores.items():
if score > 0:
final_scores[category] = score
return final_scores
def calculate_bayesian_probability(self, cat, token_score, token_tally):
"""
Calculates the bayesian probability for a given token/category
:param cat: The category we're scoring for this token
:type cat: str
:param token_score: The tally of this token for this category
:type token_score: float
:param token_tally: The tally total for this token from all categories
:type token_tally: float
:return: bayesian probability
:rtype: float
"""
# P that any given token IS in this category
prc = self.probabilities[cat]['prc']
# P that any given token is NOT in this category
prnc = self.probabilities[cat]['prnc']
# P that this token is NOT of this category
prtnc = (token_tally - token_score) / token_tally
# P that this token IS of this category
prtc = token_score / token_tally
# Assembling the parts of the bayes equation
numerator = (prtc * prc)
denominator = (numerator + (prtnc * prnc))
# Returning the calculated bayes probability unless the denom. is 0
return numerator / denominator if denominator != 0.0 else 0.0
def tally(self, category):
"""
Gets the tally for a requested category
:param category: The category we want a tally for
:type category: str
:return: tally for a given category
:rtype: int
"""
try:
bayes_category = self.categories.get_category(category)
except KeyError:
return 0
return bayes_category.get_tally()
def get_cache_location(self):
"""
Gets the location of the cache file
:return: the location of the cache file
:rtype: string
"""
filename = self.cache_path if \
self.cache_path[-1:] == '/' else \
self.cache_path + '/'
filename += self.cache_file
return filename
def cache_persist(self):
"""
Saves the current trained data to the cache.
This is initiated by the program using this module
"""
filename = self.get_cache_location()
pickle.dump(self.categories, open(filename, 'wb'))
def cache_train(self):
"""
Loads the data for this classifier from a cache file
:return: whether or not we were successful
:rtype: bool
"""
filename = self.get_cache_location()
if not os.path.exists(filename):
return False
categories = pickle.load(open(filename, 'rb'))
assert isinstance(categories, BayesCategories), \
"Cache data is either corrupt or invalid"
self.categories = categories
# Updating our per-category overall probabilities
self.calculate_category_probability()
return True
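# A minimal usage sketch, assuming the package is importable as
# ``simplebayes``; the category names and sample strings are illustrative:
#
#     from simplebayes import SimpleBayes
#
#     bayes = SimpleBayes(cache_path='/tmp/')
#     bayes.train('spam', 'buy cheap pills now')
#     bayes.train('ham', 'lunch meeting at noon tomorrow')
#     print(bayes.classify('cheap pills today'))   # -> 'spam'
#     print(bayes.score('meeting at noon'))        # per-category scores
#     bayes.cache_persist()                        # optional on-disk cache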
|
mit
| -7,677,386,702,332,478,000
| 32.376623
| 78
| 0.603502
| false
| 4.538631
| false
| false
| false
|
prasanna08/oppia
|
core/storage/topic/gae_models.py
|
1
|
25011
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for topics and related constructs."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core.platform import models
import feconf
import python_utils
from google.appengine.ext import ndb
(base_models, user_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.user])
class TopicSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a topic snapshot."""
pass
class TopicSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a topic snapshot."""
pass
class TopicModel(base_models.VersionedModel):
"""Model for storing Topics.
This class should only be imported by the topic services file
and the topic model test file.
"""
SNAPSHOT_METADATA_CLASS = TopicSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = TopicSnapshotContentModel
ALLOW_REVERT = False
# The name of the topic.
name = ndb.StringProperty(required=True, indexed=True)
# The canonical name of the topic, created by making `name` lowercase.
canonical_name = ndb.StringProperty(required=True, indexed=True)
# The abbreviated name of the topic.
abbreviated_name = ndb.StringProperty(indexed=True, default='')
# The thumbnail filename of the topic.
thumbnail_filename = ndb.StringProperty(indexed=True)
# The thumbnail background color of the topic.
thumbnail_bg_color = ndb.StringProperty(indexed=True)
# The description of the topic.
description = ndb.TextProperty(indexed=False)
# This consists of the list of objects referencing canonical stories that
# are part of this topic.
canonical_story_references = ndb.JsonProperty(repeated=True, indexed=False)
# This consists of the list of objects referencing additional stories that
# are part of this topic.
additional_story_references = ndb.JsonProperty(repeated=True, indexed=False)
# The schema version for the story reference object on each of the above 2
# lists.
story_reference_schema_version = ndb.IntegerProperty(
required=True, indexed=True)
# This consists of the list of uncategorized skill ids that are not part of
# any subtopic.
uncategorized_skill_ids = ndb.StringProperty(repeated=True, indexed=True)
# The list of subtopics that are part of the topic.
subtopics = ndb.JsonProperty(repeated=True, indexed=False)
# The schema version of the subtopic dict.
subtopic_schema_version = ndb.IntegerProperty(required=True, indexed=True)
# The id for the next subtopic.
next_subtopic_id = ndb.IntegerProperty(required=True)
# The ISO 639-1 code for the language this topic is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
# The url fragment of the topic.
url_fragment = ndb.StringProperty(required=True, indexed=True)
# Whether to show practice tab in the Topic viewer page.
practice_tab_is_displayed = ndb.BooleanProperty(
required=True, default=False)
# The content of the meta tag in the Topic viewer page.
meta_tag_content = ndb.StringProperty(indexed=True)
@staticmethod
def get_deletion_policy():
"""Topic should be kept if it is published."""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def has_reference_to_user_id(cls, unused_user_id):
"""Check whether TopicModel snapshots references the given user.
Args:
unused_user_id: str. The ID of the user whose data should be
checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return False
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(TopicModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
topic_rights = TopicRightsModel.get_by_id(self.id)
if topic_rights.topic_is_published:
status = constants.ACTIVITY_STATUS_PUBLIC
else:
status = constants.ACTIVITY_STATUS_PRIVATE
topic_commit_log_entry = TopicCommitLogEntryModel.create(
self.id, self.version, committer_id, commit_type,
commit_message, commit_cmds, status, False
)
topic_commit_log_entry.topic_id = self.id
topic_commit_log_entry.put()
@classmethod
def get_by_name(cls, topic_name):
"""Gets TopicModel by topic_name. Returns None if the topic with
name topic_name doesn't exist.
Args:
topic_name: str. The name of the topic.
Returns:
TopicModel|None. The topic model of the topic or None if not
found.
"""
return TopicModel.query().filter(
cls.canonical_name == topic_name.lower()).filter(
cls.deleted == False).get() # pylint: disable=singleton-comparison
@classmethod
def get_by_url_fragment(cls, url_fragment):
"""Gets TopicModel by url_fragment. Returns None if the topic with
name url_fragment doesn't exist.
Args:
url_fragment: str. The url fragment of the topic.
Returns:
TopicModel|None. The topic model of the topic or None if not
found.
"""
# TODO(#10210): Make fetching by URL fragment faster.
return TopicModel.query().filter(
cls.url_fragment == url_fragment).filter(
cls.deleted == False).get() # pylint: disable=singleton-comparison
@classmethod
def get_export_policy(cls):
"""Model does not contain user data."""
return dict(super(cls, cls).get_export_policy(), **{
'name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'canonical_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'abbreviated_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_bg_color': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'description': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'canonical_story_references':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'additional_story_references':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'story_reference_schema_version':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'uncategorized_skill_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'subtopics': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'subtopic_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'next_subtopic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'meta_tag_content': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'practice_tab_is_displayed':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE,
})
class TopicCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
"""Log of commits to topics.
A new instance of this model is created and saved every time a commit to
TopicModel occurs.
The id for this model is of the form 'topic-[topic_id]-[version]'.
"""
# The id of the topic being edited.
topic_id = ndb.StringProperty(indexed=True, required=True)
@staticmethod
def get_deletion_policy():
"""Topic commit log is deleted only if the correspondingm topic is not
public.
"""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def _get_instance_id(cls, topic_id, version):
"""This function returns the generated id for the get_commit function
in the parent class.
Args:
topic_id: str. The id of the topic being edited.
version: int. The version number of the topic after the commit.
Returns:
str. The commit id with the topic id and version number.
"""
return 'topic-%s-%s' % (topic_id, version)
@classmethod
def get_export_policy(cls):
"""This model is only stored for archive purposes. The commit log of
entities is not related to personal user data.
"""
return dict(super(cls, cls).get_export_policy(), **{
'topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
class TopicSummaryModel(base_models.BaseModel):
"""Summary model for an Oppia Topic.
This should be used whenever the content blob of the topic is not
needed (e.g. search results, etc).
A TopicSummaryModel instance stores the following information:
id, description, language_code, last_updated, created_on, version,
url_fragment.
The key of each instance is the topic id.
"""
# The name of the topic.
name = ndb.StringProperty(required=True, indexed=True)
# The canonical name of the topic, created by making `name` lowercase.
canonical_name = ndb.StringProperty(required=True, indexed=True)
# The ISO 639-1 code for the language this topic is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
# The description of the topic.
description = ndb.TextProperty(indexed=False)
# The url fragment of the topic.
url_fragment = ndb.StringProperty(required=True, indexed=True)
# Time when the topic model was last updated (not to be
# confused with last_updated, which is the time when the
# topic *summary* model was last updated).
topic_model_last_updated = ndb.DateTimeProperty(required=True, indexed=True)
# Time when the topic model was created (not to be confused
# with created_on, which is the time when the topic *summary*
# model was created).
topic_model_created_on = ndb.DateTimeProperty(required=True, indexed=True)
# The number of canonical stories that are part of this topic.
canonical_story_count = ndb.IntegerProperty(required=True, indexed=True)
# The number of additional stories that are part of this topic.
additional_story_count = ndb.IntegerProperty(required=True, indexed=True)
# The total number of skills in the topic (including those that are
# uncategorized).
total_skill_count = ndb.IntegerProperty(required=True, indexed=True)
# The number of skills that are not part of any subtopic.
uncategorized_skill_count = ndb.IntegerProperty(required=True, indexed=True)
# The number of subtopics of the topic.
subtopic_count = ndb.IntegerProperty(required=True, indexed=True)
# The thumbnail filename of the topic.
thumbnail_filename = ndb.StringProperty(indexed=True)
# The thumbnail background color of the topic.
thumbnail_bg_color = ndb.StringProperty(indexed=True)
version = ndb.IntegerProperty(required=True)
@staticmethod
def get_deletion_policy():
"""Topic summary should be kept if associated topic is published."""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def has_reference_to_user_id(cls, unused_user_id):
"""Check whether TopicSummaryModel references the given user.
Args:
unused_user_id: str. The (unused) ID of the user whose data should
be checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return False
@classmethod
def get_export_policy(cls):
"""Model does not contain user data."""
return dict(super(cls, cls).get_export_policy(), **{
'name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'canonical_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'description': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'topic_model_last_updated':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'topic_model_created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'canonical_story_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'additional_story_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'total_skill_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'uncategorized_skill_count':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'subtopic_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_bg_color': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
class SubtopicPageSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a subtopic page snapshot."""
pass
class SubtopicPageSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a subtopic page snapshot."""
pass
class SubtopicPageModel(base_models.VersionedModel):
"""Model for storing Subtopic pages.
This stores the HTML data for a subtopic page.
"""
SNAPSHOT_METADATA_CLASS = SubtopicPageSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = SubtopicPageSnapshotContentModel
ALLOW_REVERT = False
# The topic id that this subtopic is a part of.
topic_id = ndb.StringProperty(required=True, indexed=True)
# The json data of the subtopic consisting of subtitled_html,
# recorded_voiceovers and written_translations fields.
page_contents = ndb.JsonProperty(required=True)
# The schema version for the page_contents field.
page_contents_schema_version = ndb.IntegerProperty(
required=True, indexed=True)
# The ISO 639-1 code for the language this subtopic page is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
@staticmethod
def get_deletion_policy():
"""Subtopic should be kept if associated topic is published."""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def has_reference_to_user_id(cls, unused_user_id):
"""Check whether SubtopicPageModel snapshots references the given user.
Args:
unused_user_id: str. The ID of the user whose data should be
checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return False
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(SubtopicPageModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
subtopic_page_commit_log_entry = SubtopicPageCommitLogEntryModel.create(
self.id, self.version, committer_id, commit_type, commit_message,
commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False
)
subtopic_page_commit_log_entry.subtopic_page_id = self.id
subtopic_page_commit_log_entry.put()
@classmethod
def get_export_policy(cls):
"""Model does not contain user data."""
return dict(super(cls, cls).get_export_policy(), **{
'topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'page_contents': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'page_contents_schema_version':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
class SubtopicPageCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
"""Log of commits to subtopic pages.
A new instance of this model is created and saved every time a commit to
SubtopicPageModel occurs.
The id for this model is of the form
'subtopicpage-[subtopic_page_id]-[version]'.
"""
# The id of the subtopic page being edited.
subtopic_page_id = ndb.StringProperty(indexed=True, required=True)
@staticmethod
def get_deletion_policy():
"""Subtopic page commit log is deleted only if the corresponding
topic is not public.
"""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def _get_instance_id(cls, subtopic_page_id, version):
"""This function returns the generated id for the get_commit function
in the parent class.
Args:
subtopic_page_id: str. The id of the subtopic page being edited.
version: int. The version number of the subtopic page after the
commit.
Returns:
str. The commit id with the subtopic page id and version number.
"""
return 'subtopicpage-%s-%s' % (subtopic_page_id, version)
@classmethod
def get_export_policy(cls):
"""This model is only stored for archive purposes. The commit log of
entities is not related to personal user data.
"""
return dict(super(cls, cls).get_export_policy(), **{
'subtopic_page_id': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
class TopicRightsSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a topic rights snapshot."""
pass
class TopicRightsSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a topic rights snapshot."""
pass
class TopicRightsModel(base_models.VersionedModel):
"""Storage model for rights related to a topic.
The id of each instance is the id of the corresponding topic.
"""
SNAPSHOT_METADATA_CLASS = TopicRightsSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = TopicRightsSnapshotContentModel
ALLOW_REVERT = False
# The user_ids of the managers of this topic.
manager_ids = ndb.StringProperty(indexed=True, repeated=True)
# Whether this topic is published.
topic_is_published = ndb.BooleanProperty(
indexed=True, required=True, default=False)
@staticmethod
def get_deletion_policy():
"""Topic rights should be kept if associated topic is published."""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def has_reference_to_user_id(cls, user_id):
"""Check whether TopicRightsModel references user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return cls.query(
cls.manager_ids == user_id
).get(keys_only=True) is not None
@classmethod
def get_by_user(cls, user_id):
"""Retrieves the rights object for all topics assigned to given user
Args:
user_id: str. ID of user.
Returns:
list(TopicRightsModel). The list of TopicRightsModel objects in
which the given user is a manager.
"""
topic_rights_models = cls.query(
cls.manager_ids == user_id
)
return topic_rights_models
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(TopicRightsModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
topic_rights = TopicRightsModel.get_by_id(self.id)
if topic_rights.topic_is_published:
status = constants.ACTIVITY_STATUS_PUBLIC
else:
status = constants.ACTIVITY_STATUS_PRIVATE
TopicCommitLogEntryModel(
id=('rights-%s-%s' % (self.id, self.version)),
user_id=committer_id,
topic_id=self.id,
commit_type=commit_type,
commit_message=commit_message,
commit_cmds=commit_cmds,
version=None,
post_commit_status=status,
post_commit_community_owned=False,
post_commit_is_private=not topic_rights.topic_is_published
).put()
snapshot_metadata_model = self.SNAPSHOT_METADATA_CLASS.get(
self.get_snapshot_id(self.id, self.version))
snapshot_metadata_model.content_user_ids = list(sorted(set(
self.manager_ids)))
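        # For each commit command, collect the user ids that the command
        # references. Which attributes of a command hold user ids is declared
        # in feconf.TOPIC_RIGHTS_CHANGE_ALLOWED_COMMANDS; the collected ids
        # are stored on the snapshot metadata model below.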
commit_cmds_user_ids = set()
for commit_cmd in commit_cmds:
user_id_attribute_names = python_utils.NEXT(
cmd['user_id_attribute_names']
for cmd in feconf.TOPIC_RIGHTS_CHANGE_ALLOWED_COMMANDS
if cmd['name'] == commit_cmd['cmd']
)
for user_id_attribute_name in user_id_attribute_names:
commit_cmds_user_ids.add(commit_cmd[user_id_attribute_name])
snapshot_metadata_model.commit_cmds_user_ids = list(
sorted(commit_cmds_user_ids))
snapshot_metadata_model.put()
@classmethod
def get_export_policy(cls):
"""Model contains user data."""
return dict(super(cls, cls).get_export_policy(), **{
'manager_ids': base_models.EXPORT_POLICY.EXPORTED,
'topic_is_published': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@classmethod
def export_data(cls, user_id):
"""(Takeout) Export user-relevant properties of TopicRightsModel.
Args:
user_id: str. The user_id denotes which user's data to extract.
Returns:
dict. The user-relevant properties of TopicRightsModel in a dict
format. In this case, we are returning all the ids of the topics
this user manages.
"""
managed_topics = cls.get_all().filter(cls.manager_ids == user_id)
managed_topic_ids = [right.id for right in managed_topics]
return {
'managed_topic_ids': managed_topic_ids
}
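    # Illustrative sketch (topic ids are hypothetical): for a user who manages
    # topics with ids 'topic_a' and 'topic_b', export_data returns
    # {'managed_topic_ids': ['topic_a', 'topic_b']}.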
|
apache-2.0
| -248,851,925,992,345,250
| 38.7
| 82
| 0.65963
| false
| 4.1685
| false
| false
| false
|
CentralLabFacilities/m3meka
|
python/m3/omnibase.py
|
1
|
31978
|
# -*- coding: utf-8 -*-
#M3 -- Meka Robotics Robot Components
#Copyright (c) 2010 Meka Robotics
#Author: edsinger@mekabot.com (Aaron Edsinger)
#M3 is free software: you can redistribute it and/or modify
#it under the terms of the GNU Lesser General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#M3 is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Lesser General Public License for more details.
#You should have received a copy of the GNU Lesser General Public License
#along with M3. If not, see <http://www.gnu.org/licenses/>.
from m3.vehicle import M3Vehicle
import m3.omnibase_pb2 as mob
from m3.component import M3Component
import yaml
import numpy as nu
import m3.toolbox as m3t
import time
from m3.unit_conversion import *
import math
class M3OmniBase(M3Vehicle):
"""
The M3OmniBase class has been designed as the principal interface for controlling an M3 omnidirectional mobile base.
It must be defined in the m3_config.yml file and can be created using the m3.component_factory module.
    The example below creates an interface for an M3OmniBase defined as M3OmniBase_mb0.
>>> import m3.omnibase as m3o
>>> omni = m3o.M3OmniBase('M3OmniBase_mb0') # creates M3OmniBase class
The M3OmniBase class can be used to send commands and retrieve status updates to and from the m3 realtime server. The
    example below configures the realtime proxy to update the M3OmniBase class with updates from the robot and to receive commands.
It also sets the omnibase controller to goal position control, subscribes to the power board to enable motor power,
and runs the wheel calibration routine if needed.
>>> import m3.rt_proxy as m3p
>>> proxy = m3p.M3RtProxy()
>>> proxy.start() # m3rt server must be running first
>>> proxy.make_operational_all()
>>> proxy.subscribe_status(omni)
>>> proxy.publish_command(omni)
>>> proxy.publish_param(omni)
>>> pwr_name=proxy.get_available_components('m3pwr')
>>> if len(pwr_name)>1:
pwr_name=m3t.user_select_components_interactive(pwr_name,single=True)
>>> pwr=m3f.create_component(pwr_name[0])
>>> proxy.subscribe_status(pwr)
>>> proxy.publish_command(pwr)
>>> pwr.set_motor_power_on()
>>> proxy.step()
>>> time.sleep(0.5)
>>> omni.calibrate(proxy)
>>> time.sleep(0.5)
>>> omni.set_local_position(0,0,0,proxy)
>>> omni.set_global_position(0,0,0,proxy)
>>> omni.set_max_linear_accel(0.3)
>>> omni.set_max_linear_velocity(0.3)
>>> omni.set_max_rotation_velocity(30)
>>> omni.set_max_rotation_accel(30)
>>> proxy.step()
>>> omni.set_mode_traj_goal()
>>> omni.set_traj_goal(0, 0, 0)
>>> proxy.step()
Now the M3OmniBase class can be used to issue global position commands and report our position:
>>> omni.set_traj_goal(2.0, 0, 180)
>>> proxy.step()
>>> print 'Position (x,y,yaw):', omni.get_global_position()
"""
def __init__(self,name):
M3Vehicle.__init__(self,name,type='m3omnibase')
self.status=mob.M3OmnibaseStatus()
self.command=mob.M3OmnibaseCommand()
self.param=mob.M3OmnibaseParam()
self.num_casters = 4
for i in range(3):
self.command.opspace_force_desired.append(0)
self.command.local_position_desired.append(0)
self.command.local_velocity_desired.append(0)
self.command.local_acceleration_desired.append(0)
self.command.global_position_desired.append(0)
self.command.global_velocity_desired.append(0)
self.command.global_acceleration_desired.append(0)
self.command.traj_goal.append(0)
self.command.local_position.append(0)
self.command.global_position.append(0)
for i in range(self.num_casters):
self.command.roll_torque_desired.append(0)
self.command.steer_torque_desired.append(0)
self.command.roll_velocity_desired.append(0)
self.command.steer_velocity_desired.append(0)
self.command.steer_theta_desired.append(0)
self.command.caster_mode.append(mob.OMNIBASE_CASTER_OFF)
self.param.enable_breakbeam.append(0)
self.vias=[]
self.via_idx=0
self.read_config()
def calibrate(self,proxy):
"""
Calibrates Omnibase casters if necessary.
:param proxy: running proxy
:type proxy: M3RtProxy
"""
need_to_calibrate = False
for i in range(self.num_casters):
if (not self.is_calibrated(i)):
need_to_calibrate = True
if need_to_calibrate:
print '------------------------------------------------'
print 'All casters not calibrated. Do calibration [y]?'
if m3t.get_yes_no('y'):
print 'Note: Orientations are facing robot'
print "Turn power on to robot and press any key."
raw_input()
self.set_mode_caster()
proxy.step()
time.sleep(4)
caster_names=['FrontRight','RearRight','RearLeft','FrontLeft']
wiggle = [1,2,1,2]
last_calib = -1
repeat_calib = 0
while need_to_calibrate:
for i in [1,2,3,0]:
if (not self.is_calibrated(i)):
print '-------------------------------------------'
print 'Calibrating caster: ', caster_names[i], '..'
#print 'Manual assist required in CCW direction'
if i == last_calib:
repeat_calib += 1
if repeat_calib == 0:
wiggle = [1,2,1,2]
self.home(i,proxy, wiggle[i])
elif repeat_calib == 1:
wiggle = [3,0,3,0]
self.home(i,proxy, wiggle[i])
elif repeat_calib == 2:
wiggle = [2,3,0,1]
self.home(i,proxy, wiggle[i])
elif repeat_calib >= 3:
raise m3t.M3Exception('Error calibrating. Please reposition base and try again.')
last_calib = i
need_to_calibrate = False
for i in range(self.num_casters):
if (not self.is_calibrated(i)):
need_to_calibrate = True
self.set_mode_caster_off(range(self.num_casters))
self.set_mode_off()
else:
print "Skipping Calibration.."
def home(self, idx, proxy, idx_wiggle):
time_out = 20.0
caster_names=['FrontRight','RearRight','RearLeft','FrontLeft']
self.set_mode_caster_off(range(4))
#self.set_mode_caster_theta(idx)
#self.set_mode_caster_theta(idx_wiggle)
self.set_roll_torques(0.0, idx)
self.enable_breakbeam(idx)
#start_theta = self.get_steer_theta()[idx]
#print 'Start theta:', idx, start_theta
#start_theta_wiggle = self.get_steer_theta()[idx_wiggle]
#theta = 0
#theta_cnt = 0\
self.set_mode_caster_torque(idx)
ts = time.time()
proxy.step()
while (not self.is_calibrated(idx)):
#theta_des = start_theta + theta
#self.set_steer_theta(theta_des, idx )
#theta_wig = start_theta_wiggle + 30.0 * math.cos(deg2rad(4.0 * theta_cnt))
#torque_roll = 2.0 * math.cos(deg2rad(6.0 * theta_cnt))
#self.set_steer_theta(theta_wig, idx_wiggle )
#self.set_roll_torques(torque_roll, idx)
proxy.step()
#str_tqs = self.get_steer_torques()
#rol_tqs = self.get_roll_torques()
#print 'Steer Joint Tq at idx', idx, ':', str_tqs[idx]
#print 'Roll Joint Tq at idx', idx, ':', rol_tqs[idx]
#print 'Steer Tq at idx', idx_wiggle, ':', str_tqs[idx_wiggle]
#print '.'
self.set_steer_torques(10.0, idx)
#theta_step = 2.0
#theta_cnt += theta_step
#theta_err = theta_des - self.get_steer_theta()[idx]
#print 'theta err:', theta_err
#if theta_err < 40.0:
# theta += theta_step
if time.time() - ts > time_out:
self.disable_breakbeam(idx)
self.set_mode_caster_off(idx)
#self.set_mode_caster_off(idx_wiggle)
self.set_roll_torques(0.0, idx)
self.set_steer_torques(0.0, idx)
proxy.step()
return
time.sleep(0.1)
self.set_steer_torques(0.0, idx)
self.set_roll_torques(0.0, idx)
self.set_mode_caster_off(idx)
#self.set_mode_caster_off(idx_wiggle)
self.disable_breakbeam(idx)
proxy.step()
print "Caster: ", caster_names[idx], " Calibrated."
def enable_breakbeam(self,idx):
self.param.enable_breakbeam[idx] = 1
def disable_breakbeam(self,idx):
self.param.enable_breakbeam[idx] = 0
def is_calibrated(self,idx):
return self.status.calibrated[idx]
def set_ctrl_mode(self, mode):
self.command.ctrl_mode=mode
def set_traj_mode(self, mode):
self.command.traj_mode=mode
def set_mode_off(self):
"""
        Turns the omnibase controller off.
"""
self.command.ctrl_mode=mob.OMNIBASE_CTRL_OFF
def set_mode_cart_local(self):
self.command.ctrl_mode=mob.OMNIBASE_CTRL_CART_LOCAL
def set_mode_caster_velocity(self, caster_idx):
self.command.ctrl_mode=mob.OMNIBASE_CTRL_CASTER
M3Component.set_int_array(self,self.command.caster_mode,mob.OMNIBASE_CASTER_VELOCITY,caster_idx)
def set_mode_caster_theta(self, caster_idx):
self.command.ctrl_mode=mob.OMNIBASE_CTRL_CASTER
M3Component.set_int_array(self,self.command.caster_mode,mob.OMNIBASE_CASTER_THETA,caster_idx)
def set_mode_caster_torque(self,caster_idx):
"""
Allows specified caster to be controlled with torque commands and places omnibase in 'caster_mode'.
:param caster_idx: Index of caster.
:type caster_idx: array_like, shape < ncasters, optional
:See Also:
:meth:`M3OmniBase.set_mode_caster_off`
:meth:`M3OmniBase.set_mode_caster`
"""
self.command.ctrl_mode=mob.OMNIBASE_CTRL_CASTER
M3Component.set_int_array(self,self.command.caster_mode,mob.OMNIBASE_CASTER_TORQUE,caster_idx)
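    # Illustrative sketch (caster index and torque value are hypothetical):
    # put one caster under direct torque control, then command a roll torque.
    #   >>> omni.set_mode_caster_torque(0)
    #   >>> omni.set_roll_torques(1.5, 0)
    #   >>> proxy.step()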
def set_mode_caster_off(self,caster_idx):
"""
Turns off controller for specified caster and places omnibase in 'caster_mode'.
:param caster_idx: Index of caster.
:type caster_idx: array_like, shape < ncasters, optional
:See Also:
:meth:`M3OmniBase.set_mode_caster_torque`
:meth:`M3OmniBase.set_mode_caster`
"""
self.command.ctrl_mode=mob.OMNIBASE_CTRL_CASTER
M3Component.set_int_array(self,self.command.caster_mode,mob.OMNIBASE_CASTER_OFF,caster_idx)
def set_mode_caster(self,mode,caster_idx=None):
M3Component.set_int_array(self,self.command.caster_mode,mode, caster_idx)
def set_mode_traj_goal(self):
"""
Allows omnibase to be controlled by issuing a goal position in global cartesian space.
:See Also:
:meth:`M3OmniBase.is_traj_goal_reached`
:meth:`M3OmniBase.set_traj_goal`
:meth:`M3OmniBase.set_mode_off`
:meth:`M3OmniBase.set_mode_caster`
"""
self.command.traj_mode = mob.OMNIBASE_TRAJ_GOAL
self.command.ctrl_mode = mob.OMNIBASE_CTRL_OPSPACE_TRAJ
def set_mode_traj_via(self):
self.command.traj_mode = mob.OMNIBASE_TRAJ_VIAS
self.command.ctrl_mode = mob.OMNIBASE_CTRL_OPSPACE_TRAJ
def set_mode_joystick(self):
"""
Allows omnibase to be controlled by joystick commands.
:See Also:
:meth:`M3OmniBase.set_joystick_x`
:meth:`M3OmniBase.set_joystick_y`
:meth:`M3OmniBase.set_joystick_yaw`
:meth:`M3OmniBase.set_joystick_button`
"""
self.command.traj_mode = mob.OMNIBASE_TRAJ_JOYSTICK
self.command.ctrl_mode = mob.OMNIBASE_CTRL_OPSPACE_TRAJ
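    # Illustrative sketch (joystick values are hypothetical, in the -1.0..1.0
    # range described for the set_joystick_* methods below): enable joystick
    # mode and stream one command.
    #   >>> omni.set_mode_joystick()
    #   >>> omni.set_joystick_button(0)
    #   >>> omni.set_joystick_x(0.5)
    #   >>> omni.set_joystick_y(0.0)
    #   >>> omni.set_joystick_yaw(0.1)
    #   >>> proxy.step()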
def set_mode_caster(self):
"""
        Allows the omnibase to be controlled at the caster level as opposed to cartesian space.
Additional commands must be issued to set the control mode for each individual caster.
:See Also:
:meth:`M3OmniBase.set_mode_caster_torque`
:meth:`M3OmniBase.set_mode_caster_off`
"""
self.command.ctrl_mode=mob.OMNIBASE_CTRL_CASTER
def set_mode_op_space_force(self):
self.command.ctrl_mode=mob.OMNIBASE_CTRL_OPSPACE_FORCE
def set_traj_mode_off(self):
self.command.traj_mode=mob.OMNIBASE_TRAJ_OFF
def set_local_velocities(self,x_dot,y_dot,heading_dot):
self.command.local_velocity_desired[0] = x_dot
self.command.local_velocity_desired[1] = y_dot
self.command.local_velocity_desired[2] = heading_dot
def set_local_positions(self,x,y,heading):
self.command.local_position_desired[0] = x
self.command.local_position_desired[1] = y
self.command.local_position_desired[2] = heading
def set_local_accelerations(self,x_dotdot,y_dotdot,heading_dotdot):
self.command.local_acceleration_desired[0] = x_dotdot
self.command.local_acceleration_desired[1] = y_dotdot
self.command.local_acceleration_desired[2] = heading_dotdot
def set_roll_torques(self, tq, ind=None):
"""
        Sets roll torque values for the selected casters. A list of caster indexes can be supplied
        to set specific caster torques, or the index
        can be omitted if the length of tq is equal to the number of casters.
:param tq: Roll torque values in Nm.
:type tq: array_like
:param ind: Index of casters.
:type ind: array_like, shape(len(tq)), optional
:See Also:
:meth:`M3OmniBase.set_mode_caster_torque`
:meth:`M3OmniBase.set_steer_torques`
"""
M3Component.set_float_array(self,self.command.roll_torque_desired,tq,ind)
def set_steer_torques(self, tq, ind=None):
"""
        Sets steer torque values for the selected casters. A list of caster indexes can be supplied
        to set specific caster torques, or the index
        can be omitted if the length of tq is equal to the number of casters.
:param tq: Steer torque values in Nm.
:type tq: array_like
:param ind: Index of casters.
:type ind: array_like, shape(len(tq)), optional
:See Also:
:meth:`M3OmniBase.set_mode_caster_torque`
:meth:`M3OmniBase.set_roll_torques`
"""
M3Component.set_float_array(self,self.command.steer_torque_desired,tq,ind)
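    # Illustrative sketch of the two indexing conventions (torque values are
    # hypothetical, in Nm): pass one value per caster, or pass values together
    # with the caster indexes they apply to.
    #   >>> omni.set_steer_torques([1.0, 1.0, 1.0, 1.0])  # all four casters
    #   >>> omni.set_steer_torques([2.0, 2.0], [0, 3])    # casters 0 and 3 only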
def set_steer_theta(self, th, ind=None):
M3Component.set_float_array(self,self.command.steer_theta_desired,th,ind)
def set_steer_velocities(self, v, ind=None):
M3Component.set_float_array(self,self.command.steer_velocity_desired,v,ind)
def set_roll_velocities(self, v, ind=None):
M3Component.set_float_array(self,self.command.roll_velocity_desired,v,ind)
def set_max_linear_accel(self, x):
"""
Sets maximum linear acceleration of omnibase in m/s^2
:param x: Max linear acceleration in m/s^2
:type x: float
.. Note:: Omnibase acceleration is still upper limited by absolute values
defined by parameters in configuration file.
:See Also:
:meth:`M3OmniBase.set_max_linear_velocity`
:meth:`M3OmniBase.set_max_rotation_accel`
:meth:`M3OmniBase.set_max_rotation_velocity`
"""
self.command.max_linear_acceleration = x
def set_max_linear_velocity(self, x):
"""
Sets maximum linear velocity of omnibase in m/s
:param x: Max linear velocity in m/s
:type x: float
.. Note:: Omnibase velocity is still upper limited by absolute values
defined by parameters in configuration file.
:See Also:
:meth:`M3OmniBase.set_max_linear_accel`
:meth:`M3OmniBase.set_max_rotation_accel`
:meth:`M3OmniBase.set_max_rotation_velocity`
"""
self.command.max_linear_velocity = x
def set_max_rotation_accel(self, x):
"""
Sets maximum rotational acceleration of omnibase in deg/sec^2
:param x: Max rotational acceleration in deg/sec^2
:type x: float
.. Note:: Omnibase acceleration is still upper limited by absolute values
defined by parameters in configuration file.
:See Also:
:meth:`M3OmniBase.set_max_linear_accel`
:meth:`M3OmniBase.set_max_linear_velocity`
:meth:`M3OmniBase.set_max_rotation_velocity`
"""
self.command.max_rotation_acceleration = x
def set_max_rotation_velocity(self, x):
"""
Sets maximum rotational velocity of omnibase in deg/s
:param x: Max rotational velocity in deg/s
:type x: float
.. Note:: Omnibase velocity is still upper limited by absolute values
defined by parameters in configuration file.
:See Also:
:meth:`M3OmniBase.set_max_linear_accel`
:meth:`M3OmniBase.set_max_rotation_accel`
:meth:`M3OmniBase.set_max_linear_velocity`
"""
self.command.max_rotation_velocity = x
def set_joystick_x(self, x):
"""
Sets value of X-axis command from joystick.
:param x: X-axis joystick command.
:type x: float (-1.0 <-> 1.0)
:See Also:
:meth:`M3OmniBase.set_mode_joystick`
:meth:`M3OmniBase.set_joystick_y`
:meth:`M3OmniBase.set_joystick_yaw`
:meth:`M3OmniBase.set_joystick_button`
"""
self.command.joystick_x = x
def set_joystick_y(self,y):
"""
Sets value of Y-axis command from joystick.
:param y: Y-axis joystick command.
:type y: float (-1.0 <-> 1.0)
:See Also:
:meth:`M3OmniBase.set_mode_joystick`
:meth:`M3OmniBase.set_joystick_x`
:meth:`M3OmniBase.set_joystick_yaw`
:meth:`M3OmniBase.set_joystick_button`
"""
self.command.joystick_y = y
def set_joystick_yaw(self,yaw):
"""
Sets value of Yaw-axis command from joystick.
:param yaw: Yaw-axis joystick command.
:type yaw: float (-1.0 <-> 1.0)
:See Also:
:meth:`M3OmniBase.set_mode_joystick`
:meth:`M3OmniBase.set_joystick_x`
:meth:`M3OmniBase.set_joystick_y`
:meth:`M3OmniBase.set_joystick_button`
"""
self.command.joystick_yaw = yaw
def set_joystick_button(self,b):
"""
Sets value of joystick button command. Currently a value of -1 should be sent to disable joystick,
and a value of 0 should be sent to enable joystick in default mode.
:param b: joystick button command.
:type b: int [-1,0]
:See Also:
:meth:`M3OmniBase.set_mode_joystick`
:meth:`M3OmniBase.set_joystick_x`
:meth:`M3OmniBase.set_joystick_y`
:meth:`M3OmniBase.set_joystick_yaw`
"""
self.command.joystick_button = b
def set_op_space_forces(self, x, y, torque):
self.command.opspace_force_desired[0] = x
self.command.opspace_force_desired[1] = y
self.command.opspace_force_desired[2] = torque
def get_global_position(self):
"""
Gets position of omnibase origin frame in the global frame.
:returns: position (x,y,yaw) in (m,m,deg)
:rtype: array, shape (3)
"""
return nu.array(self.status.global_position,float)
def get_motor_torques(self):
"""
Gets motor torque values at the actuator level (not joint/caster output).
:returns: torque values in Nm
:rtype: array, shape (ncasters*2)
:See Also:
:meth:`M3OmniBase.get_steer_torques`
:meth:`M3OmniBase.get_roll_torques`
"""
return nu.array(self.status.motor_torque_desired,float)
def get_steer_torques(self):
"""
Gets steer joint torque values at the caster level.
:returns: torque values in Nm
:rtype: array, shape (ncasters)
:See Also:
:meth:`M3OmniBase.get_motor_torques`
:meth:`M3OmniBase.get_roll_torques`
"""
return nu.array(self.status.steer_torque_desired,float)
def get_steer_theta(self):
"""
        Gets steer joint angle values at the caster level.
        :returns: steer angle values in deg
:rtype: array, shape (ncasters)
:See Also:
:meth:`M3OmniBase.get_motor_torques`
:meth:`M3OmniBase.get_roll_torques`
"""
return nu.array(self.status.steer_angle,float)
def get_roll_torques(self):
"""
Gets roll joint torque values at the caster level.
:returns: torque values in Nm
:rtype: array, shape (ncasters)
:See Also:
:meth:`M3OmniBase.get_steer_torques`
            :meth:`M3OmniBase.get_motor_torques`
"""
return nu.array(self.status.roll_torque_desired,float)
def get_local_position(self):
return nu.array(self.status.local_position,float)
def get_desired_position(self):
return nu.array(self.status.position_desired,float)
def get_desired_acceleration(self):
return nu.array(self.status.local_acceleration,float)
def get_bus_voltage(self):
"""
Gets bus voltage for motor power.
:returns: value in volts
:rtype: float
"""
return self.status.bus_voltage
def set_traj_goal(self, x, y, heading):
"""
Sets desired end location goal in global frame for trajectory controller.
:param x: desired X-axis value in global frame
:type x: float
:param y: desired Y-axis value in global frame
:type y: float
:param heading: desired Yaw-axis value in global frame
:type heading: float
:See Also:
:meth:`M3OmniBase.set_mode_traj_goal`
:meth:`M3OmniBase.is_traj_goal_reached`
"""
self.command.traj_goal[0] = x
self.command.traj_goal[1] = y
self.command.traj_goal[2] = heading
def is_traj_goal_reached(self):
"""
        Returns true or false depending on whether the active goal location has been
reached by the controller.
:returns: true/false
:rtype: bool
:See Also:
:meth:`M3OmniBase.set_traj_goal`
:meth:`M3OmniBase.set_mode_traj_goal`
"""
return self.status.traj_goal_reached
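    # Illustrative sketch (goal pose and sleep interval are hypothetical):
    # command a goal and poll until the trajectory controller reports arrival.
    #   >>> omni.set_mode_traj_goal()
    #   >>> omni.set_traj_goal(1.0, 0.5, 90)
    #   >>> proxy.step()
    #   >>> while not omni.is_traj_goal_reached():
    #   ...     proxy.step()
    #   ...     time.sleep(0.1)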
def set_local_position(self,x,y,yaw,proxy):
"""
Sets the current local position of the odometry system.
:param x: desired X-axis value in local frame
:type x: float
:param y: desired Y-axis value in local frame
:type y: float
:param yaw: desired Yaw-axis value in local frame
:type yaw: float
.. Note:: Should be set to zero after starting real-time server component
because of initial drift caused by non-zero encoder values.
:See Also:
:meth:`M3OmniBase.set_global_position`
"""
self.command.local_position[0] = x
self.command.local_position[1] = y
self.command.local_position[2] = yaw
self.command.adjust_local_position = 1
proxy.step()
time.sleep(0.1)
self.command.adjust_local_position = 0
proxy.step()
'''def set_local_zero(self):
self.command.local_position[0] = 0
self.command.local_position[1] = 0
self.command.local_position[2] = 0
self.command.adjust_local_position = 1'''
def set_global_position(self,x,y,yaw,proxy):
"""
Sets the current global position of the odometry system.
:param x: desired X-axis value in global frame
:type x: float
:param y: desired Y-axis value in global frame
:type y: float
:param yaw: desired Yaw-axis value in global frame
:type yaw: float
.. Note:: Should be set to zero after starting real-time server component
because of initial drift caused by non-zero encoder values.
:See Also:
:meth:`M3OmniBase.set_local_position`
"""
self.command.global_position[0] = x
self.command.global_position[1] = y
self.command.global_position[2] = yaw
self.command.adjust_global_position = 1
proxy.step()
time.sleep(0.1)
self.command.adjust_global_position = 0
proxy.step()
def add_via(self,x_des, y_des, yaw_des):
self.vias.append([[x_des, y_des, yaw_des], 0 , 0])
def load_command(self):
self.command.ClearField('vias')
nadd=min(20,len(self.vias)) #only add 20 per cycle to keep packet size down
for n in range(nadd):
self.via_idx=self.via_idx+1
pos_des=self.vias[n][0]
lin_vel_avg=self.vias[n][1]
ang_vel_avg=self.vias[n][2]
self.command.vias.add()
for i in range(3):
self.command.vias[-1].position_desired.append(pos_des[i])
self.command.vias[-1].lin_velocity_avg = lin_vel_avg
self.command.vias[-1].ang_velocity_avg = ang_vel_avg
self.command.vias[-1].idx=self.via_idx
print self.command.vias[-1]
self.vias=self.vias[nadd:]
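    # Illustrative sketch of the via workflow (via poses are hypothetical):
    # queue vias with add_via and switch to via trajectory mode; load_command
    # then streams the queued vias to the controller in batches of at most 20
    # per cycle.
    #   >>> omni.add_via(1.0, 0.0, 0)
    #   >>> omni.add_via(1.0, 1.0, 90)
    #   >>> omni.set_mode_traj_via()
    #   >>> proxy.step()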
|
mit
| 7,803,926,033,021,083,000
| 42.44837
| 150
| 0.492682
| false
| 4.020367
| false
| false
| false
|
blckshrk/Weboob
|
modules/parolesmania/backend.py
|
1
|
1697
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.lyrics import ICapLyrics, SongLyrics
from weboob.tools.backend import BaseBackend
from .browser import ParolesmaniaBrowser
from urllib import quote_plus
__all__ = ['ParolesmaniaBackend']
class ParolesmaniaBackend(BaseBackend, ICapLyrics):
NAME = 'parolesmania'
MAINTAINER = u'Julien Veyssier'
EMAIL = 'julien.veyssier@aiur.fr'
VERSION = '0.h'
DESCRIPTION = 'Paroles Mania lyrics website'
LICENSE = 'AGPLv3+'
BROWSER = ParolesmaniaBrowser
def get_lyrics(self, id):
return self.browser.get_lyrics(id)
def iter_lyrics(self, criteria, pattern):
return self.browser.iter_lyrics(criteria, quote_plus(pattern.encode('utf-8')))
def fill_songlyrics(self, songlyrics, fields):
if 'content' in fields:
sl = self.get_lyrics(songlyrics.id)
songlyrics.content = sl.content
return songlyrics
OBJECTS = {
SongLyrics: fill_songlyrics
}
|
agpl-3.0
| 5,074,640,716,121,560,000
| 31.018868
| 86
| 0.714791
| false
| 3.498969
| false
| false
| false
|
harisbal/pandas
|
pandas/tests/generic/test_generic.py
|
1
|
36186
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from copy import copy, deepcopy
from warnings import catch_warnings, simplefilter
import pytest
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas import (Series, DataFrame, Panel,
date_range, MultiIndex)
import pandas.io.formats.printing as printing
from pandas.compat import range, zip, PY3
from pandas.util.testing import (assert_raises_regex,
assert_series_equal,
assert_panel_equal,
assert_frame_equal)
import pandas.util.testing as tm
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
            if value is specified, use that if it's a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if is_scalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
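    # Illustrative note: an int shape is broadcast across every axis, so
    # _construct(4) builds a length-4 Series, a 4x4 DataFrame or a 4x4x4 Panel
    # depending on the subclass under test, filled with random data unless a
    # value is given.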
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
idx = list('ABCD')
# relabeling values passed into self.rename
args = [
str.lower,
{x: x.lower() for x in idx},
Series({x: x.lower() for x in idx}),
]
for axis in self._axes():
kwargs = {axis: idx}
obj = self._construct(4, **kwargs)
for arg in args:
# rename a single axis
result = obj.rename(**{axis: arg})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
        # _get_numeric_data includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
assert s.get(i) == d
assert s.get(i, d) == d
assert s.get(i, "z") == d
for other in others:
assert s.get(other, "z") == "z"
assert s.get(other, other) == other
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
pytest.raises(ValueError, lambda: bool(obj == 0))
pytest.raises(ValueError, lambda: bool(obj == 1))
pytest.raises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
pytest.raises(ValueError, lambda: bool(obj == 0))
pytest.raises(ValueError, lambda: bool(obj == 1))
pytest.raises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
pytest.raises(ValueError, lambda: bool(obj == 0))
pytest.raises(ValueError, lambda: bool(obj == 1))
pytest.raises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
pytest.raises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
printing.pprint_thing("this works and shouldn't")
pytest.raises(ValueError, f)
pytest.raises(ValueError, lambda: obj1 and obj2)
pytest.raises(ValueError, lambda: obj1 or obj2)
pytest.raises(ValueError, lambda: not obj1)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# see gh-5191
# Compound dtypes should raise NotImplementedError.
def f(dtype):
return self._construct(shape=3, value=1, dtype=dtype)
pytest.raises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
assert v is None
else:
assert v == getattr(y, m, None)
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
        # (currently there is a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
pytest.skip('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
        # Check for stability when it receives a seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
os1, os2 = [], []
for _ in range(2):
np.random.seed(test)
os1.append(o.sample(n=4))
os2.append(o.sample(frac=0.7))
self._compare(*os1)
self._compare(*os2)
# Check for error when random_state argument invalid.
with pytest.raises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with pytest.raises(ValueError):
o.sample(n=3, frac=0.3)
        # Check that it raises the right error for negative lengths
with pytest.raises(ValueError):
o.sample(n=-3)
with pytest.raises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with pytest.raises(ValueError):
o.sample(n=3.2)
# Check lengths are right
assert len(o.sample(n=4) == 4)
assert len(o.sample(frac=0.34) == 3)
assert len(o.sample(frac=0.36) == 4)
###
# Check weights
###
# Weight length must be right
with pytest.raises(ValueError):
o.sample(n=3, weights=[0, 1])
with pytest.raises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with pytest.raises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with pytest.raises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with pytest.raises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with pytest.raises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with pytest.raises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with pytest.raises(ValueError):
o.sample(n=3, weights=nan_weights)
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
assert o.size == np.prod(o.shape)
assert o.size == 10 ** len(o.axes)
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
assert len(np.array_split(o, 5)) == 5
assert len(np.array_split(o, 2)) == 2
def test_unexpected_keyword(self): # GH8597
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assert_raises_regex(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assert_raises_regex(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assert_raises_regex(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assert_raises_regex(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
# See gh-12301
def test_stat_unexpected_keyword(self):
obj = self._construct(5)
starwars = 'Star Wars'
errmsg = 'unexpected keyword'
with assert_raises_regex(TypeError, errmsg):
obj.max(epic=starwars) # stat_function
with assert_raises_regex(TypeError, errmsg):
obj.var(epic=starwars) # stat_function_ddof
with assert_raises_regex(TypeError, errmsg):
obj.sum(epic=starwars) # cum_function
with assert_raises_regex(TypeError, errmsg):
obj.any(epic=starwars) # logical_function
def test_api_compat(self):
# GH 12021
# compat for __name__, __qualname__
obj = self._construct(5)
for func in ['sum', 'cumsum', 'any', 'var']:
f = getattr(obj, func)
assert f.__name__ == func
if PY3:
assert f.__qualname__.endswith(func)
def test_stat_non_defaults_args(self):
obj = self._construct(5)
out = np.array([0])
errmsg = "the 'out' parameter is not supported"
with assert_raises_regex(ValueError, errmsg):
obj.max(out=out) # stat_function
with assert_raises_regex(ValueError, errmsg):
obj.var(out=out) # stat_function_ddof
with assert_raises_regex(ValueError, errmsg):
obj.sum(out=out) # cum_function
with assert_raises_regex(ValueError, errmsg):
obj.any(out=out) # logical_function
def test_truncate_out_of_bounds(self):
# GH11382
# small
shape = [int(2e3)] + ([1] * (self._ndim - 1))
small = self._construct(shape, dtype='int8', value=1)
self._compare(small.truncate(), small)
self._compare(small.truncate(before=0, after=3e3), small)
self._compare(small.truncate(before=-1, after=2e3), small)
# big
shape = [int(2e6)] + ([1] * (self._ndim - 1))
big = self._construct(shape, dtype='int8', value=1)
self._compare(big.truncate(), big)
self._compare(big.truncate(before=0, after=3e6), big)
self._compare(big.truncate(before=-1, after=2e6), big)
def test_validate_bool_args(self):
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
super(DataFrame, df).rename_axis(mapper={'a': 'x', 'b': 'y'},
axis=1, inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df).drop('a', axis=1, inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df).sort_index(inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df)._consolidate(inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df).fillna(value=0, inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df).replace(to_replace=1, value=7,
inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df).interpolate(inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df)._where(cond=df.a > 2, inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df).mask(cond=df.a > 2, inplace=value)
def test_copy_and_deepcopy(self):
# GH 15444
for shape in [0, 1, 2]:
obj = self._construct(shape)
for func in [copy,
deepcopy,
lambda x: x.copy(deep=False),
lambda x: x.copy(deep=True)]:
obj_copy = func(obj)
assert obj_copy is not obj
self._compare(obj_copy, obj)
@pytest.mark.parametrize("periods,fill_method,limit,exp", [
(1, "ffill", None, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, 0]),
(1, "ffill", 1, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, np.nan]),
(1, "bfill", None, [np.nan, 0, 0, 1, 1, 1.5, np.nan, np.nan]),
(1, "bfill", 1, [np.nan, np.nan, 0, 1, 1, 1.5, np.nan, np.nan]),
(-1, "ffill", None, [np.nan, np.nan, -.5, -.5, -.6, 0, 0, np.nan]),
(-1, "ffill", 1, [np.nan, np.nan, -.5, -.5, -.6, 0, np.nan, np.nan]),
(-1, "bfill", None, [0, 0, -.5, -.5, -.6, np.nan, np.nan, np.nan]),
(-1, "bfill", 1, [np.nan, 0, -.5, -.5, -.6, np.nan, np.nan, np.nan])
])
def test_pct_change(self, periods, fill_method, limit, exp):
vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
obj = self._typ(vals)
func = getattr(obj, 'pct_change')
res = func(periods=periods, fill_method=fill_method, limit=limit)
if type(obj) is DataFrame:
tm.assert_frame_equal(res, DataFrame(exp))
else:
tm.assert_series_equal(res, Series(exp))
class TestNDFrame(object):
# tests that don't fit elsewhere
    def test_sample(self):
# Fixes issue: 2419
# additional specific object based tests
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with pytest.raises(ValueError):
s.sample(n=3, weights='weight_column')
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
panel = Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with pytest.raises(ValueError):
panel.sample(n=1, weights='weight_column')
with pytest.raises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with pytest.raises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
        # Check that it re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with pytest.raises(ValueError):
df.sample(n=1, axis=2)
with pytest.raises(ValueError):
df.sample(n=1, axis='not_a_name')
with pytest.raises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with pytest.raises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
p = Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
        # Weights have empty values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with pytest.raises(ValueError):
df.sample(1, weights=s4)
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
tm.assert_series_equal(s.squeeze(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.squeeze(), df)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
for p in [tm.makePanel()]:
tm.assert_panel_equal(p.squeeze(), p)
# squeezing
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(df.squeeze(), df['A'])
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
p = tm.makePanel().reindex(items=['ItemA'])
tm.assert_frame_equal(p.squeeze(), p['ItemA'])
p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A'])
tm.assert_series_equal(p.squeeze(), p.loc['ItemA', :, 'A'])
# don't fail with 0 length dimensions GH11229 & GH8999
empty_series = Series([], name='five')
empty_frame = DataFrame([empty_series])
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
empty_panel = Panel({'six': empty_frame})
[tm.assert_series_equal(empty_series, higher_dim.squeeze())
for higher_dim in [empty_series, empty_frame, empty_panel]]
# axis argument
df = tm.makeTimeDataFrame(nper=1).iloc[:, :1]
assert df.shape == (1, 1)
tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis='index'), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
tm.assert_series_equal(df.squeeze(axis='columns'), df.iloc[:, 0])
assert df.squeeze() == df.iloc[0, 0]
pytest.raises(ValueError, df.squeeze, axis=2)
pytest.raises(ValueError, df.squeeze, axis='x')
df = tm.makeTimeDataFrame(3)
tm.assert_frame_equal(df.squeeze(axis=0), df)
def test_numpy_squeeze(self):
s = tm.makeFloatSeries()
tm.assert_series_equal(np.squeeze(s), s)
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(np.squeeze(df), df['A'])
def test_transpose(self):
msg = (r"transpose\(\) got multiple values for "
r"keyword argument 'axes'")
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
# calls implementation in pandas/core/base.py
tm.assert_series_equal(s.transpose(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.transpose().transpose(), df)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
for p in [tm.makePanel()]:
tm.assert_panel_equal(p.transpose(2, 0, 1)
.transpose(1, 2, 0), p)
tm.assert_raises_regex(TypeError, msg, p.transpose,
2, 0, 1, axes=(2, 0, 1))
def test_numpy_transpose(self):
msg = "the 'axes' parameter is not supported"
s = tm.makeFloatSeries()
tm.assert_series_equal(
np.transpose(s), s)
tm.assert_raises_regex(ValueError, msg,
np.transpose, s, axes=1)
df = tm.makeTimeDataFrame()
tm.assert_frame_equal(np.transpose(
np.transpose(df)), df)
tm.assert_raises_regex(ValueError, msg,
np.transpose, df, axes=1)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
p = tm.makePanel()
tm.assert_panel_equal(np.transpose(
np.transpose(p, axes=(2, 0, 1)),
axes=(1, 2, 0)), p)
def test_take(self):
indices = [1, 5, -2, 6, 3, -1]
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
out = s.take(indices)
expected = Series(data=s.values.take(indices),
index=s.index.take(indices), dtype=s.dtype)
tm.assert_series_equal(out, expected)
for df in [tm.makeTimeDataFrame()]:
out = df.take(indices)
expected = DataFrame(data=df.values.take(indices, axis=0),
index=df.index.take(indices),
columns=df.columns)
tm.assert_frame_equal(out, expected)
indices = [-3, 2, 0, 1]
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
for p in [tm.makePanel()]:
out = p.take(indices)
expected = Panel(data=p.values.take(indices, axis=0),
items=p.items.take(indices),
major_axis=p.major_axis,
minor_axis=p.minor_axis)
tm.assert_panel_equal(out, expected)
def test_take_invalid_kwargs(self):
indices = [-3, 2, 0, 1]
s = tm.makeFloatSeries()
df = tm.makeTimeDataFrame()
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
p = tm.makePanel()
for obj in (s, df, p):
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, obj.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, obj.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, obj.take,
indices, mode='clip')
def test_equals(self):
s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
s2 = s1.copy()
assert s1.equals(s2)
s1[1] = 99
assert not s1.equals(s2)
# NaNs compare as equal
s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3])
s2 = s1.copy()
assert s1.equals(s2)
s2[0] = 9.9
assert not s1.equals(s2)
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s1 = Series([1, 2, np.nan], index=idx)
s2 = s1.copy()
assert s1.equals(s2)
# Add object dtype column with nans
index = np.random.random(10)
df1 = DataFrame(
np.random.random(10, ), index=index, columns=['floats'])
df1['text'] = 'the sky is so blue. we could use more chocolate.'.split(
)
df1['start'] = date_range('2000-1-1', periods=10, freq='T')
df1['end'] = date_range('2000-1-1', periods=10, freq='D')
df1['diff'] = df1['end'] - df1['start']
df1['bool'] = (np.arange(10) % 3 == 0)
df1.loc[::2] = np.nan
df2 = df1.copy()
assert df1['text'].equals(df2['text'])
assert df1['start'].equals(df2['start'])
assert df1['end'].equals(df2['end'])
assert df1['diff'].equals(df2['diff'])
assert df1['bool'].equals(df2['bool'])
assert df1.equals(df2)
assert not df1.equals(object)
# different dtype
different = df1.copy()
different['floats'] = different['floats'].astype('float32')
assert not df1.equals(different)
# different index
different_index = -index
different = df2.set_index(different_index)
assert not df1.equals(different)
# different columns
different = df2.copy()
different.columns = df2.columns[::-1]
assert not df1.equals(different)
# DatetimeIndex
index = pd.date_range('2000-1-1', periods=10, freq='T')
df1 = df1.set_index(index)
df2 = df1.copy()
assert df1.equals(df2)
# MultiIndex
df3 = df1.set_index(['text'], append=True)
df2 = df1.set_index(['text'], append=True)
assert df3.equals(df2)
df2 = df1.set_index(['floats'], append=True)
assert not df3.equals(df2)
# NaN in index
df3 = df1.set_index(['floats'], append=True)
df2 = df1.set_index(['floats'], append=True)
assert df3.equals(df2)
# GH 8437
a = pd.Series([False, np.nan])
b = pd.Series([False, np.nan])
c = pd.Series(index=range(2))
d = pd.Series(index=range(2))
e = pd.Series(index=range(2))
f = pd.Series(index=range(2))
c[:-1] = d[:-1] = e[0] = f[0] = False
assert a.equals(a)
assert a.equals(b)
assert a.equals(c)
assert a.equals(d)
assert a.equals(e)
assert e.equals(f)
def test_describe_raises(self):
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
with pytest.raises(NotImplementedError):
tm.makePanel().describe()
def test_pipe(self):
df = DataFrame({'A': [1, 2, 3]})
f = lambda x, y: x ** y
result = df.pipe(f, 2)
expected = DataFrame({'A': [1, 4, 9]})
assert_frame_equal(result, expected)
result = df.A.pipe(f, 2)
assert_series_equal(result, expected.A)
def test_pipe_tuple(self):
df = DataFrame({'A': [1, 2, 3]})
f = lambda x, y: y
result = df.pipe((f, 'y'), 0)
assert_frame_equal(result, df)
result = df.A.pipe((f, 'y'), 0)
assert_series_equal(result, df.A)
def test_pipe_tuple_error(self):
df = DataFrame({"A": [1, 2, 3]})
f = lambda x, y: y
with pytest.raises(ValueError):
df.pipe((f, 'y'), x=1, y=0)
with pytest.raises(ValueError):
df.A.pipe((f, 'y'), x=1, y=0)
def test_pipe_panel(self):
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
wp = Panel({'r1': DataFrame({"A": [1, 2, 3]})})
f = lambda x, y: x + y
result = wp.pipe(f, 2)
expected = wp + 2
assert_panel_equal(result, expected)
result = wp.pipe((f, 'y'), x=1)
expected = wp + 1
assert_panel_equal(result, expected)
with pytest.raises(ValueError):
result = wp.pipe((f, 'y'), x=1, y=1)
@pytest.mark.parametrize('box', [pd.Series, pd.DataFrame])
def test_axis_classmethods(self, box):
obj = box()
values = (list(box._AXIS_NAMES.keys()) +
list(box._AXIS_NUMBERS.keys()) +
list(box._AXIS_ALIASES.keys()))
for v in values:
assert obj._get_axis_number(v) == box._get_axis_number(v)
assert obj._get_axis_name(v) == box._get_axis_name(v)
assert obj._get_block_manager_axis(v) == \
box._get_block_manager_axis(v)
|
bsd-3-clause
| 2,502,932,496,458,174,000
| 34.03001
| 79
| 0.520063
| false
| 3.707962
| true
| false
| false
|
tdjordan/tortoisegit
|
gitproc.py
|
1
|
3594
|
#
# front-end for TortoiseGit dialogs
#
# Copyright (C) 2007 TK Soh <teekaysoh@gmail.com>
#
import os
import sys
from tortoisegit.tgitutil import get_prog_root
# always use the git exe installed with TortoiseGit
tgitdir = get_prog_root()
try:
os.environ['PATH'] = os.path.pathsep.join([tgitdir, os.environ['PATH']])
except KeyError:
os.environ['PATH'] = tgitdir
if not sys.stdin.isatty():
try:
        import pywintypes
        import win32traceutil
except ImportError:
pass
except pywintypes.error:
pass
# Map gitproc commands to dialog modules in gitgtk/
from gitgtk import commit, status, addremove, tagadd, tags, history, merge
from gitgtk import diff, revisions, update, serve, clone, synch, gitcmd, about
from gitgtk import recovery, tgitconfig, datamine
_dialogs = { 'commit' : commit, 'status' : status, 'revert' : status,
'add' : addremove, 'remove' : addremove, 'tag' : tagadd,
'tags' : tags, 'log' : history, 'history': history,
'diff' : diff, 'merge' : merge, 'tip' : revisions,
'parents': revisions, 'heads' : revisions, 'update' : update,
'clone' : clone, 'serve' : serve, 'synch' : synch,
'about' : about, 'config' : tgitconfig, 'recovery': recovery,
'datamine': datamine }
def get_list_from_file(filename):
fd = open(filename, "r")
lines = [ x.replace("\n", "") for x in fd.readlines() ]
fd.close()
return lines
def get_option(args):
import getopt
long_opt_list = ('command=', 'exepath=', 'listfile=', 'root=', 'cwd=',
'deletelistfile', 'nogui')
opts, args = getopt.getopt(args, "c:e:l:dR:", long_opt_list)
# Set default options
options = {}
options['gitcmd'] = 'help'
options['cwd'] = os.getcwd()
options['files'] = []
options['gui'] = True
listfile = None
delfile = False
for o, a in opts:
if o in ("-c", "--command"):
options['gitcmd'] = a
elif o in ("-l", "--listfile"):
listfile = a
elif o in ("-d", "--deletelistfile"):
delfile = True
elif o in ("--nogui"):
options['gui'] = False
elif o in ("-R", "--root"):
options['root'] = a
elif o in ("--cwd"):
options['cwd'] = a
if listfile:
options['files'] = get_list_from_file(listfile)
if delfile:
os.unlink(listfile)
return (options, args)
def parse(args):
option, args = get_option(args)
cmdline = ['git', option['gitcmd']]
if 'root' in option:
cmdline.append('--repository')
cmdline.append(option['root'])
cmdline.extend(args)
cmdline.extend(option['files'])
option['cmdline'] = cmdline
global _dialogs
dialog = _dialogs.get(option['gitcmd'], gitcmd)
dialog.run(**option)
def run_trapped(args):
try:
        dlg = parse(args)
except:
import traceback
from gitgtk.dialog import error_dialog
tr = traceback.format_exc()
print tr
error_dialog(None, "Error executing gitproc", tr)
if __name__=='__main__':
#dlg = parse(['-c', 'help', '--', '-v'])
#dlg = parse(['-c', 'log', '--root', 'c:\git\h1', '--', '-l1'])
#dlg = parse(['-c', 'status', '--root', 'c:\hg\h1', ])
#dlg = parse(['-c', 'add', '--root', 'c:\hg\h1', '--listfile', 'c:\\hg\\h1\\f1', '--notify'])
#dlg = parse(['-c', 'rollback', '--root', 'c:\\hg\\h1'])
print "gitproc sys.argv =", sys.argv
dlg = run_trapped(sys.argv[1:])
|
gpl-2.0
| -3,280,306,436,063,970,300
| 30.80531
| 97
| 0.553422
| false
| 3.355742
| false
| false
| false
|
Guymer/PyGuymer
|
return_dict_of_ISO_subtitle_streams.py
|
1
|
2124
|
# -*- coding: utf-8 -*-
##############################################################################################
# This file is deprecated because Python 2.x is deprecated #
# A Python 3.x version of this file can be found at: #
# #
# https://github.com/Guymer/PyGuymer3/blob/master/return_dict_of_ISO_subtitle_streams.py #
##############################################################################################
def return_dict_of_ISO_subtitle_streams(fname, usr_track = -1):
# Import modules ...
import subprocess
import xml.etree.ElementTree
# Check input ...
if usr_track == -1:
raise Exception("no track was requested")
# Find track info ...
proc = subprocess.Popen(
[
"lsdvd",
"-x",
"-Ox",
fname
],
stderr = subprocess.PIPE,
stdout = subprocess.PIPE
)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise Exception(u"\"lsdvd\" command failed")
# Clean up ...
# NOTE: "lsdvd" sometimes returns invalid XML as it does not: escape
# characters; or remove invalid characters.
    stdout = unicode(stdout, "utf-8", "ignore").replace(u"&", u"&amp;")
# Loop over all tracks ...
for track in xml.etree.ElementTree.fromstring(stdout).findall("track"):
# Skip if this track is not the chosen one ...
if int(track.find("ix").text) != int(usr_track):
continue
# Create empty dictionary ...
ans = {}
# Loop over all subtitle channels in this track ...
for subp in track.findall("subp"):
# Append information ...
ans[subp.find("streamid").text] = {
"content" : subp.find("content").text,
"langcode" : subp.find("langcode").text,
"language" : subp.find("language").text
}
# Return dictionary ...
return ans
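# Illustrative usage (a minimal sketch; assumes the "lsdvd" utility is
# installed and that the ISO path below exists):
#
#   streams = return_dict_of_ISO_subtitle_streams("/path/to/movie.iso", usr_track = 1)
#   for streamid, info in streams.items():
#       print streamid, info["langcode"], info["language"]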
|
apache-2.0
| -7,274,281,633,515,023,000
| 35.62069
| 94
| 0.469397
| false
| 4.637555
| false
| false
| false
|
hyperspy/hyperspy_gui_ipywidgets
|
hyperspy_gui_ipywidgets/tests/test_tools.py
|
1
|
9853
|
import numpy as np
import hyperspy.api as hs
from hyperspy_gui_ipywidgets.tests.utils import KWARGS
from hyperspy.signal_tools import (Signal1DCalibration, ImageContrastEditor,
EdgesRange)
class TestTools:
def setup_method(self, method):
self.s = hs.signals.Signal1D(1 + np.arange(100)**2)
self.s.change_dtype('float')
self.s.axes_manager[0].offset = 10
self.s.axes_manager[0].scale = 2
self.s.axes_manager[0].units = "m"
def test_calibrate(self):
s = self.s
cal = Signal1DCalibration(s)
cal.ss_left_value = 10
cal.ss_right_value = 30
wd = cal.gui(**KWARGS)["ipywidgets"]["wdict"]
wd["new_left"].value = 0
wd["new_right"].value = 10
wd["units"].value = "nm"
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
assert s.axes_manager[0].scale == 1
assert s.axes_manager[0].offset == 0
assert s.axes_manager[0].units == "nm"
def test_calibrate_from_s(self):
s = self.s
wd = s.calibrate(**KWARGS)["ipywidgets"]["wdict"]
wd["left"].value = 10
wd["right"].value = 30
wd["new_left"].value = 1
wd["new_right"].value = 11
wd["units"].value = "nm"
assert wd["offset"].value == 1
assert wd["scale"].value == 1
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
assert s.axes_manager[0].scale == 1
assert s.axes_manager[0].offset == 1
assert s.axes_manager[0].units == "nm"
def test_smooth_sg(self):
s = self.s
s.add_gaussian_noise(0.1)
s2 = s.deepcopy()
wd = s.smooth_savitzky_golay(**KWARGS)["ipywidgets"]["wdict"]
wd["window_length"].value = 11
wd["polynomial_order"].value = 5
wd["differential_order"].value = 1
wd["color"].value = "red"
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
s2.smooth_savitzky_golay(polynomial_order=5, window_length=11,
differential_order=1)
np.testing.assert_allclose(s.data, s2.data)
def test_smooth_lowess(self):
s = self.s
s.add_gaussian_noise(0.1)
s2 = s.deepcopy()
wd = s.smooth_lowess(**KWARGS)["ipywidgets"]["wdict"]
wd["smoothing_parameter"].value = 0.9
wd["number_of_iterations"].value = 3
wd["color"].value = "red"
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
s2.smooth_lowess(smoothing_parameter=0.9, number_of_iterations=3)
np.testing.assert_allclose(s.data, s2.data)
def test_smooth_tv(self):
s = self.s
s.add_gaussian_noise(0.1)
s2 = s.deepcopy()
wd = s.smooth_tv(**KWARGS)["ipywidgets"]["wdict"]
wd["smoothing_parameter"].value = 300
wd["color"].value = "red"
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
s2.smooth_tv(smoothing_parameter=300)
np.testing.assert_allclose(s.data, s2.data)
def test_filter_butterworth(self):
s = self.s
s.add_gaussian_noise(0.1)
s2 = s.deepcopy()
wd = s.filter_butterworth(**KWARGS)["ipywidgets"]["wdict"]
wd["cutoff"].value = 0.5
wd["order"].value = 3
wd["type"].value = "high"
wd["color"].value = "red"
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
s2.filter_butterworth(
cutoff_frequency_ratio=0.5,
order=3,
type="high")
np.testing.assert_allclose(s.data, s2.data)
def test_remove_background(self):
s = self.s
s.add_gaussian_noise(0.1)
s2 = s.remove_background(
signal_range=(15., 50.),
background_type='Polynomial',
polynomial_order=2,
fast=False,
zero_fill=True)
wd = s.remove_background(**KWARGS)["ipywidgets"]["wdict"]
assert wd["polynomial_order"].layout.display == "none" # not visible
wd["background_type"].value = "Polynomial"
assert wd["polynomial_order"].layout.display == "" # visible
wd["polynomial_order"].value = 2
wd["fast"].value = False
wd["zero_fill"] = True
wd["left"].value = 15.
wd["right"].value = 50.
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
np.testing.assert_allclose(s.data[2:], s2.data[2:])
np.testing.assert_allclose(np.zeros(2), s2.data[:2])
def test_spikes_removal_tool(self):
s = hs.signals.Signal1D(np.ones((2, 3, 30)))
# Add three spikes
s.data[1, 0, 1] += 2
s.data[0, 2, 29] += 1
s.data[1, 2, 14] += 1
wd = s.spikes_removal_tool(**KWARGS)["ipywidgets"]["wdict"]
def next():
wd["next_button"]._click_handlers(wd["next_button"])
def previous():
wd["previous_button"]._click_handlers(wd["previous_button"])
def remove():
wd["remove_button"]._click_handlers(wd["remove_button"])
wd["threshold"].value = 1.5
next()
assert s.axes_manager.indices == (0, 1)
wd["threshold"].value = 0.5
assert s.axes_manager.indices == (0, 0)
next()
assert s.axes_manager.indices == (2, 0)
next()
assert s.axes_manager.indices == (0, 1)
previous()
assert s.axes_manager.indices == (2, 0)
wd["add_noise"].value = False
remove()
assert s.data[0, 2, 29] == 1
assert s.axes_manager.indices == (0, 1)
remove()
assert s.data[1, 0, 1] == 1
assert s.axes_manager.indices == (2, 1)
np.random.seed(1)
wd["add_noise"].value = True
wd["interpolator_kind"].value = "Spline"
wd["spline_order"].value = 3
remove()
assert s.data[1, 2, 14] == 0
assert s.axes_manager.indices == (0, 0)
def test_constrast_editor(self):
# To get this test to work, matplotlib backend needs to set to 'Agg'
np.random.seed(1)
im = hs.signals.Signal2D(np.random.random((32, 32)))
im.plot()
ceditor = ImageContrastEditor(im._plot.signal_plot)
ceditor.ax.figure.canvas.draw_idle()
wd = ceditor.gui(**KWARGS)["ipywidgets"]["wdict"]
assert wd["linthresh"].layout.display == "none" # not visible
assert wd["linscale"].layout.display == "none" # not visible
assert wd["gamma"].layout.display == "none" # not visible
wd["bins"].value = 50
assert ceditor.bins == 50
wd["norm"].value = 'Log'
assert ceditor.norm == 'Log'
assert wd["linthresh"].layout.display == "none" # not visible
assert wd["linscale"].layout.display == "none" # not visible
wd["norm"].value = 'Symlog'
assert ceditor.norm == 'Symlog'
assert wd["linthresh"].layout.display == "" # visible
assert wd["linscale"].layout.display == "" # visible
assert wd["linthresh"].value == 0.01 # default value
assert wd["linscale"].value == 0.1 # default value
wd["linthresh"].value = 0.1
assert ceditor.linthresh == 0.1
wd["linscale"].value = 0.2
assert ceditor.linscale == 0.2
wd["norm"].value = 'Linear'
percentile = [1.0, 99.0]
wd["percentile"].value = percentile
assert ceditor.vmin_percentile == percentile[0]
assert ceditor.vmax_percentile == percentile[1]
assert im._plot.signal_plot.vmin == f'{percentile[0]}th'
assert im._plot.signal_plot.vmax == f'{percentile[1]}th'
wd["norm"].value = 'Power'
assert ceditor.norm == 'Power'
assert wd["gamma"].layout.display == "" # visible
assert wd["gamma"].value == 1.0 # default value
wd["gamma"].value = 0.1
assert ceditor.gamma == 0.1
assert wd["auto"].value is True # default value
wd["auto"].value = False
assert ceditor.auto is False
wd["left"].value = 0.2
assert ceditor.ss_left_value == 0.2
wd["right"].value = 0.5
assert ceditor.ss_right_value == 0.5
# Setting the span selector programmatically from the widgets will
# need to be implemented properly
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
# assert im._plot.signal_plot.vmin == 0.2
# assert im._plot.signal_plot.vmax == 0.5
# Reset to default values
wd["reset_button"]._click_handlers(wd["reset_button"]) # Trigger it
assert im._plot.signal_plot.vmin == '0.0th'
assert im._plot.signal_plot.vmax == '100.0th'
def test_eels_table_tool(self):
s = hs.datasets.artificial_data.get_core_loss_eels_line_scan_signal(True)
s.plot()
er = EdgesRange(s)
er.ss_left_value = 500
er.ss_right_value = 550
wd = er.gui(**KWARGS)["ipywidgets"]["wdict"]
wd["update"]._click_handlers(wd["update"]) # refresh the table
assert wd["units"].value == 'eV'
assert wd["left"].value == 500
assert wd["right"].value == 550
assert len(wd['gb'].children) == 36 # 9 edges displayed
wd['major'].value = True
wd["update"]._click_handlers(wd["update"]) # refresh the table
assert len(wd['gb'].children) == 24 # 6 edges displayed
assert wd['gb'].children[4].description == 'Sb_M4'
wd['order'].value = 'ascending'
wd["update"]._click_handlers(wd["update"]) # refresh the table
assert wd['gb'].children[4].description == 'V_L3'
wd["reset"]._click_handlers(wd["reset"]) # reset the selector
assert len(wd['gb'].children) == 4 # only header displayed
|
gpl-3.0
| 6,585,736,890,534,469,000
| 37.944664
| 81
| 0.563077
| false
| 3.464487
| true
| false
| false
|
google-research/language
|
language/nqg/model/parser/training/training_utils.py
|
1
|
3956
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to define model training loop."""
from language.nqg.model.parser.training import forest_utils
import tensorflow as tf
def get_training_step(optimizer, model, verbose=False):
"""Get training step function."""
forest_score_function = forest_utils.get_forest_score_function(
verbose=verbose)
def training_step(inputs):
"""Executes a step of training."""
with tf.GradientTape() as tape:
loss = tf.constant(0.0, dtype=tf.float32)
application_scores_batch = model(inputs["wordpiece_ids"],
inputs["num_wordpieces"],
inputs["application_span_begin"],
inputs["application_span_end"],
inputs["application_rule_idx"])
nu_num_nodes_batch = tf.squeeze(inputs["nu_num_nodes"], 1)
de_num_nodes_batch = tf.squeeze(inputs["de_num_nodes"], 1)
with tf.name_scope("forest_score"):
# TODO(petershaw): Consider a batched implementation of
# forest_score_function to avoid iteration over examples in the batch.
for idx in tf.range(model.batch_size):
application_scores = application_scores_batch[idx]
nu_node_type = inputs["nu_node_type"][idx]
nu_node_1_idx = inputs["nu_node_1_idx"][idx]
nu_node_2_idx = inputs["nu_node_2_idx"][idx]
nu_application_idx = inputs["nu_application_idx"][idx]
nu_num_nodes = nu_num_nodes_batch[idx]
# Log score for numerator (sum over derivations of target).
nu_score = forest_score_function(application_scores, nu_num_nodes,
nu_node_type, nu_node_1_idx,
nu_node_2_idx, nu_application_idx)
de_node_type = inputs["de_node_type"][idx]
de_node_1_idx = inputs["de_node_1_idx"][idx]
de_node_2_idx = inputs["de_node_2_idx"][idx]
de_application_idx = inputs["de_application_idx"][idx]
de_num_nodes = de_num_nodes_batch[idx]
# Log score for denominator (partition function).
de_score = forest_score_function(application_scores, de_num_nodes,
de_node_type, de_node_1_idx,
de_node_2_idx, de_application_idx)
# -log(numerator/denominator) = log(denominator) - log(numerator)
example_loss = de_score - nu_score
loss += example_loss
loss /= tf.cast(model.batch_size, dtype=tf.float32)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return loss
return training_step
def get_train_for_n_steps_fn(strategy, optimizer, model):
"""Return train_for_n_steps_fn."""
training_step = get_training_step(optimizer, model)
@tf.function
def train_for_n_steps_fn(iterator, steps):
mean_loss = tf.constant(0.0, dtype=tf.float32)
for _ in tf.range(steps):
inputs = next(iterator)
loss = strategy.run(training_step, args=(inputs,))
mean_loss += strategy.reduce(tf.distribute.ReduceOp.MEAN, loss, axis=None)
mean_loss /= tf.cast(steps, dtype=tf.float32)
return mean_loss
return train_for_n_steps_fn
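# Illustrative wiring of the helpers above (a sketch; assumes a tf.distribute
# strategy, an optimizer, a model exposing batch_size and trainable_variables,
# and a distributed dataset yielding the feature dicts consumed by
# training_step):
#
#   train_fn = get_train_for_n_steps_fn(strategy, optimizer, model)
#   iterator = iter(strategy.experimental_distribute_dataset(dataset))
#   for _ in range(num_loops):
#     loss = train_fn(iterator, tf.constant(steps_per_loop))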
|
apache-2.0
| 2,828,534,664,817,358,000
| 40.642105
| 80
| 0.623357
| false
| 3.822222
| false
| false
| false
|
Nablaquabla/sns-analysis
|
sns-test.py
|
1
|
2499
|
import os
import time as tm
import sys
# Handles the creation of condor files for a given set of directories
# -----------------------------------------------------------------------------
def createCondorFile(dataDir,outDir,time):
# Condor submission file name convention: run-day-time.condor
with open('/home/bjs66/CondorFiles/test.condor','w') as f:
		# Fixed program location
f.write('Executable = /home/bjs66/GitHub/sns-analysis/sns-analysis-v4\n')
# Arguments passed to the exe:
# Set main run directory, e.g. Run-15-10-02-27-32-23/151002
		# Set current time to be analyzed (w/o .zip extension!), e.g. 184502
# Set output directory, eg Output/ Run-15-10-02-27-32-23/151002
f.write('Arguments = \"1 %s %s %s 1\"\n'%(dataDir,time,outDir))
# Standard cluster universe
f.write('universe = vanilla\n')
f.write('getenv = true\n')
# Program needs at least 250 MB of free memory to hold unzipped data
f.write('request_memory = 400\n')
# Output, error and log name convention: run-day-time.log/out/err
f.write('log = ../../Logs/test.log\n')
f.write('Output = ../../Outs/test.out\n')
f.write('Error = ../../Errs/test.err\n')
# Do not write any emails
f.write('notification = never\n')
f.write('+Department = Physics\n')
f.write('should_transfer_files = NO\n')
# Add single job to queue
f.write('Queue')
# Main function handling all internals
# -----------------------------------------------------------------------------
def main():
# Choose main directory, i.e. ~/csi/beam_on_data/Run-15-06-25-xyz/
mainRunDir = '/var/phy/project/phil/grayson/COHERENT/CsI/'
# Choose output directory, i.e. ~/output/Run-15-06-25-xyz/
mainOutDir = '/var/phy/project/phil/grayson/COHERENT/CsI/bjs-analysis/test/'
pathToFile = 'beam_on_data/Run-15-09-21-20-58-01/150923/'
time = '065101'
dataRunDir = mainRunDir + pathToFile
createCondorFile(dataRunDir,mainOutDir,time)
cmd = 'condor_submit /home/bjs66/CondorFiles/test.condor'
os.system(cmd)
if __name__ == '__main__':
main()
|
gpl-3.0
| -34,610,708,783,304,316
| 28.4
| 81
| 0.517407
| false
| 3.590517
| true
| false
| false
|
seikichi/tuitwi
|
tuitwi/state.py
|
1
|
11226
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import curses
import curses.ascii
class State(object):
    '''Base class for the State pattern.'''
def __init__(self, stdscr, form):
self._form = form
self._stdscr = stdscr
self._func = {}
self._func[curses.KEY_RESIZE] = self._resize
self._func['default'] = self._do_nothing
def _resize(self):
self._form.resize(self._stdscr)
self._form.controls['edit_line'].cur_set()
return self
def _do_nothing(self, ch):
return self
def execute(self, ch):
if self._func.get(ch):
return self._func[ch]()
else:
return self._func['default'](ch)
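# Dispatch sketch (illustrative): each concrete state registers handlers keyed
# by key codes, and execute() routes every key press to the matching handler,
# returning the state object the main loop should use next, e.g.
#
#   state = ViewState(stdscr, form, queue, conf)
#   state = state.execute(ord('j'))          # scroll down, stays in ViewState
#   state = state.execute(curses.ascii.TAB)  # switches to EditState
#   # ...the loop ends once a state (here ExitState) returns None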
class ExitState(State):
    '''Confirm whether to quit.'''
def __init__(self, stdscr, form, viewstate):
State.__init__(self, stdscr, form)
self._viewstate = viewstate
self._form.controls['status_line'].text = u'ほんとに終了する? (y/n)'
self._func[ord('y')] = self._quit
self._func[ord('n')] = self._cancel
def _quit(self):
return None
def _cancel(self):
self._form.controls['status_line'].text = u'TuiTwi ver 0.2'
return self._viewstate
class ConfirmDestroyMessageState(State):
    '''Confirm deletion of a post.'''
def __init__(self, stdscr, form, viewstate):
State.__init__(self, stdscr, form)
self._viewstate = viewstate
self._form.controls['status_line'].text = u'発言を削除しますか? (y/n)'
self._func[ord('y')] = self._yes
self._func[ord('n')] = self._no
def _yes(self):
i = self._form.controls['view_tab'].current_win.current_status().id
self._viewstate.queue.put(("DestroyStatus", i))
return self._viewstate
def _no(self):
self._form.controls['status_line'].text = u'TuiTwi ver 0.2'
return self._viewstate
class SearchInputState(State):
    '''Enter a search term.'''
def __init__(self, stdscr, form, viewstate):
State.__init__(self, stdscr, form)
curses.curs_set(True)
self._viewstate = viewstate
self._form.controls['status_line'].text = u'検索語句を入力して下さい.無ければTABで戻れます.'
self._form.controls['search_line'].show()
self._form.controls['edit_line'].hide()
self._func[curses.ascii.TAB] = self._quit
self._func[curses.ascii.CR] = self._func[curses.ascii.LF] = self._update
self._func['default'] = self._edit
def _update(self):
text = self._form.controls['search_line'].text
self._viewstate.search_word = text
self._form.controls['fullstatus_area'].keyword = text
self._form.controls['search_word_line'].text = "search word: %s" % text
curses.curs_set(False)
return self._quit()
def _quit(self):
curses.curs_set(False)
self._form.controls['status_line'].text = u'TuiTwi ver 0.2'
self._form.controls['search_line'].hide()
self._form.controls['edit_line'].show()
return self._viewstate
def _edit(self, ch):
self._form.controls['search_line'].edit(ch)
return self
class HelpState(State):
    '''Display the help screen.'''
def __init__(self, stdscr, form, viewstate):
State.__init__(self, stdscr, form)
self._form.controls['help_area'].show()
self._form.controls['fullstatus_area'].hide()
self._form.controls['view_tab'].hide()
self._form.controls['status_line'].text = u"press 'q' to quit help."
self._viewstate = viewstate
self._func[ord('q')] = self._quit
def _quit(self):
self._form.controls['status_line'].text = u"TuiTwi ver 0.2"
self._viewstate.resume()
return self._viewstate
class EditState(State):
    '''Compose a new post.'''
def __init__(self, stdscr, form, viewstate):
State.__init__(self, stdscr, form)
curses.curs_set(True)
self._viewstate = viewstate
self._func[curses.ascii.TAB] = self._view
self._func[curses.ascii.CR] = self._func[curses.ascii.LF] = self._update
self._func['default'] = self._edit
def _update(self):
self._viewstate.queue.put(("PostUpdate", self._form.controls['edit_line'].text, self._viewstate.reply_id))
self._form.controls['edit_line'].clear()
return self._view()
def _view(self):
curses.curs_set(False)
return self._viewstate
def _edit(self, ch):
self._form.controls['edit_line'].edit(ch)
return self
class ViewState(State):
    '''State for browsing the timelines.'''
def __init__(self, stdscr, form, queue, conf):
State.__init__(self, stdscr, form)
curses.curs_set(False)
self._form.controls['status_line'].text = u'TuiTwi ver 0.2'
self._form.controls['view_tab'].show()
self._form.controls['fullstatus_area'].show()
self._form.controls['help_area'].hide()
self._form.controls['search_line'].hide()
self._queue = queue
self._command = conf.get('options').get('browser_command')
self._search_word = ''
self._conf = conf
self.reply_id = None
self._func[ord('q')] = self._quit
self._func[ord('j')] = self._func[curses.KEY_DOWN] = self._next
self._func[ord('k')] = self._func[curses.KEY_UP] = self._prev
self._func[ord('g')] = self._top
self._func[ord('G')] = self._bottom
self._func[ord('r')] = self._update
self._func[ord('f')] = self._fav
self._func[ord('n')] = self._next_user_post
self._func[ord('p')] = self._prev_user_post
self._func[ord('P')] = self._move_to_reply_to
self._func[ord('N')] = self._move_to_reply_from
self._func[ord('h')] = self._func[curses.KEY_LEFT] = self._prev_tab
self._func[ord('l')] = self._func[curses.KEY_RIGHT] = self._next_tab
self._func[ord('o')] = self._open
self._func[ord('H')] = self._home
self._func[ord('R')] = self._rt
self._func[curses.ascii.DC2] = self._official_rt
self._func[ord('/')] = self._search_input
self._func[ord('d')] = self._delete
self._func[curses.ascii.SO] = self._search_next
self._func[curses.ascii.DLE] = self._search_prev
self._func[curses.ascii.CR] = self._func[curses.ascii.LF] = self._reply
self._func[curses.ascii.ACK] = self._func[ord(' ')] = self._scroll_down
self._func[curses.ascii.STX] = self._func[ord('-')] = self._scroll_up
self._func[ord('q')] = self._quit
self._func[curses.ascii.TAB] = self._edit
self._func[ord('?')] = self._help
def get_search_word(self): return self._search_word
def set_search_word(self, val): self._search_word = val
search_word = property(get_search_word, set_search_word)
@property
def queue(self): return self._queue
def resume(self):
self._form.controls['help_area'].hide()
self._form.controls['view_tab'].show()
self._form.controls['fullstatus_area'].show()
def execute(self, ch):
ret = State.execute(self, ch)
self._form.controls['fullstatus_area'].status = self._form.controls['view_tab'].current_win.current_status()
return ret
def _delete(self):
status = self._form.controls['view_tab'].current_win.current_status()
if status and self._conf['credential']['user'] == status.user.screen_name:
return ConfirmDestroyMessageState(self._stdscr, self._form, self)
else:
return self
def _search_input(self):
return SearchInputState(self._stdscr, self._form, self)
def _search_next(self):
self._form.controls['view_tab'].current_win.search_next_word(self._search_word)
return self
def _search_prev(self):
self._form.controls['view_tab'].current_win.search_prev_word(self._search_word)
return self
def _open(self):
        # TODO(seikichi) URL concatenation may be questionable?
status = self._form.controls['view_tab'].current_win.current_status()
url = "http://twitter.com/%s/status/%d" % (status.user.screen_name, status.id)
os.system(self._command % url)
return self
def _home(self):
status = self._form.controls['view_tab'].current_win.current_status()
url = "http://twitter.com/%s" % status.user.screen_name
os.system(self._command % url)
return self
def _next_tab(self):
self._form.controls['view_tab'].next_tab()
return self
def _prev_tab(self):
self._form.controls['view_tab'].prev_tab()
return self
def _move_to_reply_from(self):
self._form.controls['view_tab'].current_win.move_to_reply_from()
return self
def _move_to_reply_to(self):
self._form.controls['view_tab'].current_win.move_to_reply_to()
return self
def _prev_user_post(self):
self._form.controls['view_tab'].current_win.prev_user_post()
return self
def _next_user_post(self):
self._form.controls['view_tab'].current_win.next_user_post()
return self
def _fav(self):
status = self._form.controls['view_tab'].current_win.current_status()
if not status.favorited:
self.queue.put(("CreateFavorite", status))
else:
self.queue.put(("DestroyFavorite", status))
return self
def _reply(self):
win = self._form.controls['view_tab'].current_win
if win.current_status() is not None:
self.reply_id = win.current_status().id
self._form.controls['edit_line'].insert_string(win.reply_string())
return EditState(self._stdscr, self._form, self)
def _official_rt(self):
status = self._form.controls['view_tab'].current_win.current_status()
if status is not None:
self._queue.put(('OfficialRT', status.id))
return self
def _rt(self):
status = self._form.controls['view_tab'].current_win.current_status()
if status is not None:
self._form.controls['edit_line'].insert_rt(status)
return EditState(self._stdscr, self._form, self)
def _update(self):
self._queue.put(('GetFriendsTimeline',))
return self
def _scroll_down(self):
self._form.controls['view_tab'].current_win.scroll_down()
return self
def _scroll_up(self):
self._form.controls['view_tab'].current_win.scroll_up()
return self
def _top(self):
self._form.controls['view_tab'].current_win.move_to_top()
return self
def _bottom(self):
self._form.controls['view_tab'].current_win.move_to_bottom()
return self
def _next(self):
self._form.controls['view_tab'].current_win.next()
return self
def _edit(self):
return EditState(self._stdscr, self._form, self)
def _prev(self):
self._form.controls['view_tab'].current_win.prev()
return self
def _help(self):
return HelpState(self._stdscr, self._form, self)
def _quit(self):
return ExitState(self._stdscr, self._form, self)
|
mit
| -3,273,382,893,051,582,000
| 32.550152
| 116
| 0.588965
| false
| 3.380704
| false
| false
| false
|
knaggsy2000/stormforce-mq
|
plugins/plugin_core_serverdetails.py
|
1
|
8457
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################################
# Copyright/License Notice (Modified BSD License) #
#########################################################################
#########################################################################
# Copyright (c) 2008-2012, 2014, 2016 Daniel Knaggs - 2E0DPK/M6DPK #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: - #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the author nor the names of its contributors #
# may be used to endorse or promote products derived from this #
# software without specific prior written permission. #
# #
# * This Software is not to be used for safety purposes. #
# #
# * You agree and abide the Disclaimer for your Boltek products. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
###################################################
# StormForce Server Details Plugin #
###################################################
from plugin_core_base import PluginBase
from smq_shared import MQ
###########
# Classes #
###########
class Plugin(PluginBase):
def __init__(self):
self.SERVER_COPYRIGHT = "(c)2008-2012, 2014, 2016 - Daniel Knaggs"
self.SERVER_NAME = "StormForce MQ"
self.SERVER_VERSION = "0.2.0"
self.STRIKE_COPYRIGHT = "Lightning Data (c) 2016 - Daniel Knaggs"
self.UPDATE_PERIOD = 1.
PluginBase.__init__(self)
self.MQ_ROUTING_KEY = "{0}.core.serverdetails".format(self.MQ_ROUTING_KEY)
self.MQ_RX_ENABLED = False
def getScriptPath(self):
return self.os.path.realpath(__file__)
def readXMLSettings(self):
PluginBase.readXMLSettings(self)
if self.os.path.exists(self.XML_SETTINGS_FILE):
xmldoc = self.minidom.parse(self.XML_SETTINGS_FILE)
myvars = xmldoc.getElementsByTagName("Setting")
for var in myvars:
for key in var.attributes.keys():
val = str(var.attributes[key].value)
if key == "Enabled":
self.ENABLED = self.cBool(val)
elif key == "StrikeCopyright":
self.STRIKE_COPYRIGHT = val
elif key == "UpdatePeriod":
self.UPDATE_PERIOD = float(val)
def run(self):
self.log.debug("Starting...")
time_wait = self.datetime.now() + self.timedelta(seconds = self.UPDATE_PERIOD)
while self.running:
t = self.datetime.now()
if t >= time_wait:
try:
myconn = []
self.db.connectToDatabase(myconn)
rows = self.db.executeSQLQuery("SELECT ServerStarted, DATE_PART('epoch', ServerStarted) AS ServerStartedUT, DATE_PART('epoch', LOCALTIMESTAMP) - DATE_PART('epoch', ServerStarted) AS ServerUptime, ServerApplication, ServerCopyright, ServerVersion, StrikeCopyright FROM tblServerDetails LIMIT 1", conn = myconn)
self.db.disconnectFromDatabase(myconn)
# Send out the server details
self.log.info("Sending out the server details...")
for row in rows:
m = self.constructMessage("ServerDetails", {"ServerStarted": str(row[0]), "ServerStartedUT": row[1], "ServerUptime": row[2], "ServerApplication": row[3], "ServerCopyright": row[4], "ServerVersion": row[5], "StrikeCopyright": row[6]})
self.mq.publishMessage(m[1], headers = m[0])
break
except Exception, ex:
self.log.error("An error occurred while running the current time.")
self.log.error(ex)
finally:
time_wait = self.datetime.now() + self.timedelta(seconds = self.UPDATE_PERIOD)
self.time.sleep(0.1)
def start(self, use_threading = True):
PluginBase.start(self, use_threading)
if self.ENABLED:
self.log.info("Starting server details...")
self.running = True
if use_threading:
t = self.threading.Thread(target = self.run)
t.setDaemon(1)
t.start()
else:
self.run()
def stop(self):
if self.ENABLED:
self.running = False
def updateDatabase(self):
PluginBase.updateDatabase(self)
myconn = []
self.db.connectToDatabase(myconn)
##########
# Tables #
##########
self.log.info("Creating tables...")
# tblServerDetails
self.log.debug("TABLE: tblServerDetails")
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblServerDetails CASCADE", conn = myconn)
self.db.executeSQLCommand("CREATE TABLE tblServerDetails(ID bigserial PRIMARY KEY)", conn = myconn) # MEMORY
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN ServerStarted timestamp", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN ServerApplication varchar(20)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN ServerCopyright varchar(100)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN ServerVersion varchar(8)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN StrikeCopyright varchar(100)", conn = myconn)
self.db.executeSQLCommand("INSERT INTO tblServerDetails(ServerStarted, ServerApplication, ServerCopyright, ServerVersion, StrikeCopyright) VALUES(LOCALTIMESTAMP, %(ServerApplication)s, %(ServerCopyright)s, %(ServerVersion)s, %(StrikeCopyright)s)", {"ServerApplication": self.SERVER_NAME, "ServerCopyright": self.SERVER_COPYRIGHT, "ServerVersion": self.SERVER_VERSION, "StrikeCopyright": self.STRIKE_COPYRIGHT}, myconn)
self.db.disconnectFromDatabase(myconn)
def writeXMLSettings(self):
PluginBase.writeXMLSettings(self)
if not self.os.path.exists(self.XML_SETTINGS_FILE):
xmldoc = self.minidom.Document()
settings = xmldoc.createElement("PluginServerDetails")
xmldoc.appendChild(settings)
var = xmldoc.createElement("Setting")
var.setAttribute("Enabled", str(self.ENABLED))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("StrikeCopyright", str(self.STRIKE_COPYRIGHT))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("UpdatePeriod", str(self.UPDATE_PERIOD))
settings.appendChild(var)
xmloutput = file(self.XML_SETTINGS_FILE, "w")
xmloutput.write(xmldoc.toprettyxml())
xmloutput.close()
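# For reference (illustrative shape only; attribute values depend on the
# current configuration), the settings document written above and read back
# by readXMLSettings() looks roughly like:
#
#   <PluginServerDetails>
#     <Setting Enabled="True"/>
#     <Setting StrikeCopyright="Lightning Data (c) 2016 - Daniel Knaggs"/>
#     <Setting UpdatePeriod="1.0"/>
#   </PluginServerDetails>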
########
# Main #
########
if __name__ == "__main__":
try:
p = Plugin()
p.start(use_threading = False)
p = None
except Exception, ex:
print "Exception: {0}".format(ex)
|
bsd-3-clause
| 8,567,733,712,468,731,000
| 36.923767
| 420
| 0.601987
| false
| 3.989151
| false
| false
| false
|
kaichogami/wavenet
|
model.py
|
1
|
15261
|
"""Model for wavenet. Defines ops in tensorflow sense"""
import numpy as np
import tensorflow as tf
MIN_DIL = 2
MAX_DIL = 4096
def _dilated_convolution(X, filters, dilation, name):
"""Helper function to carry out dilated convolution
Parameters
==========
X : tf.Tensor of shape(batch, width, height, in_channels)
The input data
filters : tf.Tensor of shape(height, width, in_channels, out_channels)
The filter tensor
dilation : int
the dilation factor
"""
return tf.nn.atrous_conv2d(X, filters, dilation, "SAME", name)
def _create_variable(name, shape):
"""Helped function to create variables using xavier initialization
Parameters
==========
name : string
Then name of the variable
shape : tuple, list
The shape of the variable
"""
return tf.get_variable(name, shape=shape,
initializer=tf.contrib.layers.xavier_initializer())
class Wavenet:
"""Model for Wavenet.
Parameters
==========
    audio_frequency : int, Hz
The frequency of the audio
receptive_seconds : int, secs
The size of the receptive field in seconds.
filter_width : int,
Size of the filter.
residual_channels : int
No of filters to learn for residual block.
dilation_channels : int
No of filters to learn for dilation block.
skip_channels : int
No of filters to learn for skip block.
quantization_channels : int
No of channels to encode the audio with
"""
def __init__(self, audio_frequency, receptive_seconds,
filter_width,residual_channels,
dilation_channels, skip_channels, quantization_channels):
self.audio_frequency = audio_frequency
self.receptive_seconds = receptive_seconds
self.filter_width = filter_width
self.residual_channels = residual_channels
self.dilation_channels = dilation_channels
self.skip_channels = skip_channels
self.quantization_channels = quantization_channels
self.dilations = _get_dilations(audio_frequency, receptive_seconds)
self.variables = self._get_all_variables()
self.quantization_channels = quantization_channels
def _get_all_variables(self):
"""Helper function to create a dict of all variables
"""
variables = dict()
# first causal convolution
with tf.variable_scope("initial_causal_conv"):
variables['initial_filter'] = _create_variable("filter",
[1,1,
self.quantization_channels,
self.residual_channels])
variables['dilated_stack'] = list()
# Dilated stack dictionary with list of variables
with tf.variable_scope('dilated_stack'):
for i, _ in enumerate(self.dilations):
current = dict()
with tf.variable_scope("dilated_layer_{}".format(i)):
current['filter'] = _create_variable(
"filter", [1, self.filter_width,
self.residual_channels,
self.dilation_channels])
current['gate'] = _create_variable(
"gate", [1, self.filter_width,
self.residual_channels,
self.dilation_channels])
current['skip'] = _create_variable(
"skip", [1, self.filter_width,
self.dilation_channels,
self.skip_channels])
variables['dilated_stack'].append(current)
with tf.variable_scope('post_processing'):
variables['post_1'] = _create_variable(
"post_1", [1, 1, self.skip_channels, self.skip_channels])
variables['post_2'] = _create_variable(
"post_2", [1, 1, self.skip_channels,
self.quantization_channels])
return variables
def _dilated_stack(self, X, dilation, layer_index):
"""create dilation layer or use it again.
Parameters
==========
X : np.ndarray or tf.tensor of shape(batch_size, height, width,
in_channels)
Input to the dilation stack
dilation : int
The dilation rate.
layer_index : int
Index of layer. Used for defining scope.
Output
======
residual, skip: np.ndarray of shape(batch_size, height, width,
in_channels)
Output of the dilated stack
"""
with tf.variable_scope('dilated_layer_{}'.format(layer_index)):
var_dict = self.variables['dilated_stack'][layer_index]
conv_filter = _dilated_convolution(X, var_dict['filter'],
dilation, name="conv_filter")
conv_gate = _dilated_convolution(X, var_dict['gate'],
dilation, name="conv_gate")
# final output
# Question: should the final skip and residual convolution have
# different weight vector or same? here, the same is used.
out = tf.tanh(conv_filter) * tf.sigmoid(conv_gate)
out = tf.nn.conv2d(out, var_dict['skip'], padding="SAME", strides=[1,1,1,1])
# return residual and skip output
return out + X, out
def create_network(self, X):
"""Create the network, by using dilated stack, postprocessing.
Parameters
==========
X : np.ndarray, of shape(batch, height, width, in_channels)
The input data.
Output
======
conv2 : np.ndarray of shape(batch, height, width, in_channels)
The output of the total network, unnormalized
"""
with tf.variable_scope('initial_causal_conv'):
initial_conv_result = tf.nn.conv2d(X, self.variables[
'initial_filter'],
padding="SAME", strides=[1,1,1,1])
residual = initial_conv_result
# create dilated stack results
skip_list = list()
with tf.variable_scope("dilated_stack"):
for i, dilation in enumerate(self.dilations):
residual, skip_result = self._dilated_stack(residual, dilation,
i)
skip_list.append(skip_result)
# post-processing
# addition --> Relu --> convolution --> Relu --> convolution
with tf.variable_scope("post_processing"):
total_output = sum(skip_list)
relu1 = tf.nn.tanh(total_output)
conv1 = tf.nn.conv2d(relu1, self.variables['post_1'],
padding="SAME", strides=[1,1,1,1])
relu2 = tf.nn.tanh(conv1)
conv2 = tf.nn.conv2d(relu2, self.variables['post_2'],
padding="SAME", strides=[1,1,1,1])
return conv2
def loss(self, input_samples):
"""Generate the cross entropy loss and reduce mean between batches
Parameters
==========
input_samples : np.ndarray of shape(batch, height, width, in_channels)
The input samples
"""
with tf.variable_scope("loss"):
# flip the input samples so that convolution depends on previous
# samples
input_samples = tf.reverse(input_samples, [2])
input_samples = _mu_law_encode(input_samples,
self.quantization_channels)
encoded = self._one_hot(input_samples)
network_output = self.create_network(encoded)
network_output = tf.reshape(network_output,
[1, 1, -1,
self.quantization_channels])
# slice receptive field from the end(of flipped audio
# signal) to preserve causility
shape = network_output.shape
receptive_samples = _get_rounded_receptive_samples(self.audio_frequency,
self.receptive_seconds)
output_sliced = tf.slice(network_output, [0, 0, 0, 0],
[-1, -1, int(shape[2]-receptive_samples),
-1])
encoded_sliced = tf.slice(encoded, [0, 0, 0, 0],
[-1, -1, int(shape[2]-receptive_samples),
-1])
sliced_shape = encoded_sliced.shape
# shift the input by left(reversed audio)
encoded_shifted = tf.slice(tf.pad(encoded_sliced, [[0,0], [0,0], [1,0], [0,0]]),
[0,0,0,0], [-1,-1, int(sliced_shape[2]),
-1])
# reshape to find the cross entropy loss
output_sliced = tf.reshape(output_sliced, [-1, self.quantization_channels])
encoded_shifted = tf.reshape(encoded_shifted, [-1, self.quantization_channels])
loss = tf.nn.softmax_cross_entropy_with_logits(
logits = output_sliced,
labels = encoded_shifted)
average_loss = tf.reduce_mean(loss)
return average_loss
def _generate_next_sample(self, waveform):
"""Generate the probabilty distribution of the next sample,
based on current waveform.
Parameters
==========
waveform : np.ndarray of shape(batch, in_height, in_width,
quantization_channels)
reversed input waveform
Output
======
new_waveform : np.ndarray of shape(batch, in_height,
in_width,
quantization_channels)
reversed generated waveform
"""
with tf.variable_scope("Generate"):
encoded = self._one_hot(waveform)
network_output = self.create_network(encoded)
out = tf.reshape(network_output, [-1, self.quantization_channels])
prob = tf.nn.softmax(out)
# return index + 1 to get the quantization channel value
return tf.to_int32(tf.reshape(tf.argmax(prob, axis=1)[0], [1,1,1,1])) + 1
def generate(self, seconds, song):
"""Generate audio based on trained model.
Output
======
generated_audio : np.ndarray of shape(out_width)
"""
with tf.variable_scope("Generate"):
receptive_samples = _get_rounded_receptive_samples(self.audio_frequency,
self.receptive_seconds)
total_samples = _get_receptive_samples(self.audio_frequency,
seconds)
# randomly generate first samples
if len(song) < receptive_samples:
print(len(song), receptive_samples)
raise ValueError("enter longer song or shorter receptive field")
current = song[1000:receptive_samples+3000]
current = np.reshape(current, [1,1,current.shape[0], 1])
total_waveform = tf.to_int32(tf.reverse(np.copy(current), [2]))
current = tf.reverse(current, [2])
current = _mu_law_encode(current, self.quantization_channels)
for i in xrange(receptive_samples, total_samples):
next_sample = self._generate_next_sample(current)
total_waveform = tf.concat([next_sample, total_waveform], 2)
# insert the next sample at the beginning and pop the last element
current = tf.slice(current, [0,0,0,0], [-1,-1,int(current.shape[2]-1),-1])
current = tf.concat([next_sample, current], 2)
print(i)
return _mu_law_decode(tf.reverse(total_waveform, [2]),
self.quantization_channels)
def _one_hot(self, input_samples):
"""Helper function to one_hot input samples.
"""
encoded = tf.one_hot(input_samples, depth=self.quantization_channels,
dtype=tf.float32)
return tf.reshape(encoded, [1, 1, -1, self.quantization_channels])
def _get_receptive_samples(audio_frequency, receptive_field):
"""helper function to get receptive seconds"""
return audio_frequency * receptive_field
def _get_dilations(audio_frequency, receptive_field):
"""Create dilated factors list based on receiptive field
These dilated factors are in the power of 2, till a max limit
after which they start again.
Parameters
==========
    audio_frequency : int, in Hz
Frequency of the audio
receptive_field : int,
No of seconds to take into account
"""
receptive_samples = _get_rounded_receptive_samples(audio_frequency,
receptive_field)
limit = np.log2(receptive_samples)
dilated_list = list()
counter = 0
while True:
for j in xrange(int(np.log2(MIN_DIL)), int(np.log2(MAX_DIL)) + 1):
if counter == limit:
return dilated_list
dilated_list.append(2**j)
counter += 1
def _get_rounded_receptive_samples(audio_frequency, receptive_field):
"""Get rounded receptive samples nearest to the power of 2
"""
receptive_samples = _get_receptive_samples(audio_frequency,
receptive_field)
return 2 ** int(np.floor(np.log2(receptive_samples)))
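# A quick worked example for the helpers above (illustrative values):
#
#   >>> _get_rounded_receptive_samples(16000, 1)
#   8192
#   >>> _get_dilations(16000, 1)
#   [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 2]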
def _mu_law_encode(audio, quantization_channels):
'''Quantizes waveform amplitudes.'''
with tf.name_scope('encode'):
mu = tf.to_float(quantization_channels - 1)
# Perform mu-law companding transformation (ITU-T, 1988).
# Minimum operation is here to deal with rare large amplitudes caused
# by resampling.
safe_audio_abs = tf.minimum(tf.abs(audio), 1.0)
magnitude = tf.log1p(mu * safe_audio_abs) / tf.log1p(mu)
signal = tf.sign(audio) * magnitude
# Quantize signal to the specified number of levels.
return tf.to_int32((signal + 1) / 2 * mu + 0.5)
def _mu_law_decode(output, quantization_channels):
'''Recovers waveform from quantized values.'''
# copied from https://github.com/ibab/tensorflow-wavenet/blob/master/wavenet/ops.py
mu = quantization_channels - 1
# Map values back to [-1, 1].
signal = 2 * (tf.to_float(output) / mu) - 1
# Perform inverse of mu-law transformation.
magnitude = (1. / mu) * ((1 + mu)**abs(signal) - 1)
return tf.sign(signal) * magnitude
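# A NumPy sketch of the same companding (illustrative only; the module itself
# works on TensorFlow tensors, and the values assume quantization_channels=256):
#
#   import numpy as np
#   def mu_law_encode_np(audio, quantization_channels=256):
#       mu = float(quantization_channels - 1)
#       magnitude = np.log1p(mu * np.minimum(np.abs(audio), 1.0)) / np.log1p(mu)
#       return ((np.sign(audio) * magnitude + 1) / 2 * mu + 0.5).astype(np.int32)
#
#   mu_law_encode_np(np.array([-1.0, 0.0, 1.0]))   # -> [0, 128, 255]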
|
mit
| -5,600,043,305,285,726,000
| 39.914209
| 92
| 0.539283
| false
| 4.33059
| false
| false
| false
|
ping/instagram_private_api
|
instagram_private_api/endpoints/collections.py
|
1
|
3411
|
import json
from ..compatpatch import ClientCompatPatch
class CollectionsEndpointsMixin(object):
"""For endpoints in related to collections functionality."""
def list_collections(self):
return self._call_api('collections/list/')
def collection_feed(self, collection_id, **kwargs):
"""
Get the items in a collection.
:param collection_id: Collection ID
:return:
"""
endpoint = 'feed/collection/{collection_id!s}/'.format(**{'collection_id': collection_id})
res = self._call_api(endpoint, query=kwargs)
if self.auto_patch and res.get('items'):
[ClientCompatPatch.media(m['media'], drop_incompat_keys=self.drop_incompat_keys)
for m in res.get('items', []) if m.get('media')]
return res
def create_collection(self, name, added_media_ids=None):
"""
Create a new collection.
:param name: Name for the collection
:param added_media_ids: list of media_ids
:return:
.. code-block:: javascript
{
"status": "ok",
"collection_id": "1700000000123",
"cover_media": {
"media_type": 1,
"original_width": 1080,
"original_height": 1080,
"id": 1492726080000000,
"image_versions2": {
"candidates": [
{
"url": "http://scontent-xx4-1.cdninstagram.com/...123.jpg",
"width": 1080,
"height": 1080
},
...
]
}
},
"collection_name": "A Collection"
}
"""
params = {'name': name}
if added_media_ids and isinstance(added_media_ids, str):
added_media_ids = [added_media_ids]
if added_media_ids:
params['added_media_ids'] = json.dumps(added_media_ids, separators=(',', ':'))
params.update(self.authenticated_params)
return self._call_api('collections/create/', params=params)
def edit_collection(self, collection_id, added_media_ids):
"""
Add media IDs to an existing collection.
:param collection_id: Collection ID
:param added_media_ids: list of media IDs
:return: Returns same object as :meth:`create_collection`
"""
if isinstance(added_media_ids, str):
added_media_ids = [added_media_ids]
params = {
'added_media_ids': json.dumps(added_media_ids, separators=(',', ':'))
}
params.update(self.authenticated_params)
endpoint = 'collections/{collection_id!s}/edit/'.format(**{'collection_id': collection_id})
return self._call_api(endpoint, params=params)
def delete_collection(self, collection_id):
"""
Delete a collection.
:param collection_id: Collection ID
:return:
.. code-block:: javascript
{
"status": "ok"
}
"""
params = self.authenticated_params
endpoint = 'collections/{collection_id!s}/delete/'.format(**{'collection_id': collection_id})
return self._call_api(endpoint, params=params)
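# Illustrative usage (a minimal sketch; assumes an authenticated client object
# that mixes in CollectionsEndpointsMixin, and the media ID below is made up):
#
#   collections = api.list_collections()
#   created = api.create_collection('Saved later', added_media_ids=['1234567890123456789_1234567'])
#   feed = api.collection_feed(created['collection_id'])
#   api.edit_collection(created['collection_id'], ['1234567890123456789_1234567'])
#   api.delete_collection(created['collection_id'])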
|
mit
| 5,582,873,594,909,179,000
| 34.905263
| 101
| 0.522427
| false
| 4.406977
| false
| false
| false
|
sudhir-serpentcs/business-requirement
|
business_requirement_deliverable_project/models/project.py
|
1
|
3606
|
# -*- coding: utf-8 -*-
# © 2016 Elico Corp (https://www.elico-corp.com).
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, fields, models
from openerp.tools.translate import _
from openerp.exceptions import ValidationError
class Project(models.Model):
_inherit = "project.project"
origin = fields.Char('Source Document')
business_requirement_id = fields.Many2one(
'business.requirement',
string='Business Requirement',
help='Link the Project and the business requirement',
)
business_requirement_deliverable_id = fields.Many2one(
comodel_name='business.requirement.deliverable',
string='Business Requirement Deliverable',
help='Link the Project and the business requirement deliverable',
)
@api.multi
def generate_project_wizard(self):
br_ids = self.env.context.get('br_ids', False)
from_project = False
if not br_ids:
br_ids = self.br_ids
from_project = True
default_uom = self.env['project.config.settings'].\
get_default_time_unit('time_unit').get('time_unit', False)
if not default_uom:
raise ValidationError(
_("""Please set working time default unit in project
config settings"""))
lines = self.env['business.requirement.resource']
for br in br_ids:
if br.state not in ['stakeholder_approval', 'cancel', 'done']:
raise ValidationError(
_("All business requirements of the project should "
"be stakeholder_approval/canceled/done"))
for deliverables in br.deliverable_lines:
for line in deliverables.resource_ids:
if line.resource_type != 'task':
continue
generated = self.env['project.task'].search(
[('br_resource_id', '=', line.id)],
limit=1)
if generated:
continue
lines |= line
for resource_line in br.resource_lines.filtered(
lambda resource: resource.resource_type == 'task'):
generated = self.env['project.task'].search(
[('br_resource_id', '=', resource_line.id)],
limit=1)
if generated:
continue
lines |= resource_line
if not lines and not br.linked_project:
raise ValidationError(
_("""There is no available business requirement resource line
to generate task"""))
if from_project:
            br_ids = br_ids.filtered(lambda br_id: not br_id.parent_id)
vals = {
'partner_id': self.partner_id.id,
'project_id': self.id,
'br_ids': [(6, 0, br_ids.ids)]
}
wizard_obj = self.env['br.generate.projects']
wizard = wizard_obj.with_context(
default_uom=default_uom, br_ids=False).create(vals)
action = wizard.wizard_view()
return action
class ProjectTask(models.Model):
_inherit = "project.task"
business_requirement_id = fields.Many2one(
'business.requirement',
string='Business Requirement',
help='Link the task and the business requirement',
)
br_resource_id = fields.Many2one(
comodel_name='business.requirement.resource',
string='Business Requirement Resource',
ondelete='set null'
)
|
agpl-3.0
| -5,362,266,994,235,058,000
| 36.947368
| 77
| 0.56699
| false
| 4.434194
| false
| false
| false
|
crowdworks/redash
|
redash/cli/groups.py
|
1
|
3319
|
from __future__ import print_function
from sys import exit
from sqlalchemy.orm.exc import NoResultFound
from flask.cli import AppGroup
from click import argument, option
from redash import models
manager = AppGroup(help="Groups management commands.")
@manager.command()
@argument('name')
@option('--org', 'organization', default='default',
help="The organization the user belongs to (leave blank for "
"'default').")
@option('--permissions', default=None,
help="Comma separated list of permissions ('create_dashboard',"
" 'create_query', 'edit_dashboard', 'edit_query', "
"'view_query', 'view_source', 'execute_query', 'list_users',"
" 'schedule_query', 'list_dashboards', 'list_alerts',"
" 'list_data_sources') (leave blank for default).")
def create(name, permissions=None, organization='default'):
print("Creating group (%s)..." % (name))
org = models.Organization.get_by_slug(organization)
permissions = extract_permissions_string(permissions)
print("permissions: [%s]" % ",".join(permissions))
try:
models.db.session.add(models.Group(
name=name, org=org,
permissions=permissions))
models.db.session.commit()
except Exception as e:
print("Failed create group: %s" % e.message)
exit(1)
@manager.command()
@argument('group_id')
@option('--permissions', default=None,
help="Comma separated list of permissions ('create_dashboard',"
" 'create_query', 'edit_dashboard', 'edit_query',"
" 'view_query', 'view_source', 'execute_query', 'list_users',"
" 'schedule_query', 'list_dashboards', 'list_alerts',"
" 'list_data_sources') (leave blank for default).")
def change_permissions(group_id, permissions=None):
print("Change permissions of group %s ..." % group_id)
try:
group = models.Group.query.get(group_id)
except NoResultFound:
print("User [%s] not found." % group_id)
exit(1)
permissions = extract_permissions_string(permissions)
print("current permissions [%s] will be modify to [%s]" % (
",".join(group.permissions), ",".join(permissions)))
group.permissions = permissions
try:
models.db.session.add(group)
models.db.session.commit()
except Exception as e:
print("Failed change permission: %s" % e.message)
exit(1)
def extract_permissions_string(permissions):
if permissions is None:
permissions = models.Group.DEFAULT_PERMISSIONS
else:
permissions = permissions.split(',')
permissions = [p.strip() for p in permissions]
return permissions
@manager.command()
@option('--org', 'organization', default=None,
help="The organization to limit to (leave blank for all).")
def list(organization=None):
"""List all groups"""
if organization:
org = models.Organization.get_by_slug(organization)
groups = models.Group.query.filter(models.Group.org == org)
else:
groups = models.Group.query
for i, group in enumerate(groups):
if i > 0:
print("-" * 20)
print("Id: {}\nName: {}\nType: {}\nOrganization: {}\nPermission: {}".format(
group.id, group.name, group.type, group.org.slug, ",".join(group.permissions)))
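# Illustrative invocations (a sketch; the exact entry point depends on how the
# application registers this AppGroup, e.g. via manage.py or the Flask CLI):
#
#   flask groups create "Marketing" --permissions "view_query,execute_query"
#   flask groups change_permissions 3 --permissions "view_query,list_dashboards"
#   flask groups list --org default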
|
bsd-2-clause
| -6,601,441,871,846,674,000
| 32.525253
| 91
| 0.637843
| false
| 3.941805
| false
| false
| false
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/StLa/PyScripts/Strangeland.py
|
1
|
7705
|
import dsz.version.checks
import dsz.lp
import dsz.version
import dsz.ui
import dsz.path
import dsz.file
import dsz.control
import dsz.menu
import dsz.env
tool = 'StLa'
version = '1.2.0.1'
resDir = dsz.lp.GetResourcesDirectory()
logdir = dsz.lp.GetLogsDirectory()
STLA_PATH = ('%s%s' % (resDir, tool))
def stlaverify(input):
storageSuccessFlag = True
success = True
if dsz.file.Exists('tm154d.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154d.da dump file exists ... this should not be here', dsz.ERROR)
if dsz.file.Exists('tm154p.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154p.da overflow file exists ... log may be full', dsz.ERROR)
if dsz.file.Exists('tm154_.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154_.da config file exists ... ', dsz.GOOD)
if dsz.file.Exists('tm154o.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154o.da storage file exists ... SUCCESSFUL', dsz.GOOD)
else:
dsz.ui.Echo('tm154o.da storage file missing ... FAILED', dsz.ERROR)
storageSuccessFlag = False
if (storageSuccessFlag == True):
dsz.ui.Echo('STRANGELAND should be installed on target... only way to confirm is with DOUBLEFEATURE', dsz.GOOD)
else:
dsz.ui.Echo("STRANGELAND doesn't look like it is on target... only way to confirm is with DOUBLEFEATURE", dsz.ERROR)
success = False
return success
def dll_u(dllfile):
dsz.ui.Echo(('Executing %s via dllload -export dll_u' % dllfile))
dsz.control.echo.Off()
runsuccess = dsz.cmd.Run(('dllload -export dll_u -library "%s"' % dllfile))
dsz.control.echo.On()
if (not runsuccess):
dsz.ui.Echo(('Could not execute %s via dll_u' % dllfile), dsz.ERROR)
return False
dsz.ui.Echo(('Successfully executed %s via dll_u' % dllfile), dsz.GOOD)
return True
def collectfiles():
dsz.control.echo.Off()
runsuccess = dsz.cmd.Run('processinfo -minimal', dsz.RUN_FLAG_RECORD)
dsz.control.echo.On()
(currentPath, file) = dsz.path.Split(dsz.cmd.data.Get('processinfo::modules::module::modulename', dsz.TYPE_STRING)[0])
dsz.ui.Echo(('Getting collection file, "%s\\Tprf3~"' % currentPath))
dsz.control.echo.Off()
runsuccess = dsz.cmd.Run(('get "%s\\Tprf3~"' % currentPath), dsz.RUN_FLAG_RECORD)
dsz.control.echo.On()
if (not runsuccess):
dsz.ui.Echo(('Could not get collection file, %s\\Tprf3~. You may need to collect and clean this manually.' % currentPath), dsz.ERROR)
return False
getfilename = dsz.cmd.data.Get('FileLocalName::localname', dsz.TYPE_STRING)[0]
dsz.ui.Echo(('Deleting collection file, %s\\Tprf3~' % currentPath))
dsz.control.echo.Off()
if (not dsz.cmd.Run(('delete "%s\\Tprf3~"' % currentPath))):
dsz.ui.Echo(('Could not delete collection file, "%s\\Tprf3~". You may need to clean this manually.' % currentPath), dsz.ERROR)
dsz.control.echo.On()
dsz.ui.Echo('Moving file to NOSEND directory...')
dsz.control.echo.Off()
dsz.cmd.Run(('local mkdir %s\\GetFiles\\NOSEND' % logdir))
dsz.cmd.Run(('local mkdir %s\\GetFiles\\STRANGELAND_Decrypted' % logdir))
if (not dsz.cmd.Run(('local move %s\\GetFiles\\%s %s\\GetFiles\\NOSEND\\%s' % (logdir, getfilename, logdir, getfilename)))):
dsz.ui.Echo('Failed to move files to NOSEND', dsz.ERROR)
dsz.control.echo.On()
return parsefile(('%s\\GetFiles\\NOSEND\\%s' % (logdir, getfilename)))
def parsefile(file):
(path, filename) = dsz.path.Split(file)
dsz.control.echo.Off()
runsuccess = dsz.cmd.Run(('local run -command "%s\\Tools\\i386-winnt\\SlDecoder.exe %s %s\\GetFiles\\STRANGELAND_Decrypted\\%s.xml"' % (STLA_PATH, file, logdir, filename)), dsz.RUN_FLAG_RECORD)
dsz.control.echo.On()
if (not runsuccess):
dsz.ui.Echo('There was an error parsing the collection', dsz.ERROR)
return runsuccess
def stlaparse(input):
fullpath = dsz.ui.GetString('Please enter the full path to the file you want to parse: ', '')
if (fullpath == ''):
dsz.ui.Echo('No string entered', dsz.ERROR)
return False
return parsefile(fullpath)
def stlainstall(input):
if dsz.version.checks.IsOs64Bit():
dll_path = 'Uploads\\x64\\mssli64.dll'
else:
dll_path = 'Uploads\\i386\\mssli.dll'
return dll_u(('%s\\%s' % (STLA_PATH, dll_path)))
def stlacollect(input):
if dsz.version.checks.IsOs64Bit():
dll_path = 'Uploads\\x64\\mssld64.dll'
else:
dll_path = 'Uploads\\i386\\mssld.dll'
if dll_u(('%s\\%s' % (STLA_PATH, dll_path))):
return collectfiles()
return False
def stlauninstall(input):
if dsz.version.checks.IsOs64Bit():
dll_path = 'Uploads\\x64\\msslu64.dll'
else:
dll_path = 'Uploads\\i386\\msslu.dll'
if (not dll_u(('%s\\%s' % (STLA_PATH, dll_path)))):
dsz.ui.Echo('Failed to load the uninstaller. Process aborted.', dsz.ERROR)
return False
if (not collectfiles()):
dsz.ui.Echo('Failed to collect and parse file.', dsz.ERROR)
if dsz.file.Exists('tm154*.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154*.da files exist, deleting')
dsz.control.echo.Off()
if (not dsz.cmd.Run(('delete -mask tm154*.da -path "%s\\..\\temp" -max 1' % systemPath))):
dsz.ui.Echo('Failed to delete tm154*.da', dsz.ERROR)
dsz.control.echo.On()
return True
def main():
menuOption = 0
if dsz.version.checks.IsOs64Bit():
architecture = 'x64'
else:
architecture = 'x86'
if dsz.path.windows.GetSystemPath():
global systemPath
systemPath = dsz.path.windows.GetSystemPath()
else:
dsz.ui.Echo('Could not find system path', dsz.ERROR)
return 0
menu_list = list()
menu_list.append({dsz.menu.Name: 'Install', dsz.menu.Function: stlainstall})
menu_list.append({dsz.menu.Name: 'Uninstall', dsz.menu.Function: stlauninstall})
menu_list.append({dsz.menu.Name: 'Verify Install', dsz.menu.Function: stlaverify})
menu_list.append({dsz.menu.Name: 'Collect and Parse', dsz.menu.Function: stlacollect})
menu_list.append({dsz.menu.Name: 'Parse Local', dsz.menu.Function: stlaparse})
while (menuOption != (-1)):
(retvalue, menuOption) = dsz.menu.ExecuteSimpleMenu(('\n\n===============================\nSTRANGELAND v%s %s Menu\n===============================\n' % (version, architecture)), menu_list)
if (menuOption == 0):
if (retvalue == True):
dsz.lp.RecordToolUse(tool, version, 'DEPLOYED', 'Successful')
if (retvalue == False):
dsz.lp.RecordToolUse(tool, version, 'DEPLOYED', 'Unsuccessful')
elif (menuOption == 1):
if (retvalue == True):
dsz.lp.RecordToolUse(tool, version, 'DELETED', 'Successful')
if (retvalue == False):
dsz.lp.RecordToolUse(tool, version, 'DELETED', 'Unsuccessful')
elif (menuOption == 2):
if (retvalue == True):
dsz.lp.RecordToolUse(tool, version, 'EXERCISED', 'Successful')
if (retvalue == False):
dsz.lp.RecordToolUse(tool, version, 'EXERCISED', 'Unsuccessful')
elif (menuOption == 3):
if (retvalue == True):
dsz.lp.RecordToolUse(tool, version, 'EXERCISED', 'Successful')
if (retvalue == False):
dsz.lp.RecordToolUse(tool, version, 'EXERCISED', 'Unsuccessful')
dsz.ui.Echo('**********************************')
dsz.ui.Echo('* STRANGELAND script completed. *')
dsz.ui.Echo('**********************************')
return 0
if (__name__ == '__main__'):
main()
|
unlicense
| -3,926,526,841,632,269,000
| 44.064327
| 197
| 0.615834
| false
| 3.137215
| false
| false
| false
|
crickert1234/ParamAP
|
ParamAP.py
|
1
|
51951
|
#!/usr/bin/env python3
'''
ParamAP.py (parametrization of sinoatrial myocyte action potentials)
Copyright (C) 2018 Christian Rickert <christian.rickert@ucdenver.edu>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
# imports
# runtime
import fnmatch
import functools
import gc
import math
import os
import sys
# numpy
import numpy as np
# scipy
import scipy.signal as sp_sig
import scipy.spatial as sp_spat
import scipy.stats as sp_stat
# matplotlib
import matplotlib.backends.backend_pdf as mpbp
import matplotlib.pyplot as mpp
# variables
APEXT = 0.5 # margin of ap extension (%)
FFRAME = 0.5 # time interval for filtering (ms)
POLYNOM = 2 # polynomial order used for filtering
# functions
def askboolean(dlabel="custom boolean", dval=True):
"""Returns a boolean provided by the user."""
if dval: # True
dstr = "Y/n"
else: # False
dstr = "y/N"
while True:
uchoice = input(dlabel + " [" + dstr + "]: ") or dstr
if uchoice.lower().startswith("y") and not uchoice.endswith("N"):
print("True\n")
return True # break
elif (uchoice.endswith("N") and not uchoice.startswith("Y")) or uchoice.lower().startswith("n"):
print("False\n")
return False # break
else:
continue
def askext(dlabel="custom extension", dext='atf'):
"""Returns a file extention provided by the user."""
while True:
uext = str(input("Enter " + dlabel + " [" + dext + "]: ")).lower() or dext
if uext not in ["dat", "log", "pdf"] and len(uext) == 3:
print(uext + "\n")
return uext # break
else:
print("Invalid file extension!\n")
continue
def askunit(dlabel="custom unit", daxis='', dunit=''):
"""Returns a unit provided by the user."""
while True:
uunit = input("Enter " + dlabel + " [" + dunit + "]: ") or dunit
if daxis in ["x", "X"]:
if uunit in ["ms", "s"]:
print(uunit + "\n")
return uunit # break
else:
print("Invalid unit for X-axis!\n")
continue
elif daxis in ["y", "Y"]:
if uunit in ["mV", "V"]:
print(uunit + "\n")
return uunit # break
else:
print("Invalid unit for Y-axis!\n")
continue
def askvalue(dlabel="custom value", dval=1.0, dunit="", dtype="float"):
"""Returns a value provided by the user."""
while True:
try:
uval = float(input("Enter " + dlabel + " [" + str(dval) + "]" + dunit + ": ") or dval)
break
except ValueError:
print("Non-numerical input!\n")
continue
if dtype == "float": # default
pass
elif dtype == "int":
uval = int(round(uval))
print(str(uval) + "\n")
return uval
def getfiles(path='/home/user/', pattern='*'):
"""Returns all files in path matching the pattern."""
abspath = os.path.abspath(path)
for fileobject in os.listdir(abspath):
filename = os.path.join(abspath, fileobject)
if os.path.isfile(filename) and fnmatch.fnmatchcase(fileobject, pattern):
yield os.path.join(abspath, filename)
def getneighbors(origin_i=np.empty(0), vicinity=np.empty(0), origin_x=np.empty(0), origin_y=np.empty(0), hwidth=float("inf"), fheight=0.0, limit=None, within=float("inf"), bads=False):
"""Returns all nearest-neighbors in ascending (i.e. increasing distance) order."""
neighbors = np.zeros(0)
badorigins = np.zeros(0)
vicinity_kdt = sp_spat.KDTree(list(zip(vicinity, np.zeros(vicinity.size)))) # KDTree for the nearest neighbors search
for origin in origin_i:
neighbor_left, neighbor_right = False, False
for position in vicinity_kdt.query([origin, 0.0], k=limit, distance_upper_bound=within)[1]: # return nearest neighbors in ascending order
if not neighbor_left or not neighbor_right:
neighbor = vicinity[position]
if (abs(origin_x[origin]-origin_x[neighbor]) <= hwidth) and (abs(origin_y[origin]-origin_y[neighbor]) >= fheight): # relative criteria for minima left and right of maximum
if not neighbor_left and (neighbor < origin): # criteria for minimum left of maximum only
neighbors = np.append(neighbors, neighbor)
neighbor_left = True
elif not neighbor_right and (neighbor > origin): # criteria for minimum right of maximum only
neighbors = np.append(neighbors, neighbor)
neighbor_right = True
else: # odd origins with missing neighbors
badorigins = np.append(badorigins, np.argwhere(origin == origin_i))
neighbors = np.sort(np.unique(neighbors)) # unique elements only
if neighbors.size <= 1: # missing neighbor
if neighbor_left:
neighbors = np.append(neighbors, 0.0) # append neighbor_right
if neighbor_right:
neighbors = np.insert(neighbors, 0, 0.0) # insert neighbor_left
badorigins = np.sort(np.unique(badorigins))
return (neighbors.astype(int), badorigins.astype(int)) if bads else (neighbors.astype(int))
def getrunavg(xdata=np.empty(0), xinterval=FFRAME, porder=POLYNOM):
"""Returns the running average count based on a given time interval."""
tmprun = int(round(xinterval/(xdata[1]-xdata[0])))
while tmprun <= porder: # prevents filtering
tmprun += 1
return (tmprun) if tmprun % 2 else (tmprun + 1) # odd number
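# Illustrative sketch (sampling intervals assumed): for a 0.1 ms sampling
# interval the default FFRAME of 0.5 ms yields round(0.5/0.1) = 5 points,
# which is already odd and larger than POLYNOM, so 5 is returned; for a
# 0.25 ms interval the initial count of 2 is bumped to the next odd value.
#
#   >>> getrunavg(np.array([0.0, 0.1, 0.2]))
#   5
#   >>> getrunavg(np.array([0.0, 0.25, 0.5]))
#   3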
def getpartitions(pstart=0, pstop=100, pint=5, pmin=10):
"""Returns a partition list in percent to segment an interval."""
plist = []
for part_l in list(range(int(pstart), int(pstop)+int(pint), int(pint))):
for part_r in list(range(int(pstart), int(pstop)+int(pint), int(pint))):
if part_r > part_l and part_r-part_l >= int(pmin): # no duplication or empty partitions, minimum size
plist.append([part_l, part_r]) # return statement removes the outermost list
return plist
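# Illustrative example (arguments assumed): with 50 % steps and a minimum
# partition size of 10 %, only three segments qualify.
#
#   >>> getpartitions(pstart=0, pstop=100, pint=50, pmin=10)
#   [[0, 50], [0, 100], [50, 100]]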
def getbestlinearfit(xaxis=np.empty(0), yaxis=np.empty(0), xmin=0.0, xmax=1.0, pstart=0, pstop=100, pint=1, pmin=10):
"""Returns the best linear fit from segments of an interval."""
bst_r = 0 # regression coefficient
seg_i = np.argwhere((xaxis >= xmin) & (xaxis <= xmax)).ravel() # analyzing partial segment only
seg_t = xaxis[seg_i[-1]]-xaxis[seg_i[0]] # full interval from partial segment
seg_m, seg_n, seg_r = 0.0, 0.0, 0.0
for partition in getpartitions(pstart, pstop, pint, pmin):
seg_i = np.argwhere((xaxis >= (avgfmin_x[0]+(seg_t*partition[0]/100))) & (xaxis <= (avgfmin_x[0]+(seg_t*partition[1]/100)))).ravel() # 'ravel()' required for 'sp_stat.linregress()'
seg_x = xaxis[seg_i]
seg_y = yaxis[seg_i]
seg_m, seg_n, seg_r = sp_stat.linregress(seg_x, seg_y)[0:3] # tuple unpacking and linear regression of partial ap segment
if math.pow(seg_r, 2.0) >= math.pow(bst_r, 2.0):
bst_m, bst_n, bst_r = seg_m, seg_n, seg_r
bst_i, bst_x, bst_y = seg_i, seg_x, seg_y
# print(partition[0], " - ", partition[1], " : ", str(partition[1]-partition[0]), " ~ ", str(math.pow(bst_r, 2.0))) # shows progress, but is slow!
return (bst_i, bst_x, bst_y, bst_m, bst_n, bst_r)
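# Note: getbestlinearfit() anchors its partitions at the module-level
# 'avgfmin_x' (the filtered minima of the averaged AP, assigned further down
# in the main analysis loop), so it is only meaningful once those minima
# have been determined.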
def mpp_setup(title='Plot title', xlabel='Time [ ]', ylabel='Voltage [ ]'):
"""Provides a title and axes labels to a Matplotlib plot."""
mpp.title(title)
mpp.xlabel(xlabel)
mpp.ylabel(ylabel)
def readfile(inputfile='name'):
"""Extracts the xy pairs from an ASCII raw file and stores its values into a numpy array."""
defux = ["ms", "s"]
defuy = ["mV", "V"]
inpunits = False
with open(inputfile, 'r') as datafile:
line = 1
inpuxy = [] # header might be missing
while line <= 25: # arbitrary Clampfit header limit for ATF
headerline = datafile.readline()
if headerline.startswith("\""):
inpuxy = str(headerline).split() # last line of header contains units
skipline = line
if not inpuxy:
skipline = 0
line += 1
try:
inpux = inpuxy[1][1:-2]
inpuy = inpuxy[4][1:-2]
except IndexError: # missing header
inpux, inpuy = str(defux)[1:-1], str(defuy)[1:-1]
else: # header found
if inpux in defux and inpuy in defuy:
inpunits = True
datafile.seek(0) # reset the file index to the first byte
inp_xy = np.loadtxt(datafile, dtype='float64', delimiter='\t', skiprows=skipline, unpack=True) # slower than np.genfromtxt or native python, but uses less main memory at peak
return inp_xy, inpunits, inpux, inpuy
# main routine
AUTHOR = "Copyright (C) 2018 Christian Rickert"
SEPARBOLD = 79*'='
SEPARNORM = 79*'-'
SOFTWARE = "ParamAP"
VERSION = "version 1.1," # (2018-03-10)
WORKDIR = SOFTWARE # working directory for parameterization
print('{0:^79}'.format(SEPARBOLD) + os.linesep)
GREETER = '{0:<{w0}}{1:<{w1}}{2:<{w2}}'.format(SOFTWARE, VERSION, AUTHOR, w0=len(SOFTWARE)+1, w1=len(VERSION)+1, w2=len(AUTHOR)+1)
INTERMEDIATELINE1 = '{0:}'.format("Laboratory of Cathy Proenza")
INTERMEDIATELINE2 = '{0:}'.format("Department of Physiology & Biophysics")
INTERMEDIATELINE3 = '{0:}'.format("University of Colorado, Anschutz Medical Campus")
DISCLAIMER = "ParamAP is distributed in the hope that it will be useful, but it comes without\nany guarantee or warranty. This program is free software; you can redistribute\nit and/or modify it under the terms of the GNU General Public License:"
URL = "https://www.gnu.org/licenses/gpl-2.0.en.html"
print('{0:^79}'.format(GREETER) + os.linesep)
print('{0:^79}'.format(INTERMEDIATELINE1))
print('{0:^79}'.format(INTERMEDIATELINE2))
print('{0:^79}'.format(INTERMEDIATELINE3) + os.linesep)
print('{0:^79}'.format(DISCLAIMER) + os.linesep)
print('{0:^79}'.format(URL) + os.linesep)
print('{0:^79}'.format(SEPARBOLD) + os.linesep)
# customize use case
AUTORUN = askboolean("Use automatic mode?", False)
SERIES = askboolean("Run time series analysis?", False)
APMODE = askboolean("Analyze action potentials?", True)
print('{0:^79}'.format(SEPARNORM))
# set up working directory
WORKPATH = os.path.abspath(WORKDIR)
if not os.path.exists(WORKPATH):
os.mkdir(WORKPATH)
print("FOLDER:\t" + WORKPATH + "\n")
FILE = 0 # file
EXTENSION = askext(dlabel="custom file type", dext='atf') # file extension used to filter files in working directory
if SERIES:
AVG_FRAME = askvalue(dlabel="analysis frame time", dval=5000.0, dunit=' ms') # time interval for series analysis (ms)
ATFFILES = getfiles(path=WORKDIR, pattern=("*." + EXTENSION))
for ATFFILE in ATFFILES: # iterate through files
name = os.path.splitext(os.path.split(ATFFILE)[1])[0]
print('{0:^79}'.format(SEPARNORM))
print("FILE:\t" + str(name) + os.linesep)
ap_amp = 50.0 # minimum acceptable ap amplitude (mV)
ap_hwd = 250.0 # maximum acceptable ap half width (ms)
ap_max = 50.0 # maximum acceptable ap value (mV)
ap_min = -10.0 # minimum acceptable ap value (mV)
mdp_max = -50.0 # maximum acceptable mdp value (mV)
mdp_min = -90.0 # minimum acceptable mdp value (mV)
wm_der = 1.0 # window multiplier for derivative filtering
wm_max = 4.0 # window multiplier for maximum detection
wm_min = 16.0 # window multiplier for minimum detection
# read file raw data
sys.stdout.write(">> READING... ")
sys.stdout.flush()
RAW_XY, UNITS, UNIT_X, UNIT_Y = readfile(ATFFILE)
if not UNITS: # missing or incomplete units from header
print("\n")
UNIT_X = askunit(dlabel="X-axis unit", daxis="X", dunit=UNIT_X)
UNIT_Y = askunit(dlabel="Y-axis unit", daxis="Y", dunit=UNIT_Y)
sys.stdout.write(1*"\t")
toms = 1000.0 if UNIT_X == "s" else 1.0
RAW_XY[0] *= toms # full X-axis, UNIT_X = "ms"
raw_x = RAW_XY[0] # partial X-axis for time series analysis
tomv = 1000.0 if UNIT_Y == "V" else 1.0
RAW_XY[1] *= tomv # full Y-axis, UNIT_Y = "mV"
raw_y = RAW_XY[1] # partial Y-axis for time series analysis
runavg = getrunavg(RAW_XY[0]) # used for filtering and peak detection
ipg_t = RAW_XY[0][1]-RAW_XY[0][0] # time increment for interpolation grid
if not APMODE: # avoid noise artifacts in beat detection mode
runavg = 10 * runavg + 1  # keep the filter window an odd integer
wm_max *= 1.5
wm_min = wm_max
avg_start = RAW_XY[0][0] # interval start for averaging
avg_stop = RAW_XY[0][-1] # interval stop for averaging
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
while True: # repeat data analysis for current file
startpdf = True # overwrite existing file
segment = 0.0
while True: # time series analysis
try:
# create raw data plot
sys.stdout.write(">> PLOTTING... ")
sys.stdout.flush()
mpp_setup(title="Raw data: " + name, xlabel='Time (ms)', ylabel='Voltage (mV)')
mpp.plot(raw_x, raw_y, '0.75') # raw data (grey line)
if startpdf:
pdf_file = mpbp.PdfPages(os.path.join(WORKDIR, name + ".pdf"), keep_empty=False) # multi-pdf file
startpdf = False # append existing file
mpp.tight_layout() # avoid label overlaps
if segment == 0.0:
mpp.savefig(pdf_file, format='pdf', dpi=600) # save before .show()!
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
if not AUTORUN:
mpp.show()
# set parameters for averaging
sys.stdout.write(">> SETTING... ")
sys.stdout.flush()
if not AUTORUN:
print("\n")
if segment == 0.0: # initialize values
avg_start = askvalue(dlabel="analysis start time", dval=avg_start, dunit=' ms')
avg_stop = askvalue(dlabel="analysis stop time", dval=avg_stop, dunit=' ms')
ap_max = askvalue(dlabel="upper limit for maxima", dval=ap_max, dunit=' mV')
ap_min = askvalue(dlabel="lower limit for maxima", dval=ap_min, dunit=' mV')
mdp_max = askvalue(dlabel="upper limit for minima", dval=mdp_max, dunit=' mV')
mdp_min = askvalue(dlabel="lower limit for minima", dval=mdp_min, dunit=' mV')
if APMODE:
ap_hwd = askvalue(dlabel="maximum peak half width", dval=ap_hwd, dunit=' ms')
ap_amp = askvalue(dlabel="minimum peak amplitude", dval=ap_amp, dunit=' mV')
runavg = askvalue(dlabel="running average window size", dval=runavg, dunit='', dtype='int')
wm_der = askvalue(dlabel="window multiplier for derivative", dval=wm_der, dunit='')
wm_max = askvalue(dlabel="window multiplier for maxima", dval=wm_max, dunit='')
wm_min = askvalue(dlabel="window multiplier for minima", dval=wm_min, dunit='')
mpp.clf() # clear canvas
if segment == 0.0: # set first frame
tmp_start = avg_start + (segment*AVG_FRAME if SERIES else 0.0)
tmp_stop = (tmp_start + AVG_FRAME) if SERIES else avg_stop
raw_i = np.argwhere((RAW_XY[0] >= tmp_start) & (RAW_XY[0] <= tmp_stop)).ravel()
raw_x = RAW_XY[0][raw_i[0]:raw_i[-1]+1]
raw_y = RAW_XY[1][raw_i[0]:raw_i[-1]+1]
sys.stdout.write(("" if AUTORUN else 1*"\t") + 8*"\t" + " [OK]\n")
sys.stdout.flush()
# filter noise of raw data with Savitzky-Golay
sys.stdout.write(">> FILTERING... ")
sys.stdout.flush()
rawf_y = sp_sig.savgol_filter(raw_y, runavg, POLYNOM, mode='nearest')
sys.stdout.write(7*"\t" + " [OK]\n")
sys.stdout.flush()
# detect extrema in filtered raw data
sys.stdout.write(">> SEARCHING... ")
sys.stdout.flush()
if AUTORUN: # use unrestricted dataset (slower)
# detect maxima in filtered raw data
tmpavg = int(round(wm_max*runavg)) if int(round(wm_max*runavg)) % 2 else int(round(wm_max*runavg))+1
rawfmax_iii = np.asarray(sp_sig.argrelmax(rawf_y, order=tmpavg)).ravel() # unfiltered maxima
rawfmax_x = raw_x[rawfmax_iii]
rawfmax_y = rawf_y[rawfmax_iii]
# detect minima in filtered raw data
tmpavg = int(round(wm_min*runavg)) if int(round(wm_min*runavg)) % 2 else int(round(wm_min*runavg))+1
rawfmin_iii = np.asarray(sp_sig.argrelmin(rawf_y, order=tmpavg)).ravel() # unfiltered minima
rawfmin_x = raw_x[rawfmin_iii]
rawfmin_y = rawf_y[rawfmin_iii]
sys.stdout.write(7*"\t" + " [OK]\n")
sys.stdout.flush()
else: # use restricted dataset (faster)
# detect maxima in filtered raw data
tmpmax_x = raw_x[np.intersect1d(np.argwhere(rawf_y >= ap_min), np.argwhere(rawf_y <= ap_max))]
tmpmax_y = rawf_y[np.intersect1d(np.argwhere(rawf_y >= ap_min), np.argwhere(rawf_y <= ap_max))]
tmpavg = int(round(wm_max*runavg)) if int(round(wm_max*runavg)) % 2 else int(round(wm_max*runavg))+1
rawfmax_iii = np.asarray(sp_sig.argrelmax(tmpmax_y, order=tmpavg)).ravel() # unfiltered maxima
rawfmax_ii = np.asarray(np.where(np.in1d(raw_x.ravel(), np.intersect1d(raw_x, tmpmax_x[rawfmax_iii]).ravel()).reshape(raw_x.shape))).ravel() # back to full dataset
rawfmax_x = raw_x[rawfmax_ii]
rawfmax_y = rawf_y[rawfmax_ii]
# detect minima in filtered raw data
tmpmin_x = raw_x[np.intersect1d(np.argwhere(rawf_y >= mdp_min), np.argwhere(rawf_y <= mdp_max))]
tmpmin_y = rawf_y[np.intersect1d(np.argwhere(rawf_y >= mdp_min), np.argwhere(rawf_y <= mdp_max))]
tmpavg = int(round(wm_min*runavg)) if int(round(wm_min*runavg)) % 2 else int(round(wm_min*runavg))+1
rawfmin_iii = np.asarray(sp_sig.argrelmin(tmpmin_y, order=tmpavg)).ravel() # unfiltered minima
rawfmin_ii = np.asarray(np.where(np.in1d(raw_x.ravel(), np.intersect1d(raw_x, tmpmin_x[rawfmin_iii]).ravel()).reshape(raw_x.shape))).ravel()
rawfmin_x = raw_x[rawfmin_ii]
rawfmin_y = rawf_y[rawfmin_ii]
sys.stdout.write(7*"\t" + " [OK]\n")
sys.stdout.flush()
# analyze and reduce extrema in filtered raw data
sys.stdout.write(">> REDUCING... ")
sys.stdout.flush()
rawfmax_m = np.mean(rawfmax_y) # rough estimate due to assignment errors
rawfmin_m = np.mean(rawfmin_y)
rawfmaxmin_m = (rawfmax_m + rawfmin_m) / 2.0 # center between unreduced maxima and minima within limits (may differ from average of AVGMAX and AVGMIN)
if AUTORUN: # estimate range for reduction of extrema
# reduce maxima from unrestricted dataset
rawfmax_ii = np.argwhere(rawfmax_y >= rawfmaxmin_m).ravel() # use center to discriminate between maxima and minima
rawfmax_x = rawfmax_x[rawfmax_ii]
rawfmax_y = rawfmax_y[rawfmax_ii]
rawfmax_std = np.std(rawfmax_y, ddof=1) # standard deviation from the (estimated) arithmetic mean
ap_max = np.mean(rawfmax_y) + 4.0 * rawfmax_std # 99% confidence interval
ap_min = np.mean(rawfmax_y) - 4.0 * rawfmax_std
rawfmax_ii = functools.reduce(np.intersect1d, (rawfmax_iii, np.argwhere(rawf_y >= ap_min), np.argwhere(rawf_y <= ap_max)))
rawfmax_x = raw_x[rawfmax_ii]
rawfmax_y = rawf_y[rawfmax_ii]
# reduce minima from unrestricted dataset
rawfmin_ii = np.argwhere(rawfmin_y <= rawfmaxmin_m)
rawfmin_x = rawfmin_x[rawfmin_ii].ravel()
rawfmin_y = rawfmin_y[rawfmin_ii].ravel()
rawfmin_std = np.std(rawfmin_y, ddof=1)
mdp_max = np.mean(rawfmin_y) + 4.0 * rawfmin_std
mdp_min = np.mean(rawfmin_y) - 4.0 * rawfmin_std
rawfmin_ii = functools.reduce(np.intersect1d, (rawfmin_iii, np.argwhere(rawf_y >= mdp_min), np.argwhere(rawf_y <= mdp_max)))
rawfmin_x = raw_x[rawfmin_ii]
rawfmin_y = rawf_y[rawfmin_ii]
if APMODE: # check extrema for consistency - reduce maxima
badmax_ii = np.zeros(0)
badmin_ii = np.zeros(0)
rawfmin_i, badmax_ii = getneighbors(rawfmax_ii, rawfmin_ii, raw_x, rawf_y, ap_hwd, ap_amp, bads=True)
rawfmax_i = np.delete(rawfmax_ii, badmax_ii)
rawfmin_i = rawfmin_i.astype(int) # casting required for indexing
# check extrema for boundary violations - reduce maxima and minima
while True: # rough check, assignment happens later
if rawfmax_i[0] < rawfmin_i[0]: # starts with a maximum
rawfmax_i = rawfmax_i[1:]
continue
elif rawfmin_i[1] < rawfmax_i[0]: # starts with two minima
rawfmin_i = rawfmin_i[1:]
continue
elif rawfmax_i[-1] > rawfmin_i[-1]: # ends with a maximum
rawfmax_i = rawfmax_i[0:-1]
continue
elif rawfmin_i[-2] > rawfmax_i[-1]: # ends with two minima
rawfmin_i = rawfmin_i[0:-1]
continue
else:
break
rawfmax_x = raw_x[rawfmax_i] # filtered and extracted maxima
rawfmax_y = rawf_y[rawfmax_i]
# assign minima to corresponding maxima - reduce minima
minmaxmin = np.asarray([3*[0] for i in range(rawfmax_i.size)]) # [[min_left_index, max_index, min_right_index], ...]
rawfmin_kdt = sp_spat.KDTree(list(zip(rawfmin_i, np.zeros(rawfmin_i.size))))
i = 0 # index
for max_i in rawfmax_i:
min_left, min_right = False, False
minmaxmin[i][1] = max_i
for order_i in rawfmin_kdt.query([max_i, 0.0], k=None)[1]:
min_i = rawfmin_i[order_i]
if not min_left and (min_i < max_i):
minmaxmin[i][0] = min_i
min_left = True
elif not min_right and (min_i > max_i):
minmaxmin[i][2] = min_i
min_right = True
i += 1
rawfmin_i = np.unique(minmaxmin[:, [0, 2]].ravel())
rawfmin_x = raw_x[rawfmin_i] # filtered and extracted minima
rawfmin_y = rawf_y[rawfmin_i]
# find largest distance between left minima and maxima
ipg_hwl, ipg_tmp = 0.0, 0.0
for min_l, max_c in minmaxmin[:, [0, 1]]:
ipg_tmp = raw_x[max_c] - raw_x[min_l]
if ipg_tmp > ipg_hwl:
ipg_hwl = ipg_tmp
# find largest distance between right minima and maxima
ipg_hwr, ipg_tmp = 0.0, 0.0
for max_c, min_r in minmaxmin[:, [1, 2]]:
ipg_tmp = raw_x[min_r] - raw_x[max_c]
if ipg_tmp > ipg_hwr:
ipg_hwr = ipg_tmp
else: # beating rate
rawfmax_x = raw_x[rawfmax_ii] # pre-filtered maxima
rawfmax_y = rawf_y[rawfmax_ii]
rawfmin_x = raw_x[rawfmin_ii] # pre-filtered minima
rawfmin_y = rawf_y[rawfmin_ii]
rawfmax_m = np.mean(rawfmax_y) # refined estimate due to exclusion (ap_mode)
rawfmin_m = np.mean(rawfmin_y)
if rawfmax_y.size == 0: # no APs detected
raise Warning
else: # two or more APs
frate = 60000.0*(rawfmax_y.size/(rawfmax_x[-1]-rawfmax_x[0])) if rawfmax_y.size > 1 else float('nan') # AP firing rate (FR) [1/min]
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
# create extrema plot
sys.stdout.write(">> PLOTTING... ")
sys.stdout.flush()
mpp_setup(title="Extrema: " + name, xlabel='Time (ms)', ylabel='Voltage (mV)')
mpp.plot([raw_x[0], raw_x[-1]], [0.0, 0.0], '0.85') # X-Axis (grey line)
mpp.plot([raw_x[0], raw_x[-1]], [rawfmaxmin_m, rawfmaxmin_m], 'k--') # center between unfiltered maxima and unfiltered minima, i.e. not between AVGMAX and AVGMIN (black dashed line)
mpp.plot(raw_x, raw_y, '0.50', raw_x, rawf_y, 'r') # raw data and averaged data (grey, red line)
mpp.plot([raw_x[0], raw_x[-1]], [ap_max, ap_max], 'b') # upper limit for maxima (blue dotted line)
mpp.plot([raw_x[0], raw_x[-1]], [ap_min, ap_min], 'b:') # lower limit for maxima (blue dotted line)
mpp.plot([rawfmax_x, rawfmax_x], [ap_min, ap_max], 'b') # accepted maxima (blue line)
mpp.plot([raw_x[0], raw_x[-1]], [mdp_min, mdp_min], 'g') # lower limit for minima (green line)
mpp.plot([raw_x[0], raw_x[-1]], [mdp_max, mdp_max], 'g:') # upper limit for minima (green dotted line)
mpp.plot([rawfmin_x, rawfmin_x], [mdp_min, mdp_max], 'g') # accepted minima (green line)
mpp.plot([rawfmax_x[0], rawfmax_x[-1]], [rawfmax_m, rawfmax_m], 'k') # average of maxima, time interval used for firing rate count (black line)
mpp.plot([rawfmin_x[0], rawfmin_x[-1]], [rawfmin_m, rawfmin_m], 'k') # average of minima (black line)
mpp.plot(raw_x[rawfmax_ii], rawf_y[rawfmax_ii], 'bo') # maxima (blue dots)
mpp.plot(raw_x[rawfmin_ii], rawf_y[rawfmin_ii], 'go') # minima (green dots)
mpp.figtext(0.12, 0.90, "{0:<s} {1:<.4G}".format("AVGMAX (mV):", rawfmax_m), ha='left', va='center')
mpp.figtext(0.12, 0.87, "{0:<s} {1:<.4G}".format("FR (AP/min):", frate), ha='left', va='center')
mpp.figtext(0.12, 0.84, "{0:<s} {1:<.4G}".format("AVGMIN (mV):", rawfmin_m), ha='left', va='center')
mpp.tight_layout()
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
sys.stdout.write(">> SAVING... ")
sys.stdout.flush()
mpp.savefig(pdf_file, format='pdf', dpi=600)
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
if not AUTORUN:
mpp.show()
mpp.clf()
if APMODE:
# slice raw data segments by minima and align by maxima
sys.stdout.write(">> AVERAGING... ")
sys.stdout.flush()
# align ap segments by maxima, extend and average ap segments
ipg_max = float((1.0+APEXT)*ipg_hwr)
ipg_min = -float((1.0+APEXT)*ipg_hwl)
avg_x = np.arange(ipg_min, ipg_max, ipg_t, dtype='float64') # interpolation grid
avgxsize = avg_x.size
avg_y = np.zeros(avgxsize, dtype='float64') # ap average array
mpp.subplot2grid((4, 1), (0, 0), rowspan=3) # upper subplot
timestamp = "[" + str(round(tmp_start, 2)) + "ms-" + str(round(tmp_stop, 2)) + "ms]"
mpp_setup(title='Analysis: ' + name + ' ' + timestamp, xlabel='Time (ms)', ylabel='Voltage (mV)')
mpp.plot([avg_x[0], avg_x[-1]], [0.0, 0.0], '0.85') # X-axis
n = 0 # current maximum
for min_l, max_c, min_r in minmaxmin: # slicing of ap segments, extend ap parts if possible
minext_l = int(min_l - APEXT*(max_c - min_l)) # use int for index slicing
minext_r = int(min_r + APEXT*(min_r - max_c))
# prepare ap segment
tmp_x = np.asarray(raw_x[:] - raw_x[max_c]) # align by maximum
tmp_y = np.interp(avg_x, tmp_x, raw_y[:])
# average ap segments
if n == 0: # first average
avg_y = np.copy(tmp_y)
else: # all other averages
i = 0 # array index
nw = (1.0/(n+1.0)) # new data weight
pw = (n/(n+1.0)) # previous data weight
for y in np.nditer(avg_y, op_flags=['readwrite']):
y[...] = pw*y + nw*tmp_y[i] # integrate raw data into averaged data
i += 1
n += 1
mpp.plot(avg_x, tmp_y, '0.75') # plot aligned raw data segments
sys.stdout.write("\t\t\t\t\t\t\t [OK]\n")
sys.stdout.flush()
# analyze AP parameters with given criteria
sys.stdout.write(">> ANALYZING... ")
sys.stdout.flush()
# filter noise of averaged data with Savitzky-Golay
avgf_y = sp_sig.savgol_filter(avg_y, runavg, POLYNOM, mode='nearest')
# detect "Peak potential: Maximum potential of AP" (PP) (mV)
avgfmax_i = np.argwhere(avg_x == 0.0) # data point for maximum centered
if not avgfmax_i: # data point for maximum left or right of center
tmpavg = int(round(wm_max*runavg)) if int(round(wm_max*runavg)) % 2 else int(round(wm_max*runavg))+1
avgfmax_ii = np.asarray(sp_sig.argrelmax(avgf_y, order=tmpavg)).ravel() # find all maxima
avgfmax_i = avgfmax_ii[np.argmin(np.abs(avg_x[avgfmax_ii] - 0.0))] # return the maximum closest to X = 0.0
avgfmax_x = avg_x[avgfmax_i]
avgfmax_y = avgf_y[avgfmax_i]
pp_y = float(avgfmax_y)
pp = pp_y
# detect and reduce (several) minima in filtered average data,
tmpavg = int(round(wm_min*runavg)) if int(round(wm_min*runavg)) % 2 else int(round(wm_min*runavg))+1
avgfmin_ii = np.asarray(sp_sig.argrelmin(avgf_y, order=tmpavg)).ravel() # find all minima
avgfmin_i = getneighbors(np.asarray([avgfmax_i]), avgfmin_ii, avg_x, avgf_y, ap_hwd, ap_amp)
avgfmin_x = avg_x[avgfmin_i]
avgfmin_y = avgf_y[avgfmin_i]
# determine "Maximum diastolic potential 1: Minimum potential preceding PP" (MDP1) (mV)
mdp1_i = avgfmin_i[0]
mdp1_x = avg_x[mdp1_i]
mdp1_y = avgf_y[mdp1_i]
mdp1 = mdp1_y
# determine "Maximum diastolic potential 2: Minimum potential following PP" (MDP2) (mV)
mdp2_i = avgfmin_i[-1]
mdp2_x = avg_x[mdp2_i]
mdp2_y = avgf_y[mdp2_i]
mdp2 = mdp2_y
# determine "Cycle length: Time interval MDP1-MDP2" (CL) (ms)
cl = float(mdp2_x - mdp1_x)
# determine "Action potential amplitude: Potential difference of PP minus MDP2" (APA) (mV)
apa = pp - mdp2
# determine "AP duration 50: Time interval at 50% of maximum repolarization" (APD50) (ms)
apd50_l = (pp - 0.50*apa) # threshold value
apd50_i = functools.reduce(np.intersect1d, (np.argwhere(avgf_y > apd50_l), np.argwhere(avg_x >= mdp1_x), np.argwhere(avg_x <= mdp2_x)))
apd50_x = (avg_x[apd50_i[0]-1], avg_x[apd50_i[-1]+1]) # equal to or smaller than apd50_l
apd50_y = (avgf_y[apd50_i[0]-1], avgf_y[apd50_i[-1]+1])
apd50 = float(apd50_x[-1] - apd50_x[0])
# determine "AP duration 90: Time interval at 90% of maximum repolarization" (APD90) (ms)
apd90_l = pp - 0.90*apa
apd90_i = functools.reduce(np.intersect1d, (np.argwhere(avgf_y > apd90_l), np.argwhere(avg_x >= mdp1_x), np.argwhere(avg_x <= mdp2_x)))
apd90_x = (avg_x[apd90_i[0]-1], avg_x[apd90_i[-1]+1]) # equal to or smaller than apd90_l
apd90_y = (avgf_y[apd90_i[0]-1], avgf_y[apd90_i[-1]+1])
apd90 = float(apd90_x[-1] - apd90_x[0])
# calculate derivative of averaged data (mV/ms)
avgfg_y = np.ediff1d(avgf_y) # dY/1, differences between values
avgfg_y = np.insert(avgfg_y, 0, avgfg_y[0]) # preserve array size
avgfg_y = avgfg_y / ipg_t # dY/dX, differences per increment
# filter derivative of averaged data
tmpavg = int(round(wm_der*runavg)) if int(round(wm_der*runavg)) % 2 else int(round(wm_der*runavg))+1
avgfgf_y = sp_sig.savgol_filter(avgfg_y, tmpavg, POLYNOM, mode='nearest')
# determine "Maximum upstroke velocity: Maximum of derivative between MDP1 and PP" (MUV) (mV/ms)
tmpavg = int(round(wm_max*runavg)) if int(round(wm_max*runavg)) % 2 else int(round(wm_max*runavg))+1
avgfgfmax_ii = functools.reduce(np.intersect1d, (sp_sig.argrelmax(avgfgf_y, order=tmpavg), np.argwhere(avg_x >= mdp1_x), np.argwhere(avg_x <= avgfmax_x)))
avgfgfmax_i = getneighbors(np.asarray([avgfmax_i]), avgfgfmax_ii, avg_x, avgfgf_y)[0] # avoid errors from large ap part extensions
avgfgfmax_x = avg_x[avgfgfmax_i]
avgfgfmax_y = avgfgf_y[avgfgfmax_i]
muv = float(avgfgfmax_y)
# determine "Maximum repolarization rate: Minimum of derivative between PP and MDP2" (MRR) (mV/ms)
tmpavg = int(round(wm_min*runavg)) if int(round(wm_min*runavg)) % 2 else int(round(wm_min*runavg))+1
avgfgfmin_ii = functools.reduce(np.intersect1d, (sp_sig.argrelmin(avgfgf_y, order=tmpavg), np.argwhere(avg_x >= avgfmax_x), np.argwhere(avg_x <= mdp2_x)))
avgfgfmin_i = getneighbors(np.asarray([apd90_i[-1]+1]), avgfgfmin_ii, avg_x, avgfgf_y)[0] # mrr or trr
avgfgfmin_i = np.append(avgfgfmin_i, getneighbors(np.asarray([avgfgfmax_i]), avgfgfmin_ii, avg_x, avgfgf_y)[1]) # trr only
if avgfgfmin_i[0] == avgfgfmin_i[1]: # no trr
trr = 0.0
else:
# determine "Transient repolarization rate: Second minimum of derivative between PP and MDP2 after PP, if distinct from MRR" (TRR) (mV/ms)
trr = float(avgfgf_y[avgfgfmin_i][1])
avgfgfmin_x = avg_x[avgfgfmin_i]
avgfgfmin_y = avgfgf_y[avgfgfmin_i]
mrr = float(avgfgf_y[avgfgfmin_i][0])
# approximate diastolic duration in filtered derivative
da_i, da_x, da_y, da_m, da_n, da_r = getbestlinearfit(avg_x, avgfgf_y, mdp1_x, apd90_x[0], 10, 90, 1, 40) # get a baseline for the derivative before exceeding the threshold
# determine "Threshold potential: Potential separating DD and APD." (THR) (mV)
thr_i = functools.reduce(np.intersect1d, (np.argwhere(avgfgf_y >= ((da_m*avg_x + da_n) + 0.5)), np.argwhere(avg_x >= avg_x[da_i[-1]]), np.argwhere(avg_x <= apd50_x[0])))[0].astype(int) # determine baseline-corrected threshold level
thr_x = avg_x[thr_i]
thr_y = avgf_y[thr_i]
thr = float(thr_y)
# determine "Early diastolic duration: Time from MDP1 to end of linear fit for DDR" (EDD) (ms)
edd_i, edd_x, edd_y, edd_m, edd_n, edd_r = getbestlinearfit(avg_x, avgf_y, mdp1_x, thr_x, 10, 50, 1, 20) # fit EDD within the threshold level determined earlier
edd = float(edd_x[-1]-mdp1_x)
# determine "Diastolic depolarization rate: Potential change rate at end of EDD" (DDR) (mV/ms)
ddr = float(edd_m) # or: np.mean(avgfgf_y[edd_i])
# determine "Diastolic duration: EDD plus LDD" (DD) (ms)
dd = float(thr_x - mdp1_x)
# determine "Late diastolic duration: Time from end of linear fit for DDR to THR" (LDD) (ms)
ldd = float(thr_x - edd_x[-1])
# determine "Action potential duration: Time between THR and MDP2" (APD) (ms)
apd = float(mdp2_x - thr_x)
sys.stdout.write("\t\t\t\t\t\t\t [OK]\n")
sys.stdout.flush()
# create analysis plot
sys.stdout.write(">> PLOTTING... ") # the X-axis and the individual segments are already plotted during averaging
sys.stdout.flush()
mpp.plot([mdp1_x, thr_x], [mdp1_y, mdp1_y], 'k-.') # DD (black dashed/dotted line)
mpp.plot([thr_x, mdp2_x], [mdp2_y, mdp2_y], 'k') # APD (black line)
mpp.plot([apd50_x[0], apd50_x[1]], [apd50_y[1], apd50_y[1]], 'k') # APD50 (black line)
mpp.plot([apd90_x[0], apd90_x[1]], [apd90_y[1], apd90_y[1]], 'k') # APD90 (black line)
mpp.plot([mdp1_x, mdp1_x], [mdp1_y, 0.0], 'k:') # MDP1 indicator (black dotted line)
mpp.plot([mdp2_x, mdp2_x], [mdp2_y, 0.0], 'k:') # MDP2 indicator (black dotted line)
mpp.plot([avgfgfmax_x, avgfgfmax_x], [mdp2_y, avgf_y[avgfgfmax_i]], 'k:') # MUV indicator (black dotted line)
mpp.plot([avgfgfmin_x[0], avgfgfmin_x[0]], [mdp2_y, avgf_y[avgfgfmin_i[0]]], 'k:') # MRR indicator (black dotted line)
if trr:
mpp.plot([avgfgfmin_x[1], avgfgfmin_x[1]], [mdp2_y, avgf_y[avgfgfmin_i[1]]], 'k:') # TRR indicator (black dotted line)
mpp.plot([edd_x[-1], edd_x[-1]], [mdp2_y, 0.0], 'k:') # EDD/LDD separator (black dotted line)
mpp.plot([thr_x, thr_x], [thr_y, 0.0], 'k:') # DD/APD upper separator (black dotted line)
mpp.plot([thr_x, thr_x], [mdp2_y, thr_y], 'k:') # DD/APD lower separator (black dotted line)
mpp.plot(avg_x, avg_y, 'k', avg_x, avgf_y, 'r') # averaged data and filtered averaged data (black, red lines)
mpp.plot(avg_x[edd_i], avgf_y[edd_i], 'g') # best linear fit segment for DDR (green line)
mpp.plot(avg_x, (edd_m*avg_x + edd_n), 'k--') # DDR (black dashed line)
mpp.plot([edd_x[-1]], [edd_y[-1]], 'ko') # EDD-LDD separator (black dot)
mpp.plot([apd50_x[1]], [apd50_y[1]], 'ko') # APD50 (black dots)
mpp.plot(apd90_x[1], apd90_y[1], 'ko') # APD90 (black dots)
mpp.plot(thr_x, avgf_y[thr_i], 'ro') # THR (red dot)
mpp.plot(avgfgfmax_x, avgf_y[avgfgfmax_i], 'wo') # MUV (white dot)
mpp.plot(avgfgfmin_x[0], avgf_y[avgfgfmin_i[0]], 'wo') # MRR (white dot)
if trr:
mpp.plot(avgfgfmin_x[1], avgf_y[avgfgfmin_i[1]], 'wo') # TRR (white dot)
mpp.plot(avgfmax_x, pp_y, 'bo') # PP (blue dot)
mpp.plot(avgfmin_x, avgfmin_y, 'go') # MDP1, MDP2 (green dots)
mpp.figtext(0.12, 0.90, "{0:<s} {1:<.4G}".format("APs (#):", rawfmax_y.size), ha='left', va='center')
mpp.figtext(0.12, 0.87, "{0:<s} {1:<.4G}".format("FR (AP/min):", frate), ha='left', va='center')
mpp.figtext(0.12, 0.84, "{0:<s} {1:<.4G}".format("CL (ms):", cl), ha='left', va='center')
mpp.figtext(0.12, 0.81, "{0:<s} {1:<.4G}".format("DD (ms):", dd), ha='left', va='center')
mpp.figtext(0.12, 0.78, "{0:<s} {1:<.4G}".format("EDD (ms):", edd), ha='left', va='center')
mpp.figtext(0.12, 0.75, "{0:<s} {1:<.4G}".format("LDD (ms):", ldd), ha='left', va='center')
mpp.figtext(0.12, 0.72, "{0:<s} {1:<.4G}".format("APD (ms):", apd), ha='left', va='center')
mpp.figtext(0.12, 0.69, "{0:<s} {1:<.4G}".format("APD50 (ms):", apd50), ha='left', va='center')
mpp.figtext(0.12, 0.66, "{0:<s} {1:<.4G}".format("APD90 (ms):", apd90), ha='left', va='center')
mpp.figtext(0.12, 0.63, "{0:<s} {1:<.4G}".format("MDP1 (mV):", mdp1), ha='left', va='center')
mpp.figtext(0.12, 0.60, "{0:<s} {1:<.4G}".format("MDP2 (mV):", mdp2), ha='left', va='center')
mpp.figtext(0.12, 0.57, "{0:<s} {1:<.4G}".format("THR (mV):", thr), ha='left', va='center')
mpp.figtext(0.12, 0.54, "{0:<s} {1:<.4G}".format("PP (mV):", pp), ha='left', va='center')
mpp.figtext(0.12, 0.51, "{0:<s} {1:<.4G}".format("APA (mV):", apa), ha='left', va='center')
mpp.figtext(0.12, 0.48, "{0:<s} {1:<.4G}".format("DDR (mV/ms):", ddr), ha='left', va='center')
mpp.figtext(0.12, 0.45, "{0:<s} {1:<.4G}".format("MUV (mV/ms):", muv), ha='left', va='center')
mpp.figtext(0.12, 0.42, "{0:<s} {1:<.4G}".format("TRR (mV/ms):", trr), ha='left', va='center')
mpp.figtext(0.12, 0.39, "{0:<s} {1:<.4G}".format("MRR (mV/ms):", mrr), ha='left', va='center')
mpp.subplot2grid((4, 1), (3, 0)) # lower subplot
mpp_setup(title="", xlabel='Time (ms)', ylabel='(mV/ms)')
mpp.plot([avg_x[0], avg_x[-1]], [0.0, 0.0], '0.85') # x axis
mpp.plot([avgfgfmin_x[0], avgfgfmin_x[0]], [avgfgfmin_y[0], avgfgfmax_y], 'k:') # MRR indicator (black dotted line)
if trr:
mpp.plot([avgfgfmin_x[1], avgfgfmin_x[1]], [avgfgfmin_y[1], avgfgfmax_y], 'k:') # TRR indicator (black dotted line)
mpp.plot([thr_x, thr_x], [avgfgf_y[thr_i], avgfgfmax_y], 'k:') # THR indicator (black dotted line)
mpp.plot(avg_x, avgfg_y, 'c', avg_x, avgfgf_y, 'm') # derivative and filtered derivative
mpp.plot(avg_x[da_i], avgfgf_y[da_i], 'g') # best linear fit segment for THR (green line)
mpp.plot(avg_x, (da_m*avg_x + da_n), 'k--') # best linear fit for THR (black dashed line)
mpp.plot(thr_x, avgfgf_y[thr_i], 'ro') # THR (red dot)
mpp.plot(avgfgfmax_x, avgfgfmax_y, 'bo') # derivative maximum (blue dot)
mpp.plot(avgfgfmin_x, avgfgfmin_y, 'go') # derivative minima (green dots)
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
# data summary
sys.stdout.write(">> SAVING... ")
sys.stdout.flush()
avg_file = os.path.join(WORKDIR, name + "_" + timestamp + "_avg.dat")
uheader = "" +\
"Analysis start time: " + 4*"\t" + str(tmp_start) + " ms\n" + \
"Analysis stop time:" + 4*"\t" + str(tmp_stop) + " ms\n" + \
"Upper limit for maxima:" + 3*"\t" + str(ap_max) + " mV\n" + \
"Lower limit for maxima:" + 3*"\t" + str(ap_min) + " mV\n" + \
"Upper limit for minima:" + 3*"\t" + str(mdp_max) + " mV\n" + \
"Lower limit for minima:" + 3*"\t" + str(mdp_min) + " mV\n" + \
"Maximum peak half width:" + 3*"\t" + str(ap_hwd) + " ms\n" + \
"Minimum peak amplitude:" + 3*"\t" + str(ap_amp) + " mV\n" + \
"Running average window size:" + 2*"\t" + str(runavg) + "\n" + \
"Window multiplier for derivative:" + "\t" + str(wm_der) + "\n" + \
"Window multiplier for maxima:" + 2*"\t" + str(wm_max) + "\n" + \
"Window multiplier for minima:" + 2*"\t" + str(wm_min) + "\n" + \
"Time (ms)" + "\t" + "Averaged signal (mV)" + "\t" + "Filtered average (mV)"
np.savetxt(avg_file, np.column_stack((avg_x, avg_y, avgf_y)), fmt='%e', delimiter='\t', header=uheader)
mpp.tight_layout()
mpp.savefig(pdf_file, format='pdf', dpi=600)
sum_file = os.path.join(WORKDIR, "ParamAP.log")
newfile = not bool(os.path.exists(sum_file))
with open(sum_file, 'a') as targetfile: # append file
if newfile: # write header
targetfile.write(
"{0:s}\t{1:s}\t{2:s}\t{3:s}\t{4:s}\t{5:s}\t{6:s}\t{7:s}\t{8:s}\t{9:s}\t{10:s}\t{11:s}\t{12:s}\t{13:s}\t{14:s}\t{15:s}\t{16:s}\t{17:s}\t{18:s}\t{19:s}\t{20:s}".format(
"File ( )", "Start (ms)", "Stop (ms)", "APs (#)", "FR (AP/min)", "CL (ms)", "DD (ms)", "EDD (ms)", "LDD (ms)", "APD (ms)", "APD50 (ms)", "APD90 (ms)", "MDP1 (mV)", "MDP2 (mV)", "THR (mV)", "PP (mV)", "APA (mV)", "DDR (mV/ms)", "MUV (mV/ms)", "TRR (mV/ms)", "MRR (mV/ms)") + "\n")
targetfile.write(
"{0:s}\t{1:4G}\t{2:4G}\t{3:4G}\t{4:4G}\t{5:4G}\t{6:4G}\t{7:4G}\t{8:4G}\t{9:4G}\t{10:4G}\t{11:4G}\t{12:4G}\t{13:4G}\t{14:4G}\t{15:4G}\t{16:4G}\t{17:4G}\t{18:4G}\t{19:4G}\t{20:4G}".format(
name, tmp_start, tmp_stop, rawfmax_y.size, frate, cl, dd, edd, ldd, apd, apd50, apd90, mdp1, mdp2, thr, pp, apa, ddr, muv, trr, mrr) + "\n")
targetfile.flush()
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
if not AUTORUN:
mpp.show()
except IndexError as ierr: # check running average and window multiplier
sys.stdout.write("\n" + 9*"\t" + " [ER]")
print("\r ## Run failed. Detection of extrema or threshold failed.")
except PermissionError as perr: # file already opened or storage read-only
sys.stdout.write("\n" + 9*"\t" + " [ER]")
print("\r ## Run failed. File access denied by system.")
except Warning as werr: # increase averaging window time
sys.stdout.write("\n" + 9*"\t" + " [ER]")
print("\r ## Run failed. Identification of action potentials failed.")
except Exception as uerr: # unknown
sys.stdout.write("\n" + 9*"\t" + " [UN]")
print("\r ## Run failed. Error was: {0}".format(uerr) + ".")
except KeyboardInterrupt as kerr: # user canceled this file
sys.stdout.write("\n" + 9*"\t" + " [KO]")
print("\r ## Run skipped. Canceled by user.")
if SERIES: # check for next frame
if tmp_stop + AVG_FRAME <= avg_stop:
segment += 1.0
tmp_start = avg_start + segment*AVG_FRAME # prepare next frame for preview
tmp_stop = tmp_start + AVG_FRAME
raw_i = np.argwhere((RAW_XY[0] >= tmp_start) & (RAW_XY[0] <= tmp_stop)).ravel()
raw_x = RAW_XY[0][raw_i[0]:raw_i[-1]+1]
raw_y = RAW_XY[1][raw_i[0]:raw_i[-1]+1]
print()
print("RUN:\t" + str(int(segment + 1)) + "/" + str(math.floor((avg_stop-avg_start)/AVG_FRAME)))
print()
else: # not enough data left in file
break
else: # no time series analysis
break
if not AUTORUN: # check for next file
print()
nextfile = askboolean("Continue with next file?", True)
if nextfile:
break
else: # re-run current file
raw_x = RAW_XY[0] # recover original rawdata
raw_y = RAW_XY[1]
continue
else: # autorun
break
# housekeeping after each file
FILE += 1
sys.stdout.write(">> CLEANING... ")
sys.stdout.flush()
pdf_file.close() # close multi-pdf file and remove if empty
mpp.clf() # clear canvas
gc.collect() # start garbage collection to prevent memory fragmentation
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
# print summary
print('{0:^79}'.format(SEPARBOLD))
SUMMARY = "End of run: " + str(FILE) + str(" files" if FILE != 1 else " file") + " processed."
print('{0:^79}'.format(SUMMARY))
print('{0:^79}'.format(SEPARBOLD) + os.linesep)
WAIT = input("Press ENTER to end this program.")
|
gpl-2.0
| 1,966,777,943,420,654,000
| 57.713793
| 315
| 0.51666
| false
| 3.196985
| false
| false
| false
|
baohaojun/dico
|
dicoweb/settings-sample.py
|
1
|
2553
|
# Django settings for Dicoweb project.
#
# This file is part of GNU Dico.
# Copyright (C) 2008-2010, 2012 Wojciech Polak
#
# GNU Dico is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Dico is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Dico. If not, see <http://www.gnu.org/licenses/>.
import os
SITE_ROOT = os.path.dirname (os.path.realpath (__file__))
DEBUG = True
TEMPLATE_DEBUG = True
ADMINS = (
('Your Name', 'Your e-mail address'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = ''
DATABASE_NAME = ''
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
SITE_ID = 1
USE_I18N = True
TIME_ZONE = 'Europe/Warsaw'
LANGUAGE_CODE = 'en-us'
LANGUAGE_COOKIE_NAME = 'dicoweb_lang'
SESSION_COOKIE_NAME = 'dicoweb_sid'
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Caching, see http://docs.djangoproject.com/en/dev/topics/cache/#topics-cache
CACHE_BACKEND = 'memcached://127.0.0.1:11211/'
# Absolute path to the directory that holds media/static files.
MEDIA_ROOT = os.path.join (SITE_ROOT, 'static')
# URL that handles the media served from MEDIA_ROOT.
MEDIA_URL = 'static'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'SET THIS TO A RANDOM STRING'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
)
ROOT_URLCONF = 'dicoweb.urls'
TEMPLATE_DIRS = (
os.path.join (SITE_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'dicoweb',
)
DICT_SERVERS = ('gnu.org.ua',)
DICT_TIMEOUT = 10
|
gpl-3.0
| 7,779,301,981,850,678,000
| 27.685393
| 78
| 0.725421
| false
| 3.350394
| false
| false
| false
|
danielktaylor/PyLimitBook
|
pylimitbook/book.py
|
1
|
4650
|
#!/usr/bin/python
from collections import deque
from pylimitbook.tick import Bid, Ask, Trade
from pylimitbook.tree import Tree
from builtins import input
from six.moves import cStringIO as StringIO
def parse_csv(columns, line):
"""
Parse a CSV line that has ',' as a separator.
Columns is a list of the column names, must match the number of
comma-separated values in the input line.
"""
data = {}
split = line.split(',')
for idx, name in enumerate(columns):
data[name] = split[idx]
return data
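# Illustrative example (field values assumed): the column names are zipped
# positionally with the comma-separated values of the line.
#
#   >>> row = parse_csv(['event', 'symbol', 'qty'], 'B,AAPL,100')
#   >>> row['symbol'], row['qty']
#   ('AAPL', '100')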
class Book(object):
def __init__(self):
self.trades = deque(maxlen=100) # Index [0] is most recent trade
self.bids = Tree()
self.asks = Tree()
self.last_tick = None
self.last_timestamp = 0
def process_bid_ask(self, tick):
"""
Generic method to process bid or ask.
"""
tree = self.asks
if tick.is_bid:
tree = self.bids
if tick.qty == 0:
# Quantity is zero -> remove the entry
tree.remove_order_by_id(tick.id_num)
else:
if tree.order_exists(tick.id_num):
tree.update_order(tick)
else:
# New order
tree.insert_tick(tick)
def bid(self, csv):
columns = ['event', 'symbol', 'exchange', 'id_num', 'qty', 'price', 'timestamp']
data = parse_csv(columns, csv)
bid = Bid(data)
if bid.timestamp > self.last_timestamp:
self.last_timestamp = bid.timestamp
self.last_tick = bid
self.process_bid_ask(bid)
return bid
def bid_split(self, symbol, id_num, qty, price, timestamp):
data = {
'timestamp': timestamp,
'qty': qty,
'price': price,
'id_num': id_num
}
bid = Bid(data)
if bid.timestamp > self.last_timestamp:
self.last_timestamp = bid.timestamp
self.last_tick = bid
self.process_bid_ask(bid)
return bid
def ask(self, csv):
columns = ['event', 'symbol', 'exchange', 'id_num', 'qty', 'price', 'timestamp']
data = parse_csv(columns, csv)
ask = Ask(data)
if ask.timestamp > self.last_timestamp:
self.last_timestamp = ask.timestamp
self.last_tick = ask
self.process_bid_ask(ask)
return ask
def ask_split(self, symbol, id_num, qty, price, timestamp):
data = {
'timestamp': timestamp,
'qty': qty,
'price': price,
'id_num': id_num
}
ask = Ask(data)
if ask.timestamp > self.last_timestamp:
self.last_timestamp = ask.timestamp
self.last_tick = ask
self.process_bid_ask(ask)
return ask
def trade(self, csv):
columns = ['event', 'symbol', 'exchange', 'id_num', 'qty', 'price', 'timestamp']
data = parse_csv(columns, csv)
data['id_num'] = 0
trade = Trade(data)
if trade.timestamp > self.last_timestamp:
self.last_timestamp = trade.timestamp
self.last_tick = trade
self.trades.appendleft(trade)
return trade
def trade_split(self, symbol, qty, price, timestamp):
data = {
'timestamp': timestamp,
'qty': qty,
'price': price,
'id_num': 0
}
trade = Trade(data)
if trade.timestamp > self.last_timestamp:
self.last_timestamp = trade.timestamp
self.last_tick = trade
self.trades.appendleft(trade)
return trade
def __str__(self):
# Efficient string concat
file_str = StringIO()
file_str.write("------ Bids -------\n")
if self.bids is not None and len(self.bids) > 0:
for k, v in self.bids.price_tree.items(reverse=True):
file_str.write('%s' % v)
file_str.write("\n------ Asks -------\n")
if self.asks is not None and len(self.asks) > 0:
for k, v in self.asks.price_tree.items():
file_str.write('%s' % v)
file_str.write("\n------ Trades ------\n")
if self.trades is not None and len(self.trades) > 0:
num = 0
for entry in self.trades:
if num < 5:
file_str.write(str(entry.qty) + " @ " \
+ str(entry.price / 10000) \
+ " (" + str(entry.timestamp) + ")\n")
num += 1
else:
break
file_str.write("\n")
return file_str.getvalue()
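# Minimal usage sketch (symbol, ids, prices and timestamps are made up, and
# the exact fields expected by Bid/Ask live in pylimitbook.tick): quotes are
# fed through the *_split() helpers and the book prints its current
# bid/ask/trade state.
#
#   >>> book = Book()
#   >>> book.bid_split('AAPL', id_num=1, qty=100, price=1500000, timestamp=1)
#   >>> book.ask_split('AAPL', id_num=2, qty=100, price=1510000, timestamp=2)
#   >>> print(book)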
|
mit
| -3,987,454,480,899,093,000
| 31.517483
| 88
| 0.517849
| false
| 3.79902
| false
| false
| false
|
evernote/pootle
|
pootle/apps/pootle_store/views.py
|
1
|
33949
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010-2013 Zuza Software Foundation
# Copyright 2013-2014 Evernote Corporation
#
# This file is part of Pootle.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import logging
from itertools import groupby
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db.models import Max, Q
from django.http import HttpResponse, Http404
from django.shortcuts import redirect, render
from django.template import loader, RequestContext
from django.utils.translation import to_locale, ugettext as _
from django.utils.translation.trans_real import parse_accept_lang_header
from django.utils import timezone
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_http_methods
from translate.filters.decorators import Category
from translate.lang import data
from pootle.core.decorators import (get_path_obj, get_resource,
permission_required)
from pootle.core.exceptions import Http400
from pootle.core.mixins.treeitem import CachedMethods
from pootle_app.models.permissions import check_user_permission
from pootle_misc.checks import check_names
from pootle_misc.forms import make_search_form
from pootle_misc.util import ajax_required, jsonify, to_int
from pootle_statistics.models import (Submission, SubmissionFields,
SubmissionTypes)
from .decorators import get_unit_context
from .fields import to_python
from .forms import (unit_comment_form_factory, unit_form_factory,
highlight_whitespace)
from .models import Unit, SuggestionStates
from .signals import translation_submitted
from .templatetags.store_tags import (highlight_diffs, pluralize_source,
pluralize_target)
from .util import (UNTRANSLATED, FUZZY, TRANSLATED, STATES_MAP,
find_altsrcs)
#: Mapping of allowed sorting criteria.
#: Keys are supported query strings, values are the field + order that
#: will be used against the DB.
ALLOWED_SORTS = {
'units': {
'oldest': 'mtime',
'newest': '-mtime',
},
'suggestions': {
'oldest': 'submission__suggestion__creation_time',
'newest': '-submission__suggestion__creation_time',
},
'submissions': {
'oldest': 'submission__creation_time',
'newest': '-submission__creation_time',
},
}
#: List of fields from `ALLOWED_SORTS` that can be sorted by simply using
#: `order_by(field)`
SIMPLY_SORTED = ['units']
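# Illustrative lookup (request values assumed): a "sort=newest" query string
# on a suggestions filter resolves to the DB ordering below, while anything
# not listed in ALLOWED_SORTS yields None and leaves the queryset unsorted.
#
#   >>> ALLOWED_SORTS['suggestions'].get('newest')
#   '-submission__suggestion__creation_time'
#   >>> ALLOWED_SORTS['units'].get('priority') is None
#   True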
def get_alt_src_langs(request, user, translation_project):
language = translation_project.language
project = translation_project.project
source_language = project.source_language
langs = user.alt_src_langs.exclude(
id__in=(language.id, source_language.id)
).filter(translationproject__project=project)
if not user.alt_src_langs.count():
from pootle_language.models import Language
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
continue
simplified = data.simplify_to_common(accept_lang)
normalized = to_locale(data.normalize_code(simplified))
code = to_locale(accept_lang)
if (normalized in
('en', 'en_US', source_language.code, language.code) or
code in ('en', 'en_US', source_language.code, language.code)):
continue
langs = Language.objects.filter(
code__in=(normalized, code),
translationproject__project=project,
)
if langs.count():
break
return langs
def get_search_query(form, units_queryset):
words = form.cleaned_data['search'].split()
result = units_queryset.none()
if 'source' in form.cleaned_data['sfields']:
subresult = units_queryset
for word in words:
subresult = subresult.filter(source_f__icontains=word)
result = result | subresult
if 'target' in form.cleaned_data['sfields']:
subresult = units_queryset
for word in words:
subresult = subresult.filter(target_f__icontains=word)
result = result | subresult
if 'notes' in form.cleaned_data['sfields']:
translator_subresult = units_queryset
developer_subresult = units_queryset
for word in words:
translator_subresult = translator_subresult.filter(
translator_comment__icontains=word,
)
developer_subresult = developer_subresult.filter(
developer_comment__icontains=word,
)
result = result | translator_subresult | developer_subresult
if 'locations' in form.cleaned_data['sfields']:
subresult = units_queryset
for word in words:
subresult = subresult.filter(locations__icontains=word)
result = result | subresult
return result
def get_search_exact_query(form, units_queryset):
phrase = form.cleaned_data['search']
result = units_queryset.none()
if 'source' in form.cleaned_data['sfields']:
subresult = units_queryset.filter(source_f__contains=phrase)
result = result | subresult
if 'target' in form.cleaned_data['sfields']:
subresult = units_queryset.filter(target_f__contains=phrase)
result = result | subresult
if 'notes' in form.cleaned_data['sfields']:
translator_subresult = units_queryset
developer_subresult = units_queryset
translator_subresult = translator_subresult.filter(
translator_comment__contains=phrase,
)
developer_subresult = developer_subresult.filter(
developer_comment__contains=phrase,
)
result = result | translator_subresult | developer_subresult
if 'locations' in form.cleaned_data['sfields']:
subresult = units_queryset.filter(locations__contains=phrase)
result = result | subresult
return result
def get_search_step_query(form, units_queryset):
"""Narrows down units query to units matching search string."""
if 'exact' in form.cleaned_data['soptions']:
logging.debug(u"Using exact database search")
return get_search_exact_query(form, units_queryset)
return get_search_query(form, units_queryset)
def get_step_query(request, units_queryset):
"""Narrows down unit query to units matching conditions in GET."""
if 'filter' in request.GET:
unit_filter = request.GET['filter']
username = request.GET.get('user', None)
sort_by_param = request.GET.get('sort', None)
sort_on = 'units'
user = request.profile
if username is not None:
User = get_user_model()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
pass
if unit_filter:
match_queryset = units_queryset.none()
if unit_filter == 'all':
match_queryset = units_queryset
elif unit_filter == 'translated':
match_queryset = units_queryset.filter(state=TRANSLATED)
elif unit_filter == 'untranslated':
match_queryset = units_queryset.filter(state=UNTRANSLATED)
elif unit_filter == 'fuzzy':
match_queryset = units_queryset.filter(state=FUZZY)
elif unit_filter == 'incomplete':
match_queryset = units_queryset.filter(
Q(state=UNTRANSLATED) | Q(state=FUZZY),
)
elif unit_filter == 'suggestions':
match_queryset = units_queryset.filter(
suggestion__state=SuggestionStates.PENDING
).distinct()
elif unit_filter in ('my-suggestions', 'user-suggestions'):
match_queryset = units_queryset.filter(
suggestion__state=SuggestionStates.PENDING,
suggestion__user=user,
).distinct()
sort_on = 'suggestions'
elif unit_filter == 'user-suggestions-accepted':
match_queryset = units_queryset.filter(
suggestion__state=SuggestionStates.ACCEPTED,
suggestion__user=user,
).distinct()
elif unit_filter == 'user-suggestions-rejected':
match_queryset = units_queryset.filter(
suggestion__state=SuggestionStates.REJECTED,
suggestion__user=user,
).distinct()
elif unit_filter in ('my-submissions', 'user-submissions'):
match_queryset = units_queryset.filter(
submission__submitter=user,
submission__type__in=SubmissionTypes.EDIT_TYPES,
).distinct()
sort_on = 'submissions'
elif (unit_filter in ('my-submissions-overwritten',
'user-submissions-overwritten')):
match_queryset = units_queryset.filter(
submission__submitter=user,
).exclude(submitted_by=user).distinct()
elif unit_filter == 'checks' and 'checks' in request.GET:
checks = request.GET['checks'].split(',')
if checks:
match_queryset = units_queryset.filter(
qualitycheck__false_positive=False,
qualitycheck__name__in=checks,
).distinct()
sort_by = ALLOWED_SORTS[sort_on].get(sort_by_param, None)
if sort_by is not None:
if sort_on in SIMPLY_SORTED:
match_queryset = match_queryset.order_by(sort_by)
else:
# It's necessary to use `Max()` here because we can't
# use `distinct()` and `order_by()` at the same time
                    # (unless PostgreSQL is used and `distinct(field_name)`)
sort_by_max = '%s__max' % sort_by
# Omit leading `-` sign
max_field = sort_by[1:] if sort_by[0] == '-' else sort_by
match_queryset = match_queryset.annotate(Max(max_field)) \
.order_by(sort_by_max)
units_queryset = match_queryset
if 'search' in request.GET and 'sfields' in request.GET:
# Accept `sfields` to be a comma-separated string of fields (#46)
GET = request.GET.copy()
sfields = GET['sfields']
if isinstance(sfields, unicode) and u',' in sfields:
GET.setlist('sfields', sfields.split(u','))
# use the search form for validation only
search_form = make_search_form(GET)
if search_form.is_valid():
units_queryset = get_search_step_query(search_form, units_queryset)
return units_queryset
#
# Views used with XMLHttpRequest requests.
#
def _filter_ctx_units(units_qs, unit, how_many, gap=0):
"""Returns ``how_many``*2 units that are before and after ``index``."""
result = {'before': [], 'after': []}
if how_many and unit.index - gap > 0:
before = units_qs.filter(store=unit.store_id, index__lt=unit.index) \
.order_by('-index')[gap:how_many+gap]
result['before'] = _build_units_list(before, reverse=True)
result['before'].reverse()
#FIXME: can we avoid this query if length is known?
if how_many:
after = units_qs.filter(store=unit.store_id,
index__gt=unit.index)[gap:how_many+gap]
result['after'] = _build_units_list(after)
return result
def _prepare_unit(unit):
"""Constructs a dictionary with relevant `unit` data."""
return {
'id': unit.id,
'url': unit.get_translate_url(),
'isfuzzy': unit.isfuzzy(),
'source': [source[1] for source in pluralize_source(unit)],
'target': [target[1] for target in pluralize_target(unit)],
}
def _path_units_with_meta(path, units):
"""Constructs a dictionary which contains a list of `units`
corresponding to `path` as well as its metadata.
"""
meta = None
units_list = []
for unit in iter(units):
if meta is None:
# XXX: Watch out for the query count
store = unit.store
tp = store.translation_project
project = tp.project
meta = {
'source_lang': project.source_language.code,
'source_dir': project.source_language.direction,
'target_lang': tp.language.code,
'target_dir': tp.language.direction,
'project_code': project.code,
'project_style': project.checkstyle,
}
units_list.append(_prepare_unit(unit))
return {
path: {
'meta': meta,
'units': units_list,
},
}
def _build_units_list(units, reverse=False):
"""Given a list/queryset of units, builds a list with the unit data
contained in a dictionary ready to be returned as JSON.
:return: A list with unit id, source, and target texts. In case of
having plural forms, a title for the plural form is also provided.
"""
return_units = []
for unit in iter(units):
return_units.append(_prepare_unit(unit))
return return_units
@ajax_required
def get_units(request):
"""Gets source and target texts and its metadata.
:return: A JSON-encoded string containing the source and target texts
grouped by the store they belong to.
The optional `count` GET parameter defines the chunk size to
consider. The user's preference will be used by default.
When the `initial` GET parameter is present, a sorted list of
the result set ids will be returned too.
"""
pootle_path = request.GET.get('path', None)
if pootle_path is None:
raise Http400(_('Arguments missing.'))
User = get_user_model()
request.profile = User.get(request.user)
limit = request.profile.get_unit_rows()
units_qs = Unit.objects.get_for_path(pootle_path, request.profile)
units_qs = units_qs.select_related(
'store__translation_project__project',
'store__translation_project__language',
)
step_queryset = get_step_query(request, units_qs)
is_initial_request = request.GET.get('initial', False)
chunk_size = request.GET.get('count', limit)
uids_param = filter(None, request.GET.get('uids', '').split(u','))
uids = filter(None, map(to_int, uids_param))
units = None
unit_groups = []
uid_list = []
if is_initial_request:
# Not using `values_list()` here because it doesn't know about all
# existing relations when `extra()` has been used before in the
# queryset. This affects annotated names such as those ending in
# `__max`, where Django thinks we're trying to lookup a field on a
# relationship field.
# https://code.djangoproject.com/ticket/19434
uid_list = [u.id for u in step_queryset]
if len(uids) == 1:
try:
uid = uids[0]
index = uid_list.index(uid)
begin = max(index - chunk_size, 0)
end = min(index + chunk_size + 1, len(uid_list))
uids = uid_list[begin:end]
except ValueError:
raise Http404 # `uid` not found in `uid_list`
else:
count = 2 * chunk_size
units = step_queryset[:count]
if units is None and uids:
units = step_queryset.filter(id__in=uids)
units_by_path = groupby(units, lambda x: x.store.pootle_path)
for pootle_path, units in units_by_path:
unit_groups.append(_path_units_with_meta(pootle_path, units))
response = {
'unitGroups': unit_groups,
}
if uid_list:
response['uIds'] = uid_list
return HttpResponse(jsonify(response), content_type="application/json")
def _is_filtered(request):
"""Checks if unit list is filtered."""
return ('filter' in request.GET or 'checks' in request.GET or
'user' in request.GET or
('search' in request.GET and 'sfields' in request.GET))
@ajax_required
@get_unit_context('view')
def get_more_context(request, unit):
"""Retrieves more context units.
:return: An object in JSON notation that contains the source and target
texts for units that are in the context of unit ``uid``.
"""
store = request.store
json = {}
gap = int(request.GET.get('gap', 0))
qty = int(request.GET.get('qty', 1))
json["ctx"] = _filter_ctx_units(store.units, unit, qty, gap)
rcode = 200
response = jsonify(json)
return HttpResponse(response, status=rcode, content_type="application/json")
@never_cache
@get_unit_context('view')
def timeline(request, unit):
"""Returns a JSON-encoded string including the changes to the unit
rendered in HTML.
"""
timeline = Submission.objects.filter(unit=unit, field__in=[
SubmissionFields.TARGET, SubmissionFields.STATE,
SubmissionFields.COMMENT, SubmissionFields.NONE
]).exclude(
field=SubmissionFields.COMMENT,
creation_time=unit.commented_on
).order_by("id")
timeline = timeline.select_related("submitter__user",
"translation_project__language")
User = get_user_model()
entries_group = []
context = {}
# Group by submitter id and creation_time because
    # different submissions can have the same creation time
for key, values in \
groupby(timeline,
key=lambda x: "%d\001%s" % (x.submitter.id, x.creation_time)):
entry_group = {
'entries': [],
}
for item in values:
# Only add creation_time information for the whole entry group once
entry_group['datetime'] = item.creation_time
# Only add submitter information for the whole entry group once
entry_group.setdefault('submitter', item.submitter)
context.setdefault('language', item.translation_project.language)
entry = {
'field': item.field,
'field_name': SubmissionFields.NAMES_MAP[item.field],
}
if item.field == SubmissionFields.STATE:
entry['old_value'] = STATES_MAP[int(to_python(item.old_value))]
entry['new_value'] = STATES_MAP[int(to_python(item.new_value))]
elif item.check:
entry.update({
'check_name': item.check.name,
'check_display_name': check_names[item.check.name],
'checks_url': reverse('pootle-staticpages-display',
args=['help/quality-checks']),
'action': {
SubmissionTypes.MUTE_CHECK: 'Muted',
SubmissionTypes.UNMUTE_CHECK: 'Unmuted'
}.get(item.type, '')
})
else:
entry['new_value'] = to_python(item.new_value)
entry_group['entries'].append(entry)
entries_group.append(entry_group)
if (len(entries_group) > 0 and
entries_group[0]['datetime'] == unit.creation_time):
entries_group[0]['created'] = True
else:
created = {
'created': True,
'submitter': User.objects.get_system_user(),
}
if unit.creation_time:
created['datetime'] = unit.creation_time
entries_group[:0] = [created]
# Let's reverse the chronological order
entries_group.reverse()
context['entries_group'] = entries_group
if request.is_ajax():
# The client will want to confirm that the response is relevant for
# the unit on screen at the time of receiving this, so we add the uid.
json = {'uid': unit.id}
t = loader.get_template('editor/units/xhr_timeline.html')
c = RequestContext(request, context)
json['timeline'] = t.render(c).replace('\n', '')
response = jsonify(json)
return HttpResponse(response, content_type="application/json")
else:
return render(request, "editor/units/timeline.html", context)
@ajax_required
@require_http_methods(['POST', 'DELETE'])
@get_unit_context('translate')
def comment(request, unit):
"""Dispatches the comment action according to the HTTP verb."""
if request.method == 'DELETE':
return delete_comment(request, unit)
elif request.method == 'POST':
return save_comment(request, unit)
def delete_comment(request, unit):
"""Deletes a comment by blanking its contents and records a new
submission.
"""
unit.commented_by = None
unit.commented_on = None
language = request.translation_project.language
comment_form_class = unit_comment_form_factory(language)
form = comment_form_class({}, instance=unit, request=request)
if form.is_valid():
form.save()
json = {}
rcode = 200
else:
json = {'msg': _("Failed to remove comment.")}
rcode = 400
response = jsonify(json)
return HttpResponse(response, status=rcode, content_type="application/json")
def save_comment(request, unit):
"""Stores a new comment for the given ``unit``.
:return: If the form validates, the cleaned comment is returned.
An error message is returned otherwise.
"""
# Update current unit instance's attributes
unit.commented_by = request.profile
unit.commented_on = timezone.now()
language = request.translation_project.language
form = unit_comment_form_factory(language)(request.POST, instance=unit,
request=request)
if form.is_valid():
form.save()
context = {
'unit': unit,
'language': language,
}
t = loader.get_template('editor/units/xhr_comment.html')
c = RequestContext(request, context)
json = {'comment': t.render(c)}
rcode = 200
else:
json = {'msg': _("Comment submission failed.")}
rcode = 400
response = jsonify(json)
return HttpResponse(response, status=rcode, content_type="application/json")
@never_cache
@ajax_required
@get_unit_context('view')
def get_edit_unit(request, unit):
"""Given a store path ``pootle_path`` and unit id ``uid``, gathers all the
necessary information to build the editing widget.
:return: A templatised editing widget is returned within the ``editor``
variable and paging information is also returned if the page
number has changed.
"""
json = {}
translation_project = request.translation_project
language = translation_project.language
if unit.hasplural():
snplurals = len(unit.source.strings)
else:
snplurals = None
form_class = unit_form_factory(language, snplurals, request)
form = form_class(instance=unit, request=request)
comment_form_class = unit_comment_form_factory(language)
comment_form = comment_form_class({}, instance=unit, request=request)
store = unit.store
directory = store.parent
user = request.profile
alt_src_langs = get_alt_src_langs(request, user, translation_project)
project = translation_project.project
template_vars = {
'unit': unit,
'form': form,
'comment_form': comment_form,
'store': store,
'directory': directory,
'profile': user,
'user': request.user,
'project': project,
'language': language,
'source_language': translation_project.project.source_language,
'cantranslate': check_user_permission(user, "translate", directory),
'cansuggest': check_user_permission(user, "suggest", directory),
'canreview': check_user_permission(user, "review", directory),
'is_admin': check_user_permission(user, 'administrate', directory),
'altsrcs': find_altsrcs(unit, alt_src_langs, store=store,
project=project),
}
if translation_project.project.is_terminology or store.is_terminology:
t = loader.get_template('editor/units/term_edit.html')
else:
t = loader.get_template('editor/units/edit.html')
c = RequestContext(request, template_vars)
json['editor'] = t.render(c)
json['tm_suggestions'] = unit.get_tm_suggestions()
rcode = 200
# Return context rows if filtering is applied but
# don't return any if the user has asked not to have it
current_filter = request.GET.get('filter', 'all')
show_ctx = request.COOKIES.get('ctxShow', 'true')
if ((_is_filtered(request) or current_filter not in ('all',)) and
show_ctx == 'true'):
# TODO: review if this first 'if' branch makes sense
if translation_project.project.is_terminology or store.is_terminology:
json['ctx'] = _filter_ctx_units(store.units, unit, 0)
else:
ctx_qty = int(request.COOKIES.get('ctxQty', 1))
json['ctx'] = _filter_ctx_units(store.units, unit, ctx_qty)
response = jsonify(json)
return HttpResponse(response, status=rcode, content_type="application/json")
@get_unit_context('view')
def permalink_redirect(request, unit):
return redirect(request.build_absolute_uri(unit.get_translate_url()))
@ajax_required
@get_path_obj
@permission_required('view')
@get_resource
def get_qualitycheck_stats(request, *args, **kwargs):
failing_checks = request.resource_obj.get_checks()
response = jsonify(failing_checks)
    return HttpResponse(response, content_type="application/json")
@ajax_required
@get_path_obj
@permission_required('view')
@get_resource
def get_overview_stats(request, *args, **kwargs):
stats = request.resource_obj.get_stats()
response = jsonify(stats)
    return HttpResponse(response, content_type="application/json")
@ajax_required
@get_unit_context('translate')
def submit(request, unit):
"""Processes translation submissions and stores them in the database.
:return: An object in JSON notation that contains the previous and last
units for the unit next to unit ``uid``.
"""
json = {}
translation_project = request.translation_project
language = translation_project.language
if unit.hasplural():
snplurals = len(unit.source.strings)
else:
snplurals = None
# Store current time so that it is the same for all submissions
current_time = timezone.now()
form_class = unit_form_factory(language, snplurals, request)
form = form_class(request.POST, instance=unit, request=request)
if form.is_valid():
if form.updated_fields:
for field, old_value, new_value in form.updated_fields:
sub = Submission(
creation_time=current_time,
translation_project=translation_project,
submitter=request.profile,
unit=unit,
store=unit.store,
field=field,
type=SubmissionTypes.NORMAL,
old_value=old_value,
new_value=new_value,
similarity=form.cleaned_data['similarity'],
mt_similarity=form.cleaned_data['mt_similarity'],
)
sub.save()
# Update current unit instance's attributes
# important to set these attributes after saving Submission
# because we need to access the unit's state before it was saved
if SubmissionFields.TARGET in \
map(lambda x: x[0], form.updated_fields):
form.instance.submitted_by = request.profile
form.instance.submitted_on = current_time
form.instance.reviewed_by = None
form.instance.reviewed_on = None
form.instance._log_user = request.profile
form.save()
translation_submitted.send(
sender=translation_project,
unit=form.instance,
profile=request.profile,
)
has_critical_checks = unit.qualitycheck_set.filter(
category=Category.CRITICAL
).exists()
if has_critical_checks:
can_review = check_user_permission(request.profile, 'review',
unit.store.parent)
ctx = {
'canreview': can_review,
'unit': unit
}
template = loader.get_template('editor/units/xhr_checks.html')
context = RequestContext(request, ctx)
json['checks'] = template.render(context)
rcode = 200
json['user_score'] = request.profile.public_score
else:
# Form failed
#FIXME: we should display validation errors here
rcode = 400
json["msg"] = _("Failed to process submission.")
response = jsonify(json)
return HttpResponse(response, status=rcode, content_type="application/json")
@ajax_required
@get_unit_context('suggest')
def suggest(request, unit):
"""Processes translation suggestions and stores them in the database.
:return: An object in JSON notation that contains the previous and last
units for the unit next to unit ``uid``.
"""
json = {}
translation_project = request.translation_project
language = translation_project.language
if unit.hasplural():
snplurals = len(unit.source.strings)
else:
snplurals = None
form_class = unit_form_factory(language, snplurals, request)
form = form_class(request.POST, instance=unit, request=request)
if form.is_valid():
if form.instance._target_updated:
# TODO: Review if this hackish method is still necessary
#HACKISH: django 1.2 stupidly modifies instance on
# model form validation, reload unit from db
unit = Unit.objects.get(id=unit.id)
unit.add_suggestion(
form.cleaned_data['target_f'],
user=request.profile,
similarity=form.cleaned_data['similarity'],
mt_similarity=form.cleaned_data['mt_similarity'],
)
json['user_score'] = request.profile.public_score
rcode = 200
else:
# Form failed
#FIXME: we should display validation errors here
rcode = 400
json["msg"] = _("Failed to process suggestion.")
response = jsonify(json)
return HttpResponse(response, status=rcode, content_type="application/json")
@ajax_required
@get_unit_context('review')
def reject_suggestion(request, unit, suggid):
json = {
'udbid': unit.id,
'sugid': suggid,
}
if request.POST.get('reject'):
try:
sugg = unit.suggestion_set.get(id=suggid)
except ObjectDoesNotExist:
raise Http404
unit.reject_suggestion(sugg, request.translation_project,
request.profile)
json['user_score'] = request.profile.public_score
response = jsonify(json)
return HttpResponse(response, content_type="application/json")
@ajax_required
@get_unit_context('review')
def accept_suggestion(request, unit, suggid):
json = {
'udbid': unit.id,
'sugid': suggid,
}
if request.POST.get('accept'):
try:
suggestion = unit.suggestion_set.get(id=suggid)
except ObjectDoesNotExist:
raise Http404
unit.accept_suggestion(suggestion, request.translation_project,
request.profile)
json['user_score'] = request.profile.public_score
json['newtargets'] = [highlight_whitespace(target)
for target in unit.target.strings]
json['newdiffs'] = {}
for sugg in unit.get_suggestions():
json['newdiffs'][sugg.id] = \
[highlight_diffs(unit.target.strings[i], target)
for i, target in enumerate(sugg.target.strings)]
response = jsonify(json)
return HttpResponse(response, content_type="application/json")
@ajax_required
@get_unit_context('review')
def toggle_qualitycheck(request, unit, check_id):
json = {}
json["udbid"] = unit.id
json["checkid"] = check_id
try:
unit.toggle_qualitycheck(check_id,
bool(request.POST.get('mute')), request.profile)
except ObjectDoesNotExist:
raise Http404
response = jsonify(json)
return HttpResponse(response, content_type="application/json")
|
gpl-2.0
| 359,415,182,582,297,860
| 34.474399
| 80
| 0.605732
| false
| 4.168079
| false
| false
| false
|
rgbkrk/binder
|
web/app.py
|
1
|
4824
|
import Queue
import json
import signal
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
from tornado.httpserver import HTTPServer
from binder.service import Service
from binder.app import App
from .builder import Builder
# TODO move settings into a config file
PORT = 8080
NUM_WORKERS = 10
PRELOAD = True
QUEUE_SIZE = 50
ALLOW_ORIGIN = True
build_queue = Queue.Queue(QUEUE_SIZE)
class BinderHandler(RequestHandler):
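    """Base handler that adds a permissive CORS header to responses when
    ALLOW_ORIGIN is enabled. (Descriptive docstring added by the editor.)"""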
def get(self):
if ALLOW_ORIGIN:
self.set_header('Access-Control-Allow-Origin', '*')
def post(self):
if ALLOW_ORIGIN:
self.set_header('Access-Control-Allow-Origin', '*')
class BuildHandler(BinderHandler):
def _is_malformed(self, spec):
# by default, there aren't any required fields in an app specification
pass
def _write_build_state(self, app):
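        """Writes the app's build state to the response as a JSON
        ``build_status`` field. (Descriptive docstring added by the editor.)"""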
if app.build_state == App.BuildState.BUILDING:
self.write({"build_status": "building"})
elif app.build_state == App.BuildState.FAILED:
self.write({"build_status": "failed"})
elif app.build_state == App.BuildState.COMPLETED:
self.write({"build_status": "completed"})
else:
self.write({"build_status": "unknown"})
class GithubHandler(BuildHandler):
def _is_malformed(self, spec):
# in the GithubHandler, the repo field is inferred from organization/repo
return "repo" in spec
def _make_app_name(self, organization, repo):
return organization + "-" + repo
class GithubStatusHandler(GithubHandler):
def get(self, organization, repo):
super(GithubStatusHandler, self).get()
app_name = self._make_app_name(organization, repo)
app = App.get_app(app_name)
if not app:
self.set_status(404)
self.write({"error": "app does not exist"})
else:
self._write_build_state(app)
class GithubBuildHandler(GithubHandler):
@gen.coroutine
def get(self, organization, repo):
# if the app is still building, return an error. If the app is built, deploy it and return
# the redirect url
        super(GithubBuildHandler, self).get()
app_name = self._make_app_name(organization, repo)
app = App.get_app(app_name)
if app and app.build_state == App.BuildState.COMPLETED:
redirect_url = app.deploy("single-node")
self.write({"redirect_url": redirect_url})
else:
self.set_status(404)
self.write({"error": "no app available to deploy"})
def post(self, organization, repo):
# if the spec is properly formed, create/build the app
super(GithubBuildHandler, self).post()
print("request.body: {}".format(self.request.body))
spec = json.loads(self.request.body)
if self._is_malformed(spec):
self.set_status(400)
self.write({"error": "malformed app specification"})
else:
try:
spec["name"] = self._make_app_name(organization, repo)
spec["repo"] = "https://www.github.com/{0}/{1}".format(organization, repo)
build_queue.put(spec)
self.write({"success": "app submitted to build queue"})
except Queue.Full:
self.write({"error": "build queue full"})
class OtherSourceHandler(BuildHandler):
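    """Placeholder for building apps from sources other than GitHub
    (handlers not yet implemented). (Docstring added by the editor.)"""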
def get(self, app_id):
pass
def post(self, app_id):
pass
class ServicesHandler(BinderHandler):
def get(self):
super(ServicesHandler, self).get()
services = Service.get_service()
self.write({"services": [service.full_name for service in services]})
class AppsHandler(BinderHandler):
def get(self):
super(AppsHandler, self).get()
apps = App.get_app()
self.write({"apps": [app.name for app in apps]})
def sig_handler(sig, frame):
IOLoop.instance().add_callback(shutdown)
def shutdown():
print("Shutting down...")
IOLoop.instance().stop()
builder.stop()
def main():
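    """Wires up the API routes, signal handlers and the build worker,
    then starts the Tornado IOLoop. (Docstring added by the editor.)"""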
application = Application([
(r"/apps/(?P<organization>.+)/(?P<repo>.+)/status", GithubStatusHandler),
(r"/apps/(?P<organization>.+)/(?P<repo>.+)", GithubBuildHandler),
(r"/apps/(?P<app_id>.+)", OtherSourceHandler),
(r"/services", ServicesHandler),
(r"/apps", AppsHandler)
], debug=True)
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
global builder
builder = Builder(build_queue, PRELOAD)
builder.start()
http_server = HTTPServer(application)
http_server.listen(PORT)
print("Binder API server running on port {}".format(PORT))
IOLoop.current().start()
if __name__ == "__main__":
main()
|
apache-2.0
| 1,329,055,373,027,206,000
| 28.595092
| 98
| 0.623134
| false
| 3.899757
| false
| false
| false
|
blaze33/django
|
tests/modeltests/model_forms/tests.py
|
1
|
66035
|
from __future__ import absolute_import, unicode_literals
import datetime
import os
from decimal import Decimal
from django import forms
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import ValidationError
from django.db import connection
from django.db.models.query import EmptyQuerySet
from django.forms.models import model_to_dict
from django.utils._os import upath
from django.utils.unittest import skipUnless
from django.test import TestCase
from django.utils import six
from .models import (Article, ArticleStatus, BetterWriter, BigInt, Book,
Category, CommaSeparatedInteger, CustomFieldForExclusionModel, DerivedBook,
DerivedPost, ExplicitPK, FlexibleDatePost, ImprovedArticle,
ImprovedArticleWithParentLink, Inventory, Post, Price,
Product, TextFile, Writer, WriterProfile, test_images)
if test_images:
from .models import ImageFile, OptionalImageFile
class ImageFileForm(forms.ModelForm):
class Meta:
model = ImageFile
class OptionalImageFileForm(forms.ModelForm):
class Meta:
model = OptionalImageFile
class ProductForm(forms.ModelForm):
class Meta:
model = Product
class PriceForm(forms.ModelForm):
class Meta:
model = Price
class BookForm(forms.ModelForm):
class Meta:
model = Book
class DerivedBookForm(forms.ModelForm):
class Meta:
model = DerivedBook
class ExplicitPKForm(forms.ModelForm):
class Meta:
model = ExplicitPK
fields = ('key', 'desc',)
class PostForm(forms.ModelForm):
class Meta:
model = Post
class DerivedPostForm(forms.ModelForm):
class Meta:
model = DerivedPost
class CustomWriterForm(forms.ModelForm):
name = forms.CharField(required=False)
class Meta:
model = Writer
class FlexDatePostForm(forms.ModelForm):
class Meta:
model = FlexibleDatePost
class BaseCategoryForm(forms.ModelForm):
class Meta:
model = Category
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
class PartialArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ('headline','pub_date')
class RoykoForm(forms.ModelForm):
class Meta:
model = Writer
class TestArticleForm(forms.ModelForm):
class Meta:
model = Article
class PartialArticleFormWithSlug(forms.ModelForm):
class Meta:
model = Article
fields=('headline', 'slug', 'pub_date')
class ArticleStatusForm(forms.ModelForm):
class Meta:
model = ArticleStatus
class InventoryForm(forms.ModelForm):
class Meta:
model = Inventory
class SelectInventoryForm(forms.Form):
items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
class CustomFieldForExclusionForm(forms.ModelForm):
class Meta:
model = CustomFieldForExclusionModel
fields = ['name', 'markup']
class ShortCategory(forms.ModelForm):
name = forms.CharField(max_length=5)
slug = forms.CharField(max_length=5)
url = forms.CharField(max_length=3)
class ImprovedArticleForm(forms.ModelForm):
class Meta:
model = ImprovedArticle
class ImprovedArticleWithParentLinkForm(forms.ModelForm):
class Meta:
model = ImprovedArticleWithParentLink
class BetterWriterForm(forms.ModelForm):
class Meta:
model = BetterWriter
class WriterProfileForm(forms.ModelForm):
class Meta:
model = WriterProfile
class TextFileForm(forms.ModelForm):
class Meta:
model = TextFile
class BigIntForm(forms.ModelForm):
class Meta:
model = BigInt
class ModelFormWithMedia(forms.ModelForm):
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
class Meta:
model = TextFile
class CommaSeparatedIntegerForm(forms.ModelForm):
class Meta:
model = CommaSeparatedInteger
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
exclude = ('quantity',)
class ModelFormBaseTest(TestCase):
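    """Tests covering how ModelForm subclasses derive their fields from the
    model and from the Meta options (fields, exclude, inheritance).
    (Descriptive docstring added by the editor.)"""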
def test_base_form(self):
self.assertEqual(list(BaseCategoryForm.base_fields),
['name', 'slug', 'url'])
def test_extra_fields(self):
class ExtraFields(BaseCategoryForm):
some_extra_field = forms.BooleanField()
self.assertEqual(list(ExtraFields.base_fields),
['name', 'slug', 'url', 'some_extra_field'])
def test_replace_field(self):
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
self.assertTrue(isinstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField))
def test_override_field(self):
class WriterForm(forms.ModelForm):
book = forms.CharField(required=False)
class Meta:
model = Writer
wf = WriterForm({'name': 'Richard Lockridge'})
self.assertTrue(wf.is_valid())
def test_limit_fields(self):
class LimitFields(forms.ModelForm):
class Meta:
model = Category
fields = ['url']
self.assertEqual(list(LimitFields.base_fields),
['url'])
def test_exclude_fields(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['url']
self.assertEqual(list(ExcludeFields.base_fields),
['name', 'slug'])
def test_confused_form(self):
class ConfusedForm(forms.ModelForm):
""" Using 'fields' *and* 'exclude'. Not sure why you'd want to do
this, but uh, "be liberal in what you accept" and all.
"""
class Meta:
model = Category
fields = ['name', 'url']
exclude = ['url']
self.assertEqual(list(ConfusedForm.base_fields),
['name'])
def test_mixmodel_form(self):
class MixModelForm(BaseCategoryForm):
""" Don't allow more than one 'model' definition in the
inheritance hierarchy. Technically, it would generate a valid
form, but the fact that the resulting save method won't deal with
multiple objects is likely to trip up people not familiar with the
mechanics.
"""
class Meta:
model = Article
# MixModelForm is now an Article-related thing, because MixModelForm.Meta
# overrides BaseCategoryForm.Meta.
self.assertEqual(
list(MixModelForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_article_form(self):
self.assertEqual(
list(ArticleForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_bad_form(self):
#First class with a Meta class wins...
class BadForm(ArticleForm, BaseCategoryForm):
pass
self.assertEqual(
list(BadForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_subcategory_form(self):
class SubCategoryForm(BaseCategoryForm):
""" Subclassing without specifying a Meta on the class will use
the parent's Meta (or the first parent in the MRO if there are
multiple parent classes).
"""
pass
self.assertEqual(list(SubCategoryForm.base_fields),
['name', 'slug', 'url'])
def test_subclassmeta_form(self):
class SomeCategoryForm(forms.ModelForm):
checkbox = forms.BooleanField()
class Meta:
model = Category
class SubclassMeta(SomeCategoryForm):
""" We can also subclass the Meta inner class to change the fields
list.
"""
class Meta(SomeCategoryForm.Meta):
exclude = ['url']
self.assertHTMLEqual(
str(SubclassMeta()),
"""<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th><td><input type="checkbox" name="checkbox" id="id_checkbox" /></td></tr>"""
)
def test_orderfields_form(self):
class OrderFields(forms.ModelForm):
class Meta:
model = Category
fields = ['url', 'name']
self.assertEqual(list(OrderFields.base_fields),
['url', 'name'])
self.assertHTMLEqual(
str(OrderFields()),
"""<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>"""
)
def test_orderfields2_form(self):
class OrderFields2(forms.ModelForm):
class Meta:
model = Category
fields = ['slug', 'url', 'name']
exclude = ['url']
self.assertEqual(list(OrderFields2.base_fields),
['slug', 'name'])
class TestWidgetForm(forms.ModelForm):
class Meta:
model = Category
fields = ['name', 'url', 'slug']
widgets = {
'name': forms.Textarea,
'url': forms.TextInput(attrs={'class': 'url'})
}
class TestWidgets(TestCase):
def test_base_widgets(self):
frm = TestWidgetForm()
self.assertHTMLEqual(
str(frm['name']),
'<textarea id="id_name" rows="10" cols="40" name="name"></textarea>'
)
self.assertHTMLEqual(
str(frm['url']),
'<input id="id_url" type="text" class="url" name="url" maxlength="40" />'
)
self.assertHTMLEqual(
str(frm['slug']),
'<input id="id_slug" type="text" name="slug" maxlength="20" />'
)
class IncompleteCategoryFormWithFields(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
fields = ('name', 'slug')
model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
exclude = ['url']
model = Category
class ValidationTest(TestCase):
def test_validates_with_replaced_field_not_specified(self):
form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
assert form.is_valid()
def test_validates_with_replaced_field_excluded(self):
form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
assert form.is_valid()
def test_notrequired_overrides_notblank(self):
form = CustomWriterForm({})
assert form.is_valid()
# unique/unique_together validation
class UniqueTest(TestCase):
def setUp(self):
self.writer = Writer.objects.create(name='Mike Royko')
def test_simple_unique(self):
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertTrue(form.is_valid())
obj = form.save()
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.'])
form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
self.assertTrue(form.is_valid())
def test_unique_together(self):
"""ModelForm test of unique_together constraint"""
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertTrue(form.is_valid())
form.save()
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])
def test_unique_null(self):
title = 'I May Be Wrong But I Doubt It'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
def test_inherited_unique(self):
title = 'Boss'
Book.objects.create(title=title, author=self.writer, special_id=1)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.'])
def test_inherited_unique_together(self):
title = 'Boss'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
def test_abstract_inherited_unique(self):
title = 'Boss'
isbn = '12345'
dbook = DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'isbn': isbn})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.'])
def test_abstract_inherited_unique_together(self):
title = 'Boss'
isbn = '12345'
dbook = DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({
'title': 'Other',
'author': self.writer.pk,
'isbn': '9876',
'suffix1': '0',
'suffix2': '0'
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'],
['Derived book with this Suffix1 and Suffix2 already exists.'])
def test_explicitpk_unspecified(self):
"""Test for primary_key being in the form and failing validation."""
form = ExplicitPKForm({'key': '', 'desc': '' })
self.assertFalse(form.is_valid())
def test_explicitpk_unique(self):
"""Ensure keys and blank character strings are tested for uniqueness."""
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertTrue(form.is_valid())
form.save()
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 3)
self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.'])
self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.'])
self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
def test_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
form = PostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['posted'], ['This field is required.'])
def test_inherited_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
form = DerivedPostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
def test_unique_for_date_with_nullable_date(self):
p = FlexibleDatePost.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = FlexDatePostForm({'title': "Django 1.0 is released"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'slug': "Django 1.0"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0"}, instance=p)
self.assertTrue(form.is_valid())
class ModelToDictTests(TestCase):
"""
Tests for forms.models.model_to_dict
"""
def test_model_to_dict_many_to_many(self):
categories=[
Category(name='TestName1', slug='TestName1', url='url1'),
Category(name='TestName2', slug='TestName2', url='url2'),
Category(name='TestName3', slug='TestName3', url='url3')
]
for c in categories:
c.save()
writer = Writer(name='Test writer')
writer.save()
art = Article(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=writer,
article='Hello.'
)
art.save()
for c in categories:
art.categories.add(c)
art.save()
with self.assertNumQueries(1):
d = model_to_dict(art)
#Ensure all many-to-many categories appear in model_to_dict
for c in categories:
self.assertIn(c.pk, d['categories'])
#Ensure many-to-many relation appears as a list
self.assertIsInstance(d['categories'], list)
class OldFormForXTests(TestCase):
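    """End-to-end tests for ModelForm rendering, validation and saving.
    (Descriptive docstring added by the editor.)"""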
def test_base_form(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm()
self.assertHTMLEqual(
str(f),
"""<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>"""
)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" /></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" /></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" /></li>"""
)
self.assertHTMLEqual(
str(f["name"]),
"""<input id="id_name" type="text" name="name" maxlength="20" />""")
def test_auto_id(self):
f = BaseCategoryForm(auto_id=False)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li>Name: <input type="text" name="name" maxlength="20" /></li>
<li>Slug: <input type="text" name="slug" maxlength="20" /></li>
<li>The URL: <input type="text" name="url" maxlength="40" /></li>"""
)
def test_with_data(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm({'name': 'Entertainment',
'slug': 'entertainment',
'url': 'entertainment'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Entertainment')
self.assertEqual(f.cleaned_data['slug'], 'entertainment')
self.assertEqual(f.cleaned_data['url'], 'entertainment')
c1 = f.save()
        # Testing whether the same object is returned from the
# ORM... not the fastest way...
self.assertEqual(c1, Category.objects.all()[0])
self.assertEqual(c1.name, "Entertainment")
self.assertEqual(Category.objects.count(), 1)
f = BaseCategoryForm({'name': "It's a test",
'slug': 'its-test',
'url': 'test'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], "It's a test")
self.assertEqual(f.cleaned_data['slug'], 'its-test')
self.assertEqual(f.cleaned_data['url'], 'test')
c2 = f.save()
        # Testing whether the same object is returned from the
# ORM... not the fastest way...
self.assertEqual(c2, Category.objects.get(pk=c2.pk))
self.assertEqual(c2.name, "It's a test")
self.assertEqual(Category.objects.count(), 2)
# If you call save() with commit=False, then it will return an object that
# hasn't yet been saved to the database. In this case, it's up to you to call
# save() on the resulting model instance.
f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['url'], 'third')
self.assertEqual(f.cleaned_data['name'], 'Third test')
self.assertEqual(f.cleaned_data['slug'], 'third-test')
c3 = f.save(commit=False)
self.assertEqual(c3.name, "Third test")
self.assertEqual(Category.objects.count(), 2)
c3.save()
self.assertEqual(Category.objects.count(), 3)
# If you call save() with invalid data, you'll get a ValueError.
f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
self.assertEqual(f.errors['name'], ['This field is required.'])
self.assertEqual(f.errors['slug'], ["Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."])
self.assertEqual(f.cleaned_data, {'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
# Create a couple of Writers.
w_royko = Writer(name='Mike Royko')
w_royko.save()
w_woodward = Writer(name='Bob Woodward')
w_woodward.save()
# ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
# fields with the 'choices' attribute are represented by a ChoiceField.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(six.text_type(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
<tr><th>Writer:</th><td><select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article"></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select><br /><span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
# You can restrict a form to a subset of the complete list of fields
# by providing a 'fields' argument. If you try to save a
# model created with such a form, you need to ensure that the fields
# that are _not_ on the form have default values, or are allowed to have
# a value of None. If a field isn't specified on a form, the object created
# from the form can't provide a value for that field!
f = PartialArticleForm(auto_id=False)
self.assertHTMLEqual(six.text_type(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>''')
# When the ModelForm is passed an instance, that instance's current values are
# inserted as 'initial' data in each Field.
w = Writer.objects.get(name='Mike Royko')
f = RoykoForm(auto_id=False, instance=w)
self.assertHTMLEqual(six.text_type(f), '''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" /><br /><span class="helptext">Use both first and last names.</span></td></tr>''')
art = Article(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=w,
article='Hello.'
)
art.save()
art_id_1 = art.id
self.assertEqual(art_id_1 is not None, True)
f = TestArticleForm(auto_id=False, instance=art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
f = TestArticleForm({
'headline': 'Test headline',
'slug': 'test-headline',
'pub_date': '1984-02-06',
'writer': six.text_type(w_royko.pk),
'article': 'Hello.'
}, instance=art)
self.assertEqual(f.errors, {})
self.assertEqual(f.is_valid(), True)
test_art = f.save()
self.assertEqual(test_art.id == art_id_1, True)
test_art = Article.objects.get(id=art_id_1)
self.assertEqual(test_art.headline, 'Test headline')
# You can create a form over a subset of the available fields
# by specifying a 'fields' argument to form_for_instance.
f = PartialArticleFormWithSlug({
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04'
}, auto_id=False, instance=art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>''')
self.assertEqual(f.is_valid(), True)
new_art = f.save()
self.assertEqual(new_art.id == art_id_1, True)
new_art = Article.objects.get(id=art_id_1)
self.assertEqual(new_art.headline, 'New headline')
# Add some categories and test the many-to-many form output.
self.assertQuerysetEqual(new_art.categories.all(), [])
new_art.categories.add(Category.objects.get(name='Entertainment'))
self.assertQuerysetEqual(new_art.categories.all(), ["Entertainment"])
f = TestArticleForm(auto_id=False, instance=new_art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
# Initial values can be provided for model forms
f = TestArticleForm(
auto_id=False,
initial={
'headline': 'Your headline here',
'categories': [str(c1.id), str(c2.id)]
})
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s" selected="selected">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
f = TestArticleForm({
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04',
'writer': six.text_type(w_royko.pk),
'article': 'Hello.',
'categories': [six.text_type(c1.id), six.text_type(c2.id)]
}, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id == art_id_1, True)
new_art = Article.objects.get(id=art_id_1)
self.assertQuerysetEqual(new_art.categories.order_by('name'),
["Entertainment", "It's a test"])
# Now, submit form data with no categories. This deletes the existing categories.
f = TestArticleForm({'headline': 'New headline', 'slug': 'new-headline', 'pub_date': '1988-01-04',
'writer': six.text_type(w_royko.pk), 'article': 'Hello.'}, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id == art_id_1, True)
new_art = Article.objects.get(id=art_id_1)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Create a new article, with categories, via the form.
f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
'writer': six.text_type(w_royko.pk), 'article': 'Test.', 'categories': [six.text_type(c1.id), six.text_type(c2.id)]})
new_art = f.save()
art_id_2 = new_art.id
self.assertEqual(art_id_2 not in (None, art_id_1), True)
new_art = Article.objects.get(id=art_id_2)
self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"])
# Create a new article, with no categories, via the form.
f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
'writer': six.text_type(w_royko.pk), 'article': 'Test.'})
new_art = f.save()
art_id_3 = new_art.id
self.assertEqual(art_id_3 not in (None, art_id_1, art_id_2), True)
new_art = Article.objects.get(id=art_id_3)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Create a new article, with categories, via the form, but use commit=False.
# The m2m data won't be saved until save_m2m() is invoked on the form.
f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
'writer': six.text_type(w_royko.pk), 'article': 'Test.', 'categories': [six.text_type(c1.id), six.text_type(c2.id)]})
new_art = f.save(commit=False)
# Manually save the instance
new_art.save()
art_id_4 = new_art.id
self.assertEqual(art_id_4 not in (None, art_id_1, art_id_2, art_id_3), True)
# The instance doesn't have m2m data yet
new_art = Article.objects.get(id=art_id_4)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Save the m2m data on the form
f.save_m2m()
self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"])
# Here, we define a custom ModelForm. Because it happens to have the same fields as
# the Category model, we can just call the form's save() to apply its changes to an
# existing Category instance.
cat = Category.objects.get(name='Third test')
self.assertEqual(cat.name, "Third test")
self.assertEqual(cat.id == c3.id, True)
form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
self.assertEqual(form.save().name, 'Third')
self.assertEqual(Category.objects.get(id=c3.id).name, 'Third')
# Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
# at runtime, based on the data in the database when the form is displayed, not
# the data in the database when the form is instantiated.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
c4 = Category.objects.create(name='Fourth', url='4th')
self.assertEqual(c4.name, 'Fourth')
w_bernstein = Writer.objects.create(name='Carl Bernstein')
self.assertEqual(w_bernstein.name, 'Carl Bernstein')
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third</option>
<option value="%s">Fourth</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_bernstein.pk, w_royko.pk, c1.pk, c2.pk, c3.pk, c4.pk))
# ModelChoiceField ############################################################
f = forms.ModelChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
('', '---------'),
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third'),
(c4.pk, 'Fourth')])
with self.assertRaises(ValidationError):
f.clean('')
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean(0)
self.assertEqual(f.clean(c3.id).name, 'Third')
self.assertEqual(f.clean(c2.id).name, "It's a test")
# Add a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
c5 = Category.objects.create(name='Fifth', url='5th')
self.assertEqual(c5.name, 'Fifth')
self.assertEqual(f.clean(c5.id).name, 'Fifth')
# Delete a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='5th').delete()
with self.assertRaises(ValidationError):
f.clean(c5.id)
f = forms.ModelChoiceField(Category.objects.filter(pk=c1.id), required=False)
self.assertEqual(f.clean(''), None)
f.clean('')
self.assertEqual(f.clean(str(c1.id)).name, "Entertainment")
with self.assertRaises(ValidationError):
f.clean('100')
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Fourth')
self.assertEqual(list(f.choices), [
('', '---------'),
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third')])
self.assertEqual(f.clean(c3.id).name, 'Third')
with self.assertRaises(ValidationError):
f.clean(c4.id)
# check that we can safely iterate choices repeatedly
gen_one = list(f.choices)
gen_two = f.choices
self.assertEqual(gen_one[2], (c2.pk, "It's a test"))
self.assertEqual(list(gen_two), [
('', '---------'),
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third')])
# check that we can override the label_from_instance method to print custom labels (#4620)
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "category " + str(obj)
self.assertEqual(list(f.choices), [
('', '---------'),
(c1.pk, 'category Entertainment'),
(c2.pk, "category It's a test"),
(c3.pk, 'category Third'),
(c4.pk, 'category Fourth')])
# ModelMultipleChoiceField ####################################################
f = forms.ModelMultipleChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third'),
(c4.pk, 'Fourth')])
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean([])
self.assertQuerysetEqual(f.clean([c1.id]), ["Entertainment"])
self.assertQuerysetEqual(f.clean([c2.id]), ["It's a test"])
self.assertQuerysetEqual(f.clean([str(c1.id)]), ["Entertainment"])
self.assertQuerysetEqual(f.clean([str(c1.id), str(c2.id)]), ["Entertainment", "It's a test"])
self.assertQuerysetEqual(f.clean([c1.id, str(c2.id)]), ["Entertainment", "It's a test"])
self.assertQuerysetEqual(f.clean((c1.id, str(c2.id))), ["Entertainment", "It's a test"])
with self.assertRaises(ValidationError):
f.clean(['100'])
with self.assertRaises(ValidationError):
f.clean('hello')
with self.assertRaises(ValidationError):
f.clean(['fail'])
# Add a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
# Note, we are using an id of 1006 here since tests that run before
# this may create categories with primary keys up to 6. Use
        # a number that will not conflict.
c6 = Category.objects.create(id=1006, name='Sixth', url='6th')
self.assertEqual(c6.name, 'Sixth')
self.assertQuerysetEqual(f.clean([c6.id]), ["Sixth"])
# Delete a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='6th').delete()
with self.assertRaises(ValidationError):
f.clean([c6.id])
f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
self.assertIsInstance(f.clean([]), EmptyQuerySet)
self.assertIsInstance(f.clean(()), EmptyQuerySet)
with self.assertRaises(ValidationError):
f.clean(['10'])
with self.assertRaises(ValidationError):
f.clean([str(c3.id), '10'])
with self.assertRaises(ValidationError):
f.clean([str(c1.id), '10'])
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Fourth')
self.assertEqual(list(f.choices), [
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third')])
self.assertQuerysetEqual(f.clean([c3.id]), ["Third"])
with self.assertRaises(ValidationError):
f.clean([c4.id])
with self.assertRaises(ValidationError):
f.clean([str(c3.id), str(c4.id)])
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "multicategory " + str(obj)
self.assertEqual(list(f.choices), [
(c1.pk, 'multicategory Entertainment'),
(c2.pk, "multicategory It's a test"),
(c3.pk, 'multicategory Third'),
(c4.pk, 'multicategory Fourth')])
# OneToOneField ###############################################################
self.assertEqual(list(ImprovedArticleForm.base_fields), ['article'])
self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])
bw = BetterWriter(name='Joe Better', score=10)
bw.save()
self.assertEqual(sorted(model_to_dict(bw)),
['id', 'name', 'score', 'writer_ptr'])
form = BetterWriterForm({'name': 'Some Name', 'score': 12})
self.assertEqual(form.is_valid(), True)
bw2 = form.save()
bw2.delete()
form = WriterProfileForm()
self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Joe Better</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="text" name="age" id="id_age" /></p>''' % (w_woodward.pk, w_bernstein.pk, bw.pk, w_royko.pk))
data = {
'writer': six.text_type(w_woodward.pk),
'age': '65',
}
form = WriterProfileForm(data)
instance = form.save()
self.assertEqual(six.text_type(instance), 'Bob Woodward is 65')
form = WriterProfileForm(instance=instance)
self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="">---------</option>
<option value="%s" selected="selected">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Joe Better</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="text" name="age" value="65" id="id_age" /></p>''' % (w_woodward.pk, w_bernstein.pk, bw.pk, w_royko.pk))
def test_file_field(self):
# Test conditions when files is either not given or empty.
f = TextFileForm(data={'description': 'Assistance'})
self.assertEqual(f.is_valid(), False)
f = TextFileForm(data={'description': 'Assistance'}, files={})
self.assertEqual(f.is_valid(), False)
# Upload a file and ensure it all works as expected.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
instance.file.delete()
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Check if the max_length attribute has been inherited from the model.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')})
self.assertEqual(f.is_valid(), False)
# Edit an instance that already has the file defined in the model. This will not
# save the file again, but leave it exactly as it is.
f = TextFileForm(
data={'description': 'Assistance'},
instance=instance)
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
# Override the file by uploading a new one.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', b'hello world')}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
# Test the non-required FileField
f = TextFileForm(data={'description': 'Assistance'})
f.fields['file'].required = False
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, '')
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', b'hello world')}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Instance can be edited w/out re-uploading the file and existing file should be preserved.
f = TextFileForm(
data={'description': 'New Description'},
instance=instance)
f.fields['file'].required = False
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
def test_big_integer_field(self):
bif = BigIntForm({'biggie': '-9223372036854775808'})
self.assertEqual(bif.is_valid(), True)
bif = BigIntForm({'biggie': '-9223372036854775809'})
self.assertEqual(bif.is_valid(), False)
self.assertEqual(bif.errors, {'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']})
bif = BigIntForm({'biggie': '9223372036854775807'})
self.assertEqual(bif.is_valid(), True)
bif = BigIntForm({'biggie': '9223372036854775808'})
self.assertEqual(bif.is_valid(), False)
self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']})
@skipUnless(test_images, "PIL not installed")
def test_image_field(self):
        # ImageField and FileField are nearly identical, but they differ slightly when
# it comes to validation. This specifically tests that #6302 is fixed for
# both file fields and image fields.
with open(os.path.join(os.path.dirname(upath(__file__)), "test.png"), 'rb') as fp:
image_data = fp.read()
with open(os.path.join(os.path.dirname(upath(__file__)), "test2.png"), 'rb') as fp:
image_data2 = fp.read()
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Edit an instance that already has the (required) image defined in the model. This will not
# save the image again, but leave it exactly as it is.
f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance)
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.height, 16)
self.assertEqual(instance.width, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
# Override the file by uploading a new one.
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
# Test the non-required ImageField
# Note: In Oracle, we expect a null ImageField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_imagefield_repr = ''
else:
expected_null_imagefield_repr = None
f = OptionalImageFileForm(data={'description': 'Test'})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, expected_null_imagefield_repr)
self.assertEqual(instance.width, None)
self.assertEqual(instance.height, None)
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Editing the instance without re-uploading the image should not affect the image or its width/height properties
f = OptionalImageFileForm(
data={'description': 'New Description'},
instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django.
instance.image.delete()
instance.delete()
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test4.png', image_data2)}
)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test4.png')
self.assertEqual(instance.width, 48)
self.assertEqual(instance.height, 32)
instance.delete()
# Test callable upload_to behavior that's dependent on the value of another field in the model
f = ImageFileForm(
data={'description': 'And a final one', 'path': 'foo'},
files={'image': SimpleUploadedFile('test4.png', image_data)})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'foo/test4.png')
instance.delete()
def test_media_on_modelform(self):
# Similar to a regular Form class you can define custom media to be used on
# the ModelForm.
f = ModelFormWithMedia()
self.assertHTMLEqual(six.text_type(f.media), '''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/form/javascript"></script>''')
f = CommaSeparatedIntegerForm({'field': '1,2,3'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': '1,2,3'})
f = CommaSeparatedIntegerForm({'field': '1a,2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': ',,,,'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': ',,,,'})
f = CommaSeparatedIntegerForm({'field': '1.2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1,a,2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1,,2'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': '1,,2'})
f = CommaSeparatedIntegerForm({'field': '1'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': '1'})
# This Price instance generated by this form is not valid because the quantity
# field is required, but the form is valid because the field is excluded from
# the form. This is for backwards compatibility.
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertEqual(form.is_valid(), True)
price = form.save(commit=False)
with self.assertRaises(ValidationError):
price.full_clean()
# The form should not validate fields that it doesn't contain even if they are
# specified using 'fields', not 'exclude'.
        class PriceFormWithoutQuantity(forms.ModelForm):
            class Meta:
                model = Price
                fields = ('price',)
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertEqual(form.is_valid(), True)
# The form should still have an instance of a model that is not complete and
# not saved into a DB yet.
self.assertEqual(form.instance.price, Decimal('6.00'))
self.assertEqual(form.instance.quantity is None, True)
self.assertEqual(form.instance.pk is None, True)
# Choices on CharField and IntegerField
f = ArticleForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('42')
f = ArticleStatusForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('z')
def test_foreignkeys_which_use_to_field(self):
apple = Inventory.objects.create(barcode=86, name='Apple')
pear = Inventory.objects.create(barcode=22, name='Pear')
core = Inventory.objects.create(barcode=87, name='Core', parent=apple)
field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), (
('', '---------'),
(86, 'Apple'),
(87, 'Core'),
(22, 'Pear')))
form = InventoryForm(instance=core)
self.assertHTMLEqual(six.text_type(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected="selected">Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
data = model_to_dict(core)
data['parent'] = '22'
form = InventoryForm(data=data, instance=core)
core = form.save()
self.assertEqual(core.parent.name, 'Pear')
class CategoryForm(forms.ModelForm):
description = forms.CharField()
class Meta:
model = Category
fields = ['description', 'url']
self.assertEqual(list(CategoryForm.base_fields),
['description', 'url'])
self.assertHTMLEqual(six.text_type(CategoryForm()), '''<tr><th><label for="id_description">Description:</label></th><td><input type="text" name="description" id="id_description" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>''')
# to_field_name should also work on ModelMultipleChoiceField ##################
field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear')))
self.assertQuerysetEqual(field.clean([86]), ['Apple'])
form = SelectInventoryForm({'items': [87, 22]})
self.assertEqual(form.is_valid(), True)
self.assertEqual(len(form.cleaned_data), 1)
self.assertQuerysetEqual(form.cleaned_data['items'], ['Core', 'Pear'])
def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
self.assertEqual(list(CustomFieldForExclusionForm.base_fields),
['name'])
self.assertHTMLEqual(six.text_type(CustomFieldForExclusionForm()),
'''<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="10" /></td></tr>''')
|
bsd-3-clause
| -1,320,706,784,182,750,000
| 42.529993
| 219
| 0.605936
| false
| 3.788366
| true
| false
| false
|
dfreedman55/Python4NetworkEngineers
|
week2/exercise3.py
|
1
|
1448
|
#!/usr/bin/env python
import telnetlib
import time
import socket
import sys
class DefineNetworkDevice(object):
def __init__(self, ip, uname, pword):
'''
Object initialization
'''
self.ip = ip
self.uname = uname
self.pword = pword
self.TELNET_PORT = 23
self.TELNET_TIMEOUT = 6
def TelnetConnect(self):
'''
Connect, Receive Username Prompt, Send Username, Receive Password Prompt, Send Password, Receive Router Prompt
'''
self.connection = telnetlib.Telnet(self.ip, self.TELNET_PORT, self.TELNET_TIMEOUT)
# output = self.connection.read_until('sername:', TELNET_TIMEOUT)
# print output
self.connection.write(self.uname + '\n')
time.sleep(1)
# output = self.connection.read_until('assword:', TELNET_TIMEOUT)
# print output
self.connection.write(self.pword + '\n')
time.sleep(1)
# output = self.connection.read_very_eager()
# print output
def TelnetSendCommand(self, command):
'''
Send command to established telnet session
'''
self.connection.write(command + '\n')
time.sleep(1)
def TelnetReceiveData(self):
'''
        Receive command output from the established telnet session
'''
output = self.connection.read_very_eager()
print output
# if __name__ == '__main__':
rtr1 = DefineNetworkDevice('50.76.53.27', 'pyclass', '88newclass')
rtr1.TelnetConnect()
rtr1.TelnetSendCommand('terminal length 0')
rtr1.TelnetReceiveData()
rtr1.TelnetSendCommand('show version')
rtr1.TelnetReceiveData()
|
gpl-2.0
| -9,119,668,020,921,804,000
| 24.403509
| 112
| 0.709945
| false
| 2.961145
| false
| false
| false
|
sirex/databot
|
databot/pipes.py
|
1
|
26342
|
import collections
import datetime
import itertools
import sqlalchemy as sa
import traceback
import tqdm
from databot.db.serializers import serrow, serkey
from databot.db.utils import strip_prefix, create_row, get_or_create, Row
from databot.db.windowedquery import windowed_query
from databot.db.models import Compression
from databot.handlers import download, html
from databot.bulkinsert import BulkInsert
from databot.exporters.services import export
from databot.expressions.base import Expression
from databot.tasks import Task
from databot.services import merge_rows
NONE = object()
def keyvalueitems(key, value=None):
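    # Normalizes the accepted call styles into an iterable of (key, value)
    # pairs. Illustrative behaviour (keys and values below are placeholders):
    #   keyvalueitems('k')                  -> [('k', None)]
    #   keyvalueitems('k', 1)               -> [('k', 1)]
    #   keyvalueitems(('k', 1))             -> [('k', 1)]
    #   keyvalueitems(['a', 'b'])           -> ('a', None), ('b', None)
    #   keyvalueitems([('a', 1), ('b', 2)]) -> ('a', 1), ('b', 2)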
if isinstance(key, tuple) and value is None and len(key) == 2:
return [key]
elif isinstance(key, collections.Iterable) and not isinstance(key, (str, bytes)):
items = iter(key)
else:
return [(key, value)]
try:
item = next(items)
except StopIteration:
return []
if isinstance(item, tuple):
return itertools.chain([item], items)
else:
return itertools.chain([(item, None)], ((k, None) for k in items))
class ItemNotFound(Exception):
pass
class PipeErrors(Task):
def __init__(self, task):
super().__init__()
self.task = task
self.bot = task.bot
def __call__(self, key=None, reverse=False):
if self.task.source:
state = self.task.get_state()
error = self.task.target.models.errors.alias('error')
table = self.task.source.table.alias('table')
# Filter by key if provided
if key is not None:
row = self.task.source.last(key)
if row is None:
return
where = sa.and_(
error.c.state_id == state.id,
error.c.row_id == row.id,
)
else:
where = error.c.state_id == state.id
# Ordering
if reverse:
order_by = error.c.id.desc()
else:
order_by = error.c.id
# Query if all tables stored in same database
if self.task.target.samedb and self.task.source.samedb:
query = (
sa.select([error, table], use_labels=True).
select_from(
error.
join(table, error.c.row_id == table.c.id)
).
where(where).
order_by(order_by)
)
for row in windowed_query(self.task.target.engine, query, table.c.id):
item = strip_prefix(row, 'error_')
item['row'] = create_row(strip_prefix(row, 'table_'))
yield item
# Query if some tables are stored in external database
else:
query = error.select(where).order_by(order_by)
for err in windowed_query(self.task.target.engine, query, error.c.id):
query = table.select(table.c.id == err['row_id'])
row = self.task.source.engine.execute(query).first()
if row:
yield Row(err, row=create_row(row))
def last(self, key=None):
for err in self(key, reverse=True):
return err
def count(self):
if self.task.source:
error = self.task.target.models.errors
state = self.task.get_state()
return self.task.target.engine.execute(error.count(error.c.state_id == state.id)).scalar()
else:
return 0
def rows(self):
for error in self():
yield error.row
def items(self):
for row in self.rows():
yield row.key, row.value
def keys(self):
for row in self.rows():
yield row.key
def values(self):
for row in self.rows():
yield row.value
def report(self, error_or_row, message, bulk=None):
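        # `error_or_row` is either an existing error record (it already carries a
        # 'retries' field, so its retry count and traceback are updated in place)
        # or a plain source row, for which a new error entry is created, buffered
        # through `bulk` when a BulkInsert is supplied and inserted directly otherwise.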
now = datetime.datetime.utcnow()
if 'retries' in error_or_row:
error = error_or_row
self.task.target.engine.execute(
self.bot.models.errors.update(sa.and_(
self.bot.models.errors.c.state_id == error.state_id,
self.bot.models.errors.c.row_id == error.row_id,
)).values(
retries=self.bot.models.errors.c.retries + 1,
traceback=message,
updated=datetime.datetime.utcnow(),
),
)
elif bulk:
row = error_or_row
state = self.task.get_state()
bulk.append(dict(
state_id=state.id,
row_id=row.id,
retries=0,
traceback=message,
created=now,
updated=now,
))
else:
row = error_or_row
state = self.task.get_state()
self.bot.engine.execute(
self.bot.models.errors.insert(),
state_id=state.id,
row_id=row.id,
retries=0,
traceback=message,
created=now,
updated=now,
)
def resolve(self, key=None):
if self.task.source:
state = self.task.get_state()
error = self.task.target.models.errors
table = self.task.source.table
if key is None:
self.task.target.engine.execute(error.delete(error.c.state_id == state.id))
elif self.task.target.samedb and self.task.source.samedb:
query = (
sa.select([error.c.id]).
select_from(table.join(error, table.c.id == error.c.row_id)).
where(sa.and_(error.c.state_id == state.id, table.c.key == serkey(key)))
)
if self.bot.engine.name == 'mysql':
# http://stackoverflow.com/a/45498/475477
query = sa.select([query.alias().c.id])
self.task.target.engine.execute(error.delete(error.c.id.in_(query)))
else:
query = table.select(table.c.key == serkey(key))
row_ids = {row['id'] for row in self.task.source.engine.execute(query)}
if row_ids:
query = error.delete(sa.and_(error.c.state_id == state.id, error.c.row_id.in_(row_ids)))
self.task.target.engine.execute(query)
class TaskPipe(Task):
def __init__(self, bot, source, target):
super().__init__()
self.bot = bot
self.source = source
self.target = target
self.errors = PipeErrors(self)
def __repr__(self):
return '<databot.pipes.TaskPipe(%r, %r) at 0x%x>' % (
self.source.name if self.source else None,
self.target.name,
id(self),
)
def get_state(self):
return get_or_create(self.target.engine, self.target.models.state, ['source_id', 'target_id'], dict(
source_id=(self.source.id if self.source else None),
target_id=self.target.id,
offset=0,
))
def is_filled(self):
if self.source:
table = self.source.table
state = self.get_state()
query = table.select(table.c.id > state.offset).limit(1)
return len(self.source.engine.execute(query).fetchall()) > 0
else:
return False
def reset(self):
engine = self.target.engine
models = self.target.models
state = self.get_state()
engine.execute(models.state.update(models.state.c.id == state.id), offset=0)
return self
def skip(self):
engine = self.target.engine
models = self.target.models
state = self.get_state()
source = self.source.table
query = sa.select([source.c.id]).order_by(source.c.id.desc()).limit(1)
offset = self.source.engine.execute(query).scalar()
if offset:
engine.execute(models.state.update(models.state.c.id == state.id), offset=offset)
return self
def offset(self, value=None):
"""Move cursor to the specified offset.
        For example, let's say you have 5 items in your pipe:
[-----]
        Then you will get the following state after calling offset:
offset(1) [*----]
offset(-1) [****-]
offset(3) [***--]
offset(10) [*****]
offset(0) [-----]
"""
state = self.get_state()
source = self.source.table
offset = None
if value:
query = sa.select([source.c.id])
if value > 0:
query = query.where(source.c.id > state.offset).order_by(source.c.id.asc())
else:
query = query.where(source.c.id < state.offset).order_by(source.c.id.desc())
query = query.limit(1).offset(abs(value) - 1)
offset = self.source.engine.execute(query).scalar()
if offset is None:
if value > 0:
return self.skip()
else:
return self.reset()
if offset is not None:
self.target.engine.execute(
self.target.models.state.update(self.target.models.state.c.id == state.id),
offset=offset,
)
return self
def count(self):
"""How much items left to process."""
if self.source:
state = self.get_state()
table = self.source.table
return self.source.engine.execute(table.count(table.c.id > state.offset)).scalar()
else:
return 0
def rows(self):
if self.source:
table = self.source.table
query = table.select(table.c.id > self.get_state().offset).order_by(table.c.id)
for row in windowed_query(self.source.engine, query, table.c.id):
yield create_row(row)
def items(self):
for row in self.rows():
yield row.key, row.value
def keys(self):
for row in self.rows():
yield row.key
def values(self):
for row in self.rows():
yield row.value
def _verbose_append(self, handler, row, bulk, append=True):
print('-' * 72, file=self.bot.output.output)
print('source: id=%d key=%r' % (row.id, row.key), file=self.bot.output.output)
for key, value in keyvalueitems(handler(row)):
if append:
self.target.append(key, value, bulk=bulk)
self.bot.output.key_value(key, value, short=True)
def call(self, handler, error_limit=NONE):
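        # Runs `handler` over every source row that has not been processed yet,
        # appending its results to the target pipe. Failures are recorded via
        # self.errors; once `error_limit` errors have accumulated (or on
        # KeyboardInterrupt) processing stops and the exception is re-raised
        # after the buffered inserts are flushed. Outside debug mode the state
        # offset is advanced to the last handled row on every bulk flush.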
error_limit = self.bot.error_limit if error_limit is NONE else error_limit
state = self.get_state()
desc = '%s -> %s' % (self.source, self.target)
if self.bot.retry:
self.retry(handler)
if self.bot.verbosity == 1 and not self.bot.debug:
total = min(self.bot.limit, self.count()) if self.bot.limit else self.count()
rows = tqdm.tqdm(self.rows(), desc, total, leave=True)
else:
rows = self.rows()
def post_save():
if row:
engine = self.target.engine
models = self.target.models
engine.execute(models.state.update(models.state.c.id == state.id), offset=row.id)
pipe = BulkInsert(self.target.engine, self.target.table)
errors = BulkInsert(self.target.engine, self.target.models.errors)
if not self.bot.debug:
pipe.post_save(post_save)
n = 0
n_errors = 0
row = None
interrupt = None
last_row = None
for row in rows:
if self.bot.limit and n >= self.bot.limit:
row = last_row
break
if self.bot.debug:
self._verbose_append(handler, row, pipe, append=False)
else:
try:
if self.bot.verbosity > 1:
self._verbose_append(handler, row, pipe)
else:
self.target.append(handler(row), bulk=pipe)
except KeyboardInterrupt as e:
interrupt = e
break
except Exception as e:
n_errors += 1
if error_limit is not None and n_errors >= error_limit:
interrupt = e
if self.bot.verbosity > 0:
print('Interrupting bot because error limit of %d was reached.' % error_limit)
self.bot.output.key_value(row.key, row.value, short=True)
if error_limit > 0:
self.errors.report(row, traceback.format_exc(), errors)
row = last_row
break
else:
self.errors.report(row, traceback.format_exc(), errors)
n += 1
last_row = row
pipe.save(post_save=True)
errors.save()
if self.bot.verbosity > 1:
print('%s, rows processed: %d' % (desc, n))
if interrupt:
raise interrupt
return self
def retry(self, handler):
desc = '%s -> %s (retry)' % (self.source, self.target)
if self.bot.verbosity == 1 and not self.bot.debug:
total = min(self.bot.limit, self.errors.count()) if self.bot.limit else self.errors.count()
errors = tqdm.tqdm(self.errors(), desc, total, leave=True, file=self.bot.output.output)
else:
errors = self.errors()
def post_save():
nonlocal error_ids
if error_ids:
engine = self.target.engine
models = self.target.models
engine.execute(models.errors.delete(models.errors.c.id.in_(error_ids)))
error_ids = []
pipe = BulkInsert(self.target.engine, self.target.table)
pipe.post_save(post_save)
n = 0
interrupt = None
error_ids = []
for error in errors:
if self.bot.limit and n >= self.bot.limit:
break
if self.bot.debug:
self._verbose_append(handler, error.row, pipe, append=False)
error_ids.append(error.id)
else:
try:
if self.bot.verbosity > 1:
self._verbose_append(handler, error.row, pipe)
else:
self.target.append(handler(error.row), bulk=pipe)
except KeyboardInterrupt as e:
interrupt = e
break
except:
self.errors.report(error, traceback.format_exc())
else:
error_ids.append(error.id)
n += 1
pipe.save(post_save=True)
if self.bot.verbosity > 1:
print('%s, errors retried: %d' % (desc, n))
if interrupt:
raise interrupt
return self
def download(self, urls=None, **kwargs):
kwargs.setdefault('delay', self.bot.download_delay)
urls = urls or Expression().key
return self.call(download.download(self.bot.requests, urls, **kwargs))
def select(self, key, value=None, **kwargs):
return self.call(html.Select(key, value, **kwargs))
def dedup(self):
self.target.dedup()
def compact(self):
self.target.compact()
def age(self, key=None):
return self.target.age(key)
def max(self, expr):
row = max((row for row in self.source.rows()), key=expr._eval, default=None)
if row:
self.target.append(row.key, row.value)
return self
def min(self, expr):
row = min((row for row in self.source.rows()), key=expr._eval, default=None)
if row:
self.target.append(row.key, row.value)
return self
class Pipe(Task):
def __init__(self, bot, id, name, table, engine, samedb=True, compress=None):
"""
Parameters
----------
bot : databot.Bot
id : int
Primary key of this pipe from ``databot.db.models.pipes.id``.
name : str
Human readable pipe identifier.
table: sqlalchemy.Table
A table where data is stored.
engine : sqlalchemy.Engine
samedb : bool
            Identifies if this pipe is stored in the same database as the other pipes of ``bot``.
            If a pipe is stored in an external database, some queries will be executed in a slightly different way.
compress : databot.db.models.Compression or bool, optional
Data compression algorithm.
"""
super().__init__()
self.bot = bot
self.id = id
self.name = name
self.table = table
self.models = bot.models
self.engine = engine
self.samedb = samedb
self.compression = Compression.gzip if compress is True else compress
self.tasks = {}
def __str__(self):
return self.name
def __repr__(self):
        return '<databot.pipes.Pipe(%r) at 0x%x>' % (self.name, id(self))
def __call__(self, source):
source_id = source.id if source else None
if source_id not in self.tasks:
self.tasks[source_id] = TaskPipe(self.bot, source, self)
return self.tasks[source_id]
def append(self, key, value=None, conn=None, bulk=None, only_missing=False, progress=None, total=-1):
"""Append data to the pipe
        You can call this method in the following ways::
append(key)
append(key, value)
append((key, value))
append([key, key, ...])
append([(key, value), (key, value), ...])
"""
conn = conn or self.engine
# Progress bar
rows = keyvalueitems(key, value)
if progress and self.bot.verbosity == 1 and not self.bot.debug:
rows = tqdm.tqdm(rows, progress, total, file=self.bot.output.output, leave=True)
# Bulk insert start
save_bulk = False
if bulk is None:
save_bulk = True
bulk = BulkInsert(conn, self.table)
# Append
for key, value in rows:
# Skip all items if key is None
if key is not None and (not only_missing or not self.exists(key)):
now = datetime.datetime.utcnow()
bulk.append(serrow(key, value, created=now, compression=self.compression))
# Bulk insert finish
if save_bulk:
bulk.save()
return self
def clean(self, age=None, now=None, key=None):
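        # Removes rows from this pipe. Illustrative calls (key and age values
        # are placeholders):
        #   clean()                                 removes every row
        #   clean(age=datetime.timedelta(days=7))   removes rows a week old or older
        #   clean(key='some-key')                   removes the newest row stored under that key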
if key is not None:
row = self.last(key)
if row is None:
raise ItemNotFound()
else:
query = self.table.delete(self.table.c.id == row.id)
elif age:
now = now or datetime.datetime.utcnow()
timestamp = now - age
query = self.table.delete(self.table.c.created <= timestamp)
else:
query = self.table.delete()
self.engine.execute(query)
return self
def dedup(self):
"""Delete all records with duplicate keys except ones created first."""
agg = (
sa.select([self.table.c.key, sa.func.min(self.table.c.id).label('id')]).
group_by(self.table.c.key).
having(sa.func.count(self.table.c.id) > 1).
alias()
)
query = (
sa.select([self.table.c.id]).
select_from(self.table.join(agg, sa.and_(
self.table.c.key == agg.c.key,
self.table.c.id != agg.c.id,
)))
)
if self.engine.name == 'mysql':
# http://stackoverflow.com/a/45498/475477
query = sa.select([query.alias().c.id])
self.engine.execute(self.table.delete(self.table.c.id.in_(query)))
return self
def compact(self):
"""Delete all records with duplicate keys except ones created last."""
agg = (
sa.select([self.table.c.key, sa.func.max(self.table.c.id).label('id')]).
group_by(self.table.c.key).
having(sa.func.count(self.table.c.id) > 1).
alias()
)
query = (
sa.select([self.table.c.id]).
select_from(self.table.join(agg, sa.and_(
self.table.c.key == agg.c.key,
self.table.c.id != agg.c.id,
)))
)
if self.engine.name == 'mysql':
# http://stackoverflow.com/a/45498/475477
query = sa.select([query.alias().c.id])
self.engine.execute(self.table.delete(self.table.c.id.in_(query)))
return self
def merge(self):
"""Merge all duplicate value, newer values overwrites older values.
Dicts will be merged recursively.
After merge, old values will be left as is, use compact to remove them.
"""
query = self.table.select().order_by(self.table.c.key, self.table.c.created)
rows = (create_row(row) for row in windowed_query(self.engine, query, self.table.c.id))
self.append(merge_rows((row.key, row.value) for row in rows))
return self
def compress(self):
table = self.table
rows = self.rows()
if self.bot.verbosity == 1:
rows = tqdm.tqdm(rows, ('compress %s' % self.name), total=self.count(), file=self.bot.output.output)
for row in rows:
if row.compression != Compression.gzip:
data = serrow(row.key, row.value, created=row.created, compression=Compression.gzip)
self.engine.execute(table.update().where(table.c.id == row['id']).values(data))
def decompress(self):
table = self.table
rows = self.rows()
if self.bot.verbosity == 1:
rows = tqdm.tqdm(rows, ('decompress %s' % self.name), total=self.count(), file=self.bot.output.output)
for row in rows:
if row.compression is not None:
data = serrow(row.key, row.value, created=row.created, compression=None)
self.engine.execute(table.update().where(table.c.id == row['id']).values(data))
def last(self, key=None):
if key:
query = self.table.select().where(self.table.c.key == serkey(key)).order_by(self.table.c.id.desc())
else:
query = self.table.select().order_by(self.table.c.id.desc())
row = self.engine.execute(query).first()
return create_row(row) if row else None
def age(self, key=None):
row = self.last(key)
return (datetime.datetime.utcnow() - row.created) if row else datetime.timedelta.max
def count(self):
return self.engine.execute(self.table.count()).scalar()
def rows(self, desc=False):
order_by = self.table.c.id.desc() if desc else self.table.c.id
query = self.table.select().order_by(order_by)
for row in windowed_query(self.engine, query, self.table.c.id):
yield create_row(row)
def items(self):
for row in self.rows():
yield row.key, row.value
def keys(self):
for row in self.rows():
yield row.key
def values(self):
for row in self.rows():
yield row.value
def exists(self, key):
query = sa.select([sa.exists().where(self.table.c.key == serkey(key))])
return self.engine.execute(query).scalar()
def getall(self, key, reverse=False):
order_by = self.table.c.id.desc() if reverse else self.table.c.id
query = self.table.select().where(self.table.c.key == serkey(key)).order_by(order_by)
for row in windowed_query(self.engine, query, self.table.c.id):
yield create_row(row)
def get(self, key, default=Exception):
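        # Returns the single row stored under `key`. Illustrative behaviour
        # (key names are placeholders): get('present') returns the row,
        # get('missing') raises ValueError unless a `default` is given, and a
        # key with several rows always raises ValueError.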
rows = self.getall(key)
try:
row = next(rows)
except StopIteration:
if default is Exception:
raise ValueError('%r not found.' % key)
else:
return default
try:
next(rows)
except StopIteration:
return row
else:
            raise ValueError('%r returned more than one row.' % key)
def export(self, dest, **kwargs):
return export(self.rows(), dest, **kwargs)
def download(self, urls=None, **kwargs):
"""Download list of URLs and store downloaded content into a pipe.
Parameters
----------
urls : None or str or list or callable or databot.rowvalue.Row
List of URLs to download.
            URLs can be provided in the following ways:
- `str` - string containing single URL.
- `list` - list of strings where each string is a URL.
- `None` - takes URLs from connected pipe's key field.
- `databot.rowvalue.Row` - takes URLs from a specified location in a row.
            For example, the code below will take all rows from the `a` pipe and read the URL from
            `a.row.value.url`, which is `http://example.com`.
.. code-block:: python
import databot
bot = databot.Bot()
a = bot.define('a').append([(1, {'url': 'http://example.com'})])
bot.define('b').download(a.row.value.url)
delay : int
Amount of seconds to delay between requests.
By default delay is `bot.download_delay`.
"""
kwargs.setdefault('delay', self.bot.download_delay)
urls = [urls] if isinstance(urls, str) else urls
fetch = download.download(self.bot.requests, urls, **kwargs)
for url in urls:
try:
self.append(fetch(url))
except KeyboardInterrupt:
raise
except Exception as e:
self.bot.output.key_value(url, None, short=True)
raise
return self
|
agpl-3.0
| -4,314,377,841,758,033,000
| 33.033592
| 118
| 0.534242
| false
| 3.990003
| false
| false
| false
|
wateraccounting/wa
|
Collect/ALEXI/monthly.py
|
1
|
1291
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 07:54:17 2017
@author: tih
"""
import os
import sys
from DataAccess import DownloadData
def main(Dir, Startdate='', Enddate='', latlim=[-60, 70], lonlim=[-180, 180], Waitbar = 1):
"""
This function downloads monthly ALEXI data
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax] (values must be between -60 and 70)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
"""
print '\nDownload monthly ALEXI evapotranspiration data for the period %s till %s' %(Startdate, Enddate)
# Download data
DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar)
# Define directory
Dir_ALEXI_Weekly = os.path.join(Dir, 'Evaporation', 'ALEXI', 'Weekly')
Dir_ALEXI_Monthly = os.path.join(Dir, 'Evaporation', 'ALEXI', 'Monthly')
# Create output directory
if not os.path.exists(Dir_ALEXI_Monthly):
os.mkdir(Dir_ALEXI_Monthly)
# Create monthly maps based on weekly maps
import wa.Functions.Start.Weekly_to_monthly_flux as Week2month
Week2month.Nearest_Interpolate(Dir_ALEXI_Weekly, Startdate, Enddate, Dir_ALEXI_Monthly)
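# Illustrative call (the directory and dates below are placeholders):
# main(r'C:/file/to/path/', Startdate='2005-01-01', Enddate='2005-12-31',
#      latlim=[-10, 40], lonlim=[20, 60])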
if __name__ == '__main__':
main(sys.argv)
|
apache-2.0
| 8,970,401,376,894,572,000
| 31.275
| 108
| 0.656081
| false
| 3.110843
| false
| false
| false
|
GStreamer/cerbero
|
recipes/custom.py
|
1
|
6402
|
# -*- Mode: Python -*- vi:si:et:sw=4:sts=4:ts=4:syntax=python
import os
from collections import defaultdict
from cerbero.build import recipe
from cerbero.build.source import SourceType
from cerbero.build.cookbook import CookBook
from cerbero.enums import License, FatalError
def running_on_cerbero_ci():
return os.environ.get('CI_PROJECT_NAME', '') == 'cerbero'
class GStreamer(recipe.Recipe):
licenses = [License.LGPLv2Plus]
version = '1.19.0.1'
tagged_for_release = False
# Decide what stype to use
use_git = True
if tagged_for_release:
# If we're using a manifest, that means we want to use the specified
# commits and remotes.
use_git = recipe.Recipe._using_manifest_force_git
# If we're tagged for release and we're running on Cerbero CI, we want
# to use the release tarballs even if a manifest is specified, because
# we want to test that the tarballs work.
if running_on_cerbero_ci():
use_git = False
if use_git:
stype = SourceType.GIT
remotes = {'origin': 'https://gitlab.freedesktop.org/gstreamer/%(name)s.git'}
if int(version.split('.')[1]) % 2 == 0:
# Even version, use the specific branch
commit = 'origin/' + '.'.join(version.split('.')[0:2])
else:
# Odd version, use git master
commit = 'origin/master'
else:
stype = SourceType.TARBALL
url = 'https://gstreamer.freedesktop.org/src/%(name)s/%(name)s-%(version)s.tar.xz'
tarball_dirname = '%(name)s-%(version)s'
def enable_plugin(self, plugin, category, variant, option=None, dep=None):
if option is None:
option = plugin
if getattr(self.config.variants, variant):
if dep is not None:
self.deps.append(dep)
plugin = 'lib/gstreamer-1.0/libgst' + plugin
if not hasattr(self, 'files_plugins_' + category):
setattr(self, 'files_plugins_' + category, [])
f = getattr(self, 'files_plugins_' + category)
f += [plugin + '%(mext)s']
if not hasattr(self, 'files_plugins_{}_devel'.format(category)):
setattr(self, 'files_plugins_{}_devel'.format(category), [])
d = getattr(self, 'files_plugins_{}_devel'.format(category))
d += [plugin + '.a', plugin + '.la']
self.meson_options[option] = 'enabled'
else:
self.meson_options[option] = 'disabled'
def _remove_files_category_entry(self, files_category, entry):
if hasattr(self, files_category):
fc = getattr(self, files_category)
if entry in fc:
fc.remove(entry)
return
platform_files_category = 'platform_' + files_category
if hasattr(self, platform_files_category):
pf = getattr(self, platform_files_category)
if self.config.target_platform not in pf:
raise FatalError('plugin {!r} not found in category {!r}'.format(entry, files_category))
pfc = getattr(self, platform_files_category)[self.config.target_platform]
if entry in pfc:
pfc.remove(entry)
return
raise FatalError('{} not found in category {}'.format(entry, files_category))
def _remove_plugin_file(self, plugin, category):
plugin = 'lib/gstreamer-1.0/libgst' + plugin
plugin_shared_lib = plugin + '%(mext)s'
plugin_static_lib = plugin + '.a'
plugin_libtool_lib = plugin + '.la'
self._remove_files_category_entry('files_plugins_' + category, plugin_shared_lib)
self._remove_files_category_entry('files_plugins_{}_devel'.format(category), plugin_static_lib)
self._remove_files_category_entry('files_plugins_{}_devel'.format(category), plugin_libtool_lib)
def disable_plugin(self, plugin, category, option=None, dep=None, library_name=None):
if option is None:
option = plugin
if dep is not None and dep in self.deps:
self.deps.remove(dep)
self._remove_plugin_file(plugin, category)
if library_name is not None:
library = 'libgst' + library_name + '-1.0'
self.files_libs.remove(library)
pcname = 'lib/pkgconfig/gstreamer-' + library_name + '-1.0.pc'
self.files_plugins_devel.remove(pcname)
includedir = 'include/gstreamer-1.0/gst/' + library_name
self.files_plugins_devel.remove(includedir)
libincdir = 'lib/gstreamer-1.0/include/gst/' + library_name
if libincdir in self.files_plugins_devel:
self.files_plugins_devel.remove(libincdir)
self.meson_options[option] = 'disabled'
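    # Illustrative use from a recipe subclass (the plugin, category, variant
    # and dependency names below are made up, not real cerbero options):
    #   self.enable_plugin('foo', 'codecs', 'somevariant', dep='libfoo')
    #   self.disable_plugin('bar', 'codecs', dep='libbar', library_name='bar')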
def list_gstreamer_1_0_plugins_by_category(config):
cookbook = CookBook(config)
plugins = defaultdict(list)
for r in ['gstreamer-1.0', 'gst-plugins-base-1.0', 'gst-plugins-good-1.0',
'gst-plugins-bad-1.0', 'gst-plugins-ugly-1.0', 'libnice',
'gst-libav-1.0', 'gst-editing-services-1.0', 'gst-rtsp-server-1.0']:
r = cookbook.get_recipe(r)
for attr_name in dir(r):
if attr_name.startswith('files_plugins_'):
cat_name = attr_name[len('files_plugins_'):]
plugins_list = getattr(r, attr_name)
elif attr_name.startswith('platform_files_plugins_'):
cat_name = attr_name[len('platform_files_plugins_'):]
plugins_dict = getattr(r, attr_name)
plugins_list = plugins_dict.get(config.target_platform, [])
else:
continue
for e in plugins_list:
if not e.startswith('lib/gstreamer-'):
continue
c = e.split('/')
if len(c) != 3:
continue
e = c[2]
# we only care about files with the replaceable %(mext)s extension
                if not e.endswith('%(mext)s'):
continue
if e.startswith('libgst'):
e = e[6:-8]
else:
e = e[3:-8]
plugins[cat_name].append(e)
return plugins
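# The helper above returns a defaultdict mapping each plugin category (the part
# after 'files_plugins_' in the recipe attributes) to bare plugin names, i.e. an
# entry like 'lib/gstreamer-1.0/libgstfoo%(mext)s' (name made up) contributes
# 'foo' to its category; the exact contents depend on the recipes and the
# configured target platform.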
|
lgpl-2.1
| 1,029,590,351,478,202,200
| 44.728571
| 104
| 0.565761
| false
| 3.83583
| true
| false
| false
|
emilroz/openmicroscopy
|
components/tools/OmeroPy/src/omero/util/populate_metadata.py
|
1
|
21324
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Populate bulk metadata tables from delimited text files.
"""
#
# Copyright (C) 2011-2014 University of Dundee. All rights reserved.
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import tempfile
import logging
import time
import sys
import csv
import re
from threading import Thread
from StringIO import StringIO
from getpass import getpass
from getopt import getopt, GetoptError
from Queue import Queue
import omero.clients
from omero.rtypes import rdouble, rstring, rint
from omero.model import DatasetAnnotationLink, DatasetI, FileAnnotationI, \
OriginalFileI, PlateI, PlateAnnotationLinkI, ScreenI, \
ScreenAnnotationLinkI
from omero.grid import ImageColumn, LongColumn, PlateColumn, StringColumn, \
WellColumn
from omero.util.temp_files import create_path, remove_path
from omero import client
from populate_roi import ThreadPool
from xml.etree.cElementTree import XML, Element, SubElement, ElementTree, dump, iterparse
log = logging.getLogger("omero.util.populate_metadata")
def usage(error):
"""Prints usage so that we don't have to. :)"""
cmd = sys.argv[0]
print """%s
Usage: %s [options] <target_object> <file>
Runs metadata population code for a given object.
Options:
-s OMERO hostname to use [defaults to "localhost"]
-p OMERO port to use [defaults to 4064]
-u OMERO username to use
-w OMERO password
-k OMERO session key to use
-i Dump measurement information and exit (no population)
-d Print debug statements
Examples:
%s -s localhost -p 14064 -u bob Plate:6 metadata.csv
Report bugs to ome-devel@lists.openmicroscopy.org.uk""" % (error, cmd, cmd)
sys.exit(2)
# Global thread pool for use by workers
thread_pool = None
# Special column names we may add depending on the data type
PLATE_NAME_COLUMN = 'Plate Name'
WELL_NAME_COLUMN = 'Well Name'
class Skip(object):
"""Instance to denote a row skip request."""
pass
class MetadataError(Exception):
"""
Raised by the metadata parsing context when an error condition
is reached.
"""
pass
class HeaderResolver(object):
"""
Header resolver for known header names which is responsible for creating
the column set for the OMERO.tables instance.
"""
DEFAULT_COLUMN_SIZE = 1
plate_keys = {
'well': WellColumn,
'field': ImageColumn,
'row': LongColumn,
'column': LongColumn,
'wellsample': ImageColumn
}
screen_keys = dict({
'plate': PlateColumn,
}, **plate_keys)
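    # Illustration (header names are examples): for a Screen target a CSV
    # header row such as ['Plate', 'Well', 'Gene Symbol'] resolves to
    # [PlateColumn, WellColumn, StringColumn], and create_columns_screen()
    # additionally appends the 'Plate Name' and 'Well Name' string columns.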
def __init__(self, target_object, headers):
self.target_object = target_object
self.headers = [v.replace('/', '\\') for v in headers]
self.headers_as_lower = [v.lower() for v in self.headers]
def create_columns(self):
target_class = self.target_object.__class__
target_id = self.target_object.id.val
if ScreenI is target_class:
log.debug('Creating columns for Screen:%d' % target_id)
return self.create_columns_screen()
if PlateI is target_class:
log.debug('Creating columns for Plate:%d' % target_id)
return self.create_columns_plate()
if DatasetI is target_class:
log.debug('Creating columns for Dataset:%d' % target_id)
return self.create_columns_dataset()
raise MetadataError('Unsupported target object class: %s' \
% target_class)
def create_columns_screen(self):
columns = list()
for i, header_as_lower in enumerate(self.headers_as_lower):
name = self.headers[i]
try:
column = self.screen_keys[header_as_lower](name, '', list())
except KeyError:
column = StringColumn(name, '', self.DEFAULT_COLUMN_SIZE,
list())
columns.append(column)
for column in columns:
if column.__class__ is PlateColumn:
columns.append(StringColumn(PLATE_NAME_COLUMN, '',
self.DEFAULT_COLUMN_SIZE, list()))
if column.__class__ is WellColumn:
columns.append(StringColumn(WELL_NAME_COLUMN, '',
self.DEFAULT_COLUMN_SIZE, list()))
return columns
def create_columns_plate(self):
columns = list()
for i, header_as_lower in enumerate(self.headers_as_lower):
name = self.headers[i]
try:
column = self.plate_keys[header_as_lower](name, '', list())
except KeyError:
column = StringColumn(name, '', self.DEFAULT_COLUMN_SIZE,
list())
columns.append(column)
for column in columns:
if column.__class__ is PlateColumn:
columns.append(StringColumn(PLATE_NAME_COLUMN, '',
self.DEFAULT_COLUMN_SIZE, list()))
if column.__class__ is WellColumn:
columns.append(StringColumn(WELL_NAME_COLUMN, '',
self.DEFAULT_COLUMN_SIZE, list()))
return columns
def create_columns_dataset(self):
raise Exception('To be implemented!')
class ValueResolver(object):
"""
Value resolver for column types which is responsible for filling up
non-metadata columns with their OMERO data model identifiers.
"""
AS_ALPHA = [chr(v) for v in range(97, 122 + 1)] # a-z
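    # Matches well identifiers such as "A10": an alphabetic row part followed
    # by a numeric column part.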
WELL_REGEX = re.compile(r'^([a-zA-Z]+)(\d+)$')
def __init__(self, client, target_object):
self.client = client
self.target_object = target_object
self.target_class = self.target_object.__class__
if PlateI is self.target_class:
return self.load_plate()
if DatasetI is self.target_class:
return self.load_dataset()
if ScreenI is self.target_class:
return self.load_screen()
        raise MetadataError('Unsupported target object class: %s' \
            % self.target_class)
def load_screen(self):
query_service = self.client.getSession().getQueryService()
parameters = omero.sys.ParametersI()
parameters.addId(self.target_object.id.val)
log.debug('Loading Screen:%d' % self.target_object.id.val)
self.target_object = query_service.findByQuery(
'select s from Screen as s '
'join fetch s.plateLinks as p_link '
'join fetch p_link.child as p '
'where s.id = :id', parameters, {'omero.group': '-1'})
if self.target_object is None:
raise MetadataError('Could not find target object!')
self.wells_by_location = dict()
self.wells_by_id = dict()
self.plates_by_name = dict()
self.plates_by_id = dict()
for plate in (l.child for l in self.target_object.copyPlateLinks()):
parameters = omero.sys.ParametersI()
parameters.addId(plate.id.val)
plate = query_service.findByQuery(
'select p from Plate as p '
'join fetch p.wells as w '
'join fetch w.wellSamples as ws '
'where p.id = :id', parameters, {'omero.group': '-1'})
self.plates_by_name[plate.name.val] = plate
self.plates_by_id[plate.id.val] = plate
wells_by_location = dict()
wells_by_id = dict()
self.wells_by_location[plate.name.val] = wells_by_location
self.wells_by_id[plate.id.val] = wells_by_id
self.parse_plate(plate, wells_by_location, wells_by_id)
def load_plate(self):
query_service = self.client.getSession().getQueryService()
parameters = omero.sys.ParametersI()
parameters.addId(self.target_object.id.val)
log.debug('Loading Plate:%d' % self.target_object.id.val)
self.target_object = query_service.findByQuery(
'select p from Plate as p '
'join fetch p.wells as w '
'join fetch w.wellSamples as ws '
'where p.id = :id', parameters, {'omero.group': '-1'})
if self.target_object is None:
raise MetadataError('Could not find target object!')
self.wells_by_location = dict()
self.wells_by_id = dict()
wells_by_location = dict()
wells_by_id = dict()
self.wells_by_location[self.target_object.name.val] = wells_by_location
self.wells_by_id[self.target_object.id.val] = wells_by_id
self.parse_plate(self.target_object, wells_by_location, wells_by_id)
def parse_plate(self, plate, wells_by_location, wells_by_id):
# TODO: This should use the PlateNamingConvention. We're assuming rows
# as alpha and columns as numeric.
for well in plate.copyWells():
wells_by_id[well.id.val] = well
row = well.row.val
            # Stored 0-offset, but people use 1-offset column numbers
column = str(well.column.val + 1)
try:
columns = wells_by_location[self.AS_ALPHA[row]]
except KeyError:
wells_by_location[self.AS_ALPHA[row]] = columns = dict()
columns[column] = well
log.debug('Completed parsing plate: %s' % plate.name.val)
for row in wells_by_location:
log.debug('%s: %r' % (row, wells_by_location[row].keys()))
def load_dataset(self):
raise Exception('To be implemented!')
def resolve(self, column, value, row):
column_class = column.__class__
column_as_lower = column.name.lower()
if WellColumn is column_class:
m = self.WELL_REGEX.match(value)
if m is None or len(m.groups()) != 2:
raise MetadataError(
'Cannot parse well identifier "%s" from row: %r' % \
(value, [o[1] for o in row]))
plate_row = m.group(1).lower()
plate_column = str(long(m.group(2)))
if len(self.wells_by_location) == 1:
wells_by_location = self.wells_by_location.values()[0]
log.debug('Parsed "%s" row: %s column: %s' % \
(value, plate_row, plate_column))
else:
for column, plate in row:
if column.__class__ is PlateColumn:
wells_by_location = self.wells_by_location[plate]
log.debug('Parsed "%s" row: %s column: %s plate: %s' % \
(value, plate_row, plate_column, plate))
break
try:
return wells_by_location[plate_row][plate_column].id.val
except KeyError:
log.debug('Row: %s Column: %s not found!' % \
(plate_row, plate_column))
return -1L
if PlateColumn is column_class:
try:
return self.plates_by_name[value].id.val
except KeyError:
log.warn('Screen is missing plate: %s' % value)
return Skip()
if column_as_lower in ('row', 'column') \
and column_class is LongColumn:
try:
                # The CSV value is 1-offset; convert to the 0-offset used internally
return long(value) - 1
except ValueError:
return long(self.AS_ALPHA.index(value.lower()))
if StringColumn is column_class:
return value
raise MetadataError('Unsupported column class: %s' % column_class)
class ParsingContext(object):
"""Generic parsing context for CSV files."""
def __init__(self, client, target_object, file):
self.client = client
self.target_object = target_object
self.file = file
self.value_resolver = ValueResolver(self.client, self.target_object)
def create_annotation_link(self):
self.target_class = self.target_object.__class__
if ScreenI is self.target_class:
return ScreenAnnotationLinkI()
if PlateI is self.target_class:
return PlateAnnotationLinkI()
if DatasetI is self.target_class:
return DatasetAnnotationLinkI()
        raise MetadataError('Unsupported target object class: %s' \
            % self.target_class)
def get_column_widths(self):
widths = list()
for column in self.columns:
try:
widths.append(column.size)
except AttributeError:
widths.append(None)
return widths
def parse_from_handle(self, data):
rows = list(csv.reader(data, delimiter=','))
log.debug('Header: %r' % rows[0])
header_resolver = HeaderResolver(self.target_object, rows[0])
self.columns = header_resolver.create_columns()
log.debug('Columns: %r' % self.columns)
self.populate(rows[1:])
self.post_process()
log.debug('Column widths: %r' % self.get_column_widths())
log.debug('Columns: %r' % \
[(o.name, len(o.values)) for o in self.columns])
# Paranoid debugging
#for i in range(len(self.columns[0].values)):
# values = list()
# for column in self.columns:
# values.append(column.values[i])
# log.debug('Row: %r' % values)
def parse(self):
data = open(self.file, 'U')
try:
return self.parse_from_handle(data)
finally:
data.close()
def populate(self, rows):
value = None
for row in rows:
values = list()
row = [(self.columns[i], value) for i, value in enumerate(row)]
for column, original_value in row:
value = self.value_resolver.resolve(column, original_value, row)
if value.__class__ is Skip:
break
values.append(value)
try:
if value.__class__ is not long:
column.size = max(column.size, len(value))
except TypeError:
log.error('Original value "%s" now "%s" of bad type!' % \
(original_value, value))
raise
if value.__class__ is not Skip:
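                # Reverse so that values.pop() below yields them in original
                # column order; the synthetic name columns get their values
                # later in post_process().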
values.reverse()
for column in self.columns:
if column.name in (PLATE_NAME_COLUMN, WELL_NAME_COLUMN):
continue
try:
column.values.append(values.pop())
except IndexError:
log.error('Column %s has no values to pop.' % \
column.name)
raise
def post_process(self):
columns_by_name = dict()
plate_column = None
well_column = None
well_name_column = None
plate_name_column = None
for column in self.columns:
columns_by_name[column.name] = column
if column.__class__ is PlateColumn:
plate_column = column
elif column.__class__ is WellColumn:
well_column = column
elif column.name == WELL_NAME_COLUMN:
well_name_column = column
elif column.name == PLATE_NAME_COLUMN:
plate_name_column = column
if well_name_column is None and plate_name_column is None:
log.info('Nothing to do during post processing.')
for i in range(0, len(self.columns[0].values)):
if well_name_column is not None:
if PlateI is self.value_resolver.target_class:
plate = self.value_resolver.target_object.id.val
elif ScreenI is self.value_resolver.target_class:
plate = columns_by_name['Plate'].values[i]
try:
well = self.value_resolver.wells_by_id[plate]
well = well[well_column.values[i]]
row = well.row.val
col = well.column.val
except KeyError:
log.error('Missing row or column for well name population!')
raise
row = self.value_resolver.AS_ALPHA[row]
v = '%s%d' % (row, col + 1)
well_name_column.size = max(well_name_column.size, len(v))
well_name_column.values.append(v)
else:
log.info('Missing well name column, skipping.')
if plate_name_column is not None:
plate = columns_by_name['Plate'].values[i]
plate = self.value_resolver.plates_by_id[plate]
v = plate.name.val
plate_name_column.size = max(plate_name_column.size, len(v))
plate_name_column.values.append(v)
else:
log.info('Missing plate name column, skipping.')
def write_to_omero(self):
sf = self.client.getSession()
group = str(self.value_resolver.target_object.details.group.id.val)
sr = sf.sharedResources()
update_service = sf.getUpdateService()
name = 'bulk_annotations'
table = sr.newTable(1, name, {'omero.group': group})
if table is None:
raise MetadataError(
"Unable to create table: %s" % name)
original_file = table.getOriginalFile()
log.info('Created new table OriginalFile:%d' % original_file.id.val)
table.initialize(self.columns)
log.info('Table initialized with %d columns.' % (len(self.columns)))
table.addData(self.columns)
log.info('Added data column data.')
table.close()
file_annotation = FileAnnotationI()
file_annotation.ns = \
rstring('openmicroscopy.org/omero/bulk_annotations')
file_annotation.description = rstring(name)
file_annotation.file = OriginalFileI(original_file.id.val, False)
link = self.create_annotation_link()
link.parent = self.target_object
link.child = file_annotation
update_service.saveObject(link, {'omero.group': group})
def parse_target_object(target_object):
type, id = target_object.split(':')
if 'Dataset' == type:
return DatasetI(long(id), False)
if 'Plate' == type:
return PlateI(long(id), False)
if 'Screen' == type:
return ScreenI(long(id), False)
raise ValueError('Unsupported target object: %s' % target_object)
if __name__ == "__main__":
try:
        options, args = getopt(sys.argv[1:], "s:p:u:w:k:t:id")
except GetoptError, (msg, opt):
usage(msg)
try:
target_object, file = args
target_object = parse_target_object(target_object)
except ValueError:
        usage('Target object and file must be specified!')
username = None
password = None
hostname = 'localhost'
port = 4064 # SSL
info = False
session_key = None
logging_level = logging.INFO
thread_count = 1
for option, argument in options:
if option == "-u":
username = argument
if option == "-w":
password = argument
if option == "-s":
hostname = argument
if option == "-p":
port = int(argument)
if option == "-i":
info = True
if option == "-k":
session_key = argument
if option == "-d":
logging_level = logging.DEBUG
if option == "-t":
thread_count = int(argument)
if session_key is None and username is None:
usage("Username must be specified!")
if session_key is None and hostname is None:
usage("Host name must be specified!")
if session_key is None and password is None:
password = getpass()
logging.basicConfig(level = logging_level)
client = client(hostname, port)
client.setAgent("OMERO.populate_metadata")
client.enableKeepAlive(60)
try:
if session_key is not None:
client.joinSession(session_key)
else:
client.createSession(username, password)
log.debug('Creating pool of %d threads' % thread_count)
thread_pool = ThreadPool(thread_count)
ctx = ParsingContext(client, target_object, file)
ctx.parse()
if not info:
ctx.write_to_omero()
    finally:
        client.closeSession()
|
gpl-2.0
| -6,591,823,371,403,086,000
| 38.343173
| 89
| 0.570718
| false
| 4.031002
| false
| false
| false
|
indictools/grammar
|
common.py
|
1
|
7812
|
import sys,os
import time
from os import walk, path
from os.path import splitext, join
from json import dumps
from config import *
import signal
import subprocess
import re
import shutil
import glob
from flask import *
def wl_batchprocess(args, cmd, func):
wloads = args.get('wlnames').split(',')
print "In wl" + cmd
print dumps(wloads)
return (make_response(dumps(func(args))))
def urlize(pathsuffix, text = None, newtab = True):
tabclause = ""
if newtab:
tabclause = 'target="_blank"'
if not text:
text = pathsuffix
return '<a href="/workloads/taillog/15/' + pathsuffix + '" ' + tabclause + '>' + text + '</a>';
def get_all_jsons(path, pattern):
"""
path - where to begin folder scan
"""
pathprefix = repodir()
selected_files = []
print pathprefix
full_path=None
for root, dirs, files in os.walk(path, followlinks=True):
for f in files:
full_path = join(root, f)
ext = splitext(f)[1]
if ext != ".json" :
continue
wpath = full_path.replace(pathprefix + "/", "")
#print "wpath:",wpath
if pattern and not re.search(pattern, full_path):
continue
selected_files.append(wpath)
return selected_files
subprocs = set()
def signal_children(subprocs, signum):
sent_signal = False
for proc in subprocs:
if proc.poll() is None:
sent_signal = True
print "wlwizard child: Killing " + str(proc.pid)
try:
os.killpg(os.getpgid(proc.pid), signal.SIGINT)
except Exception as e:
print e
return False
#proc.send_signal(signum)
return sent_signal
def handle_signal(signum, frame):
print "wlwizard child: caught signal " + str(signum) + " .."
try:
while signal_children(subprocs, signum) == True:
print "wlwizard handler: sleeping"
time.sleep(10)
except Exception as e:
print "wlwizard handler: ", e
def fork_work(wloadname, cmdname, func, parms = {}):
#workload_dirpath = pubroot()+'/'
wdir = join(repodir(), wloadname)
createdir(wdir) #create workload-directory inside parsed folder
logfile = join(wdir, cmdname + "-log.txt")
pidfile = join(wdir, cmdname + ".pid")
print "pidfile:",pidfile
pid = os.fork()
if pid == 0:
# Child
os.setsid()
mypid = os.getpid()
# try:
# Flask(__name__).stop()
# except Exception as e:
# print "Error closing server socket:", e
with open(logfile, 'w', 1) as f:
# Redirect stdout and stderr to logfile
sys.stdout = sys.stderr = f
ret = 1
with open(pidfile, 'w') as f:
f.write(str(mypid))
try :
os.chdir(wdir)
ret = func(wdir, wloadname, cmdname, parms)
except Exception as e:
print "wlwizard fork_child: ", e
print "wlwizard fork_child: removing pid file" + join(wdir, cmdname + ".pid")
print "wlwizard: Workdir: ",wdir
os.remove(join(wdir, cmdname + ".pid"))
print "wlwizard: in child, exiting"
os._exit(ret)
# Parent
return 'Started. ' + urlize(join(wloadname, cmdname + "-log.txt"), \
"Click for details", True)
def dummy_work(wdir, wloadname, cmdname):
# Do the work of the child process here
createdir(join(wdir, "parsed"))#creating directory called parsed
print "IP-addrs:",ipaddrs
print "in child, sleeping"
time.sleep(10000)
return 0
def do_externalcmd(cmd):
#subprocs = set()
cmdstr = " ".join(cmd)
print cmdstr
signal.signal(signal.SIGINT, handle_signal)
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
proc = subprocess.Popen(cmd,shell=False, \
preexec_fn=os.setsid, \
close_fds=True, stdout=sys.stdout, stderr=sys.stderr)
subprocs.add(proc)
while proc.poll() is None:
print "wlwizard child: awaiting subprocess to complete ..."
proc.wait()
#signal_children(subprocs, signal.SIGINT)
print "wlwizard child: subprocess ended..."
return 0
def do_parse(wdir, wloadname, cmdname, parms):
rawfiles = glob.glob("raw/*.raw")
cfgfiles = glob.glob("raw/*[pP]rofile*.txt")
objfiles = glob.glob("raw/*obj*graph*.txt")
#try:
cmd = [cmdpath("processdump.pl"), "-o", "."]
if parms.get('compact') == 'on':
cmd.append("-compact")
# if parms.get('nocharts') == 'on':
# cmd.append("-nographs")
# #os._exit(0)
if cfgfiles:
profile = ["-cfg", ','.join(cfgfiles)]
cmd.extend(profile)
elif objfiles:
objgraph = ["-obj", ','.join(objfiles)]
cmd.extend(objgraph)
cmd.extend(rawfiles)
return do_externalcmd(cmd);
def do_capture(wdir, wloadname, cmdname, parms):
createdir(join(wdir, "raw")) # creating raw directory so
# this workload gets listed
cmd = [cmdpath("wlcollect.pl"), "-o", "raw"]
cmd.extend(parms['ipaddrs'])
return do_externalcmd(cmd)
def wlparse(parms):
wloadnames = parms.get('wlnames').split(',')
print "inside wlparse " + ",".join(wloadnames)
response = []
for w in wloadnames:
wdir = join(repodir(), w)
pidfile = join(wdir, "parse.pid")
if os.path.exists(pidfile):
response.append({ "wlname" : w,
"status" : "Parsing in progress; skipped." });
else:
resp = fork_work(w, "parse", do_parse, parms)
print "return:",resp
response.append({ "wlname" : w,
"status" : resp});
return response
def do_stop(w, cmdname, sig=signal.SIGINT):
response = []
pidfile = join(join(repodir(), w), cmdname + ".pid")
print "pid:",pidfile
if os.path.exists(pidfile):
with open(pidfile) as f:
pid = int(f.read())
print "Stopping workload " + cmdname + " of " + w + " (pid " + str(pid) + ") ..."
try:
os.kill(pid, sig)
#os.remove(pidfile)
response.append({ "wlname" : w,
"status" : cmdname + " stopped (process id " + str(pid) + ")"
});
except Exception as e:
print "Error: ", e
print "pidfile path:",pidfile
os.remove(pidfile)
else:
response.append({ "wlname" : w,
"status" : cmdname + " not running." });
return response
def wlcstop(args):
wloadnames = args.get('wlnames').split(',')
print "inside wlstop " + ",".join(wloadnames)
response = []
for w in wloadnames:
response.extend(do_stop(w, "replay"))
response.extend(do_stop(w, "capture"))
response.extend(do_stop(w, "parse"))
#print dumps(response,indent=4)
return response
def wldelete(args):
wloadnames = args.get('wlnames').split(',')
wlcstop(args)
response = []
for w in wloadnames:
print "inside wldelete " + w
wdir = join(repodir(), w)
try:
if os.path.exists(wdir):
print "deleting " + wdir
shutil.rmtree(wdir)
response.append({ "wlname" : w,
"status" : "Success" })
except Exception as e:
print "Error in rmtree " + wdir + ": ", e
response.append({ "wlname" : w, "status" : "Failed: " + str(e) })
#print dumps(response, indent=4)
return response
|
gpl-3.0
| -2,206,379,886,968,884,200
| 31.280992
| 99
| 0.549283
| false
| 3.640261
| false
| false
| false
|
gongghy/checkio_python
|
Home/The_Most_Wanted_Letter.py
|
1
|
1140
|
def checkio(text):
text = text.lower()
text = [letter for letter in text if letter.isalpha()]
d = dict.fromkeys(text, 0)
for char in text:
d[char] += 1
value = 0
for item in d.items():
if item[1] > value:
value = item[1]
lesser_keys = []
for item in d.items():
if item[1] < value:
lesser_keys.append(item[0])
for char in lesser_keys:
d.pop(char)
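    # Only the letters with the maximal count remain; sorting makes ties
    # resolve to the alphabetically first one.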
max_keys = list(d.keys())
max_keys.sort()
# replace this for solution
return max_keys[0]
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert checkio("Hello World!") == "l", "Hello test"
assert checkio("How do you do?") == "o", "O is most wanted"
assert checkio("One") == "e", "All letter only once."
assert checkio("Oops!") == "o", "Don't forget about lower case."
assert checkio("AAaooo!!!!") == "a", "Only letters."
assert checkio("abe") == "a", "The First."
print("Start the long test")
assert checkio("a" * 9000 + "b" * 1000) == "a", "Long."
print("The local tests are done.")
|
mit
| 6,739,730,774,419,538,000
| 34.625
| 85
| 0.566667
| false
| 3.323615
| false
| false
| false
|
cmars/pystdf
|
pystdf/IO.py
|
1
|
7630
|
#
# PySTDF - The Pythonic STDF Parser
# Copyright (C) 2006 Casey Marshall
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import sys
import struct
import re
from pystdf.Types import *
from pystdf import V4
from pystdf.Pipeline import DataSource
def appendFieldParser(fn, action):
"""Append a field parsing function to a record parsing function.
This is used to build record parsing functions based on the record type specification."""
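    # Each call wraps the previous parser, so the returned function parses
    # fields in declaration order and stops quietly at the end of a record.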
def newRecordParser(*args):
fields = fn(*args)
try:
fields.append(action(*args))
except EndOfRecordException: pass
return fields
return newRecordParser
class Parser(DataSource):
def readAndUnpack(self, header, fmt):
size = struct.calcsize(fmt)
if (size > header.len):
self.inp.read(header.len)
header.len = 0
raise EndOfRecordException()
buf = self.inp.read(size)
if len(buf) == 0:
self.eof = 1
raise EofException()
header.len -= len(buf)
val,=struct.unpack(self.endian + fmt, buf)
if isinstance(val,bytes):
return val.decode("ascii")
else:
return val
def readAndUnpackDirect(self, fmt):
size = struct.calcsize(fmt)
buf = self.inp.read(size)
if len(buf) == 0:
self.eof = 1
raise EofException()
val,=struct.unpack(self.endian + fmt, buf)
return val
def readField(self, header, stdfFmt):
return self.readAndUnpack(header, packFormatMap[stdfFmt])
def readFieldDirect(self, stdfFmt):
return self.readAndUnpackDirect(packFormatMap[stdfFmt])
def readCn(self, header):
if header.len == 0:
raise EndOfRecordException()
slen = self.readField(header, "U1")
if slen > header.len:
self.inp.read(header.len)
header.len = 0
raise EndOfRecordException()
if slen == 0:
return ""
buf = self.inp.read(slen);
if len(buf) == 0:
self.eof = 1
raise EofException()
header.len -= len(buf)
val,=struct.unpack(str(slen) + "s", buf)
return val.decode("ascii")
def readBn(self, header):
blen = self.readField(header, "U1")
bn = []
for i in range(0, blen):
bn.append(self.readField(header, "B1"))
return bn
def readDn(self, header):
dbitlen = self.readField(header, "U2")
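        # Dn fields carry a bit count; convert it to a byte count, rounding up.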
dlen = dbitlen / 8
if dbitlen % 8 > 0:
dlen+=1
dn = []
for i in range(0, int(dlen)):
dn.append(self.readField(header, "B1"))
return dn
def readVn(self, header):
vlen = self.readField(header, "U2")
vn = []
for i in range(0, vlen):
fldtype = self.readField(header, "B1")
if fldtype in self.vnMap:
vn.append(self.vnMap[fldtype](header))
return vn
def readArray(self, header, indexValue, stdfFmt):
if (stdfFmt == 'N1'):
            return self.readArray(header, indexValue/2+indexValue%2, 'U1')
arr = []
for i in range(int(indexValue)):
arr.append(self.unpackMap[stdfFmt](header, stdfFmt))
return arr
def readHeader(self):
hdr = RecordHeader()
hdr.len = self.readFieldDirect("U2")
hdr.typ = self.readFieldDirect("U1")
hdr.sub = self.readFieldDirect("U1")
return hdr
def __detectEndian(self):
self.eof = 0
header = self.readHeader()
if header.typ != 0 and header.sub != 10:
raise InitialSequenceException()
cpuType = self.readFieldDirect("U1")
if self.reopen_fn:
self.inp = self.reopen_fn()
else:
self.inp.seek(0)
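        # STDF CPU_TYPE 2 denotes a little-endian writer; anything else is
        # treated as big-endian.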
if cpuType == 2:
return '<'
else:
return '>'
def header(self, header): pass
def parse_records(self, count=0):
i = 0
self.eof = 0
fields = None
try:
while self.eof==0:
header = self.readHeader()
self.header(header)
if (header.typ, header.sub) in self.recordMap:
recType = self.recordMap[(header.typ, header.sub)]
recParser = self.recordParsers[(header.typ, header.sub)]
fields = recParser(self, header, [])
if len(fields) < len(recType.columnNames):
fields += [None] * (len(recType.columnNames) - len(fields))
self.send((recType, fields))
else:
self.inp.read(header.len)
if count:
i += 1
if i >= count: break
except EofException: pass
def auto_detect_endian(self):
if self.inp.tell() == 0:
self.endian = '@'
self.endian = self.__detectEndian()
def parse(self, count=0):
self.begin()
try:
self.auto_detect_endian()
self.parse_records(count)
self.complete()
except Exception as exception:
self.cancel(exception)
raise
def getFieldParser(self, fieldType):
if (fieldType.startswith("k")):
fieldIndex, arrayFmt = re.match('k(\d+)([A-Z][a-z0-9]+)', fieldType).groups()
return lambda self, header, fields: self.readArray(header, fields[int(fieldIndex)], arrayFmt)
else:
parseFn = self.unpackMap[fieldType]
return lambda self, header, fields: parseFn(header, fieldType)
def createRecordParser(self, recType):
fn = lambda self, header, fields: fields
for stdfType in recType.fieldStdfTypes:
fn = appendFieldParser(fn, self.getFieldParser(stdfType))
return fn
def __init__(self, recTypes=V4.records, inp=sys.stdin, reopen_fn=None, endian=None):
DataSource.__init__(self, ['header']);
self.eof = 1
self.recTypes = set(recTypes)
self.inp = inp
self.reopen_fn = reopen_fn
self.endian = endian
self.recordMap = dict(
[ ( (recType.typ, recType.sub), recType )
for recType in recTypes ])
self.unpackMap = {
"C1": self.readField,
"B1": self.readField,
"U1": self.readField,
"U2": self.readField,
"U4": self.readField,
"U8": self.readField,
"I1": self.readField,
"I2": self.readField,
"I4": self.readField,
"I8": self.readField,
"R4": self.readField,
"R8": self.readField,
"Cn": lambda header, fmt: self.readCn(header),
"Bn": lambda header, fmt: self.readBn(header),
"Dn": lambda header, fmt: self.readDn(header),
"Vn": lambda header, fmt: self.readVn(header)
}
self.recordParsers = dict(
[ ( (recType.typ, recType.sub), self.createRecordParser(recType) )
for recType in recTypes ])
self.vnMap = {
0: lambda header: self.inp.read(header, 1),
1: lambda header: self.readField(header, "U1"),
2: lambda header: self.readField(header, "U2"),
3: lambda header: self.readField(header, "U4"),
4: lambda header: self.readField(header, "I1"),
5: lambda header: self.readField(header, "I2"),
6: lambda header: self.readField(header, "I4"),
7: lambda header: self.readField(header, "R4"),
8: lambda header: self.readField(header, "R8"),
10: lambda header: self.readCn(header),
11: lambda header: self.readBn(header),
12: lambda header: self.readDn(header),
13: lambda header: self.readField(header, "U1")
}
|
gpl-2.0
| -556,588,033,002,586,940
| 29.277778
| 99
| 0.634076
| false
| 3.394128
| false
| false
| false
|
dmsurti/mayavi
|
tvtk/pyface/ui/wx/decorated_scene.py
|
1
|
11998
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2006, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Authors: Prabhu Ramachandran <prabhu_r@users.sf.net>,
# Dave Peterson <dpeterson@enthought.com>
#
#------------------------------------------------------------------------------
""" A VTK interactor scene which provides a convenient toolbar that allows the
user to set the camera view, turn on the axes indicator, etc.
"""
# System imports.
from os.path import dirname
import wx
# Enthought library imports.
from pyface.api import ImageResource, FileDialog, OK
from pyface.action.api import ToolBarManager, Group, Action
from tvtk.api import tvtk
from traits.api import Instance, false, List, Either
# Local imports.
from .scene import Scene
###########################################################################
# 'DecoratedScene' class
###########################################################################
class DecoratedScene(Scene):
"""A VTK interactor scene which provides a convenient toolbar that
allows the user to set the camera view, turn on the axes indicator
etc.
"""
#######################################################################
# Traits
#######################################################################
if hasattr(tvtk, 'OrientationMarkerWidget'):
# The tvtk orientation marker widget. This only exists in VTK
# 5.x.
marker = Instance(tvtk.OrientationMarkerWidget, ())
# The tvtk axes that will be shown for the orientation.
axes = Instance(tvtk.AxesActor, ())
else:
marker = None
axes = None
# Determine if the orientation axis is shown or not.
show_axes = false
# The list of actions represented in the toolbar
actions = List(Either(Action, Group))
##########################################################################
# `object` interface
##########################################################################
def __init__(self, parent, **traits):
super(DecoratedScene, self).__init__(parent, **traits)
self._setup_axes_marker()
def __get_pure_state__(self):
"""Allows us to pickle the scene."""
# The control attribute is not picklable since it is a VTK
# object so we remove it.
d = super(DecoratedScene, self).__get_pure_state__()
for x in ['_content', '_panel', '_sizer', '_tool_bar', 'actions']:
d.pop(x, None)
return d
##########################################################################
# Non-public interface.
##########################################################################
def _create_control(self, parent):
""" Create the toolkit-specific control that represents the widget.
Overridden to wrap the Scene control within a panel that
also contains a toolbar.
"""
# Create a panel as a wrapper of the scene toolkit control. This
# allows us to also add additional controls.
self._panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN)
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._panel.SetSizer(self._sizer)
# Add our toolbar to the panel.
tbm = self._get_tool_bar_manager()
self._tool_bar = tbm.create_tool_bar(self._panel)
self._sizer.Add(self._tool_bar, 0, wx.EXPAND)
# Create the actual scene content
self._content = super(DecoratedScene, self)._create_control(
self._panel)
self._sizer.Add(self._content, 1, wx.EXPAND)
# Ensure the child controls are laid-out.
self._sizer.Layout()
return self._panel
def _setup_axes_marker(self):
axes = self.axes
if axes is None:
# For VTK versions < 5.0.
return
axes.set(
normalized_tip_length=(0.4, 0.4, 0.4),
normalized_shaft_length=(0.6, 0.6, 0.6),
shaft_type='cylinder'
)
p = axes.x_axis_caption_actor2d.caption_text_property
axes.y_axis_caption_actor2d.caption_text_property = p
axes.z_axis_caption_actor2d.caption_text_property = p
p.set(color=(1,1,1), shadow=False, italic=False)
self._background_changed(self.background)
self.marker.set(key_press_activation=False)
self.marker.orientation_marker = axes
def _get_tool_bar_manager(self):
""" Returns the tool_bar_manager for this scene.
"""
tbm = ToolBarManager( *self.actions )
return tbm
def _get_image_path(self):
"""Returns the directory which contains the images used by the
toolbar."""
# So that we can find the images.
import tvtk.pyface.api
return dirname(tvtk.pyface.api.__file__)
def _toggle_projection(self):
""" Toggle between perspective and parallel projection, this
is used for the toolbar.
"""
if self._panel is not None:
self.parallel_projection = not self.parallel_projection
def _toggle_axes(self):
"""Used by the toolbar to turn on/off the axes indicator.
"""
if self._panel is not None:
self.show_axes = not self.show_axes
def _save_snapshot(self):
"""Invoked by the toolbar menu to save a snapshot of the scene
to an image. Note that the extension of the filename
determines what image type is saved. The default is PNG.
"""
if self._panel is not None:
wildcard = "PNG images (*.png)|*.png|Determine by extension (*.*)|*.*"
dialog = FileDialog(
parent = self._panel,
title = 'Save scene to image',
action = 'save as',
default_filename = "snapshot.png",
wildcard = wildcard
)
if dialog.open() == OK:
# The extension of the path will determine the actual
# image type saved.
self.save(dialog.path)
def _configure_scene(self):
"""Invoked when the toolbar icon for configuration is clicked.
"""
self.edit_traits()
######################################################################
# Trait handlers.
######################################################################
def _show_axes_changed(self):
marker = self.marker
if (self._vtk_control is not None) and (marker is not None):
if not self.show_axes:
marker.interactor = None
marker.enabled = False
else:
marker.interactor = self.interactor
marker.enabled = True
self.render()
def _background_changed(self, value):
# Depending on the background, this sets the axes text and
# outline color to something that should be visible.
axes = self.axes
if (self._vtk_control is not None) and (axes is not None):
p = self.axes.x_axis_caption_actor2d.caption_text_property
m = self.marker
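            # Crude brightness check: dark backgrounds get white annotations,
            # light backgrounds get black ones.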
s = value[0] + value[1] + value[2]
if s <= 1.0:
p.color = (1,1,1)
m.set_outline_color(1,1,1)
else:
p.color = (0,0,0)
m.set_outline_color(0,0,0)
self.render()
def _actions_default(self):
return [
Group(
Action(
image = ImageResource('16x16/x-axis',
search_path = [self._get_image_path()],
),
tooltip = "View along the -X axis",
on_perform = self.x_minus_view,
),
Action(
image = ImageResource('16x16/x-axis',
search_path = [self._get_image_path()],
),
tooltip = "View along the +X axis",
on_perform = self.x_plus_view,
),
Action(
image = ImageResource('16x16/y-axis',
search_path = [self._get_image_path()],
),
tooltip = "View along the -Y axis",
on_perform = self.y_minus_view,
),
Action(
image = ImageResource('16x16/y-axis',
search_path = [self._get_image_path()],
),
tooltip = "View along the +Y axis",
on_perform = self.y_plus_view,
),
Action(
image = ImageResource('16x16/z-axis',
search_path = [self._get_image_path()],
),
tooltip = "View along the -Z axis",
on_perform = self.z_minus_view,
),
Action(
image = ImageResource('16x16/z-axis',
search_path = [self._get_image_path()],
),
tooltip = "View along the +Z axis",
on_perform = self.z_plus_view,
),
Action(
image = ImageResource('16x16/isometric',
search_path = [self._get_image_path()],
),
tooltip = "Obtain an isometric view",
on_perform = self.isometric_view,
),
),
Group(
Action(
image = ImageResource('16x16/parallel',
search_path = [self._get_image_path()],
),
tooltip = 'Toggle parallel projection',
style="toggle",
on_perform = self._toggle_projection,
checked = self.parallel_projection,
),
Action(
image = ImageResource('16x16/origin_glyph',
search_path = [self._get_image_path()],
),
tooltip = 'Toggle axes indicator',
style="toggle",
enabled=(self.marker is not None),
on_perform = self._toggle_axes,
checked = self.show_axes,
),
Action(
image = ImageResource('16x16/fullscreen',
search_path = [self._get_image_path()],
),
tooltip = 'Full Screen (press "q" or "e" or ESC to exit fullscreen)',
style="push",
on_perform = self._full_screen_fired,
),
),
Group(
Action(
image = ImageResource('16x16/save',
search_path = [self._get_image_path()],
),
tooltip = "Save a snapshot of this scene",
on_perform = self._save_snapshot,
),
Action(
image = ImageResource('16x16/configure',
search_path = [self._get_image_path()],
),
tooltip = 'Configure the scene',
style="push",
on_perform = self._configure_scene,
),
),
]
|
bsd-3-clause
| 49,747,729,586,761,630
| 37.210191
| 89
| 0.471245
| false
| 4.692217
| false
| false
| false
|
yati-sagade/RyDyrect
|
settings.py
|
1
|
1727
|
# Initialize App Engine and import the default settings (DB backend, etc.).
# If you want to use a different backend you have to remove all occurences
# of "djangoappengine" from this file.
from djangoappengine.settings_base import *
import os
import people
# Activate django-dbindexer for the default database
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
AUTOLOAD_SITECONF = 'indexes'
#My conf
MY_EMAIL = 'yati.sagade@gmail.com'
MY_NAME = 'Yati Sagade'
#End my conf
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
INSTALLED_APPS = (
# 'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'djangotoolbox',
'autoload',
'dbindexer',
'people',
# djangoappengine should come last, so it can override a few manage.py commands
'djangoappengine',
)
MIDDLEWARE_CLASSES = (
# This loads the index definitions, so it has to come first
'autoload.middleware.AutoloadMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.media',
)
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
ADMIN_MEDIA_PREFIX = '/media/admin/'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
ROOT_URLCONF = 'urls'
|
bsd-3-clause
| 59,960,683,604,811,500
| 31.584906
| 83
| 0.734221
| false
| 3.454
| false
| false
| false
|
avih/treeherder
|
deployment/update/update.py
|
1
|
5356
|
"""
Deploy this project in stage/production.
Requires commander_ which is installed on the systems that need it.
.. _commander: https://github.com/oremj/commander
"""
import os
import requests
import sys
from commander.deploy import hostgroups, task
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import commander_settings as settings # noqa
env_file = os.path.join(settings.SRC_DIR, 'treeherder-env.sh')
th_service_src = os.path.join(settings.SRC_DIR, 'treeherder-service')
is_prod = 'treeherder.mozilla.org' in settings.SRC_DIR
def run_local_with_env(ctx, cmd):
# For commands run from the admin node, we have to manually set the environment
# variables, since the admin node is shared by both stage and prod.
ctx.local("source {} && {}".format(env_file, cmd))
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
# Update the code to a specific git reference (branch/tag/sha) and write
# info about the current repository state to a publicly visible file.
with ctx.lcd(th_service_src):
ctx.local('git fetch --quiet origin %s' % ref)
ctx.local('git reset --hard FETCH_HEAD')
ctx.local('find . -type f -name "*.pyc" -delete')
ctx.local('git status -s')
@task
def update(ctx):
# Create/populate a virtualenv that will be rsynced later along with the source.
with ctx.lcd(settings.SRC_DIR):
activate_script = os.path.join(settings.SRC_DIR, 'venv', 'bin', 'activate_this.py')
# Peep doesn't yet cache downloaded files, so we reuse the virtualenv to speed up deploys.
if not os.path.exists(activate_script):
ctx.local('virtualenv --python=python2.7 venv')
# Activate virtualenv.
execfile(activate_script, dict(__file__=activate_script))
# Install requirements using peep, so hashes are verified.
with ctx.lcd(th_service_src):
ctx.local('python2.7 bin/peep.py install -r requirements/common.txt')
# Make the virtualenv relocatable since paths are hard-coded by default.
ctx.local('virtualenv --relocatable venv')
# Fix lib64 symlink to be relative instead of absolute.
with ctx.lcd('venv'):
ctx.local('rm -f lib64')
ctx.local('ln -s lib lib64')
with ctx.lcd(th_service_src):
# Install nodejs non-dev packages, needed for the grunt build.
ctx.local("npm install --production")
# Generate the UI assets in the `dist/` directory.
ctx.local("./node_modules/.bin/grunt build --production")
# Make the current Git revision accessible at <site-root>/revision.txt
ctx.local("git rev-parse HEAD > dist/revision.txt")
# Generate gzipped versions of files that would benefit from compression, that
# WhiteNoise can then serve in preference to the originals. This is required
# since WhiteNoise's Django storage backend only gzips assets handled by
# collectstatic, and so does not affect files in the `dist/` directory.
ctx.local("python2.7 -m whitenoise.gzip dist")
# Collect the static files (eg for the Persona or Django admin UI)
run_local_with_env(ctx, "python2.7 manage.py collectstatic --noinput")
# Update the database schema, if necessary.
run_local_with_env(ctx, "python2.7 manage.py migrate --noinput")
# Update reference data & tasks config from the in-repo fixtures.
run_local_with_env(ctx, "python2.7 manage.py load_initial_data")
# Populate the datasource table and create the connected databases.
run_local_with_env(ctx, "python2.7 manage.py init_datasources")
@task
def deploy(ctx):
# Use the local, IT-written deploy script to check in changes.
ctx.local(settings.DEPLOY_SCRIPT)
# Rsync the updated code to the nodes & restart processes. These are
# separated out into their own functions, since the IRC bot output includes
# the task function name which is useful given how long these steps take.
deploy_rabbit()
deploy_web_app()
deploy_etl()
deploy_log()
ping_newrelic()
@task
def deploy_rabbit(ctx):
deploy_nodes(ctx, settings.RABBIT_HOSTGROUP, 'rabbit')
@task
def deploy_web_app(ctx):
deploy_nodes(ctx, settings.WEB_HOSTGROUP, 'web')
@task
def deploy_etl(ctx):
deploy_nodes(ctx, settings.ETL_HOSTGROUP, 'etl')
@task
def deploy_log(ctx):
deploy_nodes(ctx, settings.LOG_HOSTGROUP, 'log')
def deploy_nodes(ctx, hostgroup, node_type):
# Run the remote update script on each node in the specified hostgroup.
@hostgroups(hostgroup, remote_kwargs={'ssh_key': settings.SSH_KEY})
def rsync_code(ctx):
ctx.remote(settings.REMOTE_UPDATE_SCRIPT)
rsync_code()
env_flag = '-p' if is_prod else '-s'
ctx.local('/root/bin/restart-jobs %s %s' % (env_flag, node_type))
@task
def ping_newrelic(ctx):
data = {
'deployment[application_id]': settings.NEW_RELIC_APP_ID,
'deployment[user]': 'Chief',
}
headers = {'x-api-key': settings.NEW_RELIC_API_KEY}
r = requests.post('https://api.newrelic.com/deployments.xml',
data=data, headers=headers, timeout=30)
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
print("HTTPError {} notifying New Relic: {}".format(r.status_code, r.text))
raise
|
mpl-2.0
| -1,774,610,298,137,748,000
| 36.985816
| 98
| 0.673824
| false
| 3.570667
| false
| false
| false
|
spivachuk/sovrin-node
|
indy_node/test/state_proof/test_state_proofs_for_get_requests.py
|
1
|
12398
|
import base64
import random
import time
import base58
import pytest
from common.serializers import serialization
from common.serializers.serialization import state_roots_serializer
from crypto.bls.bls_multi_signature import MultiSignature, MultiSignatureValue
from plenum.bls.bls_store import BlsStore
from plenum.common.constants import TXN_TYPE, TARGET_NYM, RAW, DATA, \
IDENTIFIER, NAME, VERSION, ROLE, VERKEY, KeyValueStorageType, \
STATE_PROOF, ROOT_HASH, MULTI_SIGNATURE, PROOF_NODES, TXN_TIME, CURRENT_PROTOCOL_VERSION, DOMAIN_LEDGER_ID
from plenum.common.txn_util import reqToTxn, append_txn_metadata, append_payload_metadata
from plenum.common.types import f
from indy_common.constants import \
ATTRIB, CLAIM_DEF, SCHEMA, CLAIM_DEF_FROM, CLAIM_DEF_SCHEMA_REF, CLAIM_DEF_SIGNATURE_TYPE, \
CLAIM_DEF_PUBLIC_KEYS, CLAIM_DEF_TAG, SCHEMA_NAME, SCHEMA_VERSION, SCHEMA_ATTR_NAMES
from indy_common.types import Request
from indy_node.persistence.attribute_store import AttributeStore
from indy_node.persistence.idr_cache import IdrCache
from indy_node.server.domain_req_handler import DomainReqHandler
from plenum.common.util import get_utc_epoch, friendlyToRaw, rawToFriendly, \
friendlyToHex, hexToFriendly
from state.pruning_state import PruningState
from storage.kv_in_memory import KeyValueStorageInMemory
from indy_common.state import domain
@pytest.fixture()
def bls_store():
return BlsStore(key_value_type=KeyValueStorageType.Memory,
data_location=None,
key_value_storage_name="BlsInMemoryStore",
serializer=serialization.multi_sig_store_serializer)
@pytest.fixture()
def request_handler(bls_store):
state = PruningState(KeyValueStorageInMemory())
cache = IdrCache('Cache', KeyValueStorageInMemory())
attr_store = AttributeStore(KeyValueStorageInMemory())
return DomainReqHandler(ledger=None,
state=state,
config=None,
requestProcessor=None,
idrCache=cache,
attributeStore=attr_store,
bls_store=bls_store,
ts_store=None)
def extract_proof(result, expected_multi_sig):
proof = result[STATE_PROOF]
assert proof
assert proof[ROOT_HASH]
assert proof[PROOF_NODES]
multi_sign = proof[MULTI_SIGNATURE]
assert multi_sign
assert multi_sign == expected_multi_sig
return proof
def save_multi_sig(request_handler):
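    # Build and store a dummy BLS multi-signature over the committed state
    # root so the request handler can attach it to its state proofs.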
multi_sig_value = MultiSignatureValue(ledger_id=DOMAIN_LEDGER_ID,
state_root_hash=state_roots_serializer.serialize(
bytes(request_handler.state.committedHeadHash)),
txn_root_hash='2' * 32,
pool_state_root_hash='1' * 32,
timestamp=get_utc_epoch())
multi_sig = MultiSignature('0' * 32, ['Alpha', 'Beta', 'Gamma'], multi_sig_value)
request_handler.bls_store.put(multi_sig)
return multi_sig.as_dict()
def is_proof_verified(request_handler,
proof, path,
value, seq_no, txn_time, ):
encoded_value = domain.encode_state_value(value, seq_no, txn_time)
proof_nodes = base64.b64decode(proof[PROOF_NODES])
root_hash = base58.b58decode(proof[ROOT_HASH])
verified = request_handler.state.verify_state_proof(
root_hash,
path,
encoded_value,
proof_nodes,
serialized=True
)
return verified
def test_state_proofs_for_get_attr(request_handler):
# Adding attribute
nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
attr_key = 'last_name'
raw_attribute = '{"last_name":"Anderson"}'
seq_no = 0
txn_time = int(time.time())
identifier = "6ouriXMZkLeHsuXrN1X1fd"
txn = {
TXN_TYPE: ATTRIB,
TARGET_NYM: nym,
RAW: raw_attribute,
}
txn = append_txn_metadata(reqToTxn(Request(operation=txn,
protocolVersion=CURRENT_PROTOCOL_VERSION,
identifier=identifier)),
seq_no=seq_no, txn_time=txn_time)
request_handler._addAttr(txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting attribute
get_request = Request(
operation={
TARGET_NYM: nym,
RAW: 'last_name'
},
signatures={},
protocolVersion=CURRENT_PROTOCOL_VERSION
)
result = request_handler.handleGetAttrsReq(get_request)
proof = extract_proof(result, multi_sig)
attr_value = result[DATA]
assert attr_value == raw_attribute
# Verifying signed state proof
path = domain.make_state_path_for_attr(nym, attr_key)
assert is_proof_verified(request_handler,
proof, path,
domain.hash_of(attr_value), seq_no, txn_time)
def test_state_proofs_for_get_claim_def(request_handler):
# Adding claim def
nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
seq_no = 0
txn_time = int(time.time())
identifier = "6ouriXMZkLeHsuXrN1X1fd"
schema_seqno = 0
signature_type = 'CL'
key_components = '{"key_components": []}'
tag = 'tag1'
txn = {
TXN_TYPE: CLAIM_DEF,
TARGET_NYM: nym,
CLAIM_DEF_SCHEMA_REF: schema_seqno,
CLAIM_DEF_PUBLIC_KEYS: key_components,
CLAIM_DEF_TAG: tag
}
txn = append_txn_metadata(reqToTxn(Request(operation=txn,
protocolVersion=CURRENT_PROTOCOL_VERSION,
identifier=identifier)),
seq_no=seq_no, txn_time=txn_time)
txn = append_payload_metadata(txn, frm=nym)
request_handler._addClaimDef(txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting claim def
request = Request(
operation={
IDENTIFIER: nym,
CLAIM_DEF_FROM: nym,
CLAIM_DEF_SCHEMA_REF: schema_seqno,
CLAIM_DEF_SIGNATURE_TYPE: signature_type,
CLAIM_DEF_TAG: tag
},
signatures={},
protocolVersion=CURRENT_PROTOCOL_VERSION
)
result = request_handler.handleGetClaimDefReq(request)
proof = extract_proof(result, multi_sig)
assert result[DATA] == key_components
# Verifying signed state proof
path = domain.make_state_path_for_claim_def(nym, schema_seqno,
signature_type, tag)
assert is_proof_verified(request_handler,
proof, path,
key_components, seq_no, txn_time)
def test_state_proofs_for_get_schema(request_handler):
# Adding schema
nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
seq_no = 0
txn_time = int(time.time())
identifier = "6ouriXMZkLeHsuXrN1X1fd"
schema_name = "schema_a"
schema_version = "1.0"
# data = '{"name": "schema_a", "version": "1.0"}'
schema_key = {SCHEMA_NAME: schema_name,
SCHEMA_VERSION: schema_version}
data = {**schema_key,
SCHEMA_ATTR_NAMES: ["Some_Attr", "Attr1"]}
txn = {
TXN_TYPE: SCHEMA,
DATA: data,
}
txn = append_txn_metadata(reqToTxn(Request(operation=txn,
protocolVersion=CURRENT_PROTOCOL_VERSION,
identifier=identifier)),
seq_no=seq_no, txn_time=txn_time)
txn = append_payload_metadata(txn, frm=nym)
request_handler._addSchema(txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting schema
request = Request(
operation={
TARGET_NYM: nym,
DATA: schema_key
},
signatures={},
protocolVersion=CURRENT_PROTOCOL_VERSION
)
result = request_handler.handleGetSchemaReq(request)
proof = extract_proof(result, multi_sig)
assert result[DATA] == data
data.pop(NAME)
data.pop(VERSION)
# Verifying signed state proof
path = domain.make_state_path_for_schema(nym, schema_name, schema_version)
assert is_proof_verified(request_handler,
proof, path,
data, seq_no, txn_time)
def prep_multi_sig(request_handler, nym, role, verkey, seq_no):
txn_time = int(time.time())
identifier = "6ouriXMZkLeHsuXrN1X1fd"
# Adding nym
data = {
f.IDENTIFIER.nm: nym,
ROLE: role,
VERKEY: verkey,
f.SEQ_NO.nm: seq_no,
TXN_TIME: txn_time,
}
txn = append_txn_metadata(reqToTxn(Request(operation=data,
protocolVersion=CURRENT_PROTOCOL_VERSION,
identifier=identifier)),
seq_no=seq_no, txn_time=txn_time)
txn = append_payload_metadata(txn, frm=nym)
request_handler.updateNym(nym, txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
return data, multi_sig
def get_nym_verify_proof(request_handler, nym, data, multi_sig):
request = Request(
operation={
TARGET_NYM: nym
},
signatures={},
protocolVersion=CURRENT_PROTOCOL_VERSION
)
result = request_handler.handleGetNymReq(request)
proof = extract_proof(result, multi_sig)
assert proof
if data:
assert result[DATA]
result_data = request_handler.stateSerializer.deserialize(result[DATA])
result_data.pop(TARGET_NYM, None)
assert result_data == data
# Verifying signed state proof
path = request_handler.nym_to_state_key(nym)
# If the value does not exist, serialisation should be null and
# verify_state_proof needs to be given null (None). This is done to
# differentiate between absence of value and presence of empty string value
serialised_value = request_handler.stateSerializer.serialize(data) if data else None
proof_nodes = base64.b64decode(proof[PROOF_NODES])
root_hash = base58.b58decode(proof[ROOT_HASH])
return request_handler.state.verify_state_proof(
root_hash,
path,
serialised_value,
proof_nodes,
serialized=True
)
def test_state_proofs_for_get_nym(request_handler):
nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
role = "2"
verkey = "~7TYfekw4GUagBnBVCqPjiC"
seq_no = 1
# Check for existing nym
data, multi_sig = prep_multi_sig(request_handler, nym, role, verkey, seq_no)
assert get_nym_verify_proof(request_handler, nym, data, multi_sig)
# Shuffle the bytes of nym
h = list(friendlyToHex(nym))
random.shuffle(h)
garbled_nym = hexToFriendly(bytes(h))
data[f.IDENTIFIER.nm] = garbled_nym
# `garbled_nym` does not exist, proof should verify but data is null
assert get_nym_verify_proof(request_handler, garbled_nym, None, multi_sig)
def test_no_state_proofs_if_protocol_version_less(request_handler):
nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
role = "2"
verkey = "~7TYfekw4GUagBnBVCqPjiC"
identifier = "6ouriXMZkLeHsuXrN1X1fd"
seq_no = 0
txn_time = int(time.time())
# Adding nym
data = {
f.IDENTIFIER.nm: nym,
ROLE: role,
VERKEY: verkey,
f.SEQ_NO.nm: seq_no,
TXN_TIME: txn_time,
}
txn = append_txn_metadata(reqToTxn(Request(operation=data,
protocolVersion=CURRENT_PROTOCOL_VERSION,
identifier=identifier)),
seq_no=seq_no, txn_time=txn_time)
txn = append_payload_metadata(txn, frm=nym)
request_handler.updateNym(nym, txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting nym
request = Request(
operation={
TARGET_NYM: nym
},
signatures={}
)
result = request_handler.handleGetNymReq(request)
assert STATE_PROOF not in result
|
apache-2.0
| 3,612,552,977,990,105,600
| 34.022599
| 110
| 0.611228
| false
| 3.529177
| true
| false
| false
|
hubert667/AIR
|
build/billiard/billiard/_reduction3.py
|
2
|
7954
|
#
# Module which deals with pickling of objects.
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import copyreg
import functools
import io
import os
import pickle
import socket
import sys
__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']
HAVE_SEND_HANDLE = (sys.platform == 'win32' or
(hasattr(socket, 'CMSG_LEN') and
hasattr(socket, 'SCM_RIGHTS') and
hasattr(socket.socket, 'sendmsg')))
#
# Pickler subclass
#
class ForkingPickler(pickle.Pickler):
'''Pickler subclass used by multiprocessing.'''
_extra_reducers = {}
_copyreg_dispatch_table = copyreg.dispatch_table
def __init__(self, *args):
super().__init__(*args)
self.dispatch_table = self._copyreg_dispatch_table.copy()
self.dispatch_table.update(self._extra_reducers)
@classmethod
def register(cls, type, reduce):
'''Register a reduce function for a type.'''
cls._extra_reducers[type] = reduce
@classmethod
def dumps(cls, obj, protocol=None):
buf = io.BytesIO()
cls(buf, protocol).dump(obj)
return buf.getbuffer()
loads = pickle.loads
register = ForkingPickler.register
def dump(obj, file, protocol=None):
'''Replacement for pickle.dump() using ForkingPickler.'''
ForkingPickler(file, protocol).dump(obj)
#
# Platform specific definitions
#
if sys.platform == 'win32':
# Windows
__all__ += ['DupHandle', 'duplicate', 'steal_handle']
import _winapi
def duplicate(handle, target_process=None, inheritable=False):
'''Duplicate a handle. (target_process is a handle not a pid!)'''
if target_process is None:
target_process = _winapi.GetCurrentProcess()
return _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(), handle, target_process,
0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)
def steal_handle(source_pid, handle):
'''Steal a handle from process identified by source_pid.'''
source_process_handle = _winapi.OpenProcess(
_winapi.PROCESS_DUP_HANDLE, False, source_pid)
try:
return _winapi.DuplicateHandle(
source_process_handle, handle,
_winapi.GetCurrentProcess(), 0, False,
_winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(source_process_handle)
def send_handle(conn, handle, destination_pid):
'''Send a handle over a local connection.'''
dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
conn.send(dh)
def recv_handle(conn):
'''Receive a handle over a local connection.'''
return conn.recv().detach()
class DupHandle(object):
'''Picklable wrapper for a handle.'''
def __init__(self, handle, access, pid=None):
if pid is None:
# We just duplicate the handle in the current process and
# let the receiving process steal the handle.
pid = os.getpid()
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
try:
self._handle = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(),
handle, proc, access, False, 0)
finally:
_winapi.CloseHandle(proc)
self._access = access
self._pid = pid
def detach(self):
'''Get the handle. This should only be called once.'''
# retrieve handle from process which currently owns it
if self._pid == os.getpid():
# The handle has already been duplicated for this process.
return self._handle
# We must steal the handle from the process whose pid is self._pid.
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
self._pid)
try:
return _winapi.DuplicateHandle(
proc, self._handle, _winapi.GetCurrentProcess(),
self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(proc)
else:
# Unix
__all__ += ['DupFd', 'sendfds', 'recvfds']
import array
# On MacOSX we should acknowledge receipt of fds -- see Issue14669
ACKNOWLEDGE = sys.platform == 'darwin'
def sendfds(sock, fds):
'''Send an array of fds over an AF_UNIX socket.'''
fds = array.array('i', fds)
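        # A single byte carrying len(fds) % 256 travels alongside the
        # ancillary data so the receiver can sanity-check the fd count.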
msg = bytes([len(fds) % 256])
sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
if ACKNOWLEDGE and sock.recv(1) != b'A':
raise RuntimeError('did not receive acknowledgement of fd')
def recvfds(sock, size):
'''Receive an array of fds over an AF_UNIX socket.'''
a = array.array('i')
bytes_size = a.itemsize * size
msg, ancdata, flags, addr = sock.recvmsg(
1, socket.CMSG_LEN(bytes_size),
)
if not msg and not ancdata:
raise EOFError
try:
if ACKNOWLEDGE:
sock.send(b'A')
if len(ancdata) != 1:
raise RuntimeError(
'received %d items of ancdata' % len(ancdata),
)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
if len(cmsg_data) % a.itemsize != 0:
raise ValueError
a.frombytes(cmsg_data)
assert len(a) % 256 == msg[0]
return list(a)
except (ValueError, IndexError):
pass
raise RuntimeError('Invalid data received')
def send_handle(conn, handle, destination_pid): # noqa
'''Send a handle over a local connection.'''
fd = conn.fileno()
with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s:
sendfds(s, [handle])
def recv_handle(conn): # noqa
'''Receive a handle over a local connection.'''
fd = conn.fileno()
with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s:
return recvfds(s, 1)[0]
def DupFd(fd):
'''Return a wrapper for an fd.'''
from .forking import Popen
return Popen.duplicate_for_child(fd)
#
# Try making some callable types picklable
#
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
class _C:
def f(self):
pass
register(type(_C().f), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)
#
# Make sockets picklable
#
if sys.platform == 'win32':
def _reduce_socket(s):
from .resource_sharer import DupSocket
return _rebuild_socket, (DupSocket(s),)
def _rebuild_socket(ds):
return ds.detach()
register(socket.socket, _reduce_socket)
else:
def _reduce_socket(s): # noqa
df = DupFd(s.fileno())
return _rebuild_socket, (df, s.family, s.type, s.proto)
def _rebuild_socket(df, family, type, proto): # noqa
fd = df.detach()
return socket.socket(family, type, proto, fileno=fd)
register(socket.socket, _reduce_socket)
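# --- Editor's hedged usage sketch (not part of the original module) ---
# The register() calls above fill a copyreg-style reduction table so that bound
# methods, method descriptors, functools.partial objects and sockets become
# picklable for inter-process transport. A minimal illustration of the partial
# reducer (names taken from this file; the actual pickling path is assumed to go
# through a ForkingPickler that consults this table):
#
#     import functools
#     p = functools.partial(int, base=2)
#     rebuild, args = _reduce_partial(p)   # -> (_rebuild_partial, (int, (), {'base': 2}))
#     restored = rebuild(*args)
#     assert restored('101') == 5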
|
gpl-3.0
| 4,350,371,272,115,698,000
| 30.943775
| 79
| 0.587126
| false
| 3.85555
| false
| false
| false
|
Afonasev/Blog
|
backend/posts/management/commands/fill_fake_data.py
|
1
|
2311
|
from random import choice, randint
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from faker import Faker
from backend.posts import models
fake = Faker()
def make_text(min_paragraphs, max_paragraphs):
return '\n'.join(fake.paragraphs(
nb=randint(min_paragraphs, max_paragraphs)
))
class Command(BaseCommand):
help = 'Fill fake data for dev server'
def handle(self, *args, **options):
admin = self._create_admin_user()
tag_ids = self._create_tags()
posts = self._create_posts(author=admin, tags=tag_ids)
self._create_comments(author=admin, posts=posts)
self.stdout.write(self.style.SUCCESS('Fake data filled!'))
@staticmethod
def _create_admin_user():
return get_user_model().objects.create_user(
username='admin',
password='password13',
is_staff=True,
is_superuser=True,
)
def _create_tags(self):
tag_names = set()
for _ in range(15):
tag_names.add(fake.word())
tag_ids = []
for name in tag_names:
tag = models.Tag(title=name)
tag.save()
tag_ids.append(tag.id)
return tag_ids
def _create_posts(self, author, tags):
posts = []
for _ in range(100):
post = models.Post(
author=author,
title=fake.sentence(nb_words=randint(3, 8)),
text=make_text(10, 30),
hidden=choice([True, False, False]),
)
post.save()
post_tags = set()
for _ in range(randint(3, 8)):
post_tags.add(choice(tags))
for tag in post_tags:
post.tags.add(tag)
post.save()
posts.append(post)
return posts
def _create_comments(self, author, posts):
for post in posts:
for _ in range(randint(5, 20)):
has_author = randint(1, 5) < 2
models.Comment(
post=post,
author=author if has_author else None,
username=fake.user_name() if not has_author else None,
text=make_text(1, 3),
).save()
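# Editor's hedged usage note: as a Django management command this module would
# typically be run as `python manage.py fill_fake_data` from the project root,
# assuming the app is listed in INSTALLED_APPS; the surrounding project layout is
# not shown here, so treat the invocation as illustrative.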
|
mit
| -8,112,549,477,395,559,000
| 27.182927
| 74
| 0.536132
| false
| 3.984483
| false
| false
| false
|
exaile/exaile
|
plugins/ipconsole/__init__.py
|
1
|
8594
|
# This plugin is adapted from the Python Console plugin and the IPython
# cookbook at:
# http://ipython.scipy.org/moin/Cookbook/EmbeddingInGTK
# Copyright (C) 2009-2010 Brian Parma
# Updated 2012 Brian Parma
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import logging
import sys
import site
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GLib
from xl.nls import gettext as _
from xl import event
from xl import settings as xl_settings
from xl import providers
from xlgui.widgets import menu
from xlgui import guiutil
from . import ipconsoleprefs
from . import ipython_view as ip
FONT = "Luxi Mono 10"
SETTINGS_STRING = 'plugin_ipconsole_option_set'
LOGGER = logging.getLogger(__name__)
class Quitter:
"""Simple class to handle exit, similar to Python 2.5's.
This Quitter is used to circumvent IPython's circumvention
    of the builtin Quitter, since it prevents exaile from closing."""

def __init__(self, exit_function, name):
self.exit_function = exit_function
self.name = name
def __repr__(self):
return 'Type %s() to exit.' % self.name
def __call__(self):
self.exit_function() # Passed in exit function
site.setquit() # Restore default builtins
exit() # Call builtin
class IPView(ip.IPythonView):
'''Extend IPythonView to support closing with Ctrl+D'''
__text_color = None
__background_color = None
__font = None
__css_provider = None
__text_color_str = None
__background_color_str = None
__font_str = None
__iptheme = None
def __init__(self, namespace):
ip.IPythonView.__init__(self)
event.add_ui_callback(self.__on_option_set, SETTINGS_STRING)
self.set_wrap_mode(Gtk.WrapMode.CHAR)
self.updateNamespace(namespace) # expose exaile (passed in)
# prevent exit and quit - freezes window? does bad things
self.updateNamespace({'exit': None, 'quit': None})
style_context = self.get_style_context()
self.__css_provider = Gtk.CssProvider()
style_context.add_provider(
self.__css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
# Trigger setup through options
for option in ('text_color', 'background_color', 'font'):
self.__on_option_set(
None, xl_settings, 'plugin/ipconsole/{option}'.format(option=option)
)
def __on_option_set(self, _event, settings, option):
if option == 'plugin/ipconsole/font':
pango_font_str = settings.get_option(option, FONT)
self.__font_str = guiutil.css_from_pango_font_description(pango_font_str)
GLib.idle_add(self.__update_css)
if option == 'plugin/ipconsole/text_color':
rgba_str = settings.get_option(option, 'lavender')
rgba = Gdk.RGBA()
rgba.parse(rgba_str)
self.__text_color_str = "color: " + guiutil.css_from_rgba_without_alpha(
rgba
)
GLib.idle_add(self.__update_css)
if option == 'plugin/ipconsole/background_color':
rgba_str = settings.get_option(option, 'black')
rgba = Gdk.RGBA()
rgba.parse(rgba_str)
self.__background_color_str = (
"background-color: " + guiutil.css_from_rgba_without_alpha(rgba)
)
GLib.idle_add(self.__update_css)
def __update_css(self):
if (
self.__text_color_str is None
or self.__background_color_str is None
or self.__font_str is None
):
# early initialization state: not all properties have been initialized yet
return False
data_str = "text {%s; %s;} textview {%s;}" % (
self.__background_color_str,
self.__text_color_str,
self.__font_str,
)
self.__css_provider.load_from_data(data_str.encode('utf-8'))
return False
def onKeyPressExtend(self, key_event):
if ip.IPythonView.onKeyPressExtend(self, key_event):
return True
if key_event.string == '\x04': # ctrl+d
self.destroy()
class IPythonConsoleWindow(Gtk.Window):
"""
A Gtk Window with an embedded IPython Console.
"""
__ipv = None
def __init__(self, namespace):
Gtk.Window.__init__(self)
self.set_title(_("IPython Console - Exaile"))
self.set_size_request(750, 550)
self.set_resizable(True)
self.__ipv = IPView(namespace)
self.__ipv.connect('destroy', lambda *_widget: self.destroy())
self.__ipv.updateNamespace({'self': self}) # Expose self to IPython
# make it scrollable
scrolled_window = Gtk.ScrolledWindow()
scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scrolled_window.add(self.__ipv)
scrolled_window.show_all()
self.add(scrolled_window)
event.add_ui_callback(self.on_option_set, SETTINGS_STRING)
def on_option_set(self, _event, settings, option):
if option == 'plugin/ipconsole/opacity':
if sys.platform.startswith("win32"):
# Setting opacity on Windows crashes with segfault,
# see https://bugzilla.gnome.org/show_bug.cgi?id=674449
# Ignore this option.
return
value = settings.get_option(option, 80.0)
value = value / 100
if value > 1:
value = 1
self.set_opacity(value)
class IPConsolePlugin:
"""
This class holds the IPConsole plugin itself
"""
__console_window = None
__exaile = None
def enable(self, exaile):
"""
Called when plugin is enabled, or when exaile is loaded with the plugin
on by default.
"""
self.__exaile = exaile
def on_gui_loaded(self):
"""
Called when Exaile finished loading its GUI
"""
# Trigger initial setup through options:
if xl_settings.get_option('plugin/ipconsole/autostart', False):
self.__show_console()
# add menuitem to tools menu
item = menu.simple_menu_item(
'ipconsole',
['plugin-sep'],
_('Show _IPython Console'),
callback=lambda *_args: self.__show_console(),
)
providers.register('menubar-tools-menu', item)
def teardown(self, _exaile):
"""
Called when Exaile is shutting down
"""
# if window is open, kill it
if self.__console_window is not None:
self.__console_window.destroy()
def disable(self, exaile):
"""
Called when the plugin is disabled
"""
for item in providers.get('menubar-tools-menu'):
if item.name == 'ipconsole':
providers.unregister('menubar-tools-menu', item)
break
self.teardown(exaile)
def __show_console(self):
"""
Display window when the menu item is clicked.
"""
if self.__console_window is None:
import xl
import xlgui
self.__console_window = IPythonConsoleWindow(
{'exaile': self.__exaile, 'xl': xl, 'xlgui': xlgui}
)
self.__console_window.connect('destroy', self.__console_destroyed)
self.__console_window.present()
self.__console_window.on_option_set(
None, xl_settings, 'plugin/ipconsole/opacity'
)
def __console_destroyed(self, *_args):
"""
Called when the window is closed.
"""
self.__console_window = None
def get_preferences_pane(self):
"""
Called by Exaile when ipconsole preferences pane should be shown
"""
return ipconsoleprefs
plugin_class = IPConsolePlugin
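# Editor's hedged configuration sketch: the callbacks above react to the options
# 'plugin/ipconsole/font', 'plugin/ipconsole/text_color',
# 'plugin/ipconsole/background_color' and 'plugin/ipconsole/opacity'.
# Adjusting them from code might look like this (set_option is assumed to exist on
# xl.settings, mirroring the get_option calls used in this file):
#
#     from xl import settings as xl_settings
#     xl_settings.set_option('plugin/ipconsole/font', 'Monospace 11')
#     xl_settings.set_option('plugin/ipconsole/opacity', 90.0)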
|
gpl-2.0
| 9,188,069,743,243,977,000
| 31.187266
| 86
| 0.603677
| false
| 3.951264
| false
| false
| false
|
curtisalexander/learning
|
python/talk-python/jumpstart/file-search/program.py
|
1
|
1784
|
from collections import namedtuple
import os
SearchResult = namedtuple('SearchResult',
'file, line, text')
def main():
print_header()
folder = get_folder_from_user()
if not folder:
print("Sorry we can't search that location.")
return
text = get_search_text_from_user()
if not text:
print("We can't search for nothing!")
return
matches = search_folders(folder, text)
for m in matches:
print('------- MATCH -------')
print(f'file: {m.file}')
print(f'line: {m.line}')
print(f'match: {m.text.strip()}')
print()
def print_header():
print('-----------------')
print(' FILE SEARCH ')
print('-----------------')
print()
def get_folder_from_user():
folder = input('What folder do you want to search? ')
if not folder or not folder.strip():
return None
if not os.path.isdir(folder):
return None
return os.path.abspath(folder)
def get_search_text_from_user():
text = input('What are you searching for [single phrases only]? ')
return text
def search_folders(folder, text):
items = os.listdir(folder)
for item in items:
full_item = os.path.join(folder, item)
if os.path.isdir(full_item):
yield from search_folders(full_item, text)
else:
yield from search_file(full_item, text)
def search_file(filename, search_text):
with open(filename, 'r', encoding='utf-8') as fin:
line_num = 0
for line in fin:
line_num += 1
if line.lower().find(search_text) >= 0:
m = SearchResult(line=line_num, file=filename, text=line)
yield m
if __name__ == '__main__':
main()
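# Editor's hedged usage note: running `python program.py` and answering the two
# prompts walks the chosen folder recursively through the generator chain above,
# printing one MATCH block per matching line with its 1-based line number. Note
# that search_file() opens every file as UTF-8 text, so binary or differently
# encoded files would raise a UnicodeDecodeError; the original script does not
# guard against that.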
|
mit
| -3,788,486,095,612,305,400
| 22.473684
| 73
| 0.556054
| false
| 3.811966
| false
| false
| false
|
lnls-sirius/dev-packages
|
siriuspy/siriuspy/clientconfigdb/configdb_client.py
|
1
|
9599
|
"""Define a class to communicate with configuration database API."""
import json as _json
import datetime as _datetime
from urllib import parse as _parse
from urllib.request import Request as _Request, urlopen as _urlopen
from urllib.error import URLError as _URLError
import dateutil.parser
import numpy as _np
from .. import envars as _envars
from . import _templates
class ConfigDBClient:
"""Perform operation on configuration database."""
_TIMEOUT_DEFAULT = 2.0
_INVALID_CHARACTERS = '\\/:;,?!$'
def __init__(self, url=None, config_type=None):
"""Class constructor.
Parameters
----------
url : str | None
Configuration service host address. For default 'None' value
the URL defined in siripy.envars is used.
"""
self._url = url or _envars.SRVURL_CONFIGDB
self._config_type = config_type
@property
def config_type(self):
"""Type of configuration."""
return self._config_type
@config_type.setter
def config_type(self, name):
if isinstance(name, str):
self._config_type = name
@property
def url(self):
"""Server URL."""
return self._url
@property
def connected(self):
"""Return connection state."""
try:
self.get_dbsize()
except ConfigDBException as err:
return not err.server_code == -2
return True
def get_dbsize(self):
"""Return estimated size of configuration database."""
return self._make_request(stats=True)['size']
def get_nrconfigs(self):
"""Return estimated size of configuration database."""
return self._make_request(stats=True)['count']
def get_config_types(self):
"""Get configuration types existing as database entries."""
return self._make_request()
@staticmethod
def get_config_types_from_templates():
"""Return list of configuration types as defined in templates."""
return list(_templates.get_config_types())
def find_configs(self,
name=None,
begin=None,
end=None,
config_type=None,
discarded=False):
"""Find configurations matching search criteria.
Parameters
----------
discarded : True | False (default) | None
If True, return only discarded configurations, if False, return
only configurations in use. If None, return all configurations
matching the other criteria.
"""
config_type = self._process_config_type(config_type)
# build search dictionary
find_dict = dict(config_type=config_type)
if name is not None:
find_dict['name'] = name
if begin is not None or end is not None:
find_dict['created'] = {}
if begin is not None:
find_dict['created']['$gte'] = begin
if end is not None:
find_dict['created']['$lte'] = end
return self._make_request(
config_type=config_type, discarded=discarded, data=find_dict)
def get_config_value(self, name, config_type=None, discarded=False):
"""Get value field of a given configuration."""
config_type = self._process_config_type(config_type)
return self._make_request(
config_type=config_type, name=name, discarded=discarded)['value']
def get_config_info(self, name, config_type=None, discarded=False):
"""Get information of a given configuration."""
config_type = self._process_config_type(config_type)
res = self.find_configs(
name=name, config_type=config_type, discarded=discarded)
if not res:
raise ConfigDBException(
                {'code': 404, 'message': 'Configuration not found.'})
return res[0]
def rename_config(self, oldname, newname, config_type=None):
"""Rename configuration in database."""
config_type = self._process_config_type(config_type)
if not isinstance(newname, str):
raise TypeError(
'Config name must be str, not {}!'.format(type(newname)))
if not self.check_valid_configname(newname):
raise ValueError("There are invalid characters in config name!")
return self._make_request(
config_type=config_type, name=oldname, newname=newname,
method='POST')
def insert_config(self, name, value, config_type=None):
"""Insert configuration into database."""
config_type = self._process_config_type(config_type)
if not isinstance(name, str):
raise TypeError(
'Config name must be str, not {}!'.format(type(name)))
if not self.check_valid_configname(name):
raise ValueError("There are invalid characters in config name!")
if not self.check_valid_value(value, config_type=config_type):
raise TypeError('Incompatible configuration value!')
self._make_request(
config_type=config_type, name=name, method='POST', data=value)
def delete_config(self, name, config_type=None):
"""Mark a valid configuration as discarded."""
config_type = self._process_config_type(config_type)
return self._make_request(
config_type=config_type, name=name, method='DELETE')
def retrieve_config(self, name, config_type=None):
"""Mark a discarded configuration as valid."""
config_type = self._process_config_type(config_type)
return self._make_request(
config_type=config_type, name=name, discarded=True, method='POST')
def get_value_from_template(self, config_type=None):
"""Return value of a configuration type."""
config_type = self._process_config_type(config_type)
return _templates.get_template(config_type)
def check_valid_value(self, value, config_type=None):
"""Check whether values data corresponds to a configuration type."""
config_type = self._process_config_type(config_type)
return _templates.check_value(config_type, value)
@classmethod
def check_valid_configname(cls, name):
"Check if `name` is a valid name for configurations."
return not set(name) & set(cls._INVALID_CHARACTERS)
@staticmethod
def conv_timestamp_txt_2_flt(timestamp):
"""Convert timestamp format from text to float."""
return dateutil.parser.parse(timestamp).timestamp()
@staticmethod
def conv_timestamp_flt_2_txt(timestamp):
"""Convert timestamp format from float to text."""
return str(_datetime.datetime.fromtimestamp(timestamp))
# --- private methods ---
def _process_config_type(self, config_type):
config_type = config_type or self._config_type
if not config_type:
raise ValueError(
'You must define a `config_type` attribute or' +
' provide it in method call.')
return config_type
def _make_request(self, method='GET', data=None, **kwargs):
try:
return self._request(method, data, **kwargs)
except ConfigDBException as err:
if err.server_code == -2:
self._rotate_server_url()
return self._request(method, data, **kwargs)
else:
raise err
def _request(self, method='GET', data=None, **kwargs):
url = self._create_url(**kwargs)
if data is None:
request = _Request(url=url, method=method)
else:
request = _Request(
url=url, method=method,
headers={"Content-Type": "application/json"},
data=_json.dumps(data, default=_jsonify_numpy).encode())
try:
url_conn = _urlopen(
request, timeout=ConfigDBClient._TIMEOUT_DEFAULT)
response = _json.loads(url_conn.read().decode("utf-8"))
except _json.JSONDecodeError:
response = {"code": -1, "message": "JSON decode error"}
except _URLError as err:
response = {'code': -2, 'message': str(err)}
# print(response)
if response['code'] != 200:
raise ConfigDBException(response)
return response['result']
def _rotate_server_url(self):
if self._url != _envars.SRVURL_CONFIGDB_2:
self._url = _envars.SRVURL_CONFIGDB_2
else:
self._url = _envars.SRVURL_CONFIGDB
def _create_url(self, config_type=None, name=None, discarded=False,
stats=False, newname=None):
url = self.url
if stats:
return url + '/stats'
url += '/configs'
if newname:
url += '/rename'
if discarded:
url += '/discarded'
if config_type:
url += '/' + config_type
if name:
url += '/' + name
if newname:
url += '/' + newname
return _parse.quote(url, safe='/:')
class ConfigDBException(Exception):
"""Default exception raised for configDB server errors."""
def __init__(self, response):
"""."""
super().__init__('{code:d}: {message:s}.'.format(**response))
self.server_code = response['code']
self.server_message = response['message']
def _jsonify_numpy(obj):
if isinstance(obj, _np.ndarray):
return obj.tolist()
raise TypeError('Object is not JSON serializable.')
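# --- Editor's hedged usage sketch (illustrative, not part of the original module) ---
# A minimal client session, assuming a reachable configuration server at the URL
# taken from siriuspy.envars and an existing configuration type:
#
#     clt = ConfigDBClient(config_type='global_config')   # 'global_config' is illustrative
#     if clt.connected:
#         names = [info['name'] for info in clt.find_configs()]
#         value = clt.get_config_value(names[0]) if names else None
#
# insert_config() validates names against _INVALID_CHARACTERS and values against the
# templates module before POSTing, so arbitrary payloads are rejected client-side.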
|
gpl-3.0
| -8,357,856,691,299,465,000
| 34.420664
| 78
| 0.59277
| false
| 4.314157
| true
| false
| false
|
ttreeagency/PootleTypo3Org
|
pootle/apps/pootle_store/util.py
|
1
|
6417
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2012 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import copy
import os
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from pootle_misc.aggregate import sum_column
from pootle_misc.util import dictsum
# Unit States
#: Unit is no longer part of the store
OBSOLETE = -100
#: Empty unit
UNTRANSLATED = 0
#: Marked as fuzzy, typically means translation needs more work
FUZZY = 50
#: Unit is fully translated
TRANSLATED = 200
# Map for retrieving natural names for unit states
STATES_MAP = {
OBSOLETE: _("Obsolete"),
UNTRANSLATED: _("Untranslated"),
FUZZY: _("Needs work"),
TRANSLATED: _("Translated"),
}
def add_trailing_slash(path):
"""If path does not end with /, add it and return."""
if len(path) > 0 and path[-1] == os.sep:
return path
else:
return path + os.sep
def relative_real_path(p):
if p.startswith(settings.PODIRECTORY):
return p[len(add_trailing_slash(settings.PODIRECTORY)):]
else:
return p
def absolute_real_path(p):
if not p.startswith(settings.PODIRECTORY):
return os.path.join(settings.PODIRECTORY, p)
else:
return p
empty_quickstats = {'fuzzy': 0,
'fuzzysourcewords': 0,
'review': 0,
'total': 0,
'totalsourcewords': 0,
'translated': 0,
'translatedsourcewords': 0,
'translatedtargetwords': 0,
'untranslated': 0,
'untranslatedsourcewords': 0,
'errors': 0}
def statssum(queryset, empty_stats=empty_quickstats):
totals = empty_stats
for item in queryset:
try:
totals = dictsum(totals, item.getquickstats())
except:
totals['errors'] += 1
return totals
empty_completestats = {0: {u'isfuzzy': 0,
'errors': 0} }
def completestatssum(queryset, empty_stats=empty_completestats):
totals = copy.deepcopy(empty_stats)
for item in queryset:
try:
item_totals = item.getcompletestats()
for cat in set(item_totals) | set(totals):
totals[cat] = dictsum(totals.get(cat, {}),
item_totals.get(cat, {}))
except:
totals[0]['errors'] += 1
return totals
def calculate_stats(units):
"""Calculate translation statistics for a given `units` queryset."""
total = sum_column(units,
['source_wordcount'], count=True)
untranslated = sum_column(units.filter(state=UNTRANSLATED),
['source_wordcount'], count=True)
fuzzy = sum_column(units.filter(state=FUZZY),
['source_wordcount'], count=True)
translated = sum_column(units.filter(state=TRANSLATED),
['source_wordcount', 'target_wordcount'],
count=True)
result = {'errors': 0}
result['total'] = total['count']
if result['total'] == 0:
result['totalsourcewords'] = 0
else:
result['totalsourcewords'] = total['source_wordcount']
result['fuzzy'] = fuzzy['count']
if result['fuzzy'] == 0:
result['fuzzysourcewords'] = 0
else:
result['fuzzysourcewords'] = fuzzy['source_wordcount']
result['untranslated'] = untranslated['count']
if result['untranslated'] == 0:
result['untranslatedsourcewords'] = 0
else:
result['untranslatedsourcewords'] = untranslated['source_wordcount']
result['translated'] = translated['count']
if result['translated'] == 0:
result['translatedsourcewords'] = 0
result['translatedtargetwords'] = 0
else:
result['translatedsourcewords'] = translated['source_wordcount']
result['translatedtargetwords'] = translated['target_wordcount']
return result
def suggestions_sum(queryset):
total = 0
for item in queryset:
total += item.get_suggestion_count()
return total
def find_altsrcs(unit, alt_src_langs, store=None, project=None):
from pootle_store.models import Unit
store = store or unit.store
project = project or store.translation_project.project
altsrcs = Unit.objects.filter(
unitid_hash=unit.unitid_hash,
store__translation_project__project=project,
store__translation_project__language__in=alt_src_langs,
state=TRANSLATED) \
.select_related(
'store', 'store__translation_project',
'store__translation_project__language')
if project.get_treestyle() == 'nongnu':
altsrcs = altsrcs.filter(store__name=store.name)
return altsrcs
def get_sugg_list(unit):
"""Get suggested translations and rated scores for the given unit.
:return: List of tuples containing the suggestion and the score for
it in case it's a terminology project. Otherwise the score
part is filled with False values.
"""
sugg_list = []
scores = {}
suggestions = unit.get_suggestions()
# Avoid the votes query if we're not editing terminology
if (suggestions and (unit.store.is_terminology or
unit.store.translation_project.project.is_terminology)):
from voting.models import Vote
scores = Vote.objects.get_scores_in_bulk(suggestions)
for sugg in suggestions:
score = scores.get(sugg.id, False)
sugg_list.append((sugg, score))
return sugg_list
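# Editor's hedged usage sketch: calculate_stats() expects a Unit queryset, e.g.
# (the filter below is illustrative):
#
#     from pootle_store.models import Unit
#     stats = calculate_stats(Unit.objects.filter(store=store))
#     if stats['totalsourcewords']:
#         percent = 100.0 * stats['translatedsourcewords'] / stats['totalsourcewords']
#
# The returned keys largely mirror empty_quickstats above, so results can be combined
# with statssum() totals via pootle_misc.util.dictsum.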
|
gpl-2.0
| 8,377,143,370,565,134,000
| 30.455882
| 76
| 0.616332
| false
| 4.013133
| false
| false
| false
|
steven-murray/pydftools
|
pydftools/model.py
|
1
|
4956
|
"""
A module for defining generative distribution function models.
All models *must* be subclassed from :class:`~Model`, which provides the abstract base methods that subclasses are required to implement.
"""
import numpy as np
from .utils import numerical_jac, numerical_hess
class Model(object):
"""
Base class defining a generative distribution function model
    All models *must* be subclassed from this base class, which provides the abstract methods that subclasses are required to implement.
The primary method is :meth:`~gdf`, which defines the generative distribution, though the class also provides
information about the parameters and other useful things.
Parameters
----------
p0 : sequence
A vector of parameters to use as the default for any methods that require them.
Examples
--------
Evaluate and plot a Schechter function
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(7,11,100)
>>> mass = 10**x
>>> parameters = (-2,10,-1.5)
>>> model = Schechter(parameters)
>>> plt.plot(mass, model.gdf(x, parameters))
>>> plt.xscale('log')
>>> plt.yscale('log')
Any model can be inspected before instantiation. Its default parameters are (using Schechter as an example):
    >>> Schechter.p0
Its equation is
>>> Schechter.gdf_equation
And the names of its parameters are
>>> Schechter.names_text
"""
"Latex-Equation for gdf"
gdf_equation = None
"Text-friendly parameter names"
names_text = None
"Latex-friendly parameters names"
names = None
"Default value for p0 for the model"
p0 = None
def __init__(self, p0=None):
if p0 is not None:
self.p0 = p0
if not hasattr(self, "n_param"):
if self.names is not None:
self.n_param = len(self.names)
elif self.names_text is not None:
                self.n_param = len(self.names_text)
else:
raise ValueError("Model has not specified the number of parameters")
def gdf(self, x, p):
"""
The generative distribution function.
Parameters
----------
x : array-like
The n-dimensional variate.
p : tuple
The parameters of the distribution.
Returns
-------
phi : array-like
Array of same size as `x`, with value at each point.
"""
pass
def gdf_jacobian(self, x, p):
"""
The jacobian of the GDF as a function of x at point p.
"""
jac = numerical_jac(lambda p: self.gdf(x, p), p)
return jac
def gdf_hessian(self, x, p):
"""
        The hessian of the GDF as a function of x at point p.
"""
return numerical_hess(lambda p: self.gdf(x, p), p)
class Schechter(Model):
"""
A Schechter function model.
"""
p0 = (-2.0, 11.0, -1.3)
names_text = ["log_10 (phi_star)", "log_10 (M_star)", "alpha"]
names = [r"$\log_{10} \phi_\star$", r"$\log_{10} M_\star$", r"$\alpha$"]
gdf_equation = r"$\frac{dN}{dVdx} = \log(10) \phi_\star \mu^{\alpha+1} \exp(-\mu)$, where $\mu = 10^{x - \log_{10} M_\star}$"
def gdf(self, x, p):
mu = 10 ** (x - p[1])
return np.log(10) * 10 ** p[0] * mu ** (p[2] + 1) * np.exp(-mu)
def gdf_jacobian(self, x, p):
g = self.gdf(x, p)
return (
np.log(10)
* g
* np.array([np.ones_like(x), (-p[2] - 1) + 10 ** (x - p[1]), (x - p[1])])
)
def gdf_hessian(self, x, p):
g = self.gdf(x, p)
jac = self.gdf_jacobian(x, p)
p00 = jac[0]
p01 = jac[1]
p02 = jac[2]
p22 = jac[2] * (x - p[1])
p11 = (
jac[1] * (-p[2] - 1)
- np.log(10) * 10 ** (x - p[1]) * g
+ 10 ** (x - p[1]) * jac[1]
)
p12 = jac[1] * x - g - p[1] * jac[1]
return np.log(10) * np.array(
[[p00, p01, p02], [p01, p11, p12], [p02, p12, p22]]
)
class MRP(Model):
"""
An MRP model (see Murray, Robotham, Power, 2017)
"""
p0 = (-2.0, 11.0, -1.0, 1)
names_text = ["log_10 (phi_star)", "log_10 (M_star)", "alpha", "beta"]
names = [r"$\log_{10} \phi_\star$", r"$\log_{10} M_\star$", r"$\alpha$", r"$\beta$"]
gdf_equation = r"$\frac{dN}{dVdx} = \log(10) \beta \phi_\star \mu^{\alpha+1} \exp(-\mu^\beta)$, where $\mu = 10^{x - \log_{10} M_\star}$"
def gdf(self, x, p):
mu = 10 ** (x - p[1])
return (
np.log(10) * p[3] * 10 ** p[0] * mu ** (p[2] + 1) * np.exp(-mu ** abs(p[3]))
)
class PL(Model):
"""
A power-law model.
"""
p0 = (2.0, -1.0)
names_text = ("log_10(A)", "alpha")
names = (r"$\log_{10}A$", r"$\alpha$")
gdf_equation = r"$\frac{dN}{dVdx} = A 10^{\alpha x}$"
def gdf(self, x, p):
return 10 ** p[0] * (10 ** (p[1] * x))
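# Editor's hedged usage sketch: every subclass inherits numerical jacobian/hessian
# fallbacks from Model, so the analytic Schechter jacobian can be cross-checked
# against the generic finite-difference version (tolerances are illustrative and
# depend on the step size used by utils.numerical_jac):
#
#     x = np.linspace(8.0, 11.0, 5)
#     model = Schechter()
#     analytic = model.gdf_jacobian(x, model.p0)
#     numeric = Model.gdf_jacobian(model, x, model.p0)
#     # np.allclose(analytic, numeric, rtol=1e-2) is expected to hold approximately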
|
mit
| 2,237,806,698,947,694,600
| 25.934783
| 141
| 0.521186
| false
| 3.150668
| false
| false
| false
|
ControCurator/controcurator
|
cronjobs/clusterComments.py
|
1
|
25964
|
import numpy as np
import pandas as pd
import nltk
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from bs4 import BeautifulSoup
import re
import os
import codecs
from sklearn import feature_extraction
from sklearn.cluster import KMeans
from pprint import pprint
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import cosine_similarity
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
from textwrap import wrap
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from scipy.spatial import distance
from elasticsearch import Elasticsearch
from elasticsearch_dsl.connections import connections
connections.create_connection(hosts=['http://controcurator.org:80/ess'])
es = Elasticsearch(
['http://controcurator.org/ess/'],
port=80)
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
'''
query = {
"query": {
"bool": {
"must": [
{
"match_all": {}
}
]
}
},
"from": 0,
"size": 400
}
response = es.search(index="controcurator", doc_type="article", body=query)
'''
#article = es.get(index="controcurator", doc_type="article",id="58ed3daee4b0e0ec04effff7")
stopwords = nltk.corpus.stopwords.words('english')
stemmer = SnowballStemmer("english")
#response['hits']['hits'] = [hit for hit in response['hits']['hits'] if 'comments' in hit['_source']]
#response['hits']['hits'].sort(key=lambda d: len(d['_source']['comments']), reverse=True)
def tokenize_and_stem(text):
    # first tokenize by sentence, then by word to ensure that punctuation is caught as its own token
tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
stems = [stemmer.stem(t) for t in filtered_tokens]
return stems
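# Editor's hedged illustration: tokenize_and_stem("The players were running") would
# return roughly ['the', 'player', 'were', 'run'] with the English Snowball stemmer;
# exact tokens depend on the NLTK tokenizer and stemmer versions.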
tfidf_vectorizer = TfidfVectorizer(max_df=0.9, max_features=200000,
min_df=0.1, stop_words='english',
use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,3))
fcluster, axcluster = plt.subplots(6, 8,figsize=(24, 16))
fsenti, axsenti = plt.subplots(6, 8,figsize=(24, 16))
ftype, axtype = plt.subplots(6, 8,figsize=(24, 16))
#fig, ax = plt.subplots(figsize=(20, 10)) # set size
col = 0
row = 0
cluster_colors = {0: '#1b9e77', 1: '#d95f02', 2: '#7570b3', 3: '#e7298a', 4: '#66a61e'}
type_colors = {'guardian': '#FF9900', 'twitter': '#000099'}
senti_colors = {'neg': '#CC0000', 'neu': '#CCCCCC', 'pos' : '#00CC00'}
def classifySentiment(score):
if score < 0:
return 'neg'
elif score > 0:
return 'pos'
else:
return 'neu'
articles = ['https://www.theguardian.com/commentisfree/2017/apr/11/working-class-public-spaces-musee-d-orsay',
'https://www.theguardian.com/football/2017/apr/11/juventus-barcelona-champions-league-quarter-final-match-report',
'https://www.theguardian.com/world/2017/apr/11/us-defense-syria-chemical-weapons-attacks-assad-regime',
'https://www.theguardian.com/society/2017/apr/11/parents-fighting-to-keep-baby-charlie-gard-life-support-lose-high-court-battle',
'https://www.theguardian.com/football/2017/apr/11/borussia-dortmund-explosion-team-bus',
'https://www.theguardian.com/education/2017/apr/12/new-free-schools-despite-secondary-staff-cuts',
'https://www.theguardian.com/politics/2017/mar/21/martin-mcguinness-northern-ireland-former-deputy-first-minister-dies',
'https://www.theguardian.com/politics/2017/apr/12/foreign-states-may-have-interfered-in-brexit-vote-report-says',
'https://www.theguardian.com/us-news/2017/apr/11/homeland-security-searches-electronics-border',
'https://www.theguardian.com/environment/2017/mar/22/princess-anne-backs-gm-crops-livestock-unlike-prince-charles',
'https://www.theguardian.com/music/2017/apr/11/palestine-music-expo-pmx-musicians-shaking-up-the-occupied-territories',
'https://www.theguardian.com/world/2017/apr/11/g7-rejects-uk-call-for-sanctions-against-russia-and-syria',
'https://www.theguardian.com/commentisfree/2017/apr/11/frontline-brexit-culture-wars-ask-comedian-al-murray',
'https://www.theguardian.com/news/2017/apr/11/painting-a-new-picture-of-the-little-ice-age-weatherwatch',
'https://www.theguardian.com/us-news/2017/apr/11/detroit-michigan-500-dollar-house-rust-belt-america',
'https://www.theguardian.com/global-development/2017/apr/11/worrying-trend-as-aid-money-stays-in-wealthiest-countries',
'https://www.theguardian.com/society/2017/apr/11/recorded-childhood-cancers-rise-worldwide-world-health-organization',
'https://www.theguardian.com/commentisfree/2016/dec/08/modern-day-hermits-share-experiences',
'https://www.theguardian.com/football/2017/mar/22/ronnie-moran-liverpool-dies',
'https://www.theguardian.com/lifeandstyle/2017/apr/11/vision-thing-how-babies-colour-in-the-world',
'https://www.theguardian.com/world/2017/apr/11/nurses-grant-dying-man-final-wish-cigarette-glass-wine',
'https://www.theguardian.com/business/2017/apr/11/labour-declare-war-late-payers-marks-spencer-jeremy-corbyn',
'https://www.theguardian.com/science/2017/apr/12/scientists-unravel-mystery-of-the-loose-shoelace',
'https://www.theguardian.com/us-news/2017/apr/11/united-airlines-shares-plummet-passenger-removal-controversy',
'https://www.theguardian.com/business/2017/apr/11/judges-reject-us-bankers-claim-to-be-randy-work-genius-in-divorce-case',
'https://www.theguardian.com/business/2017/apr/12/tesco-profits-1bn-growth-supermarket',
'https://www.theguardian.com/money/2017/apr/11/probate-fees-plan-is-daft-as-well-as-devious',
'https://www.theguardian.com/commentisfree/2017/apr/11/donald-trump-russia-rex-tillersons-visit-syria',
'https://www.theguardian.com/environment/2017/apr/12/uk-butterflies-worst-hit-in-2016-with-70-of-species-in-decline-study-finds',
'https://www.theguardian.com/business/2017/apr/11/developing-countries-demands-for-better-life-must-be-met-says-world-bank-head',
'https://www.theguardian.com/politics/2017/apr/12/devon-and-cornwall-pcc-expenses-inquiry-prosecutors',
'https://www.theguardian.com/politics/shortcuts/2017/apr/11/deep-england-brexit-britain',
'https://www.theguardian.com/society/2017/apr/11/uk-supreme-court-denies-tobacco-firms-permission-for-plain-packaging-appeal',
'https://www.theguardian.com/society/2017/mar/21/dawn-butler-stood-up-for-deaf-people-but-we-need-more-than-gestures',
'https://www.theguardian.com/technology/2017/apr/11/gordon-ramsay-father-in-law-admits-hacking-company-computers',
'https://www.theguardian.com/tv-and-radio/2017/mar/20/richard-hammond-injured-in-grand-tour-crash-in-mozambique',
'https://www.theguardian.com/us-news/2017/apr/11/sean-spicer-hitler-chemical-weapons-holocaust-assad',
'https://www.theguardian.com/science/2017/mar/22/face-medieval-cambridge-man-emerges-700-years-after-death',
'https://www.theguardian.com/society/2017/mar/22/new-alzheimers-test-can-predict-age-when-disease-will-appear',
'https://www.theguardian.com/world/2017/apr/11/national-archives-mi5-file-new-zealand-diplomat-paddy-costello-kgb-spy',
'https://www.theguardian.com/australia-news/2017/mar/22/british-war-veteran-granted-permanent-residency-in-australia-ending-visa-drama',
'https://www.theguardian.com/books/2017/apr/11/x-men-illustrator-alleged-anti-christian-messages-marvel-ardian-syaf',
'https://www.theguardian.com/business/2017/apr/12/burger-king-ok-google-commercial',
'https://www.theguardian.com/business/2017/apr/12/edf-customers-price-rise-electricity-gas-energy',
'https://www.theguardian.com/business/2017/apr/12/ship-oil-rig-pioneer-spirit-shell-north-sea-decommissioning',
'https://www.theguardian.com/business/2017/mar/22/asian-shares-drop-investors-fear-trump-wont-deliver-promises',
'https://www.theguardian.com/football/2017/apr/11/tony-adams-vows-to-give-granada-players-a-kick-up-the-arse',
'https://www.theguardian.com/football/2017/mar/22/football-transfer-rumours-jermain-defoe-back-to-west-ham',
'https://www.theguardian.com/global-development/2017/apr/11/india-acts-to-help-acid-attack-victims',
'https://www.theguardian.com/money/2017/apr/11/student-loan-interest-rate-rise-uk-inflation-brexit',
'https://www.theguardian.com/uk-news/2017/mar/17/coroner-warns-of-dangers-after-man-electrocuted-in-bath-while-charging-phone',
'https://www.theguardian.com/business/2017/mar/22/london-taxi-company-coventry-electric-cabs-jobs-brexit',
'https://www.theguardian.com/commentisfree/2016/dec/14/experiences-accessing-mental-health-services-uk',
'https://www.theguardian.com/commentisfree/2017/apr/11/france-left-europe-jean-luc-melenchon-presidential-election',
'https://www.theguardian.com/commentisfree/2017/apr/11/sean-spicers-hitler-holocaust-speak-volumes',
'https://www.theguardian.com/commentisfree/2017/apr/11/united-airlines-flying-while-asian-fear',
'https://www.theguardian.com/environment/2017/mar/22/country-diary-long-mynd-shropshire-light-spout-waterfall',
'https://www.theguardian.com/football/2017/apr/11/borussia-dortmund-shock-team-bus-explosions',
'https://www.theguardian.com/football/2017/mar/17/stewart-downing-middlesbrough-karanka-row-agnew',
'https://www.theguardian.com/football/2017/mar/22/which-football-manager-has-been-sacked-by-one-club-the-most-times',
'https://www.theguardian.com/music/2017/mar/16/ed-sheeran-headline-sunday-night-glastonbury-2017',
'https://www.theguardian.com/sport/2017/apr/11/pennsylvania-woman-jail-threats-youth-football-league-officials',
'https://www.theguardian.com/sport/blog/2017/mar/22/talking-horses-best-wednesday-bets-for-warwick-and-newcastle',
'https://www.theguardian.com/technology/2017/mar/17/youtube-and-google-search-for-answers',
'https://www.theguardian.com/tv-and-radio/2017/mar/19/neighbours-tv-soap-could-disappear-from-british-screens',
'https://www.theguardian.com/uk-news/2017/apr/11/boris-johnson-full-support-failure-secure-sanctions-syria-russia',
'https://www.theguardian.com/world/2017/mar/22/brussels-unveil-terror-victims-memorial-one-year-after-attacks',
'https://www.theguardian.com/world/2017/mar/22/north-korea-missile-test-failure',
'https://www.theguardian.com/business/2017/mar/16/bank-of-england-uk-interest-rates-monetary-policy-committee',
'https://www.theguardian.com/business/2017/mar/21/inflation-uk-wages-lag-behind-prices-mark-carney',
'https://www.theguardian.com/business/2017/mar/22/nervous-markets-take-fright-at-prospect-of-trump-failing-to-deliver',
'https://www.theguardian.com/commentisfree/2016/dec/21/i-lost-my-mum-seven-weeks-ago-our-readers-on-coping-with-grief-at-christmas',
'https://www.theguardian.com/commentisfree/2017/jan/06/brexit-vote-have-you-applied-for-a-second-passport',
'https://www.theguardian.com/fashion/2017/mar/22/fiorucci-why-the-disco-friendly-label-is-perfect-for-2017',
'https://www.theguardian.com/film/2017/mar/17/from-the-corner-of-the-oval-obama-white-house-movie',
'https://www.theguardian.com/film/2017/mar/22/film-franchises-terminator-sequel-arnold-schwarzenegger-die-hard-alien',
'https://www.theguardian.com/law/2017/apr/12/judge-sacked-over-online-posts-calling-his-critics-donkeys',
'https://www.theguardian.com/lifeandstyle/2017/mar/17/monopoly-board-game-new-tokens-vote',
'https://www.theguardian.com/music/2017/mar/16/stormzy-condemns-nme-for-using-him-as-poster-boy-for-depression',
'https://www.theguardian.com/music/2017/mar/21/los-angeles-police-mistake-wyclef-jean-suspect-assault-case',
'https://www.theguardian.com/politics/2017/mar/22/uk-based-airlines-told-to-move-to-europe-after-brexit-or-lose-major-routes',
'https://www.theguardian.com/society/2017/apr/11/national-social-care-service-centralised-nhs',
'https://www.theguardian.com/sport/2017/mar/17/wales-france-six-nations-world-rankings',
'https://www.theguardian.com/tv-and-radio/2017/mar/22/n-word-taboo-tv-carmichael-show-atlanta-insecure-language',
'https://www.theguardian.com/uk-news/2017/mar/16/man-dies-explosion-former-petrol-station-highgate-north-london-swains-lane',
'https://www.theguardian.com/us-news/2017/mar/17/national-weather-service-forecasting-temperatures-storms',
'https://www.theguardian.com/us-news/2017/mar/22/fbi-muslim-employees-discrimination-religion-middle-east-travel',
'https://www.theguardian.com/us-news/2017/mar/22/zapier-pay-employees-move-silicon-valley-startup',
'https://www.theguardian.com/world/2017/mar/17/fleeing-from-dantes-hell-on-mount-etna',
'https://www.theguardian.com/world/2017/mar/22/gay-clergyman-jeffrey-johns-turned-down-welsh-bishop-twice-before-claims',
'https://www.theguardian.com/world/2017/mar/23/apple-paid-no-tax-in-new-zealand-for-at-least-a-decade-reports-say',
'https://www.theguardian.com/books/2017/mar/22/comics-chavez-redline-transformers-v-gi-joe',
'https://www.theguardian.com/business/2017/apr/11/uk-inflation-rate-stays-three-year-high',
'https://www.theguardian.com/commentisfree/2017/apr/12/charlie-gard-legal-aid',
'https://www.theguardian.com/commentisfree/2017/mar/22/rights-gig-economy-self-employed-worker',
'https://www.theguardian.com/media/2017/mar/14/face-off-mps-and-social-media-giants-online-hate-speech-facebook-twitter',
'https://www.theguardian.com/music/2017/apr/11/michael-buble-wife-says-son-noah-is-recovering-from-cancer',
'https://www.theguardian.com/society/2017/apr/11/bullying-and-violence-grip-out-of-control-guys-marsh-jail-dorset',
'https://www.theguardian.com/stage/2017/mar/22/trisha-brown-obituary',
'https://www.theguardian.com/travel/2017/mar/22/10-best-clubs-in-amsterdam-chosen-by-dj-experts',
'https://www.theguardian.com/us-news/2017/apr/11/us-universal-healthcare-single-payer-rallies',
'https://www.theguardian.com/us-news/2017/mar/22/us-border-agent-sexually-assaults-teenage-sisters-texas',
'https://www.theguardian.com/world/2017/apr/11/hundreds-of-refugees-missing-after-dunkirk-camp-fire',
'https://www.theguardian.com/world/2017/mar/22/unicef-condemns-sale-cambodian-breast-milk-us-mothers-firm-ambrosia-labs',
'https://www.theguardian.com/world/commentisfree/2017/mar/17/week-in-patriarchy-bbc-dad-jessica-valenti',
'https://www.theguardian.com/business/2017/mar/15/us-federal-reserve-raises-interest-rates-to-1',
'https://www.theguardian.com/business/2017/mar/21/london-cycle-courier-was-punished-for-refusing-work-after-eight-hours-in-cold',
'https://www.theguardian.com/football/2017/mar/17/tottenham-harry-kane-return-injury',
'https://www.theguardian.com/politics/2017/mar/15/browse-of-commons-explore-uk-parliament-with-first-virtual-tour',
'https://www.theguardian.com/politics/2017/mar/21/martin-mcguinness-sinn-fein-members-carry-coffin-home-in-derry',
'https://www.theguardian.com/sport/2017/mar/18/ireland-england-six-nations-dublin',
'https://www.theguardian.com/us-news/2017/mar/20/ivanka-trump-west-wing-office-security-clearance',
'https://www.theguardian.com/film/2017/mar/21/look-on-the-sweet-side-of-love-actually',
'https://www.theguardian.com/media/2017/mar/20/jamie-oliver-new-show-deal-channel-4-tv',
'https://www.theguardian.com/politics/2017/mar/16/theresa-may-vows-absolute-faith-in-hammond-after-u-turn',
'https://www.theguardian.com/politics/2017/mar/21/nicola-sturgeon-accused-of-hypocrisy-as-independence-debate-begins',
'https://www.theguardian.com/sport/2017/mar/17/jailed-transgender-fell-runner-thought-uk-athletics-was-trying-to-kill-her',
'https://www.theguardian.com/uk-news/2017/mar/16/former-marine-cleared-alexander-blackman-freed-immediately-ex-soldier-jail',
'https://www.theguardian.com/world/2017/mar/16/india-brexit-and-the-legacy-of-empire-in-africa',
'https://www.theguardian.com/world/2017/mar/18/a-good-looking-bird-the-bush-stone-curlew-that-loves-its-own-reflection',
'https://www.theguardian.com/world/2017/mar/21/electronics-ban-middle-east-flights-safety-hazards-airline-profit',
'https://www.theguardian.com/business/2017/mar/14/us-federal-reserve-interest-rates-janet-yellen-donald-trump',
'https://www.theguardian.com/business/2017/mar/16/rupert-murdoch-sky-bid-uk-ofcom',
'https://www.theguardian.com/business/2017/mar/20/us-forbids-devices-larger-cell-phones-flights-13-countries',
'https://www.theguardian.com/business/2017/mar/22/uk-ceos-national-living-wage-equality-trust-pay-gap',
'https://www.theguardian.com/football/2017/mar/17/arsene-wenger-granit-xhaka-referees',
'https://www.theguardian.com/lifeandstyle/2017/mar/17/chorizo-chicken-lemon-yoghurt-cavolo-nero-recipe-anna-hansen',
'https://www.theguardian.com/politics/2017/mar/17/george-osborne-london-evening-standard-editor-appointment-evgeny-lebedev',
'https://www.theguardian.com/uk-news/2017/mar/16/scotland-cannot-afford-to-ignore-its-deficit',
'https://www.theguardian.com/uk-news/2017/mar/17/prince-william-visits-paris-for-the-first-time-since-mother-dianas-death',
'https://www.theguardian.com/us-news/2017/mar/16/oc-actor-mischa-barton-speaks-out-sex-tapes-scandal',
'https://www.theguardian.com/world/2017/mar/15/uk-government-child-slavery-products-sold-britain-innovation-fund',
'https://www.theguardian.com/commentisfree/2017/mar/17/the-guardian-view-on-brexit-and-publishing-a-hardcore-problem',
'https://www.theguardian.com/politics/2017/mar/21/osborne-becomes-the-remainers-great-hope',
'https://www.theguardian.com/society/2017/mar/16/scotlands-exam-body-to-ensure-invigilators-get-living-wage',
'https://www.theguardian.com/society/2017/mar/18/rural-deprivation-and-ill-health-in-england-in-danger-of-being-overlooked',
'https://www.theguardian.com/sport/2017/mar/16/michael-oleary-team-not-ruling-out-return-mullins-yard-cheltenham-festival-horse-racing',
'https://www.theguardian.com/sport/2017/mar/17/ireland-v-england-lions-six-nations-rugby-union',
'https://www.theguardian.com/sport/2017/mar/18/this-is-your-night-conlans-dream-debut-wipes-out-nightmares-of-the-past',
'https://www.theguardian.com/sport/2017/mar/21/bha-dope-tests-horses-racecourse',
'https://www.theguardian.com/sport/2017/mar/21/donald-trump-colin-kaepernick-free-agent-anthem-protest',
'https://www.theguardian.com/uk-news/2017/mar/16/protect-survive-nuclear-war-republished-pamphlet',
'https://www.theguardian.com/uk-news/2017/mar/21/sisters-al-najjar-sue-cumberland-hotel-london-brutal-hammer-attack',
'https://www.theguardian.com/uk-news/2017/mar/22/what-support-does-your-employer-give-to-fathers',
'https://www.theguardian.com/artanddesign/2017/mar/21/winged-bull-and-giant-dollop-of-cream-to-adorn-trafalgar-squares-fourth-plinth',
'https://www.theguardian.com/books/2017/mar/17/the-bone-readers-jacob-ross-caribbean-thriller-jhalak-prize',
'https://www.theguardian.com/business/2017/mar/11/democrats-question-trump-conflict-of-interest-deutsche-bank-investigation-money-laundering',
'https://www.theguardian.com/business/2017/mar/17/barclays-bob-diamond-panmure-gordon',
'https://www.theguardian.com/commentisfree/2017/mar/15/brexit-was-an-english-vote-for-independence-you-cant-begrudge-the-scots-the-same',
'https://www.theguardian.com/environment/2017/mar/21/the-snow-buntings-drift-takes-them-much-further-than-somerset',
'https://www.theguardian.com/fashion/2017/mar/21/art-colour-victoria-beckham-van-gogh-fashion',
'https://www.theguardian.com/lifeandstyle/2017/mar/17/i-am-26-and-find-it-hard-to-meet-people-on-the-same-wavelength-as-me',
'https://www.theguardian.com/lifeandstyle/shortcuts/2017/mar/21/open-a-window-and-have-a-cold-shower-could-being-chilly-improve-your-health',
'https://www.theguardian.com/society/2017/mar/22/four-supersized-prisons-to-be-built-england-and-wales-elizabeth-truss-plan',
'https://www.theguardian.com/sport/2017/mar/17/ben-youngs-england-ireland-grand-slam-six-nations',
'https://www.theguardian.com/technology/2017/mar/17/google-ads-bike-helmets-adverts',
'https://www.theguardian.com/us-news/2017/mar/20/fbi-director-comey-confirms-investigation-trump-russia',
'https://www.theguardian.com/world/2017/mar/17/time-for-a-declaration-of-war-on-happiness']
# go through each file
for file in articles[0:5]:
query = {
"query": {
"constant_score": {
"filter": {
"term": {
"url": file
}
}
}
},
"from": 0,
"size": 1
}
response = es.search(index="controcurator", doc_type="article", body=query)
article = response['hits']['hits'][0]
print article['_source']['url']
print article['_id']
#for article in response['hits']['hits']:
if 'comments' not in article['_source']:
print "-- NO COMMENTS --"
continue
print len(article['_source']['comments'])
if len(article['_source']['comments']) > 500:
print "-- TOO MANY COMMENTS --"
continue
if len(article['_source']['comments']) < 50:
print "-- NOT ENOUGH COMMENTS --"
continue
# vectorization
tfidf_matrix = tfidf_vectorizer.fit_transform([c['text'] for c in article['_source']['comments']])
# clustering
num_clusters = 5
km = KMeans(n_clusters=num_clusters)
km.fit(tfidf_matrix)
centers = km.cluster_centers_
clusters = km.labels_.tolist()
# distances
similarity_distance = 1 - cosine_similarity(tfidf_matrix)
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=1)
pos = mds.fit_transform(similarity_distance)
# save results to comments
for i, cluster in enumerate(clusters):
article['_source']['comments'][i]['cluster'] = cluster
article['_source']['comments'][i]['cluster_x'] = pos[i][0]
article['_source']['comments'][i]['cluster_y'] = pos[i][1]
#for comment in article['_source']['comments']:
# print comment['cluster'],',',comment['cluster_x'],',',comment['cluster_y'],',',comment['text'].encode('UTF-8')
for c in article['_source']['comments']:
if 'type' not in c:
c['type'] = 'guardian'
data = [{'x':c['cluster_x'], 'y':c['cluster_y'], 'label':c['cluster'], 'sentiment': classifySentiment(c['sentiment']['sentiment']), 'type':c['type'], 'title':c['text'].replace('\r', '').replace('\n', '')} for c in article['_source']['comments']]
#create data frame that has the result of the MDS plus the cluster numbers and titles
clustergroups = pd.DataFrame().from_dict(data).groupby('label')
typegroups = pd.DataFrame().from_dict(data).groupby('type')
sentigroups = pd.DataFrame().from_dict(data).groupby('sentiment')
#fig, ax = plt.subplots(figsize=(20, 10)) # set size
# ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
#iterate through groups to layer the plot
#note that I use the cluster_name and cluster_color dicts with the 'name' lookup to return the appropriate color/label
# ms: marker size
for name, group in clustergroups:
axcluster[row, col].plot(group.x, group.y, marker='o', linestyle='', ms=5, color=cluster_colors[name],
mec='none')
axcluster[row, col].set_aspect('auto')
axcluster[row, col].tick_params(\
axis= 'x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off')
axcluster[row, col].tick_params(\
axis= 'y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
left='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelleft='off')
axcluster[row, col].set_title("\n".join(wrap(article['_source']['document']['title'], 30)),fontsize=8)
for name, group in typegroups:
axtype[row, col].plot(group.x, group.y, marker='o', linestyle='', ms=5, color=type_colors[name],
mec='none')
axtype[row, col].set_aspect('auto')
axtype[row, col].tick_params(\
axis= 'x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off')
axtype[row, col].tick_params(\
axis= 'y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
left='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelleft='off')
axtype[row, col].set_title("\n".join(wrap(article['_source']['document']['title'], 30)),fontsize=8)
#title.set_y(1.05)
for name, group in sentigroups:
axsenti[row, col].plot(group.x, group.y, marker='o', linestyle='', ms=5, color=senti_colors[name],
mec='none')
axsenti[row, col].set_aspect('auto')
axsenti[row, col].tick_params(\
axis= 'x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off')
axsenti[row, col].tick_params(\
axis= 'y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
left='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelleft='off')
axsenti[row, col].set_title("\n".join(wrap(article['_source']['document']['title'], 30)),fontsize=8)
#ax.legend(numpoints=1) #show legend with only 1 point
#add label in x,y position with the label as the film title
# for i in range(len(df)):
# ax.text(df.ix[i]['x'], df.ix[i]['y'], df.ix[i]['title'], size=8)
col += 1
if col > 7:
col = 0
row += 1
if row > 5:
break
print article['_source']['document']['title'].encode('UTF-8')
for name, group in sentigroups:
avgx = group.x.mean()
avgy = group.y.mean()
group['dist'] = group.apply(lambda row: float(distance.pdist([(row['x'], row['y']), (avgx, avgy)])), axis=1)
print group
print "==="
# Fine-tune figure; hide x ticks for top plots and y ticks for right plots
#plt.setp([a.get_xticklabels() for a in axarr[:,-1]], visible=False)
#plt.setp([a.get_yticklabels() for a in axarr[0,:]], visible=False)
#plt.show() #show the plot
#fcluster.savefig('img/clusters.png', dpi=200)
#ftype.savefig('img/type.png', dpi=200)
#fsenti.savefig('img/sentiment.png', dpi=200)
|
mit
| 6,142,809,010,730,646,000
| 55.689956
| 246
| 0.749615
| false
| 2.592252
| false
| false
| false
|
perrette/iis
|
setup.py
|
1
|
2732
|
#!/usr/bin/env python
"""
"""
#from distutils.core import setup
import os, sys, re
from distutils.core import setup
import warnings
with open('README.md') as file:
long_description = file.read()
#
# Track version after pandas' setup.py
#
MAJOR = 0
MINOR = 0
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
FULLVERSION = VERSION
write_version = True
if not ISRELEASED:
import subprocess
FULLVERSION += '.dev'
pipe = None
for cmd in ['git','git.cmd']:
try:
pipe = subprocess.Popen([cmd, "describe", "--always", "--match", "v[0-9]*"],
stdout=subprocess.PIPE)
(so,serr) = pipe.communicate()
if pipe.returncode == 0:
break
except:
pass
if pipe is None or pipe.returncode != 0:
# no git, or not in git dir
if os.path.exists('iis/version.py'):
warnings.warn("WARNING: Couldn't get git revision, using existing iis/version.py")
write_version = False
else:
warnings.warn("WARNING: Couldn't get git revision, using generic version string")
else:
# have git, in git dir, but may have used a shallow clone (travis does this)
rev = so.strip()
# makes distutils blow up on Python 2.7
if sys.version_info[0] >= 3:
rev = rev.decode('ascii')
if not rev.startswith('v') and re.match("[a-zA-Z0-9]{7,9}",rev):
# partial clone, manually construct version string
# this is the format before we started using git-describe
# to get an ordering on dev version strings.
rev ="v%s.dev-%s" % (VERSION, rev)
# Strip leading v from tags format "vx.y.z" to get th version string
FULLVERSION = rev.lstrip('v')
else:
FULLVERSION += QUALIFIER
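# Illustrative note (not part of the original logic): with the defaults above
# (MAJOR=MINOR=MICRO=0, ISRELEASED=False) and a hypothetical git history, the
# resulting version strings would look roughly like this:
#   full "git describe" output "v0.0.0-12-gabc1234" -> FULLVERSION "0.0.0-12-gabc1234"
#   shallow clone, bare hash "abc1234"              -> FULLVERSION "0.0.0.dev-abc1234"
#   no usable git information                       -> FULLVERSION "0.0.0.dev"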
#
# Actually important part
#
setup(name='iis',
version=FULLVERSION,
author='Mahe Perrette',
author_email='mahe.perrette@pik-potsdam.de',
description='Iterative, bayesian methods to tune an ensemble of models',
keywords=('fortran','template','namelist'),
# basic stuff here
packages = ['iis'],
long_description=long_description,
url='https://github.com/perrette/iis',
license = "MIT",
)
def write_version_py(filename=None):
cnt = """\
version = '%s'
short_version = '%s'
"""
if not filename:
#filename = os.path.join(
# os.path.dirname(__file__), 'dimarray', 'version.py')
filename = os.path.join('iis', 'version.py')
with open(filename, 'w') as a:
a.write(cnt % (FULLVERSION, VERSION))
# Write version.py to dimarray
if write_version:
write_version_py()
|
mit
| 8,472,331,363,098,328,000
| 26.877551
| 94
| 0.598097
| false
| 3.604222
| false
| false
| false
|
ericmjl/influenza-reassortment-detector
|
full_affmat.py
|
1
|
1077
|
import pandas as pd
import sys
class FullAffmatCompiler(object):
"""docstring for FullAffmatCompiler"""
def __init__(self, handle):
super(FullAffmatCompiler, self).__init__()
self.handle = handle
self.summed_affmat = pd.DataFrame()
self.current_df = None
self.affmats = dict()
def run(self):
for segment in range(1,9):
print('Currently processing segment {0}'.format(segment))
self.affmats[segment] = self.read_affmat(segment)
self.summed_affmat = self.affmats[1] + self.affmats[2] + self.affmats[3] + self.affmats[4] + self.affmats[5] + self.affmats[6] + self.affmats[7] + self.affmats[8]
self.summed_affmat.to_hdf(path_or_buf='{0} Summed Affmats.h5'.format(self.handle), key='full', mode='w')
def read_affmat(self, segment):
key = 'segment{0}'.format(segment)
return pd.read_hdf('{0} Thresholded Segment Affmats.h5'.format(self.handle), key=key)
if __name__ == '__main__':
handle = sys.argv[1]
fac = FullAffmatCompiler(handle)
fac.run()
|
mit
| -9,035,768,609,521,674,000
| 34.933333
| 170
| 0.627669
| false
| 3.130814
| false
| false
| false
|
widdowquinn/THAPBI
|
ITS_region_genomic_coverage/get_genes_from_GFF.py
|
1
|
1976
|
#!/usr/bin/env python
#author: Peter Thorpe September 2016. The James Hutton Insitute,Dundee,UK.
#Title:
#script to get the gene columns only from GFF
#imports
import os
import sys
from sys import stdin,argv
import sys
import datetime
from optparse import OptionParser
###########################################################################
def write_out_ITS_GFF(gff, out):
"""function parse and print GFF lines that
correspond to gene only """
gff_file = open(gff, "r")
out_file = open(out, "w")
for line in gff_file:
if line.startswith("#"):
continue
assert len(line.split("\t")) ==9 ,"GFF fields wrong length should be 9"
scaffold,aug,cds_type,start,stop,e,f,g,gene_info = line.split("\t")
if cds_type =="gene":
out_file.write(line)
gff_file.close()
out_file.close()
###########################################################################
if "-v" in sys.argv or "--version" in sys.argv:
print ("v0.0.1")
sys.exit(0)
usage = """Use as follows:
Title:
script to get the gene columns only from GFF
$ get_genes_from_GFF.py --gff gff.out -o out.gff
"""
parser = OptionParser(usage=usage)
parser.add_option("-g", "--gff", dest="gff", default=None,
help="predicted gene in gff3 format",
metavar="FILE")
parser.add_option("-o", "--out_file", dest="out_file",
default="ITS_GFF.out",
help="output line corresponding to genes only.")
(options, args) = parser.parse_args()
gff = options.gff
out_file = options.out_file
#run the program
if not os.path.isfile(gff):
print("sorry, couldn't open the file: " + ex.strerror + "\n")
print ("current working directory is :", os.getcwd() + "\n")
print ("files are :", [f for f in os.listdir('.')])
sys_exit("\n\nInput blast file not found: %s" % gff)
# call the top function
write_out_ITS_GFF(gff, out_file)
|
mit
| 6,784,813,885,300,704,000
| 23.395062
| 79
| 0.566296
| false
| 3.372014
| false
| false
| false
|
GermanRuizMarcos/Classical-Composer-Classification
|
code_8/classification_1.py
|
1
|
8408
|
'''
AUDIO CLASSICAL COMPOSER IDENTIFICATION BASED ON:
A SPECTRAL BANDWISE FEATURE-BASED SYSTEM
'''
import essentia
from essentia.standard import *
import glob
import numpy as np
import arff
from essentia.standard import *
from scipy import stats
# Dataset creation with specific attributes (spectral features) and a specific class (composer's name)
'''
Audio files transformed into the frequency domain through a 1024-sample STFT with 50% overlap.
The spectrum is divided into 50 mel-spaced bands.
'''
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/periods/baroque/*.wav")
fft = FFT()
melbands = MelBands(numberBands = 50)
flatness = FlatnessDB()
rolloff = RollOff()
centroid = SpectralCentroidTime()
flux = Flux()
energy = EnergyBand()
zero = ZeroCrossingRate()
spectrum = Spectrum()
w = Windowing(type = 'hann')
mfcc = MFCC()
f = open('definitive_train.txt', 'wb')
f.write('@RELATION "composer dataset"\n')
f.write('\n')
f.write('@ATTRIBUTE filename STRING\n')
f.write('@ATTRIBUTE MFCC-0 REAL\n')
f.write('@ATTRIBUTE MFCC-1 REAL\n')
f.write('@ATTRIBUTE MFCC-2 REAL\n')
f.write('@ATTRIBUTE MFCC-3 REAL\n')
f.write('@ATTRIBUTE MFCC-4 REAL\n')
f.write('@ATTRIBUTE MFCC-5 REAL\n')
f.write('@ATTRIBUTE MFCC-6 REAL\n')
f.write('@ATTRIBUTE MFCC-7 REAL\n')
f.write('@ATTRIBUTE MFCC-8 REAL\n')
f.write('@ATTRIBUTE MFCC-9 REAL\n')
f.write('@ATTRIBUTE MFCC-10 REAL\n')
f.write('@ATTRIBUTE MFCC-11 REAL\n')
f.write('@ATTRIBUTE MFCC-12 REAL\n')
f.write('@ATTRIBUTE flatness-mean REAL\n')
f.write('@ATTRIBUTE flatness-variance REAL\n')
f.write('@ATTRIBUTE rolloff-mean REAL\n')
f.write('@ATTRIBUTE rolloff-variance REAL\n')
f.write('@ATTRIBUTE centroid-mean REAL\n')
f.write('@ATTRIBUTE centroid-variance REAL\n')
f.write('@ATTRIBUTE flux-mean REAL\n')
f.write('@ATTRIBUTE flux-variance REAL\n')
f.write('@ATTRIBUTE energy-mean REAL\n')
f.write('@ATTRIBUTE energy-variance REAL\n')
f.write('@ATTRIBUTE ZCR-mean REAL\n')
f.write('@ATTRIBUTE ZCR-variance REAL\n')
f.write('@ATTRIBUTE flatness-std REAL\n')
f.write('@ATTRIBUTE flatness-hmean REAL\n')
f.write('@ATTRIBUTE period {baroque, classical, romantic}\n')
f.write('\n')
f.write('@DATA\n')
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'bach'
period = 'baroque'
f.write('%s' %audio_file.split('/')[-1].split('(')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %period)
f.write('\n')
# 2
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/periods/classical/*.wav")
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'beethoven'
period = 'classical'
f.write('%s' %audio_file.split('/')[-1].split('(')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %period)
f.write('\n')
# 3
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/periods/romantic/*.wav")
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'chopin'
period = 'romantic'
f.write('%s' %audio_file.split('/')[-1].split('(')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %period)
f.write('\n')
f.write('%\n')
f.write('%\n')
f.write('%\n')
f.close()
|
gpl-3.0
| 4,417,720,493,671,316,500
| 23.16092
| 144
| 0.614534
| false
| 2.340757
| false
| false
| false
|
juharris/tensorflow
|
tensorflow/contrib/layers/python/layers/target_column.py
|
1
|
19116
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TargetColumn abstract a single head in the model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import losses
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
@deprecated(
"2016-11-12",
"This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def regression_target(label_name=None,
weight_column_name=None,
target_dimension=1):
"""Creates a _TargetColumn for linear regression.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
target_dimension: dimension of the target for multilabels.
Returns:
An instance of _TargetColumn
"""
return _RegressionTargetColumn(loss_fn=_mean_squared_loss,
label_name=label_name,
weight_column_name=weight_column_name,
target_dimension=target_dimension)
# TODO(zakaria): Add logistic_regression_target
@deprecated(
"2016-11-12",
"This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def multi_class_target(n_classes, label_name=None, weight_column_name=None):
"""Creates a _TargetColumn for multi class single label classification.
The target column uses softmax cross entropy loss.
Args:
n_classes: Integer, number of classes, must be >= 2
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
Returns:
An instance of _MultiClassTargetColumn.
Raises:
ValueError: if n_classes is < 2
"""
if n_classes < 2:
raise ValueError("n_classes must be > 1 for classification.")
if n_classes == 2:
loss_fn = _log_loss_with_two_classes
else:
loss_fn = _softmax_cross_entropy_loss
return _MultiClassTargetColumn(loss_fn=loss_fn,
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name)
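# Minimal usage sketch (illustrative only; the variable names below are
# hypothetical and not part of this module):
#
#   target_column = multi_class_target(n_classes=3, label_name="label")
#   loss = target_column.training_loss(logits, labels, features)
#
# For n_classes == 2 the column expects a single logit per example and uses a
# sigmoid cross-entropy loss; otherwise it expects n_classes logits per example
# and uses a softmax cross-entropy loss.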
@deprecated(
"2016-11-12",
"This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def binary_svm_target(label_name=None, weight_column_name=None):
"""Creates a _TargetColumn for binary classification with SVMs.
The target column uses binary hinge loss.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
Returns:
An instance of _TargetColumn.
"""
return _BinarySvmTargetColumn(label_name=label_name,
weight_column_name=weight_column_name)
@deprecated(
"2016-11-12",
"This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
class ProblemType(object):
UNSPECIFIED = 0
CLASSIFICATION = 1
LINEAR_REGRESSION = 2
LOGISTIC_REGRESSION = 3
class _TargetColumn(object):
"""_TargetColumn is the abstraction for a single head in a model.
Args:
loss_fn: a function that returns the loss tensor.
num_label_columns: Integer, number of label columns.
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
Raises:
ValueError: if loss_fn or n_classes are missing.
"""
def __init__(self, loss_fn, num_label_columns, label_name,
weight_column_name, problem_type):
if not loss_fn:
raise ValueError("loss_fn must be provided")
if num_label_columns is None: # n_classes can be 0
raise ValueError("num_label_columns must be provided")
self._loss_fn = loss_fn
self._num_label_columns = num_label_columns
self._label_name = label_name
self._weight_column_name = weight_column_name
self._problem_type = problem_type
def logits_to_predictions(self, logits, proba=False):
    # Abstract. Subclasses must implement.
raise NotImplementedError()
def get_eval_ops(self, features, logits, targets, metrics=None):
"""Returns eval op."""
raise NotImplementedError
@property
def label_name(self):
return self._label_name
@property
def weight_column_name(self):
return self._weight_column_name
@property
def num_label_columns(self):
return self._num_label_columns
def get_weight_tensor(self, features):
if not self._weight_column_name:
return None
else:
return array_ops.reshape(
math_ops.to_float(features[self._weight_column_name]),
shape=(-1,))
@property
def problem_type(self):
return self._problem_type
def _weighted_loss(self, loss, weight_tensor):
"""Returns cumulative weighted loss."""
unweighted_loss = array_ops.reshape(loss, shape=(-1,))
weighted_loss = math_ops.mul(unweighted_loss,
array_ops.reshape(
weight_tensor, shape=(-1,)))
return weighted_loss
def training_loss(self, logits, target, features, name="training_loss"):
"""Returns training loss tensor for this head.
Training loss is different from the loss reported on the tensorboard as we
should respect the example weights when computing the gradient.
L = sum_{i} w_{i} * l_{i} / B
where B is the number of examples in the batch, l_{i}, w_{i} are individual
losses, and example weight.
Args:
logits: logits, a float tensor.
target: either a tensor for labels or in multihead case, a dict of string
to target tensor.
features: features dict.
name: Op name.
Returns:
Loss tensor.
"""
target = target[self.name] if isinstance(target, dict) else target
loss_unweighted = self._loss_fn(logits, target)
weight_tensor = self.get_weight_tensor(features)
if weight_tensor is None:
return math_ops.reduce_mean(loss_unweighted, name=name)
loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
return math_ops.reduce_mean(loss_weighted, name=name)
def loss(self, logits, target, features):
"""Returns loss tensor for this head.
The loss returned is the weighted average.
L = sum_{i} w_{i} * l_{i} / sum_{i} w_{i}
Args:
logits: logits, a float tensor.
target: either a tensor for labels or in multihead case, a dict of string
to target tensor.
features: features dict.
Returns:
Loss tensor.
"""
target = target[self.name] if isinstance(target, dict) else target
loss_unweighted = self._loss_fn(logits, target)
weight_tensor = self.get_weight_tensor(features)
if weight_tensor is None:
return math_ops.reduce_mean(loss_unweighted, name="loss")
loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
return math_ops.div(
math_ops.reduce_sum(loss_weighted),
math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
name="loss")
class _RegressionTargetColumn(_TargetColumn):
"""_TargetColumn for regression."""
def __init__(self, loss_fn, label_name, weight_column_name, target_dimension):
super(_RegressionTargetColumn, self).__init__(
loss_fn=loss_fn,
num_label_columns=target_dimension,
label_name=label_name,
weight_column_name=weight_column_name,
problem_type=ProblemType.LINEAR_REGRESSION)
def logits_to_predictions(self, logits, proba=False):
if self.num_label_columns == 1:
return array_ops.squeeze(logits, squeeze_dims=[1])
return logits
def get_eval_ops(self, features, logits, targets, metrics=None):
loss = self.loss(logits, targets, features)
result = {"loss": metrics_lib.streaming_mean(loss)}
if metrics:
predictions = self.logits_to_predictions(logits, proba=False)
result.update(_run_metrics(predictions, targets, metrics,
self.get_weight_tensor(features)))
return result
class _MultiClassTargetColumn(_TargetColumn):
"""_TargetColumn for classification."""
# TODO(zakaria): support multilabel.
def __init__(self, loss_fn, n_classes, label_name, weight_column_name):
if n_classes < 2:
raise ValueError("n_classes must be >= 2")
super(_MultiClassTargetColumn, self).__init__(
loss_fn=loss_fn,
num_label_columns=1 if n_classes == 2 else n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
problem_type=ProblemType.CLASSIFICATION)
def logits_to_predictions(self, logits, proba=False):
if self.num_label_columns == 1:
logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
if proba:
return nn.softmax(logits)
else:
return math_ops.argmax(logits, 1)
def _default_eval_metrics(self):
if self._num_label_columns == 1:
return get_default_binary_metrics_for_eval(thresholds=[.5])
return {}
def get_eval_ops(self, features, logits, targets, metrics=None):
loss = self.loss(logits, targets, features)
result = {"loss": metrics_lib.streaming_mean(loss)}
# Adds default metrics.
if metrics is None:
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics = {("accuracy", "classes"): metrics_lib.streaming_accuracy}
predictions = math_ops.sigmoid(logits)
targets_float = math_ops.to_float(targets)
default_metrics = self._default_eval_metrics()
for metric_name, metric_op in default_metrics.items():
result[metric_name] = metric_op(predictions, targets_float)
class_metrics = {}
proba_metrics = {}
for name, metric_op in six.iteritems(metrics):
if isinstance(name, tuple):
if len(name) != 2:
raise ValueError("Ignoring metric {}. It returned a tuple with "
"len {}, expected 2.".format(name, len(name)))
else:
if name[1] not in ["classes", "probabilities"]:
raise ValueError("Ignoring metric {}. The 2nd element of its "
"name should be either 'classes' or "
"'probabilities'.".format(name))
elif name[1] == "classes":
class_metrics[name[0]] = metric_op
else:
proba_metrics[name[0]] = metric_op
elif isinstance(name, str):
class_metrics[name] = metric_op
else:
raise ValueError("Ignoring metric {}. Its name is not in the correct "
"form.".format(name))
if class_metrics:
class_predictions = self.logits_to_predictions(logits, proba=False)
result.update(_run_metrics(class_predictions, targets, class_metrics,
self.get_weight_tensor(features)))
if proba_metrics:
predictions = self.logits_to_predictions(logits, proba=True)
result.update(_run_metrics(predictions, targets, proba_metrics,
self.get_weight_tensor(features)))
return result
class _BinarySvmTargetColumn(_MultiClassTargetColumn):
"""_TargetColumn for binary classification using SVMs."""
def __init__(self, label_name, weight_column_name):
def loss_fn(logits, target):
check_shape_op = control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(target), 2),
["target's shape should be either [batch_size, 1] or [batch_size]"])
with ops.control_dependencies([check_shape_op]):
target = array_ops.reshape(
target, shape=[array_ops.shape(target)[0], 1])
return losses.hinge_loss(logits, target)
super(_BinarySvmTargetColumn, self).__init__(
loss_fn=loss_fn,
n_classes=2,
label_name=label_name,
weight_column_name=weight_column_name)
def logits_to_predictions(self, logits, proba=False):
if proba:
raise ValueError(
"logits to probabilities is not supported for _BinarySvmTargetColumn")
logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
return math_ops.argmax(logits, 1)
# TODO(zakaria): use contrib losses.
def _mean_squared_loss(logits, target):
# To prevent broadcasting inside "-".
if len(target.get_shape()) == 1:
target = array_ops.expand_dims(target, dim=[1])
logits.get_shape().assert_is_compatible_with(target.get_shape())
return math_ops.square(logits - math_ops.to_float(target))
def _log_loss_with_two_classes(logits, target):
# sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
if len(target.get_shape()) == 1:
target = array_ops.expand_dims(target, dim=[1])
loss_vec = nn.sigmoid_cross_entropy_with_logits(logits,
math_ops.to_float(target))
return loss_vec
def _softmax_cross_entropy_loss(logits, target):
# Check that we got int32/int64 for classification.
if (not target.dtype.is_compatible_with(dtypes.int64) and
not target.dtype.is_compatible_with(dtypes.int32)):
raise ValueError("Target's dtype should be int32, int64 or compatible. "
"Instead got %s." % target.dtype)
# sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
if len(target.get_shape()) == 2:
target = array_ops.squeeze(target, squeeze_dims=[1])
loss_vec = nn.sparse_softmax_cross_entropy_with_logits(logits, target)
return loss_vec
def _run_metrics(predictions, targets, metrics, weights):
result = {}
targets = math_ops.cast(targets, predictions.dtype)
for name, metric in six.iteritems(metrics or {}):
if weights is not None:
result[name] = metric(predictions, targets, weights=weights)
else:
result[name] = metric(predictions, targets)
return result
@deprecated(
"2016-11-12",
"This file will be removed after the deprecation date."
"Please switch to "
"third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def get_default_binary_metrics_for_eval(thresholds):
"""Returns a dictionary of basic metrics for logistic regression.
Args:
thresholds: List of floating point thresholds to use for accuracy,
precision, and recall metrics. If None, defaults to [0.5].
Returns:
Dictionary mapping metrics string names to metrics functions.
"""
metrics = {}
metrics[_MetricKeys.PREDICTION_MEAN] = _predictions_streaming_mean
metrics[_MetricKeys.TARGET_MEAN] = _targets_streaming_mean
# Also include the streaming mean of the label as an accuracy baseline, as
# a reminder to users.
metrics[_MetricKeys.ACCURACY_BASELINE] = _targets_streaming_mean
metrics[_MetricKeys.AUC] = _streaming_auc
for threshold in thresholds:
metrics[_MetricKeys.ACCURACY_MEAN % threshold] = _accuracy_at_threshold(
threshold)
# Precision for positive examples.
metrics[_MetricKeys.PRECISION_MEAN % threshold] = _streaming_at_threshold(
metrics_lib.streaming_precision_at_thresholds, threshold)
# Recall for positive examples.
metrics[_MetricKeys.RECALL_MEAN % threshold] = _streaming_at_threshold(
metrics_lib.streaming_recall_at_thresholds, threshold)
return metrics
def _float_weights_or_none(weights):
if weights is None:
return None
return math_ops.to_float(weights)
def _targets_streaming_mean(unused_predictions, targets, weights=None):
return metrics_lib.streaming_mean(targets, weights=weights)
def _predictions_streaming_mean(predictions, unused_targets, weights=None):
return metrics_lib.streaming_mean(predictions, weights=weights)
def _streaming_auc(predictions, targets, weights=None):
return metrics_lib.streaming_auc(predictions, targets,
weights=_float_weights_or_none(weights))
def _accuracy_at_threshold(threshold):
def _accuracy_metric(predictions, targets, weights=None):
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
return metrics_lib.streaming_accuracy(predictions=threshold_predictions,
labels=targets,
weights=weights)
return _accuracy_metric
def _streaming_at_threshold(streaming_metrics_fn, threshold):
def _streaming_metrics(predictions, targets, weights=None):
precision_tensor, update_op = streaming_metrics_fn(
predictions, labels=targets, thresholds=[threshold],
weights=_float_weights_or_none(weights))
return array_ops.squeeze(precision_tensor), update_op
return _streaming_metrics
class _MetricKeys(object):
AUC = "auc"
PREDICTION_MEAN = "labels/prediction_mean"
TARGET_MEAN = "labels/actual_target_mean"
ACCURACY_BASELINE = "accuracy/baseline_target_mean"
ACCURACY_MEAN = "accuracy/threshold_%f_mean"
PRECISION_MEAN = "precision/positive_threshold_%f_mean"
RECALL_MEAN = "recall/positive_threshold_%f_mean"
|
apache-2.0
| 887,826,027,601,003,500
| 35.411429
| 80
| 0.672003
| false
| 3.928483
| false
| false
| false
|
dbhirko/ansible-modules-extras
|
cloud/vmware/vsphere_copy.py
|
1
|
6194
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vsphere_copy
short_description: Copy a file to a vCenter datastore
description:
- Upload files to a vCenter datastore
version_added: 2.0
author: Dag Wieers (@dagwieers) <dag@wieers.com>
options:
host:
description:
- The vCenter server on which the datastore is available.
required: true
login:
description:
- The login name to authenticate on the vCenter server.
required: true
password:
description:
- The password to authenticate on the vCenter server.
required: true
src:
description:
- The file to push to vCenter
required: true
datacenter:
description:
- The datacenter on the vCenter server that holds the datastore.
required: true
datastore:
description:
- The datastore on the vCenter server to push files to.
required: true
path:
description:
- The file to push to the datastore on the vCenter server.
required: true
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be
set to C(no) when no other option exists.
required: false
default: 'yes'
choices: ['yes', 'no']
notes:
- "This module ought to be run from a system that can access vCenter directly and has the file to transfer.
It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)."
- Tested on vSphere 5.5
'''
EXAMPLES = '''
- vsphere_copy: host=vhost login=vuser password=vpass src=/some/local/file datacenter='DC1 Someplace' datastore=datastore1 path=some/remote/file
transport: local
- vsphere_copy: host=vhost login=vuser password=vpass src=/other/local/file datacenter='DC2 Someplace' datastore=datastore2 path=other/remote/file
delegate_to: other_system
'''
import atexit
import urllib
import mmap
import errno
import socket
def vmware_path(datastore, datacenter, path):
''' Constructs a URL path that VSphere accepts reliably '''
path = "/folder/%s" % path.lstrip("/")
# Due to a software bug in vSphere, it fails to handle ampersand in datacenter names
# The solution is to do what vSphere does (when browsing) and double-encode ampersands, maybe others ?
datacenter = datacenter.replace('&', '%26')
if not path.startswith("/"):
path = "/" + path
params = dict( dsName = datastore )
if datacenter:
params["dcPath"] = datacenter
params = urllib.urlencode(params)
return "%s?%s" % (path, params)
def main():
module = AnsibleModule(
argument_spec = dict(
host = dict(required=True, aliases=[ 'hostname' ]),
login = dict(required=True, aliases=[ 'username' ]),
password = dict(required=True),
src = dict(required=True, aliases=[ 'name' ]),
datacenter = dict(required=True),
datastore = dict(required=True),
dest = dict(required=True, aliases=[ 'path' ]),
validate_certs = dict(required=False, default=True, type='bool'),
),
# Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable
supports_check_mode = False,
)
host = module.params.get('host')
login = module.params.get('login')
password = module.params.get('password')
src = module.params.get('src')
datacenter = module.params.get('datacenter')
datastore = module.params.get('datastore')
dest = module.params.get('dest')
validate_certs = module.params.get('validate_certs')
fd = open(src, "rb")
atexit.register(fd.close)
data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
atexit.register(data.close)
remote_path = vmware_path(datastore, datacenter, dest)
url = 'https://%s%s' % (host, remote_path)
headers = {
"Content-Type": "application/octet-stream",
"Content-Length": str(len(data)),
}
try:
r = open_url(url, data=data, headers=headers, method='PUT',
url_username=login, url_password=password, validate_certs=validate_certs,
force_basic_auth=True)
except socket.error, e:
if isinstance(e.args, tuple) and e[0] == errno.ECONNRESET:
# VSphere resets connection if the file is in use and cannot be replaced
module.fail_json(msg='Failed to upload, image probably in use', status=None, errno=e[0], reason=str(e), url=url)
else:
module.fail_json(msg=str(e), status=None, errno=e[0], reason=str(e), url=url)
except Exception, e:
error_code = -1
try:
if isinstance(e[0], int):
error_code = e[0]
except KeyError:
pass
module.fail_json(msg=str(e), status=None, errno=error_code, reason=str(e), url=url)
status = r.getcode()
if 200 <= status < 300:
module.exit_json(changed=True, status=status, reason=r.msg, url=url)
else:
length = r.headers.get('content-length', None)
if r.headers.get('transfer-encoding', '').lower() == 'chunked':
chunked = 1
else:
chunked = 0
module.fail_json(msg='Failed to upload', errno=None, status=status, reason=r.msg, length=length, headers=dict(r.headers), chunked=chunked, url=url)
# Import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
|
gpl-3.0
| -8,349,510,817,290,760,000
| 34.597701
| 155
| 0.656926
| false
| 3.825818
| false
| false
| false
|
TAJaroszewski/lma_contrail_monitoring
|
deployment_scripts/puppet/modules/lma_collector/files/collectd/openstack_keystone.py
|
1
|
3086
|
#!/usr/bin/python
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Collectd plugin for getting statistics from Keystone
import collectd
import collectd_base as base
import collectd_openstack as openstack
PLUGIN_NAME = 'keystone'
INTERVAL = openstack.INTERVAL
class KeystoneStatsPlugin(openstack.CollectdPlugin):
""" Class to report the statistics on Keystone service.
number of tenants, users broken down by state
number of roles
"""
@base.read_callback_wrapper
def read_callback(self):
def groupby(d):
return 'enabled' if d.get('enabled') else 'disabled'
# tenants
r = self.get('keystone', 'tenants')
if not r:
self.logger.warning('Could not find Keystone tenants')
return
tenants_details = r.json().get('tenants', [])
status = self.count_objects_group_by(tenants_details,
group_by_func=groupby)
for s, nb in status.iteritems():
self.dispatch_value('tenants', nb, meta={'state': s})
# users
r = self.get('keystone', 'users')
if not r:
self.logger.warning('Could not find Keystone users')
return
users_details = r.json().get('users', [])
status = self.count_objects_group_by(users_details,
group_by_func=groupby)
for s, nb in status.iteritems():
self.dispatch_value('users', nb, meta={'state': s})
# roles
r = self.get('keystone', 'OS-KSADM/roles')
if not r:
self.logger.warning('Could not find Keystone roles')
return
roles = r.json().get('roles', [])
self.dispatch_value('roles', len(roles))
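    # Illustrative result (hypothetical numbers): a deployment with 5 enabled
    # and 2 disabled tenants would dispatch gauge values such as
    #   type_instance='tenants', meta={'state': 'enabled'}, value=5
    #   type_instance='tenants', meta={'state': 'disabled'}, value=2
    # plus similar 'users' counts and a single total 'roles' count.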
def dispatch_value(self, name, value, meta=None):
v = collectd.Values(
plugin=PLUGIN_NAME, # metric source
type='gauge',
type_instance=name,
interval=INTERVAL,
# w/a for https://github.com/collectd/collectd/issues/716
meta=meta or {'0': True},
values=[value]
)
v.dispatch()
plugin = KeystoneStatsPlugin(collectd)
def config_callback(conf):
plugin.config_callback(conf)
def notification_callback(notification):
plugin.notification_callback(notification)
def read_callback():
plugin.read_callback()
collectd.register_config(config_callback)
collectd.register_notification(notification_callback)
collectd.register_read(read_callback, INTERVAL)
|
apache-2.0
| 3,808,081,965,423,770,000
| 30.814433
| 74
| 0.629618
| false
| 4.098274
| false
| false
| false
|
asgeirrr/pgantomizer
|
pgantomizer/anonymize.py
|
1
|
8192
|
import argparse
import logging
import os
import subprocess
import sys
import psycopg2
import yaml
from .utils import get_in
DEFAULT_PK_COLUMN_NAME = 'id'
ANONYMIZE_DATA_TYPE = {
'timestamp with time zone': "'1111-11-11 11:11:11.111111+00'",
'date': "'1111-11-11'",
'boolean': 'random() > 0.5',
'integer': 'ceil(random() * 100)',
'smallint': 'ceil(random() * 100)',
'numeric': 'floor(random() * 10)',
'character varying': lambda column, pk_name: "'{}_' || {}".format(column, pk_name),
'text': lambda column, pk_name: "'{}_' || {}".format(column, pk_name),
'inet': "'111.111.111.111'"
}
CUSTOM_ANONYMIZATION_RULES = {
'aggregate_length': lambda column, _: 'length({})'.format(column)
}
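# Illustrative schema example (hypothetical table and column names). The YAML
# file passed via --schema maps table names to the optional settings read by
# the functions below: 'pk' (primary key column, defaults to 'id'), 'raw'
# (columns copied over untouched) and 'custom_rules' (column name -> rule name
# from CUSTOM_ANONYMIZATION_RULES):
#
#   customer:
#     pk: id
#     raw:
#       - created_at
#     custom_rules:
#       note: aggregate_length
#   invoice:
#     pk: invoice_id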
DB_ARG_NAMES = ('dbname', 'user', 'password', 'host', 'port')
DB_ENV_NAMES = ('ANONYMIZED_DB_NAME', 'ANONYMIZED_DB_USER', 'ANONYMIZED_DB_PASS', 'ANONYMIZED_DB_HOST',
'ANONYMIZED_DB_PORT')
class PgantomizerError(Exception):
pass
class MissingAnonymizationRuleError(PgantomizerError):
pass
class InvalidAnonymizationSchemaError(PgantomizerError):
pass
def get_table_pk_name(schema, table):
return schema[table].get('pk', DEFAULT_PK_COLUMN_NAME) if schema[table] else DEFAULT_PK_COLUMN_NAME
def get_db_args_from_env():
return {name: os.environ.get(var) for name, var in zip(DB_ARG_NAMES, DB_ENV_NAMES)}
def get_psql_db_args(db_args):
return '-d {dbname} -U {user} -h {host} -p {port}'.format(**db_args)
def drop_schema(db_args):
subprocess.run(
'PGPASSWORD={password} psql {db_args} -c "DROP SCHEMA public CASCADE; CREATE SCHEMA public;" {redirect}'.format(
password=db_args.get('password'),
db_args=get_psql_db_args(db_args),
redirect='' if logging.getLogger().getEffectiveLevel() == logging.DEBUG else '>/dev/null 2>&1'),
shell=True
)
def load_db_to_new_instance(filename, db_args):
if not os.path.isfile(filename):
raise IOError('Dump file {} is not a file.'.format(filename))
os.putenv('PGPASSWORD', db_args.get('password'))
drop_schema(db_args)
subprocess.run(
'PGPASSWORD={password} pg_restore -Fc -j 8 {db_args} {filename} {redirect}'.format(
password=db_args.get('password'),
db_args=get_psql_db_args(db_args), filename=filename,
redirect='' if logging.getLogger().getEffectiveLevel() == logging.DEBUG else '>/dev/null 2>&1'),
shell=True
)
def prepare_column_for_anonymization(conn, cursor, table, column, data_type):
"""
    Some data types such as VARCHAR are anonymized in such a manner that the anonymized value can be longer than
    the length constraint on the column. Therefore, the constraint is enlarged.
"""
if data_type == 'character varying':
logging.debug('Extending length of varchar {}.{}'.format(table, column))
cursor.execute("ALTER TABLE {table} ALTER COLUMN {column} TYPE varchar(250);".format(
table=table,
column=column
))
conn.commit()
def check_schema(cursor, schema, db_args):
for table in schema:
try:
cursor.execute("SELECT {columns} FROM {table};".format(
columns='"{}"'.format('", "'.join(schema[table].get('raw', []) + [get_table_pk_name(schema, table)])),
table=table
))
except psycopg2.ProgrammingError as e:
raise InvalidAnonymizationSchemaError(str(e))
def anonymize_column(cursor, schema, table, column, data_type):
if column == get_table_pk_name(schema, table) or (schema[table] and column in schema[table].get('raw', [])):
logging.debug('Skipping anonymization of {}.{}'.format(table, column))
elif data_type in ANONYMIZE_DATA_TYPE:
custom_rule = get_in(schema, [table, 'custom_rules', column])
if custom_rule and custom_rule not in CUSTOM_ANONYMIZATION_RULES:
raise MissingAnonymizationRuleError('Custom rule "{}" is not defined'.format(custom_rule))
anonymization = CUSTOM_ANONYMIZATION_RULES[custom_rule] if custom_rule else ANONYMIZE_DATA_TYPE[data_type]
cursor.execute("UPDATE {table} SET {column} = {value};".format(
table=table,
column=column,
value=anonymization(column, get_table_pk_name(schema, table)) if callable(anonymization) else anonymization
))
logging.debug('Anonymized {}.{}'.format(table, column))
else:
raise MissingAnonymizationRuleError('No rule to anonymize type "{}"'.format(data_type))
def anonymize_db(schema, db_args):
with psycopg2.connect(**db_args) as conn:
with conn.cursor() as cursor:
check_schema(cursor, schema, db_args)
cursor.execute("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public';")
for table_name in cursor.fetchall():
cursor.execute("SELECT column_name, data_type FROM information_schema.columns "
"WHERE table_schema = 'public' AND table_name = '{}'".format(table_name[0]))
for column_name, data_type in cursor.fetchall():
prepare_column_for_anonymization(conn, cursor, table_name[0], column_name, data_type)
anonymize_column(cursor, schema, table_name[0], column_name, data_type)
def load_anonymize_remove(dump_file, schema, leave_dump=False, db_args=None):
schema = yaml.load(open(schema))
db_args = db_args or get_db_args_from_env()
try:
load_db_to_new_instance(dump_file, db_args)
anonymize_db(schema, db_args)
    except Exception:  # Any exception must result in dropping the schema to prevent sensitive data leakage
drop_schema(db_args)
raise
finally:
if not leave_dump:
subprocess.run(['rm', dump_file])
def main():
parser = argparse.ArgumentParser(description='Load data from a Postgres dump to a specified instance '
'and anonymize it according to rules specified in a YAML config file.',
epilog='Beware that all tables in the target DB are dropped '
'prior to loading the dump and anonymization. See README.md for details.')
parser.add_argument('-v', '--verbose', action='count', help='increase output verbosity')
parser.add_argument('-l', '--leave-dump', action='store_true', help='do not delete dump file after anonymization')
parser.add_argument('--schema', help='YAML config file with anonymization rules for all tables', required=True,
default='./schema.yaml')
parser.add_argument('-f', '--dump-file', help='path to the dump of DB to load and anonymize',
default='to_anonymize.sql')
parser.add_argument('--dbname', help='name of the database to dump')
parser.add_argument('--user', help='name of the Postgres user with access to the anonymized database')
parser.add_argument('--password', help='password of the Postgres user with access to the anonymized database',
default='')
parser.add_argument('--host', help='host where the DB is running', default='localhost')
parser.add_argument('--port', help='port where the DB is running', default='5432')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.DEBUG)
else:
logging.basicConfig(format="%(levelname)s: %(message)s")
if not os.path.isfile(args.dump_file):
sys.exit('File with dump "{}" does not exist.'.format(args.dump_file))
if not os.path.isfile(args.schema):
sys.exit('File with schema "{}" does not exist.'.format(args.schema))
db_args = ({name: value for name, value in zip(DB_ARG_NAMES, (args.dbname, args.user, args.password, args.host,
args.port))}
if args.dbname and args.user else None)
load_anonymize_remove(args.dump_file, args.schema, args.leave_dump, db_args)
if __name__ == '__main__':
main()
|
bsd-3-clause
| -4,770,951,618,853,237,000
| 41.226804
| 120
| 0.630005
| false
| 3.675191
| false
| false
| false
|
CptDemocracy/Python
|
MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-6/PSET-6/decryptStory.py
|
1
|
3795
|
"""
PSET-6
Problem 2: Decryption (decryptStory)
Now that you have all the pieces to the puzzle, please use them to
decode the file story.txt. In the skeleton file, you will see a method
getStoryString() that will return the encrypted version of the story.
Fill in the following function; it should create the wordList, obtain
the story, and then decrypt the story. Be sure you've read through
the whole file to see what helper functions we've defined for you that
may assist you in these tasks! This function will be only a few lines
of code (our solution does it in 4 lines).
"""
import string
import random
import operator
# helper classes code
# --------------------------------
class CharAlphaASCII(object):
ALPHA_LEN = 26
ASCII_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
ASCII_CHARS_IND = {'A': 0, 'C': 2, 'B': 1, 'E': 4, 'D': 3, 'G': 6, 'F': 5, \
'I': 8, 'H': 7, 'K': 10, 'J': 9, 'M': 12, 'L': 11, \
'O': 14, 'N': 13, 'Q': 16, 'P': 15, 'S': 18, 'R': 17, \
'U': 20, 'T': 19, 'W': 22, 'V': 21, 'Y': 24, 'X': 23, \
'Z': 25, \
'a': 26, 'c': 28, 'b': 27, 'e': 30, 'd': 29, 'g': 32, \
'f': 31, 'i': 34, 'h': 33, 'k': 36, 'j': 35, 'm': 38, \
'l': 37, 'o': 40, 'n': 39, 'q': 42, 'p': 41, 's': 44, \
'r': 43, 'u': 46, 't': 45, 'w': 48, 'v': 47, 'y': 50, \
'x': 49, 'z': 51}
def __init__(self, char):
if len(char) > 1:
raise ValueError("CharAlphaASCII can't be more than 1 of length")
if not char.isalpha():
raise ValueError("CharAlphaASCII only accepts ASCII alpha chars")
self.char = char[0]
def __add__(self, num):
if type(num) != int:
raise TypeError
return CharAlphaASCII( self.operation(num, operator.add) )
def __sub__(self, num):
if type(num) != int:
raise TypeError
return CharAlphaASCII( self.operation(num, operator.sub) )
def __str__(self):
return self.char
def __lt__(self, char):
return self.char < char
def __le__(self, char):
return self.char <= char
def __eq__(self, char):
return self.char == char
def __gt__(self, char):
return self.char > char
def __ge__(self, char):
return self.char >= char
    def __len__(self):
return len(self.char)
def operation(self, num, op):
if type(num) != int:
raise TypeError
index = self.ASCII_CHARS_IND[self.char]
if index < self.ALPHA_LEN:
newIndex = op(index, num) % self.ALPHA_LEN
else:
newIndex = op(index, num) % self.ALPHA_LEN + self.ALPHA_LEN
return self.ASCII_CHARS[newIndex]
def ToUnicode(self):
return ord(self.char)
class Cstr(str, object):
def __init__(self, s):
self.s = s
def __add__(self, s):
return Cstr(self.s + str(s))
def __str__(self):
return self.s
# --------------------------------
# END of helper classes code
def applyCoder_CSTR(text, shift):
"""
Applies the coder to the text. Returns the encoded text.
text: string
coder: dict with mappings of characters to shifted characters
returns: text after mapping coder chars to original text
"""
cs = Cstr("")
for char in text:
if char.isalpha():
C = CharAlphaASCII(char)
C += shift
cs += C
else:
cs += char
return str(cs)
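# Illustrative example: applyCoder_CSTR("Abz", 1) returns "Bca" - each letter
# is shifted by one position within its own case while non-alpha characters
# are passed through unchanged.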
def decryptStory():
wordList = loadWords()
story = getStoryString()
return applyCoder_CSTR(story, findBestShift(wordList, story))
|
mit
| 3,336,012,462,027,109,400
| 30.106557
| 80
| 0.529644
| false
| 3.583569
| false
| false
| false
|
asipto/kamcli
|
kamcli/commands/cmd_aliasdb.py
|
1
|
5403
|
import click
from sqlalchemy import create_engine
from kamcli.ioutils import ioutils_dbres_print
from kamcli.cli import pass_context
from kamcli.cli import parse_user_spec
@click.group("aliasdb", help="Manage database user aliases")
@pass_context
def cli(ctx):
pass
@cli.command("add", short_help="Add a user-alias pair")
@click.option(
"table",
"--table",
default="dbaliases",
help="Name of database table (default: dbaliases)",
)
@click.argument("userid", metavar="<userid>")
@click.argument("aliasid", metavar="<aliasid>")
@pass_context
def aliasdb_add(ctx, table, userid, aliasid):
"""Add a user-alias pair into database
\b
Parameters:
<userid> - username, AoR or SIP URI for subscriber
<aliasid> - username, AoR or SIP URI for alias
"""
udata = parse_user_spec(ctx, userid)
adata = parse_user_spec(ctx, aliasid)
ctx.vlog(
"Adding user [%s@%s] with alias [%s@%s]",
udata["username"],
udata["domain"],
adata["username"],
adata["domain"],
)
e = create_engine(ctx.gconfig.get("db", "rwurl"))
e.execute(
"insert into " + table + " (username, domain, alias_username, "
"alias_domain) values (%s, %s, %s, %s)",
udata["username"],
udata["domain"],
adata["username"],
adata["domain"],
)
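# Illustrative CLI usage (hypothetical values, assuming kamcli is configured
# with a working read-write database URL):
#   kamcli aliasdb add alice@example.com alias1@example.com
#   kamcli aliasdb show alice@example.com
#   kamcli aliasdb rm alice@example.com alias1@example.com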
@cli.command("rm", short_help="Remove records for a user and/or alias")
@click.option(
"table",
"--table",
default="dbaliases",
help="Name of database table (default: dbaliases)",
)
@click.option(
"matchalias",
"--match-alias",
is_flag=True,
help="Match userid value as alias (when given one argument)",
)
@click.argument("userid", metavar="<userid>")
@click.argument("aliasid", metavar="<aliasid>", nargs=-1)
@pass_context
def aliasdb_rm(ctx, table, matchalias, userid, aliasid):
"""Remove a user from groups (revoke privilege)
\b
Parameters:
<userid> - username, AoR or SIP URI for subscriber
<aliasid> - username, AoR or SIP URI for alias (optional)
"""
udata = parse_user_spec(ctx, userid)
ctx.log(
"Removing alias for record [%s@%s]", udata["username"], udata["domain"]
)
e = create_engine(ctx.gconfig.get("db", "rwurl"))
if not aliasid:
if matchalias:
e.execute(
"delete from " + table + " where alias_username=%s and "
"alias_domain=%s",
udata["username"],
udata["domain"],
)
else:
e.execute(
"delete from " + table + " where username=%s and domain=%s",
udata["username"],
udata["domain"],
)
else:
for a in aliasid:
adata = parse_user_spec(ctx, a)
e.execute(
"delete from " + table + " where username=%s and domain=%s "
"and alias_username=%s and alias_domain=%s",
udata["username"],
udata["domain"],
adata["username"],
adata["domain"],
)
@cli.command("show", short_help="Show user aliases")
@click.option(
"oformat",
"--output-format",
"-F",
type=click.Choice(["raw", "json", "table", "dict"]),
default=None,
help="Format the output",
)
@click.option(
"ostyle",
"--output-style",
"-S",
default=None,
help="Style of the output (tabulate table format)",
)
@click.option(
"table",
"--table",
default="dbaliases",
help="Name of database table (default: dbaliases)",
)
@click.option(
"matchalias",
"--match-alias",
is_flag=True,
help="Match userid value as alias",
)
@click.argument("userid", nargs=-1, metavar="[<userid>]")
@pass_context
def aliasdb_show(ctx, oformat, ostyle, table, matchalias, userid):
"""Show details for user aliases
\b
Parameters:
[<userid>] - username, AoR or SIP URI for user or alias
- it can be a list of userids
- if not provided then all aliases are shown
"""
if not userid:
ctx.vlog("Showing all records")
e = create_engine(ctx.gconfig.get("db", "rwurl"))
res = e.execute("select * from " + table)
ioutils_dbres_print(ctx, oformat, ostyle, res)
else:
for u in userid:
udata = parse_user_spec(ctx, u)
e = create_engine(ctx.gconfig.get("db", "rwurl"))
if matchalias:
ctx.vlog(
"Showing records for alias [%s@%s]",
udata["username"],
udata["domain"],
)
res = e.execute(
"select * from " + table + " where alias_username=%s "
"and alias_domain=%s",
udata["username"],
udata["domain"],
)
else:
ctx.vlog(
"Showing records for user [%s@%s]",
udata["username"],
udata["domain"],
)
res = e.execute(
"select * from " + table + " where username=%s and "
"domain=%s",
udata["username"],
udata["domain"],
)
ioutils_dbres_print(ctx, oformat, ostyle, res)
|
gpl-2.0
| -459,473,105,445,511,100
| 28.850829
| 79
| 0.531371
| false
| 3.770412
| false
| false
| false
|
mdtux89/amr-eager
|
action.py
|
1
|
1131
|
#!/usr/bin/env python
#coding=utf-8
'''
Definition of Action class. In AMREAGER, an action can be either 'shift', 'reduce', 'rarc'
or 'larc'. When it's a shift, the argument is the subgraph triggered by the token. When it's a reduce,
the argument is used to specify the optional reentrant edge to create. For larcs and rarcs, the
argument is the label for those edges.
@author: Marco Damonte (m.damonte@sms.ed.ac.uk)
@since: 03-10-16
'''
class Action:
def __init__(self, name, argv = None):
assert (name == "shift" or name == "larc" or name == "rarc" or name == "reduce")
self.name = name
self.argv = argv
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.name, self.argv)
def __eq__(self, other):
return self.name == other.name and self.argv == other.argv
def get_id(self):
act_id = 0
if self.name == "shift":
act_id = 1
elif self.name == "reduce":
act_id = 2
elif self.name == "larc":
act_id = 3
elif self.name == "rarc":
act_id = 4
return act_id
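if __name__ == "__main__":
    # Small illustrative demo (not part of the original parser); the edge
    # label used here is hypothetical.
    arc1 = Action("rarc", ":ARG0")
    arc2 = Action("rarc", ":ARG0")
    assert arc1 == arc2
    print(arc1)           # <Action rarc :ARG0>
    print(arc1.get_id())  # 4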
|
bsd-2-clause
| -1,735,350,711,124,829,000
| 30.416667
| 102
| 0.576481
| false
| 3.307018
| false
| false
| false
|
Quihico/repository.spartacus
|
temp/script.module.python.koding.aio/lib/koding/video.py
|
1
|
16114
|
# -*- coding: utf-8 -*-
# script.module.python.koding.aio
# Python Koding AIO (c) by whufclee (info@totalrevolution.tv)
# Python Koding AIO is licensed under a
# Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License.
# You should have received a copy of the license along with this
# work. If not, see http://creativecommons.org/licenses/by-nc-nd/4.0.
# IMPORTANT: If you choose to use the special noobsandnerds features which hook into their server
# please make sure you give appropriate credit in your add-on description (noobsandnerds.com)
#
# Please make sure you've read and understood the license, this code can NOT be used commercially
# and it can NOT be modified and redistributed. If you're found to be in breach of this license
# then any affected add-ons will be blacklisted and will not be able to work on the same system
# as any other add-ons which use this code. Thank you for your cooperation.
import os
import shutil
import xbmc
import xbmcgui
from __init__ import dolog
from guitools import Show_Busy
from systemtools import Last_Error
dp = xbmcgui.DialogProgress()
check_started = xbmc.translatePath('special://profile/addon_data/script.module.python.koding.aio/temp/playback_in_progress')
#----------------------------------------------------------------
# TUTORIAL #
def Check_Playback(ignore_dp=False,timeout=10):
"""
This function will return true or false based on video playback. Simply start a stream
(whether via an add-on, direct link to URL or local storage doesn't matter), the code will
then work out if playback is successful. This uses a number of checks and should take into
account all potential glitches which can occur during playback. The return should happen
within a second or two of playback being successful (or not).
CODE: Check_Playback()
AVAILABLE PARAMS:
    ignore_dp - By default this is set to False, which means the function
    waits for any DialogProgress window to close before checking playback.
    Set this to True if you want that check skipped, but please bear in mind
    the reason this check is in place and enabled by default is because
    some streams do bring up a DialogProgress when initiated (such as f4m
    proxy links) and disabling this check in those circumstances can cause
    false positives.
    timeout - This is the amount of time you want to allow for playback
    to start before sending back a response of False. Please note if
    ignore_dp is left at False then a potential 10s extra can be added
    to this amount if a DialogProgress window is open. The default setting
    for this is 10s.
EXAMPLE CODE:
xbmc.Player().play('http://totalrevolution.tv/videos/python_koding/Browse_To_Folder.mov')
isplaying = koding.Check_Playback()
if isplaying:
dialog.ok('PLAYBACK SUCCESSFUL','Congratulations, playback was successful')
xbmc.Player().stop()
else:
dialog.ok('PLAYBACK FAILED','Sorry, playback failed :(')
~"""
if not os.path.exists(check_started):
os.makedirs(check_started)
if not ignore_dp:
isdialog = True
counter = 1
# Check if the progress window is active and wait for playback
while isdialog:
dolog('### Current Window: %s' % xbmc.getInfoLabel('System.CurrentWindow'))
dolog('### Current XML: %s' % xbmc.getInfoLabel('Window.Property(xmlfile)'))
dolog('### Progress Dialog active, sleeping for %s seconds' % counter)
xbmc.sleep(1000)
if xbmc.getCondVisibility('Window.IsActive(progressdialog)') or (xbmc.getInfoLabel('Window.Property(xmlfile)') == 'DialogProgress.xml'):
isdialog = True
else:
isdialog = False
counter += 1
dolog('counter: %s' % counter)
# Given the DialogProgress 10 seconds to finish and it's still up - time to close it
if counter == 10:
try:
dolog('attempting to send click to close dp')
xbmc.executebuiltin('SendClick()')
if dp.iscanceled():
dp.close()
except:
dolog('### FAILED TO CLOSE DP')
isplaying = xbmc.Player().isPlaying()
counter = 1
if xbmc.Player().isPlayingAudio():
return True
# If xbmc player is not yet active give it some time to initialise
while not isplaying and counter < timeout:
xbmc.sleep(1000)
isplaying = xbmc.Player().isPlaying()
dolog('### XBMC Player not yet active, sleeping for %s seconds' % counter)
counter += 1
success = 0
counter = 0
# If it's playing give it time to physically start streaming then attempt to pull some info
if isplaying:
xbmc.sleep(1000)
while not success and counter < 10:
try:
if xbmc.Player().isPlayingVideo():
infotag = xbmc.Player().getVideoInfoTag()
vidtime = xbmc.Player().getTime()
if vidtime > 0:
success = 1
# If playback doesn't start automatically (buffering) we force it to play
else:
dolog('### Playback active but time at zero, trying to unpause')
xbmc.executebuiltin('PlayerControl(Play)')
xbmc.sleep(2000)
vidtime = xbmc.Player().getTime()
if vidtime > 0:
success = 1
# If no infotag or time could be pulled then we assume playback failed, try and stop the xbmc.player
except:
counter += 1
xbmc.sleep(1000)
# Check if the busy dialog is still active from previous locked up playback attempt
isbusy = xbmc.getCondVisibility('Window.IsActive(busydialog)')
counter = 1
while isbusy:
dolog('### Busy dialog active, sleeping for %ss' % counter)
xbmc.sleep(1000)
isbusy = xbmc.getCondVisibility('Window.IsActive(busydialog)')
counter += 1
if counter == 5:
xbmc.executebuiltin('Dialog.Close(busydialog)')
if not success:
xbmc.executebuiltin('PlayerControl(Stop)')
dolog('### Failed playback, stopped stream')
shutil.rmtree(check_started)
return False
else:
shutil.rmtree(check_started)
return True
#----------------------------------------------------------------
# TUTORIAL #
def Play_Video(video,showbusy=True,content='video',ignore_dp=False,timeout=10, item=None):
"""
This will attempt to play a video and return True or False on
whether or not playback was successful. This function is similar
to Check_Playback but this actually tries a number of methods to
play the video whereas Check_Playback does not actually try to
play a video - it will just return True/False on whether or not
a video is currently playing.
CODE: Play_Video(video, [showbusy, content])
AVAILABLE PARAMS:
(*) video - This is the path to the video, this can be a local
path, online path or a channel number from the PVR.
showbusy - By default this is set to True which means while the
function is attempting to playback the video the user will see the
busy dialog. Set to False if you prefer this not to appear but do
    bear in mind a user may navigate to another section and try playing
something else if they think this isn't doing anything.
content - By default this is set to 'video', however if you're
passing through audio you may want to set this to 'music' so the
system can correctly set the tags for artist, song etc.
    ignore_dp        -  By default this is set to False, which means the
    function waits for any DialogProgress window to close before checking
    playback. If you show a DP of your own while waiting for the stream to
    start then you'll want to set this to True so it is ignored.
    Please bear in mind the reason this check is in place and enabled
by default is because some streams do bring up a DialogProgress
when initiated (such as f4m proxy links) and disabling this check
in those circumstances can cause false positives.
timeout - This is the amount of time you want to allow for playback
to start before sending back a response of False. Please note if
    ignore_dp is left as False then it will also add a potential 10s extra
to this amount if a DialogProgress window is open. The default setting
for this is 10s.
EXAMPLE CODE:
isplaying = koding.Play_Video('http://totalrevolution.tv/videos/python_koding/Browse_To_Folder.mov')
if isplaying:
dialog.ok('PLAYBACK SUCCESSFUL','Congratulations, playback was successful')
xbmc.Player().stop()
else:
dialog.ok('PLAYBACK FAILED','Sorry, playback failed :(')
~"""
dolog('### ORIGINAL VIDEO: %s'%video)
import urlresolver
try: import simplejson as json
except: import json
if not item:
meta = {}
for i in ['title', 'originaltitle', 'tvshowtitle', 'year', 'season', 'episode', 'genre', 'rating', 'votes',
'director', 'writer', 'plot', 'tagline']:
try:
meta[i] = xbmc.getInfoLabel('listitem.%s' % i)
except:
pass
meta = dict((k, v) for k, v in meta.iteritems() if not v == '')
if 'title' not in meta:
meta['title'] = xbmc.getInfoLabel('listitem.label')
icon = xbmc.getInfoLabel('listitem.icon')
item = xbmcgui.ListItem(path=video, iconImage =icon, thumbnailImage=icon)
if content == "music":
try:
meta['artist'] = xbmc.getInfoLabel('listitem.artist')
item.setInfo(type='Music', infoLabels={'title': meta['title'], 'artist': meta['artist']})
except:
item.setInfo(type='Video', infoLabels=meta)
else:
item.setInfo(type='Video', infoLabels=meta)
else:
item.setInfo(type='Video', infoLabels=meta)
playback = False
if showbusy:
Show_Busy()
# if a plugin path is sent we try activate window
if video.startswith('plugin://'):
try:
dolog('Attempting to play via xbmc.Player().play() method')
xbmc.Player().play(video)
# dolog('Attempting to play via XBMC.ActivateWindow(10025, ...) method')
# xbmc.executebuiltin('XBMC.ActivateWindow(10025,%s)' % video)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
except:
dolog(Last_Error())
# If an XBMC action has been sent through we do an executebuiltin command
elif video.startswith('ActivateWindow') or video.startswith('RunAddon') or video.startswith('RunScript') or video.startswith('PlayMedia'):
try:
dolog('Attempting to play via xbmc.executebuiltin method')
xbmc.executebuiltin('%s'%video)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
except:
dolog(Last_Error())
elif ',' in video:
# Standard xbmc.player method (a comma in url seems to throw urlresolver off)
try:
dolog('Attempting to play via xbmc.Player.play() method')
xbmc.Player().play('%s'%video, item)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
# Attempt to resolve via urlresolver
except:
try:
dolog('Attempting to resolve via urlresolver module')
dolog('video = %s'%video)
hmf = urlresolver.HostedMediaFile(url=video, include_disabled=False, include_universal=True)
if hmf.valid_url() == True:
video = hmf.resolve()
dolog('### VALID URL, RESOLVED: %s'%video)
xbmc.Player().play('%s' % video, item)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
except:
dolog(Last_Error())
# Play from a db entry - untested
elif video.isdigit():
dolog('### Video is digit, presuming it\'s a db item')
command = ('{"jsonrpc": "2.0", "id":"1", "method": "Player.Open","params":{"item":{"channelid":%s}}}' % url)
xbmc.executeJSONRPC(command)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
else:
# Attempt to resolve via urlresolver
try:
dolog('Attempting to resolve via urlresolver module')
dolog('video = %s'%video)
hmf = urlresolver.HostedMediaFile(url=video, include_disabled=False, include_universal=True)
if hmf.valid_url() == True:
video = hmf.resolve()
dolog('### VALID URL, RESOLVED: %s'%video)
xbmc.Player().play('%s' % video, item)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
# Standard xbmc.player method
except:
try:
dolog('Attempting to play via xbmc.Player.play() method')
xbmc.Player().play('%s' % video, item)
playback = Check_Playback(ignore_dp,timeout)
is_in_progress = True
progress_count = 0
while is_in_progress:
xbmc.sleep(1000)
progress_count += 1
dolog('Progress check is active, sleeping %s'%progress_count)
is_in_progress = os.path.exists(check_started)
except:
dolog(Last_Error())
dolog('Playback status: %s' % playback)
Show_Busy(False)
return playback
#----------------------------------------------------------------
# TUTORIAL #
def Sleep_If_Playback_Active():
"""
This will allow you to pause code while kodi is playing audio or video
CODE: Sleep_If_Playback_Active()
EXAMPLE CODE:
dialog.ok('PLAY A VIDEO','We will now attempt to play a video, once you stop this video you should see a dialog.ok message.')
xbmc.Player().play('http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_720p_stereo.avi')
xbmc.sleep(3000) # Give kodi enough time to load up the video
koding.Sleep_If_Playback_Active()
dialog.ok('PLAYBACK FINISHED','The playback has now been finished so this dialog code has now been initiated')
~"""
isplaying = xbmc.Player().isPlaying()
while isplaying:
xbmc.sleep(500)
isplaying = xbmc.Player().isPlaying()
|
gpl-2.0
| 1,564,265,599,120,319,500
| 40.738342
| 148
| 0.616946
| false
| 4.077449
| false
| false
| false
|
shiquanwang/pylearn2
|
pylearn2/scripts/tutorials/softmax_regression/tests/test_softmaxreg.py
|
1
|
1450
|
"""
Test for softmax_regression.ipynb
"""
import os
from pylearn2.testing.skip import skip_if_no_data
from pylearn2.config import yaml_parse
from theano import config
def test():
skip_if_no_data()
dirname = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
with open(os.path.join(dirname, 'sr_dataset.yaml'), 'r') as f:
dataset = f.read()
if config.mode == "DEBUG_MODE":
hyper_params = {'train_stop': 10}
else:
hyper_params = {'train_stop': 50}
dataset = dataset % (hyper_params)
with open(os.path.join(dirname, 'sr_model.yaml'), 'r') as f:
model = f.read()
with open(os.path.join(dirname, 'sr_algorithm.yaml'), 'r') as f:
algorithm = f.read()
if config.mode == "DEBUG_MODE":
hyper_params = {'batch_size': 10,
'valid_stop': 50010}
else:
hyper_params = {'batch_size': 10,
'valid_stop': 50050}
algorithm = algorithm % (hyper_params)
with open(os.path.join(dirname, 'sr_train.yaml'), 'r') as f:
train = f.read()
save_path = os.path.dirname(os.path.realpath(__file__))
train = train % locals()
train = yaml_parse.load(train)
train.main_loop()
try:
os.remove("{}/softmax_regression.pkl".format(save_path))
os.remove("{}/softmax_regression_best.pkl".format(save_path))
except:
pass
if __name__ == '__main__':
test()
|
bsd-3-clause
| -464,289,127,205,744,400
| 24.892857
| 76
| 0.577241
| false
| 3.341014
| false
| false
| false
|
CCharlieLi/StaffManagmentSystem
|
Website/admin.py
|
1
|
1775
|
from django.contrib import admin
from Website.models import *
from django import forms
from django.utils.translation import ugettext_lazy
from Skyrover.widgets import KindEditor
# Register your models here.
class kindeditorNewsForm(forms.ModelForm):
Content = forms.CharField(label=ugettext_lazy(u"Content"), widget=KindEditor(attrs={'rows':15, 'cols':100}),required=True)
class Meta:
model = News
fields = "__all__"
class NewsAdmin(admin.ModelAdmin):
list_display = ('Title','Publsh_Date')
form = kindeditorNewsForm
class kindeditorAnnounceForm(forms.ModelForm):
Content = forms.CharField(label=ugettext_lazy(u"Content"), widget=KindEditor(attrs={'rows':15, 'cols':100}),required=True)
class Meta:
model = Announce
fields = "__all__"
class AnnounceAdmin(admin.ModelAdmin):
list_display = ('Title','Publsh_Date')
form = kindeditorAnnounceForm
class PolicyAdmin(admin.ModelAdmin):
list_display = ('Name','Publsh_Date','keyword')
class MagazineAdmin(admin.ModelAdmin):
list_display = ('Name','Publsh_Date','keyword')
class PartnershipAdmin(admin.ModelAdmin):
list_display = ('Name','PeopleType')
class kindeditorPeopleForm(forms.ModelForm):
Content = forms.CharField(label=ugettext_lazy(u"Content"), widget=KindEditor(attrs={'rows':15, 'cols':100}),required=True)
class Meta:
model = People
fields = "__all__"
class PeopleAdmin(admin.ModelAdmin):
list_display = ('Name','PeopleType')
form = kindeditorPeopleForm
admin.site.register(News,NewsAdmin)
admin.site.register(Announce,AnnounceAdmin)
admin.site.register(Policy,PolicyAdmin)
admin.site.register(Magazine,MagazineAdmin)
admin.site.register(PeopleType)
admin.site.register(Partnership,PartnershipAdmin)
admin.site.register(People,PeopleAdmin)
admin.site.register(Group)
|
gpl-2.0
| -58,941,322,863,837,096
| 31.272727
| 123
| 0.761127
| false
| 3.21558
| false
| false
| false
|
uclmr/inferbeddings
|
scripts/fb15k/UCL_FB15K_adv_v3.1.py
|
1
|
3991
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import itertools
import os.path
import sys
import argparse
import logging
def cartesian_product(dicts):
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
def summary(configuration):
kvs = sorted([(k, v) for k, v in configuration.items()], key=lambda e: e[0])
return '_'.join([('%s=%s' % (k, v)) for (k, v) in kvs])
def to_cmd(c, _path=None):
if _path is None:
_path = '/home/pminervi/workspace/inferbeddings/'
command = 'python3 {}/bin/kbp-cli.py' \
' --train {}/data/fb15k/freebase_mtr100_mte100-train.txt' \
' --valid {}/data/fb15k/freebase_mtr100_mte100-valid.txt' \
' --test {}/data/fb15k/freebase_mtr100_mte100-test.txt' \
' --clauses {}/data/fb15k/clauses/clauses_0.999.pl' \
' --nb-epochs {}' \
' --lr {}' \
' --nb-batches {}' \
' --model {}' \
' --similarity {}' \
' --margin {}' \
' --embedding-size {}' \
' --adv-lr {} --adv-init-ground --adversary-epochs {}' \
' --discriminator-epochs {} --adv-weight {} --adv-batch-size {}' \
' --predicate-norm 1'.format(_path, _path, _path, _path, _path,
c['epochs'], c['lr'], c['batches'],
c['model'], c['similarity'],
c['margin'], c['embedding_size'],
c['adv_lr'], c['adv_epochs'],
c['disc_epochs'], c['adv_weight'], c['adv_batch_size'])
return command
def to_logfile(c, path):
outfile = "%s/ucl_fb15k_adv_v3.1.%s.log" % (path, summary(c))
return outfile
def main(argv):
def formatter(prog):
return argparse.HelpFormatter(prog, max_help_position=100, width=200)
argparser = argparse.ArgumentParser('Generating experiments for the UCL cluster', formatter_class=formatter)
argparser.add_argument('--debug', '-D', action='store_true', help='Debug flag')
argparser.add_argument('--path', '-p', action='store', type=str, default=None, help='Path')
args = argparser.parse_args(argv)
hyperparameters_space = dict(
epochs=[100],
optimizer=['adagrad'],
lr=[.1],
batches=[10],
model=['ComplEx'],
similarity=['dot'],
margin=[2, 5, 10],
embedding_size=[20, 50, 100, 150, 200],
adv_lr=[.1],
adv_epochs=[0, 1, 10],
disc_epochs=[1, 10],
adv_weight=[0, 1, 10, 100, 1000, 10000],
adv_batch_size=[1, 10, 100]
)
configurations = cartesian_product(hyperparameters_space)
path = '/home/pminervi/workspace/inferbeddings/logs/ucl_fb15k_adv_v3.1/'
if not os.path.exists(path):
os.makedirs(path)
for job_id, cfg in enumerate(configurations):
logfile = to_logfile(cfg, path)
completed = False
if os.path.isfile(logfile):
with open(logfile, 'r', encoding='utf-8', errors='ignore') as f:
content = f.read()
completed = '### MICRO (test filtered)' in content
if not completed:
line = '{} >> {} 2>&1'.format(to_cmd(cfg, _path=args.path), logfile)
if args.debug:
print(line)
else:
file_name = 'ucl_fb15k_adv_v3.1_{}.job'.format(job_id)
alias = ''
job_script = '#$ -S /bin/bash\n' \
'#$ -wd /tmp/\n' \
'#$ -l h_vmem=12G,tmem=12G\n' \
'#$ -l h_rt=96:00:00\n' \
'{}\n{}\n'.format(alias, line)
with open(file_name, 'w') as f:
f.write(job_script)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main(sys.argv[1:])
|
mit
| -8,338,077,182,654,248,000
| 34.633929
| 112
| 0.496617
| false
| 3.513204
| false
| false
| false
|
cmoutard/mne-python
|
mne/io/brainvision/brainvision.py
|
1
|
20471
|
# -*- coding: utf-8 -*-
"""Conversion tool from Brain Vision EEG to FIF"""
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Christian Brodbeck <christianbrodbeck@nyu.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os
import time
import re
import warnings
import numpy as np
from ...utils import verbose, logger
from ..constants import FIFF
from ..meas_info import _empty_info
from ..base import _BaseRaw, _check_update_montage
from ..utils import _read_segments_file
from ...externals.six import StringIO
from ...externals.six.moves import configparser
class RawBrainVision(_BaseRaw):
"""Raw object from Brain Vision EEG file
Parameters
----------
vhdr_fname : str
Path to the EEG header file.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
eog : list or tuple
Names of channels or list of indices that should be designated
EOG channels. Values should correspond to the vhdr file.
Default is ``('HEOGL', 'HEOGR', 'VEOGb')``.
misc : list or tuple
Names of channels or list of indices that should be designated
MISC channels. Values should correspond to the electrodes
in the vhdr file. Default is ``()``.
scale : float
The scaling factor for EEG data. Units are in volts. Default scale
factor is 1. For microvolts, the scale factor would be 1e-6. This is
used when the header file does not specify the scale factor.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
response_trig_shift : int | None
An integer that will be added to all response triggers when reading
events (stimulus triggers will be unaffected). If None, response
triggers will be ignored. Default is 0 for backwards compatibility, but
typically another value or None will be necessary.
event_id : dict | None
The id of the event to consider. If None (default),
only stimulus events are added to the stimulus channel. If dict,
the keys will be mapped to trigger values on the stimulus channel
in addition to the stimulus events. Keys are case-sensitive.
Example: {'SyncStatus': 1; 'Pulse Artifact': 3}.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, vhdr_fname, montage=None,
eog=('HEOGL', 'HEOGR', 'VEOGb'), misc=(),
scale=1., preload=False, response_trig_shift=0,
event_id=None, verbose=None):
# Channel info and events
logger.info('Extracting parameters from %s...' % vhdr_fname)
vhdr_fname = os.path.abspath(vhdr_fname)
info, fmt, self._order, mrk_fname, montage = _get_vhdr_info(
vhdr_fname, eog, misc, scale, montage)
events = _read_vmrk_events(mrk_fname, event_id, response_trig_shift)
_check_update_montage(info, montage)
with open(info['filename'], 'rb') as f:
f.seek(0, os.SEEK_END)
n_samples = f.tell()
dtype_bytes = _fmt_byte_dict[fmt]
self.preload = False # so the event-setting works
last_samps = [(n_samples // (dtype_bytes * (info['nchan'] - 1))) - 1]
self._create_event_ch(events, last_samps[0] + 1)
super(RawBrainVision, self).__init__(
info, last_samps=last_samps, filenames=[info['filename']],
orig_format=fmt, preload=preload, verbose=verbose)
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a chunk of raw data"""
# read data
dtype = _fmt_dtype_dict[self.orig_format]
n_data_ch = len(self.ch_names) - 1
_read_segments_file(self, data, idx, fi, start, stop, cals, mult,
dtype=dtype, n_channels=n_data_ch,
trigger_ch=self._event_ch)
def get_brainvision_events(self):
"""Retrieve the events associated with the Brain Vision Raw object
Returns
-------
events : array, shape (n_events, 3)
Events, each row consisting of an (onset, duration, trigger)
sequence.
"""
return self._events.copy()
def set_brainvision_events(self, events):
"""Set the events and update the synthesized stim channel
Parameters
----------
events : array, shape (n_events, 3)
Events, each row consisting of an (onset, duration, trigger)
sequence.
"""
self._create_event_ch(events)
def _create_event_ch(self, events, n_samp=None):
"""Create the event channel"""
if n_samp is None:
n_samp = self.last_samp - self.first_samp + 1
events = np.array(events, int)
if events.ndim != 2 or events.shape[1] != 3:
raise ValueError("[n_events x 3] shaped array required")
# update events
self._event_ch = _synthesize_stim_channel(events, n_samp)
self._events = events
if self.preload:
self._data[-1] = self._event_ch
def _read_vmrk_events(fname, event_id=None, response_trig_shift=0):
"""Read events from a vmrk file
Parameters
----------
fname : str
vmrk file to be read.
event_id : dict | None
The id of the event to consider. If dict, the keys will be mapped to
trigger values on the stimulus channel. Example:
{'SyncStatus': 1; 'Pulse Artifact': 3}. If empty dict (default),
only stimulus events are added to the stimulus channel.
response_trig_shift : int | None
Integer to shift response triggers by. None ignores response triggers.
Returns
-------
events : array, shape (n_events, 3)
An array containing the whole recording's events, each row representing
an event as (onset, duration, trigger) sequence.
"""
if event_id is None:
event_id = dict()
# read vmrk file
with open(fname, 'rb') as fid:
txt = fid.read().decode('utf-8')
header = txt.split('\n')[0].strip()
_check_mrk_version(header)
if (response_trig_shift is not None and
not isinstance(response_trig_shift, int)):
raise TypeError("response_trig_shift must be an integer or None")
# extract Marker Infos block
m = re.search("\[Marker Infos\]", txt)
if not m:
return np.zeros(0)
mk_txt = txt[m.end():]
m = re.search("\[.*\]", mk_txt)
if m:
mk_txt = mk_txt[:m.start()]
# extract event information
items = re.findall("^Mk\d+=(.*)", mk_txt, re.MULTILINE)
events = []
for info in items:
mtype, mdesc, onset, duration = info.split(',')[:4]
onset = int(onset)
duration = (int(duration) if duration.isdigit() else 1)
try:
trigger = int(re.findall('[A-Za-z]*\s*?(\d+)', mdesc)[0])
except IndexError:
trigger = None
if mtype.lower().startswith('response'):
if response_trig_shift is not None:
trigger += response_trig_shift
else:
trigger = None
if mdesc in event_id:
trigger = event_id[mdesc]
if trigger:
events.append((onset, duration, trigger))
events = np.array(events).reshape(-1, 3)
return events
def _synthesize_stim_channel(events, n_samp):
"""Synthesize a stim channel from events read from a vmrk file
Parameters
----------
events : array, shape (n_events, 3)
Each row representing an event as (onset, duration, trigger) sequence
(the format returned by _read_vmrk_events).
n_samp : int
The number of samples.
Returns
-------
stim_channel : array, shape (n_samples,)
An array containing the whole recording's event marking
"""
# select events overlapping buffer
onset = events[:, 0]
# create output buffer
stim_channel = np.zeros(n_samp, int)
for onset, duration, trigger in events:
stim_channel[onset:onset + duration] = trigger
return stim_channel
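# Illustrative example (assumption, added for documentation only): with
# events = [[2, 1, 5]] and n_samp = 6 the synthesized channel would be
# [0, 0, 5, 0, 0, 0], i.e. trigger value 5 written at sample onset 2 for a
# duration of one sample.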
def _check_hdr_version(header):
tags = ['Brain Vision Data Exchange Header File Version 1.0',
'Brain Vision Data Exchange Header File Version 2.0']
if header not in tags:
raise ValueError("Currently only support %r, not %r"
"Contact MNE-Developers for support."
% (str(tags), header))
def _check_mrk_version(header):
tags = ['Brain Vision Data Exchange Marker File, Version 1.0',
'Brain Vision Data Exchange Marker File, Version 2.0']
if header not in tags:
raise ValueError("Currently only support %r, not %r"
"Contact MNE-Developers for support."
% (str(tags), header))
_orientation_dict = dict(MULTIPLEXED='F', VECTORIZED='C')
_fmt_dict = dict(INT_16='short', INT_32='int', IEEE_FLOAT_32='single')
_fmt_byte_dict = dict(short=2, int=4, single=4)
_fmt_dtype_dict = dict(short='<i2', int='<i4', single='<f4')
_unit_dict = {'V': 1., u'µV': 1e-6}
def _get_vhdr_info(vhdr_fname, eog, misc, scale, montage):
"""Extracts all the information from the header file.
Parameters
----------
vhdr_fname : str
Raw EEG header to be read.
eog : list of str
Names of channels that should be designated EOG channels. Names should
correspond to the vhdr file.
misc : list of str
Names of channels that should be designated MISC channels. Names
should correspond to the electrodes in the vhdr file.
scale : float
The scaling factor for EEG data. Units are in volts. Default scale
factor is 1.. For microvolts, the scale factor would be 1e-6. This is
used when the header file does not specify the scale factor.
montage : str | True | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
Returns
-------
info : Info
The measurement info.
fmt : str
The data format in the file.
edf_info : dict
A dict containing Brain Vision specific parameters.
events : array, shape (n_events, 3)
Events from the corresponding vmrk file.
"""
scale = float(scale)
ext = os.path.splitext(vhdr_fname)[-1]
if ext != '.vhdr':
raise IOError("The header file must be given to read the data, "
"not the '%s' file." % ext)
with open(vhdr_fname, 'rb') as f:
# extract the first section to resemble a cfg
header = f.readline().decode('utf-8').strip()
_check_hdr_version(header)
settings = f.read().decode('utf-8')
if settings.find('[Comment]') != -1:
params, settings = settings.split('[Comment]')
else:
params, settings = settings, ''
cfg = configparser.ConfigParser()
if hasattr(cfg, 'read_file'): # newer API
cfg.read_file(StringIO(params))
else:
cfg.readfp(StringIO(params))
# get sampling info
# Sampling interval is given in microsec
sfreq = 1e6 / cfg.getfloat('Common Infos', 'SamplingInterval')
info = _empty_info(sfreq)
# check binary format
assert cfg.get('Common Infos', 'DataFormat') == 'BINARY'
order = cfg.get('Common Infos', 'DataOrientation')
if order not in _orientation_dict:
raise NotImplementedError('Data Orientation %s is not supported'
% order)
order = _orientation_dict[order]
fmt = cfg.get('Binary Infos', 'BinaryFormat')
if fmt not in _fmt_dict:
raise NotImplementedError('Datatype %s is not supported' % fmt)
fmt = _fmt_dict[fmt]
# load channel labels
info['nchan'] = cfg.getint('Common Infos', 'NumberOfChannels') + 1
ch_names = [''] * info['nchan']
cals = np.empty(info['nchan'])
ranges = np.empty(info['nchan'])
cals.fill(np.nan)
ch_dict = dict()
for chan, props in cfg.items('Channel Infos'):
n = int(re.findall(r'ch(\d+)', chan)[0]) - 1
props = props.split(',')
if len(props) < 4:
props += ('V',)
name, _, resolution, unit = props[:4]
ch_dict[chan] = name
ch_names[n] = name
if resolution == "":
if not(unit): # For truncated vhdrs (e.g. EEGLAB export)
resolution = 0.000001
else:
resolution = 1. # for files with units specified, but not res
unit = unit.replace(u'\xc2', u'') # Remove unwanted control characters
cals[n] = float(resolution)
ranges[n] = _unit_dict.get(unit, unit) * scale
# create montage
if montage is True:
from ...transforms import _sphere_to_cartesian
from ...channels.montage import Montage
montage_pos = list()
montage_names = list()
for ch in cfg.items('Coordinates'):
montage_names.append(ch_dict[ch[0]])
radius, theta, phi = map(float, ch[1].split(','))
# 1: radius, 2: theta, 3: phi
pos = _sphere_to_cartesian(r=radius, theta=theta, phi=phi)
montage_pos.append(pos)
montage_sel = np.arange(len(montage_pos))
montage = Montage(montage_pos, montage_names, 'Brainvision',
montage_sel)
ch_names[-1] = 'STI 014'
cals[-1] = 1.
ranges[-1] = 1.
if np.isnan(cals).any():
raise RuntimeError('Missing channel units')
# Attempts to extract filtering info from header. If not found, both are
# set to zero.
settings = settings.splitlines()
idx = None
if 'Channels' in settings:
idx = settings.index('Channels')
settings = settings[idx + 1:]
for idx, setting in enumerate(settings):
if re.match('#\s+Name', setting):
break
else:
idx = None
if idx:
lowpass = []
highpass = []
for i, ch in enumerate(ch_names[:-1], 1):
line = settings[idx + i].split()
assert ch in line
highpass.append(line[5])
lowpass.append(line[6])
if len(highpass) == 0:
pass
elif all(highpass):
if highpass[0] == 'NaN':
pass # Placeholder for future use. Highpass set in _empty_info
elif highpass[0] == 'DC':
info['highpass'] = 0.
else:
info['highpass'] = float(highpass[0])
else:
info['highpass'] = np.min(np.array(highpass, dtype=np.float))
warnings.warn('%s' % ('Channels contain different highpass '
'filters. Highest filter setting will '
'be stored.'))
if len(lowpass) == 0:
pass
elif all(lowpass):
if lowpass[0] == 'NaN':
pass # Placeholder for future use. Lowpass set in _empty_info
else:
info['lowpass'] = float(lowpass[0])
else:
info['lowpass'] = np.min(np.array(lowpass, dtype=np.float))
warnings.warn('%s' % ('Channels contain different lowpass filters.'
' Lowest filter setting will be stored.'))
# Post process highpass and lowpass to take into account units
header = settings[idx].split(' ')
header = [h for h in header if len(h)]
if '[s]' in header[4] and (info['highpass'] > 0):
info['highpass'] = 1. / info['highpass']
if '[s]' in header[5]:
info['lowpass'] = 1. / info['lowpass']
# locate EEG and marker files
path = os.path.dirname(vhdr_fname)
info['filename'] = os.path.join(path, cfg.get('Common Infos', 'DataFile'))
info['meas_date'] = int(time.time())
info['buffer_size_sec'] = 1. # reasonable default
# Creates a list of dicts of eeg channels for raw.info
logger.info('Setting channel info structure...')
info['chs'] = []
info['ch_names'] = ch_names
for idx, ch_name in enumerate(ch_names):
if ch_name in eog or idx in eog or idx - info['nchan'] in eog:
kind = FIFF.FIFFV_EOG_CH
coil_type = FIFF.FIFFV_COIL_NONE
unit = FIFF.FIFF_UNIT_V
elif ch_name in misc or idx in misc or idx - info['nchan'] in misc:
kind = FIFF.FIFFV_MISC_CH
coil_type = FIFF.FIFFV_COIL_NONE
unit = FIFF.FIFF_UNIT_V
elif ch_name == 'STI 014':
kind = FIFF.FIFFV_STIM_CH
coil_type = FIFF.FIFFV_COIL_NONE
unit = FIFF.FIFF_UNIT_NONE
else:
kind = FIFF.FIFFV_EEG_CH
coil_type = FIFF.FIFFV_COIL_EEG
unit = FIFF.FIFF_UNIT_V
info['chs'].append(dict(
ch_name=ch_name, coil_type=coil_type, kind=kind, logno=idx + 1,
scanno=idx + 1, cal=cals[idx], range=ranges[idx], loc=np.zeros(12),
unit=unit, unit_mul=0., # always zero- mne manual pg. 273
coord_frame=FIFF.FIFFV_COORD_HEAD))
# for stim channel
mrk_fname = os.path.join(path, cfg.get('Common Infos', 'MarkerFile'))
info._check_consistency()
return info, fmt, order, mrk_fname, montage
def read_raw_brainvision(vhdr_fname, montage=None,
eog=('HEOGL', 'HEOGR', 'VEOGb'), misc=(),
scale=1., preload=False, response_trig_shift=0,
event_id=None, verbose=None):
"""Reader for Brain Vision EEG file
Parameters
----------
vhdr_fname : str
Path to the EEG header file.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
eog : list or tuple of str
Names of channels or list of indices that should be designated
EOG channels. Values should correspond to the vhdr file
Default is ``('HEOGL', 'HEOGR', 'VEOGb')``.
misc : list or tuple of str
Names of channels or list of indices that should be designated
MISC channels. Values should correspond to the electrodes
in the vhdr file. Default is ``()``.
scale : float
The scaling factor for EEG data. Units are in volts. Default scale
factor is 1. For microvolts, the scale factor would be 1e-6. This is
used when the header file does not specify the scale factor.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
response_trig_shift : int | None
An integer that will be added to all response triggers when reading
events (stimulus triggers will be unaffected). If None, response
triggers will be ignored. Default is 0 for backwards compatibility, but
typically another value or None will be necessary.
event_id : dict | None
The id of the event to consider. If None (default),
only stimulus events are added to the stimulus channel. If dict,
the keys will be mapped to trigger values on the stimulus channel
in addition to the stimulus events. Keys are case-sensitive.
Example: {'SyncStatus': 1; 'Pulse Artifact': 3}.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : instance of RawBrainVision
A Raw object containing BrainVision data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
raw = RawBrainVision(vhdr_fname=vhdr_fname, montage=montage, eog=eog,
misc=misc, scale=scale,
preload=preload, verbose=verbose, event_id=event_id,
response_trig_shift=response_trig_shift)
return raw
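# --- Illustrative usage sketch (assumption, not part of the original file) ---
# A hedged example of how the reader above is typically called; the file name
# 'recording.vhdr' is a placeholder and does not ship with this module.
#
#     raw = read_raw_brainvision('recording.vhdr', preload=True)
#     events = raw.get_brainvision_events()  # rows of (onset, duration, trigger)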
|
bsd-3-clause
| -8,863,967,679,030,398,000
| 37.91635
| 79
| 0.593796
| false
| 3.794957
| false
| false
| false
|
cstlee/kafkamark
|
scripts/kafkamark_plot.py
|
1
|
1296
|
# ISC License
#
# Copyright (c) 2017, Stanford University
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
'''
usage:
kafkamark plot [options] <datafile>
options:
-h, --help
'''
import matplotlib.pyplot as plt
import numpy as np
from docopt import docopt
def plot(argv):
args = docopt(__doc__, argv=argv)
x = []
y = []
with open(args['<datafile>'], 'r') as f:
for line in f.readlines():
if line[0] == '#':
continue
data = line.split()
x.append(float(data[0]))
y.append(float(data[1]))
plt.semilogx(x, y)
plt.show()
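# --- Illustrative note (assumption, not part of the original script) ---
# The <datafile> parsed above is expected to hold '#'-prefixed comment lines
# plus rows with two whitespace-separated numeric columns (x y), for example:
#
#     # latency percentiles
#     10.0 0.50
#     100.0 0.99
#
# Each row is then plotted on a log-scaled x axis via plt.semilogx().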
|
isc
| 2,795,149,768,533,533,000
| 29.139535
| 79
| 0.677469
| false
| 3.857143
| false
| false
| false
|
weedge/doubanFmSpeackerPi
|
plugin/fm/baseFM.py
|
1
|
3321
|
# -*- coding: utf-8-*-
import os
import logging
import pipes
import tempfile
import subprocess
import psutil
import signal
import lib.appPath
import lib.diagnose
from lib.baseClass import AbstractClass
class AbstractFM(AbstractClass):
"""
Generic parent class for FM class
"""
@classmethod
def is_available(cls):
return (super(cls, cls).is_available() and
lib.diagnose.check_executable('mplayer'))
def mplay(self, url):
cmd = ['mplayer', str(url)]
cmd_str = ' '.join([pipes.quote(arg) for arg in cmd])
self._logger.debug('Executing %s', cmd_str)
with tempfile.TemporaryFile() as f:
self._mplay_process = subprocess.Popen(cmd,stdout=f,stderr=f,preexec_fn=os.setsid)
self._logger.debug("mplayer pid: '%d'", self._mplay_process.pid)
            # Save the mplayer pid while it is playing (this pid is the process group id)
pid_file = os.path.join(lib.appPath.DATA_PATH,self.__class__.__name__+"_mplay.pid")
with open(pid_file, 'w') as pid_fp:
pid_fp.write(str(self._mplay_process.pid))
pid_fp.close()
self._mplay_process.wait()
            # Remove the mplayer pid file once playback has finished
if os.path.exists(pid_file):
os.remove(pid_file)
f.seek(0)
output = f.read()
if output:
self._logger.debug("Output was: '%s'", output)
def kill_mplay_procsss(self):
'''
        Kill the currently playing mplayer process (pid is read from the pid file)
'''
pid_file = os.path.join(lib.appPath.DATA_PATH,self.__class__.__name__+"_mplay.pid")
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
pid = int(f.read())
f.close()
if pid:
self._logger.debug("pgkill mplay pid: %d",pid)
os.killpg(pid,signal.SIGKILL)
def suspend_mplay_process(self):
'''
        Suspend the currently playing mplayer process (pid is read from the pid file)
'''
res = None
pid_file = os.path.join(lib.appPath.DATA_PATH,self.__class__.__name__+"_mplay.pid")
with open(pid_file, 'r') as f:
pid = int(f.read())
f.close()
if pid:
self._logger.debug("suspend mplay pid: %d",pid)
res = psutil.Process(pid).suspend()
return res
def resume_mplay_process(self):
'''
        Resume the currently playing mplayer process (pid is read from the pid file)
'''
pid_file = os.path.join(lib.appPath.DATA_PATH,self.__class__.__name__+"_mplay.pid")
with open(pid_file, 'r') as f:
pid = int(f.read())
f.close()
if pid:
self._logger.debug("resume mplay pid: %d",pid)
res = psutil.Process(pid).resume()
return res
def login(self):
pass
def getAccessToken(self):
pass
def getSong(self):
pass
def setLikeSong(self):
pass
def setUnLikeSong(self):
pass
def setHateSong(self):
pass
def downloadSong(self):
pass
def next(self):
pass
def stop(self):
pass
def play(self):
pass
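# --- Illustrative sketch (assumption, not part of the original plugin) ---
# A concrete FM plugin would subclass AbstractFM, fill in the stubbed hooks
# above and reuse mplay() for the actual playback. The class name and URL
# below are placeholders for illustration only.
#
# class DemoFM(AbstractFM):
#     def getSong(self):
#         return "http://example.com/demo.mp3"   # hypothetical stream URL
#
#     def play(self):
#         self.mplay(self.getSong())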
|
apache-2.0
| 4,241,096,129,657,073,000
| 25.822034
| 95
| 0.52575
| false
| 3.266254
| false
| false
| false
|
balloob/pychromecast
|
examples/yleareena_example.py
|
1
|
2570
|
"""
Example on how to use the Yle Areena Controller
"""
import argparse
import logging
import sys
from time import sleep
import pychromecast
from pychromecast.controllers.yleareena import YleAreenaController
import zeroconf
logger = logging.getLogger(__name__)
# Change to the name of your Chromecast
CAST_NAME = "My Chromecast"
parser = argparse.ArgumentParser(
description="Example on how to use the Yle Areena Controller.")
parser.add_argument('--show-debug', help='Enable debug log',
action='store_true')
parser.add_argument('--cast',
help='Name of cast device (default: "%(default)s")',
default=CAST_NAME)
parser.add_argument('--program', help='Areena Program ID',
default="1-50097921")
parser.add_argument('--audio_language', help='audio_language',
default="")
parser.add_argument('--text_language', help='text_language',
                    default="off")
parser.add_argument('--show-zeroconf-debug', help='Enable zeroconf debug log',
                    action='store_true')
args = parser.parse_args()
if args.show_debug:
logging.basicConfig(level=logging.DEBUG)
if args.show_zeroconf_debug:
print("Zeroconf version: " + zeroconf.__version__)
logging.getLogger("zeroconf").setLevel(logging.DEBUG)
def get_kaltura_id(program_id):
"""
Dive into the yledl internals and fetch the kaltura player id.
This can be used with Chromecast
"""
from yledl.streamfilters import StreamFilters
from yledl.http import HttpClient
from yledl.localization import TranslationChooser
from yledl.extractors import extractor_factory
from yledl.titleformatter import TitleFormatter
title_formatter = TitleFormatter()
language_chooser = TranslationChooser('fin')
httpclient = HttpClient(None)
stream_filters = StreamFilters()
url = 'https://areena.yle.fi/{}'.format(program_id)
extractor = extractor_factory(url, stream_filters, language_chooser, httpclient)
pid = extractor.program_id_from_url(url)
info = extractor.program_info_for_pid(pid, url, title_formatter, None)
return info.media_id.split('-')[-1]
chromecasts, browser = pychromecast.get_listed_chromecasts(friendly_names=[args.cast])
if not chromecasts:
print('No chromecast with name "{}" discovered'.format(args.cast))
sys.exit(1)
cast = chromecasts[0]
# Start socket client's worker thread and wait for initial status update
cast.wait()
yt = YleAreenaController()
cast.register_handler(yt)
yt.play_areena_media(entry_id=get_kaltura_id(args.program), audio_language=args.audio_language, text_language=args.text_language)
sleep(10)
|
mit
| -3,048,973,992,877,498,000
| 31.125
| 129
| 0.706226
| false
| 3.697842
| false
| false
| false
|
ecodiv/code-snippets
|
cross-validation/cross_fold_validation.py
|
1
|
8776
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
DESCRIPTION: Code to run a n-fold cross validation on the results of the GRASS
GIS v.surf.bspline and v.surf.idw function. This code is used in
a tutorial about carrying out n-fold cross validation in GRASS
GIS (https://tutorials.ecodiv.earth/toc/cross_validation.html.
NOTE: Code should work on GRASS GIS 7.2 + and should be run from
within a GRASS session.
@author: pvbreugel add ecodiv dot earth (2016)
"""
# Modules
# -----------------------------------------------------------------------------
import os
import sys
import numpy as np
import uuid
import tempfile
import string
from grass.pygrass.modules import Module
import grass.script as gs
from subprocess import PIPE
# Functions
# -----------------------------------------------------------------------------
def tmpname(prefix):
"""Generate a tmp name which contains prefix
Store the name in the global list.
Use only for raster maps.
"""
tmpf = prefix + str(uuid.uuid4())
tmpf = string.replace(tmpf, '-', '_')
return tmpf
def bspline_param(vectormap, depvar):
"""Get output bspline parameter estimates"""
stfact = Module("v.surf.bspline", flags="ec", input=vectormap,
column=depvar, memory=1024, stdout_=PIPE).outputs.stdout
stepdist = float(stfact.split(':')[-1].strip())
stfact = Module("v.surf.bspline", flags="c", input=vectormap,
ew_step=stepdist, ns_step=stepdist, column=depvar,
memory=1024, stdout_=PIPE).outputs.stdout
stfact = stfact.replace(" ", "")
stfact = stfact.split("\n")[1:-1]
stfact = [z.split("|") for z in stfact]
stfact = [[float(x) for x in y if x != ''] for y in stfact]
minlambda = min(stfact, key=lambda x: abs(x[1]))[0]
return(stepdist, minlambda)
def bspline_validation(vector, column, lambda_i, ew_step, ns_step, keep,
npartitions=4, method='bilinear', solver='cholesky',
maxit=10000, memory=2048):
"""Compute validation statistics (rsme) for bspline extrapolation"""
# Temporary map
tmpmap = tmpname("cvbase_")
Module("g.copy", vector=[vector, tmpmap])
# Compute rsme over model with all callibration points
tmpout = tmpname("cvtmp4_")
Module("v.surf.bspline", input=tmpmap, column=column, ew_step=ew_step,
ns_step=ns_step, method=method, lambda_i=lambda_i,
solver=solver, maxit=maxit, memory=memory, raster_output=tmpout)
Module("v.what.rast", map=tmpmap, raster=tmpout, column="bspline")
stats = Module("db.select", flags="c", sql="SELECT {},bspline FROM {}".
format(column, tmpmap), stdout_=PIPE).outputs.stdout
stats = stats.replace("\n", "|")[:-1].split("|")
stats = (np.asarray([float(x) for x in stats], dtype="float").
reshape(len(stats)/2, 2))
rsme_all = np.sqrt(np.mean(np.diff(stats, axis=1)**2))
if keep:
Module("g.rename", raster=[tmpout, keep])
else:
Module("g.remove", type="raster", name=tmpout, flags="f")
# Run n-fold crossvalidation
if npartitions > 0:
Module("v.kcv", map=tmpmap, npartitions=npartitions)
rsme = []
for i in range(1, npartitions+1):
tmp_cal = tmpname("cvtmp_calibrate_")
tmp_val = tmpname("cvtmp_validate_")
tmpout1 = tmpname("cvtmp_output_1_")
tmpout2 = tmpname("cvtmp_output_2_")
tmpout3 = tmpname("cvtmp_output_3_")
Module("v.extract", flags="r", input=tmpmap, output=tmp_cal,
where="part={}".format(i))
Module("v.extract", input=tmpmap, where="part={}".format(i),
output=tmp_val)
Module("v.surf.bspline", input=tmp_cal, column=column,
ew_step=ew_step, ns_step=ns_step, method=method,
lambda_i=lambda_i, solver=solver, maxit=maxit,
memory=memory, output=tmpout1, sparse_input=tmp_val)
Module("v.category", input=tmpout1, output=tmpout2,
option="del", cat=-1)
Module("v.category", input=tmpout2, output=tmpout3, option="add")
Module("v.db.addtable", map=tmpout3)
Module("v.db.addcolumn", map=tmpout3,
columns=("x double precision, y double precision, "
"z double precision"))
Module("v.to.db", map=tmpout3, option="coor", columns="x,y,z")
# TODO: need to find out how to use the from_ with Module
gs.run_command("v.distance", from_=tmpout3, to=tmp_val,
upload="to_attr", column="x", to_column=column)
stats = Module("db.select", flags="c", sql="SELECT x, z FROM {}".
format(tmpout3), stdout_=PIPE).outputs.stdout
stats = stats.replace("\n", "|")[:-1].split("|")
stats = (np.asarray([float(x) for x in stats], dtype="float").
reshape(len(stats)/2, 2))
rsme.append(np.sqrt(np.mean(np.diff(stats, axis=1)**2)))
Module("g.remove", type="vector", pattern="cvtmp_*", flags="f")
Module("g.remove", type="vector", pattern="cvbase_*", flags="f")
return {'rsme_full': rsme_all, 'rsme_cv_mean': np.asarray(rsme).mean(),
'rsme_cv_std': np.asarray(rsme).std(), 'rsme_cv': rsme}
else:
return {'rsme_full': rsme_all}
def idw_validation(vector, column, keep, npoints=12, power=2, npartitions=10,
memory=2048):
"""Compute validation statistics (rsme) for idw extrapolation"""
# Temporary map
tmpmap = tmpname("cvbase_")
Module("g.copy", vector=[vector, tmpmap])
# Compute rsme over model with all callibration points
tmpout = tmpname("cvtmp4_")
Module("v.surf.idw", input=tmpmap, column=column, npoints=npoints,
power=power, output=tmpout)
Module("v.what.rast", map=tmpmap, raster=tmpout, column="idw")
stats = Module("db.select", flags="c", sql="SELECT {},idw FROM {}".
format(column, tmpmap), stdout_=PIPE).outputs.stdout
stats = stats.replace("\n", "|")[:-1].split("|")
stats = (np.asarray([float(x) for x in stats], dtype="float").
reshape(len(stats)/2, 2))
rsme_all = np.sqrt(np.mean(np.diff(stats, axis=1)**2))
if keep:
Module("g.rename", raster=[tmpout, keep])
else:
Module("g.remove", type="raster", name=tmpout, flags="f")
# Run n-fold crossvalidation
if npartitions > 0:
Module("v.kcv", map=tmpmap, npartitions=npartitions)
rsme = []
for i in range(1, npartitions+1):
tmppnt = tmpname("cvtmp2_")
tmpspa = tmpname("cvtmp3_")
tmpout = tmpname("cvtmp4_")
Module("v.extract", flags="r", input=tmpmap, output=tmppnt,
where="part={}".format(i))
Module("v.extract", input=tmpmap, where="part={}".format(i),
output=tmpspa)
Module("v.surf.idw", input=tmppnt, column=column, npoints=npoints,
power=power, output=tmpout)
Module("v.what.rast", map=tmpspa, raster=tmpout, column="idw")
stats = Module("db.select", flags="c",
sql="SELECT {},idw FROM {}".
format(column, tmpspa), stdout_=PIPE).outputs.stdout
stats = stats.replace("\n", "|")[:-1].split("|")
stats = (np.asarray([float(x) for x in stats], dtype="float").
reshape(len(stats)/2, 2))
rsme.append(np.sqrt(np.mean(np.diff(stats, axis=1)**2)))
Module("g.remove", type="all", pattern="cvtmp*", flags="f")
Module("g.remove", type="vector", pattern="cvbase_*", flags="f")
# Return output
return {'rsme_full': rsme_all, 'rsme_cv_mean': np.asarray(rsme).mean(),
'rsme_cv_std': np.asarray(rsme).std(), 'rsme_cv': rsme}
# Example
# -----------------------------------------------------------------------------
# Determine parameters
stepdist, minlambda = bspline_param(vectormap="households2", depvar="lv")
# Compute evaluation statistics
bspline_stats = bspline_validation(vector="households2", column="lv",
keep="lv2_bspline", lambda_i=minlambda,
ew_step=stepdist, ns_step=stepdist,
npartitions=10, method='bilinear',
solver='cholesky', maxit=10000, memory=2048)
idw_validation(vector="households2", column="lv", npoints=12, power=2,
npartitions=10, keep="lv2_idw")
|
gpl-3.0
| 8,661,852,449,883,056,000
| 38.178571
| 79
| 0.559594
| false
| 3.584967
| false
| false
| false
|
arider/riderml
|
riderml/regression/gradient_descent.py
|
1
|
6474
|
import numpy
ETA_PLUS = 1.2
ETA_MINUS = 0.5
def stochastic_gradient_descent(function,
derivative,
x, y,
theta=None,
iterations=100,
learning_rate=0.000001,
shuffle=True,
batch_size=.2):
"""
Gradient descent with mini batches. Batch_size is a float for proportion
of the data or an int. 1 means standard stochastic gradient descent.
args:
function - a function taking two arrays and returning one
derivative - derivative of the function
x - a numpy array; instances in rows
y - a numpy array
theta - initial coefficients.
iterations - number of iterations to do
learning_rate - initial learning rate
shuffle - whether or not to shuffle the data before each iteration
batch_size - proportion or integer size of batches.
"""
if theta is None:
theta = numpy.random.rand(x.shape[1], y.shape[1])
assert x.shape[1] == theta.shape[0]
# translate float into batch size int
batch_number = float(batch_size)
if batch_size < 1 and batch_size > 0:
batch_number = int(batch_size * x.shape[0])
if batch_number < 1:
batch_number = 1
# initialize feature specific learning rates
delta = numpy.zeros(theta.shape)
delta += learning_rate
previous_gradient = numpy.zeros([x.shape[1], theta.shape[1]])
current_theta = numpy.array(theta)
for iteration in range(iterations):
# shuffle data
if shuffle:
inds = numpy.random.permutation(range(x.shape[0]))
x = x[inds]
y = y[inds]
# process batches
batch_index = 0
for i in range(int(x.shape[0] / batch_number)):
if i == int(x.shape[0] / batch_number) - 1:
batch_inds = range(int(batch_index * batch_number), x.shape[0])
else:
batch_inds = range(int(batch_index * batch_number),
int((batch_index + 1) * batch_number))
batch_x = x[batch_inds]
batch_y = y[batch_inds]
loss = function(batch_x, current_theta) - batch_y
# avg gradient per example
            gradient = (derivative(batch_x, current_theta).T.dot(loss) /
                        batch_x.shape[0])
# update the learning rate
sign = numpy.sign(gradient * previous_gradient)
for ci in range(sign.shape[1]):
for f in range(sign.shape[0]):
if sign[f, ci] < 0.:
delta[f, ci] = ETA_MINUS * delta[f, ci]
gradient[f, ci] = 0.
elif sign[f, ci] > 0.:
delta[f, ci] = ETA_PLUS * delta[f, ci]
current_theta -= numpy.sign(gradient) * delta
previous_gradient = gradient
batch_index += 1
return current_theta
def gradient_descent(function,
derivative,
x, y,
theta=None,
iterations=100,
learning_rate=0.000001,
shuffle=True):
"""
Gradient descent -- use irprop- algorithm to adjust learning rate on a
per-feature basis
arguments:
function - the function to learn parameters of (takes (x, theta))
ex: logistic, linear, etc....
derivative - the derivative of the function
x - the input data in a matrix at least (1, 1)
y - the response variable(s)
theta - coefficients array
iterations - number of iterations
learning_rate - the learning rate, float
shuffle - permute the data at each iteration
"""
if theta is None:
theta = numpy.random.rand(x.shape[1], y.shape[1])
# parameters for rprop
previous_gradient = numpy.zeros([x.shape[1], theta.shape[1]])
delta = numpy.zeros(theta.shape)
delta += learning_rate
for i in range(0, int(iterations)):
if shuffle:
inds = numpy.random.permutation(range(x.shape[0]))
x = x[inds]
y = y[inds]
# avg gradient per example
loss = function(x, theta) - y
gradient = derivative(x, theta).T.dot(loss) / x.shape[0]
# update the learning rate
sign = gradient * previous_gradient
for ci in range(sign.shape[1]):
for f in range(sign.shape[0]):
if sign[f, ci] < 0.:
delta[f, ci] = ETA_MINUS * delta[f, ci]
gradient[f, ci] = 0.
elif sign[f, ci] > 0.:
delta[f, ci] = ETA_PLUS * delta[f, ci]
theta -= numpy.sign(gradient) * delta
previous_gradient = gradient
return theta
def adagrad(function, d_function, x, y, theta, iterations,
learning_rate=0.01, shuffle=True, smoothing=.5):
"""
    Gradient descent -- use the adagrad algorithm to adjust the learning rate
    on a per-feature basis
arguments:
function - the function to learn parameters of (takes x, theta)
        d_function - the derivative of the function
ex: logistic, linear, etc....
x - the input data in a matrix at least (1, 1)
y - the response variable(s)
theta - coefficients array
iterations - number of iterations
learning_rate - the learning rate, float
shuffle - permute the data at each iteration
smoothing - exponential smoothing in case adagrad is too
aggressive in step size
"""
running_gradient = numpy.zeros(theta.shape)
for iteration in range(iterations):
loss = function(x, theta) - y
gradient = loss.T.dot(d_function(x)) / x.shape[0]
# the step size is too aggressive with 'canonical' adagrad on
# non-sparse problems, so we use exponential smoothing instead of
# running_gradient += gradient ** 2
if smoothing:
running_gradient = (smoothing * running_gradient +
(1 - smoothing) * (gradient ** 2).T)
else:
            running_gradient += (gradient ** 2).T
lr = numpy.multiply(1. / (numpy.sqrt(running_gradient)), gradient.T)
theta -= learning_rate * lr
return theta
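# --- Illustrative usage sketch (assumption, not part of the original module) ---
# A minimal example under the assumption of a plain linear model
# y = x.dot(theta): 'function' maps (x, theta) to predictions and
# 'derivative' returns d(prediction)/d(theta), which for a linear model is
# simply x. The synthetic data below is for demonstration only.
if __name__ == '__main__':
    rng = numpy.random.RandomState(0)
    x = rng.rand(200, 3)
    true_theta = numpy.array([[1.0], [-2.0], [0.5]])
    y = x.dot(true_theta)
    linear = lambda data, theta: data.dot(theta)    # prediction
    d_linear = lambda data, theta: data             # gradient of prediction w.r.t. theta
    fitted = gradient_descent(linear, d_linear, x, y,
                              iterations=500, learning_rate=0.01)
    print(fitted)   # should end up close to true_theta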
|
mit
| -6,365,893,648,222,809,000
| 34.571429
| 79
| 0.545103
| false
| 4.287417
| false
| false
| false
|
dstanek/keystone
|
keystone/common/ldap/core.py
|
1
|
76540
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import codecs
import functools
import os.path
import re
import sys
import weakref
import ldap.filter
import ldappool
from oslo_log import log
import six
from six.moves import map, zip
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LW
LOG = log.getLogger(__name__)
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
'sub': ldap.SCOPE_SUBTREE}
LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
'default': None,
'finding': ldap.DEREF_FINDING,
'never': ldap.DEREF_NEVER,
'searching': ldap.DEREF_SEARCHING}
LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
'demand': ldap.OPT_X_TLS_DEMAND,
'allow': ldap.OPT_X_TLS_ALLOW}
# RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to
# indicate that no attributes should be returned besides the DN.
DN_ONLY = ['1.1']
_utf8_encoder = codecs.getencoder('utf-8')
def utf8_encode(value):
"""Encode a basestring to UTF-8.
If the string is unicode encode it to UTF-8, if the string is
str then assume it's already encoded. Otherwise raise a TypeError.
:param value: A basestring
:returns: UTF-8 encoded version of value
:raises: TypeError if value is not basestring
"""
if isinstance(value, six.text_type):
return _utf8_encoder(value)[0]
elif isinstance(value, six.binary_type):
return value
else:
raise TypeError("value must be basestring, "
"not %s" % value.__class__.__name__)
_utf8_decoder = codecs.getdecoder('utf-8')
def utf8_decode(value):
"""Decode a from UTF-8 into unicode.
If the value is a binary string assume it's UTF-8 encoded and decode
it into a unicode string. Otherwise convert the value from its
type into a unicode string.
:param value: value to be returned as unicode
:returns: value as unicode
:raises: UnicodeDecodeError for invalid UTF-8 encoding
"""
if isinstance(value, six.binary_type):
return _utf8_decoder(value)[0]
return six.text_type(value)
def py2ldap(val):
"""Type convert a Python value to a type accepted by LDAP (unicode).
The LDAP API only accepts strings for values therefore convert
the value's type to a unicode string. A subsequent type conversion
will encode the unicode as UTF-8 as required by the python-ldap API,
but for now we just want a string representation of the value.
:param val: The value to convert to a LDAP string representation
:returns: unicode string representation of value.
"""
if isinstance(val, bool):
return u'TRUE' if val else u'FALSE'
else:
return six.text_type(val)
def enabled2py(val):
"""Similar to ldap2py, only useful for the enabled attribute."""
try:
return LDAP_VALUES[val]
except KeyError:
pass
try:
return int(val)
except ValueError:
pass
return utf8_decode(val)
def ldap2py(val):
"""Convert an LDAP formatted value to Python type used by OpenStack.
Virtually all LDAP values are stored as UTF-8 encoded strings.
OpenStack prefers values which are unicode friendly.
:param val: LDAP formatted value
:returns: val converted to preferred Python type
"""
return utf8_decode(val)
def convert_ldap_result(ldap_result):
"""Convert LDAP search result to Python types used by OpenStack.
Each result tuple is of the form (dn, attrs), where dn is a string
containing the DN (distinguished name) of the entry, and attrs is
a dictionary containing the attributes associated with the
entry. The keys of attrs are strings, and the associated values
are lists of strings.
OpenStack wants to use Python types of its choosing. Strings will
be unicode, truth values boolean, whole numbers int's, etc. DN's will
also be decoded from UTF-8 to unicode.
:param ldap_result: LDAP search result
:returns: list of 2-tuples containing (dn, attrs) where dn is unicode
and attrs is a dict whose values are type converted to
OpenStack preferred types.
"""
py_result = []
at_least_one_referral = False
for dn, attrs in ldap_result:
ldap_attrs = {}
if dn is None:
# this is a Referral object, rather than an Entry object
at_least_one_referral = True
continue
for kind, values in attrs.items():
try:
val2py = enabled2py if kind == 'enabled' else ldap2py
ldap_attrs[kind] = [val2py(x) for x in values]
except UnicodeDecodeError:
LOG.debug('Unable to decode value for attribute %s', kind)
py_result.append((utf8_decode(dn), ldap_attrs))
if at_least_one_referral:
LOG.debug(('Referrals were returned and ignored. Enable referral '
'chasing in keystone.conf via [ldap] chase_referrals'))
return py_result
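# An illustrative sketch of the data shapes convert_ldap_result() deals with,
# assuming python-ldap's classic str-valued results; the DN and attribute
# values are made up.
def _example_convert_ldap_result():
    raw = [
        ('cn=alice,ou=Users,dc=example,dc=com',
         {'cn': ['alice'], 'enabled': ['TRUE']}),
        (None, ['ldap://other.example.com/??sub']),  # referral entry, skipped
    ]
    # Expected shape: [(u'cn=alice,...', {'cn': [u'alice'], 'enabled': [True]})]
    return convert_ldap_result(raw)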
def safe_iter(attrs):
if attrs is None:
return
elif isinstance(attrs, list):
for e in attrs:
yield e
else:
yield attrs
def parse_deref(opt):
try:
return LDAP_DEREF[opt]
except KeyError:
raise ValueError(_('Invalid LDAP deref option: %(option)s. '
'Choose one of: %(options)s') %
{'option': opt,
'options': ', '.join(LDAP_DEREF.keys()), })
def parse_tls_cert(opt):
try:
return LDAP_TLS_CERTS[opt]
except KeyError:
raise ValueError(_(
'Invalid LDAP TLS certs option: %(option)s. '
'Choose one of: %(options)s') % {
'option': opt,
'options': ', '.join(LDAP_TLS_CERTS.keys())})
def ldap_scope(scope):
try:
return LDAP_SCOPES[scope]
except KeyError:
raise ValueError(
_('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
'scope': scope,
'options': ', '.join(LDAP_SCOPES.keys())})
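# An illustrative sketch of the three option parsers above, which translate
# keystone.conf strings into python-ldap constants; not called anywhere.
def _example_option_parsing():
    assert ldap_scope('sub') == ldap.SCOPE_SUBTREE
    assert parse_deref('never') == ldap.DEREF_NEVER
    assert parse_tls_cert('demand') == ldap.OPT_X_TLS_DEMAND
    try:
        ldap_scope('base')  # not a supported scope here, so this raises
    except ValueError:
        return True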
def prep_case_insensitive(value):
"""Prepare a string for case-insensitive comparison.
This is defined in RFC4518. For simplicity, all this function does is
lowercase all the characters, strip leading and trailing whitespace,
and compress sequences of spaces to a single space.
"""
value = re.sub(r'\s+', ' ', value.strip().lower())
return value
def is_ava_value_equal(attribute_type, val1, val2):
"""Returns True if and only if the AVAs are equal.
When comparing AVAs, the equality matching rule for the attribute type
should be taken into consideration. For simplicity, this implementation
does a case-insensitive comparison.
    Note that this function uses prep_case_insensitive so the limitations of
that function apply here.
"""
return prep_case_insensitive(val1) == prep_case_insensitive(val2)
def is_rdn_equal(rdn1, rdn2):
"""Returns True if and only if the RDNs are equal.
* RDNs must have the same number of AVAs.
* Each AVA of the RDNs must be the equal for the same attribute type. The
order isn't significant. Note that an attribute type will only be in one
AVA in an RDN, otherwise the DN wouldn't be valid.
* Attribute types aren't case sensitive. Note that attribute type
comparison is more complicated than implemented. This function only
      compares case-insensitively. The code should handle multiple names for an
attribute type (e.g., cn, commonName, and 2.5.4.3 are the same).
Note that this function uses is_ava_value_equal to compare AVAs so the
limitations of that function apply here.
"""
if len(rdn1) != len(rdn2):
return False
for attr_type_1, val1, dummy in rdn1:
found = False
for attr_type_2, val2, dummy in rdn2:
if attr_type_1.lower() != attr_type_2.lower():
continue
found = True
if not is_ava_value_equal(attr_type_1, val1, val2):
return False
break
if not found:
return False
return True
def is_dn_equal(dn1, dn2):
"""Returns True if and only if the DNs are equal.
Two DNs are equal if they've got the same number of RDNs and if the RDNs
are the same at each position. See RFC4517.
Note that this function uses is_rdn_equal to compare RDNs so the
limitations of that function apply here.
:param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn.
:param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
if not isinstance(dn1, list):
dn1 = ldap.dn.str2dn(utf8_encode(dn1))
if not isinstance(dn2, list):
dn2 = ldap.dn.str2dn(utf8_encode(dn2))
if len(dn1) != len(dn2):
return False
for rdn1, rdn2 in zip(dn1, dn2):
if not is_rdn_equal(rdn1, rdn2):
return False
return True
def dn_startswith(descendant_dn, dn):
"""Returns True if and only if the descendant_dn is under the dn.
:param descendant_dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
:param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
if not isinstance(descendant_dn, list):
descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
if not isinstance(dn, list):
dn = ldap.dn.str2dn(utf8_encode(dn))
if len(descendant_dn) <= len(dn):
return False
# Use the last len(dn) RDNs.
return is_dn_equal(descendant_dn[-len(dn):], dn)
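# An illustrative sketch of the DN helpers above; the DNs are hypothetical and
# this function is not called anywhere.
def _example_dn_comparisons():
    assert is_dn_equal('cn=Alice,ou=Users,dc=Example,dc=Com',
                       'CN=alice,OU=users,DC=example,DC=com')
    assert dn_startswith('cn=alice,ou=Users,dc=example,dc=com',
                         'ou=Users,dc=example,dc=com')
    # A DN does not "start with" itself; the descendant must be deeper.
    return not dn_startswith('ou=Users,dc=example,dc=com',
                             'ou=Users,dc=example,dc=com')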
@six.add_metaclass(abc.ABCMeta)
class LDAPHandler(object):
'''Abstract class which defines methods for a LDAP API provider.
Native Keystone values cannot be passed directly into and from the
python-ldap API. Type conversion must occur at the LDAP API
    boundary; examples of type conversions are:
* booleans map to the strings 'TRUE' and 'FALSE'
* integer values map to their string representation.
* unicode strings are encoded in UTF-8
In addition to handling type conversions at the API boundary we
have the requirement to support more than one LDAP API
provider. Currently we have:
* python-ldap, this is the standard LDAP API for Python, it
requires access to a live LDAP server.
* Fake LDAP which emulates python-ldap. This is used for
testing without requiring a live LDAP server.
To support these requirements we need a layer that performs type
conversions and then calls another LDAP API which is configurable
(e.g. either python-ldap or the fake emulation).
We have an additional constraint at the time of this writing due to
limitations in the logging module. The logging module is not
capable of accepting UTF-8 encoded strings, it will throw an
encoding exception. Therefore all logging MUST be performed prior
to UTF-8 conversion. This means no logging can be performed in the
ldap APIs that implement the python-ldap API because those APIs
are defined to accept only UTF-8 strings. Thus the layer which
performs type conversions must also do the logging. We do the type
conversions in two steps, once to convert all Python types to
unicode strings, then log, then convert the unicode strings to
UTF-8.
There are a variety of ways one could accomplish this, we elect to
use a chaining technique whereby instances of this class simply
call the next member in the chain via the "conn" attribute. The
chain is constructed by passing in an existing instance of this
class as the conn attribute when the class is instantiated.
Here is a brief explanation of why other possible approaches were
not used:
subclassing
To perform the wrapping operations in the correct order
        the type conversion class would have to subclass each of
the API providers. This is awkward, doubles the number of
classes, and does not scale well. It requires the type
conversion class to be aware of all possible API
providers.
decorators
Decorators provide an elegant solution to wrap methods and
would be an ideal way to perform type conversions before
calling the wrapped function and then converting the
values returned from the wrapped function. However
decorators need to be aware of the method signature, it
has to know what input parameters need conversion and how
to convert the result. For an API like python-ldap which
has a large number of different method signatures it would
require a large number of specialized
decorators. Experience has shown it's very easy to apply
the wrong decorator due to the inherent complexity and
tendency to cut-n-paste code. Another option is to
parameterize the decorator to make it "smart". Experience
has shown such decorators become insanely complicated and
difficult to understand and debug. Also decorators tend to
hide what's really going on when a method is called, the
operations being performed are not visible when looking at
        the implementation of a decorated method; this too, experience
        has shown, leads to mistakes.
Chaining simplifies both wrapping to perform type conversion as
well as the substitution of alternative API providers. One simply
creates a new instance of the API interface and insert it at the
front of the chain. Type conversions are explicit and obvious.
If a new method needs to be added to the API interface one adds it
to the abstract class definition. Should one miss adding the new
method to any derivations of the abstract class the code will fail
to load and run making it impossible to forget updating all the
derived classes.
'''
@abc.abstractmethod
def __init__(self, conn=None):
self.conn = conn
@abc.abstractmethod
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def set_option(self, option, invalue):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_option(self, option):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def unbind_s(self):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def add_s(self, dn, modlist):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def modify_s(self, dn, modlist):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_s(self, dn):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
raise exception.NotImplemented() # pragma: no cover
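# An illustrative sketch of the chaining technique described in the docstring
# above: the outer handler converts types and logs, then delegates to the
# inner handler via its ``conn`` attribute. Both classes are defined later in
# this module; this function is not called anywhere.
def _example_handler_chain():
    inner = PythonLDAPHandler()              # talks to python-ldap directly
    outer = KeystoneLDAPHandler(conn=inner)  # type conversion + logging wrapper
    return outer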
class PythonLDAPHandler(LDAPHandler):
'''Implementation of the LDAPHandler interface which calls the
python-ldap API.
Note, the python-ldap API requires all string values to be UTF-8
encoded. The KeystoneLDAPHandler enforces this prior to invoking
the methods in this class.
'''
def __init__(self, conn=None):
super(PythonLDAPHandler, self).__init__(conn=conn)
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level)
self.conn = ldap.initialize(url)
self.conn.protocol_version = ldap.VERSION3
if alias_dereferencing is not None:
self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
self.page_size = page_size
if use_tls:
self.conn.start_tls_s()
if chase_referrals is not None:
self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)
def unbind_s(self):
return self.conn.unbind_s()
def add_s(self, dn, modlist):
return self.conn.add_s(dn, modlist)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return self.conn.search_s(base, scope, filterstr,
attrlist, attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
# The resp_ctrl_classes parameter is a recent addition to the
# API. It defaults to None. We do not anticipate using it.
# To run with older versions of python-ldap we do not pass it.
return self.conn.result3(msgid, all, timeout)
def modify_s(self, dn, modlist):
return self.conn.modify_s(dn, modlist)
def delete_s(self, dn):
return self.conn.delete_s(dn)
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
return self.conn.delete_ext_s(dn, serverctrls, clientctrls)
def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
tls_cacertdir=None, tls_req_cert=None,
debug_level=None):
'''Method for common ldap initialization between PythonLDAPHandler and
PooledLDAPHandler.
'''
LOG.debug("LDAP init: url=%s", url)
LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
'tls_req_cert=%s tls_avail=%s',
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, ldap.TLS_AVAIL)
if debug_level is not None:
ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)
using_ldaps = url.lower().startswith("ldaps")
if use_tls and using_ldaps:
raise AssertionError(_('Invalid TLS / LDAPS combination'))
# The certificate trust options apply for both LDAPS and TLS.
if use_tls or using_ldaps:
if not ldap.TLS_AVAIL:
raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
'not available') % ldap.TLS_AVAIL)
if tls_cacertfile:
# NOTE(topol)
# python ldap TLS does not verify CACERTFILE or CACERTDIR
# so we add some extra simple sanity check verification
# Also, setting these values globally (i.e. on the ldap object)
# works but these values are ignored when setting them on the
# connection
if not os.path.isfile(tls_cacertfile):
raise IOError(_("tls_cacertfile %s not found "
"or is not a file") %
tls_cacertfile)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
elif tls_cacertdir:
# NOTE(topol)
# python ldap TLS does not verify CACERTFILE or CACERTDIR
# so we add some extra simple sanity check verification
# Also, setting these values globally (i.e. on the ldap object)
# works but these values are ignored when setting them on the
# connection
if not os.path.isdir(tls_cacertdir):
raise IOError(_("tls_cacertdir %s not found "
"or is not a directory") %
tls_cacertdir)
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
if tls_req_cert in list(LDAP_TLS_CERTS.values()):
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
else:
LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
tls_req_cert)
class MsgId(list):
'''Wrapper class to hold connection and msgid.'''
pass
def use_conn_pool(func):
'''Use this only for connection pool specific ldap API.
This adds connection object to decorated API as next argument after self.
'''
def wrapper(self, *args, **kwargs):
# assert isinstance(self, PooledLDAPHandler)
with self._get_pool_connection() as conn:
self._apply_options(conn)
return func(self, conn, *args, **kwargs)
return wrapper
class PooledLDAPHandler(LDAPHandler):
    '''Implementation of the LDAPHandler interface which uses a pooled
connection manager.
Pool specific configuration is defined in [ldap] section.
All other LDAP configuration is still used from [ldap] section
Keystone LDAP authentication logic authenticates an end user using its DN
and password via LDAP bind to establish supplied password is correct.
This can fill up the pool quickly (as pool re-uses existing connection
based on its bind data) and would not leave space in pool for connection
re-use for other LDAP operations.
    Now a separate pool can be established for those requests when the related flag
'use_auth_pool' is enabled. That pool can have its own size and
connection lifetime. Other pool attributes are shared between those pools.
If 'use_pool' is disabled, then 'use_auth_pool' does not matter.
If 'use_auth_pool' is not enabled, then connection pooling is not used for
those LDAP operations.
Note, the python-ldap API requires all string values to be UTF-8
encoded. The KeystoneLDAPHandler enforces this prior to invoking
the methods in this class.
'''
# Added here to allow override for testing
Connector = ldappool.StateConnector
auth_pool_prefix = 'auth_pool_'
connection_pools = {} # static connector pool dict
def __init__(self, conn=None, use_auth_pool=False):
super(PooledLDAPHandler, self).__init__(conn=conn)
self.who = ''
self.cred = ''
self.conn_options = {} # connection specific options
self.page_size = None
self.use_auth_pool = use_auth_pool
self.conn_pool = None
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level)
self.page_size = page_size
# Following two options are not added in common initialization as they
# need to follow a sequence in PythonLDAPHandler code.
if alias_dereferencing is not None:
self.set_option(ldap.OPT_DEREF, alias_dereferencing)
if chase_referrals is not None:
self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
if self.use_auth_pool: # separate pool when use_auth_pool enabled
pool_url = self.auth_pool_prefix + url
else:
pool_url = url
try:
self.conn_pool = self.connection_pools[pool_url]
except KeyError:
self.conn_pool = ldappool.ConnectionManager(
url,
size=pool_size,
retry_max=pool_retry_max,
retry_delay=pool_retry_delay,
timeout=pool_conn_timeout,
connector_cls=self.Connector,
use_tls=use_tls,
max_lifetime=pool_conn_lifetime)
self.connection_pools[pool_url] = self.conn_pool
def set_option(self, option, invalue):
self.conn_options[option] = invalue
def get_option(self, option):
value = self.conn_options.get(option)
# if option was not specified explicitly, then use connection default
        # value for that option, if there is one.
if value is None:
with self._get_pool_connection() as conn:
value = conn.get_option(option)
return value
def _apply_options(self, conn):
# if connection has a lifetime, then it already has options specified
if conn.get_lifetime() > 30:
return
for option, invalue in self.conn_options.items():
conn.set_option(option, invalue)
def _get_pool_connection(self):
return self.conn_pool.connection(self.who, self.cred)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
'''Not using use_conn_pool decorator here as this API takes cred as
input.
'''
self.who = who
self.cred = cred
with self._get_pool_connection() as conn:
self._apply_options(conn)
def unbind_s(self):
        # After the `with` block that used the pooled connection finishes,
        # ldappool always releases the connection via its finally block,
        # so this unbind is a no-op.
pass
@use_conn_pool
def add_s(self, conn, dn, modlist):
return conn.add_s(dn, modlist)
@use_conn_pool
def search_s(self, conn, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return conn.search_s(base, scope, filterstr, attrlist,
attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
        '''This is an asynchronous API which returns a MsgId instance to be
        used in a later result3 call.
        To work with the result3 API in a predictable manner, the same LDAP
        connection that produced the msgid is needed. So the used connection
        and msgid are wrapped in the MsgId class. The connection associated
        with search_ext is released once the last hard reference to the MsgId
        object is freed. This will happen when the method is done with the
        returned MsgId.
'''
conn_ctxt = self._get_pool_connection()
conn = conn_ctxt.__enter__()
try:
msgid = conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
except Exception:
conn_ctxt.__exit__(*sys.exc_info())
raise
res = MsgId((conn, msgid))
weakref.ref(res, functools.partial(conn_ctxt.__exit__,
None, None, None))
return res
def result3(self, msgid, all=1, timeout=None,
resp_ctrl_classes=None):
'''This method is used to wait for and return the result of an
operation previously initiated by one of the LDAP asynchronous
        operation routines (e.g. search_ext()), which return an invocation
        identifier (a message id) upon successful initiation of the
        operation.
        The input msgid is expected to be an instance of class MsgId, which
        holds the LDAP session/connection used to execute search_ext and the
        message identifier.
        The connection associated with search_ext is released once the last
        hard reference to the MsgId object is freed. This will happen when
        the function which requested the msgid and used it in result3 exits.
'''
conn, msg_id = msgid
return conn.result3(msg_id, all, timeout)
@use_conn_pool
def modify_s(self, conn, dn, modlist):
return conn.modify_s(dn, modlist)
@use_conn_pool
def delete_s(self, conn, dn):
return conn.delete_s(dn)
@use_conn_pool
def delete_ext_s(self, conn, dn, serverctrls=None, clientctrls=None):
return conn.delete_ext_s(dn, serverctrls, clientctrls)
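# An illustrative sketch of the asynchronous search_ext()/result3() pairing
# described above, assuming ``handler`` is an already-connected
# PooledLDAPHandler; the search base and filter are hypothetical.
def _example_pooled_async_search(handler):
    msgid = handler.search_ext('ou=Users,dc=example,dc=com',
                               ldap.SCOPE_SUBTREE,
                               '(objectClass=inetOrgPerson)')
    # msgid is a MsgId wrapping (connection, message id); the pooled
    # connection is held until that object is garbage collected.
    rtype, rdata, rmsgid, serverctrls = handler.result3(msgid)
    return rdata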
class KeystoneLDAPHandler(LDAPHandler):
'''Convert data types and perform logging.
    This LDAP interface wraps the python-ldap based interfaces. The
python-ldap interfaces require string values encoded in UTF-8. The
OpenStack logging framework at the time of this writing is not
capable of accepting strings encoded in UTF-8, the log functions
will throw decoding errors if a non-ascii character appears in a
string.
Prior to the call Python data types are converted to a string
representation as required by the LDAP APIs.
Then logging is performed so we can track what is being
sent/received from LDAP. Also the logging filters security
sensitive items (i.e. passwords).
Then the string values are encoded into UTF-8.
Then the LDAP API entry point is invoked.
Data returned from the LDAP call is converted back from UTF-8
encoded strings into the Python data type used internally in
OpenStack.
'''
def __init__(self, conn=None):
super(KeystoneLDAPHandler, self).__init__(conn=conn)
self.page_size = 0
def __enter__(self):
return self
def _disable_paging(self):
# Disable the pagination from now on
self.page_size = 0
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None,
pool_retry_max=None, pool_retry_delay=None,
pool_conn_timeout=None, pool_conn_lifetime=None):
self.page_size = page_size
return self.conn.connect(url, page_size, alias_dereferencing,
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, chase_referrals,
debug_level=debug_level,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=pool_retry_max,
pool_retry_delay=pool_retry_delay,
pool_conn_timeout=pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime)
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
LOG.debug("LDAP bind: who=%s", who)
who_utf8 = utf8_encode(who)
cred_utf8 = utf8_encode(cred)
return self.conn.simple_bind_s(who_utf8, cred_utf8,
serverctrls=serverctrls,
clientctrls=clientctrls)
def unbind_s(self):
LOG.debug("LDAP unbind")
return self.conn.unbind_s()
def add_s(self, dn, modlist):
ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
for kind, values in modlist]
logging_attrs = [(kind, values
if kind != 'userPassword'
else ['****'])
for kind, values in ldap_attrs]
LOG.debug('LDAP add: dn=%s attrs=%s',
dn, logging_attrs)
dn_utf8 = utf8_encode(dn)
ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)])
for kind, values in ldap_attrs]
return self.conn.add_s(dn_utf8, ldap_attrs_utf8)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
# NOTE(morganfainberg): Remove "None" singletons from this list, which
# allows us to set mapped attributes to "None" as defaults in config.
# Without this filtering, the ldap query would raise a TypeError since
# attrlist is expected to be an iterable of strings.
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s',
base, scope, filterstr, attrlist, attrsonly)
if self.page_size:
ldap_result = self._paged_search_s(base, scope,
filterstr, attrlist)
else:
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist_utf8 = list(map(utf8_encode, attrlist))
ldap_result = self.conn.search_s(base_utf8, scope,
filterstr_utf8,
attrlist_utf8, attrsonly)
py_result = convert_ldap_result(ldap_result)
return py_result
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s'
'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
base, scope, filterstr, attrlist, attrsonly,
serverctrls, clientctrls, timeout, sizelimit)
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def _paged_search_s(self, base, scope, filterstr, attrlist=None):
res = []
use_old_paging_api = False
# The API for the simple paged results control changed between
# python-ldap 2.3 and 2.4. We need to detect the capabilities
# of the python-ldap version we are using.
if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
use_old_paging_api = True
lc = ldap.controls.SimplePagedResultsControl(
controlType=ldap.LDAP_CONTROL_PAGE_OID,
criticality=True,
controlValue=(self.page_size, ''))
page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
else:
lc = ldap.controls.libldap.SimplePagedResultsControl(
criticality=True,
size=self.page_size,
cookie='')
page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist = [attr for attr in attrlist if attr is not None]
attrlist_utf8 = list(map(utf8_encode, attrlist))
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
        # Loop requesting pages from the LDAP server until it has no more data
while True:
            # Request a page of 'page_size' entries from the LDAP server
rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
# Receive the data
res.extend(rdata)
pctrls = [c for c in serverctrls
if c.controlType == page_ctrl_oid]
if pctrls:
# LDAP server supports pagination
if use_old_paging_api:
est, cookie = pctrls[0].controlValue
lc.controlValue = (self.page_size, cookie)
else:
cookie = lc.cookie = pctrls[0].cookie
if cookie:
# There is more data still on the server
# so we request another page
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
else:
# Exit condition no more data on server
break
else:
LOG.warning(_LW('LDAP Server does not support paging. '
'Disable paging in keystone.conf to '
'avoid this message.'))
self._disable_paging()
break
return res
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes)
LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
'resp_ctrl_classes=%s ldap_result=%s',
msgid, all, timeout, resp_ctrl_classes, ldap_result)
py_result = convert_ldap_result(ldap_result)
return py_result
def modify_s(self, dn, modlist):
ldap_modlist = [
(op, kind, (None if values is None
else [py2ldap(x) for x in safe_iter(values)]))
for op, kind, values in modlist]
logging_modlist = [(op, kind, (values if kind != 'userPassword'
else ['****']))
for op, kind, values in ldap_modlist]
LOG.debug('LDAP modify: dn=%s modlist=%s',
dn, logging_modlist)
dn_utf8 = utf8_encode(dn)
ldap_modlist_utf8 = [
(op, kind, (None if values is None
else [utf8_encode(x) for x in safe_iter(values)]))
for op, kind, values in ldap_modlist]
return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)
def delete_s(self, dn):
LOG.debug("LDAP delete: dn=%s", dn)
dn_utf8 = utf8_encode(dn)
return self.conn.delete_s(dn_utf8)
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
LOG.debug('LDAP delete_ext: dn=%s serverctrls=%s clientctrls=%s',
dn, serverctrls, clientctrls)
dn_utf8 = utf8_encode(dn)
return self.conn.delete_ext_s(dn_utf8, serverctrls, clientctrls)
def __exit__(self, exc_type, exc_val, exc_tb):
self.unbind_s()
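# An illustrative sketch of typical KeystoneLDAPHandler usage as a context
# manager; the URL and credentials are placeholders and this function is not
# called anywhere.
def _example_keystone_handler_usage():
    handler = KeystoneLDAPHandler(conn=PythonLDAPHandler())
    handler.connect('ldap://ldap.example.com', page_size=100)
    with handler:  # __exit__ calls unbind_s()
        handler.simple_bind_s('cn=admin,dc=example,dc=com', 'secret')
        results = handler.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                                   '(objectClass=*)', ['cn'])
    return results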
_HANDLERS = {}
def register_handler(prefix, handler):
_HANDLERS[prefix] = handler
def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
for prefix, handler in _HANDLERS.items():
if conn_url.startswith(prefix):
return handler()
if use_pool:
return PooledLDAPHandler(use_auth_pool=use_auth_pool)
else:
return PythonLDAPHandler()
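# An illustrative sketch of how a custom handler could be plugged in through
# register_handler(); the 'fake://' prefix and the handler class passed in
# are hypothetical.
def _example_register_fake_handler(fake_handler_cls):
    register_handler('fake://', fake_handler_cls)
    # A backend configured with url = 'fake://memory' now gets an instance
    # of fake_handler_cls from _get_connection().
    return _get_connection('fake://memory')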
def filter_entity(entity_ref):
"""Filter out private items in an entity dict.
:param entity_ref: the entity dictionary. The 'dn' field will be removed.
'dn' is used in LDAP, but should not be returned to the user. This
value may be modified.
:returns: entity_ref
"""
if entity_ref:
entity_ref.pop('dn', None)
return entity_ref
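# An illustrative sketch of filter_entity(); the entity dict is made up.
def _example_filter_entity():
    ref = {'id': 'u1', 'name': 'alice',
           'dn': 'cn=alice,ou=Users,dc=example,dc=com'}
    return filter_entity(ref)  # -> {'id': 'u1', 'name': 'alice'}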
class BaseLdap(object):
DEFAULT_OU = None
DEFAULT_STRUCTURAL_CLASSES = None
DEFAULT_ID_ATTR = 'cn'
DEFAULT_OBJECTCLASS = None
DEFAULT_FILTER = None
DEFAULT_EXTRA_ATTR_MAPPING = []
DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
NotFound = None
notfound_arg = None
options_name = None
model = None
attribute_options_names = {}
immutable_attrs = []
attribute_ignore = []
tree_dn = None
def __init__(self, conf):
self.LDAP_URL = conf.ldap.url
self.LDAP_USER = conf.ldap.user
self.LDAP_PASSWORD = conf.ldap.password
self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
self.page_size = conf.ldap.page_size
self.use_tls = conf.ldap.use_tls
self.tls_cacertfile = conf.ldap.tls_cacertfile
self.tls_cacertdir = conf.ldap.tls_cacertdir
self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
self.attribute_mapping = {}
self.chase_referrals = conf.ldap.chase_referrals
self.debug_level = conf.ldap.debug_level
# LDAP Pool specific attribute
self.use_pool = conf.ldap.use_pool
self.pool_size = conf.ldap.pool_size
self.pool_retry_max = conf.ldap.pool_retry_max
self.pool_retry_delay = conf.ldap.pool_retry_delay
self.pool_conn_timeout = conf.ldap.pool_connection_timeout
self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime
# End user authentication pool specific config attributes
self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
self.auth_pool_size = conf.ldap.auth_pool_size
self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime
if self.options_name is not None:
self.suffix = conf.ldap.suffix
dn = '%s_tree_dn' % self.options_name
self.tree_dn = (getattr(conf.ldap, dn)
or '%s,%s' % (self.DEFAULT_OU, self.suffix))
idatt = '%s_id_attribute' % self.options_name
self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
objclass = '%s_objectclass' % self.options_name
self.object_class = (getattr(conf.ldap, objclass)
or self.DEFAULT_OBJECTCLASS)
for k, v in self.attribute_options_names.items():
v = '%s_%s_attribute' % (self.options_name, v)
self.attribute_mapping[k] = getattr(conf.ldap, v)
attr_mapping_opt = ('%s_additional_attribute_mapping' %
self.options_name)
attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
or self.DEFAULT_EXTRA_ATTR_MAPPING)
self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)
ldap_filter = '%s_filter' % self.options_name
self.ldap_filter = getattr(conf.ldap,
ldap_filter) or self.DEFAULT_FILTER
allow_create = '%s_allow_create' % self.options_name
self.allow_create = getattr(conf.ldap, allow_create)
allow_update = '%s_allow_update' % self.options_name
self.allow_update = getattr(conf.ldap, allow_update)
allow_delete = '%s_allow_delete' % self.options_name
self.allow_delete = getattr(conf.ldap, allow_delete)
member_attribute = '%s_member_attribute' % self.options_name
self.member_attribute = getattr(conf.ldap, member_attribute, None)
self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
if self.notfound_arg is None:
self.notfound_arg = self.options_name + '_id'
attribute_ignore = '%s_attribute_ignore' % self.options_name
self.attribute_ignore = getattr(conf.ldap, attribute_ignore)
self.use_dumb_member = conf.ldap.use_dumb_member
self.dumb_member = (conf.ldap.dumb_member or
self.DUMB_MEMBER_DN)
self.subtree_delete_enabled = conf.ldap.allow_subtree_delete
def _not_found(self, object_id):
if self.NotFound is None:
return exception.NotFound(target=object_id)
else:
return self.NotFound(**{self.notfound_arg: object_id})
def _parse_extra_attrs(self, option_list):
mapping = {}
for item in option_list:
try:
ldap_attr, attr_map = item.split(':')
except Exception:
LOG.warn(_LW(
'Invalid additional attribute mapping: "%s". '
'Format must be <ldap_attribute>:<keystone_attribute>'),
item)
continue
mapping[ldap_attr] = attr_map
return mapping
def _is_dumb_member(self, member_dn):
"""Checks that member is a dumb member.
:param member_dn: DN of member to be checked.
"""
return (self.use_dumb_member
and is_dn_equal(member_dn, self.dumb_member))
def get_connection(self, user=None, password=None, end_user_auth=False):
use_pool = self.use_pool
pool_size = self.pool_size
pool_conn_lifetime = self.pool_conn_lifetime
if end_user_auth:
if not self.use_auth_pool:
use_pool = False
else:
pool_size = self.auth_pool_size
pool_conn_lifetime = self.auth_pool_conn_lifetime
conn = _get_connection(self.LDAP_URL, use_pool,
use_auth_pool=end_user_auth)
conn = KeystoneLDAPHandler(conn=conn)
conn.connect(self.LDAP_URL,
page_size=self.page_size,
alias_dereferencing=self.alias_dereferencing,
use_tls=self.use_tls,
tls_cacertfile=self.tls_cacertfile,
tls_cacertdir=self.tls_cacertdir,
tls_req_cert=self.tls_req_cert,
chase_referrals=self.chase_referrals,
debug_level=self.debug_level,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=self.pool_retry_max,
pool_retry_delay=self.pool_retry_delay,
pool_conn_timeout=self.pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime
)
if user is None:
user = self.LDAP_USER
if password is None:
password = self.LDAP_PASSWORD
# not all LDAP servers require authentication, so we don't bind
# if we don't have any user/pass
if user and password:
conn.simple_bind_s(user, password)
return conn
def _id_to_dn_string(self, object_id):
return u'%s=%s,%s' % (self.id_attr,
ldap.dn.escape_dn_chars(
six.text_type(object_id)),
self.tree_dn)
def _id_to_dn(self, object_id):
if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
return self._id_to_dn_string(object_id)
with self.get_connection() as conn:
search_result = conn.search_s(
self.tree_dn, self.LDAP_SCOPE,
u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
{'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(
six.text_type(object_id)),
'objclass': self.object_class},
attrlist=DN_ONLY)
if search_result:
dn, attrs = search_result[0]
return dn
else:
return self._id_to_dn_string(object_id)
@staticmethod
def _dn_to_id(dn):
return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])
def _ldap_res_to_model(self, res):
# LDAP attribute names may be returned in a different case than
# they are defined in the mapping, so we need to check for keys
# in a case-insensitive way. We use the case specified in the
# mapping for the model to ensure we have a predictable way of
# retrieving values later.
lower_res = {k.lower(): v for k, v in res[1].items()}
id_attrs = lower_res.get(self.id_attr.lower())
if not id_attrs:
message = _('ID attribute %(id_attr)s not found in LDAP '
'object %(dn)s') % ({'id_attr': self.id_attr,
'dn': res[0]})
raise exception.NotFound(message=message)
if len(id_attrs) > 1:
# FIXME(gyee): if this is a multi-value attribute and it has
# multiple values, we can't use it as ID. Retain the dn_to_id
# logic here so it does not potentially break existing
# deployments. We need to fix our read-write LDAP logic so
# it does not get the ID from DN.
message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
'has multiple values and therefore cannot be used '
'as an ID. Will get the ID from DN instead') % (
{'id_attr': self.id_attr,
'dn': res[0]})
LOG.warn(message)
id_val = self._dn_to_id(res[0])
else:
id_val = id_attrs[0]
obj = self.model(id=id_val)
for k in obj.known_keys:
if k in self.attribute_ignore:
continue
try:
map_attr = self.attribute_mapping.get(k, k)
if map_attr is None:
# Ignore attributes that are mapped to None.
continue
v = lower_res[map_attr.lower()]
except KeyError:
pass
else:
try:
obj[k] = v[0]
except IndexError:
obj[k] = None
return obj
def check_allow_create(self):
if not self.allow_create:
action = _('LDAP %s create') % self.options_name
raise exception.ForbiddenAction(action=action)
def check_allow_update(self):
if not self.allow_update:
action = _('LDAP %s update') % self.options_name
raise exception.ForbiddenAction(action=action)
def check_allow_delete(self):
if not self.allow_delete:
action = _('LDAP %s delete') % self.options_name
raise exception.ForbiddenAction(action=action)
def affirm_unique(self, values):
if values.get('name') is not None:
try:
self.get_by_name(values['name'])
except exception.NotFound:
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate name, %s.') %
values['name'])
if values.get('id') is not None:
try:
self.get(values['id'])
except exception.NotFound:
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate ID, %s.') %
values['id'])
def create(self, values):
self.affirm_unique(values)
object_classes = self.structural_classes + [self.object_class]
attrs = [('objectClass', object_classes)]
for k, v in values.items():
if k in self.attribute_ignore:
continue
if k == 'id':
# no need to check if v is None as 'id' will always have
# a value
attrs.append((self.id_attr, [v]))
elif v is not None:
attr_type = self.attribute_mapping.get(k, k)
if attr_type is not None:
attrs.append((attr_type, [v]))
extra_attrs = [attr for attr, name
in self.extra_attr_mapping.items()
if name == k]
for attr in extra_attrs:
attrs.append((attr, [v]))
if 'groupOfNames' in object_classes and self.use_dumb_member:
attrs.append(('member', [self.dumb_member]))
with self.get_connection() as conn:
conn.add_s(self._id_to_dn(values['id']), attrs)
return values
def _ldap_get(self, object_id, ldap_filter=None):
query = (u'(&(%(id_attr)s=%(id)s)'
u'%(filter)s'
u'(objectClass=%(object_class)s))'
% {'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(
six.text_type(object_id)),
'filter': (ldap_filter or self.ldap_filter or ''),
'object_class': self.object_class})
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
list(self.attribute_mapping.values()) +
list(self.extra_attr_mapping.keys()))))
res = conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs)
except ldap.NO_SUCH_OBJECT:
return None
try:
return res[0]
except IndexError:
return None
def _ldap_get_all(self, ldap_filter=None):
query = u'(&%s(objectClass=%s))' % (ldap_filter or
self.ldap_filter or
'', self.object_class)
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
list(self.attribute_mapping.values()) +
list(self.extra_attr_mapping.keys()))))
return conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs)
except ldap.NO_SUCH_OBJECT:
return []
def _ldap_get_list(self, search_base, scope, query_params=None,
attrlist=None):
query = u'(objectClass=%s)' % self.object_class
if query_params:
def calc_filter(attrname, value):
val_esc = ldap.filter.escape_filter_chars(value)
return '(%s=%s)' % (attrname, val_esc)
query = (u'(&%s%s)' %
(query, ''.join([calc_filter(k, v) for k, v in
query_params.items()])))
with self.get_connection() as conn:
return conn.search_s(search_base, scope, query, attrlist)
def get(self, object_id, ldap_filter=None):
res = self._ldap_get(object_id, ldap_filter)
if res is None:
raise self._not_found(object_id)
else:
return self._ldap_res_to_model(res)
def get_by_name(self, name, ldap_filter=None):
query = (u'(%s=%s)' % (self.attribute_mapping['name'],
ldap.filter.escape_filter_chars(
six.text_type(name))))
res = self.get_all(query)
try:
return res[0]
except IndexError:
raise self._not_found(name)
def get_all(self, ldap_filter=None):
return [self._ldap_res_to_model(x)
for x in self._ldap_get_all(ldap_filter)]
def update(self, object_id, values, old_obj=None):
if old_obj is None:
old_obj = self.get(object_id)
modlist = []
for k, v in values.items():
if k == 'id':
# id can't be modified.
continue
if k in self.attribute_ignore:
# Handle 'enabled' specially since can't disable if ignored.
if k == 'enabled' and (not v):
action = _("Disabling an entity where the 'enable' "
"attribute is ignored by configuration.")
raise exception.ForbiddenAction(action=action)
continue
# attribute value has not changed
if k in old_obj and old_obj[k] == v:
continue
if k in self.immutable_attrs:
msg = (_("Cannot change %(option_name)s %(attr)s") %
{'option_name': self.options_name, 'attr': k})
raise exception.ValidationError(msg)
if v is None:
if old_obj.get(k) is not None:
modlist.append((ldap.MOD_DELETE,
self.attribute_mapping.get(k, k),
None))
continue
current_value = old_obj.get(k)
if current_value is None:
op = ldap.MOD_ADD
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
elif current_value != v:
op = ldap.MOD_REPLACE
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
if modlist:
with self.get_connection() as conn:
try:
conn.modify_s(self._id_to_dn(object_id), modlist)
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
return self.get(object_id)
def delete(self, object_id):
with self.get_connection() as conn:
try:
conn.delete_s(self._id_to_dn(object_id))
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
def deleteTree(self, object_id):
tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
0,
None)
with self.get_connection() as conn:
try:
conn.delete_ext_s(self._id_to_dn(object_id),
serverctrls=[tree_delete_control])
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
except ldap.NOT_ALLOWED_ON_NONLEAF:
# Most LDAP servers do not support the tree_delete_control.
# In these servers, the usual idiom is to first perform a
                # search to get the entries to delete, then delete them in
                # order of child to parent, since LDAP forbids the
# deletion of a parent entry before deleting the children
# of that parent. The simplest way to do that is to delete
# the entries in order of the length of the DN, from longest
# to shortest DN.
dn = self._id_to_dn(object_id)
scope = ldap.SCOPE_SUBTREE
# With some directory servers, an entry with objectclass
# ldapsubentry will not be returned unless it is explicitly
# requested, by specifying the objectclass in the search
# filter. We must specify this, with objectclass=*, in an
# LDAP filter OR clause, in order to return all entries
filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
# We only need the DNs of the entries. Since no attributes
# will be returned, we do not have to specify attrsonly=1.
entries = conn.search_s(dn, scope, filt, attrlist=DN_ONLY)
if entries:
for dn in sorted((e[0] for e in entries),
key=len, reverse=True):
conn.delete_s(dn)
else:
LOG.debug('No entries in LDAP subtree %s', dn)
def add_member(self, member_dn, member_list_dn):
"""Add member to the member list.
:param member_dn: DN of member to be added.
:param member_list_dn: DN of group to which the
member will be added.
:raises: exception.Conflict: If the user was already a member.
self.NotFound: If the group entry didn't exist.
"""
with self.get_connection() as conn:
try:
mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
conn.modify_s(member_list_dn, [mod])
except ldap.TYPE_OR_VALUE_EXISTS:
raise exception.Conflict(_('Member %(member)s '
'is already a member'
' of group %(group)s') % {
'member': member_dn,
'group': member_list_dn})
except ldap.NO_SUCH_OBJECT:
raise self._not_found(member_list_dn)
def remove_member(self, member_dn, member_list_dn):
"""Remove member from the member list.
:param member_dn: DN of member to be removed.
:param member_list_dn: DN of group from which the
member will be removed.
:raises: self.NotFound: If the group entry didn't exist.
ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
"""
with self.get_connection() as conn:
try:
mod = (ldap.MOD_DELETE, self.member_attribute, member_dn)
conn.modify_s(member_list_dn, [mod])
except ldap.NO_SUCH_OBJECT:
raise self._not_found(member_list_dn)
def _delete_tree_nodes(self, search_base, scope, query_params=None):
query = u'(objectClass=%s)' % self.object_class
if query_params:
query = (u'(&%s%s)' %
(query, ''.join(['(%s=%s)'
% (k, ldap.filter.escape_filter_chars(v))
for k, v in
query_params.items()])))
not_deleted_nodes = []
with self.get_connection() as conn:
try:
nodes = conn.search_s(search_base, scope, query,
attrlist=DN_ONLY)
except ldap.NO_SUCH_OBJECT:
LOG.debug('Could not find entry with dn=%s', search_base)
raise self._not_found(self._dn_to_id(search_base))
else:
for node_dn, _t in nodes:
try:
conn.delete_s(node_dn)
except ldap.NO_SUCH_OBJECT:
not_deleted_nodes.append(node_dn)
if not_deleted_nodes:
LOG.warn(_LW("When deleting entries for %(search_base)s, could not"
" delete nonexistent entries %(entries)s%(dots)s"),
{'search_base': search_base,
'entries': not_deleted_nodes[:3],
'dots': '...' if len(not_deleted_nodes) > 3 else ''})
def filter_query(self, hints, query=None):
"""Applies filtering to a query.
:param hints: contains the list of filters, which may be None,
indicating that there are no filters to be applied.
If it's not None, then any filters satisfied here will be
removed so that the caller will know if any filters
remain to be applied.
:param query: LDAP query into which to include filters
:returns query: LDAP query, updated with any filters satisfied
"""
def build_filter(filter_, hints):
"""Build a filter for the query.
:param filter_: the dict that describes this filter
:param hints: contains the list of filters yet to be satisfied.
:returns query: LDAP query term to be added
"""
ldap_attr = self.attribute_mapping[filter_['name']]
val_esc = ldap.filter.escape_filter_chars(filter_['value'])
if filter_['case_sensitive']:
# NOTE(henry-nash): Although dependent on the schema being
# used, most LDAP attributes are configured with case
# insensitive matching rules, so we'll leave this to the
# controller to filter.
return
if filter_['name'] == 'enabled':
# NOTE(henry-nash): Due to the different options for storing
                # the enabled attribute (e.g. emulated or not), for now we
# don't try and filter this at the driver level - we simply
# leave the filter to be handled by the controller. It seems
                # unlikely that this will cause a significant performance
# issue.
return
# TODO(henry-nash): Currently there are no booleans (other than
# 'enabled' that is handled above) on which you can filter. If
# there were, we would need to add special handling here to
# convert the booleans values to 'TRUE' and 'FALSE'. To do that
# we would also need to know which filter keys were actually
# booleans (this is related to bug #1411478).
if filter_['comparator'] == 'equals':
query_term = (u'(%(attr)s=%(val)s)'
% {'attr': ldap_attr, 'val': val_esc})
elif filter_['comparator'] == 'contains':
query_term = (u'(%(attr)s=*%(val)s*)'
% {'attr': ldap_attr, 'val': val_esc})
elif filter_['comparator'] == 'startswith':
query_term = (u'(%(attr)s=%(val)s*)'
% {'attr': ldap_attr, 'val': val_esc})
elif filter_['comparator'] == 'endswith':
query_term = (u'(%(attr)s=*%(val)s)'
% {'attr': ldap_attr, 'val': val_esc})
else:
# It's a filter we don't understand, so let the caller
# work out if they need to do something with it.
return
return query_term
if query is None:
# make sure query is a string so the ldap filter is properly
# constructed from filter_list later
query = ''
if hints is None:
return query
filter_list = []
satisfied_filters = []
for filter_ in hints.filters:
if filter_['name'] not in self.attribute_mapping:
continue
new_filter = build_filter(filter_, hints)
if new_filter is not None:
filter_list.append(new_filter)
satisfied_filters.append(filter_)
if filter_list:
query = u'(&%s%s)' % (query, ''.join(filter_list))
        # Remove satisfied filters so the caller knows which filters remain
for filter_ in satisfied_filters:
hints.filters.remove(filter_)
return query
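# An illustrative sketch of what a concrete backend built on BaseLdap roughly
# looks like; the class name and option values below are hypothetical, not
# keystone's actual user backend.
class _ExampleUserApi(BaseLdap):
    DEFAULT_OU = 'ou=Users'
    DEFAULT_STRUCTURAL_CLASSES = ['person']
    DEFAULT_OBJECTCLASS = 'inetOrgPerson'
    DEFAULT_ID_ATTR = 'cn'
    options_name = 'user'
    attribute_options_names = {'name': 'name', 'enabled': 'enabled'}
    model = None  # a real backend points this at its reference model class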
class EnabledEmuMixIn(BaseLdap):
"""Emulates boolean 'enabled' attribute if turned on.
    Creates a groupOfNames entry holding all enabled objects of this class;
    any object missing from it is considered disabled.
Options:
* $name_enabled_emulation - boolean, on/off
* $name_enabled_emulation_dn - DN of that groupOfNames, default is
cn=enabled_${name}s,${tree_dn}
Where ${name}s is the plural of self.options_name ('users' or 'tenants'),
${tree_dn} is self.tree_dn.
"""
def __init__(self, conf):
super(EnabledEmuMixIn, self).__init__(conf)
enabled_emulation = '%s_enabled_emulation' % self.options_name
self.enabled_emulation = getattr(conf.ldap, enabled_emulation)
enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)
if not self.enabled_emulation_dn:
naming_attr_name = 'cn'
naming_attr_value = 'enabled_%ss' % self.options_name
sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
naming_attr = (naming_attr_name, [naming_attr_value])
else:
# Extract the attribute name and value from the configured DN.
naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
naming_rdn = naming_dn[0][0]
naming_attr = (utf8_decode(naming_rdn[0]),
utf8_decode(naming_rdn[1]))
self.enabled_emulation_naming_attr = naming_attr
def _get_enabled(self, object_id, conn):
dn = self._id_to_dn(object_id)
query = '(member=%s)' % dn
try:
enabled_value = conn.search_s(self.enabled_emulation_dn,
ldap.SCOPE_BASE,
query, ['cn'])
except ldap.NO_SUCH_OBJECT:
return False
else:
return bool(enabled_value)
def _add_enabled(self, object_id):
with self.get_connection() as conn:
if not self._get_enabled(object_id, conn):
modlist = [(ldap.MOD_ADD,
'member',
[self._id_to_dn(object_id)])]
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except ldap.NO_SUCH_OBJECT:
attr_list = [('objectClass', ['groupOfNames']),
('member', [self._id_to_dn(object_id)]),
self.enabled_emulation_naming_attr]
if self.use_dumb_member:
attr_list[1][1].append(self.dumb_member)
conn.add_s(self.enabled_emulation_dn, attr_list)
def _remove_enabled(self, object_id):
modlist = [(ldap.MOD_DELETE,
'member',
[self._id_to_dn(object_id)])]
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
pass
def create(self, values):
if self.enabled_emulation:
enabled_value = values.pop('enabled', True)
ref = super(EnabledEmuMixIn, self).create(values)
if 'enabled' not in self.attribute_ignore:
if enabled_value:
self._add_enabled(ref['id'])
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).create(values)
def get(self, object_id, ldap_filter=None):
with self.get_connection() as conn:
ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
if ('enabled' not in self.attribute_ignore and
self.enabled_emulation):
ref['enabled'] = self._get_enabled(object_id, conn)
return ref
def get_all(self, ldap_filter=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
            # had to copy BaseLdap.get_all here in order to filter out the
            # enabled-emulation DN from the results
tenant_list = [self._ldap_res_to_model(x)
for x in self._ldap_get_all(ldap_filter)
if x[0] != self.enabled_emulation_dn]
with self.get_connection() as conn:
for tenant_ref in tenant_list:
tenant_ref['enabled'] = self._get_enabled(
tenant_ref['id'], conn)
return tenant_list
else:
return super(EnabledEmuMixIn, self).get_all(ldap_filter)
def update(self, object_id, values, old_obj=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
data = values.copy()
enabled_value = data.pop('enabled', None)
ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
if enabled_value is not None:
if enabled_value:
self._add_enabled(object_id)
else:
self._remove_enabled(object_id)
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).update(
object_id, values, old_obj)
def delete(self, object_id):
if self.enabled_emulation:
self._remove_enabled(object_id)
super(EnabledEmuMixIn, self).delete(object_id)
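# An illustrative sketch of a backend opting into enabled-emulation by mixing
# EnabledEmuMixIn in ahead of BaseLdap; the class name and values are
# hypothetical. With <options_name>_enabled_emulation = True in keystone.conf,
# membership in cn=enabled_<options_name>s,<tree_dn> marks an object enabled.
class _ExampleEnabledEmuApi(EnabledEmuMixIn, BaseLdap):
    DEFAULT_OU = 'ou=Projects'
    DEFAULT_STRUCTURAL_CLASSES = []
    DEFAULT_OBJECTCLASS = 'groupOfNames'
    options_name = 'project'
    attribute_options_names = {'name': 'name', 'enabled': 'enabled'}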
class ProjectLdapStructureMixin(object):
"""Project LDAP Structure shared between LDAP backends.
This is shared between the resource and assignment LDAP backends.
"""
DEFAULT_OU = 'ou=Groups'
DEFAULT_STRUCTURAL_CLASSES = []
DEFAULT_OBJECTCLASS = 'groupOfNames'
DEFAULT_ID_ATTR = 'cn'
NotFound = exception.ProjectNotFound
notfound_arg = 'project_id' # NOTE(yorik-sar): while options_name = tenant
options_name = 'project'
attribute_options_names = {'name': 'name',
'description': 'desc',
'enabled': 'enabled',
'domain_id': 'domain_id'}
immutable_attrs = ['name']
|
apache-2.0
| 6,774,204,902,072,156,000
| 38.926969
| 79
| 0.568709
| false
| 4.160687
| false
| false
| false
|
mfiers/Moa
|
moa/plugin/job/openLavaActor.py
|
1
|
9361
|
# Copyright 2009-2011 Mark Fiers
# The New Zealand Institute for Plant & Food Research
#
# This file is part of Moa - http://github.com/mfiers/Moa
#
# Licensed under the GPL license (see 'COPYING')
#
"""
**openLavaActor** - Run jobs through OpenLava
-----------------------------------------------------------
"""
import os
import stat
import subprocess as sp
import sys
import tempfile
import jinja2
import moa.logger
import moa.ui
from moa.sysConf import sysConf
l = moa.logger.getLogger(__name__)
#l.setLevel(moa.logger.DEBUG)
def hook_defineCommandOptions(job, parser):
parser.add_argument('--ol', action='store_const', const='openlava',
dest='actorId', help='Use OpenLava as actor')
parser.add_argument('--olq', default='normal', dest='openlavaQueue',
help='The Openlava queue to submit this job to')
parser.add_argument('--olx', default='', dest='openlavaExtra',
help='Extra arguments for bsub')
parser.add_argument('--oln', default=1, type=int, dest='openlavaProcs',
help='The number of processors the jobs requires')
parser.add_argument('--oldummy', default=False, dest='openlavaDummy',
action='store_true',
help='Do not execute - just create a script to run')
parser.add_argument('--olm', default="", dest='openlavaHost',
help='The host to use for openlava')
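# An illustrative sketch of how the options above land on the parsed
# arguments; the command line values are hypothetical and this function is
# not called anywhere.
def _example_parse_openlava_options():
    import argparse
    parser = argparse.ArgumentParser()
    hook_defineCommandOptions(None, parser)  # the job argument is unused here
    args = parser.parse_args(['--ol', '--olq', 'long', '--oln', '4'])
    # args.actorId == 'openlava'; args.openlavaQueue == 'long';
    # args.openlavaProcs == 4
    return args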
def _writeOlTmpFile(wd, _script):
#save the file
tmpdir = os.path.join(wd, '.moa', 'tmp')
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
tf = tempfile.NamedTemporaryFile(dir=tmpdir, prefix='openlava.',
delete=False, suffix='.sh')
if isinstance(_script, list):
tf.write("\n".join(_script))
else:
tf.write(str(_script))
tf.close()
os.chmod(tf.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
return tf.name
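# An illustrative sketch of _writeOlTmpFile(); the working directory and
# script lines are hypothetical.
def _example_write_script(wd):
    script = ["#!/bin/bash", "#BSUB -q normal", "echo hello"]
    # Returns something like <wd>/.moa/tmp/openlava.XXXXXX.sh, made
    # executable for the owner.
    return _writeOlTmpFile(wd, script)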
@moa.actor.async
def openlavaRunner(wd, cl, conf={}, **kwargs):
"""
Run the job using OPENLAVA
what does this function do?
- put env in the environment
- Execute the commandline (in cl)
- store stdout & stderr in log files
- return the rc
"""
#see if we can get a command
command = kwargs.get('command', 'unknown')
if command == 'unknown':
l.critical("runner should be called with a command")
sys.exit(-1)
l.debug("starting openlava actor for %s" % command)
# this is a trick to get the real path of the log dir - but not of
# any underlying directory - in case paths are mounted differently
# on different hosts
outDir = os.path.abspath(os.path.join(wd, '.moa', 'log.latest'))
outDir = outDir.rsplit('.moa', 1)[0] + '.moa' + \
os.path.realpath(outDir).rsplit('.moa', 1)[1]
sysConf.job.data.openlava.outDir = outDir
if not os.path.exists(outDir):
try:
os.makedirs(outDir)
except OSError:
pass
#expect the cl to be nothing more than a single script to execute
outfile = os.path.join(outDir, 'stdout')
errfile = os.path.join(outDir, 'stderr')
sysConf.job.data.openlava.outfile = outfile
sysConf.job.data.openlava.errfile = errfile
bsub_cl = ['bsub']
sc = []
def s(*cl):
sc.append(" ".join(map(str, cl)))
s("#!/bin/bash")
s("#BSUB -o %s" % outfile)
s("#BSUB -e %s" % errfile)
s("#BSUB -q %s" % sysConf.args.openlavaQueue)
if '--oln' in sys.argv:
procs = sysConf.args.openlavaProcs
else:
procs = sysConf.job.conf.get('threads', sysConf.args.openlavaProcs)
    s("#BSUB -n %d" % procs)
if sysConf.args.openlavaExtra.strip():
s("#BSUB %s" % sysConf.args.openlavaExtra)
if '--olm' in sys.argv:
s("#BSUB -m %s" % sysConf.args.openlavaHost)
#bsub_cl.extend(["-m", sysConf.args.openlavaHost])
if command == 'run':
prep_jids = sysConf.job.data.openlava.jids.get('prepare', [])
#hold until the 'prepare' jobs are done
#l.critical("Prepare jids - wait for these! %s" % prep_jids)
for j in prep_jids:
s("#BSUB -w 'done(%d)'" % j)
#bsub_cl.extend(["-w", "'done(%d)'" % j])
elif command == 'finish':
run_jids = sysConf.job.data.openlava.jids.get('run', [])
#hold until the 'prepare' jobs are done
for j in run_jids:
s("#BSUB -w 'done(%d)'" % j)
#bsub_cl.extend(["-w", "'done(%d)'" % j])
#give it a reasonable name
jobname = ("%s_%s" % (wd.split('/')[-1], command[0]))
bsub_cl.extend(['-J', jobname])
s("#BSUB -J '%s'" % jobname)
#dump the configuration in the environment
s("")
s("## ensure we're in the correct directory")
s("cd", wd)
s("")
s("## Defining moa specific environment variables")
s("")
confkeys = sorted(conf.keys())
for k in confkeys:
        # to prevent collisions, prepend all env variables
# with 'moa_'
if k[0] == '_' or k[:3] == 'moa':
outk = k
else:
outk = 'moa_' + k
v = conf[k]
#this should not happen:
if ' ' in outk:
continue
if isinstance(v, list):
s("%s='%s'" % (outk, " ".join(v)))
elif isinstance(v, dict):
continue
else:
s("%s='%s'" % (outk, v))
s("")
s("## Run the command")
s("")
s(*cl)
if sysConf.args.openlavaDummy:
# Dummy mode - do not execute - just write the script.
ii = 0
while True:
outFile = os.path.join(wd, 'openlava.%s.%d.bash' % (command, ii))
if not os.path.exists(outFile):
break
ii += 1
with open(outFile, 'w') as F:
F.write("\n".join(sc))
moa.ui.message("Created openlava submit script: %s" %
outFile.rsplit('/', 1)[1])
moa.ui.message("now run:")
moa.ui.message(" %s < %s" % ((" ".join(map(str, bsub_cl))),
outFile.rsplit('/', 1)[1]))
return 0
tmpfile = _writeOlTmpFile(wd, sc)
moa.ui.message("Running %s:" % " ".join(map(str, bsub_cl)))
moa.ui.message("(copy of) the bsub script: %s" % tmpfile)
p = sp.Popen(map(str, bsub_cl), cwd=wd, stdout=sp.PIPE, stdin=sp.PIPE)
o, e = p.communicate("\n".join(sc))
jid = int(o.split("<")[1].split(">")[0])
moa.ui.message("Submitted a job to openlava with id %d" % jid)
if not sysConf.job.data.openlava.jids.get(command):
sysConf.job.data.openlava.jids[command] = []
#moa.ui.message("submitted job with openlava job id %s " % jid)
#store the job id submitted
if not sysConf.job.data.openlava.jids.get(command):
sysConf.job.data.openlava.jids[command] = []
if not sysConf.job.data.openlava.get('alljids'):
sysConf.job.data.openlava.alljids = []
sysConf.job.data.openlava.jids[command].append(jid)
sysConf.job.data.openlava.alljids.append(jid)
l.debug("jids stored %s" % str(sysConf.job.data.openlava.jids))
return p.returncode
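# Hedged note (illustration only): the job id parse above assumes the usual
# bsub acknowledgement line, e.g.
#
#   o = "Job <1234> is submitted to default queue <normal>.\n"
#   jid = int(o.split("<")[1].split(">")[0])   # -> 1234
#
# A different bsub output format would make that split raise an IndexError.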
OnSuccessScript = """#!/bin/bash
#BSUB -o {{ job.data.openlava.outfile }}
#BSUB -e {{ job.data.openlava.errfile }}
#BSUB -q {{ args.openlavaQueue }}
#BSUB -J "{{ job.data.openlava.uid }}_Ok"
{% if args.openlavaHost -%}
#BSUB -m {{ args.openlavaHost }}
{%- endif %}
#BSUB -w '({%- for j in job.data.openlava.alljids -%}
{%- if loop.index0 > 0 %}&&{% endif -%}
done({{j}})
{%- endfor -%})'
cd {{ job.wd }}
echo "Openlava OnSuccess Start"
echo "Killing the OnError job"
bkill -J "{{ job.data.openlava.uid }}_Err"
moasetstatus success
"""
OnErrorScript = """#!/bin/bash
## only run this job if there is a single job
#BSUB -o {{ job.data.openlava.outfile }}
#BSUB -e {{ job.data.openlava.errfile }}
#BSUB -q {{ args.openlavaQueue }}
#BSUB -J "{{ job.data.openlava.uid }}_Err"
{% if args.openlavaHost -%}
#BSUB -m {{ args.openlavaHost }}
{%- endif %}
#BSUB -w '({%- for j in job.data.openlava.alljids -%}
{%- if loop.index0 > 0 %}||{% endif -%}
exit({{j}},!=0)
{%- endfor -%}
)'
cd {{ job.wd }}
echo "Openlava OnError Start"
echo "Killing the all other jobs"
#killing all jobs
{% for j in job.data.openlava.alljids %}
bkill -s 9 {{ j }}
{% endfor %}
bkill -J "{{ job.data.openlava.uid }}_Ok"
moasetstatus error
"""
def hook_async_exit(job):
"""
Need to exit here, and reconvene once all jobs have executed
"""
#make sure that this is the correct actor
actor = moa.actor.getActor()
if actor.__name__ != 'openlavaRunner':
return
jidlist = sysConf.job.data.openlava.get('alljids', [])
if len(jidlist) == 0:
return
uid = "%s.%s" % (job.wd.split('/')[-1],max(jidlist))
sysConf.job.data.openlava.uid = uid
onsuccess = jinja2.Template(OnSuccessScript).render(sysConf)
onerror = jinja2.Template(OnErrorScript).render(sysConf)
with open('succ', 'w') as F:
F.write(onsuccess)
with open('onerr', 'w') as F:
F.write(onerror)
P = sp.Popen('bsub', stdin=sp.PIPE)
P.communicate(onsuccess)
P = sp.Popen('bsub', stdin=sp.PIPE)
P.communicate(onerror)
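# Illustrative rendering (assuming alljids == [11, 12]): the OnSuccess template
# above expands its dependency expression to
#   #BSUB -w '(done(11)&&done(12))'
# while the OnError template expands to
#   #BSUB -w '(exit(11,!=0)||exit(12,!=0))'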
#register this actor globally
sysConf.actor.actors['openlava'] = openlavaRunner
sysConf.actor.openlava.jids = []
|
gpl-3.0
| -2,962,970,957,543,276,500
| 28.253125
| 77
| 0.577075
| false
| 3.141275
| false
| false
| false
|
hackerbot/DjangoDev
|
django/forms/models.py
|
2
|
55275
|
"""
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput, MultipleHiddenInput, SelectMultiple,
)
from django.utils import six
from django.utils.encoding import force_text, smart_text
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext, ugettext_lazy as _
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ALL_FIELDS', 'BaseModelFormSet', 'modelformset_factory',
'BaseInlineFormSet', 'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a model instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or f.name not in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
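# Illustrative usage (hypothetical Author model/form, not part of this module):
#
#   form = AuthorForm({'name': 'Ada'})
#   if form.is_valid():
#       author = construct_instance(form, Author())  # populated but NOT saved
#       author.save()                                # caller decides when to persist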
def save_instance(form, instance, fields=None, fail_message='saved',
commit=True, exclude=None, construct=True):
"""
Saves bound Form ``form``'s cleaned_data into model instance ``instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
If construct=False, assume ``instance`` has already been constructed and
just needs to be saved.
"""
if construct:
instance = construct_instance(form, instance, fields, exclude)
opts = instance._meta
if form.errors:
raise ValueError("The %s could not be %s because the data didn't"
" validate." % (opts.object_name, fail_message))
# Wrap up the saving of m2m data as a function.
def save_m2m():
cleaned_data = form.cleaned_data
# Note that for historical reasons we want to include also
# virtual_fields here. (GenericRelation was previously a fake
# m2m field).
for f in chain(opts.many_to_many, opts.virtual_fields):
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(instance, cleaned_data[f.name])
if commit:
# If we are committing, save the instance and the m2m data immediately.
instance.save()
save_m2m()
else:
# We're not committing. Add a method to the form to allow deferred
# saving of m2m data.
form.save_m2m = save_m2m
return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
the ``fields`` argument.
"""
# avoid a circular import
from django.db.models.fields.related import ManyToManyField
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.virtual_fields, opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if isinstance(f, ManyToManyField):
# If the object doesn't have a primary key yet, just use an empty
# list for its m2m fields. Calling f.value_from_object will raise
# an exception.
if instance.pk is None:
data[f.name] = []
else:
# MultipleChoiceWidget needs a list of pks, not object instances.
qs = f.value_from_object(instance)
if qs._result_cache is not None:
data[f.name] = [item.pk for item in qs]
else:
data[f.name] = list(qs.values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(instance)
return data
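# Illustrative usage (hypothetical Author instance): seed a form with the
# instance's current values.
#
#   initial = model_to_dict(author, fields=['name', 'email'])
#   form = AuthorForm(initial=initial, instance=author)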
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
    Returns an ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_virtual_fields = [f for f in opts.virtual_fields
if isinstance(f, ModelField)]
for f in sorted(chain(opts.concrete_fields, sortable_virtual_fields, opts.many_to_many)):
if not getattr(f, 'editable', False):
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if field_classes and f.name in field_classes:
kwargs['form_class'] = field_classes[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
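# Illustrative usage (hypothetical Author model): build form fields directly,
# e.g. to inspect or tweak them outside a ModelForm subclass.
#
#   fields = fields_for_model(Author, fields=['name'],
#                             widgets={'name': HiddenInput})
#   # fields is an OrderedDict mapping 'name' to a ready-to-use form field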
class ModelFormOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
self.field_classes = getattr(options, 'field_classes', None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
formfield_callback = attrs.pop('formfield_callback', None)
new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, six.string_types) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(opts.model, opts.fields, opts.exclude,
opts.widgets, formfield_callback,
opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages,
opts.field_classes)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = [k for k, v in six.iteritems(fields) if not v]
missing_fields = (set(none_model_fields) -
set(new_class.declared_fields.keys()))
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
error_class, label_suffix, empty_permitted)
# Apply ``limit_choices_to`` to each field.
for field_name in self.fields:
formfield = self.fields[field_name]
if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):
limit_choices_to = formfield.get_limit_choices_to()
if limit_choices_to is not None:
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, several types of fields need to be
excluded from model validation. See the following tickets for
details: #12507, #12521, #12553
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors.keys():
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field, None)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
for field, messages in errors.error_dict.items():
if (field == NON_FIELD_ERRORS and opts.error_messages and
NON_FIELD_ERRORS in opts.error_messages):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (isinstance(message, ValidationError) and
message.code in error_messages):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
exclude = self._get_validation_exclusions()
# a subset of `exclude` which won't have the InlineForeignKeyField
# if we're adding a new object since that value doesn't exist
# until after the new instance is saved to the database.
construct_instance_exclude = list(exclude)
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
if self.cleaned_data.get(name) is not None and self.cleaned_data[name]._state.adding:
construct_instance_exclude.append(name)
exclude.append(name)
# Update the model instance with self.cleaned_data.
self.instance = construct_instance(self, self.instance, opts.fields, construct_instance_exclude)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Calls the instance's validate_unique() method and updates the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def save(self, commit=True):
"""
Saves this ``form``'s cleaned_data into model instance
``self.instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
"""
if self.instance.pk is None:
fail_message = 'created'
else:
fail_message = 'changed'
return save_instance(self, self.instance, self._meta.fields,
fail_message, commit, self._meta.exclude,
construct=False)
save.alters_data = True
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
Returns a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields. If omitted or '__all__',
all fields will be used.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
if field_classes is not None:
attrs['field_classes'] = field_classes
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type(str('Meta'), parent, attrs)
# Give this new form class a reasonable name.
class_name = model.__name__ + str('Form')
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
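# Illustrative usage (hypothetical Author model):
#
#   AuthorForm = modelform_factory(Author, fields=['name', 'email'])
#   form = AuthorForm({'name': 'Ada', 'email': 'ada@example.com'})
#   if form.is_valid():
#       form.save()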
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, **kwargs):
self.queryset = queryset
self.initial_extra = kwargs.pop('initial', None)
defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
defaults.update(kwargs)
super(BaseModelFormSet, self).__init__(**defaults)
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
return len(self.get_queryset())
return super(BaseModelFormSet, self).initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = {o.pk: o for o in self.get_queryset()}
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) to_python.
"""
while field.remote_field is not None:
field = field.remote_field.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
if self.is_bound and i < self.initial_form_count():
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
pk_field = self.model._meta.pk
to_python = self._get_to_python(pk_field)
pk = to_python(pk)
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and 'instance' not in kwargs:
kwargs['instance'] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Saves and returns a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Saves and returns an existing model instance for the given form."""
return form.save(commit=commit)
def save(self, commit=True):
"""Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks = all_unique_checks.union(set(unique_checks))
all_date_checks = all_date_checks.union(set(date_checks))
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# get data for each field of each of unique_check
row_data = (form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data)
# Reduce Model instances to their primary key values
row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
for d in row_data)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None
and form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return ugettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return ugettext("Please correct the duplicate data for %(field)s, "
"which must be unique.") % {
"field": get_text_list(unique_check, six.text_type(_("and"))),
}
def get_date_error_message(self, date_check):
return ugettext("Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s.") % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': six.text_type(date_check[1]),
}
def get_form_error(self):
return ugettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
if form in forms_to_delete:
# If the pk is None, it means that the object can't be
# deleted again. Possible reason for this is that the
# object was already deleted from the DB. Refs #14877.
if obj.pk is None:
continue
self.deleted_objects.append(obj)
if commit:
obj.delete()
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
# data back. Generally, pk.editable should be false, but for some
# reason, auto_created pk fields and AutoField's editable attribute is
# True, so check for that as well.
def pk_is_not_editable(pk):
return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
or (pk.remote_field and pk.remote_field.parent_link and pk_is_not_editable(pk.remote_field.model._meta.pk)))
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
# If we're adding the related instance, ignore its primary key
# as it could be an auto-generated default which isn't actually
# in the database.
pk_value = None if form.instance._state.adding else form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
qs = pk.remote_field.model._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Returns a FormSet class for the given Django model class.
"""
meta = getattr(form, 'Meta', None)
if meta is None:
meta = type(str('Meta'), (object,), {})
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
raise ImproperlyConfigured(
"Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts,
error_messages=error_messages, field_classes=field_classes)
FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_min=validate_min, validate_max=validate_max)
FormSet.model = model
return FormSet
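# Illustrative usage (hypothetical Author model and request object):
#
#   AuthorFormSet = modelformset_factory(Author, fields=['name'], extra=2)
#   formset = AuthorFormSet(request.POST, queryset=Author.objects.all())
#   if formset.is_valid():
#       formset.save()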
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.remote_field.model()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
queryset=qs, **kwargs)
def initial_form_count(self):
if self.save_as_new:
return 0
return super(BaseInlineFormSet, self).initial_form_count()
def _construct_form(self, i, **kwargs):
form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
if self.save_as_new:
# Remove the primary key from the form's data, we are only
# creating new instances
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
# Set the fk value here so that the form can do its validation.
fk_value = self.instance.pk
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
fk_value = getattr(self.instance, self.fk.remote_field.field_name)
fk_value = getattr(fk_value, 'pk', fk_value)
setattr(form.instance, self.fk.get_attname(), fk_value)
return form
@classmethod
def get_default_prefix(cls):
return cls.fk.remote_field.get_accessor_name(model=cls.model).replace('+', '')
def save_new(self, form, commit=True):
# Ensure the latest copy of the related instance is present on each
# form (it may have been saved after the formset was originally
# instantiated).
setattr(form.instance, self.fk.name, self.instance)
# Use commit=False so we can assign the parent key afterwards, then
# save the object.
obj = form.save(commit=False)
pk_value = getattr(self.instance, self.fk.remote_field.field_name)
setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
if commit:
obj.save()
# form.save_m2m() can be called via the formset later on if commit=False
if commit and hasattr(form, 'save_m2m'):
form.save_m2m()
return obj
def add_fields(self, form, index):
super(BaseInlineFormSet, self).add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
kwargs['to_field'] = self.fk.remote_field.field_name
# If we're adding a new object, ignore a parent's auto-generated pk
# as it will be regenerated on the save request.
if self.instance._state.adding and form._meta.model._meta.pk.has_default():
self.instance.pk = None
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if form._meta.fields:
if isinstance(form._meta.fields, tuple):
form._meta.fields = list(form._meta.fields)
form._meta.fields.append(self.fk.name)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Finds and returns the ForeignKey from model to parent if there is one
(returns None if can_fail is True and no such field exists). If fk_name is
provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, an exception is raised if there is no ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.remote_field.model != parent_model and
fk.remote_field.model not in parent_model._meta.get_parent_list()):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label)
)
elif len(fks_to_parent) == 0:
raise ValueError(
"'%s' has no field named '%s'." % (model._meta.label, fk_name)
)
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey)
and (f.remote_field.model == parent_model
or f.remote_field.model in parent_model._meta.get_parent_list())
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif len(fks_to_parent) == 0:
if can_fail:
return
raise ValueError(
"'%s' has no ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
else:
raise ValueError(
"'%s' has more than one ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
return fk
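# Illustrative behaviour (hypothetical Author/Book models where Book has one
# ForeignKey to Author):
#
#   fk = _get_foreign_key(Author, Book)                    # discovered automatically
#   fk = _get_foreign_key(Author, Book, fk_name='author')  # pins an explicit field
#   _get_foreign_key(Author, SomeUnrelatedModel, can_fail=True)  # -> None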
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'min_num': min_num,
'max_num': max_num,
'widgets': widgets,
'validate_min': validate_min,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
'field_classes': field_classes,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
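# Illustrative usage (hypothetical Author/Book models, Book holding a
# ForeignKey to Author, and a hypothetical request object):
#
#   BookFormSet = inlineformset_factory(Author, Book, fields=['title'], extra=1)
#   formset = BookFormSet(request.POST, instance=author)
#   if formset.is_valid():
#       formset.save()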
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
A basic integer field that deals with validating the given value to a
given parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
}
def __init__(self, parent_instance, *args, **kwargs):
self.parent_instance = parent_instance
self.pk_field = kwargs.pop("pk_field", False)
self.to_field = kwargs.pop("to_field", None)
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super(InlineForeignKeyField, self).__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
# if there is no value act as we did before.
return self.parent_instance
        # ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if force_text(value) != force_text(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def has_changed(self, initial, data):
return False
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
for obj in self.queryset.iterator():
yield self.choice(obj)
def __len__(self):
return (len(self.queryset) +
(1 if self.field.empty_label is not None else 0))
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
def __init__(self, queryset, empty_label="---------",
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
*args, **kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.to_field_name = to_field_name
def get_limit_choices_to(self):
"""
Returns ``limit_choices_to`` for this form field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.limit_choices_to):
return self.limit_choices_to()
return self.limit_choices_to
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
result.queryset = result.queryset
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = queryset
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
# this method will be used to create object labels by the QuerySetIterator.
# Override it to customize the label.
def label_from_instance(self, obj):
"""
This method is used to convert objects into strings; it's used to
generate the labels for the choices presented by this object. Subclasses
can override this method to customize the display of the choices.
"""
return smart_text(obj)
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return ModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super(ModelChoiceField, self).prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, TypeError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def has_changed(self, initial, data):
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return force_text(self.prepare_value(initial_value)) != force_text(data_value)
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
}
def __init__(self, queryset, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ModelMultipleChoiceField, self).__init__(queryset, None,
required, widget, label, initial, help_text, *args, **kwargs)
def to_python(self, value):
if not value:
return []
return list(self._check_values(value))
def clean(self, value):
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
qs = self._check_values(value)
# Since this overrides the inherited ModelChoiceField.clean
# we run custom validators here
self.run_validators(value)
return qs
def _check_values(self, value):
"""
Given a list of possible PK values, returns a QuerySet of the
corresponding objects. Raises a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or 'pk'
        # deduplicate given values to avoid creating many querysets or
        # requiring the database backend to deduplicate efficiently.
try:
value = frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(
self.error_messages['list'],
code='list',
)
for pk in value:
try:
self.queryset.filter(**{key: pk})
except (ValueError, TypeError):
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = set(force_text(getattr(o, key)) for o in qs)
for val in value:
if force_text(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, six.text_type) and
not hasattr(value, '_meta')):
return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
return super(ModelMultipleChoiceField, self).prepare_value(value)
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in self.prepare_value(initial))
data_set = set(force_text(value) for value in data)
return data_set != initial_set
def modelform_defines_fields(form_class):
return (form_class is not None and (
hasattr(form_class, '_meta') and
(form_class._meta.fields is not None or
form_class._meta.exclude is not None)
))
|
bsd-3-clause
| 4,096,164,790,503,905,300
| 41.00228
| 124
| 0.589326
| false
| 4.337676
| false
| false
| false
|
rleigh-dundee/openmicroscopy
|
components/tools/OmeroWeb/omeroweb/settings.py
|
1
|
22862
|
#!/usr/bin/env python
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # Django settings for OMERO.web project. # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
# Copyright (c) 2008 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
import os.path
import sys
import datetime
import logging
import omero
import omero.config
import omero.clients
import tempfile
import exceptions
import re
from django.utils import simplejson as json
from portalocker import LockException
logger = logging.getLogger(__name__)
# LOGS
# NEVER DEPLOY a site into production with DEBUG turned on.
# Debugging mode.
# A boolean that turns on/off debug mode.
# handler404 and handler500 works only when False
if os.environ.has_key('OMERO_HOME'):
    OMERO_HOME = os.environ.get('OMERO_HOME')
else:
OMERO_HOME = os.path.join(os.path.dirname(__file__), '..', '..', '..')
OMERO_HOME = os.path.normpath(OMERO_HOME)
INSIGHT_JARS = os.path.join(OMERO_HOME, "lib", "insight").replace('\\','/')
WEBSTART = False
if os.path.isdir(INSIGHT_JARS):
WEBSTART = True
# Logging
LOGDIR = os.path.join(OMERO_HOME, 'var', 'log').replace('\\','/')
if not os.path.isdir(LOGDIR):
try:
os.makedirs(LOGDIR)
except Exception, x:
exctype, value = sys.exc_info()[:2]
raise exctype, value
# DEBUG: Never deploy a site into production with DEBUG turned on.
# Logging levels: logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR logging.CRITICAL
# FORMAT: 2010-01-01 00:00:00,000 INFO [omeroweb.webadmin.webadmin_utils ] (proc.1308 ) getGuestConnection:20 Open connection is not available
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)5.5s [%(name)40.40s] (proc.%(process)5.5d) %(funcName)s:%(lineno)d %(message)s'
},
},
'handlers': {
'default': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGDIR, 'OMEROweb.log').replace('\\','/'),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'request_handler': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGDIR, 'OMEROweb_request.log').replace('\\','/'),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'django.request': { # Stop SQL debug from logging to main logger
'handlers': ['request_handler'],
'level': 'DEBUG',
'propagate': False
},
'django': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': True
},
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
}
}
}
# Load custom settings from etc/grid/config.xml
# Tue 2 Nov 2010 11:03:18 GMT -- ticket:3228
from omero.util.concurrency import get_event
CONFIG_XML = os.path.join(OMERO_HOME, 'etc', 'grid', 'config.xml')
count = 10
event = get_event("websettings")
while True:
try:
CONFIG_XML = omero.config.ConfigXml(CONFIG_XML)
CUSTOM_SETTINGS = CONFIG_XML.as_map()
CONFIG_XML.close()
break
except LockException:
#logger.error("Exception while loading configuration retrying...", exc_info=True)
exctype, value = sys.exc_info()[:2]
count -= 1
if not count:
raise exctype, value
else:
event.wait(1) # Wait a total of 10 seconds
except:
#logger.error("Exception while loading configuration...", exc_info=True)
exctype, value = sys.exc_info()[:2]
raise exctype, value
del event
del count
del get_event
FASTCGI = "fastcgi"
FASTCGITCP = "fastcgi-tcp"
FASTCGI_TYPES = (FASTCGI, FASTCGITCP)
DEVELOPMENT = "development"
DEFAULT_SERVER_TYPE = FASTCGITCP
ALL_SERVER_TYPES = (FASTCGITCP, FASTCGI, DEVELOPMENT)
DEFAULT_SESSION_ENGINE = 'django.contrib.sessions.backends.file'
SESSION_ENGINE_VALUES = ('django.contrib.sessions.backends.db',
'django.contrib.sessions.backends.file',
'django.contrib.sessions.backends.cache',
'django.contrib.sessions.backends.cached_db')
def parse_boolean(s):
s = s.strip().lower()
if s in ('true', '1', 't'):
return True
return False
def parse_paths(s):
return [os.path.normpath(path) for path in json.loads(s)]
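# Small sanity examples for the parsers above (illustrative only):
#
#   parse_boolean(' True ')              # -> True
#   parse_boolean('0')                   # -> False
#   parse_paths('["/tmp/a", "/tmp//b"]') # -> ['/tmp/a', '/tmp/b']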
def check_server_type(s):
if s not in ALL_SERVER_TYPES:
raise ValueError("Unknown server type: %s. Valid values are: %s" % (s, ALL_SERVER_TYPES))
return s
def check_session_engine(s):
if s not in SESSION_ENGINE_VALUES:
raise ValueError("Unknown session engine: %s. Valid values are: %s" % (s, SESSION_ENGINE_VALUES))
return s
def identity(x):
return x
def remove_slash(s):
if s is not None and len(s) > 0:
if s.endswith("/"):
s = s[:-1]
return s
class LeaveUnset(exceptions.Exception):
pass
def leave_none_unset(s):
if s is None:
raise LeaveUnset()
return s
CUSTOM_SETTINGS_MAPPINGS = {
"omero.web.apps": ["ADDITIONAL_APPS", '[]', json.loads],
"omero.web.public.enabled": ["PUBLIC_ENABLED", "false", parse_boolean],
"omero.web.public.url_filter": ["PUBLIC_URL_FILTER", r'^/(?!webadmin)', re.compile],
"omero.web.public.server_id": ["PUBLIC_SERVER_ID", 1, int],
"omero.web.public.user": ["PUBLIC_USER", None, leave_none_unset],
"omero.web.public.password": ["PUBLIC_PASSWORD", None, leave_none_unset],
"omero.web.public.cache.enabled": ["PUBLIC_CACHE_ENABLED", "false", parse_boolean],
"omero.web.public.cache.key": ["PUBLIC_CACHE_KEY", "omero.web.public.cache.key", str],
"omero.web.public.cache.timeout": ["PUBLIC_CACHE_TIMEOUT", 60 * 60 * 24, int],
"omero.web.databases": ["DATABASES", '{}', json.loads],
"omero.web.admins": ["ADMINS", '[]', json.loads],
"omero.web.application_server": ["APPLICATION_SERVER", DEFAULT_SERVER_TYPE, check_server_type],
"omero.web.application_server.host": ["APPLICATION_SERVER_HOST", "0.0.0.0", str],
"omero.web.application_server.port": ["APPLICATION_SERVER_PORT", "4080", str],
"omero.web.application_server.max_requests": ["APPLICATION_SERVER_MAX_REQUESTS", 400, int],
"omero.web.ping_interval": ["PING_INTERVAL", 60000, int],
"omero.web.static_url": ["STATIC_URL", "/static/", str],
"omero.web.staticfile_dirs": ["STATICFILES_DIRS", '[]', json.loads],
"omero.web.index_template": ["INDEX_TEMPLATE", None, identity],
"omero.web.caches": ["CACHES", '{}', json.loads],
"omero.web.webgateway_cache": ["WEBGATEWAY_CACHE", None, leave_none_unset],
"omero.web.session_engine": ["SESSION_ENGINE", DEFAULT_SESSION_ENGINE, check_session_engine],
"omero.web.debug": ["DEBUG", "false", parse_boolean],
"omero.web.email_host": ["EMAIL_HOST", None, identity],
"omero.web.email_host_password": ["EMAIL_HOST_PASSWORD", None, identity],
"omero.web.email_host_user": ["EMAIL_HOST_USER", None, identity],
"omero.web.email_port": ["EMAIL_PORT", None, identity],
"omero.web.email_subject_prefix": ["EMAIL_SUBJECT_PREFIX", "[OMERO.web] ", str],
"omero.web.email_use_tls": ["EMAIL_USE_TLS", "false", parse_boolean],
"omero.web.logdir": ["LOGDIR", LOGDIR, str],
"omero.web.login_view": ["LOGIN_VIEW", "weblogin", str],
"omero.web.send_broken_link_emails": ["SEND_BROKEN_LINK_EMAILS", "true", parse_boolean],
"omero.web.server_email": ["SERVER_EMAIL", None, identity],
"omero.web.server_list": ["SERVER_LIST", '[["localhost", 4064, "omero"]]', json.loads],
# Configuration options for the viewer
"omero.web.viewer.initial_zoom_level": ["VIEWER_INITIAL_ZOOM_LEVEL", -1, int],
# the following parameters configure when to show/hide the 'Volume viewer' icon in the Image metadata panel
"omero.web.open_astex_max_side": ["OPEN_ASTEX_MAX_SIDE", 400, int],
"omero.web.open_astex_min_side": ["OPEN_ASTEX_MIN_SIDE", 20, int],
"omero.web.open_astex_max_voxels": ["OPEN_ASTEX_MAX_VOXELS", 27000000, int], # 300 x 300 x 300
"omero.web.scripts_to_ignore": ["SCRIPTS_TO_IGNORE", '["/omero/figure_scripts/Movie_Figure.py", "/omero/figure_scripts/Split_View_Figure.py", "/omero/figure_scripts/Thumbnail_Figure.py", "/omero/figure_scripts/ROI_Split_Figure.py", "/omero/export_scripts/Make_Movie.py"]', parse_paths],
# Add links to the top header: links are ['Link Text', 'link'], where the url is reverse("link") OR simply 'link' (for external urls)
"omero.web.ui.top_links": ["TOP_LINKS", '[]', json.loads], # E.g. '[["Webtest", "webtest_index"]]'
# Add plugins to the right-hand & center panels: plugins are ['Label', 'include.js', 'div_id']. The javascript loads data into $('#div_id').
"omero.web.ui.right_plugins": ["RIGHT_PLUGINS", '[["Acquisition", "webclient/data/includes/right_plugin.acquisition.js.html", "metadata_tab"],'\
#'["ROIs", "webtest/webclient_plugins/right_plugin.rois.js.html", "image_roi_tab"],'\
'["Preview", "webclient/data/includes/right_plugin.preview.js.html", "preview_tab"]]', json.loads],
# E.g. Center plugin: ["Channel overlay", "webtest/webclient_plugins/center_plugin.overlay.js.html", "channel_overlay_panel"]
"omero.web.ui.center_plugins": ["CENTER_PLUGINS", '['\
#'["Split View", "webclient/data/includes/center_plugin.splitview.js.html", "split_view_panel"],'\
'["Table", "webclient/data/includes/center_plugin.table.js.html", "image_table"]]', json.loads],
# sharing no longer use this variable. replaced by request.build_absolute_uri
# after testing this line should be removed.
# "omero.web.application_host": ["APPLICATION_HOST", None, remove_slash],
# WEBSTART
"omero.web.webstart_jar": ["WEBSTART_JAR", "omero.insight.jar", str],
"omero.web.webstart_icon": ["WEBSTART_ICON", "webstart/img/icon-omero-insight.png", str],
"omero.web.webstart_heap": ["WEBSTART_HEAP", "1024m", str],
"omero.web.webstart_host": ["WEBSTART_HOST", "localhost", str],
"omero.web.webstart_port": ["WEBSTART_PORT", "4064", str],
"omero.web.webstart_class": ["WEBSTART_CLASS", "org.openmicroscopy.shoola.Main", str],
"omero.web.webstart_title": ["WEBSTART_TITLE", "OMERO.insight", str],
"omero.web.webstart_vendor": ["WEBSTART_VENDOR", "The Open Microscopy Environment", str],
"omero.web.webstart_homepage": ["WEBSTART_HOMEPAGE", "http://www.openmicroscopy.org", str],
}
for key, values in CUSTOM_SETTINGS_MAPPINGS.items():
global_name, default_value, mapping = values
try:
global_value = CUSTOM_SETTINGS[key]
values.append(False)
except KeyError:
global_value = default_value
values.append(True)
try:
globals()[global_name] = mapping(global_value)
except ValueError:
raise ValueError("Invalid %s JSON: %r" % (global_name, global_value))
except LeaveUnset:
pass
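# Illustrative sketch (not from the original module; CUSTOM_SETTINGS is the parsed
# config dict used above): for a single mapping entry the loop behaves roughly like
#
#     raw = CUSTOM_SETTINGS.get("omero.web.debug", "false")   # default from the table
#     DEBUG = parse_boolean(raw)                              # parser from the table
#
# so DEBUG, ADMINS, SERVER_LIST, etc. all become module-level globals the same way,
# and each entry records whether its default was used (for the logging block below).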
if not DEBUG:
LOGGING['loggers']['django.request']['level'] = 'INFO'
LOGGING['loggers']['django']['level'] = 'INFO'
LOGGING['loggers']['']['level'] = 'INFO'
# TEMPLATE_DEBUG: A boolean that turns on/off template debug mode. If this is True, the fancy
# error page will display a detailed report for any TemplateSyntaxError. This report contains
# the relevant snippet of the template, with the appropriate line highlighted.
# Note that Django only displays fancy error pages if DEBUG is True; otherwise
# errors are handled by:
# handler404 = "omeroweb.feedback.views.handler404"
# handler500 = "omeroweb.feedback.views.handler500"
TEMPLATE_DEBUG = DEBUG
from django.views.debug import cleanse_setting
for key in sorted(CUSTOM_SETTINGS_MAPPINGS):
values = CUSTOM_SETTINGS_MAPPINGS[key]
global_name, default_value, mapping, using_default = values
source = using_default and "default" or key
global_value = globals().get(global_name, None)
if global_name.isupper():
logger.debug("%s = %r (source:%s)", global_name, cleanse_setting(global_name, global_value), source)
SITE_ID = 1
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
FIRST_DAY_OF_WEEK = 0 # 0-Monday, ... 6-Sunday
# LANGUAGE_CODE: A string representing the language code for this installation. This should be
# in standard language format. For example, U.S. English is "en-us".
LANGUAGE_CODE = 'en-gb'
# SECRET_KEY: A secret key for this particular Django installation. Used to provide a seed
# in secret-key hashing algorithms. Set this to a random string -- the longer, the better.
# django-admin.py startproject creates one automatically.
# Make this unique, and don't share it with anybody.
SECRET_KEY = '@@k%g#7=%4b6ib7yr1tloma&g0s2nni6ljf!m0h&x9c712c7yj'
# USE_I18N: A boolean that specifies whether Django's internationalization system should be enabled.
# This provides an easy way to turn it off, for performance. If this is set to False, Django will
# make some optimizations so as not to load the internationalization machinery.
USE_I18N = True
# MIDDLEWARE_CLASSES: A tuple of middleware classes to use.
# See https://docs.djangoproject.com/en/1.3/topics/http/middleware/.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
# ROOT_URLCONF: A string representing the full Python import path to your root URLconf.
# For example: "mydjangoapps.urls". Can be overridden on a per-request basis by setting
# the attribute urlconf on the incoming HttpRequest object.
ROOT_URLCONF = 'omeroweb.urls'
# STATICFILES_FINDERS: The list of finder backends that know how to find static files
# in various locations. The default will find files stored in the STATICFILES_DIRS setting
# (using django.contrib.staticfiles.finders.FileSystemFinder) and in a static subdirectory
# of each app (using django.contrib.staticfiles.finders.AppDirectoriesFinder)
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder"
)
# STATIC_URL: URL to use when referring to static files located in STATIC_ROOT.
# Example: "/site_media/static/" or "http://static.example.com/".
# If not None, this will be used as the base path for media definitions and the staticfiles
# app. It must end in a slash if set to a non-empty value.
# This var is configurable by omero.web.static_url (default: STATIC_URL = '/static/')
# STATIC_ROOT: The absolute path to the directory where collectstatic will collect static
# files for deployment. If the staticfiles contrib app is enabled (default) the collectstatic
# management command will collect static files into this directory.
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static').replace('\\','/')
# STATICFILES_DIRS: This setting defines the additional locations the staticfiles app will
# traverse if the FileSystemFinder finder is enabled, e.g. if you use the collectstatic or
# findstatic management command or use the static file serving view.
if WEBSTART:
STATICFILES_DIRS += (("webstart/jars", INSIGHT_JARS),)
# TEMPLATE_CONTEXT_PROCESSORS: A tuple of callables that are used to populate the context
# in RequestContext. These callables take a request object as their argument and return
# a dictionary of items to be merged into the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages"
)
# TEMPLATE_LOADERS: A tuple of template loader classes, specified as strings. Each Loader class
# knows how to import templates from a particular source. Optionally, a tuple can be used
# instead of a string. The first item in the tuple should be the Loader's module, subsequent items
# are passed to the Loader during initialization.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# TEMPLATE_DIRS: List of locations of the template source files, in search order. Note that these
# paths should use Unix-style forward slashes, even on Windows.
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". Always use
# forward slashes, even on Windows. Don't forget to use absolute paths, not relative paths.
# TEMPLATE_DIRS = ()
# INSTALLED_APPS: A tuple of strings designating all applications that are enabled in this Django
# installation. Each string should be a full Python path to a Python package that contains
# a Django application, as created by django-admin.py startapp.
INSTALLED_APPS = (
'django.contrib.staticfiles',
'django.contrib.markup',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'omeroweb.feedback',
'omeroweb.webadmin',
'omeroweb.webclient',
'omeroweb.webgateway',
'omeroweb.webtest',
'omeroweb.webredirect',
'omeroweb.webstart',
)
# ADDITIONAL_APPS: We import any settings.py from apps. This allows them to modify settings.
for app in ADDITIONAL_APPS:
INSTALLED_APPS += ('omeroweb.%s' % app,)
try:
a = __import__('%s.settings' % app)
except ImportError:
logger.debug("Couldn't import settings from app: %s" % app)
# FEEDBACK_URL: Used in feedback.sendfeedback.SendFeedback class in order to submit
# error or comment messages to http://qa.openmicroscopy.org.uk.
FEEDBACK_URL = "qa.openmicroscopy.org.uk:80"
# IGNORABLE_404_STARTS:
# Default: ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
# IGNORABLE_404_ENDS:
# Default: ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
# SESSION_FILE_PATH: If you're using file-based session storage, this sets the directory in which Django
# will store session data. When the default value (None) is used, Django will use the standard temporary
# directory for the system.
SESSION_FILE_PATH = tempfile.gettempdir()
# SESSION_EXPIRE_AT_BROWSER_CLOSE: Whether to expire the session when the user closes his or her browser.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True # False
# SESSION_COOKIE_AGE: The age of session cookies, in seconds. See How to use sessions.
SESSION_COOKIE_AGE = 86400 # 1 day in sec (86400)
# FILE_UPLOAD_TEMP_DIR: The directory to store data temporarily while uploading files.
FILE_UPLOAD_TEMP_DIR = tempfile.gettempdir()
# FILE_UPLOAD_MAX_MEMORY_SIZE: The maximum size (in bytes) that an upload will be before it gets streamed
# to the file system.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440  # default 2621440 (i.e. 2.5 MB).
# DEFAULT_IMG: Used in webclient.webclient_gateway.OmeroWebGateway.defaultThumbnail in order to load a default
# image when the thumbnail can't be retrieved from the server.
DEFAULT_IMG = os.path.join(os.path.dirname(__file__), 'webgateway', 'static', 'webgateway', 'img', 'image128.png').replace('\\','/')
# DEFAULT_USER: Used in webclient.webclient_gateway.OmeroWebGateway.getExperimenterDefaultPhoto in order to load a default
# avatar when the experimenter photo can't be retrieved from the server.
DEFAULT_USER = os.path.join(os.path.dirname(__file__), 'webgateway', 'static', 'webgateway', 'img', 'personal32.png').replace('\\','/')
# MANAGERS: A tuple in the same format as ADMINS that specifies who should get broken-link notifications when
# SEND_BROKEN_LINK_EMAILS=True.
MANAGERS = ADMINS
# PAGE: Used in various locations where a large amount of data is retrieved from the server.
try:
PAGE
except NameError:
PAGE = 200
EMAIL_TEMPLATES = {
'create_share': {
'html_content':'<p>Hi,</p><p>I would like to share some of my data with you.<br/>Please find it on the <a href="%s?server=%i">%s?server=%i</a>.</p><p>%s</p>',
        'text_content':'Hi, I would like to share some of my data with you. Please find it on the %s?server=%i.\n%s'
},
'add_member_to_share': {
'html_content':'<p>Hi,</p><p>I would like to share some of my data with you.<br/>Please find it on the <a href="%s?server=%i">%s?server=%i</a>.</p><p>%s</p>',
        'text_content':'Hi, I would like to share some of my data with you. Please find it on the %s?server=%i.\n%s'
},
'remove_member_from_share': {
'html_content':'<p>You were removed from the share <a href="%s?server=%i">%s?server=%i</a>. This share is no longer available for you.</p>',
'text_content':'You were removed from the share %s?server=%i. This share is no longer available for you.'
},
'add_comment_to_share': {
'html_content':'<p>New comment is available on share <a href="%s?server=%i">%s?server=%i</a>.</p>',
'text_content':'New comment is available on share %s?server=%i.'
}
}
# Load server list and freeze
from webadmin.custom_models import Server
def load_server_list():
for s in SERVER_LIST:
server = (len(s) > 2) and unicode(s[2]) or None
Server(host=unicode(s[0]), port=int(s[1]), server=server)
Server.freeze()
load_server_list()
|
gpl-2.0
| -7,212,896,495,762,122,000
| 43.220503
| 290
| 0.673301
| false
| 3.407154
| true
| false
| false
|
DonnchaC/onionbalance
|
test/functional/test_publish_master_descriptor.py
|
1
|
5980
|
# -*- coding: utf-8 -*-
import os
import sys
import socket
import time
import pytest
import Crypto.PublicKey.RSA
import yaml
import pexpect
import stem.control
import onionbalance.util
# Skip functional tests if Chutney environment is not running.
pytestmark = pytest.mark.skipif(
"os.environ.get('CHUTNEY_ONION_ADDRESS') is None",
reason="Skipping functional test, no Chutney environment detected")
def parse_chutney_environment():
"""
Read environment variables and determine chutney instance and
client addresses.
"""
tor_client = os.environ.get('CHUTNEY_CLIENT_PORT')
assert tor_client
    # Calculate the address and port of the client's control port
client_address, client_socks_port = tor_client.split(':')
client_ip = socket.gethostbyname(client_address)
tor_client_number = int(client_socks_port) - 9000
# Control port in the 8000-8999 range, offset by Tor client number
control_port = 8000 + tor_client_number
assert control_port
# Retrieve instance onion address exported during chutney setup
instance_address = os.environ.get('CHUTNEY_ONION_ADDRESS')
assert instance_address # Need at least 1 instance address for test
if '.onion' in instance_address:
instance_address = instance_address[:16]
return {
'client_ip': client_ip,
'control_port': control_port,
'instances': [instance_address],
}
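# Illustrative example (hypothetical values): with CHUTNEY_CLIENT_PORT set to
# "localhost:9005" and CHUTNEY_ONION_ADDRESS to "abcdefghijklmnop.onion", the
# parser above returns the client's IP, control port 8005 (8000 + the client
# number derived from the SOCKS port) and the 16-character instance address
# "abcdefghijklmnop".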
def create_test_config_file(tmppath, private_key=None, instances=None):
"""
Setup function to create a temp directory with master key and config file.
Returns a path to the temporary config file.
.. todo:: Refactor settings.py config creation to avoid code duplication
in integration tests.
"""
if not private_key:
private_key = Crypto.PublicKey.RSA.generate(1024)
# Write private key file
key_path = tmppath.join('private_key')
key_path.write(private_key.exportKey())
assert key_path.check()
# Create YAML OnionBalance settings file for these instances
service_data = {'key': str(key_path)}
service_data['instances'] = [{'address': addr} for addr in instances]
settings_data = {
'services': [service_data],
'STATUS_SOCKET_LOCATION': str(tmppath.join('control')),
}
config_yaml = yaml.dump(settings_data, default_flow_style=False)
config_path = tmppath.join('config.yaml')
config_path.write_binary(config_yaml.encode('utf-8'))
assert config_path.check()
return str(config_path)
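# A minimal sketch (not part of the original tests) of the YAML that
# create_test_config_file() writes, assuming a single hypothetical instance
# address "abcdefghijklmnop" and a pytest tmpdir:
#
#     STATUS_SOCKET_LOCATION: /tmp/.../control
#     services:
#     - instances:
#       - address: abcdefghijklmnop
#       key: /tmp/.../private_key
#
# i.e. the settings_data dict above serialised with
# yaml.dump(..., default_flow_style=False).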
def test_master_descriptor_publication(tmpdir):
"""
Functional test to run OnionBalance, publish a master descriptor and
check that it can be retrieved from the DHT.
"""
    chutney_config = parse_chutney_environment()
private_key = Crypto.PublicKey.RSA.generate(1024)
master_onion_address = onionbalance.util.calc_onion_address(private_key)
config_file_path = create_test_config_file(
tmppath=tmpdir,
private_key=private_key,
instances=chutney_config.get('instances', []),
)
assert config_file_path
# Start an OnionBalance server and monitor for correct output with pexpect
server = pexpect.spawnu("onionbalance",
args=[
'-i', chutney_config.get('client_ip'),
'-p', str(chutney_config.get('control_port')),
'-c', config_file_path,
'-v', 'debug',
], logfile=sys.stdout, timeout=15)
# Check for expected output from OnionBalance
server.expect(u"Loaded the config file")
server.expect(u"introduction point set has changed")
server.expect(u"Published a descriptor", timeout=120)
# Check Tor control port gave an uploaded event.
server.expect(u"HS_DESC UPLOADED")
# Eek, sleep to wait for descriptor upload to all replicas to finish
time.sleep(10)
# .. todo:: Also need to check and raise for any warnings or errors
# that are emitted
    # Try to fetch and validate the descriptor with stem
with stem.control.Controller.from_port(
address=chutney_config.get('client_ip'),
port=chutney_config.get('control_port')
) as controller:
controller.authenticate()
# get_hidden_service_descriptor() will raise exceptions if it
# cannot find the descriptors
master_descriptor = controller.get_hidden_service_descriptor(
master_onion_address)
master_ips = master_descriptor.introduction_points()
        # Try to retrieve a descriptor for each instance
for instance_address in chutney_config.get('instances'):
instance_descriptor = controller.get_hidden_service_descriptor(
instance_address)
instance_ips = instance_descriptor.introduction_points()
# Check if all instance IPs were included in the master descriptor
assert (set(ip.identifier for ip in instance_ips) ==
set(ip.identifier for ip in master_ips))
# Check that the control socket was created
socket_path = tmpdir.join('control')
assert socket_path.check()
# Connect to the control socket and check the output
sock_client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock_client.connect(str(socket_path))
# Read the data from the status socket
result = []
while True:
data = sock_client.recv(1024)
if not data:
break
result.append(data.decode('utf-8'))
result_data = ''.join(result)
# Check each instance is in the output
for instance_address in chutney_config.get('instances'):
assert instance_address in result_data
# Check all instances were online and all master descriptors uploaded
assert master_onion_address in result_data
assert '[offline]' not in result_data
assert '[not uploaded]' not in result_data
|
gpl-3.0
| -6,428,917,183,354,752,000
| 33.367816
| 78
| 0.661371
| false
| 4.107143
| true
| false
| false
|
codeman38/toggldesktop
|
third_party/cppclean/cpp/symbols.py
|
1
|
6773
|
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Symbol Table utility code."""
from __future__ import absolute_import
from __future__ import unicode_literals
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
class Error(BaseException):
"""Exception raised when lookup fails."""
class Symbol(object):
"""Data container used internally."""
def __init__(self, name, parts, namespace_stack):
self.name = name
self.parts = parts
self.namespace_stack = namespace_stack
class SymbolTable(object):
"""Symbol table that can perform namespace operations."""
def __init__(self):
# None is the global namespace.
self.namespaces = {None: {}}
def _lookup_namespace(self, symbol, namespace, kind):
"""Helper for lookup_symbol that only looks up variables in a
namespace.
Args:
symbol: Symbol
namespace: pointer into self.namespaces
kind: 'kind of namespace for error message'
"""
for namespace_part in symbol.parts:
namespace = namespace.get(namespace_part)
if namespace is None:
raise Error('%s not found in %snamespace at %s' %
(symbol.name, kind, namespace_part))
result = namespace
if not isinstance(namespace, dict):
# Occurs when a component is not a namespace.
break
return result
def _lookup_global(self, symbol):
"""Helper for lookup_symbol that only looks up global variables.
Args:
symbol: Symbol
"""
assert symbol.parts
namespace = self.namespaces
if len(symbol.parts) == 1:
# If there is only one part, look in globals.
namespace = self.namespaces[None]
try:
# Try to do a normal, global namespace lookup.
return self._lookup_namespace(symbol, namespace, 'global ')
except Error as orig_exc:
try:
# The normal lookup can fail if all of the parts aren't
# namespaces. This happens with OuterClass::Inner.
namespace = self.namespaces[None]
return self._lookup_namespace(symbol, namespace, 'global ')
except Error:
raise orig_exc
def _lookup_in_all_namespaces(self, symbol):
"""Helper for lookup_symbol that looks for symbols in all namespaces.
Args:
symbol: Symbol
"""
namespace = self.namespaces
# Create a stack of namespaces.
namespace_stack = []
for current in symbol.namespace_stack:
namespace = namespace.get(current)
if namespace is None or not isinstance(namespace, dict):
break
namespace_stack.append(namespace)
# Iterate through the stack in reverse order. Need to go from
# innermost namespace to outermost.
for namespace in reversed(namespace_stack):
try:
return self._lookup_namespace(symbol, namespace, '')
except Error:
pass
return None
def lookup_symbol(self, name, namespace_stack):
"""Returns AST node and module for symbol if found.
Args:
name: 'name of the symbol to lookup'
namespace_stack: None or ['namespaces', 'in', 'current', 'scope']
Returns:
(ast.Node, module (ie, any object stored with symbol)) if found
Raises:
Error if the symbol cannot be found.
"""
# TODO(nnorwitz): a convenient API for this depends on the
# representation of the name. e.g., does symbol_name contain
# ::, is symbol_name a list of colon separated names, how are
# names prefixed with :: handled. These have different lookup
# semantics (if leading ::) or change the desirable API.
# For now assume that the symbol_name contains :: and parse it.
symbol = Symbol(name, name.split('::'), namespace_stack)
assert symbol.parts
if symbol.parts[0] == '':
# Handle absolute (global) ::symbol_names.
symbol.parts = symbol.parts[1:]
elif namespace_stack is not None:
result = self._lookup_in_all_namespaces(symbol)
if result:
return result
return self._lookup_global(symbol)
def _add(self, symbol_name, namespace, node, module):
"""Helper function for adding symbols.
See add_symbol().
"""
result = symbol_name in namespace
namespace[symbol_name] = node, module
return not result
def add_symbol(self, symbol_name, namespace_stack, node, module):
"""Adds symbol_name defined in namespace_stack to the symbol table.
Args:
symbol_name: 'name of the symbol to lookup'
namespace_stack: None or ['namespaces', 'symbol', 'defined', 'in']
node: ast.Node that defines this symbol
module: module (any object) this symbol is defined in
Returns:
bool(if symbol was *not* already present)
"""
# TODO(nnorwitz): verify symbol_name doesn't contain :: ?
if namespace_stack:
# Handle non-global symbols (ie, in some namespace).
last_namespace = self.namespaces
for namespace in namespace_stack:
last_namespace = last_namespace.setdefault(namespace, {})
else:
last_namespace = self.namespaces[None]
return self._add(symbol_name, last_namespace, node, module)
def get_namespace(self, name_seq):
"""Returns the prefix of names from name_seq that are known namespaces.
Args:
name_seq: ['names', 'of', 'possible', 'namespace', 'to', 'find']
Returns:
['names', 'that', 'are', 'namespaces', 'possibly', 'empty', 'list']
"""
namespaces = self.namespaces
result = []
for name in name_seq:
namespaces = namespaces.get(name)
if not namespaces:
break
result.append(name)
return result
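# A minimal usage sketch (not part of the original module; the node and module
# objects are hypothetical placeholders):
#
#     table = SymbolTable()
#     table.add_symbol('Inner', ['Outer'], node=ast_node, module=source_module)
#     node, module = table.lookup_symbol('Outer::Inner', namespace_stack=None)
#
# lookup_symbol() splits the name on '::', searches the namespaces in
# namespace_stack from innermost to outermost when one is given, then falls
# back to a global lookup, raising Error if the symbol cannot be found.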
|
bsd-3-clause
| -7,632,433,835,013,923,000
| 32.696517
| 79
| 0.598996
| false
| 4.658184
| false
| false
| false
|
emccode/HeliosBurn
|
heliosburn/django/hbproject/webui/backends.py
|
1
|
1440
|
from django.conf import settings
from mongoengine.django.auth import User
import requests
import json
class HeliosAuthBackend(object):
"""
Authenticate against the API.
"""
def authenticate(self, username=None, password=None):
payload = {'username': username, 'password': password}
url = '%s/auth/login/' % (settings.API_BASE_URL,)
r = requests.post(url, data=json.dumps(payload))
if r.status_code == requests.codes.ok:
token = r.headers.get('x-auth-token')
if not token:
return None
try:
user = User.objects.get(username=username)
user.password = token
user.save()
except User.DoesNotExist:
# Create a new user. Note that we can set password
# to anything, because it won't be checked; the password
# from settings.py will.
user = User(username=username, password=token)
user.is_staff = True
user.is_superuser = True
user.save()
return user
elif r.status_code >= requests.codes.internal_server_error:
raise Exception('Server error. ' + str(r.status_code))
return None
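    # The flow above posts the credentials to <API_BASE_URL>/auth/login/ and, on
    # success, stores the returned x-auth-token as the local user's password so
    # the token can be reused for later API calls; 5xx responses raise an error.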
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
|
mit
| -4,774,238,198,615,431,000
| 31.022222
| 72
| 0.565278
| false
| 4.417178
| false
| false
| false
|
commtrack/commtrack-old-to-del
|
apps/reports/custom/all/domain_summary.py
|
1
|
3326
|
from django.template.loader import render_to_string
import settings
from xformmanager.models import FormDefModel, Metadata
from receiver.models import Submission, Attachment
def domain_summary(request, domain=None, detail_view=True):
'''Domain Admin Summary Data'''
if not domain:
domain = request.extuser.domain
summary = DomainSummary(domain)
return render_to_string("custom/all/domain_summary.html",
                            {"MEDIA_URL": settings.MEDIA_URL, # we pretty sneakily have to explicitly pass this
"detail_view": detail_view,
"domain": domain,
"summary": summary})
class DomainSummary(object):
def __init__(self, domain):
self.form_data = []
self.chw_data = []
self.domain = domain
domain_meta = Metadata.objects.filter(formdefmodel__domain=domain)
domain_submits = Submission.objects.filter(domain=domain)
self.name = domain.name
self.submissions = domain_submits.count()
self.attachments = Attachment.objects.filter(submission__domain=domain).count()
self.first_submission = domain_submits.order_by("submit_time")[0].submit_time
self.last_submission = domain_submits.order_by("-submit_time")[0].submit_time
self.full_count = domain_meta.count()
chws = domain_meta.values_list('username', flat=True).distinct()
forms = FormDefModel.objects.filter(domain=domain)
blacklist = domain.get_blacklist()
for form in forms:
form_metas = domain_meta.filter(formdefmodel=form)
self.form_data.append({"form": form,
"count": form_metas.count(),
"first": _get_first_object(form_metas, "timeend", True),
"last": _get_first_object(form_metas, "timeend", False)
})
self.blacklist_count = 0
self.chw_blacklist_count = 0
self.chw_count = 0
for chw in chws:
chw_forms = domain_meta.filter(username=chw)
in_blacklist = chw in blacklist
self.chw_data.append({"name": chw,
"count": chw_forms.count(),
"in_blacklist": in_blacklist,
"first": _get_first_object(chw_forms, "timeend", True),
"last": _get_first_object(chw_forms, "timeend", False)
})
if in_blacklist:
self.chw_blacklist_count += 1
self.blacklist_count += chw_forms.count()
else:
self.chw_count += 1
self.count = self.full_count - self.blacklist_count
def chws(self):
"""Flat list of CHW's found in this domain"""
        return [chw["name"] for chw in self.chw_data]
    def form_count(self):
        """Number of unique forms (types) found in this domain."""
return len(self.form_data)
def _get_first_object(queryset, column_name, first):
sort_str = "" if first else "-"
sorted_qs = queryset.order_by("%s%s" % (sort_str, column_name))
if sorted_qs.count() > 0:
return sorted_qs[0]
|
bsd-3-clause
| -5,803,733,305,685,974,000
| 42.776316
| 110
| 0.553818
| false
| 4.10111
| false
| false
| false
|
uaprom-summer-2015/Meowth
|
project/gallery.py
|
1
|
1876
|
import os
from werkzeug.datastructures import FileStorage
from project.models import UploadedImage
from PIL import Image
from PIL.ExifTags import TAGS
IM_EXTENSIONS = frozenset(['.jpg', '.jpeg', '.gif', '.png'])
def remove_exif_orientation(file_path):
ext = os.path.splitext(file_path)[1].lower()
if ext == '.jpg' or ext == '.jpeg':
img = Image.open(file_path)
exif = img._getexif()
if not exif:
return
orientation = 1
for (k, v) in exif.items():
if TAGS.get(k) == 'Orientation':
orientation = v
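        # EXIF orientation values 2-8 encode the rotation/mirroring applied by
        # the camera; the branches below undo them so the saved image is upright.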
        if orientation == 6:
            img = img.rotate(-90)
        elif orientation == 8:
            img = img.rotate(90)
        elif orientation == 3:
            img = img.rotate(180)
        elif orientation == 2:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
        elif orientation == 5:
            img = img.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT)
        elif orientation == 7:
            img = img.rotate(90).transpose(Image.FLIP_LEFT_RIGHT)
        elif orientation == 4:
            img = img.rotate(180).transpose(Image.FLIP_LEFT_RIGHT)
img.save(file_path)
def upload_file(file_path):
remove_exif_orientation(file_path)
with open(file_path, 'rb') as fp:
file = FileStorage(fp)
UploadedImage.bl.save_image(
image=file,
img_category=UploadedImage.IMG_CATEGORY.gallery,
do_sync=True,
)
def images(subdir):
for i in os.listdir(subdir):
_, extension = os.path.splitext(i)
if extension.lower() in IM_EXTENSIONS:
yield os.path.join(subdir, i)
def load_images(subdir=None):
if not subdir:
for _ in range(64):
upload_file('testdata/images/face-2.jpg')
else:
for fp in images(subdir):
upload_file(fp)
|
bsd-3-clause
| -2,487,392,880,310,597,000
| 28.3125
| 66
| 0.58209
| false
| 3.678431
| false
| false
| false
|
UManPychron/pychron
|
pychron/envisage/initialization/initialization_parser.py
|
1
|
13078
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# from lxml.etree import Element
from __future__ import absolute_import
from __future__ import print_function
import inspect
# ============= standard library imports ========================
import os
import sys
from pyface.message_dialog import warning
# ============= local library imports ==========================
from pychron.core.helpers.strtools import to_bool
from pychron.core.xml.xml_parser import XMLParser
from pychron.paths import paths
lower = lambda x: x.lower() if x else None
def handle_uncaught_exception(func):
def _handle(*args, **kw):
try:
return func(*args, **kw)
except Exception as e:
import traceback
traceback.print_exc()
warning(None, 'There is a problem in your initialization file {}'.format(e))
sys.exit()
return _handle
def decorate_all(cls):
"""
adds the handle_uncaught_exception decorator to all methods of the class
"""
for name, m in inspect.getmembers(cls, inspect.ismethod):
setattr(cls, name, handle_uncaught_exception(m))
return cls
@decorate_all
class InitializationParser(XMLParser):
"""
"""
def __init__(self, *args, **kw):
ver = '_proc'
# ver = '_valve'
# ver ='_apis'
# ver = '_uv'
ver = '_exp'
# ver = '_exp_uv'
# ver= '_spec'
# ver = '_diode'
# ver = '_dash'
# ver = '_dash_client'
# ver = ''
p = os.path.join(paths.setup_dir, 'initialization{}.xml'.format(ver))
if not os.path.isfile(p):
p = os.path.join(paths.setup_dir, 'initialization.xml')
if not os.path.isfile(p):
warning(None, 'No initialization file.\n{} is not a valid file'.format(p))
sys.exit()
super(InitializationParser, self).__init__(p, *args, **kw)
def verify(self):
return self._syntax_error
def get_globals(self):
tree = self.get_root()
tree = tree.find('globals')
return tree.iter()
def set_bool_tag(self, tag, v):
tree = self.get_root()
tree = tree.find('globals')
elem = tree.find(tag)
if elem is not None:
elem.text = v
else:
tree.append(self.new_element(tag, v))
def add_plugin(self, category, name, save=True, enabled='false'):
tree = self.get_root()
tree = tree.find('plugins')
cat = tree.find(category)
if not cat:
tree.append(self.new_element(category, None))
cat = tree.find(category)
cat.append(self.new_element('plugin', name, enabled=enabled))
if save:
self.save()
def get_plugins(self, category=None, all_=False, element=False):
tree = self.get_root()
tree = tree.find('plugins')
if category:
cat = tree.find(category)
if cat is not None:
plugins = cat.findall('plugin')
else:
try:
plugins = tree.iter(tag='plugin')
except AttributeError:
plugins = tree.getiterator(tag='plugin')
if plugins:
return [p if element else p.text.strip()
for p in plugins if all_ or to_bool(p.get('enabled'))]
# def get_plugins_as_elements(self, category):
# tree = self._tree.find('plugins')
# cat = tree.find(category)
# if cat is not None:
# return cat.findall('plugin')
def get_global(self, tag):
root = self.get_root()
elem = root.find('globals')
if elem is not None:
g = elem.find(tag)
if g is not None:
return g.text.strip()
def get_plugin_groups(self, elem=False):
plugin = self.get_root().find('plugins')
return [t if elem else t.tag for t in list(plugin)]
def get_plugin_group(self, name):
return next((p for p in self.get_plugin_groups(elem=True)
if p.tag == name
), None)
def get_groups(self):
tree = self.get_root()
# root = tree.getroot()
return [t.tag for t in list(tree)]
def get_parameters(self, *args, **kw):
return self._get_parameters(all_=True, *args, **kw)
def get_parameter(self, subtree, name, all_=True, **kw):
pa = self._get_parameters(subtree, name, all_=all_, **kw)
if pa:
return pa[0]
def enable_manager(self, name, parent):
plugin = self.get_plugin(parent)
man = next((m for m in plugin.findall('manager') if m.text.strip() == name), None)
man.set('enabled', 'true')
self.save()
def disable_manager(self, name, parent):
plugin = self.get_plugin(parent)
man = next((m for m in plugin.findall('manager') if m.text.strip() == name), None)
man.set('enabled', 'false')
self.save()
def enable_device(self, name, plugin):
dev = self.get_device(plugin, name, None, element=True)
dev.set('enabled', 'true')
self.save()
def disable_device(self, name, plugin):
dev = self.get_device(plugin, name, None, element=True)
dev.set('enabled', 'false')
self.save()
def enable_plugin(self, name, category=None, save=True):
plugin = self.get_plugin(name, category)
if plugin is None:
self.add_plugin(category, name, save=save, enabled='true')
else:
plugin.set('enabled', 'true')
if save:
self.save()
def disable_plugin(self, name, category=None, save=True):
plugin = self.get_plugin(name, category)
if plugin is not None:
plugin.set('enabled', 'false')
if save:
self.save()
def get_flags(self, manager, **kw):
return self._get_parameters(manager, 'flag', **kw)
def get_timed_flags(self, manager, **kw):
return self._get_parameters(manager, 'timed_flag', **kw)
def get_valve_flags(self, manager, **kw):
return self._get_parameters(manager, 'valve_flag', **kw)
def get_rpc_params(self, manager):
if isinstance(manager, tuple):
manager = self.get_manager(*manager)
text = lambda x: x.text.strip() if x is not None else None
try:
rpc = manager.find('rpc')
mode = rpc.get('mode')
port = text(rpc.find('port'))
host = text(rpc.find('host'))
return mode, host, int(port),
except Exception as e:
pass
return None, None, None
def get_device(self, manager, devname, plugin, element=False):
if plugin:
man = self.get_plugin(plugin)
nman = next((d for d in man.findall('manager')
if d.text.strip() == manager), None)
if nman is not None:
man = nman
else:
man = self.get_plugin(manager)
# print manager, devname, plugin, man.text.strip()
# else:
# man = self.get_manager()
# if plugin is None:
# man = self.get_plugin(manager)
# else:
# man = self.get_manager(manager, plugin)
# if man is None:
# man = self.get_plugin_group(manager)
dev = next((d for d in man.findall('device')
if d.text.strip() == devname), None)
if not element and dev:
dev = dev.text.strip()
return dev
def get_devices(self, manager, **kw):
return self._get_parameters(manager, 'device', **kw)
def get_processor(self, manager, **kw):
p = self._get_parameters(manager, 'processor', **kw)
if p:
return p[0]
def get_processors(self):
# ps = []
# for p in self.get_plugins('Hardware'):
# pp = self.get_processor(p)
# if pp:
# ps.append(pp)
pl = self.get_plugin_group('hardware')
ps = [pi for pi in [self.get_processor(p)
for p in self.get_plugins('hardware', element=True)] if pi]
nps = self._get_parameters(pl, 'processor')
if nps:
ps += nps
return ps
def get_server(self, manager, **kw):
p = self._get_parameters(manager, 'server', **kw)
if p:
return p[0]
def get_servers(self):
servers = [pi for pi in [self.get_server(p)
for p in self.get_plugins('hardware', element=True)] if pi]
h = self.get_plugin_group('hardware')
if h is not None:
hs = self._get_parameters(h, 'server')
if hs:
servers += hs
return servers
def _get_parameters(self, subtree, tag, all_=False, element=False):
if subtree is None:
print(subtree)
return [d if element else d.text.strip()
for d in subtree.findall(tag)
if all_ or to_bool(d.get('enabled'))]
def get_managers(self, elem, all_=False, element=False):
return [m if element else m.text.strip()
for m in elem.findall('manager')
if all_ or to_bool(m.get('enabled'))]
def get_plugin(self, name, category=None):
if '_' in name:
if 'co2' in name:
name = name.split('_')[0].capitalize() + 'CO2'
elif 'uv' in name:
name = name.split('_')[0].capitalize() + 'UV'
else:
name = ''.join([a.capitalize() for a in name.split('_')])
else:
name = name[0].upper() + name[1:]
if not category:
category = self.get_categories()
if not isinstance(category, (list, tuple)):
category = (category, )
for cat in category:
elem = self._get_element(cat, name)
if elem is not None:
return elem
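    # Name mangling note: get_plugin() maps lower_snake_case names onto the
    # CamelCase tags used in the XML, e.g. (hypothetical names) 'fusions_co2'
    # -> 'FusionsCO2', 'extraction_line' -> 'ExtractionLine', and a plain name
    # such as 'experiment' -> 'Experiment', before searching the categories.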
def get_manager(self, name, plugin):
if 'Manager' in plugin:
plugin = plugin.replace('Manager', '')
p = self.get_plugin(plugin)
man = next((pi for pi in p.findall('manager') if pi.text.strip() == name), None)
return man
def get_categories(self):
return ['general', 'data', 'hardware', 'social']
# root = self.get_root()
# tree = root.find('plugins')
# s = lambda x: x.tag
#
# cats = map(s, [c for c in tree.iter(etree.Element)])
# return list(set(cats))
#return map(s, set([c for c in tree.iter()]))
def _get_element(self, category, name, tag='plugin'):
root = self.get_root()
tree = root.find('plugins')
if category is None:
iterator = lambda: tree.iter(tag=tag)
# return next((p for p in tree.iter(tag=tag) if p.text.strip() == name), None)
# for p in tree.iter(tag=tag):
# if p.text.strip() == name:
# return p
else:
cat = tree.find(category)
# print 'asss', category, cat
if cat is not None:
iterator = lambda: cat.findall(tag)
else:
iterator = lambda: ''
# for plugin in cat.findall(tag):
# if plugin.text.strip() == name:
# return plugin
name = name.lower()
# for ii in iterator():
# print ii.text.strip().lower(), name
# if ii.text.strip().lower()==name:
# break
return next((p for p in iterator() if p.text.strip().lower() == name), None)
def get_systems(self):
p = self.get_plugin('ExtractionLine')
if p is not None:
return [(s.text.strip(), s.get('master_host')) for s in p.findall('system')]
return []
# def get_processors(self):
#
# cat = self._tree.find('remotehardware')
# pi = None
# if cat is not None:
# pi = cat.findall('processor')
#
# return [pii.text.strip() for pii in (pi if pi else [])]
# ============= EOF =============================================
|
apache-2.0
| 3,117,435,444,442,191,000
| 31.942065
| 97
| 0.523627
| false
| 3.917915
| false
| false
| false
|
ntduong/data-science-newbie
|
Articles_Data_Analysis/cluster_articles.py
|
1
|
1885
|
import numpy as np
import scipy.cluster.hierarchy as hier
import scipy.spatial.distance as dist
import matplotlib.pyplot as plt
import make_data
from transform import count_transform
from cluster_algos import kmeans, hcluster
def clustering(X, labels, algo='hcluster', n_clusters=5, figname='cluster_result.png'):
""" Clustering data.
Params:
X: ndarray of n x d size (n samples, d features)
labels: labels of samples, for visualizing result.
        algo: clustering algorithm to use, e.g., "hcluster" or "kmeans"
n_clusters: #.of.cluster in case of kmeans
figname: file name to save figure
"""
assert algo in ['hcluster', 'kmeans'], "Invalid algorithm!"
if algo == 'hcluster':
linkage_mat = hcluster(X, metric='correlation', method='average')
fig = plt.figure(figsize=(30,20), dpi=100)
fig.clf()
hier.dendrogram(linkage_mat, labels=labels, leaf_rotation=90, leaf_font_size=20)
plt.savefig(figname)
else:
labels = np.asarray(labels)
result = kmeans(X, n_clusters=n_clusters)
for cid in xrange(n_clusters):
print 'Cluster %d:' %(cid+1)
for a in labels[result == cid]:
print a.encode('utf-8')
print '-'*30
def main(url_file, use_tfidf=True):
word_cnt, sites, site_urls = make_data.get_sites_words(url_file)
sw_mat, word_list = make_data.make_site_by_word_mat(word_cnt, sites, freq=5, percent=0.7)
X = sw_mat
if use_tfidf:
X = count_transform(sw_mat)
labels = ['Normal Deviate', 'MLTheory', 'CNET', 'BBC', 'CNN', 'JP', 'CNN-Tech', 'TechReview', 'NYT-Tech', 'Time-World', 'Mark-Reid']
clustering(X, labels, algo='hcluster', figname='hcluster_site_by_word_tfidf.png')
if __name__ == '__main__':
main('txt/urls.txt', use_tfidf=True)
|
mit
| 4,438,244,582,114,250,000
| 39.12766
| 136
| 0.620159
| false
| 3.360071
| false
| false
| false
|
dannybrowne86/django-avatar
|
setup.py
|
2
|
1958
|
import codecs
import re
from os import path
from setuptools import setup, find_packages
def read(*parts):
filename = path.join(path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='django-avatar',
version=find_version("avatar", "__init__.py"),
description="A Django app for handling user avatars",
long_description=read('README.rst'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='avatar, django',
author='Eric Florenzano',
author_email='floguy@gmail.com',
maintainer='Grant McConnaughey',
maintainer_email='grantmcconnaughey@gmail.com',
url='http://github.com/grantmcconnaughey/django-avatar/',
license='BSD',
packages=find_packages(exclude=['tests']),
package_data={
'avatar': [
'templates/notification/*/*.*',
'templates/avatar/*.html',
'locale/*/LC_MESSAGES/*',
'media/avatar/img/default.jpg',
],
},
install_requires=[
'Pillow>=2.0',
'django-appconf>=0.6',
],
zip_safe=False,
)
|
bsd-3-clause
| 9,097,149,291,649,415,000
| 31.098361
| 68
| 0.589888
| false
| 3.900398
| false
| false
| false
|
garbear/EventGhost
|
eg/Classes/MacroSelectButton.py
|
1
|
2415
|
# This file is part of EventGhost.
# Copyright (C) 2005 Lars-Peter Voss <bitmonster@eventghost.org>
#
# EventGhost is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# EventGhost is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EventGhost; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import eg
import wx
class MacroSelectButton(wx.Window):
def __init__(self, parent, label, title, mesg, macro=None):
if macro is None:
macroName = ""
else:
macroName = macro.name
self.title = title
self.mesg = mesg
self.macro = macro
wx.Window.__init__(self, parent, -1)
self.textBox = eg.StaticTextBox(self, -1, macroName, size=(200, -1))
self.button = wx.Button(self, -1, label)
self.Bind(wx.EVT_BUTTON, self.OnButton)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.textBox, 1, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.button, 0, wx.LEFT, 5)
self.SetSizer(sizer)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
self.Layout()
def OnSetFocus(self, dummyEvent):
self.button.SetFocus()
def OnSize(self, dummyEvent):
if self.GetAutoLayout():
self.Layout()
@eg.AsTasklet
def OnButton(self, dummyEvent):
result = eg.TreeItemBrowseDialog.GetModalResult(
self.title,
self.mesg,
self.macro,
(eg.MacroItem,),
parent=self
)
if result:
macro = result[0]
self.textBox.SetLabel(macro.name)
self.macro = macro
self.ProcessEvent(
wx.CommandEvent(wx.EVT_TEXT.evtType[0], self.GetId())
)
def GetValue(self):
return self.macro
|
gpl-2.0
| 1,440,151,838,803,465,500
| 29.776316
| 76
| 0.609938
| false
| 3.726852
| false
| false
| false
|