code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Copyright 2022 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for note_sequences."""
from mt3 import event_codec
from mt3 import note_sequences
from mt3 import run_length_encoding
import note_seq
import numpy as np
import tensorflow as tf
# Shared codec used by every test below. NOTE(review): the numeric token ids
# asserted in the tests (e.g. 161 for pitch 60, 356 for velocity 127) depend on
# this exact ordering of event ranges after the 100 shift tokens -- confirm
# against event_codec.Codec before reordering.
codec = event_codec.Codec(
    max_shift_steps=100,
    steps_per_second=100,
    event_ranges=[
        event_codec.EventRange('pitch', note_seq.MIN_MIDI_PITCH,
                               note_seq.MAX_MIDI_PITCH),
        event_codec.EventRange('velocity', 0, 127),
        event_codec.EventRange('drum', note_seq.MIN_MIDI_PITCH,
                               note_seq.MAX_MIDI_PITCH),
        event_codec.EventRange('program', note_seq.MIN_MIDI_PROGRAM,
                               note_seq.MAX_MIDI_PROGRAM),
        event_codec.EventRange('tie', 0, 0)
    ])
class RunLengthEncodingTest(tf.test.TestCase):
  """Tests for run-length encoding and decoding of NoteSequence events.

  Raw token ids used throughout (per the module-level `codec`): 1..100 are
  time-shift tokens; 161/162/163/164 are onsets for MIDI pitches 60..63;
  229/230/356 are velocity tokens (the decode tests show 229 acting as the
  note-off velocity and 356 as velocity 127).
  """

  def test_encode_and_index_note_sequence(self):
    # Onsets-only encoding: three short notes at 1s, 2s, 3s; frames every 1ms.
    ns = note_seq.NoteSequence()
    ns.notes.add(start_time=1.0,
                 end_time=1.1,
                 pitch=61,
                 velocity=100)
    ns.notes.add(start_time=2.0,
                 end_time=2.1,
                 pitch=62,
                 velocity=100)
    ns.notes.add(start_time=3.0,
                 end_time=3.1,
                 pitch=63,
                 velocity=100)
    ns.total_time = ns.notes[-1].end_time

    frame_times = np.arange(0, 4, step=.001)

    event_times, event_values = note_sequences.note_sequence_to_onsets(ns)
    events, event_start_indices, event_end_indices, _, _ = run_length_encoding.encode_and_index_events(
        state=None, event_times=event_times, event_values=event_values,
        encode_event_fn=note_sequences.note_event_data_to_events,
        codec=codec, frame_times=frame_times)

    self.assertEqual(len(frame_times), len(event_start_indices))
    self.assertEqual(len(frame_times), len(event_end_indices))
    self.assertLen(events, 403)
    # 100 one-step shifts between consecutive onsets (162/163/164 are the
    # onset tokens for pitches 61/62/63).
    expected_events = ([1] * 100 +
                       [162] +
                       [1] * 100 +
                       [163] +
                       [1] * 100 +
                       [164] +
                       [1] * 100)
    np.testing.assert_array_equal(expected_events, events)
    self.assertEqual(event_start_indices[0], 0)
    self.assertEqual(event_end_indices[0], 0)
    # Each 1-second frame boundary should point at the token index of the
    # corresponding onset.
    self.assertEqual(162, events[100])
    self.assertEqual(1.0, frame_times[1000])
    self.assertEqual(event_start_indices[1000], 100)
    self.assertEqual(event_end_indices[1000], 100)
    self.assertEqual(163, events[201])
    self.assertEqual(2.0, frame_times[2000])
    self.assertEqual(event_start_indices[2000], 201)
    self.assertEqual(event_end_indices[2000], 201)
    self.assertEqual(164, events[302])
    self.assertEqual(3.0, frame_times[3000])
    self.assertEqual(event_start_indices[3000], 302)
    self.assertEqual(event_end_indices[3000], 302)
    self.assertEqual(1, events[-1])
    self.assertEqual(3.999, frame_times[-1])
    self.assertEqual(event_start_indices[-1], 402)
    self.assertEqual(event_end_indices[-1], len(expected_events))

  def test_encode_and_index_note_sequence_velocity(self):
    # Onset+offset encoding: velocity tokens precede each pitch token.
    ns = note_seq.NoteSequence()
    ns.notes.add(start_time=1.0,
                 end_time=3.0,
                 pitch=61,
                 velocity=1)
    ns.notes.add(start_time=2.0,
                 end_time=4.0,
                 pitch=62,
                 velocity=127)
    ns.total_time = ns.notes[-1].end_time

    frame_times = np.arange(0, 4, step=.001)

    event_times, event_values = (
        note_sequences.note_sequence_to_onsets_and_offsets(ns))
    events, event_start_indices, event_end_indices, _, _ = run_length_encoding.encode_and_index_events(
        state=None, event_times=event_times, event_values=event_values,
        encode_event_fn=note_sequences.note_event_data_to_events,
        codec=codec, frame_times=frame_times)

    self.assertEqual(len(frame_times), len(event_start_indices))
    self.assertEqual(len(frame_times), len(event_end_indices))
    self.assertLen(events, 408)
    # 230/356 are onset velocities (1 and 127); 229 marks the offsets.
    expected_events = ([1] * 100 +
                       [230, 162] +
                       [1] * 100 +
                       [356, 163] +
                       [1] * 100 +
                       [229, 162] +
                       [1] * 100 +
                       [229, 163])
    np.testing.assert_array_equal(expected_events, events)
    self.assertEqual(event_start_indices[0], 0)
    self.assertEqual(event_end_indices[0], 0)
    self.assertEqual(230, events[100])
    self.assertEqual(162, events[101])
    self.assertEqual(1.0, frame_times[1000])
    self.assertEqual(event_start_indices[1000], 100)
    self.assertEqual(event_end_indices[1000], 100)
    self.assertEqual(356, events[202])
    self.assertEqual(163, events[203])
    self.assertEqual(2.0, frame_times[2000])
    self.assertEqual(event_start_indices[2000], 202)
    self.assertEqual(event_end_indices[2000], 202)
    self.assertEqual(229, events[304])
    self.assertEqual(162, events[305])
    self.assertEqual(3.0, frame_times[3000])
    self.assertEqual(event_start_indices[3000], 304)
    self.assertEqual(event_end_indices[3000], 304)
    self.assertEqual(229, events[406])
    self.assertEqual(163, events[407])
    self.assertEqual(3.999, frame_times[-1])
    self.assertEqual(event_start_indices[-1], 405)
    self.assertEqual(event_end_indices[-1], len(expected_events))

  def test_encode_and_index_note_sequence_multitrack(self):
    # Multitrack encoding with program-change events, drums, and tie-state
    # tokens; expected values are built from Event objects rather than raw
    # token ids.
    ns = note_seq.NoteSequence()
    ns.notes.add(start_time=0.0,
                 end_time=1.0,
                 pitch=37,
                 velocity=127,
                 is_drum=True)
    ns.notes.add(start_time=1.0,
                 end_time=3.0,
                 pitch=61,
                 velocity=127,
                 program=0)
    ns.notes.add(start_time=2.0,
                 end_time=4.0,
                 pitch=62,
                 velocity=127,
                 program=40)
    ns.total_time = ns.notes[-1].end_time

    frame_times = np.arange(0, 4, step=.001)

    event_times, event_values = (
        note_sequences.note_sequence_to_onsets_and_offsets_and_programs(ns))
    (tokens, event_start_indices, event_end_indices, state_tokens,
     state_event_indices) = run_length_encoding.encode_and_index_events(
         state=note_sequences.NoteEncodingState(),
         event_times=event_times, event_values=event_values,
         encode_event_fn=note_sequences.note_event_data_to_events,
         codec=codec, frame_times=frame_times,
         encoding_state_to_events_fn=(
             note_sequences.note_encoding_state_to_events))

    self.assertEqual(len(frame_times), len(event_start_indices))
    self.assertEqual(len(frame_times), len(event_end_indices))
    self.assertEqual(len(frame_times), len(state_event_indices))
    self.assertLen(tokens, 414)
    expected_events = (
        [event_codec.Event('velocity', 127), event_codec.Event('drum', 37)] +
        [event_codec.Event('shift', 1)] * 100 +
        [event_codec.Event('program', 0),
         event_codec.Event('velocity', 127), event_codec.Event('pitch', 61)] +
        [event_codec.Event('shift', 1)] * 100 +
        [event_codec.Event('program', 40),
         event_codec.Event('velocity', 127), event_codec.Event('pitch', 62)] +
        [event_codec.Event('shift', 1)] * 100 +
        [event_codec.Event('program', 0),
         event_codec.Event('velocity', 0), event_codec.Event('pitch', 61)] +
        [event_codec.Event('shift', 1)] * 100 +
        [event_codec.Event('program', 40),
         event_codec.Event('velocity', 0), event_codec.Event('pitch', 62)])
    expected_tokens = [codec.encode_event(e) for e in expected_events]
    np.testing.assert_array_equal(expected_tokens, tokens)
    expected_state_events = [
        event_codec.Event('tie', 0),       # state prior to first drum
        event_codec.Event('tie', 0),       # state prior to first onset
        event_codec.Event('program', 0),   # state prior to second onset
        event_codec.Event('pitch', 61),    # |
        event_codec.Event('tie', 0),       # |
        event_codec.Event('program', 0),   # state prior to first offset
        event_codec.Event('pitch', 61),    # |
        event_codec.Event('program', 40),  # |
        event_codec.Event('pitch', 62),    # |
        event_codec.Event('tie', 0),       # |
        event_codec.Event('program', 40),  # state prior to second offset
        event_codec.Event('pitch', 62),    # |
        event_codec.Event('tie', 0)        # |
    ]
    expected_state_tokens = [codec.encode_event(e)
                             for e in expected_state_events]
    np.testing.assert_array_equal(expected_state_tokens, state_tokens)

    self.assertEqual(event_start_indices[0], 0)
    self.assertEqual(event_end_indices[0], 0)
    self.assertEqual(state_event_indices[0], 0)
    self.assertEqual(1.0, frame_times[1000])
    self.assertEqual(event_start_indices[1000], 102)
    self.assertEqual(event_end_indices[1000], 102)
    self.assertEqual(state_event_indices[1000], 1)
    self.assertEqual(2.0, frame_times[2000])
    self.assertEqual(event_start_indices[2000], 205)
    self.assertEqual(event_end_indices[2000], 205)
    self.assertEqual(state_event_indices[2000], 2)
    self.assertEqual(3.0, frame_times[3000])
    self.assertEqual(event_start_indices[3000], 308)
    self.assertEqual(event_end_indices[3000], 308)
    self.assertEqual(state_event_indices[3000], 5)
    self.assertEqual(3.999, frame_times[-1])
    self.assertEqual(event_start_indices[-1], 410)
    self.assertEqual(event_end_indices[-1], len(expected_events))
    self.assertEqual(state_event_indices[-1], 10)

  def test_encode_and_index_note_sequence_last_token_alignment(self):
    # Uses an 8ms frame spacing (which does not divide the 10ms shift step
    # evenly) to exercise alignment of the final frame with the last tokens.
    ns = note_seq.NoteSequence()
    ns.notes.add(start_time=0.0,
                 end_time=0.1,
                 pitch=60,
                 velocity=100)
    ns.total_time = ns.notes[-1].end_time

    frame_times = np.arange(0, 1.008, step=.008)

    event_times, event_values = note_sequences.note_sequence_to_onsets(ns)
    events, event_start_indices, event_end_indices, _, _ = run_length_encoding.encode_and_index_events(
        state=None,
        event_times=event_times,
        event_values=event_values,
        encode_event_fn=note_sequences.note_event_data_to_events,
        codec=codec,
        frame_times=frame_times)

    self.assertEqual(len(frame_times), len(event_start_indices))
    self.assertEqual(len(frame_times), len(event_end_indices))
    self.assertLen(events, 102)
    expected_events = [161] + [1] * 101
    np.testing.assert_array_equal(expected_events, events)
    self.assertEqual(event_start_indices[0], 0)
    self.assertEqual(event_end_indices[0], 0)
    self.assertEqual(event_start_indices[125], 101)
    self.assertEqual(event_end_indices[125], 102)

  def test_decode_note_sequence_events(self):
    # Token ids: 25/50 are time shifts; 161/162 are pitch 60/61 onsets.
    events = [25, 161, 50, 162]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)

    self.assertEqual(0, invalid_ids)
    self.assertEqual(0, dropped_events)

    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=0.25,
        end_time=0.26)
    expected_ns.notes.add(
        pitch=61,
        velocity=100,
        start_time=0.50,
        end_time=0.51)
    expected_ns.total_time = 0.51
    self.assertProtoEquals(expected_ns, ns)

  def test_decode_note_sequence_events_onsets_only(self):
    # Onsets-only decoding gives each note a fixed short (0.01s) duration.
    events = [5, 161, 25, 162]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)

    self.assertEqual(0, invalid_ids)
    self.assertEqual(0, dropped_events)

    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=0.05,
        end_time=0.06)
    expected_ns.notes.add(
        pitch=61,
        velocity=100,
        start_time=0.25,
        end_time=0.26)
    expected_ns.total_time = 0.26
    self.assertProtoEquals(expected_ns, ns)

  def test_decode_note_sequence_events_velocity(self):
    # Velocity-aware decoding: 356 = velocity 127 onset, 229 = note-off.
    events = [5, 356, 161, 25, 229, 161]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)

    self.assertEqual(0, invalid_ids)
    self.assertEqual(0, dropped_events)

    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=127,
        start_time=0.05,
        end_time=0.25)
    expected_ns.total_time = 0.25
    self.assertProtoEquals(expected_ns, ns)

  def test_decode_note_sequence_events_missing_offset(self):
    # A repeated onset of the same pitch with no intervening offset should
    # implicitly end the previous note at the new onset time.
    events = [5, 356, 161, 10, 161, 25, 229, 161]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)

    self.assertEqual(0, invalid_ids)
    self.assertEqual(0, dropped_events)

    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=127,
        start_time=0.05,
        end_time=0.10)
    expected_ns.notes.add(
        pitch=60,
        velocity=127,
        start_time=0.10,
        end_time=0.25)
    expected_ns.total_time = 0.25
    self.assertProtoEquals(expected_ns, ns)

  def test_decode_note_sequence_events_multitrack(self):
    # Mixed drum and programmed-instrument decoding; the drum note lands on
    # the MIDI drum channel (instrument 9).
    events = [5, 525, 356, 161, 15, 356, 394, 25, 525, 229, 161]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)

    self.assertEqual(0, invalid_ids)
    self.assertEqual(0, dropped_events)

    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=37,
        velocity=127,
        start_time=0.15,
        end_time=0.16,
        instrument=9,
        is_drum=True)
    expected_ns.notes.add(
        pitch=60,
        velocity=127,
        start_time=0.05,
        end_time=0.25,
        program=40)
    expected_ns.total_time = 0.25
    self.assertProtoEquals(expected_ns, ns)

  def test_decode_note_sequence_events_invalid_tokens(self):
    # Out-of-range token ids (-1, -2, 9999) are counted as invalid but do not
    # abort decoding of the valid tokens around them.
    events = [5, -1, 161, -2, 25, 162, 9999]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_events, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)

    self.assertEqual(3, invalid_events)
    self.assertEqual(0, dropped_events)

    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=0.05,
        end_time=0.06)
    expected_ns.notes.add(
        pitch=61,
        velocity=100,
        start_time=0.25,
        end_time=0.26)
    expected_ns.total_time = 0.26
    self.assertProtoEquals(expected_ns, ns)

  def test_decode_note_sequence_events_allow_event_at_exactly_max_time(self):
    # An event falling exactly on max_time must be kept, not dropped.
    events = [161, 25, 162]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=1.0, max_time=1.25,
        codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)

    self.assertEqual(0, invalid_ids)
    self.assertEqual(0, dropped_events)

    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=1.00,
        end_time=1.01)
    expected_ns.notes.add(
        pitch=61,
        velocity=100,
        start_time=1.25,
        end_time=1.26)
    expected_ns.total_time = 1.26
    self.assertProtoEquals(expected_ns, ns)

  def test_decode_note_sequence_events_dropped_events(self):
    # A shift past max_time drops the shift and the following onset (2 events).
    events = [5, 161, 30, 162]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=1.0, max_time=1.25,
        codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)

    self.assertEqual(0, invalid_ids)
    self.assertEqual(2, dropped_events)

    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=1.05,
        end_time=1.06)
    expected_ns.total_time = 1.06
    self.assertProtoEquals(expected_ns, ns)

  def test_decode_note_sequence_events_invalid_events(self):
    # 230 is a velocity token, which the onsets-only decoder treats as
    # invalid (counted, then skipped).
    events = [25, 230, 50, 161]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)

    self.assertEqual(1, invalid_ids)
    self.assertEqual(0, dropped_events)

    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=0.50,
        end_time=0.51)
    expected_ns.total_time = 0.51
    self.assertProtoEquals(expected_ns, ns)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| magenta/mt3 | mt3/note_sequences_test.py | Python | apache-2.0 | 19,150 |
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import re
from lxml import etree
from cssutils.css import CSSRule
from cssselect import HTMLTranslator, parse
from cssselect.xpath import XPathExpr, is_safe_name
from cssselect.parser import SelectorSyntaxError
from calibre import force_unicode
from calibre.ebooks.oeb.base import OEB_STYLES, OEB_DOCS, XPNSMAP, XHTML_NS
from calibre.ebooks.oeb.normalize_css import normalize_filter_css, normalizers
from calibre.ebooks.oeb.stylizer import MIN_SPACE_RE, is_non_whitespace, xpath_lower_case, fix_namespace
from calibre.ebooks.oeb.polish.pretty import pretty_script_or_style
class NamespacedTranslator(HTMLTranslator):
    """CSS-to-XPath translator that qualifies element names with the ``h:``
    prefix so generated expressions match elements in the XHTML namespace."""

    def xpath_element(self, selector):
        element = selector.element
        if not element:
            # Universal selector: matches any element, no namespace prefix
            # needed.
            element = '*'
            safe = True
        else:
            safe = is_safe_name(element)
            if safe:
                # We use the h: prefix for the XHTML namespace
                element = 'h:%s' % element.lower()
        xpath = XPathExpr(element=element)
        if not safe:
            # Unsafe names cannot be embedded directly in the XPath; fall
            # back to a name() test.
            xpath.add_name_test()
        return xpath
class CaseInsensitiveAttributesTranslator(NamespacedTranslator):
    'Treat class and id CSS selectors case-insensitively'

    def xpath_class(self, class_selector):
        """Translate a class selector."""
        x = self.xpath(class_selector.selector)
        if is_non_whitespace(class_selector.class_name):
            # Lower-case both @class and the selector value, then use the
            # space-padded containment idiom to match a whole class token.
            x.add_condition(
                "%s and contains(concat(' ', normalize-space(%s), ' '), %s)"
                % ('@class', xpath_lower_case('@class'), self.xpath_literal(
                    ' '+class_selector.class_name.lower()+' ')))
        else:
            # A whitespace-only class name can never match.
            x.add_condition('0')
        return x

    def xpath_hash(self, id_selector):
        """Translate an ID selector."""
        x = self.xpath(id_selector.selector)
        return self.xpath_attrib_equals(x, xpath_lower_case('@id'),
                                        (id_selector.id.lower()))
# Module-level translator entry points; selector translation carries no
# per-call state, so shared instances are sufficient.
css_to_xpath = NamespacedTranslator().css_to_xpath
ci_css_to_xpath = CaseInsensitiveAttributesTranslator().css_to_xpath
def build_selector(text, case_sensitive=True):
    """Compile a CSS selector into an lxml XPath evaluator.

    Returns None when the selector cannot be translated or compiled.
    """
    if case_sensitive:
        translate = css_to_xpath
    else:
        translate = ci_css_to_xpath
    try:
        expression = fix_namespace(translate(text))
        return etree.XPath(expression, namespaces=XPNSMAP)
    except Exception:
        return None
def is_rule_used(root, selector, log, pseudo_pat, cache):
    # Return True if `selector` matches anything in `root` (or if matching
    # cannot be determined, in which case we conservatively keep the rule).
    # Pseudo-classes/elements cannot be matched statically, so strip them
    # before translating the selector to XPath.
    selector = pseudo_pat.sub('', selector)
    selector = MIN_SPACE_RE.sub(r'\1', selector)
    try:
        xp = cache[(True, selector)]
    except KeyError:
        xp = cache[(True, selector)] = build_selector(selector)
    try:
        if xp(root):
            return True
    except Exception:
        # Also covers build_selector() having returned None (xp not
        # callable): err on the side of treating the rule as used.
        return True

    # See if interpreting class and id selectors case-insensitively gives us
    # matches. Strictly speaking, class and id selectors should be case
    # sensitive for XHTML, but we err on the side of caution and not remove
    # them, since case sensitivity depends on whether the html is rendered in
    # quirks mode or not.
    try:
        xp = cache[(False, selector)]
    except KeyError:
        xp = cache[(False, selector)] = build_selector(selector, case_sensitive=False)
    try:
        return bool(xp(root))
    except Exception:
        return True
def filter_used_rules(root, rules, log, pseudo_pat, cache):
    """Yield only the rules none of whose selectors match anything in root."""
    for rule in rules:
        # any() short-circuits on the first matching selector, exactly like
        # a break out of a selector loop.
        matched = any(
            is_rule_used(root, selector.selectorText, log, pseudo_pat, cache)
            for selector in rule.selectorList)
        if not matched:
            yield rule
def process_namespaces(sheet):
    """Return the prefix the stylesheet binds to the XHTML namespace, if any.

    Returns None when no prefix maps to the XHTML namespace, so the caller
    can tell whether a prefix needs restoring later.
    """
    namespaces = sheet.namespaces
    for prefix in namespaces:
        if namespaces[prefix] == XHTML_NS:
            return prefix
    return None
def preserve_htmlns_prefix(sheet, prefix):
    """Restore the sheet's original XHTML namespace prefix.

    If the sheet originally had no XHTML prefix, remove the temporary 'h'
    binding instead.
    """
    if prefix is not None:
        sheet.namespaces[prefix] = XHTML_NS
        return
    # No original prefix: strip the temporary binding added for processing.
    while 'h' in sheet.namespaces:
        del sheet.namespaces['h']
def get_imported_sheets(name, container, sheets, recursion_level=10, sheet=None):
    """Return the set of stylesheet names @import-ed (transitively) by `name`
    (or by `sheet`, if given), excluding `name` itself.

    `recursion_level` bounds the import-chain depth, guarding against
    @import cycles.
    """
    ans = set()
    sheet = sheet or sheets[name]
    for rule in sheet.cssRules.rulesOfType(CSSRule.IMPORT_RULE):
        if rule.href:
            iname = container.href_to_name(rule.href, name)
            # Only track imports that resolve to known stylesheets.
            if iname in sheets:
                ans.add(iname)
    if recursion_level > 0:
        # Iterate a snapshot since ans is mutated while recursing.
        for imported_sheet in tuple(ans):
            ans |= get_imported_sheets(imported_sheet, container, sheets, recursion_level=recursion_level-1)
    ans.discard(name)
    return ans
def remove_unused_css(container, report=None, remove_unused_classes=False):
    '''
    Remove all unused CSS rules from the book. An unused CSS rule is one that does not match any actual content.

    :param report: An optional callable that takes a single argument. It is called with information about the operations being performed.
    :param remove_unused_classes: If True, class attributes in the HTML that do not match any CSS rules are also removed.
    '''
    report = report or (lambda x:x)

    def safe_parse(name):
        # Returns None (implicitly) if the stylesheet cannot be parsed.
        try:
            return container.parsed(name)
        except TypeError:
            pass
    # Parse every stylesheet in the book, dropping unparseable ones.
    sheets = {name:safe_parse(name) for name, mt in container.mime_map.iteritems() if mt in OEB_STYLES}
    sheets = {k:v for k, v in sheets.iteritems() if v is not None}
    import_map = {name:get_imported_sheets(name, container, sheets) for name in sheets}
    if remove_unused_classes:
        # Lower-cased class names per sheet, for case-insensitive matching.
        class_map = {name:{icu_lower(x) for x in classes_in_rule_list(sheet.cssRules)} for name, sheet in sheets.iteritems()}
    # Remember each sheet's original XHTML prefix, then temporarily bind 'h'
    # so selectors translate into namespaced XPath (see NamespacedTranslator).
    sheet_namespace = {}
    for sheet in sheets.itervalues():
        sheet_namespace[sheet] = process_namespaces(sheet)
        sheet.namespaces['h'] = XHTML_NS
    style_rules = {name:tuple(sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE)) for name, sheet in sheets.iteritems()}
    num_of_removed_rules = num_of_removed_classes = 0
    # Pseudo-classes/elements are stripped from selectors before matching.
    pseudo_pat = re.compile(r':(first-letter|first-line|link|hover|visited|active|focus|before|after)', re.I)
    cache = {}

    for name, mt in container.mime_map.iteritems():
        if mt not in OEB_DOCS:
            continue
        root = container.parsed(name)
        used_classes = set()
        # Inline <style> elements: filter their rules in place.
        for style in root.xpath('//*[local-name()="style"]'):
            if style.get('type', 'text/css') == 'text/css' and style.text:
                sheet = container.parse_css(style.text)
                if remove_unused_classes:
                    used_classes |= {icu_lower(x) for x in classes_in_rule_list(sheet.cssRules)}
                imports = get_imported_sheets(name, container, sheets, sheet=sheet)
                # Rules in sheets imported by this inline style are "used"
                # if they match this document; progressively narrow them.
                for imported_sheet in imports:
                    style_rules[imported_sheet] = tuple(filter_used_rules(root, style_rules[imported_sheet], container.log, pseudo_pat, cache))
                    if remove_unused_classes:
                        used_classes |= class_map[imported_sheet]
                ns = process_namespaces(sheet)
                sheet.namespaces['h'] = XHTML_NS
                rules = tuple(sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE))
                unused_rules = tuple(filter_used_rules(root, rules, container.log, pseudo_pat, cache))
                if unused_rules:
                    num_of_removed_rules += len(unused_rules)
                    [sheet.cssRules.remove(r) for r in unused_rules]
                    preserve_htmlns_prefix(sheet, ns)
                    style.text = force_unicode(sheet.cssText, 'utf-8')
                    pretty_script_or_style(container, style)
                    container.dirty(name)
        # Linked stylesheets: narrow their candidate-unused rule sets by this
        # document's content (removal happens after all documents are seen).
        for link in root.xpath('//*[local-name()="link" and @href]'):
            sname = container.href_to_name(link.get('href'), name)
            if sname not in sheets:
                continue
            style_rules[sname] = tuple(filter_used_rules(root, style_rules[sname], container.log, pseudo_pat, cache))
            if remove_unused_classes:
                used_classes |= class_map[sname]
            for iname in import_map[sname]:
                style_rules[iname] = tuple(filter_used_rules(root, style_rules[iname], container.log, pseudo_pat, cache))
                if remove_unused_classes:
                    used_classes |= class_map[iname]
        if remove_unused_classes:
            # Drop class tokens (case-insensitively) that no rule references.
            for elem in root.xpath('//*[@class]'):
                original_classes, classes = elem.get('class', '').split(), []
                for x in original_classes:
                    if icu_lower(x) in used_classes:
                        classes.append(x)
                if len(classes) != len(original_classes):
                    if classes:
                        elem.set('class', ' '.join(classes))
                    else:
                        del elem.attrib['class']
                    num_of_removed_classes += len(original_classes) - len(classes)
                    container.dirty(name)
    # Rules that survived no document's filter are unused everywhere: remove.
    for name, sheet in sheets.iteritems():
        preserve_htmlns_prefix(sheet, sheet_namespace[sheet])
        unused_rules = style_rules[name]
        if unused_rules:
            num_of_removed_rules += len(unused_rules)
            [sheet.cssRules.remove(r) for r in unused_rules]
            container.dirty(name)

    if num_of_removed_rules > 0:
        report(ngettext('Removed %d unused CSS style rule', 'Removed %d unused CSS style rules',
                        num_of_removed_rules) % num_of_removed_rules)
    else:
        report(_('No unused CSS style rules found'))
    if remove_unused_classes:
        if num_of_removed_classes > 0:
            report(ngettext('Removed %d unused class from the HTML', 'Removed %d unused classes from the HTML',
                            num_of_removed_classes) % num_of_removed_classes)
        else:
            report(_('No unused class attributes found'))
    return num_of_removed_rules + num_of_removed_classes > 0
def filter_declaration(style, properties):
    # Remove the given property names from a CSS declaration block, expanding
    # shorthand properties where needed. Returns True if anything changed.
    changed = False
    for prop in properties:
        if style.removeProperty(prop) != '':
            changed = True
    all_props = set(style.keys())
    for prop in style.getProperties():
        # Normalizers expand shorthands (e.g. 'font') into their longhand
        # components, so a filtered longhand hiding inside a shorthand can
        # still be removed.
        n = normalizers.get(prop.name, None)
        if n is not None:
            normalized = n(prop.name, prop.propertyValue)
            removed = properties.intersection(set(normalized))
            if removed:
                changed = True
                style.removeProperty(prop.name)
                # Re-add the longhands that were not filtered out and are not
                # already explicitly present.
                for prop in set(normalized) - removed - all_props:
                    style.setProperty(prop, normalized[prop])
    return changed
def filter_sheet(sheet, properties):
    """Remove the given properties from every style rule in the sheet.

    Rules whose declarations become empty are deleted entirely. Returns
    True if the sheet was modified.
    """
    from cssutils.css import CSSRule
    changed = False
    emptied = []
    for rule in sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE):
        if filter_declaration(rule.style, properties):
            changed = True
            if rule.style.length == 0:
                # Deleting while iterating is unsafe; collect and remove after.
                emptied.append(rule)
    for rule in emptied:
        sheet.cssRules.remove(rule)
    return changed
def filter_css(container, properties, names=()):
    '''
    Remove the specified CSS properties from all CSS rules in the book.

    :param properties: Set of properties to remove. For example: :code:`{'font-family', 'color'}`.
    :param names: The files from which to remove the properties. Defaults to all HTML and CSS files in the book.
    '''
    if not names:
        # Default to every stylesheet and HTML file in the container.
        types = OEB_STYLES | OEB_DOCS
        names = []
        for name, mt in container.mime_map.iteritems():
            if mt in types:
                names.append(name)
    properties = normalize_filter_css(properties)
    doc_changed = False

    for name in names:
        mt = container.mime_map[name]
        if mt in OEB_STYLES:
            # Standalone stylesheet.
            sheet = container.parsed(name)
            filtered = filter_sheet(sheet, properties)
            if filtered:
                container.dirty(name)
                doc_changed = True
        elif mt in OEB_DOCS:
            root = container.parsed(name)
            changed = False
            # Inline <style> blocks.
            for style in root.xpath('//*[local-name()="style"]'):
                if style.text and style.get('type', 'text/css') in {None, '', 'text/css'}:
                    sheet = container.parse_css(style.text)
                    if filter_sheet(sheet, properties):
                        changed = True
                        style.text = force_unicode(sheet.cssText, 'utf-8')
                        pretty_script_or_style(container, style)
            # style="" attributes on individual elements.
            for elem in root.xpath('//*[@style]'):
                text = elem.get('style', None)
                if text:
                    style = container.parse_css(text, is_declaration=True)
                    if filter_declaration(style, properties):
                        changed = True
                        if style.length == 0:
                            # Nothing left in the declaration: drop the attribute.
                            del elem.attrib['style']
                        else:
                            elem.set('style', force_unicode(style.getCssText(separator=' '), 'utf-8'))
            if changed:
                container.dirty(name)
                doc_changed = True

    return doc_changed
def _classes_in_selector(selector, classes):
for attr in ('selector', 'subselector', 'parsed_tree'):
s = getattr(selector, attr, None)
if s is not None:
_classes_in_selector(s, classes)
cn = getattr(selector, 'class_name', None)
if cn is not None:
classes.add(cn)
def classes_in_selector(text):
    """Return the set of class names referenced by a CSS selector string.

    Unparseable selectors yield an empty set.
    """
    found = set()
    try:
        parsed = parse(text)
    except SelectorSyntaxError:
        return found
    for selector in parsed:
        _classes_in_selector(selector, found)
    return found
def classes_in_rule_list(css_rules):
    """Collect the class names used by all style rules in the list,
    recursing into grouping rules that expose a ``cssRules`` attribute."""
    result = set()
    for rule in css_rules:
        if rule.type == rule.STYLE_RULE:
            result.update(classes_in_selector(rule.selectorText))
        elif hasattr(rule, 'cssRules'):
            result.update(classes_in_rule_list(rule.cssRules))
    return result
| sharad/calibre | src/calibre/ebooks/oeb/polish/css.py | Python | gpl-3.0 | 14,234 |
**********Region Growing Code**********
//Step 0: Initialization
i = 0
G = {g|vertices in Graph}
V = [] //Growing tree
for each element g in G: //Initialization, set all vertices.connectedness = -999
set g.connectedness=-999
//Step 1: Loop through all vertices
while G != []:
seed = vertex with max total connectedness in G //Initialization, find the seed to grow
u = seed
u.connectedness = max(connectedness(u, each element q in G that are adjacent to u)) //Seed.connectedness is set to be the max connectedness with its adjacent vertex
if u.connectedness < lambda:
    pass //Connectedness below threshold lambda: do not grow a region from this seed
else:
for each element q in all vertices that are adjacent to u:
if min(u.connectedness,connectedness(u, q)) > q.connectedness: //Update the vertices label
q.connectedness = min(u.connectedness,connectedness(u, q))
#if q.connectedness >= lambda: //set parent vertex to the searching vertex
q.parent = u
G = G - u //Remove vertex u from G
V = V + u //u is added to tree
u = vertex with max connectedness in all vertices that are adjacent to vertices in V // Find the new growing vertex
R[i] = V //ith region
i = i + 1 | ikangan/NetworkPartition | RG_Pseudocode.py | Python | gpl-3.0 | 2,212 |
"""
There are two types of functions:
1) defined function like exp or sin that has a name and body
(in the sense that function can be evaluated).
e = exp
2) undefined function with a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) this isn't implemented yet: anonymous function or lambda function that has
no name but has body with dummy variables. Examples of anonymous function
creation:
f = Lambda(x, exp(x)*x)
f = Lambda(exp(x)*x) # free symbols of expr define the number of args
f = Lambda(exp(x)*x) # free symbols in the expression define the number
# of arguments
f = exp * Lambda(x,x)
4) isn't implemented yet: composition of functions, like (sin+cos)(x), this
works in sympy core, but needs to be ported back to SymPy.
Examples
========
>>> import sympy
>>> f = sympy.Function("f")
>>> from sympy.abc import x
>>> f(x)
f(x)
>>> print sympy.srepr(f(x).func)
Function('f')
>>> f(x).args
(x,)
"""
from core import BasicMeta, C
from assumptions import ManagedProperties
from basic import Basic
from singleton import S
from sympify import sympify
from expr import Expr, AtomicExpr
from decorators import _sympifyit
from compatibility import iterable,is_sequence
from cache import cacheit
from numbers import Rational, Float
from add import Add
from sympy.core.containers import Tuple, Dict
from sympy.core.logic import fuzzy_and
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import uniq
from sympy import mpmath
import sympy.mpmath.libmp as mlib
def _coeff_isneg(a):
"""Return True if the leading Number is negative.
Examples
========
>>> from sympy.core.function import _coeff_isneg
>>> from sympy import S, Symbol, oo, pi
>>> _coeff_isneg(-3*pi)
True
>>> _coeff_isneg(S(3))
False
>>> _coeff_isneg(-oo)
True
>>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1
False
"""
if a.is_Mul:
a = a.args[0]
return a.is_Number and a.is_negative
class PoleError(Exception):
    """Raised when a computation encounters a pole of the expression being
    processed (presumably during series expansion -- the raising sites are
    outside this file's visible scope)."""
    pass
class ArgumentIndexError(ValueError):
    """Raised for an invalid argument-index operation on a Function.

    Expected to be constructed as ``ArgumentIndexError(function, index)``,
    i.e. ``args[0]`` is the function and ``args[1]`` the offending index.
    """

    def __str__(self):
        func, index = self.args[0], self.args[1]
        return ("Invalid operation with argument number %s for Function %s"
                % (index, func))
class FunctionClass(ManagedProperties):
    """
    Base class for function classes. FunctionClass is a subclass of type.

    Use Function('<function name>' [ , signature ]) to create
    undefined function classes.
    """
    __metaclass__ = BasicMeta

    # Alias to the raw type constructor, bypassing overridden __new__s.
    _new = type.__new__

    def __repr__(cls):
        # A function class prints as just its name (e.g. ``sin``).
        return cls.__name__
class Application(Basic):
    """
    Base class for applied functions.

    Instances of Application represent the result of applying an application of
    any type to any object.
    """
    __metaclass__ = FunctionClass
    __slots__ = []

    is_Function = True

    # Number of arguments the function accepts; None means unrestricted.
    nargs = None

    @cacheit
    def __new__(cls, *args, **options):
        # Sympify all arguments so downstream code sees Basic instances.
        # (Python 2: map() returns a list here.)
        args = map(sympify, args)
        evaluate = options.pop('evaluate', True)
        if options:
            raise ValueError("Unknown options: %s" % options)

        if evaluate:
            # Give the class a chance to return a canonical/simplified form.
            # eval() returning None means "build an unevaluated instance".
            evaluated = cls.eval(*args)
            if evaluated is not None:
                return evaluated
        return super(Application, cls).__new__(cls, *args)

    @classmethod
    def eval(cls, *args):
        """
        Returns a canonical form of cls applied to arguments args.

        The eval() method is called when the class cls is about to be
        instantiated and it should return either some simplified instance
        (possible of some other class), or if the class cls should be
        unmodified, return None.

        Examples of eval() for the function "sign"
        ---------------------------------------------

        @classmethod
        def eval(cls, arg):
            if arg is S.NaN:
                return S.NaN
            if arg is S.Zero: return S.Zero
            if arg.is_positive: return S.One
            if arg.is_negative: return S.NegativeOne
            if isinstance(arg, C.Mul):
                coeff, terms = arg.as_coeff_Mul(rational=True)
                if coeff is not S.One:
                    return cls(coeff) * cls(terms)

        """
        return

    @property
    def func(self):
        # The "head" of an applied function is its class.
        return self.__class__

    def _eval_subs(self, old, new):
        # Substitute one applied function head for another, e.g. f(x) -> g(x),
        # but only when the replacement accepts a compatible argument count.
        if (old.is_Function and new.is_Function and
            old == self.func and
            (self.nargs == new.nargs or not new.nargs or
             isinstance(new.nargs, tuple) and self.nargs in new.nargs)):
            return new(*self.args)
class Function(Application, Expr):
    """Base class for applied mathematical functions.
    It also serves as a constructor for undefined function classes.
    Examples
    ========
    First example shows how to use Function as a constructor for undefined
    function classes:
    >>> from sympy import Function, Symbol
    >>> x = Symbol('x')
    >>> f = Function('f')
    >>> g = Function('g')(x)
    >>> f
    f
    >>> f(x)
    f(x)
    >>> g
    g(x)
    >>> f(x).diff(x)
    Derivative(f(x), x)
    >>> g.diff(x)
    Derivative(g(x), x)
    In the following example Function is used as a base class for
    ``my_func`` that represents a mathematical function *my_func*. Suppose
    that it is well known, that *my_func(0)* is *1* and *my_func* at infinity
    goes to *0*, so we want those two simplifications to occur automatically.
    Suppose also that *my_func(x)* is real exactly when *x* is real. Here is
    an implementation that honours those requirements:
    >>> from sympy import Function, S, oo, I, sin
    >>> class my_func(Function):
    ...
    ...     nargs = 1
    ...
    ...     @classmethod
    ...     def eval(cls, x):
    ...         if x.is_Number:
    ...             if x is S.Zero:
    ...                 return S.One
    ...             elif x is S.Infinity:
    ...                 return S.Zero
    ...
    ...     def _eval_is_real(self):
    ...         return self.args[0].is_real
    ...
    >>> x = S('x')
    >>> my_func(0) + sin(0)
    1
    >>> my_func(oo)
    0
    >>> my_func(3.54).n() # Not yet implemented for my_func.
    my_func(3.54)
    >>> my_func(I).is_real
    False
    In order for ``my_func`` to become useful, several other methods would
    need to be implemented. See source code of some of the already
    implemented functions for more complete examples.
    """
    @property
    def _diff_wrt(self):
        """Allow derivatives wrt functions.
        Examples
        ========
        >>> from sympy import Function, Symbol
        >>> f = Function('f')
        >>> x = Symbol('x')
        >>> f(x)._diff_wrt
        True
        """
        return True
    @cacheit
    def __new__(cls, *args, **options):
        # Handle calls like Function('f'): create a new undefined function
        # class instead of an applied instance.
        if cls is Function:
            return UndefinedFunction(*args)
        if cls.nargs is not None:
            # Normalize nargs to a tuple of allowed argument counts.
            if isinstance(cls.nargs, tuple):
                nargs = cls.nargs
            else:
                nargs = (cls.nargs,)
            n = len(args)
            if n not in nargs:
                # XXX: exception message must be in exactly this format to make
                # it work with NumPy's functions like vectorize(). The ideal
                # solution would be just to attach metadata to the exception
                # and change NumPy to take advantage of this.
                temp = ('%(name)s takes exactly %(args)s '
                       'argument%(plural)s (%(given)s given)')
                raise TypeError(temp %
                    {
                    'name': cls,
                    'args': cls.nargs,
                    'plural': 's'*(n != 1),
                    'given': n})
        evaluate = options.get('evaluate', True)
        result = super(Function, cls).__new__(cls, *args, **options)
        if not evaluate or not isinstance(result, cls):
            return result
        # Auto-evalf only when *every* argument carries a precision
        # (pr2 > 0); the result is evaluated at the largest precision (pr)
        # found among the arguments.
        pr = max(cls._should_evalf(a) for a in result.args)
        pr2 = min(cls._should_evalf(a) for a in result.args)
        if pr2 > 0:
            return result.evalf(mlib.libmpf.prec_to_dps(pr))
        return result
    @classmethod
    def _should_evalf(cls, arg):
        """
        Decide if the function should automatically evalf().
        By default (in this implementation), this happens if (and only if) the
        ARG is a floating point number.
        This function is used by __new__.
        Returns the binary precision of ``arg`` when it should trigger
        evalf, else -1.
        """
        if arg.is_Float:
            return arg._prec
        if not arg.is_Add:
            return -1
        # An Add like ``2 + 3*I`` counts if either its real or imaginary
        # part is a Float.
        re, im = arg.as_real_imag()
        l = [a._prec for a in [re, im] if a.is_Float]
        l.append(-1)
        return max(l)
    @classmethod
    def class_key(cls):
        # Well-known functions get small fixed indices so they sort (and
        # therefore print) in a conventional order.
        funcs = {
            'exp': 10,
            'log': 11,
            'sin': 20,
            'cos': 21,
            'tan': 22,
            'cot': 23,
            'sinh': 30,
            'cosh': 31,
            'tanh': 32,
            'coth': 33,
            'conjugate': 40,
            're': 41,
            'im': 42,
            'arg': 43,
            }
        name = cls.__name__
        try:
            i = funcs[name]
        except KeyError:
            # Unlisted functions: undefined ones (nargs is None) sort before
            # all defined ones.
            nargs = cls.nargs
            i = 0 if nargs is None else 10000
        return 4, i, name
    @property
    def is_commutative(self):
        """
        Returns whether the function is commutative.
        """
        # NOTE(review): ``getattr`` with no default means an *unknown*
        # commutativity (None) yields False here, whereas
        # _eval_is_commutative below propagates None via fuzzy_and --
        # confirm this asymmetry is intended.
        if all(getattr(t, 'is_commutative') for t in self.args):
            return True
        else:
            return False
    def _eval_evalf(self, prec):
        # Lookup mpmath function based on name
        fname = self.func.__name__
        try:
            if not hasattr(mpmath, fname):
                # Some SymPy names differ from mpmath's; translate them.
                from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
                fname = MPMATH_TRANSLATIONS[fname]
            func = getattr(mpmath, fname)
        except (AttributeError, KeyError):
            # Fall back to a user-supplied numeric implementation, if any.
            try:
                return C.Float(self._imp_(*self.args), prec)
            except (AttributeError, TypeError):
                return
        # Convert all args to mpf or mpc
        # Convert the arguments to *higher* precision than requested for the
        # final result.
        # XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
        # we be more intelligent about it?
        try:
            args = [arg._to_mpmath(prec + 5) for arg in self.args]
        except ValueError:
            return
        # Set mpmath precision and apply. Make sure precision is restored
        # afterwards
        orig = mpmath.mp.prec
        try:
            mpmath.mp.prec = prec
            v = func(*args)
        finally:
            mpmath.mp.prec = orig
        return Expr._from_mpmath(v, prec)
    def _eval_derivative(self, s):
        # f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
        # Chain rule over every argument; terms with zero inner derivative
        # are skipped.
        i = 0
        l = []
        for a in self.args:
            i += 1
            da = a.diff(s)
            if da is S.Zero:
                continue
            try:
                df = self.fdiff(i)
            except ArgumentIndexError:
                # Fall back to the generic (unevaluated) derivative.
                df = Function.fdiff(self, i)
            l.append(df * da)
        return Add(*l)
    def _eval_is_commutative(self):
        return fuzzy_and(a.is_commutative for a in self.args)
    def as_base_exp(self):
        """
        Returns the method as the 2-tuple (base, exponent).
        """
        return self, S.One
    def _eval_aseries(self, n, args0, x, logx):
        """
        Compute an asymptotic expansion around args0, in terms of self.args.
        This function is only used internally by _eval_nseries and should not
        be called directly; derived classes can overwrite this to implement
        asymptotic expansions.
        """
        from sympy.utilities.misc import filldedent
        raise PoleError(filldedent('''
            Asymptotic expansion of %s around %s is
            not implemented.''' % (type(self), args0)))
    def _eval_nseries(self, x, n, logx):
        """
        This function does compute series for multivariate functions,
        but the expansion is always in terms of *one* variable.
        Examples
        ========
        >>> from sympy import atan2, O
        >>> from sympy.abc import x, y
        >>> atan2(x, y).series(x, n=2)
        atan2(0, y) + x/y + O(x**2)
        >>> atan2(x, y).series(y, n=2)
        -y/x + atan2(x, 0) + O(y**2)
        This function also computes asymptotic expansions, if necessary
        and possible:
        >>> from sympy import loggamma
        >>> loggamma(1/x)._eval_nseries(x,0,None)
        -1/x - log(x)/x + log(x)/2 + O(1)
        """
        if self.func.nargs is None:
            from sympy.utilities.misc import filldedent
            raise NotImplementedError(filldedent('''
                series for user-defined functions are not
                supported.'''))
        args = self.args
        args0 = [t.limit(x, 0) for t in args]
        if any(t.is_bounded == False for t in args0):
            # At least one argument blows up as x -> 0: try an asymptotic
            # expansion first, otherwise expand around the (bounded)
            # logarithmic leading behaviour.
            from sympy import oo, zoo, nan
            # XXX could use t.as_leading_term(x) here but it's a little
            # slower
            a = [t.compute_leading_term(x, logx=logx) for t in args]
            a0 = [t.limit(x, 0) for t in a]
            if any ([t.has(oo, -oo, zoo, nan) for t in a0]):
                return self._eval_aseries(n, args0, x, logx
                        )._eval_nseries(x, n, logx)
            # Careful: the argument goes to oo, but only logarithmically so. We
            # are supposed to do a power series expansion "around the
            # logarithmic term". e.g.
            #      f(1+x+log(x))
            #  -> f(1+logx) + x*f'(1+logx) + O(x**2)
            # where 'logx' is given in the argument
            a = [t._eval_nseries(x, n, logx) for t in args]
            z = [r - r0 for (r, r0) in zip(a, a0)]
            # NOTE(review): ``Dummy`` is not among this module's visible
            # imports (elsewhere ``C.Dummy`` is used) -- verify it is in
            # scope.
            p = [Dummy() for t in z]
            q = []
            v = None
            # Exactly one argument may deviate in an x-dependent way; it is
            # replaced by the dummy ``v`` so the expansion can be done in v.
            for ai, zi, pi in zip(a0, z, p):
                if zi.has(x):
                    if v is not None:
                        raise NotImplementedError
                    q.append(ai + pi)
                    v = pi
                else:
                    q.append(ai)
            e1 = self.func(*q)
            if v is None:
                return e1
            s = e1._eval_nseries(v, n, logx)
            o = s.getO()
            s = s.removeO()
            # NOTE(review): ``zi`` below is whatever the loop left it bound
            # to (the *last* argument's deviation); that matches ``v`` only
            # when the x-dependent argument is the last one -- confirm.
            s = s.subs(v, zi).expand() + C.Order(o.expr.subs(v, zi), x)
            return s
        if (self.func.nargs == 1 and args0[0]) or self.func.nargs > 1:
            e = self
            e1 = e.expand()
            if e == e1:
                #for example when e = sin(x+1) or e = sin(cos(x))
                #let's try the general algorithm
                term = e.subs(x, S.Zero)
                if term.is_bounded is False or term is S.NaN:
                    raise PoleError("Cannot expand %s around 0" % (self))
                series = term
                fact = S.One
                # Plain Taylor series: differentiate repeatedly at 0.
                for i in range(n-1):
                    i += 1
                    fact *= Rational(i)
                    e = e.diff(x)
                    subs = e.subs(x, S.Zero)
                    if subs is S.NaN:
                        # try to evaluate a limit if we have to
                        subs = e.limit(x, S.Zero)
                    if subs.is_bounded is False:
                        raise PoleError("Cannot expand %s around 0" % (self))
                    term = subs*(x**i)/fact
                    term = term.expand()
                    series += term
                return series + C.Order(x**n, x)
            return e1.nseries(x, n=n, logx=logx)
        # Single argument vanishing at 0: sum the class's taylor_term
        # contributions directly.
        arg = self.args[0]
        l = []
        g = None
        for i in xrange(n+2):
            g = self.taylor_term(i, arg, g)
            g = g.nseries(x, n=n, logx=logx)
            l.append(g)
        return Add(*l) + C.Order(x**n, x)
    def _eval_rewrite(self, pattern, rule, **hints):
        if hints.get('deep', False):
            args = [a._eval_rewrite(pattern, rule, **hints) for a in self.args]
        else:
            args = self.args
        # Dispatch to the named rewrite hook (e.g. _eval_rewrite_as_exp)
        # when this function matches the requested pattern.
        if pattern is None or isinstance(self.func, pattern):
            if hasattr(self, rule):
                rewritten = getattr(self, rule)(*args)
                if rewritten is not None:
                    return rewritten
        return self.func(*args)
    def fdiff(self, argindex=1):
        """
        Returns the first derivative of the function.
        """
        if self.nargs is not None:
            if isinstance(self.nargs, tuple):
                nargs = self.nargs[-1]
            else:
                nargs = self.nargs
            if not (1<=argindex<=nargs):
                raise ArgumentIndexError(self, argindex)
        if not self.args[argindex-1].is_Symbol:
            # See issue 1525 and issue 1620 and issue 2501
            # Differentiating wrt a non-Symbol argument: substitute a dummy,
            # differentiate, then substitute back via Subs.
            arg_dummy = C.Dummy('xi_%i' % argindex)
            return Subs(Derivative(
                self.subs(self.args[argindex-1], arg_dummy),
                arg_dummy), arg_dummy, self.args[argindex-1])
        return Derivative(self,self.args[argindex-1],evaluate=False)
    def _eval_as_leading_term(self, x):
        """Stub that should be overridden by new Functions to return
        the first non-zero term in a series if ever an x-dependent
        argument whose leading term vanishes as x -> 0 might be encountered.
        See, for example, cos._eval_as_leading_term.
        """
        args = [a.as_leading_term(x) for a in self.args]
        o = C.Order(1, x)
        if any(x in a.free_symbols and o.contains(a) for a in args):
            # Whereas x and any finite number are contained in O(1, x),
            # expressions like 1/x are not. If any arg simplified to a
            # vanishing expression as x -> 0 (like x or x**2, but not
            # 3, 1/x, etc...) then the _eval_as_leading_term is needed
            # to supply the first non-zero term of the series,
            #
            # e.g. expression    leading term
            #      ----------    ------------
            #      cos(1/x)      cos(1/x)
            #      cos(cos(x))   cos(1)
            #      cos(x)        1 <- _eval_as_leading_term needed
            #      sin(x)        x <- _eval_as_leading_term needed
            #
            raise NotImplementedError(
                '%s has no _eval_as_leading_term routine' % self.func)
        else:
            return self.func(*args)
    @classmethod
    def taylor_term(cls, n, x, *previous_terms):
        """General method for the taylor term.
        This method is slow, because it differentiates n-times. Subclasses can
        redefine it to make it faster by using the "previous_terms".
        """
        x = sympify(x)
        return cls(x).diff(x, n).subs(x, 0) * x**n / C.factorial(n)
class AppliedUndef(Function):
    """
    Base class for expressions resulting from the application of an undefined
    function.
    """
    def __new__(cls, *args, **options):
        args = map(sympify, args)  # NOTE: Python 2 ``map`` returns a list here
        result = super(AppliedUndef, cls).__new__(cls, *args, **options)
        # An undefined function accepts exactly as many arguments as it was
        # first called with; record that on the instance.
        result.nargs = len(args)
        return result
class UndefinedFunction(FunctionClass):
    """
    The (meta)class of undefined functions.

    Calling it with a name manufactures a brand-new ``AppliedUndef``
    subclass bearing that name.
    """
    def __new__(mcl, name):
        bases = (AppliedUndef,)
        return BasicMeta.__new__(mcl, name, bases, {})
class WildFunction(Function, AtomicExpr):
    """
    WildFunction() matches any expression but another WildFunction()
    XXX is this as intended, does it work ?
    """
    nargs = 1
    def __new__(cls, name, **assumptions):
        obj = Function.__new__(cls, name, **assumptions)
        obj.name = name
        return obj
    def matches(self, expr, repl_dict={}):
        # NOTE: the mutable default is safe here because the dict is copied
        # before being mutated.
        if self.nargs is not None:
            # Only match applied functions with the same argument count.
            if not hasattr(expr,'nargs') or self.nargs != expr.nargs:
                return None
        repl_dict = repl_dict.copy()
        repl_dict[self] = expr
        return repl_dict
    @property
    def is_number(self):
        return False
class Derivative(Expr):
    """
    Carries out differentiation of the given expression with respect to symbols.
    expr must define ._eval_derivative(symbol) method that returns
    the differentiation result. This function only needs to consider the
    non-trivial case where expr contains symbol and it should call the diff()
    method internally (not _eval_derivative); Derivative should be the only
    one to call _eval_derivative.
    Ordering of variables:
    If evaluate is set to True and the expression can not be evaluated, the
    list of differentiation symbols will be sorted, that is, the expression is
    assumed to have continuous derivatives up to the order asked. This sorting
    assumes that derivatives wrt Symbols commute, derivatives wrt non-Symbols
    commute, but Symbol and non-Symbol derivatives don't commute with each
    other.
    Derivative wrt non-Symbols:
    This class also allows derivatives wrt non-Symbols that have _diff_wrt
    set to True, such as Function and Derivative. When a derivative wrt a non-
    Symbol is attempted, the non-Symbol is temporarily converted to a Symbol
    while the differentiation is performed.
    Note that this may seem strange, that Derivative allows things like
    f(g(x)).diff(g(x)), or even f(cos(x)).diff(cos(x)). The motivation for
    allowing this syntax is to make it easier to work with variational calculus
    (i.e., the Euler-Lagrange method). The best way to understand this is that
    the action of derivative with respect to a non-Symbol is defined by the
    above description: the object is substituted for a Symbol and the
    derivative is taken with respect to that. This action is only allowed for
    objects for which this can be done unambiguously, for example Function and
    Derivative objects. Note that this leads to what may appear to be
    mathematically inconsistent results. For example::
        >>> from sympy import cos, sin, sqrt
        >>> from sympy.abc import x
        >>> (2*cos(x)).diff(cos(x))
        2
        >>> (2*sqrt(1 - sin(x)**2)).diff(cos(x))
        0
    This appears wrong because in fact 2*cos(x) and 2*sqrt(1 - sin(x)**2) are
    identically equal. However this is the wrong way to think of this. Think
    of it instead as if we have something like this::
        >>> from sympy.abc import c, s
        >>> def F(u):
        ...     return 2*u
        ...
        >>> def G(u):
        ...     return 2*sqrt(1 - u**2)
        ...
        >>> F(cos(x))
        2*cos(x)
        >>> G(sin(x))
        2*sqrt(-sin(x)**2 + 1)
        >>> F(c).diff(c)
        2
        >>> F(c).diff(c)
        2
        >>> G(s).diff(c)
        0
        >>> G(sin(x)).diff(cos(x))
        0
    Here, the Symbols c and s act just like the functions cos(x) and sin(x),
    respectively. Think of 2*cos(x) as f(c).subs(c, cos(x)) (or f(c) *at*
    c = cos(x)) and 2*sqrt(1 - sin(x)**2) as g(s).subs(s, sin(x)) (or g(s) *at*
    s = sin(x)), where f(u) == 2*u and g(u) == 2*sqrt(1 - u**2). Here, we
    define the function first and evaluate it at the function, but we can
    actually unambiguously do this in reverse in SymPy, because
    expr.subs(Function, Symbol) is well-defined: just structurally replace the
    function everywhere it appears in the expression.
    This is actually the same notational convenience used in the Euler-Lagrange
    method when one says F(t, f(t), f'(t)).diff(f(t)). What is actually meant
    is that the expression in question is represented by some F(t, u, v) at
    u = f(t) and v = f'(t), and F(t, f(t), f'(t)).diff(f(t)) simply means
    F(t, u, v).diff(u) at u = f(t).
    We do not allow to take derivative with respect to expressions where this
    is not so well defined. For example, we do not allow expr.diff(x*y)
    because there are multiple ways of structurally defining where x*y appears
    in an expression, some of which may surprise the reader (for example, a
    very strict definition would have that (x*y*z).diff(x*y) == 0).
        >>> from sympy.abc import x, y, z
        >>> (x*y*z).diff(x*y)
        Traceback (most recent call last):
        ...
        ValueError: Can't differentiate wrt the variable: x*y, 1
    Note that this definition also fits in nicely with the definition of the
    chain rule. Note how the chain rule in SymPy is defined using unevaluated
    Subs objects::
        >>> from sympy import symbols, Function
        >>> f, g = symbols('f g', cls=Function)
        >>> f(2*g(x)).diff(x)
        2*Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1,), (2*g(x),))
        >>> f(g(x)).diff(x)
        Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1,), (g(x),))
    Finally, note that, to be consistent with variational calculus, and to
    ensure that the definition of substituting a Function for a Symbol in an
    expression is well-defined, derivatives of functions are assumed to not be
    related to the function.  In other words, we have::
        >>> from sympy import diff
        >>> diff(f(x), x).diff(f(x))
        0
    The same is actually true for derivatives of different orders::
        >>> diff(f(x), x, 2).diff(diff(f(x), x, 1))
        0
        >>> diff(f(x), x, 1).diff(diff(f(x), x, 2))
        0
    Note, any class can allow derivatives to be taken with respect to itself.
    See the docstring of Expr._diff_wrt.
    Examples
    ========
    Some basic examples:
        >>> from sympy import Derivative, Symbol, Function
        >>> f = Function('f')
        >>> g = Function('g')
        >>> x = Symbol('x')
        >>> y = Symbol('y')
        >>> Derivative(x**2, x, evaluate=True)
        2*x
        >>> Derivative(Derivative(f(x,y), x), y)
        Derivative(f(x, y), x, y)
        >>> Derivative(f(x), x, 3)
        Derivative(f(x), x, x, x)
        >>> Derivative(f(x, y), y, x, evaluate=True)
        Derivative(f(x, y), x, y)
    Now some derivatives wrt functions:
        >>> Derivative(f(x)**2, f(x), evaluate=True)
        2*f(x)
        >>> Derivative(f(g(x)), x, evaluate=True)
        Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1,), (g(x),))
    """
    is_Derivative = True
    @property
    def _diff_wrt(self):
        """Allow derivatives wrt Derivatives if it contains a function.
        Examples
        ========
        >>> from sympy import Function, Symbol, Derivative
        >>> f = Function('f')
        >>> x = Symbol('x')
        >>> Derivative(f(x),x)._diff_wrt
        True
        >>> Derivative(x**2,x)._diff_wrt
        False
        """
        if self.expr.is_Function:
            return True
        else:
            return False
    def __new__(cls, expr, *variables, **assumptions):
        expr = sympify(expr)
        # There are no variables, we differentiate wrt all of the free symbols
        # in expr.
        if not variables:
            variables = expr.free_symbols
            if len(variables) != 1:
                from sympy.utilities.misc import filldedent
                raise ValueError(filldedent('''
                    Since there is more than one variable in the
                    expression, the variable(s) of differentiation
                    must be supplied to differentiate %s''' % expr))
        # Standardize the variables by sympifying them and making appending a
        # count of 1 if there is only one variable: diff(e,x)->diff(e,x,1).
        variables = list(sympify(variables))
        if not variables[-1].is_Integer or len(variables) == 1:
            variables.append(S.One)
        # Split the list of variables into a list of the variables we are diff
        # wrt, where each element of the list has the form (s, count) where
        # s is the entity to diff wrt and count is the order of the
        # derivative.
        variable_count = []
        all_zero = True
        i = 0
        while i < len(variables) - 1: # process up to final Integer
            v, count = variables[i: i+2]
            iwas = i
            if v._diff_wrt:
                # We need to test the more specific case of count being an
                # Integer first.
                if count.is_Integer:
                    count = int(count)
                    i += 2
                elif count._diff_wrt:
                    # Two diff-wrt entities in a row: the first has an
                    # implicit count of 1.
                    count = 1
                    i += 1
            if i == iwas: # didn't get an update because of bad input
                from sympy.utilities.misc import filldedent
                raise ValueError(filldedent('''
                Can\'t differentiate wrt the variable: %s, %s''' % (v, count)))
            if all_zero and not count == 0:
                all_zero = False
            if count:
                variable_count.append((v, count))
        # We make a special case for 0th derivative, because there is no
        # good way to unambiguously print this.
        if all_zero:
            return expr
        # Pop evaluate because it is not really an assumption and we will need
        # to track use it carefully below.
        evaluate = assumptions.pop('evaluate', False)
        # Look for a quick exit if there are symbols that don't appear in
        # expression at all. Note, this cannot check non-symbols like
        # functions and Derivatives as those can be created by intermediate
        # derivatives.
        if evaluate:
            symbol_set = set(sc[0] for sc in variable_count if sc[0].is_Symbol)
            if symbol_set.difference(expr.free_symbols):
                return S.Zero
        # We make a generator so as to only generate a variable when necessary.
        # If a high order of derivative is requested and the expr becomes 0
        # after a few differentiations, then we won't need the other variables.
        variablegen = (v for v, count in variable_count for i in xrange(count))
        # If we can't compute the derivative of expr (but we wanted to) and
        # expr is itself not a Derivative, finish building an unevaluated
        # derivative class by calling Expr.__new__.
        if (not (hasattr(expr, '_eval_derivative') and evaluate) and
                (not isinstance(expr, Derivative))):
            variables = list(variablegen)
            # If we wanted to evaluate, we sort the variables into standard
            # order for later comparisons. This is too aggressive if evaluate
            # is False, so we don't do it in that case.
            if evaluate:
                #TODO: check if assumption of discontinuous derivatives exist
                variables = cls._sort_variables(variables)
            # Here we *don't* need to reinject evaluate into assumptions
            # because we are done with it and it is not an assumption that
            # Expr knows about.
            obj = Expr.__new__(cls, expr, *variables, **assumptions)
            return obj
        # Compute the derivative now by repeatedly calling the
        # _eval_derivative method of expr for each variable. When this method
        # returns None, the derivative couldn't be computed wrt that variable
        # and we save the variable for later.
        unhandled_variables = []
        # Once we encounter a non_symbol that is unhandled, we stop taking
        # derivatives entirely. This is because derivatives wrt functions
        # don't commute with derivatives wrt symbols and we can't safely
        # continue.
        unhandled_non_symbol = False
        for v in variablegen:
            is_symbol = v.is_Symbol
            if unhandled_non_symbol:
                obj = None
            else:
                if not is_symbol:
                    # Differentiate wrt a non-Symbol by temporarily swapping
                    # it for a Dummy (see the class docstring).
                    # NOTE(review): ``i`` here is the leftover index from the
                    # while loop above (the generator expression's ``i`` is
                    # local to the generator), so all dummies share one
                    # suffix -- appears harmless since each is substituted
                    # back immediately, but confirm.
                    new_v = C.Dummy('xi_%i' % i)
                    expr = expr.subs(v, new_v)
                    old_v = v
                    v = new_v
                obj = expr._eval_derivative(v)
                if not is_symbol:
                    if obj is not None:
                        obj = obj.subs(v, old_v)
                    v = old_v
            if obj is None:
                unhandled_variables.append(v)
                if not is_symbol:
                    unhandled_non_symbol = True
            elif obj is S.Zero:
                return S.Zero
            else:
                expr = obj
        if unhandled_variables:
            unhandled_variables = cls._sort_variables(unhandled_variables)
            expr = Expr.__new__(cls, expr, *unhandled_variables, **assumptions)
        else:
            # We got a Derivative at the end of it all, and we rebuild it by
            # sorting its variables.
            if isinstance(expr, Derivative):
                expr = Derivative(
                    expr.args[0], *cls._sort_variables(expr.args[1:])
                )
        return expr
    @classmethod
    def _sort_variables(cls, vars):
        """Sort variables, but disallow sorting of non-symbols.
        When taking derivatives, the following rules usually hold:
        * Derivative wrt different symbols commute.
        * Derivative wrt different non-symbols commute.
        * Derivatives wrt symbols and non-symbols don't commute.
        Examples
        --------
        >>> from sympy import Derivative, Function, symbols
        >>> vsort = Derivative._sort_variables
        >>> x, y, z = symbols('x y z')
        >>> f, g, h = symbols('f g h', cls=Function)
        >>> vsort((x,y,z))
        [x, y, z]
        >>> vsort((h(x),g(x),f(x)))
        [f(x), g(x), h(x)]
        >>> vsort((z,y,x,h(x),g(x),f(x)))
        [x, y, z, f(x), g(x), h(x)]
        >>> vsort((x,f(x),y,f(y)))
        [x, f(x), y, f(y)]
        >>> vsort((y,x,g(x),f(x),z,h(x),y,x))
        [x, y, f(x), g(x), z, h(x), x, y]
        >>> vsort((z,y,f(x),x,f(x),g(x)))
        [y, z, f(x), x, f(x), g(x)]
        >>> vsort((z,y,f(x),x,f(x),g(x),z,z,y,x))
        [y, z, f(x), x, f(x), g(x), x, y, z, z]
        """
        # Sort maximal runs of symbols and maximal runs of non-symbols
        # independently; a change of kind flushes the pending run, so
        # symbol/non-symbol order is never exchanged.
        sorted_vars = []
        symbol_part = []
        non_symbol_part = []
        for v in vars:
            if not v.is_Symbol:
                if len(symbol_part) > 0:
                    sorted_vars.extend(sorted(symbol_part,
                                              key=default_sort_key))
                    symbol_part = []
                non_symbol_part.append(v)
            else:
                if len(non_symbol_part) > 0:
                    sorted_vars.extend(sorted(non_symbol_part,
                                              key=default_sort_key))
                    non_symbol_part = []
                symbol_part.append(v)
        if len(non_symbol_part) > 0:
            sorted_vars.extend(sorted(non_symbol_part,
                                      key=default_sort_key))
        if len(symbol_part) > 0:
            sorted_vars.extend(sorted(symbol_part,
                                      key=default_sort_key))
        return sorted_vars
    def _eval_is_commutative(self):
        return self.expr.is_commutative
    def _eval_derivative(self, v):
        # If the variable s we are diff wrt is not in self.variables, we
        # assume that we might be able to take the derivative.
        if v not in self.variables:
            obj = self.expr.diff(v)
            if obj is S.Zero:
                return S.Zero
            if isinstance(obj, Derivative):
                return Derivative(obj.expr, *(self.variables + obj.variables))
            # The derivative wrt s could have simplified things such that the
            # derivative wrt things in self.variables can now be done. Thus,
            # we set evaluate=True to see if there are any other derivatives
            # that can be done. The most common case is when obj is a simple
            # number so that the derivative wrt anything else will vanish.
            return Derivative(obj, *self.variables, **{'evaluate': True})
        # In this case s was in self.variables so the derivative wrt s has
        # already been attempted and was not computed, either because it
        # couldn't be or evaluate=False originally.
        return Derivative(self.expr, *(self.variables + (v, )),
                          **{'evaluate': False})
    def doit(self, **hints):
        expr = self.expr
        if hints.get('deep', True):
            expr = expr.doit(**hints)
        hints['evaluate'] = True
        return Derivative(expr, *self.variables, **hints)
    @_sympifyit('z0', NotImplementedError)
    def doit_numerically(self, z0):
        """
        Evaluate the derivative at z numerically.
        When we can represent derivatives at a point, this should be folded
        into the normal evalf. For now, we need a special method.
        """
        from sympy import mpmath
        from sympy.core.expr import Expr
        if len(self.free_symbols) != 1 or len(self.variables) != 1:
            raise NotImplementedError('partials and higher order derivatives')
        z = list(self.free_symbols)[0]
        def eval(x):
            # Wrap the symbolic expression as an mpmath-callable function of
            # a single mpf/mpc value.
            f0 = self.expr.subs(z, Expr._from_mpmath(x, prec=mpmath.mp.prec))
            f0 = f0.evalf(mlib.libmpf.prec_to_dps(mpmath.mp.prec))
            return f0._to_mpmath(mpmath.mp.prec)
        return Expr._from_mpmath(mpmath.diff(eval,
                                             z0._to_mpmath(mpmath.mp.prec)),
                                 mpmath.mp.prec)
    @property
    def expr(self):
        # The expression being differentiated.
        return self._args[0]
    @property
    def variables(self):
        # The (flattened) differentiation variables.
        return self._args[1:]
    @property
    def free_symbols(self):
        return self.expr.free_symbols
    def _eval_subs(self, old, new):
        if old in self.variables and not new.is_Symbol:
            # Issue 1620
            # Substituting a non-Symbol for a differentiation variable must
            # stay unevaluated (wrap in Subs).
            return Subs(self, old, new)
        return Derivative(*map(lambda x: x._subs(old, new), self.args))
    def _eval_lseries(self, x):
        dx = self.args[1:]
        # Differentiate the series of the expression term by term.
        for term in self.args[0].lseries(x):
            yield Derivative(term, *dx)
    def _eval_nseries(self, x, n, logx):
        arg = self.args[0].nseries(x, n=n, logx=logx)
        o = arg.getO()
        dx = self.args[1:]
        rv = [Derivative(a, *dx) for a in Add.make_args(arg.removeO())]
        if o:
            rv.append(o/x)
        return Add(*rv)
    def _eval_as_leading_term(self, x):
        return self.args[0].as_leading_term(x)
class Lambda(Expr):
    """
    Lambda(x, expr) represents a lambda function similar to Python's
    'lambda x: expr'. A function of several variables is written as
    Lambda((x, y, ...), expr).
    A simple example:
    >>> from sympy import Lambda
    >>> from sympy.abc import x
    >>> f = Lambda(x, x**2)
    >>> f(4)
    16
    For multivariate functions, use:
    >>> from sympy.abc import y, z, t
    >>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
    >>> f2(1, 2, 3, 4)
    73
    A handy shortcut for lots of arguments:
    >>> p = x, y, z
    >>> f = Lambda(p, x + y*z)
    >>> f(*p)
    x + y*z
    """
    is_Function = True
    __slots__ = []
    def __new__(cls, variables, expr):
        # Accept either a single variable or any iterable of variables.
        try:
            variables = Tuple(*variables)
        except TypeError:
            variables = Tuple(variables)
        if len(variables) == 1 and variables[0] == expr:
            return S.IdentityFunction
        #use dummy variables internally, just to be sure
        new_variables = [C.Dummy(arg.name) for arg in variables]
        expr = sympify(expr).xreplace(dict(zip(variables, new_variables)))
        obj = Expr.__new__(cls, Tuple(*new_variables), expr)
        return obj
    @property
    def variables(self):
        """The variables used in the internal representation of the function"""
        return self._args[0]
    @property
    def expr(self):
        """The return value of the function"""
        return self._args[1]
    @property
    def free_symbols(self):
        return self.expr.free_symbols - set(self.variables)
    @property
    def nargs(self):
        """The number of arguments that this function takes"""
        return len(self._args[0])
    def __call__(self, *args):
        if len(args) != self.nargs:
            from sympy.utilities.misc import filldedent
            raise TypeError(filldedent('''
                %s takes %d arguments (%d given)
                ''' % (self, self.nargs, len(args))))
        return self.expr.xreplace(dict(zip(self.variables, args)))
    def __eq__(self, other):
        # Two Lambdas are equal when they compute the same expression after
        # their (dummy) variables are identified positionally.
        if not isinstance(other, Lambda):
            return False
        if self.nargs != other.nargs:
            return False
        selfexpr = self.args[1]
        otherexpr = other.args[1]
        otherexpr = otherexpr.xreplace(dict(zip(other.args[0], self.args[0])))
        return selfexpr == otherexpr
    def __ne__(self, other):
        return not(self == other)
    def __hash__(self):
        # Defining __eq__ suppresses inherited hashing; restore it
        # (consistent with _hashable_content below).
        return super(Lambda, self).__hash__()
    def _hashable_content(self):
        # Hash on arity and free symbols only, so alpha-equivalent Lambdas
        # (which compare equal above) hash alike.
        return (self.nargs, ) + tuple(sorted(self.free_symbols))
    @property
    def is_identity(self):
        """Return ``True`` if this ``Lambda`` is an identity function. """
        if len(self.args) == 2:
            return self.args[0] == self.args[1]
        else:
            return None
class Subs(Expr):
"""
Represents unevaluated substitutions of an expression.
``Subs(expr, x, x0)`` receives 3 arguments: an expression, a variable or
list of distinct variables and a point or list of evaluation points
corresponding to those variables.
``Subs`` objects are generally useful to represent unevaluated derivatives
calculated at a point.
The variables may be expressions, but they are subjected to the limitations
of subs(), so it is usually a good practice to use only symbols for
variables, since in that case there can be no ambiguity.
There's no automatic expansion - use the method .doit() to effect all
possible substitutions of the object and also of objects inside the
expression.
When evaluating derivatives at a point that is not a symbol, a Subs object
is returned. One is also able to calculate derivatives of Subs objects - in
this case the expression is always expanded (for the unevaluated form, use
Derivative()).
A simple example:
>>> from sympy import Subs, Function, sin
>>> from sympy.abc import x, y, z
>>> f = Function('f')
>>> e = Subs(f(x).diff(x), x, y)
>>> e.subs(y, 0)
Subs(Derivative(f(x), x), (x,), (0,))
>>> e.subs(f, sin).doit()
cos(y)
An example with several variables:
>>> Subs(f(x)*sin(y) + z, (x, y), (0, 1))
Subs(z + f(x)*sin(y), (x, y), (0, 1))
>>> _.doit()
z + f(0)*sin(1)
"""
def __new__(cls, expr, variables, point, **assumptions):
from sympy import Symbol
if not is_sequence(variables, Tuple):
variables = [variables]
variables = list(sympify(variables))
if uniq(variables) != variables:
repeated = [ v for v in set(variables)
if list(variables).count(v) > 1 ]
raise ValueError('cannot substitute expressions %s more than '
'once.' % repeated)
point = Tuple(*(point if is_sequence(point, Tuple) else [point]))
if len(point) != len(variables):
raise ValueError('Number of point values must be the same as '
'the number of variables.')
expr = sympify(expr)
# use symbols with names equal to the point value (with preppended _)
# to give a variable-independent expression
pre = "_"
pts = sorted(set(point), key=default_sort_key)
while 1:
s_pts = dict([(p, Symbol(pre + str(p))) for p in pts])
reps = [(v, s_pts[p])
for v, p in zip(variables, point)]
# if any underscore-preppended symbol is already a free symbol
# and is a variable with a different point value, then there
# is a clash, e.g. _0 clashes in Subs(_0 + _1, (_0, _1), (1, 0))
# because the new symbol that would be created is _1 but _1
# is already mapped to 0 so __0 and __1 are used for the new
# symbols
if any(r in expr.free_symbols and
r in variables and
Symbol(pre + str(point[variables.index(r)])) != r
for _, r in reps):
pre += "_"
continue
break
obj = Expr.__new__(cls, expr, Tuple(*variables), point)
obj._expr = expr.subs(reps)
return obj
def _eval_is_commutative(self):
return self.expr.is_commutative
def doit(self):
return self.expr.doit().subs(zip(self.variables, self.point))
def evalf(self, prec=None, **options):
if prec is None:
return self.doit().evalf(**options)
else:
return self.doit().evalf(prec, **options)
n = evalf
    @property
    def variables(self):
        """The variables to be evaluated (second constructor argument)."""
        return self._args[1]
    @property
    def expr(self):
        """The expression on which the substitution operates
        (first constructor argument)."""
        return self._args[0]
    @property
    def point(self):
        """The values for which the variables are to be substituted
        (third constructor argument)."""
        return self._args[2]
@property
def free_symbols(self):
return (self.expr.free_symbols - set(self.variables) |
set(self.point.free_symbols))
def __eq__(self, other):
if not isinstance(other, Subs):
return False
return self._expr == other._expr
def __ne__(self, other):
return not(self == other)
    def __hash__(self):
        # Delegate to the Basic hash, which is built from
        # _hashable_content (the canonical _expr) and therefore stays
        # consistent with __eq__.
        return super(Subs, self).__hash__()
    def _hashable_content(self):
        # Only the canonical (variable-renamed) expression matters,
        # matching the equality semantics defined in __eq__.
        return (self._expr, )
    def _eval_subs(self, old, new):
        # Substituting for one of the bound variables only moves the
        # corresponding point value.  Any other substitution falls
        # through (implicitly returns None), deferring to the generic
        # substitution machinery.
        if old in self.variables:
            pts = list(self.point.args)
            pts[list(self.variables).index(old)] = new
            return Subs(self.expr, self.variables, pts)
    def _eval_derivative(self, s):
        # Derivative with respect to a symbol not present anywhere in
        # the substituted expression is zero.
        if s not in self.free_symbols:
            return S.Zero
        # Chain rule: the direct derivative of the expression, plus the
        # contribution of each point value's dependence on s.
        return Subs(self.expr.diff(s), self.variables, self.point).doit() \
            + Add(*[ Subs(point.diff(s) * self.expr.diff(arg),
                    self.variables, self.point).doit() for arg,
                    point in zip(self.variables, self.point) ])
def diff(f, *symbols, **kwargs):
    """
    Differentiate f with respect to symbols.
    This is just a wrapper to unify .diff() and the Derivative class; its
    interface is similar to that of integrate(). You can use the same
    shortcuts for multiple variables as with Derivative. For example,
    diff(f(x), x, x, x) and diff(f(x), x, 3) both return the third derivative
    of f(x).
    You can pass evaluate=False to get an unevaluated Derivative class. Note
    that if there are 0 symbols (such as diff(f(x), x, 0)), then the result
    will be the function (the zeroth derivative), even if evaluate=False.
    Examples
    ========
    >>> from sympy import sin, cos, Function, diff
    >>> from sympy.abc import x, y
    >>> f = Function('f')
    >>> diff(sin(x), x)
    cos(x)
    >>> diff(f(x), x, x, x)
    Derivative(f(x), x, x, x)
    >>> diff(f(x), x, 3)
    Derivative(f(x), x, x, x)
    >>> diff(sin(x)*cos(y), x, 2, y, 2)
    sin(x)*cos(y)
    >>> type(diff(sin(x), x))
    cos
    >>> type(diff(sin(x), x, evaluate=False))
    <class 'sympy.core.function.Derivative'>
    >>> type(diff(sin(x), x, 0))
    sin
    >>> type(diff(sin(x), x, 0, evaluate=False))
    sin
    >>> diff(sin(x))
    cos(x)
    >>> diff(sin(x*y))
    Traceback (most recent call last):
    ...
    ValueError: specify differentiation variables to differentiate sin(x*y)
    Note that ``diff(sin(x))`` syntax is meant only for convenience
    in interactive sessions and should be avoided in library code.
    References
    ==========
    http://documents.wolfram.com/v5/Built-inFunctions/AlgebraicComputation/
    Calculus/D.html
    See Also
    ========
    Derivative
    """
    # Evaluate by default; Derivative does all of the actual work.
    kwargs.setdefault('evaluate', True)
    return Derivative(f, *symbols, **kwargs)
def expand(e, deep=True, modulus=None, power_base=True, power_exp=True, \
mul=True, log=True, multinomial=True, basic=True, **hints):
"""
Expand an expression using methods given as hints.
Hints evaluated unless explicitly set to False are: ``basic``, ``log``,
``multinomial``, ``mul``, ``power_base``, and ``power_exp`` The following
hints are supported but not applied unless set to True: ``complex``,
``func``, and ``trig``. In addition, the following meta-hints are
supported by some or all of the other hints: ``frac``, ``numer``,
``denom``, ``modulus``, and ``force``. ``deep`` is supported by all
hints. Additionally, subclasses of Expr may define their own hints or
meta-hints.
The ``basic`` hint is used for any special rewriting of an object that
should be done automatically (along with the other hints like ``mul``)
when expand is called. This is a catch-all hint to handle any sort of
expansion that may not be described by the existing hint names. To use
this hint an object should override the ``_eval_expand_basic`` method.
Objects may also define their own expand methods, which are not run by
default. See the API section below.
If ``deep`` is set to ``True`` (the default), things like arguments of
functions are recursively expanded. Use ``deep=False`` to only expand on
the top level.
If the ``force`` hint is used, assumptions about variables will be ignored
in making the expansion.
Hints
=====
These hints are run by default
mul
---
Distributes multiplication over addition:
>>> from sympy import cos, exp, sin
>>> from sympy.abc import x, y, z
>>> (y*(x + z)).expand(mul=True)
x*y + y*z
multinomial
-----------
Expand (x + y + ...)**n where n is a positive integer.
>>> ((x + y + z)**2).expand(multinomial=True)
x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2
power_exp
---------
Expand addition in exponents into multiplied bases.
>>> exp(x + y).expand(power_exp=True)
exp(x)*exp(y)
>>> (2**(x + y)).expand(power_exp=True)
2**x*2**y
power_base
----------
Split powers of multiplied bases.
This only happens by default if assumptions allow, or if the
``force`` meta-hint is used:
>>> ((x*y)**z).expand(power_base=True)
(x*y)**z
>>> ((x*y)**z).expand(power_base=True, force=True)
x**z*y**z
>>> ((2*y)**z).expand(power_base=True)
2**z*y**z
Note that in some cases where this expansion always holds, SymPy performs
it automatically:
>>> (x*y)**2
x**2*y**2
log
---
Pull out power of an argument as a coefficient and split logs products
into sums of logs.
Note that these only work if the arguments of the log function have the
proper assumptions--the arguments must be positive and the exponents must
be real--or else the ``force`` hint must be True:
>>> from sympy import log, symbols, oo
>>> log(x**2*y).expand(log=True)
log(x**2*y)
>>> log(x**2*y).expand(log=True, force=True)
2*log(x) + log(y)
>>> x, y = symbols('x,y', positive=True)
>>> log(x**2*y).expand(log=True)
2*log(x) + log(y)
basic
-----
This hint is intended primarily as a way for custom subclasses to enable
expansion by default.
These hints are not run by default:
complex
-------
Split an expression into real and imaginary parts.
>>> x, y = symbols('x,y')
>>> (x + y).expand(complex=True)
re(x) + re(y) + I*im(x) + I*im(y)
>>> cos(x).expand(complex=True)
-I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x))
Note that this is just a wrapper around ``as_real_imag()``. Most objects
that wish to redefine ``_eval_expand_complex()`` should consider
redefining ``as_real_imag()`` instead.
func
----
Expand other functions.
>>> from sympy import gamma
>>> gamma(x + 1).expand(func=True)
x*gamma(x)
trig
----
Do trigonometric expansions.
>>> cos(x + y).expand(trig=True)
-sin(x)*sin(y) + cos(x)*cos(y)
>>> sin(2*x).expand(trig=True)
2*sin(x)*cos(x)
Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)``
and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x)
= 1`. The current implementation uses the form obtained from Chebyshev
polynomials, but this may change. See `this MathWorld article
<http://mathworld.wolfram.com/Multiple-AngleFormulas.html>`_ for more
information.
Notes
=====
- You can shut off unwanted methods::
>>> (exp(x + y)*(x + y)).expand()
x*exp(x)*exp(y) + y*exp(x)*exp(y)
>>> (exp(x + y)*(x + y)).expand(power_exp=False)
x*exp(x + y) + y*exp(x + y)
>>> (exp(x + y)*(x + y)).expand(mul=False)
(x + y)*exp(x)*exp(y)
- Use deep=False to only expand on the top level::
>>> exp(x + exp(x + y)).expand()
exp(x)*exp(exp(x)*exp(y))
>>> exp(x + exp(x + y)).expand(deep=False)
exp(x)*exp(exp(x + y))
- Hints are applied in an arbitrary, but consistent order (in the current
implementation, they are applied in alphabetical order, except
multinomial comes before mul, but this may change). Because of this,
some hints may prevent expansion by other hints if they are applied
first. For example, ``mul`` may distribute multiplications and prevent
``log`` and ``power_base`` from expanding them. Also, if ``mul`` is
applied before ``multinomial`, the expression might not be fully
distributed. The solution is to use the various ``expand_hint`` helper
functions or to use ``hint=False`` to this function to finely control
which hints are applied. Here are some examples::
>>> from sympy import expand_log, expand, expand_mul, expand_power_base
>>> x, y, z = symbols('x,y,z', positive=True)
>>> expand(log(x*(y + z)))
log(x) + log(y + z)
Here, we see that ``log`` was applied before ``mul``. To get the log
expanded form, either of the following will work::
>>> expand_log(log(x*(y + z)))
log(x) + log(y + z)
>>> expand(log(x*(y + z)), mul=False)
log(x) + log(y + z)
A similar thing can happen with the ``power_base`` hint::
>>> expand((x*(y + z))**x)
(x*y + x*z)**x
To get the ``power_base`` expanded form, either of the following will
work::
>>> expand((x*(y + z))**x, mul=False)
x**x*(y + z)**x
>>> expand_power_base((x*(y + z))**x)
x**x*(y + z)**x
>>> expand((x + y)*y/x)
y + y**2/x
The parts of a rational expression can be targeted::
>>> expand((x + y)*y/x/(x + 1), frac=True)
(x*y + y**2)/(x**2 + x)
>>> expand((x + y)*y/x/(x + 1), numer=True)
(x*y + y**2)/(x*(x + 1))
>>> expand((x + y)*y/x/(x + 1), denom=True)
y*(x + y)/(x**2 + x)
- The ``modulus`` meta-hint can be used to reduce the coefficients of an
expression post-expansion::
>>> expand((3*x + 1)**2)
9*x**2 + 6*x + 1
>>> expand((3*x + 1)**2, modulus=5)
4*x**2 + x + 1
- Either ``expand()`` the function or ``.expand()`` the method can be
used. Both are equivalent::
>>> expand((x + 1)**2)
x**2 + 2*x + 1
>>> ((x + 1)**2).expand()
x**2 + 2*x + 1
API
===
Objects can define their own expand hints by defining
``_eval_expand_hint()``. The function should take the form::
def _eval_expand_hint(self, **hints):
# Only apply the method to the top-level expression
...
See also the example below. Objects should define ``_eval_expand_hint()``
methods only if ``hint`` applies to that specific object. The generic
``_eval_expand_hint()`` method defined in Expr will handle the no-op case.
Each hint should be responsible for expanding that hint only.
Furthermore, the expansion should be applied to the top-level expression
only. ``expand()`` takes care of the recursion that happens when
``deep=True``.
You should only call ``_eval_expand_hint()`` methods directly if you are
100% sure that the object has the method, as otherwise you are liable to
get unexpected ``AttributeError``s. Note, again, that you do not need to
recursively apply the hint to args of your object: this is handled
automatically by ``expand()``. ``_eval_expand_hint()`` should
generally not be used at all outside of an ``_eval_expand_hint()`` method.
If you want to apply a specific expansion from within another method, use
the public ``expand()`` function, method, or ``expand_hint()`` functions.
In order for expand to work, objects must be rebuildable by their args,
i.e., ``obj.func(*obj.args) == obj`` must hold.
Expand methods are passed ``**hints`` so that expand hints may use
'metahints'--hints that control how different expand methods are applied.
For example, the ``force=True`` hint described above that causes
``expand(log=True)`` to ignore assumptions is such a metahint. The
``deep`` meta-hint is handled exclusively by ``expand()`` and is not
passed to ``_eval_expand_hint()`` methods.
Note that expansion hints should generally be methods that perform some
kind of 'expansion'. For hints that simply rewrite an expression, use the
.rewrite() API.
Example
-------
>>> from sympy import Expr, sympify
>>> class MyClass(Expr):
... def __new__(cls, *args):
... args = sympify(args)
... return Expr.__new__(cls, *args)
...
... def _eval_expand_double(self, **hints):
... '''
... Doubles the args of MyClass.
...
... If there more than four args, doubling is not performed,
... unless force=True is also used (False by default).
... '''
... force = hints.pop('force', False)
... if not force and len(self.args) > 4:
... return self
... return self.func(*(self.args + self.args))
...
>>> a = MyClass(1, 2, MyClass(3, 4))
>>> a
MyClass(1, 2, MyClass(3, 4))
>>> a.expand(double=True)
MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4))
>>> a.expand(double=True, deep=False)
MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4))
>>> b = MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True)
MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True, force=True)
MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5)
See Also
========
expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig,
expand_power_base, expand_power_exp, expand_func, hyperexpand
"""
# don't modify this; modify the Expr.expand method
hints['power_base'] = power_base
hints['power_exp'] = power_exp
hints['mul'] = mul
hints['log'] = log
hints['multinomial'] = multinomial
hints['basic'] = basic
return sympify(e).expand(deep=deep, modulus=modulus, **hints)
# These are simple wrappers around single hints.
def expand_mul(expr, deep=True):
    """
    Wrapper around expand that only uses the mul hint. See the expand
    docstring for more information.
    Examples
    ========
    >>> from sympy import symbols, expand_mul, exp, log
    >>> x, y = symbols('x,y', positive=True)
    >>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
    x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2)
    """
    # Enable only the ``mul`` hint; suppress every other default hint.
    hints = {'mul': True, 'power_exp': False, 'power_base': False,
             'basic': False, 'multinomial': False, 'log': False}
    return sympify(expr).expand(deep=deep, **hints)
def expand_multinomial(expr, deep=True):
    """
    Wrapper around expand that only uses the multinomial hint. See the expand
    docstring for more information.
    Examples
    ========
    >>> from sympy import symbols, expand_multinomial, exp
    >>> x, y = symbols('x y', positive=True)
    >>> expand_multinomial((x + exp(x + 1))**2)
    x**2 + 2*x*exp(x + 1) + exp(2*x + 2)
    """
    # Enable only the ``multinomial`` hint; suppress all other defaults.
    hints = {'multinomial': True, 'mul': False, 'power_exp': False,
             'power_base': False, 'basic': False, 'log': False}
    return sympify(expr).expand(deep=deep, **hints)
def expand_log(expr, deep=True, force=False):
    """
    Wrapper around expand that only uses the log hint. See the expand
    docstring for more information.
    Examples
    ========
    >>> from sympy import symbols, expand_log, exp, log
    >>> x, y = symbols('x,y', positive=True)
    >>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
    (x + y)*(log(x) + 2*log(y))*exp(x + y)
    """
    # Enable only the ``log`` hint, forwarding the ``force`` meta-hint.
    hints = {'log': True, 'mul': False, 'power_exp': False,
             'power_base': False, 'multinomial': False, 'basic': False}
    return sympify(expr).expand(deep=deep, force=force, **hints)
def expand_func(expr, deep=True):
    """
    Wrapper around expand that only uses the func hint. See the expand
    docstring for more information.
    Examples
    ========
    >>> from sympy import expand_func, gamma
    >>> from sympy.abc import x
    >>> expand_func(gamma(x + 2))
    x*(x + 1)*gamma(x)
    """
    # Enable only the (non-default) ``func`` hint.
    hints = {'func': True, 'basic': False, 'log': False, 'mul': False,
             'power_exp': False, 'power_base': False, 'multinomial': False}
    return sympify(expr).expand(deep=deep, **hints)
def expand_trig(expr, deep=True):
    """
    Wrapper around expand that only uses the trig hint. See the expand
    docstring for more information.
    Examples
    ========
    >>> from sympy import expand_trig, sin, cos
    >>> from sympy.abc import x, y
    >>> expand_trig(sin(x+y)*(x+y))
    (x + y)*(sin(x)*cos(y) + sin(y)*cos(x))
    """
    # Enable only the (non-default) ``trig`` hint.
    hints = {'trig': True, 'basic': False, 'log': False, 'mul': False,
             'power_exp': False, 'power_base': False, 'multinomial': False}
    return sympify(expr).expand(deep=deep, **hints)
def expand_complex(expr, deep=True):
    """
    Wrapper around expand that only uses the complex hint. See the expand
    docstring for more information.
    Examples
    ========
    >>> from sympy import expand_complex, exp, sqrt, I
    >>> from sympy.abc import z
    >>> expand_complex(exp(z))
    I*exp(re(z))*sin(im(z)) + exp(re(z))*cos(im(z))
    >>> expand_complex(sqrt(I))
    sqrt(2)/2 + sqrt(2)*I/2
    See Also
    ========
    Expr.as_real_imag
    """
    # Enable only the (non-default) ``complex`` hint.
    hints = {'complex': True, 'basic': False, 'log': False, 'mul': False,
             'power_exp': False, 'power_base': False, 'multinomial': False}
    return sympify(expr).expand(deep=deep, **hints)
def expand_power_base(expr, deep=True, force=False):
    """
    Wrapper around expand that only uses the power_base hint.
    See the expand docstring for more information.
    A wrapper to expand(power_base=True) which separates a power with a base
    that is a Mul into a product of powers, without performing any other
    expansions, provided that assumptions about the power's base and exponent
    allow.
    deep=False (default is True) will only apply to the top-level expression.
    force=True (default is False) will cause the expansion to ignore
    assumptions about the base and exponent. When False, the expansion will
    only happen if the base is non-negative or the exponent is an integer.
    >>> from sympy.abc import x, y, z
    >>> from sympy import expand_power_base, sin, cos, exp
    >>> (x*y)**2
    x**2*y**2
    >>> (2*x)**y
    (2*x)**y
    >>> expand_power_base(_)
    2**y*x**y
    >>> expand_power_base((x*y)**z)
    (x*y)**z
    >>> expand_power_base((x*y)**z, force=True)
    x**z*y**z
    >>> expand_power_base(sin((x*y)**z), deep=False)
    sin((x*y)**z)
    >>> expand_power_base(sin((x*y)**z), force=True)
    sin(x**z*y**z)
    >>> expand_power_base((2*sin(x))**y + (2*cos(x))**y)
    2**y*sin(x)**y + 2**y*cos(x)**y
    >>> expand_power_base((2*exp(y))**x)
    2**x*exp(y)**x
    >>> expand_power_base((2*cos(x))**y)
    2**y*cos(x)**y
    Notice that sums are left untouched. If this is not the desired behavior,
    apply full ``expand()`` to the expression:
    >>> expand_power_base(((x+y)*z)**2)
    z**2*(x + y)**2
    >>> (((x+y)*z)**2).expand()
    x**2*z**2 + 2*x*y*z**2 + y**2*z**2
    >>> expand_power_base((2*y)**(1+z))
    2**(z + 1)*y**(z + 1)
    >>> ((2*y)**(1+z)).expand()
    2*2**z*y*y**z
    """
    # Enable only the ``power_base`` hint, forwarding the ``force``
    # meta-hint.
    hints = {'power_base': True, 'log': False, 'mul': False,
             'power_exp': False, 'multinomial': False, 'basic': False}
    return sympify(expr).expand(deep=deep, force=force, **hints)
def expand_power_exp(expr, deep=True):
    """
    Wrapper around expand that only uses the power_exp hint.
    See the expand docstring for more information.
    Examples
    ========
    >>> from sympy import expand_power_exp
    >>> from sympy.abc import x, y
    >>> expand_power_exp(x**(y + 2))
    x**2*x**y
    """
    # Enable only the ``power_exp`` hint; suppress all other hints.
    hints = {'power_exp': True, 'complex': False, 'basic': False,
             'log': False, 'mul': False, 'power_base': False,
             'multinomial': False}
    return sympify(expr).expand(deep=deep, **hints)
def count_ops(expr, visual=False):
    """
    Return a representation (integer or expression) of the operations in expr.
    If ``visual`` is ``False`` (default) then the sum of the coefficients of the
    visual expression will be returned.
    If ``visual`` is ``True`` then the number of each type of operation is shown
    with the core class types (or their virtual equivalent) multiplied by the
    number of times they occur.
    If expr is an iterable, the sum of the op counts of the
    items will be returned.
    Examples
    ========
    >>> from sympy.abc import a, b, x, y
    >>> from sympy import sin, count_ops
    Although there isn't a SUB object, minus signs are interpreted as
    either negations or subtractions:
    >>> (x - y).count_ops(visual=True)
    SUB
    >>> (-x).count_ops(visual=True)
    NEG
    Here, there are two Adds and a Pow:
    >>> (1 + a + b**2).count_ops(visual=True)
    2*ADD + POW
    In the following, an Add, Mul, Pow and two functions:
    >>> (sin(x)*x + sin(x)**2).count_ops(visual=True)
    ADD + MUL + POW + 2*SIN
    for a total of 5:
    >>> (sin(x)*x + sin(x)**2).count_ops(visual=False)
    5
    Note that "what you type" is not always what you get. The expression
    1/x/y is translated by sympy into 1/(x*y) so it gives a DIV and MUL rather
    than two DIVs:
    >>> (1/x/y).count_ops(visual=True)
    DIV + MUL
    The visual option can be used to demonstrate the difference in
    operations for expressions in different forms. Here, the Horner
    representation is compared with the expanded form of a polynomial:
    >>> eq=x*(1 + x*(2 + x*(3 + x)))
    >>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True)
    -MUL + 3*POW
    The count_ops function also handles iterables:
    >>> count_ops([x, sin(x), None, True, x + 2], visual=False)
    2
    >>> count_ops([x, sin(x), None, True, x + 2], visual=True)
    ADD + SIN
    >>> count_ops({x: sin(x), x + 2: y + 1}, visual=True)
    2*ADD + SIN
    """
    from sympy.simplify.simplify import fraction
    expr = sympify(expr)
    if isinstance(expr, Expr):
        # Walk the expression tree iteratively, collecting symbolic op
        # markers (NEG, DIV, SUB, ADD, MUL, ...) in ``ops``.
        ops = []
        args = [expr]
        NEG = C.Symbol('NEG')
        DIV = C.Symbol('DIV')
        SUB = C.Symbol('SUB')
        ADD = C.Symbol('ADD')
        while args:
            a = args.pop()
            if a.is_Rational:
                #-1/3 = NEG + DIV
                if a is not S.One:
                    if a.p < 0:
                        ops.append(NEG)
                    if a.q != 1:
                        ops.append(DIV)
                    continue
            elif a.is_Mul:
                # A leading negative coefficient counts as a negation;
                # strip it before looking at the rest of the product.
                if _coeff_isneg(a):
                    ops.append(NEG)
                    if a.args[0] is S.NegativeOne:
                        a = a.as_two_terms()[1]
                    else:
                        a = -a
                n, d = fraction(a)
                if n.is_Integer:
                    ops.append(DIV)
                    if n < 0:
                        ops.append(NEG)
                    args.append(d)
                    continue # won't be -Mul but could be Add
                elif d is not S.One:
                    if not d.is_Integer:
                        args.append(d)
                    ops.append(DIV)
                    args.append(n)
                    continue # could be -Mul
            elif a.is_Add:
                # Count negative terms as subtractions; the first term
                # determines whether a leading NEG/SUB correction applies.
                aargs = list(a.args)
                negs = 0
                for i, ai in enumerate(aargs):
                    if _coeff_isneg(ai):
                        negs += 1
                        args.append(-ai)
                        if i > 0:
                            ops.append(SUB)
                    else:
                        args.append(ai)
                        if i > 0:
                            ops.append(ADD)
                if negs == len(aargs): # -x - y = NEG + SUB
                    ops.append(NEG)
                elif _coeff_isneg(aargs[0]): # -x + y = SUB, but already recorded ADD
                    ops.append(SUB - ADD)
                continue
            if a.is_Pow and a.exp is S.NegativeOne:
                ops.append(DIV)
                args.append(a.base) # won't be -Mul but could be Add
                continue
            if (a.is_Mul or
                a.is_Pow or
                a.is_Function or
                isinstance(a, Derivative) or
                isinstance(a, C.Integral)):
                o = C.Symbol(a.func.__name__.upper())
                # count the args
                if (a.is_Mul or isinstance(a, C.LatticeOp)):
                    ops.append(o*(len(a.args) - 1))
                else:
                    ops.append(o)
            if not a.is_Symbol:
                args.extend(a.args)
    elif type(expr) is dict:
        # NOTE(review): iteritems is Python 2 only -- consistent with the
        # rest of this module.
        ops = [count_ops(k, visual=visual) +
               count_ops(v, visual=visual) for k, v in expr.iteritems()]
    elif iterable(expr):
        ops = [count_ops(i, visual=visual) for i in expr]
    elif not isinstance(expr, Basic):
        ops = []
    else: # it's Basic not isinstance(expr, Expr):
        assert isinstance(expr, Basic)
        ops = [count_ops(a, visual=visual) for a in expr.args]
    if not ops:
        if visual:
            return S.Zero
        return 0
    ops = Add(*ops)
    if visual:
        return ops
    if ops.is_Number:
        return int(ops)
    # Non-visual mode: sum the coefficients of the symbolic op markers.
    return sum(int((a.args or [1])[0]) for a in Add.make_args(ops))
def nfloat(expr, n=15, exponent=False):
    """Make all Rationals in expr Floats except those in exponents
    (unless the exponents flag is set to True).
    Examples
    ========
    >>> from sympy.core.function import nfloat
    >>> from sympy.abc import x, y
    >>> from sympy import cos, pi, S, sqrt
    >>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y))
    x**4 + 0.5*x + sqrt(y) + 1.5
    >>> nfloat(x**4 + sqrt(y), exponent=True)
    x**4.0 + y**0.5
    """
    from sympy.core import Pow
    # NOTE(review): basestring/iteritems are Python 2 only -- consistent
    # with the rest of this module.
    if iterable(expr, exclude=basestring):
        # Recurse into containers, preserving the container type.
        if isinstance(expr, (dict, Dict)):
            return type(expr)([(k, nfloat(v, n, exponent)) for k, v in
                               expr.iteritems()])
        return type(expr)([nfloat(a, n, exponent) for a in expr])
    elif not isinstance(expr, Expr):
        return Float(expr, '')
    elif expr.is_Float:
        return expr.n(n)
    elif expr.is_Integer:
        return Float(float(expr)).n(n)
    elif expr.is_Rational:
        return Float(expr).n(n)
    if not exponent:
        # Temporarily replace every exponent with a Dummy so that the
        # global evalf pass cannot touch it, then restore the exponents.
        bases = {}
        expos = {}
        reps = {}
        for p in expr.atoms(Pow):
            b, e = p.as_base_exp()
            b = bases.setdefault(p.base, nfloat(p.base, n, exponent))
            e = expos.setdefault(e, Dummy())
            reps[p] = Pow(b, e, evaluate=False)
        rv = expr.xreplace(dict(reps)).n(n).xreplace(
            dict([(v, k) for k, v in expos.iteritems()]))
    else:
        # Also float integer exponents.
        intex = lambda x: x.is_Pow and x.exp.is_Integer
        floex = lambda x: Pow(x.base, Float(x.exp, ''), evaluate=False)
        rv = expr.n(n).replace(intex, floex)
    # Process innermost functions last so nested arguments are floated
    # before their enclosing function is substituted.
    funcs = [f for f in rv.atoms(Function)]
    funcs.sort(key=count_ops)
    funcs.reverse()
    return rv.subs([(f, f.func(*[nfloat(a, n, exponent)
                                 for a in f.args])) for f in funcs])
from sympy.core.symbol import Dummy
| srjoglekar246/sympy | sympy/core/function.py | Python | bsd-3-clause | 72,630 |
# -*- coding: utf-8 -*-
__author__ = 'puras'
from django.conf.urls import patterns, include, url
from django.views.static import serve
from django.conf import settings
from bbs.views import index
from django.contrib import admin
admin.autodiscover()
# Project URL routing table.
# NOTE(review): patterns() was removed in Django 1.10 -- this file
# targets an older Django release; confirm before upgrading.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'moobo.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', index, name='bbs'),
    url(r'^bbs/', include('bbs.urls', namespace='bbs')),
    url(r'^upload/', include('editor.urls', namespace='editor')),
    # Serve uploaded media files directly (development use).
    url(r'^media/(?P<path>.*)$', serve, {
        'document_root': settings.MEDIA_ROOT
    }),
    # url(r'^blog/$', include(blog.urls, namespace='blog')),
    url(r'^admin/', include(admin.site.urls)),
)
| puras/moobo | moobo/urls.py | Python | mit | 758 |
import logging
import sys

from PIL import Image
from compat import BytesIO
from django.contrib.admin.options import BaseModelAdmin
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.urls import reverse
from django.utils.html import format_html

from spacelaunchnow import config
def get_launch_status(status):
    """Map a numeric launch status code to a human readable label."""
    labels = {
        0: 'Unknown',
        1: 'Go for Launch',
        2: 'Launch is NO-GO',
        3: 'Successful Launch',
        4: 'Launch Failed',
        5: 'Unplanned Hold',
        6: 'In Flight',
        7: 'Partial Failure',
    }
    # Unrecognized codes fall back to 'Unknown'.
    return labels.get(status, 'Unknown')
def get_agency_type(agency_type):
    """Map a numeric agency type code to a human readable label."""
    labels = {
        0: 'Unknown',
        1: 'Government',
        2: 'Multinational',
        3: 'Commercial',
        4: 'Educational',
        5: 'Private',
        6: 'Unknown',
    }
    # Unrecognized codes fall back to 'Unknown'.
    return labels.get(agency_type, 'Unknown')
def get_mission_type(mission_type):
    """Map a numeric mission type code to a human readable label."""
    labels = {
        0: 'Unknown',
        1: 'Earth Science',
        2: 'Planetary Science',
        3: 'Astrophysics',
        4: 'Heliophysics',
        5: 'Human Exploration',
        6: 'Robotic Exploration',
        7: 'Government/Top Secret',
        8: 'Tourism',
        9: 'Unknown',
        10: 'Communications',
        11: 'Resupply',
        12: 'Suborbital',
        13: 'Test Flight',
        14: 'Dedicated Rideshare',
        15: 'Navigation',
    }
    # Unrecognized codes fall back to 'Unknown'.
    return labels.get(mission_type, 'Unknown')
def resize_needed(item):
    """Return True if *item* is a stored image wider than 1920px.

    Unreadable or missing files are treated as not needing a resize.
    """
    if item and hasattr(item, 'url'):
        try:
            image = Image.open(item)
            if image.size[0] > 1920:
                return True
        # BUG FIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; only shield against ordinary errors.
        except Exception:
            return False
    return False
def resize_for_upload(item):
    """Return *item* downscaled to at most 1920px wide.

    Returns the original object unchanged when it is not a file with a
    URL, when it is already narrow enough, or when any error occurs
    (resizing is a best-effort optimization and must never break an
    upload).
    """
    if not (item and hasattr(item, 'url')):
        return item
    try:
        basewidth = 1920
        image = Image.open(item)
        if image.size[0] <= basewidth:
            return item
        # BUG FIX: Image.resize() returns an image whose ``format`` is
        # None, so the PNG check below never saw the real format.
        # Capture it before resizing.
        source_format = image.format
        wpercent = (basewidth / float(image.size[0]))
        hsize = int((float(image.size[1]) * float(wpercent)))
        output = BytesIO()
        image = image.resize((basewidth, hsize), Image.ANTIALIAS)
        if source_format == 'PNG' or image.mode == 'RGBA' or 'png' in item.name:
            imageformat = 'PNG'
        else:
            imageformat = 'JPEG'
        image.save(output, format=imageformat, optimize=True)
        output.seek(0)
        return InMemoryUploadedFile(output, 'FileField',
                                    ("%s." + imageformat.lower()) % item.name.split('.')[0],
                                    'image/' + imageformat.lower(),
                                    sys.getsizeof(output), None)
    # BUG FIX: bare ``except:`` replaced with Exception so that
    # SystemExit/KeyboardInterrupt are not swallowed.
    except Exception:
        return item
def admin_change_url(obj):
    """Return the admin change-page URL for *obj*."""
    meta = obj._meta
    route = 'admin:{0}_{1}_change'.format(meta.app_label,
                                          meta.model.__name__.lower())
    return reverse(route, args=(obj.pk,))
def admin_link(attr, short_description, empty_description="-"):
    """Decorator used for rendering a link to a related model in
    the admin detail page.
    attr (str):
        Name of the related field.
    short_description (str):
        Name if the field.
    empty_description (str):
        Value to display if the related field is None.
    The wrapped method receives the related object and should
    return the link text.
    Usage:
        @admin_link('credit_card', _('Credit Card'))
        def credit_card_link(self, credit_card):
            return credit_card.name
    """
    # NOTE(review): relies on ``format_html`` and ``admin_change_url``
    # (which uses ``reverse``) being importable at module level --
    # confirm the imports exist in this file.
    def wrap(func):
        def field_func(self, obj):
            related_obj = getattr(obj, attr)
            if related_obj is None:
                return empty_description
            # format_html escapes the link text for safe HTML output.
            url = admin_change_url(related_obj)
            return format_html(
                '<a href="{}">{}</a>',
                url,
                func(self, related_obj)
            )
        # Attributes consumed by the Django admin when rendering columns.
        field_func.short_description = short_description
        field_func.allow_tags = True
        return field_func
    return wrap
def get_map_url(location):
    """Fetch a static Google map centered on *location* and store it.

    Builds a Google Static Maps request from the location's name and
    saves the fetched image into ``location.map_image``.
    """
    import requests
    logger = logging.getLogger('django')
    # Enter your api key here
    api_key = config.GOOGLE_API_KEY
    # url variable store url
    url = "https://maps.googleapis.com/maps/api/staticmap?"
    # center defines the center of the map,
    # equidistant from all edges of the map.
    center = location.name
    # zoom defines the zoom
    # level of the map
    zoom = 8
    # BUG FIX: the size parameter previously contained a stray space
    # ("size= 600x400"), producing an invalid query string.
    full_url = (url + "center=" + center + "&zoom=" +
                str(zoom) + "&maptype=hybrid&size=600x400&scale=2&key=" +
                api_key)
    logger.info(full_url)
    image_content = ContentFile(requests.get(full_url).content)
    location.map_image.save("temp.jpg", image_content)
    logger.info(location.map_image.url)
def get_pad_url(pad):
    """Fetch a static Google map of the pad's coordinates and store it.

    Builds a Google Static Maps request with a marker at the pad's
    latitude/longitude and saves the image into ``pad.map_image``.
    """
    import requests
    logger = logging.getLogger('django')
    # Enter your api key here
    api_key = config.GOOGLE_API_KEY
    # url variable store url
    url = "https://maps.googleapis.com/maps/api/staticmap?"
    # center defines the center of the map,
    # equidistant from all edges of the map.
    center = "{0},{1}".format(pad.latitude, pad.longitude)
    # zoom defines the zoom
    # level of the map
    zoom = 12
    # BUG FIX: the size parameter previously contained a stray space
    # ("size= 600x400"), producing an invalid query string.
    full_url = (url + "center=" + center + "&zoom=" +
                str(zoom) + "&maptype=hybrid&size=600x400&scale=2" +
                "&markers=color:blue|label:P|" + center + "&key=" +
                api_key)
    logger.info(full_url)
    image_content = ContentFile(requests.get(full_url).content)
    pad.map_image.save("temp.jpg", image_content)
    logger.info(pad.map_image.url)
class AdminBaseWithSelectRelated(BaseModelAdmin):
    """
    Admin Base using list_select_related for get_queryset related fields
    """
    # Subclasses list related-field lookups ("foo" or "foo__bar") here.
    list_select_related = []
    def get_queryset(self, request):
        # Apply select_related so the changelist avoids N+1 queries.
        return super(AdminBaseWithSelectRelated, self).get_queryset(request).select_related(*self.list_select_related)
    def form_apply_select_related(self, form):
        # For nested lookups ("field__related"), also select_related the
        # queryset backing the form field for "field".
        for related_field in self.list_select_related:
            splitted = related_field.split('__')
            if len(splitted) > 1:
                field = splitted[0]
                related = '__'.join(splitted[1:])
                form.base_fields[field].queryset = form.base_fields[field].queryset.select_related(related)
| ItsCalebJones/SpaceLaunchNow-Server | api/utils/utilities.py | Python | apache-2.0 | 6,505 |
import json
import requests
class Building():
    """HTTP client for the buildings service.

    All methods are classmethods operating on the shared ``config``
    connection settings; responses other than HTTP 200 yield ``None``
    (or ``False`` for deletions).
    """
    # Service Setup -- default connection settings, overridable via
    # ``configure()``.
    config = {
        'schema': 'http',
        'host': 'localhost',
        'port': '9202',
        'endpoint': 'api/v1/buildings'
    }
    @classmethod
    def base_url(cls):
        """Form the base url for the service."""
        return "{schema}://{host}:{port}/{endpoint}".format(**cls.config)
    @classmethod
    def configure(cls, options=None):
        """Merge *options* (a dict) into the service configuration.

        BUG FIX: previously used a mutable default argument (``{}``).
        """
        cls.config.update(options or {})
    @classmethod
    def _json_or_none(cls, response):
        """Return the decoded JSON body on HTTP 200, else None."""
        if response.status_code == 200:
            return response.json()
        return None
    @classmethod
    def get_all(cls):
        """Return all buildings."""
        return cls._json_or_none(requests.get(cls.base_url()))
    @classmethod
    def get(cls, code):
        """Return the building identified by *code*."""
        return cls._json_or_none(requests.get(cls.base_url() + '/' + code))
    @classmethod
    def create(cls, attrs):
        """Create a building with the attributes passed in attrs dict."""
        r = requests.post(cls.base_url(), data=json.dumps(attrs))
        return cls._json_or_none(r)
    @classmethod
    def update(cls, code, attrs):
        """Update the building identified by *code* with attrs dict."""
        r = requests.put(cls.base_url() + '/' + code, data=json.dumps(attrs))
        return cls._json_or_none(r)
    @classmethod
    def delete(cls, code):
        """Delete the building identified by *code*; True on success."""
        r = requests.delete(cls.base_url() + '/' + code)
        return r.status_code == 204
    @classmethod
    def delete_all(cls):
        """Delete all buildings; True on success."""
        r = requests.delete(cls.base_url())
        return r.status_code == 204
    @classmethod
    def bulk_load(cls, json_string):
        """Bulk loads an array of buildings from a JSON string."""
        h = {
            'Content-Type': 'application/json'
        }
        return requests.post(cls.base_url(), data=json_string, headers=h)
| Foris/darwined-core-python-clients | darwined_core_python_clients/physical/buildings.py | Python | mit | 2,117 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from openstack import exceptions
from heat.common import template_format
from heat.engine.clients.os import senlin
from heat.engine.resources.openstack.senlin import receiver as sr
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
receiver_stack_template = """
heat_template_version: 2016-04-08
description: Senlin Receiver Template
resources:
senlin-receiver:
type: OS::Senlin::Receiver
properties:
name: SenlinReceiver
cluster: fake_cluster
action: CLUSTER_SCALE_OUT
type: webhook
params:
foo: bar
"""
class FakeReceiver(object):
    """Stand-in for the receiver object the senlin SDK client would return."""

    def __init__(self, id='some_id'):
        self.id = id
        self.name = "SenlinReceiver"
        self.cluster_id = "fake_cluster"
        self.action = "CLUSTER_SCALE_OUT"
        self.channel = {'alarm_url': "http://foo.bar/webhooks/fake_url"}

    def to_dict(self):
        """Return the receiver as a plain dict, mimicking the SDK's to_dict."""
        result = {
            'id': self.id,
            'name': self.name,
            'cluster_id': self.cluster_id,
            'action': self.action,
            'channel': self.channel,
        }
        result['actor'] = {'trust_id': ['fake_trust_id']}
        return result
class SenlinReceiverTest(common.HeatTestCase):
    """Unit tests for the OS::Senlin::Receiver resource plug-in."""

    def setUp(self):
        super(SenlinReceiverTest, self).setUp()
        # Replace the senlin client with a mock so no real API calls happen.
        self.senlin_mock = mock.MagicMock()
        self.patchobject(sr.Receiver, 'client',
                         return_value=self.senlin_mock)
        self.patchobject(senlin.ClusterConstraint, 'validate',
                         return_value=True)
        self.fake_r = FakeReceiver()
        self.t = template_format.parse(receiver_stack_template)

    def _init_recv(self, template):
        """Parse the template into a stack and return its receiver resource."""
        self.stack = utils.parse_stack(template)
        return self.stack['senlin-receiver']

    def _create_recv(self, template):
        """Create the receiver and assert it reaches (CREATE, COMPLETE)."""
        receiver = self._init_recv(template)
        self.senlin_mock.create_receiver.return_value = self.fake_r
        self.senlin_mock.get_receiver.return_value = self.fake_r
        scheduler.TaskRunner(receiver.create)()
        self.assertEqual((receiver.CREATE, receiver.COMPLETE),
                         receiver.state)
        self.assertEqual(self.fake_r.id, receiver.resource_id)
        return receiver

    def test_recv_create_success(self):
        self._create_recv(self.t)
        # The template properties must be passed straight through to senlin.
        self.senlin_mock.create_receiver.assert_called_once_with(
            name='SenlinReceiver',
            cluster_id='fake_cluster',
            action='CLUSTER_SCALE_OUT',
            type='webhook',
            params={'foo': 'bar'},
        )

    def test_recv_delete_success(self):
        self.senlin_mock.delete_receiver.return_value = None
        receiver = self._create_recv(self.t)
        scheduler.TaskRunner(receiver.delete)()
        self.senlin_mock.delete_receiver.assert_called_once_with(
            receiver.resource_id)

    def test_recv_delete_not_found(self):
        # A 404 from senlin on delete is treated as success (already gone).
        self.senlin_mock.delete_receiver.side_effect = [
            exceptions.ResourceNotFound(http_status=404)
        ]
        receiver = self._create_recv(self.t)
        scheduler.TaskRunner(receiver.delete)()
        self.senlin_mock.delete_receiver.assert_called_once_with(
            receiver.resource_id)

    def test_cluster_resolve_attribute(self):
        expected_show = {
            'id': 'some_id',
            'name': 'SenlinReceiver',
            'cluster_id': 'fake_cluster',
            'action': 'CLUSTER_SCALE_OUT',
            'channel': {'alarm_url': "http://foo.bar/webhooks/fake_url"},
            'actor': {'trust_id': ['fake_trust_id']}
        }
        receiver = self._create_recv(self.t)
        self.assertEqual(self.fake_r.channel,
                         receiver._resolve_attribute('channel'))
        self.assertEqual(expected_show,
                         receiver._show_resource())
| openstack/heat | heat/tests/openstack/senlin/test_receiver.py | Python | apache-2.0 | 4,412 |
p=['h','e','l','l','o']
q=p
q[0]='y'
p=[0,0]
print p+q | robertstepp/Clara-Oswin-Oswald | test.py | Python | apache-2.0 | 54 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the articletrack app.

    Introduces the Ticket, Comment and Article models, and moves article
    metadata (title, issns, journal info, journals M2M) off Checkin onto the
    new Article model, linking Checkin to Article via a ForeignKey.
    Auto-generated by South; edit with care.
    """

    def forwards(self, orm):
        """Apply the migration."""
        # Adding model 'Ticket'
        db.create_table('articletrack_ticket', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('started_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('finished_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tickets', to=orm['auth.User'])),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('message', self.gf('django.db.models.fields.TextField')()),
            ('article', self.gf('django.db.models.fields.related.ForeignKey')(related_name='articles', to=orm['articletrack.Article'])),
        ))
        db.send_create_signal('articletrack', ['Ticket'])

        # Adding model 'Comment'
        db.create_table('articletrack_comment', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments_author', to=orm['auth.User'])),
            ('ticket', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['articletrack.Ticket'])),
            ('message', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('articletrack', ['Comment'])

        # Adding model 'Article'
        db.create_table('articletrack_article', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('article_title', self.gf('django.db.models.fields.CharField')(max_length=512)),
            ('articlepkg_ref', self.gf('django.db.models.fields.CharField')(max_length=32)),
            ('journal_title', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('issue_label', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('pissn', self.gf('django.db.models.fields.CharField')(default='', max_length=9)),
            ('eissn', self.gf('django.db.models.fields.CharField')(default='', max_length=9)),
        ))
        db.send_create_signal('articletrack', ['Article'])

        # Adding M2M table for field journals on 'Article'
        m2m_table_name = db.shorten_name('articletrack_article_journals')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('article', models.ForeignKey(orm['articletrack.article'], null=False)),
            ('journal', models.ForeignKey(orm['journalmanager.journal'], null=False))
        ))
        db.create_unique(m2m_table_name, ['article_id', 'journal_id'])

        # Article metadata now lives on the Article model; drop it from Checkin.
        # Deleting field 'Checkin.issue_label'
        db.delete_column('articletrack_checkin', 'issue_label')

        # Deleting field 'Checkin.journal_title'
        db.delete_column('articletrack_checkin', 'journal_title')

        # Deleting field 'Checkin.pissn'
        db.delete_column('articletrack_checkin', 'pissn')

        # Deleting field 'Checkin.eissn'
        db.delete_column('articletrack_checkin', 'eissn')

        # Deleting field 'Checkin.article_title'
        db.delete_column('articletrack_checkin', 'article_title')

        # Deleting field 'Checkin.articlepkg_ref'
        db.delete_column('articletrack_checkin', 'articlepkg_ref')

        # Adding field 'Checkin.article'
        db.add_column('articletrack_checkin', 'article',
                      self.gf('django.db.models.fields.related.ForeignKey')(related_name='checkins', null=True, to=orm['articletrack.Article']),
                      keep_default=False)

        # Removing M2M table for field journals on 'Checkin'
        db.delete_table(db.shorten_name('articletrack_checkin_journals'))

    def backwards(self, orm):
        """Reverse the migration.

        NOTE: this intentionally raises as soon as it reaches a Checkin field
        whose NULL values cannot be restored; the code after each raise is the
        South-generated aid for writing a manual reversal and is unreachable
        as written.
        """
        # Deleting model 'Ticket'
        db.delete_table('articletrack_ticket')

        # Deleting model 'Comment'
        db.delete_table('articletrack_comment')

        # Deleting model 'Article'
        db.delete_table('articletrack_article')

        # Removing M2M table for field journals on 'Article'
        db.delete_table(db.shorten_name('articletrack_article_journals'))

        # User chose to not deal with backwards NULL issues for 'Checkin.issue_label'
        raise RuntimeError("Cannot reverse this migration. 'Checkin.issue_label' and its values cannot be restored.")

        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Checkin.issue_label'
        db.add_column('articletrack_checkin', 'issue_label',
                      self.gf('django.db.models.fields.CharField')(max_length=64),
                      keep_default=False)

        # User chose to not deal with backwards NULL issues for 'Checkin.journal_title'
        raise RuntimeError("Cannot reverse this migration. 'Checkin.journal_title' and its values cannot be restored.")

        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Checkin.journal_title'
        db.add_column('articletrack_checkin', 'journal_title',
                      self.gf('django.db.models.fields.CharField')(max_length=256),
                      keep_default=False)

        # Adding field 'Checkin.pissn'
        db.add_column('articletrack_checkin', 'pissn',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=9),
                      keep_default=False)

        # Adding field 'Checkin.eissn'
        db.add_column('articletrack_checkin', 'eissn',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=9),
                      keep_default=False)

        # User chose to not deal with backwards NULL issues for 'Checkin.article_title'
        raise RuntimeError("Cannot reverse this migration. 'Checkin.article_title' and its values cannot be restored.")

        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Checkin.article_title'
        db.add_column('articletrack_checkin', 'article_title',
                      self.gf('django.db.models.fields.CharField')(max_length=512),
                      keep_default=False)

        # User chose to not deal with backwards NULL issues for 'Checkin.articlepkg_ref'
        raise RuntimeError("Cannot reverse this migration. 'Checkin.articlepkg_ref' and its values cannot be restored.")

        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Checkin.articlepkg_ref'
        db.add_column('articletrack_checkin', 'articlepkg_ref',
                      self.gf('django.db.models.fields.CharField')(max_length=32),
                      keep_default=False)

        # Deleting field 'Checkin.article'
        db.delete_column('articletrack_checkin', 'article_id')

        # Adding M2M table for field journals on 'Checkin'
        m2m_table_name = db.shorten_name('articletrack_checkin_journals')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('checkin', models.ForeignKey(orm['articletrack.checkin'], null=False)),
            ('journal', models.ForeignKey(orm['journalmanager.journal'], null=False))
        ))
        db.create_unique(m2m_table_name, ['checkin_id', 'journal_id'])

    # South "frozen ORM": snapshot of every model this migration touches,
    # used to build the `orm` object passed to forwards()/backwards().
    models = {
        'articletrack.article': {
            'Meta': {'object_name': 'Article'},
            'article_title': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'articlepkg_ref': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'eissn': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '9'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issue_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'journal_title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'journals': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
            'pissn': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '9'})
        },
        'articletrack.checkin': {
            'Meta': {'ordering': "['-created_at']", 'object_name': 'Checkin'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'checkins'", 'null': 'True', 'to': "orm['articletrack.Article']"}),
            'attempt_ref': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'package_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'uploaded_at': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'articletrack.comment': {
            'Meta': {'ordering': "['-date']", 'object_name': 'Comment'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments_author'", 'to': "orm['auth.User']"}),
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'ticket': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['articletrack.Ticket']"})
        },
        'articletrack.notice': {
            'Meta': {'ordering': "['-created_at']", 'object_name': 'Notice'},
            'checkin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['articletrack.Checkin']"}),
            'checkpoint': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'stage': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'articletrack.ticket': {
            'Meta': {'object_name': 'Ticket'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'articles'", 'to': "orm['articletrack.Article']"}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tickets'", 'to': "orm['auth.User']"}),
            'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'started_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'journalmanager.collection': {
            'Meta': {'ordering': "['name']", 'object_name': 'Collection'},
            'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
            'address': ('django.db.models.fields.TextField', [], {}),
            'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'collection': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_collection'", 'to': "orm['auth.User']", 'through': "orm['journalmanager.UserCollections']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'name_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
        },
        'journalmanager.institution': {
            'Meta': {'ordering': "['name']", 'object_name': 'Institution'},
            'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
            'address': ('django.db.models.fields.TextField', [], {}),
            'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
            'cel': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'complement': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
        },
        'journalmanager.journal': {
            'Meta': {'ordering': "['title']", 'object_name': 'Journal'},
            'abstract_keyword_languages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'abstract_keyword_languages'", 'symmetrical': 'False', 'to': "orm['journalmanager.Language']"}),
            'acronym': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'journals'", 'to': "orm['journalmanager.Collection']"}),
            'copyrighter': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
            'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enjoy_creator'", 'to': "orm['auth.User']"}),
            'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'current_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'editor_address': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'editor_address_city': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'editor_address_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
            'editor_address_state': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'editor_address_zip': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'editor_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'editor_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'editor_phone1': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'editor_phone2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'editors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'user_editors'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'eletronic_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
            'final_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'final_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'final_year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'frequency': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index_coverage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'init_num': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'init_vol': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'init_year': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'is_indexed_aehci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_indexed_scie': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_indexed_ssci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Language']", 'symmetrical': 'False'}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'medline_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'medline_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'national_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
            'other_previous_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'previous_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'previous_title': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'prev_title'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
            'print_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
            'pub_level': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'pub_status': ('django.db.models.fields.CharField', [], {'default': "'inprogress'", 'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'pub_status_changed_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pub_status_changed_by'", 'to': "orm['auth.User']"}),
            'pub_status_reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'publication_city': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'publisher_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
            'publisher_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'publisher_state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'scielo_issn': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'secs_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'short_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'}),
            'sponsor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'journal_sponsor'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['journalmanager.Sponsor']"}),
            'study_areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals_migration_tmp'", 'null': 'True', 'to': "orm['journalmanager.StudyArea']"}),
            'subject_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals'", 'null': 'True', 'to': "orm['journalmanager.SubjectCategory']"}),
            'subject_descriptors': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
            'title_iso': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
            'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'url_journal': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'url_online_submission': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']"})
        },
        'journalmanager.language': {
            'Meta': {'ordering': "['name']", 'object_name': 'Language'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iso_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'journalmanager.sponsor': {
            'Meta': {'ordering': "['name']", 'object_name': 'Sponsor', '_ormbases': ['journalmanager.Institution']},
            'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'symmetrical': 'False'}),
            'institution_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Institution']", 'unique': 'True', 'primary_key': 'True'})
        },
        'journalmanager.studyarea': {
            'Meta': {'object_name': 'StudyArea'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'study_area': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'journalmanager.subjectcategory': {
            'Meta': {'object_name': 'SubjectCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'term': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'})
        },
        'journalmanager.uselicense': {
            'Meta': {'ordering': "['license_code']", 'object_name': 'UseLicense'},
            'disclaimer': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'license_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'reference_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'journalmanager.usercollections': {
            'Meta': {'unique_together': "(('user', 'collection'),)", 'object_name': 'UserCollections'},
            'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }
    complete_apps = ['articletrack']
from lxml import etree
class Model:
    """Wrapper around an InterMine data-model XML file."""

    def __init__(self, fn):
        # Parse the model XML once and keep the tree; Item notes a TODO to
        # validate class names against it, but no validation happens yet.
        self._modelTree = etree.parse(fn)
class Document:
    """An InterMine "items XML" document under construction.

    Collects Item objects and serializes them to the <items> XML format via
    __str__ / write().
    """

    def __init__(self, model):
        self._model = model
        # Counter used to hand out unique item ids of the form "0_<n>".
        self._nextItemNumber = 1
        self._items = []

    def addItem(self, item):
        """
        Add an item to this document.
        Items can be changed after they are added (e.g. additional references added to a collection)
        :param item:
        :return: the item appended
        """
        self._items.append(item)
        return item

    def createItem(self, className):
        """
        Create an item in the given document.
        This factory method should always be used rather than the constructor.
        """
        # NOTE(review): the new item is NOT added to self._items here; callers
        # must also call addItem() for it to be serialized -- confirm intended.
        item = Item(self._model, className)
        item._id = "0_%d" % self._nextItemNumber
        self._nextItemNumber += 1
        return item

    def write(self, outFn):
        """
        Write the document to the filesystem
        """
        with open(outFn, 'w') as f:
            f.write(str(self))

    def __str__(self):
        # Serialize every added item into the InterMine <items> XML layout.
        itemsTag = etree.Element("items")
        for item in self._items:
            itemTag = etree.SubElement(itemsTag, "item",
                                       attrib={"id": item._id, "class": item._className, "implements": ""})
            for name, value in item._attrs.items():
                if isinstance(value, list):
                    # List values become a <collection> of <reference> tags.
                    collectionTag = etree.SubElement(itemTag, "collection", attrib={"name": name})
                    for referencedItem in value:
                        etree.SubElement(collectionTag, "reference", attrib={"ref_id": referencedItem._id})
                elif isinstance(value, Item):
                    # A single Item value becomes a named <reference>.
                    etree.SubElement(itemTag, "reference", attrib={"name": name, "ref_id": value._id})
                else:
                    # print "Writing attribute [%s]:[%s]" % (name, value)
                    etree.SubElement(itemTag, "attribute", attrib={"name": name, "value": str(value)})
        return etree.tostring(itemsTag, pretty_print=True).decode('unicode_escape')
class Item:
    """A single data item destined for an InterMine items-XML Document.

    Create instances through Document.createItem() so each gets a unique id.
    """

    def __init__(self, model, className):
        self._model = model
        # TODO: check this against the model
        self._className = className
        self._attrs = {}

    def addAttribute(self, name, value):
        """
        Set attribute ``name`` to ``value``.
        Empty-string values are silently dropped because InterMine does not
        accept them.
        """
        if value == "":
            return
        self._attrs[name] = value

    def addToAttribute(self, name, value):
        """
        Append ``value`` to the collection stored under ``name``.
        A previously unset attribute becomes a one-element collection.
        """
        try:
            self._attrs[name].append(value)
        except KeyError:
            self._attrs[name] = [value]

    def getAttribute(self, name):
        """Return the stored value for ``name`` (raises KeyError if unset)."""
        return self._attrs[name]

    def hasAttribute(self, name):
        """Return True when an attribute called ``name`` has been set."""
        return name in self._attrs

    def getClassName(self):
        """Return the model class name this item was created with."""
        return self._className
# -*- coding: iso-8859-1 -*-
# mp_laplace.py
# laplace.py with mpmath
# appropriate for high precision
# Talbot suggested that the Bromwich line be deformed into a contour that begins
# and ends in the left half plane, i.e., z \to \infty at both ends.
# Due to the exponential factor the integrand decays rapidly
# on such a contour. In such situations the trapezoidal rule converge
# extraordinarily rapidly.
# For example here we compute the inverse transform of F(s) = 1/(s+1) at t = 1
#
# >>> error = Talbot(1,24)-exp(-1)
# >>> error
# (3.3306690738754696e-015+0j)
#
# Talbot method is very powerful here we see an error of 3.3e-015
# with only 24 function evaluations
#
# Created by Fernando Damian Nieuwveldt
# email:fdnieuwveldt@gmail.com
# Date : 25 October 2009
#
# Adapted to mpmath and classes by Dieter Kadelka
# email: Dieter.Kadelka@kit.edu
# Date : 27 October 2009
# Automatic precision control by D. Kadelka 2009-11-26
#
# Reference
# L.N.Trefethen, J.A.C.Weideman, and T.Schmelzer. Talbot quadratures
# and rational approximations. BIT. Numerical Mathematics,
# 46(3):653 670, 2006.
# Optional Python 2 JIT: psyco speeds execution up when available, but the
# program is fully functional without it.
try:
    import psyco
    psyco.full()
except ImportError:
    print 'Psyco not installed, the program will just run slower'
from mpmath import mp,mpf,mpc,pi,sin,tan,exp,floor,log10
# testfunction: Laplace-transform of exp(-t)
def F(s):
    # Test function: the Laplace transform of exp(-t), i.e. F(s) = 1/(s+1).
    return 1.0 / (1.0 + s)
class Talbot(object):
    """Numerical inverse Laplace transform via Talbot's method.

    The Bromwich line is deformed into a contour on which the integrand
    decays rapidly, so the trapezoidal rule converges extraordinarily
    fast (Trefethen/Weideman/Schmelzer, BIT 46(3), 2006).
    """
    # parameters from
    # T. Schmelzer, L.N. Trefethen, SIAM J. Numer. Anal. 45 (2007) 558-571
    c1 = mpf('0.5017')
    c2 = mpf('0.6407')
    c3 = mpf('0.6122')
    c4 = mpc('0','0.2645')
    # High precision of these parameters not needed
    def __init__(self,F=F,shift=0.0,prec=50):
        self.F = F
        # test = Talbot() or test = Talbot(F) initializes with testfunction F
        # Assumption: F realvalued and analytic
        self.shift = mpf(shift)
        # Shift contour to the right in case there is a pole on the
        # positive real axis :
        # Note the contour will not be optimal since it was originally developed
        # for function with singularities on the negative real axis For example
        # take F(s) = 1/(s-1), it has a pole at s = 1, the contour needs to be
        # shifted with one unit, i.e shift = 1.
        # But in the test example no shifting is necessary
        self.N = 12
        # with double precision this constant N seems to best for the testfunction
        # given. For N = 11 or N = 13 the error is larger (for this special
        # testfunction).
        self.prec = prec
        # calculations with prec more digits
    def __call__(self,t):
        # Evaluate the inverse transform at time t with extra working digits.
        with mp.extradps(self.prec):
            t = mpf(t)
            if t == 0:
                print "ERROR: Inverse transform can not be calculated for t=0"
                return ("Error");
            N = 2*self.N
            # Initiate the stepsize (with the current precision)
            h = 2*pi/N
            # The for loop is evaluating the Laplace inversion at each point theta i
            # which is based on the trapezoidal rule
            ans = 0.0
            for k in range(self.N):
                theta = -pi + (k+0.5)*h
                z = self.shift + N/t*(Talbot.c1*theta/tan(Talbot.c2*theta) - Talbot.c3 + Talbot.c4*theta)
                dz = N/t * (-Talbot.c1*Talbot.c2*theta/sin(Talbot.c2*theta)**2 + Talbot.c1/tan(Talbot.c2*theta)+Talbot.c4)
                v1 = exp(z*t)*dz
                # Add digits proportional to the magnitude of exp(z*t)*dz so
                # that this factor does not swamp the accuracy of F(z).
                prec = floor(max(log10(abs(v1)),0))
                with mp.extradps(prec):
                    value = self.F(z)
                ans += v1*value
            return ((h/pi)*ans).imag
*********************************************************************************
# -*- coding: iso-8859-1 -*-
# asian.py
# Title : Numerical inversion of the Laplace transform for pricing Asian options
# The Geman and Yor model
#
# Numerical inversion is done by Talbot's method.
#
################################################################################
## Created by Fernando Damian Nieuwveldt
## Date : 26 October 2009
## email : fdnieuwveldt@gmail.com
## This was part work of my masters thesis (The Asian method not mpmath part)
## in Applied Mathematics at the University of Stellenbosch, South Africa
## Thesis title : A Survey of Computational Methods for Pricing Asian Options
## For reference details contact me via email.
################################################################################
# Example :
# Asian(2,2,1,0,0.1,0.02,100)
# 0.0559860415440030213974642963090994900722---mp.dps = 100
# Asian(2,2,1,0,0.05,0.02,250)
# 0.03394203103227322980773---mp.dps = 150
#
# NB : Computational time increases as the volatility becomes small, because of
# the argument for the hypergeometric function becomes large
#
# H. Geman and M. Yor. Bessel processes, Asian options and perpetuities.
# Mathematical Finance, 3:349–375, 1993.
# L.N.Trefethen, J.A.C.Weideman, and T.Schmelzer. Talbot quadratures
# and rational approximations. BIT. Numerical Mathematics,
# 46(3):653–670, 2006.
# adapted to mp_laplace by D. Kadelka 2009-11-17
# Automatic precision control by D. Kadelka 2009-11-26
# email: Dieter.Kadelka@stoch.uni-karlsruhe.de
# Example:
# from asian import Asian
# f = Asian()
# print f
# Pricing Asian options: The Geman and Yor model with
# S = 2, K = 2, T = 1, t = 0, sig = 0.1, r = 0.02
# print f()
# 0.0559860415440029
# f.ch_sig('0.05')
# print f
# Pricing Asian options: The Geman and Yor model with
# S = 2, K = 2, T = 1, t = 0, sig = 0.05, r = 0.02
# print f()
# 0.0345709175410301
# f.N = 100
# print f()
# 0.0339410537085201
# from mpmath import mp
# mp.dps = 50
# f.update()
# 0.033941053708520319031364170122438704213486236188948
# Optional speed-up: Psyco is a Python 2-only JIT; degrade gracefully without it.
try:
    import psyco
    psyco.full()
except ImportError:
    print 'Psyco not installed, the program will just run slower'
from mpmath import mp,mpf,mpc,pi,sin,tan,exp,gamma,hyp1f1,sqrt,log10,floor
from mp_laplace import Talbot
class Asian(object):
    """Price Asian options under the Geman-Yor model by numerically
    inverting the Laplace transform G with Talbot's method (mp_laplace)."""
    def G(self,s): # Laplace-Transform
        zz = 2*self.v + 2 + s
        mu = sqrt(self.v**2+2*zz)
        a = mu/2 - self.v/2 - 1
        b = mu/2 + self.v/2 + 2
        v1 = (2*self.alp)**(-a)*gamma(b)/gamma(mu+1)/(zz*(zz - 2*(1 + self.v)))
        prec = floor(max(log10(abs(v1)),mp.dps))+self.prec
        # additional precision needed for computation of hyp1f1
        with mp.extradps(prec):
            value = hyp1f1(a,mu + 1,self.beta)*v1
        return value
    def update(self):
        # Recompute Geman and Yor's derived variables (v, alp, beta)
        # possibly with infinite precision (strings)
        self.S = mpf(self.parameter['S'])
        self.K = mpf(self.parameter['K'])
        self.T = mpf(self.parameter['T'])
        self.t = mpf(self.parameter['t'])
        self.sig = mpf(self.parameter['sig'])
        self.r = mpf(self.parameter['r'])
        self.v = 2*self.r/(self.sig**2) - 1
        self.alp = self.sig**2/(4*self.S)*self.K*self.T
        self.beta = -1/(2*self.alp)
        self.f.shift = self.shift
    def __init__(self,S=2,K=2,T=1,t=0,sig='0.1',r='0.02',N=50,shift=0.0,prec=0):
        # Strings allowed for infinite precision
        # prec compensates rounding errors not caught with automatic precision control
        # parameters may be changed later
        # after changing mp.dps or any of these parameters (except prec, N and t),
        # call update (v, alp, beta depend on these parameters)
        self.N = N
        self.shift = shift
        self.prec = max(prec,0)
        self.parameter = {'S':S,'K':K,'T':T,'t':t,'sig':sig,'r':r}
        # input: possibly strings with infinite precision
        self.f = Talbot(self.G,shift=self.shift,prec=0)
        self.update()
    def __call__(self):
        # Price the option: invert G at tau and undo the Geman-Yor scaling.
        self.f.N = self.N
        tau = ((self.sig**2)/4)*(self.T - self.t)
        # Evaluation of the integral at tau
        return 4*exp(tau*(2*self.v+2))*exp(-self.r*(self.T - self.t))*self.S/(self.T*self.sig**2)*self.f(tau)
    # Update Parameters: each setter mutates one entry and refreshes the
    # derived quantities via update().
    def ch_S(self,S):
        self.parameter['S'] = S
        self.update()
    def ch_K(self,K):
        self.parameter['K'] = K
        self.update()
    def ch_T(self,T):
        self.parameter['T'] = T
        self.update()
    def ch_t(self,t):
        self.parameter['t'] = t
        self.update()
    def ch_r(self,r):
        self.parameter['r'] = r
        self.update()
    def ch_sig(self,sig):
        self.parameter['sig'] = sig
        self.update()
    # Actual Parameters, human readable
    def __str__(self):
        s = 'Pricing Asian options: The Geman and Yor model with\n'
        s += " S = %(S)s, K = %(K)s, T = %(T)s, t = %(t)s, sig = %(sig)s, r = %(r)s" % self.parameter
        return s
| ActiveState/code | recipes/Python/576964_Pricing_Asioptions_using_mpmath_automatic/recipe-576964.py | Python | mit | 8,391 |
import os
import unittest
from vsg import vhdlFile
from vsg.tests import utils
# LRM construct under test; doubles as the fixture directory name.
sLrmUnit = 'process_statement'
# Parse the VHDL input once at import time and share it across the tests below.
lFile, eError =vhdlFile.utils.read_vhdlfile(os.path.join(os.path.dirname(__file__), sLrmUnit,'classification_test_input.vhd'))
oFile = vhdlFile.vhdlFile(lFile)
class test_token(unittest.TestCase):

    def test_classification(self):
        """Token classifications extracted from the parsed file must match
        the recorded expected results line for line."""
        sTestDir = os.path.join(os.path.dirname(__file__), sLrmUnit)
        lExpected = []
        utils.read_file(os.path.join(sTestDir, 'classification_results.txt'), lExpected, False)
        lActual = [str(oObject) for oObject in utils.extract_objects(oFile, True)]
        self.assertEqual(lExpected, lActual)
| jeremiah-c-leary/vhdl-style-guide | vsg/tests/vhdlFile/test_process_statement.py | Python | gpl-3.0 | 709 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from envisage.ui.tasks.preferences_pane import PreferencesPane
from traits.api import Bool, Float, Enum, Str, Password
from traitsui.api import View, Item, VGroup, HGroup
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.envisage.tasks.base_preferences_helper import BasePreferencesHelper
class SpectrometerPreferences(BasePreferencesHelper):
    # Display name and preference storage locations for this page.
    name = "Spectrometer"
    preferences_path = "pychron.spectrometer"
    id = "pychron.spectrometer.preferences_page"
    # Push spectrometer parameters at startup; force-send resends values
    # even when they appear up to date (see pane tooltips below).
    send_config_on_startup = Bool
    force_send_configuration = Bool(True)
    # MFTable archiving targets: local git repository and/or central database.
    use_local_mftable_archive = Bool
    use_db_mftable_archive = Bool
    # Magnet moves larger than this (amu) require user confirmation.
    confirmation_threshold_mass = Float
    use_detector_safety = Bool
    use_log_events = Bool
    use_vertical_markers = Bool
    auto_open_readout = Bool
    # Defaults applied to new scans when use_default_scan_settings is set.
    use_default_scan_settings = Bool
    default_isotope = Enum(("Ar40", "Ar39", "Ar38", "Ar37", "Ar36"))
    default_detector = Enum(("H2", "H1", "AX", "L1", "L2", "CDD"))
class SpectrometerPreferencesPane(PreferencesPane):
    # Envisage preferences pane backed by SpectrometerPreferences.
    model_factory = SpectrometerPreferences
    category = "Spectrometer"
    def traits_view(self):
        """Build the pane layout: General, MFTable, Scan and Magnet groups."""
        magnet_grp = VGroup(
            Item(
                "confirmation_threshold_mass",
                tooltip="Request confirmation if magnet move is greater than threshold",
                label="Confirmation Threshold (amu)",
            ),
            show_border=True,
            label="Magnet",
        )
        mf_grp = VGroup(
            Item(
                "use_local_mftable_archive",
                tooltip="Archive mftable to a local git repository",
                label="Local Archive",
            ),
            Item(
                "use_db_mftable_archive",
                tooltip="Archive mftable to central database",
                label="DB Archive",
            ),
            show_border=True,
            label="MFTable",
        )
        gen_grp = VGroup(
            Item(
                "send_config_on_startup",
                tooltip="Load the spectrometer parameters on startup",
            ),
            Item(
                "force_send_configuration",
                tooltip="If disabled pychron will only set configuration values that are out of date",
            ),
            Item(
                "auto_open_readout",
                tooltip="Open readout view when Spectrometer plugin starts",
            ),
        )
        scan_grp = VGroup(
            Item(
                "use_detector_safety",
                label="Detector Safety",
                tooltip="Abort magnet moves "
                "if move will place an intensity greater than X on the current detector",
            ),
            Item(
                "use_log_events",
                label="Event Logging",
                tooltip="Display events such as valve open/close, magnet moves on Scan graph",
            ),
            Item("use_vertical_markers", label="Vertical Markers"),
            HGroup(
                Item("use_default_scan_settings", label="Use Defaults"),
                Item(
                    "default_detector",
                    label="Detector",
                    enabled_when="use_default_scan_settings",
                ),
                Item(
                    "default_isotope",
                    label="Isotope",
                    enabled_when="use_default_scan_settings",
                ),
            ),
            label="Scan",
            show_border=True,
        )
        return View(VGroup(gen_grp, mf_grp, scan_grp, magnet_grp))
class NGXSpectrometerPreferences(BasePreferencesHelper):
    # Credentials used by the NGX spectrometer connection.
    username = Str
    password = Password
    preferences_path = "pychron.spectrometer.ngx"
class NGXSpectrometerPreferencesPane(PreferencesPane):
    """Preferences pane exposing the NGX spectrometer credentials."""

    model_factory = NGXSpectrometerPreferences
    category = "Spectrometer"

    def traits_view(self):
        """Return a minimal username/password entry view."""
        return View(Item("username"), Item("password"))
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/spectrometer/tasks/spectrometer_preferences.py | Python | apache-2.0 | 4,917 |
import os
from .private import (
DEBUG, SECRET_KEY, DB_NAME, DB_HOST, DB_PASS, DB_PORT, DB_USER, SERVER
)
# Project base directory (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SERVER is supplied by the untracked .private module together with the secrets.
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', SERVER]
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'bootstrapform',
    'bh'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'brotherhood.urls'
# Templates: project-level templates/ dir plus per-app template dirs;
# custom bootstrap filters are registered as template builtins.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            'builtins': [
                'bh.templatetags.bootstrap_filters'
            ]
        },
    },
]
WSGI_APPLICATION = 'brotherhood.wsgi.application'
# Database: PostgreSQL, credentials imported from the .private module.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': DB_NAME,
        'USER': DB_USER,
        'PASSWORD': DB_PASS,
        'HOST': DB_HOST,
        'PORT': DB_PORT
    },
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
TIME_ZONE = 'UTC'
# NOTE(review): USE_TZ = False means datetimes are stored naive in TIME_ZONE;
# confirm this is intentional before enabling timezone-aware features.
USE_TZ = False
LANGUAGE_CODE = 'ru-ru'
USE_I18N = True
USE_L10N = True
# Static files are collected into static/ and served under /static/;
# user uploads live in media/ under /media/.
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| dstarod/brotherhood | brotherhood/settings.py | Python | gpl-3.0 | 2,527 |
import numpy as np
from .._common import lhs, messages, optimizer, selection_sync
from .._helpers import OptimizeResult, register
__all__ = [
"minimize",
]
def minimize(
    fun,
    bounds,
    x0=None,
    args=(),
    maxiter=100,
    popsize=10,
    nrperc=0.5,
    seed=None,
    xtol=1.0e-8,
    ftol=1.0e-8,
    workers=1,
    backend=None,
    return_all=False,
    # BUG FIX: the default used to be ``callback=True``; True is neither None
    # nor callable, so the validation below raised ValueError on every call
    # made with default arguments. The docstring already documented None.
    callback=None,
):
    """
    Minimize an objective function using Neighborhood Algorithm (NA).
    Parameters
    ----------
    fun : callable
        The objective function to be minimized. Must be in the form ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array and args is a tuple of any additional fixed parameters needed to completely specify the function.
    bounds : array_like
        Bounds for variables. ``(min, max)`` pairs for each element in ``x``, defining the finite lower and upper bounds for the optimizing argument of ``fun``. It is required to have ``len(bounds) == len(x)``. ``len(bounds)`` is used to determine the number of parameters in ``x``.
    x0 : array_like or None, optional, default None
        Initial population. Array of real elements with shape (``popsize``, ``ndim``), where ``ndim`` is the number of independent variables. If ``x0`` is not specified, the population is initialized using Latin Hypercube sampling.
    args : tuple, optional, default None
        Extra arguments passed to the objective function.
    maxiter : int, optional, default 100
        The maximum number of generations over which the entire population is evolved.
    popsize : int, optional, default 10
        Total population size.
    nrperc : scalar, optional, default 0.5
        Number of resamplings (as a fraction of total population size).
    seed : int or None, optional, default None
        Seed for random number generator.
    xtol : scalar, optional, default 1.0e-8
        Solution tolerance for termination.
    ftol : scalar, optional, default 1.0e-8
        Objective function value tolerance for termination.
    workers : int, optional, default 1
        The population is subdivided into workers sections and evaluated in parallel (uses :class:`joblib.Parallel`). Supply -1 to use all available CPU cores.
    backend : str {'loky', 'threading', 'mpi'}, optional, default 'threading'
        Parallel backend to use when ``workers`` is not ``0`` or ``1``:
        - 'loky': disable threading
        - 'threading': enable threading
        - 'mpi': use MPI (uses :mod:`mpi4py`)
    return_all : bool, optional, default False
        Set to True to return an array with shape (``nit``, ``popsize``, ``ndim``) of all the solutions at each iteration.
    callback : callable or None, optional, default None
        Called after each iteration. It is a callable with the signature ``callback(X, OptimizeResult state)``, where ``X`` is the current population and ``state`` is a partial :class:`stochopy.optimize.OptimizeResult` object with the same fields as the ones from the return (except ``"success"``, ``"status"`` and ``"message"``).
    Returns
    -------
    :class:`stochopy.optimize.OptimizeResult`
        The optimization result represented as a :class:`stochopy.optimize.OptimizeResult`. Important attributes are:
        - ``x``: the solution array
        - ``fun``: the solution function value
        - ``success``: a Boolean flag indicating if the optimizer exited successfully
        - ``message``: a string which describes the cause of the termination
    References
    ----------
    .. [1] M. Sambridge, *Geophysical inversion with a neighbourhood algorithm - I. Searching a parameter space*, Geophysical Journal International, 1999, 138(2): 479–494
    """
    # Cost function must be callable
    if not hasattr(fun, "__call__"):
        raise TypeError()
    # Dimensionality and search space: one (min, max) pair per variable
    if np.ndim(bounds) != 2:
        raise ValueError()
    # Initial guess x0 must match (popsize, ndim)
    if x0 is not None:
        if np.ndim(x0) != 2 or np.shape(x0)[1] != len(bounds):
            raise ValueError()
    # Population size
    if popsize < 2:
        raise ValueError()
    if x0 is not None and len(x0) != popsize:
        raise ValueError()
    # NA parameters: resampling fraction must lie in (0, 1]
    if not 0.0 < nrperc <= 1.0:
        raise ValueError()
    # Seed
    if seed is not None:
        np.random.seed(seed)
    # Callback must be a callable when provided
    if callback is not None and not hasattr(callback, "__call__"):
        raise ValueError()
    # Run in serial or parallel (the @optimizer wrapper on `na` handles workers/backend)
    optargs = (
        bounds,
        x0,
        maxiter,
        popsize,
        nrperc,
        xtol,
        ftol,
        return_all,
        callback,
    )
    res = na(fun, args, True, workers, backend, *optargs)
    return res
@optimizer
def na(
    funnorm,
    args,
    sync,
    workers,
    backend,
    bounds,
    x0,
    maxiter,
    popsize,
    nrperc,
    xtol,
    ftol,
    return_all,
    callback,
):
    """Optimize with Neighborhood Algorithm.

    Works in a unit hypercube internally: the population is normalized to
    [0, 1] per dimension and unnormalized whenever the user's function or
    callback is invoked.
    """
    ndim = len(bounds)
    lower, upper = np.transpose(bounds)
    # Normalize and unnormalize between original space and the unit cube.
    span = upper - lower
    span_mask = span > 0.0
    span[~span_mask] = 1.0 # Avoid zero division in normalize
    normalize = lambda x: np.where(span_mask, (x - lower) / span, upper)
    unnormalize = lambda x: np.where(span_mask, x * span + lower, upper)
    fun = lambda x: funnorm(unnormalize(x))
    # Number of resampling (at least one cell is resampled each generation)
    nr = max(1, int(nrperc * popsize))
    # Initial population: user-provided x0 or Latin Hypercube sample
    X = x0 if x0 is not None else lhs(popsize, ndim, bounds)
    X = normalize(X)
    pbest = X.copy()
    # Evaluate initial population
    pfit = fun(X)
    pbestfit = pfit.copy()
    # Initial best solution
    gbidx = np.argmin(pbestfit)
    gfit = pbestfit[gbidx]
    gbest = X[gbidx].copy()
    # Store all models sampled (NA resamples around the best of *all* past models)
    Xall = X.copy()
    Xallfit = pfit.copy()
    # Initialize arrays
    if return_all:
        xall = np.empty((maxiter, popsize, ndim))
        funall = np.empty((maxiter, popsize))
        xall[0] = unnormalize(X)
        funall[0] = pfit.copy()
    # First iteration for callback
    if callback is not None:
        res = OptimizeResult(x=unnormalize(gbest), fun=gfit, nfev=popsize, nit=1)
        if return_all:
            res.update({"xall": xall[:1], "funall": funall[:1]})
        callback(unnormalize(X), res)
    # Iterate until one of the termination criterion is satisfied
    it = 1
    converged = False
    while not converged:
        it += 1
        # Mutation: resample inside the Voronoi cells of the nr best models
        X = mutation(Xall, Xallfit, popsize, ndim, nr, span_mask)
        # Selection: evaluates X and sets status (None while not converged)
        gbest, gfit, pfit, status = selection_sync(
            it, X, gbest, pbest, pbestfit, maxiter, xtol, ftol, fun
        )
        Xall = np.vstack((X, Xall))
        Xallfit = np.concatenate((pfit, Xallfit))
        if return_all:
            xall[it - 1] = unnormalize(X)
            funall[it - 1] = pfit.copy()
        converged = status is not None
        if callback is not None:
            res = OptimizeResult(
                x=unnormalize(gbest), fun=gfit, nfev=it * popsize, nit=it,
            )
            if return_all:
                res.update({"xall": xall[:it], "funall": funall[:it]})
            callback(unnormalize(X), res)
    res = OptimizeResult(
        x=unnormalize(gbest),
        success=status >= 0,
        status=status,
        message=messages[status],
        fun=gfit,
        nfev=it * popsize,
        nit=it,
    )
    if return_all:
        res.update({"xall": xall[:it], "funall": funall[:it]})
    return res
def mutation(Xall, Xallfit, popsize, ndim, nr, span_mask):
    """
    Update population.

    Each new model performs one Gibbs-style sweep restricted to the Voronoi
    cell of one of the ``nr`` best models sampled so far: for every
    dimension, the cell boundaries along the current axis are computed from
    squared-distance bookkeeping (d1/d2) and a uniform draw is taken between
    them (clipped to the unit cube).
    Note
    ----
    Code adapted from <https://github.com/keithfma/neighborhood/blob/master/neighborhood/search.py>
    """
    X = np.empty((popsize, ndim))
    # Indices of the nr best (lowest misfit) models; cycled over the population.
    ix = Xallfit.argsort()[:nr]
    for i in range(popsize):
        k = ix[i % nr]
        X[i] = Xall[k].copy()
        U = np.delete(Xall, k, axis=0)
        # d1/d2: running squared distances to the cell center / other models,
        # excluding the axis currently being resampled.
        d1 = 0.0
        d2 = ((U[:, 1:] - X[i, 1:]) ** 2).sum(axis=1)
        for j in range(ndim):
            if not span_mask[j]:
                # Value does not matter as it will be fixed by unnormalize
                X[i, j] = 0.0
                continue
            lim = 0.5 * (Xall[k, j] + U[:, j] + (d1 - d2) / (Xall[k, j] - U[:, j]))
            idx = lim <= X[i, j]
            low = max(lim[idx].max(), 0.0) if idx.sum() else 0.0
            idx = lim >= X[i, j]
            high = min(lim[idx].min(), 1.0) if idx.sum() else 1.0
            X[i, j] = np.random.uniform(low, high)
            if j < ndim - 1:
                # Shift the excluded axis from j to j+1 for the next sweep step.
                d1 += (Xall[k, j] - X[i, j]) ** 2 - (Xall[k, j + 1] - X[i, j + 1]) ** 2
                d2 += (U[:, j] - X[i, j]) ** 2 - (U[:, j + 1] - X[i, j + 1]) ** 2
    return X
# Expose this optimizer under the name "na" in the package-wide registry.
register("na", minimize)
| keurfonluu/StochOPy | stochopy/optimize/na/_na.py | Python | mit | 8,787 |
from csamtools import *
from ctabix import *
import csamtools
import ctabix
import Pileup
import sys
import os
class SamtoolsError(Exception):
    """exception raised in case of an error incurred in the samtools library."""

    def __init__(self, value):
        # keep the raw error text (usually joined stderr output from samtools)
        self.value = value

    def __str__(self):
        # display the stored message repr-quoted, as callers expect
        return repr(self.value)
class SamtoolsDispatcher(object):
    '''samtools dispatcher.
    Emulates the samtools command line as module calls.
    Captures stdout and stderr.
    Raises a :class:`pysam.SamtoolsError` exception in case
    samtools exits with an error code other than 0.
    Some command line options are associated with parsers.
    For example, the samtools command "pileup -c" creates
    a tab-separated table on standard output. In order to
    associate parsers with options, an optional list of
    parsers can be supplied. The list will be processed
    in order checking for the presence of each option.
    If no parser is given or no appropriate parser is found,
    the stdout output of samtools commands will be returned.
    '''
    dispatch=None
    parsers=None
    def __init__(self,dispatch, parsers):
        # dispatch: samtools sub-command name; parsers: optional
        # ((options, parser), ...) pairs applied to stdout (see class doc).
        self.dispatch = dispatch
        self.parsers = parsers
        self.stderr = []
    def __call__(self,*args, **kwargs):
        '''execute the samtools command

        Pass ``raw=True`` to skip any registered stdout parser.
        '''
        retval, stderr, stdout = csamtools._samtools_dispatch( self.dispatch, args )
        if retval: raise SamtoolsError( "\n".join( stderr ) )
        self.stderr = stderr
        # samtools commands do not propagate the return code correctly.
        # I have thus added this patch to throw if there is output on stderr.
        # Note that there is sometimes output on stderr that is not an error,
        # for example: [sam_header_read2] 2 sequences loaded.
        # Ignore messages like these
        stderr = [x for x in stderr
                  if not (x.startswith( "[sam_header_read2]" ) or
                          x.startswith("[bam_index_load]") or
                          x.startswith("[bam_sort_core]") or \
                          x.startswith("[samopen] SAM header is present"))]
        if stderr: raise SamtoolsError( "\n".join( stderr ) )
        # call parser for stdout: a parser fires only when ALL of its
        # options are present in args (for/else idiom).
        if not kwargs.get("raw") and stdout and self.parsers:
            for options, parser in self.parsers:
                for option in options:
                    if option not in args: break
                else:
                    return parser(stdout)
        return stdout
    def getMessages( self ):
        # stderr lines captured by the most recent call
        return self.stderr
    def usage(self):
        '''return the samtools usage information for this command'''
        retval, stderr, stdout = csamtools._samtools_dispatch( self.dispatch )
        return "".join(stderr)
#
# samtools command line options to export in python
#
# import is a python reserved word.
# Maps exported python name -> (samtools sub-command, optional stdout parsers).
SAMTOOLS_DISPATCH = {
    "view" : ( "view", None ),
    "sort" : ( "sort", None),
    "samimport": ( "import", None),
    "pileup" : ( "pileup", ( (("-c",), Pileup.iterate ), ), ),
    "faidx" : ("faidx", None),
    "tview" : ("tview", None),
    "index" : ("index", None),
    "fixmate" : ("fixmate", None),
    "glfview" : ("glfview", None),
    "flagstat" : ("flagstat", None),
    "calmd" : ("calmd", None),
    "merge" : ("merge", None),
    "rmdup" : ("rmdup", None) }
# instantiate samtools commands as python functions
# NOTE(review): dict.iteritems() is Python 2 only; this module predates Python 3.
for key, options in SAMTOOLS_DISPATCH.iteritems():
    cmd, parser = options
    globals()[key] = SamtoolsDispatcher(cmd, parser)
# hack to export all the symbols from csamtools
__all__ = csamtools.__all__ + \
    ctabix.__all__ + \
    [ "SamtoolsError", "SamtoolsDispatcher" ] + list(SAMTOOLS_DISPATCH) +\
    ["Pileup",]
from version import __version__, __samtools_version__
| genome-vendor/chimerascan | chimerascan/pysam/__init__.py | Python | gpl-3.0 | 3,804 |
from dvc.main import main
def test_root(tmp_dir, dvc, capsys):
    """`dvc root` exits 0 and prints the path to the repo root ("." here)."""
    assert main(["root"]) == 0
    captured = capsys.readouterr()
    assert "." in captured[0]
def test_root_locked(tmp_dir, dvc, capsys):
    """`dvc root` must keep working while the repo lock is held."""
    # NOTE: check that `dvc root` is not blocked with dvc lock
    with dvc.lock:
        assert main(["root"]) == 0
        captured = capsys.readouterr()
        assert "." in captured[0]
| dmpetrov/dataversioncontrol | tests/func/test_root.py | Python | apache-2.0 | 341 |
# pylint: disable=C0111,R0903
"""Displays the current song being played in DeaDBeeF and provides
some media control bindings.
Left click toggles pause, scroll up skips the current song, scroll
down returns to the previous song.
Parameters:
* deadbeef.format: Format string (defaults to '{artist} - {title}')
Available values are: {artist}, {title}, {album}, {length},
{trackno}, {year}, {comment},
{copyright}, {time}
This is deprecated, but much simpler.
* deadbeef.tf_format: A foobar2000 title formatting-style format string.
These can be much more sophisticated than the standard
format strings. This is off by default, but specifying
any tf_format will enable it. If both deadbeef.format
and deadbeef.tf_format are specified, deadbeef.tf_format
takes priority.
* deadbeef.tf_format_if_stopped: Controls whether or not the tf_format format
string should be displayed even if no song is paused or
playing. This could be useful if you want to implement
your own stop strings with the built in logic. Any non-
null value will enable this (by default the module will
hide itself when the player is stopped).
* deadbeef.previous: Change binding for previous song (default is left click)
* deadbeef.next: Change binding for next song (default is right click)
* deadbeef.pause: Change binding for toggling pause (default is middle click)
Available options for deadbeef.previous, deadbeef.next and deadbeef.pause are:
LEFT_CLICK, RIGHT_CLICK, MIDDLE_CLICK, SCROLL_UP, SCROLL_DOWN
contributed by `joshbarrass <https://github.com/joshbarrass>`_ - many thanks!
"""
import sys
import subprocess
import logging
import core.module
import core.widget
import core.input
import core.decorators
import util.cli
import util.format
class Module(core.module.Module):
    def __init__(self, config, theme):
        """Read parameters, register mouse bindings and prepare the
        deadbeef command lines used for polling."""
        super().__init__(config, theme, core.widget.Widget(self.deadbeef))
        buttons = {
            "LEFT_CLICK": core.input.LEFT_MOUSE,
            "RIGHT_CLICK": core.input.RIGHT_MOUSE,
            "MIDDLE_CLICK": core.input.MIDDLE_MOUSE,
            "SCROLL_UP": core.input.WHEEL_UP,
            "SCROLL_DOWN": core.input.WHEEL_DOWN,
        }
        self._song = ""
        self._format = self.parameter("format", "{artist} - {title}")
        # Any non-empty tf_format switches the module to title-formatting mode.
        self._tf_format = self.parameter("tf_format", "")
        self._show_tf_when_stopped = util.format.asbool(
            self.parameter("tf_format_if_stopped", False)
        )
        prev_button = self.parameter("previous", "LEFT_CLICK")
        next_button = self.parameter("next", "RIGHT_CLICK")
        pause_button = self.parameter("pause", "MIDDLE_CLICK")
        # Command polled for the classic format string (semicolon-separated fields).
        self.now_playing = "deadbeef --nowplaying %a;%t;%b;%l;%n;%y;%c;%r;%e"
        self.now_playing_tf = "deadbeef --nowplaying-tf "
        cmd = "deadbeef "
        core.input.register(self, button=buttons[prev_button], cmd=cmd + "--prev")
        core.input.register(self, button=buttons[next_button], cmd=cmd + "--next")
        core.input.register(
            self, button=buttons[pause_button], cmd=cmd + "--play-pause"
        )
        # modify the tf_format if we don't want it to show on stop
        # this adds conditions to the query itself, rather than
        # polling to see if deadbeef is running
        # doing this reduces the number of calls we have to make
        if self._tf_format and not self._show_tf_when_stopped:
            self._tf_format = "$if($or(%isplaying%,%ispaused%),{query})".format(
                query=self._tf_format
            )
    @core.decorators.scrollable
    def deadbeef(self, widget):
        # Widget text: the current song (scrollable when too long).
        return self.string_song
    def hidden(self):
        # Hide the module entirely when there is nothing to display.
        return self.string_song == ""
    def update(self):
        widgets = self.widgets()
        try:
            if self._tf_format == "": # no tf format set, use the old style
                return self.update_standard(widgets)
            return self.update_tf(widgets)
        except Exception as e:
            logging.exception(e)
            self._song = "error"
    def update_tf(self, widgets):
        ## ensure that deadbeef is actually running
        ## easiest way to do this is to check --nowplaying for
        ## the string 'nothing'
        if util.cli.execute(self.now_playing) == "nothing":
            self._song = ""
            return
        ## perform the actual query -- these can be much more sophisticated
        data = util.cli.execute(self.now_playing_tf + '"'+self._tf_format+'"')
        self._song = data
    def update_standard(self, widgets):
        # Classic mode: split the semicolon-joined fields and fill the
        # user's format string.
        data = util.cli.execute(self.now_playing)
        if data == "nothing":
            self._song = ""
        else:
            data = data.split(";")
            self._song = self._format.format(
                artist=data[0],
                title=data[1],
                album=data[2],
                length=data[3],
                trackno=data[4],
                year=data[5],
                comment=data[6],
                copyright=data[7],
                time=data[8],
            )
    @property
    def string_song(self):
        """\
        Returns the current song as a string, either as a unicode() (Python <
        3) or a regular str() (Python >= 3)
        """
        if sys.version_info.major < 3:
            return unicode(self._song)
        return str(self._song)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| tobi-wan-kenobi/bumblebee-status | bumblebee_status/modules/contrib/deadbeef.py | Python | mit | 5,484 |
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.yum.history import YumHistory, YumHistoryPackage
from dnfpluginscore import _, logger
import subprocess
import contextlib
import dnf
import dnf.cli
import glob
import logging
import os
import re
import yum
class _YumBase(yum.YumBase):
    """YUM's base extended for a use in this plugin."""
    def __init__(self):
        """Initialize the base."""
        super(_YumBase, self).__init__()
        # Route YUM's two loggers to the plugin logger so all output shares
        # one sink.
        self.logger = logger
        self.verbose_logger = logger
    def iter_yumdb(self, onerror):
        """Return a new iterator over pkgtups and pkgids in the YUMDB.

        Directory basenames are parsed as <pkgid>-<name>-<version>-<release>-<arch>;
        unparsable entries are reported via ``onerror`` and skipped.
        """
        dirname_glob = os.path.join(self.rpmdb.yumdb.conf.db_path, "*", "*")
        for dirname in glob.glob(dirname_glob):
            basename = os.path.basename(dirname)
            # Only `name` may contain hyphens, hence the single greedy group.
            match = re.match(
                r"^([^-]+)-(.+)-([^-]+)-([^-]+)-([^-]+)$", basename)
            if not match:
                onerror(_("failed to parse package NEVRA from: %s"), basename)
                continue
            pkgid, name, version, release, architecture = match.groups()
            # pkgtup format: (name, arch, epoch, version, release); epoch unknown.
            yield (name, architecture, "", version, release), pkgid
class Migrate(dnf.Plugin):
    """DNF plugin that installs the ``migrate`` command."""

    name = "migrate"

    def __init__(self, base, cli):
        """Store base/cli and register the command when running under the CLI."""
        super(Migrate, self).__init__(base, cli)
        self.base = base
        self.cli = cli
        if cli is not None:
            cli.register_command(MigrateCommand)
class MigrateCommand(dnf.cli.Command):
    """``dnf migrate``: move yum's history, groups and YUMDB data to DNF."""
    aliases = ("migrate",)
    summary = _("migrate yum's history, group and yumdb data to dnf")
    def configure(self):
        """Demand repos, sack and root, and normalize the positional arg."""
        demands = self.cli.demands
        demands.available_repos = True
        demands.sack_activation = True
        demands.root_user = True
        # turn "all" into the explicit list so run() can test membership
        if self.opts.migrate == "all":
            self.opts.migrate = ["history", "groups", "yumdb"]
        else:
            self.opts.migrate = [self.opts.migrate]
    @staticmethod
    def set_argparser(parser):
        """Declare the optional positional argument selecting what to migrate."""
        parser.add_argument("migrate", nargs="?", action="store",
                            choices=["all", "history", "groups", "yumdb"],
                            default="all",
                            help=_("which kind of yum data migrate."))
    def run(self):
        """Execute the requested migrations."""
        if "history" in self.opts.migrate:
            self.migrate_history()
        # Run before groups migration since it creates YUMDB records
        # consisting of group_member only which results in some warnings
        # generated by YUMDB migration.
        if "yumdb" in self.opts.migrate:
            self.migrate_yumdb()
        if "groups" in self.opts.migrate:
            self.migrate_groups()
    def migrate_history(self):
        """Copy yum's transaction history into DNF's history database."""
        logger.info(_("Migrating history data..."))
        yum_history = YumHistory("/var/lib/yum/history", None)
        dnf_history = YumHistory(self.base.conf.persistdir + "/history", None)
        self.migrate_history_pkgs(yum_history, dnf_history)
        self.migrate_history_transction(yum_history, dnf_history)
        self.migrate_history_reorder(dnf_history)
    def migrate_history_pkgs(self, yum_hist, dnf_hist):
        """Copy every pkgtup plus its rpmdb/yumdb attributes into DNF."""
        yum_cur = yum_hist._get_cursor()
        yum_cur.execute("""
        select pkgtupid, name, arch, epoch, version, release, checksum
        from pkgtups""")
        for (pid, name, arch, epoch, version, release, checksum) \
            in yum_cur.fetchall():
            ypkg = YumHistoryPackage(name, arch, epoch, version, release,
                                     checksum)
            # NOTE(review): `pid` (yum's pkgtupid from the select above) is
            # rebound to DNF's id here, yet passed as the *yum*-side id to
            # migrate_history_pkgs_anydb below -- confirm the two id spaces
            # actually coincide.
            pid = dnf_hist.pkg2pid(ypkg)
            self.migrate_history_pkgs_anydb(yum_hist, dnf_hist, pid, ypkg, "rpm")
            self.migrate_history_pkgs_anydb(yum_hist, dnf_hist, pid, ypkg, "yum")
        dnf_hist._get_cursor()
        dnf_hist._commit()
    @staticmethod
    def migrate_history_pkgs_anydb(yum_hist, dnf_hist, yumid, pkg, dbname):
        """Copy one package's pkg_rpmdb/pkg_yumdb key/value rows to DNF."""
        yum_cur = yum_hist._get_cursor()
        select = """select {db}db_key, {db}db_val
        from pkg_{db}db where pkgtupid = ?""".format(db=dbname)
        yum_cur.execute(select, (yumid,))
        # drop any stale DNF-side rows before re-inserting
        dnf_hist._wipe_anydb(pkg, dbname)
        for row in yum_cur.fetchall():
            dnf_hist._save_anydb_key(pkg, dbname, row[0], row[1])
    @staticmethod
    def migrate_history_transction(yum_hist, dnf_hist):
        """Copy all yum transactions into DNF's history tables.

        (The method name keeps its historic typo; renaming would break
        any external callers.)
        """
        yum_trans_list = yum_hist.old()
        dnf_cur = dnf_hist._get_cursor()
        for t in yum_trans_list:
            # a transaction is identified by (timestamp, rpmdb_version, loginuid)
            dnf_cur.execute("""select 1 from trans_beg
                            where timestamp = ?
                            and rpmdb_version = ?
                            and loginuid = ?""",
                            (t.beg_timestamp, t.beg_rpmdbversion, t.loginuid))
            if dnf_cur.fetchone():
                # skip already migrated transactions
                continue
            dnf_cur.execute("""insert into trans_beg
                (timestamp, rpmdb_version, loginuid) values (?, ?, ?)""",
                (t.beg_timestamp, t.beg_rpmdbversion, t.loginuid))
            dnf_tid = dnf_cur.lastrowid
            if t.cmdline:
                dnf_cur.execute("""insert into trans_cmdline
                    (tid, cmdline) values (?, ?)""", (dnf_tid, t.cmdline))
            if t.end_timestamp:
                dnf_cur.execute("""insert into trans_end
                    (tid, timestamp, rpmdb_version, return_code)
                    values (?, ?, ?, ?)""",
                    (dnf_tid, t.end_timestamp, t.end_rpmdbversion,
                     t.return_code))
            for pkg in t.trans_with:
                pid = dnf_hist.pkg2pid(pkg)
                dnf_cur.execute("""insert into trans_with_pkgs
                    (tid, pkgtupid) values (?, ?)""", (dnf_tid, pid))
            for pkg in t.trans_data:
                pid = dnf_hist.pkg2pid(pkg)
                dnf_cur.execute("""insert into trans_data_pkgs
                    (tid, pkgtupid, done, state) values (?, ?, ?, ?)""",
                    (dnf_tid, pid, pkg.done, pkg.state))
            for pkg in t.trans_skip:
                pid = dnf_hist.pkg2pid(pkg)
                dnf_cur.execute("""insert into trans_skip_pkgs
                    (tid, pkgtupid) values (?, ?)""", (dnf_tid, pid))
            for prob in t.rpmdb_problems:
                dnf_cur.execute("""insert into trans_rpmdb_problems
                    (tid, problem, msg) values (?, ?, ?)""",
                    (dnf_tid, prob.problem, prob.text))
                rpid = dnf_cur.lastrowid
                for pkg in prob.packages:
                    pid = dnf_hist.pkg2pid(pkg)
                    dnf_cur.execute("""insert into trans_prob_pkgs
                        (rpid, pkgtupid, main) values (?, ?, ?)""",
                        (rpid, pid, pkg.main))
            for err in t.errors:
                dnf_cur.execute("""insert into trans_error
                    (tid, msg) values (?, ?)""", (dnf_tid, err))
            for msg in t.output:
                dnf_cur.execute("""insert into trans_script_stdout
                    (tid, line) values (?, ?)""", (dnf_tid, msg))
            # commit per transaction so a partial run stays resumable
            dnf_hist._commit()
    @staticmethod
    def migrate_history_reorder(dnf_hist):
        """Renumber migrated transactions so tids follow timestamp order."""
        dnf_cur = dnf_hist._get_cursor()
        dnf_cur.execute("""select max(tid) from trans_beg""")
        new_tid = dnf_cur.fetchone()[0]
        dnf_cur.execute("""select tid from trans_beg order by timestamp asc""")
        # assign fresh tids above the current maximum to avoid collisions
        for row in dnf_cur.fetchall():
            old_tid = row[0]
            new_tid += 1
            for table in ["trans_beg", "trans_cmdline", "trans_end",
                          "trans_with_pkgs", "trans_data_pkgs",
                          "trans_skip_pkgs", "trans_rpmdb_problems",
                          "trans_error", "trans_script_stdout"]:
                dnf_cur.execute("update %s set tid = ? where tid = ?" % table,
                                (new_tid, old_tid))
        dnf_hist._commit()
    def migrate_groups(self):
        """Mark yum-installed groups as installed in DNF."""
        # prefer the renamed binary on systems where dnf owns /usr/bin/yum
        yum_exec = "/usr/bin/yum-deprecated"
        if not os.path.exists(yum_exec):
            yum_exec = "/usr/bin/yum"
        logger.info(_("Migrating groups data..."))
        try:
            installed = self.get_yum_installed_groups(yum_exec)
        except subprocess.CalledProcessError:
            logger.warning(_("Execution of Yum failed. "
                             "Could not retrieve installed groups."))
            return
        if not installed:
            logger.info(_("No groups to migrate from Yum"))
            return
        # mark installed groups in dnf
        group_cmd = dnf.cli.commands.group.GroupCommand(self.cli)
        group_cmd._grp_setup()
        for group in installed:
            try:
                group_cmd._mark_install([group])
            except dnf.exceptions.CompsError as e:
                # skips not found groups, i.e. after fedup
                # when the group name changes / disappears in new distro
                logger.warning("%s, %s", dnf.i18n.ucd(e)[:-1], _("skipping."))
    @staticmethod
    def get_yum_installed_groups(yum_exec):
        """Ask yum (from its cache, ``-C``) for installed group names."""
        # force a stable, parseable locale for yum's output
        env_config = dict(os.environ, LANG="C", LC_ALL="C")
        with open(os.devnull, 'w') as devnull:
            output = dnf.i18n.ucd(subprocess.check_output(
                [yum_exec, "-q", "group", "list", "installed", "-C",
                 "--setopt=*.skip_if_unavailable=1"], stderr=devnull,
                env=env_config))
        return map(lambda l: l.lstrip(), output.splitlines())
    def migrate_yumdb(self):
        """Migrate YUMDB data."""
        # attributes to copy; the value says whether a missing attribute is
        # worth a warning (True) or merely a debug message (False)
        attribute2mandatory = {
            "changed_by": False, "checksum_data": True, "checksum_type": True,
            "command_line": False, "from_repo": True,
            "from_repo_revision": False, "from_repo_timestamp": False,
            "installed_by": False, "reason": True, "releasever": True}
        migrated = skipped = 0
        logger.info(_("Migrating YUMDB data..."))
        try:
            with contextlib.closing(_YumBase()) as yumbase:
                for pkgtup, pkgid in yumbase.iter_yumdb(logger.warning):
                    nevra = "{0[0]}-{0[3]}-{0[4]}.{0[1]}".format(pkgtup)
                    dnfdata = self.base.yumdb.get_package(
                        pkgtup=pkgtup, pkgid=pkgid)
                    # never overwrite records DNF has already created
                    if next(iter(dnfdata), None) is not None:
                        logger.warning("%s found in DNFDB; skipping", nevra)
                        skipped += 1
                        continue
                    yumdata = yumbase.rpmdb.yumdb.get_package(
                        pkgtup=pkgtup, pkgid=pkgid)
                    for attribute, mandat in attribute2mandatory.items():
                        try:
                            value = getattr(yumdata, attribute)
                        except AttributeError:
                            lvl = logging.WARNING if mandat else logging.DEBUG
                            msg = _("%s of %s not found")
                            logger.log(lvl, msg, attribute, nevra)
                            continue
                        if isinstance(value, bytes):
                            value = value.decode("utf-8", "replace")
                            if '\ufffd' in value:
                                msg = _(
                                    "replacing unknown characters in %s of %s")
                                logger.warning(msg, attribute, nevra)
                        try:
                            setattr(dnfdata, attribute, value)
                        except (OSError, IOError):
                            msg = _("DNFDB access denied")
                            raise dnf.exceptions.Error(msg)
                        logger.debug(_("%s of %s migrated"), attribute, nevra)
                    migrated += 1
        finally:
            # the summary is logged even if the migration aborts part-way
            logger.info(
                _("%d YUMDB records found, %d migrated, %d skipped/preserved"),
                migrated + skipped, migrated, skipped)
| jsilhan/dnf-plugins-core | plugins/migrate.py | Python | gpl-2.0 | 13,023 |
from concurrent import futures
from itertools import product
import numpy as np
import nifty
import nifty.graph.rag as nrag
def mask_corners(input_, halo):
    """Zero out the `halo`-sized corner regions of `input_` in place.

    For each of the 2**ndim corners of the array, a block of per-dimension
    extent ``halo`` is set to zero.  The (mutated) array is returned.

    :param input_: numpy array, modified in place
    :param halo: per-dimension corner extent; must have one entry per axis
    """
    ndim = input_.ndim
    shape = input_.shape
    # enumerate all corners as tuples of 0 (low side) / 1 (high side)
    corners = ndim * [[0, 1]]
    corners = product(*corners)
    for corner in corners:
        # bugfix: the zip order must match the (ha, co, sh) unpacking; the
        # original zipped (halo, shape, corner), so `co` held the shape and
        # the computed slices were empty or wrong
        corner_bb = tuple(slice(0, ha) if co == 0 else slice(sh - ha, sh)
                          for ha, co, sh in zip(halo, corner, shape))
        input_[corner_bb] = 0
    return input_
def dummy_agglomerator(affs, offsets, previous_segmentation=None,
                       previous_edges=None, previous_weights=None, return_state=False,
                       **parameters):
    """No-op agglomerator documenting the callable interface expected by
    `two_pass_agglomeration`.

    Consistency fix: the keyword is ``previous_edges`` (plural), matching
    the call site in pass 2; the old singular spelling is still absorbed
    by ``**parameters``, so existing callers keep working.
    """
    pass
def make_checkorboard(blocking):
    """Split the blocks of `blocking` into two checkerboard colourings.

    Returns two equally-sized lists of block ids such that no two blocks
    in the same list are direct neighbours.  NOTE(review): the flood fill
    below is recursive, so a very large blocking may hit Python's
    recursion limit; the function name also carries a historic typo
    ("checkorboard"), kept because callers use it.
    """
    blocks1 = [0]
    blocks2 = []
    all_blocks = [0]
    def recurse(current_block, insert_list):
        # alternate colours: neighbours of a block go into the other list
        other_list = blocks1 if insert_list is blocks2 else blocks2
        for dim in range(3):
            ngb_id = blocking.getNeighborId(current_block, dim, False)
            # -1 means no neighbour in that direction
            if ngb_id != -1:
                if ngb_id not in all_blocks:
                    insert_list.append(ngb_id)
                    all_blocks.append(ngb_id)
                    recurse(ngb_id, other_list)
    recurse(0, blocks2)
    all_blocks = blocks1 + blocks2
    # sanity checks: every block coloured exactly once, colours balanced
    expected = set(range(blocking.numberOfBlocks))
    assert len(all_blocks) == len(expected), "%i, %i" % (len(all_blocks), len(expected))
    assert len(set(all_blocks) - expected) == 0
    assert len(blocks1) == len(blocks2), "%i, %i" % (len(blocks1), len(blocks2))
    return blocks1, blocks2
# find segments in segmentation that originate from seeds
def get_assignments(segmentation, seeds):
    """Map each nonzero seed id to the segment id at the seed's position.

    :param segmentation: label array with the same shape as `seeds`
    :param seeds: seed label array; 0 stands for unseeded and is ignored
    :returns: (n_seeds, 2) array of ``[seed_id, segment_id]`` rows
    """
    seed_ids, seed_indices = np.unique(seeds, return_index=True)
    # drop the unseeded label (0) explicitly; the original sliced off the
    # first entry unconditionally, which silently dropped a real seed
    # whenever `seeds` contained no zero at all
    fg = seed_ids != 0
    seed_ids, seed_indices = seed_ids[fg], seed_indices[fg]
    # np.unique's return_index refers to the flattened array
    seg_ids = segmentation.ravel()[seed_indices]
    assignments = np.concatenate([seed_ids[:, None], seg_ids[:, None]], axis=1)
    return assignments
def two_pass_agglomeration(affinities, offsets, agglomerator,
                           block_shape, halo, n_threads):
    """ Run two-pass agglommeration

    Pass 1 segments one checkerboard colour of blocks independently and
    keeps the agglomerator state (edges + weights).  Pass 2 segments the
    other colour, seeded by the pass-1 results read through `halo`, and a
    union-find over the seed assignments finally yields one consistent
    labeling.

    :param affinities: 4d array; first axis must have len(offsets) channels
    :param agglomerator: callable with the signature of `dummy_agglomerator`
    :param block_shape: 3d shape of the processing blocks
    :param halo: per-axis halo the pass-2 blocks read from their neighbours
    :param n_threads: number of worker threads used in each pass
    :returns: 3d uint64 segmentation of shape ``affinities.shape[1:]``
    """
    assert affinities.ndim == 4
    assert affinities.shape[0] == len(offsets)
    assert callable(agglomerator)
    assert len(block_shape) == len(halo) == 3
    shape = affinities.shape[1:]
    blocking = nifty.tools.blocking([0, 0, 0], list(shape), list(block_shape))
    # per-block label offset; assumes a block never produces more labels
    # than it has voxels
    block_size = np.prod(block_shape)
    segmentation = np.zeros(shape, dtype='uint64')
    # calculations for pass 1:
    #
    def pass1(block_id):
        # TODO we could already add some halo here, that might help to make results more consistent
        # load the affinities from the current block
        block = blocking.getBlock(block_id)
        bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
        aff_bb = (slice(None),) + bb
        # mutex watershed changes the affs, so we need to copy here
        affs = affinities[aff_bb].copy()
        # get the segmentation and state from our agglomeration function
        seg, state = agglomerator(affs, offsets, return_state=True)
        # offset the segmentation with the lowest block coordinate to
        # make segmentation ids unique
        id_offset = block_id * block_size
        seg += id_offset
        uvs, weights = state
        uvs += id_offset
        # write out the segmentation
        segmentation[bb] = seg
        # compute the state of the segmentation and return it
        return uvs, weights
    # get blocks corresponding to the two checkerboard colorings
    blocks1, blocks2 = make_checkorboard(blocking)
    # pass-1 blocks are disjoint, so the threads write disjoint regions
    with futures.ThreadPoolExecutor(n_threads) as tp:
        tasks = [tp.submit(pass1, block_id) for block_id in blocks1]
        results = [t.result() for t in tasks]
    # results = [pass1(block_id) for block_id in blocks1]
    # combine results and build graph corresponding to it
    uvs = np.concatenate([res[0] for res in results], axis=0)
    n_labels = int(uvs.max()) + 1
    graph = nifty.graph.undirectedGraph(n_labels)
    graph.insertEdges(uvs)
    weights = np.concatenate([res[1] for res in results], axis=0)
    assert len(uvs) == len(weights)
    # calculations for pass 2:
    #
    def pass2(block_id):
        # load segmentation from pass1 from the current block with halo
        block = blocking.getBlockWithHalo(block_id, list(halo))
        bb = tuple(slice(beg, end) for beg, end in zip(block.outerBlock.begin, block.outerBlock.end))
        seg = segmentation[bb]
        # mask the corners, because these are not part of the seeds, and could already be written by path 2
        seg = mask_corners(seg, halo)
        # load affinties
        aff_bb = (slice(None),) + bb
        # mutex watershed changes the affs, so we need to copy here
        affs = affinities[aff_bb].copy()
        # get the state of the segmentation from pass 1
        # TODO maybe there is a better option than doing this with the rag
        rag = nrag.gridRag(seg, numberOfLabels=int(seg.max() + 1), numberOfThreads=1)
        prev_uv_ids = rag.uvIds()
        # drop edges touching the unseeded (0) region
        prev_uv_ids = prev_uv_ids[(prev_uv_ids != 0).all(axis=1)]
        edge_ids = graph.findEdges(prev_uv_ids)
        assert len(edge_ids) == len(prev_uv_ids), "%i, %i" % (len(edge_ids), len(prev_uv_ids))
        assert (edge_ids != -1).all()
        prev_weights = weights[edge_ids]
        assert len(prev_uv_ids) == len(prev_weights)
        # call the agglomerator with state
        new_seg = agglomerator(affs, offsets, previous_segmentation=seg,
                               previous_edges=prev_uv_ids, previous_weights=prev_weights)
        # offset the segmentation with the lowest block coordinate to
        # make segmentation ids unique
        # NOTE(review): this also offsets voxels carrying seed labels;
        # consistency is restored below via the union-find -- confirm
        id_offset = block_id * block_size
        new_seg += id_offset
        # find the assignments to seed ids
        assignments = get_assignments(new_seg, seg)
        # write out the segmentation (only the inner, halo-free part)
        inner_bb = tuple(slice(beg, end) for beg, end in zip(block.innerBlock.begin, block.innerBlock.end))
        local_bb = tuple(slice(beg, end) for beg, end in zip(block.innerBlockLocal.begin, block.innerBlockLocal.end))
        segmentation[inner_bb] = new_seg[local_bb]
        return assignments
    with futures.ThreadPoolExecutor(n_threads) as tp:
        tasks = [tp.submit(pass2, block_id) for block_id in blocks2]
        results = [t.result() for t in tasks]
    # results = [pass2(block_id) for block_id in blocks2]
    assignments = np.concatenate(results)
    # get consistent labeling with union find
    n_labels = int(segmentation.max()) + 1
    ufd = nifty.ufd.ufd(n_labels)
    ufd.merge(assignments)
    labeling = ufd.elementLabeling()
    segmentation = nifty.tools.take(labeling, segmentation)
    return segmentation
| DerThorsten/nifty | two_pass_agglomeration.py | Python | mit | 6,832 |
from queue import Queue
import os.path
from coalib.settings.Section import Section
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.testing.LocalBearTestHelper import (
LocalBearTestHelper, verify_local_bear)
from bears.python.BanditBear import BanditBear
def get_testfile_path(name):
    """Return the path of *name* inside the bandit test-file directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'bandit_test_files', name)
def load_testfile(name, splitlines=False):
    """Read a bandit test file and return its contents as a string, or as
    a list of lines (keeping line endings) when *splitlines* is set."""
    with open(get_testfile_path(name)) as handle:
        text = handle.read()
    return text.splitlines(True) if splitlines else text
def gen_check(testfilename, expected_results):
    """Build a test method asserting that BanditBear emits exactly
    *expected_results* for *testfilename*."""
    def test_function(self):
        bear = BanditBear(Section(''), Queue())
        lines = load_testfile(testfilename, True)
        self.check_results(bear, lines, expected_results,
                           get_testfile_path(testfilename),
                           create_tempfile=False)
    return test_function
class BanditBearTest(LocalBearTestHelper):
    """Checks BanditBear's results on the bundled test files."""
    # plain `assert` triggers B101 (stripped under optimized byte code)
    test_assert = gen_check(
        'assert.py',
        [Result.from_values('B101', 'Use of assert detected. The enclosed '
                            'code will be removed when compiling to optimised '
                            'byte code.', get_testfile_path('assert.py'), 1,
                            end_line=1, severity=RESULT_SEVERITY.INFO,
                            confidence=90)])
    # a python2-only file cannot be parsed; the bear reports a syntax error
    test_exec_py2_py = gen_check(
        'exec-py2.py',
        [Result.from_values('BanditBear', 'syntax error while parsing AST '
                            'from file', get_testfile_path('exec-py2.py'),
                            severity=RESULT_SEVERITY.MAJOR)])
    # four B701 findings with differing confidence/line ranges
    test_jinja2_templating = gen_check(
        'jinja2_templating.py',
        [Result.from_values('B701', 'Using jinja2 templates with '
                            'autoescape=False is dangerous and can lead to '
                            'XSS. Ensure autoescape=True to mitigate XSS '
                            'vulnerabilities.',
                            get_testfile_path('jinja2_templating.py'), 9,
                            end_line=9, severity=RESULT_SEVERITY.MAJOR,
                            confidence=70),
         Result.from_values('B701', 'Using jinja2 templates with '
                            'autoescape=False is dangerous and can lead to '
                            'XSS. Use autoescape=True to mitigate XSS '
                            'vulnerabilities.',
                            get_testfile_path('jinja2_templating.py'), 10,
                            end_line=10, severity=RESULT_SEVERITY.MAJOR,
                            confidence=90),
         Result.from_values('B701', 'Using jinja2 templates with '
                            'autoescape=False is dangerous and can lead to '
                            'XSS. Use autoescape=True to mitigate XSS '
                            'vulnerabilities.',
                            get_testfile_path('jinja2_templating.py'), 11,
                            end_line=13, severity=RESULT_SEVERITY.MAJOR,
                            confidence=90),
         Result.from_values('B701', 'By default, jinja2 sets autoescape to '
                            'False. Consider using autoescape=True to '
                            'mitigate XSS vulnerabilities.',
                            get_testfile_path('jinja2_templating.py'), 15,
                            end_line=16, severity=RESULT_SEVERITY.MAJOR,
                            confidence=90)])
# The following test will ignore some error codes, so "good" and "bad" doesn't
# reflect the actual code quality.
good_files = ('good_file.py', 'assert.py')
bad_files = ('exec-py2.py', 'httpoxy_cgihandler.py', 'jinja2_templating.py',
             'skip.py')
# assert.py counts as "good" above only because B101 is skipped below
skipped_error_codes = ['B105', 'B106', 'B107', 'B404', 'B606', 'B607', 'B101']
BanditBearSkipErrorCodesTest1 = verify_local_bear(
    BanditBear,
    valid_files=tuple(load_testfile(file) for file in good_files),
    invalid_files=tuple(load_testfile(file) for file in bad_files),
    settings={'bandit_skipped_tests': ','.join(skipped_error_codes)},
    tempfile_kwargs={'suffix': '.py'})
# with no skipped checks, assert.py becomes a "bad" file again
good_files = ('good_file.py',)
bad_files = ('exec-py2.py', 'httpoxy_cgihandler.py', 'jinja2_templating.py',
             'skip.py', 'assert.py')
BanditBearSkipErrorCodesTest2 = verify_local_bear(
    BanditBear,
    valid_files=tuple(load_testfile(file) for file in good_files),
    invalid_files=tuple(load_testfile(file) for file in bad_files),
    settings={'bandit_skipped_tests': ''},
    tempfile_kwargs={'suffix': '.py'})
| refeed/coala-bears | tests/python/BanditBearTest.py | Python | agpl-3.0 | 4,632 |
#!/usr/bin/python
import sys, os, yaml, pickle, subprocess, time
# Load whichever Qt binding is available and paper over the API differences
# so the rest of the module can use the PySide-style names.
try:
    from PyQt4 import QtCore, QtGui
    # PyQt spells these pyqtSignal/pyqtSlot; alias to the PySide names
    QtCore.Signal = QtCore.pyqtSignal
    QtCore.Slot = QtCore.pyqtSlot
except ImportError:
    try:
        from PySide import QtCore, QtGui
        # PySide has no QString; plain str stands in for it
        QtCore.QString = str
    except ImportError:
        raise ImportError("Cannot load either PyQt or PySide")
##
# A helper function for printYaml (private)
# Prints a Yaml data dictionary to the screen
# @param data The dictionary to print
# @param level The indentation level to utilize
# @see printYaml
# NOTE: Python 2 module (uses print statements).
def _printYamlDict(data, level=0):
    # Indent two spaces for each level
    indent = '  '
    # Iterate through the dictionary items
    for key,value in data.items():
        # Print the name as a header
        if key == 'name':
            print indent*level + str(value)
        # The subblocks contain additional dictionaries; loop
        # through each one and print at an increased indentation
        elif key == 'subblocks':
            if value != None:
                for v in value:
                    _printYamlDict(v, level+1)
        # The parameters contain additional dictionaries; loop
        # through the parameters and place the output under a parameter section
        elif key == 'parameters':
            print indent*(level+1) + 'parameters:'
            if value != None:
                for v in value:
                    _printYamlDict(v, level+2)
        # The default case, print the key value pairings
        else:
            print (indent*(level+1) + str(key) + " = " + str(value)).rstrip('\n')
##
# A function for printing the YAML information to the screen (public)
# @param data The YAML dump data (returned by GenSyntax::GetSyntax)
# @param name Limits the output based on the supplied string, if the
#             supplied name is anywhere in the 'name' parameter of the
#             top level YAML data the corresponding dictionary is printed (optional)
def printYaml(data, name = None):
    # Print all output
    # idiom fix: identity test against None instead of "== None"
    if name is None:
        for d in data:
            _printYamlDict(d)
    # Only print data that contains the given name string
    else:
        for d in data:
            if name in d['name']:
                _printYamlDict(d)
class GenSyntax():
    """Obtain and cache the ``--yaml`` syntax dump of an application.

    NOTE: Python 2 class (print statements, xrange).
    """
    def __init__(self, qt_app, app_path, use_cached_syntax):
        self.qt_app = qt_app
        self.app_path = app_path
        self.use_cached_syntax = use_cached_syntax
        # in-memory caches of the last raw dump and its parsed form
        self.saved_raw_data = None
        self.saved_data = None
    def GetSyntax(self, recache):
        """Return the parsed YAML syntax, regenerating the on-disk pickle
        caches when `recache` is set or the raw dump has changed."""
        if not self.use_cached_syntax and not os.path.isfile(self.app_path):
            print 'ERROR: Executable ' + self.app_path + ' not found!'
            sys.exit(1)
        self.executable = os.path.basename(self.app_path)
        self.executable_path = os.path.dirname(self.app_path)
        # cache files live next to the executable
        yaml_dump_file_name = self.executable_path + '/yaml_dump_' + self.executable
        raw_yaml_dump_file_name = self.executable_path + '/yaml_dump_' + self.executable + '_raw'
        raw_data = self.getRawDump()
        if not self.saved_raw_data:
            if os.path.isfile(raw_yaml_dump_file_name):
                self.saved_raw_data = pickle.load(open(raw_yaml_dump_file_name, 'rb'))
            else:
                recache = True
        if not recache:
            if self.saved_raw_data != raw_data: # If the yaml has changed - force a recache
                recache = True
            elif self.saved_data: #If we have currently loaded data - just return it!
                return self.saved_data
        if recache or not os.path.exists(yaml_dump_file_name) or not os.path.exists(raw_yaml_dump_file_name):
            # parsing the dump is slow, so show a modal progress dialog
            progress = QtGui.QProgressDialog("Recaching Syntax...", "Abort", 0, 10, None)
            progress.setWindowModality(QtCore.Qt.WindowModal)
            progress.show()
            progress.raise_()
            for i in xrange(0,7):
                progress.setValue(i)
                self.qt_app.processEvents()
                self.qt_app.flush()
            pickle.dump(raw_data, open(raw_yaml_dump_file_name, 'wb'))
            self.saved_raw_data = raw_data
            data = yaml.load(raw_data)
            pickle.dump(data, open(yaml_dump_file_name, 'wb'))
            progress.setValue(8)
            progress.setValue(9)
            progress.setValue(10)
        else:
            data = pickle.load(open(yaml_dump_file_name, 'rb'))
        self.saved_data = data
        return data
    def getRawDump(self):
        """Return the raw text between the YAML begin/end markers, either
        by running the app with ``--yaml`` or from the pickled cache."""
        if not self.use_cached_syntax:
            try:
                data = subprocess.Popen([self.app_path, '--yaml'], stdout=subprocess.PIPE).communicate()[0]
            except:
                print '\n\nPeacock: Error executing ' + self.app_path + '\nPlease make sure your application is built and able to execute with the "--yaml" flag'
                sys.exit(1)
            # keep only the payload between the START/END markers
            data = data.split('**START YAML DATA**\n')[1]
            data = data.split('**END YAML DATA**')[0]
        else:
            data = pickle.load(open(self.executable_path + '/yaml_dump_' + self.executable + '_raw', 'rb'))
        return data
    def massage_data(self, data):
        """Qualify Executioner/InitialCondition block names with their type
        ('ALL' when the type is unset)."""
        for block in data:
            name = block['name']
            if name == 'Executioner' or name == 'InitialCondition':
                curr_type = str(block['type'])
                if curr_type == 'None':
                    curr_type = 'ALL'
                block['name'] = name + '/' + curr_type
        return data
| Chuban/moose | gui/utils/GenSyntax.py | Python | lgpl-2.1 | 5,514 |
# TODO:
# - handle UTF-8 inputs correctly
from pylab import *
from collections import Counter,defaultdict
import glob,re,heapq,os
import codecs
def method(cls):
    """Adds the function as a method to the given class.

    Python 2 only: relies on the removed `new` module and `func_name`.
    NOTE(review): assigning into ``cls.__dict__`` is rejected for
    new-style classes (dictproxy is read-only) -- confirm the decorated
    classes here are old-style.
    """
    import new
    def _wrap(f):
        cls.__dict__[f.func_name] = new.instancemethod(f,None,cls)
        return None
    return _wrap
# Text-normalization rules: (regex, substitution) pairs applied in order
# with re.sub(..., flags=re.U) by NGraphsCounts.lineproc.
replacements = [
    (r'[\0-\x1f]',''), # get rid of weird control characters
    (r'\s+',' '), # replace multiple spaces
    (r'[~]',""), # replace rejects with nothing
    # single quotation marks
    (r"`","'"), # grave accent
    (u"\u00b4","'"), # acute accent
    (u"\u2018","'"), # left single quotation mark
    (u"\u2019","'"), # right single quotation mark
    (u"\u017f","s"), # Fraktur "s" glyph
    (u"\u021a",","), # single low quotation mark
    # double quotation marks
    (r'"',"''"), # typewriter double quote
    (r'``',"''"), # replace fancy double quotes
    (r"``","''"), # grave accents used as quotes
    (r'"',"''"), # replace fancy double quotes
    (u"\u201c","''"), # left double quotation mark
    (u"\u201d","''"), # right double quotation mark
    (u"\u201e",",,"), # lower double quotation mark
    (u"\u201f","''"), # reversed double quotation mark
    ]
# Stronger variant that also collapses digits and maps all remaining
# special characters to a single '!' representative.
replacements2 = replacements + [
    (r'[0-9]','9'), # don't try to model individual digit frequencies
    (r'[^-=A-Za-z0-9.,?:()"/\' ]','!'), # model other special characters just as '!'
]
def rsample(dist):
    """Sample an index from the discrete distribution `dist`.

    `dist` must sum to (approximately) one; the draw inverts the
    cumulative distribution with a single uniform random number.
    """
    cdf = cumsum(dist)
    # refuse unnormalized distributions
    assert abs(cdf[-1] - 1) < 1e-3
    return searchsorted(cdf, rand())
def safe_readlines(stream,nonl=0):
once = 0
for lineno in xrange(100000000):
try:
line = stream.readline()
except UnicodeDecodeError as e:
if not once: print lineno,":",e
once = 1
return
if line is None: return
if nonl and line[-1]=="\n": line = line[:-1]
yield line
class NGraphsCounts:
    """Count n-graphs (character n-grams) over a corpus of text files.

    NOTE: Python 2 class (print statements, unicode type).
    """
    def __init__(self,N=3,replacements=replacements):
        self.N = N
        self.replacements = replacements
        # cost charged to the reject class "~" for unknown contexts
        self.missing = {"~":15.0}
    def lineproc(self,s):
        """Preprocessing for the line (and also lattice output strings).
        This is used to normalize quotes, remove illegal characters,
        and collapse some character classes (e.g., digits) into a single
        representative."""
        for regex,subst in self.replacements:
            s = re.sub(regex,subst,s,flags=re.U)
        return s
    def computeNGraphs(self,fnames,n):
        """Given a set of text file names, compute a counter
        of n-graphs in those files, after performing the regular
        expression edits in `self.replacements`.

        Entries of `fnames` of the form ``lineskip=K`` / ``linelimit=K``
        are not file names; they adjust how the *following* files are
        read.  Lines are padded with "_" so edge n-graphs get counted.
        """
        counter = Counter()
        lineskip = 0
        linelimit = 2000
        for fnum,fname in enumerate(fnames):
            print fnum,"of",len(fnames),":",fname
            if fname.startswith("lineskip="):
                lineskip = int(fname.split("=")[1])
                print "changing lineskip to",lineskip
                continue
            if fname.startswith("linelimit="):
                linelimit = int(fname.split("=")[1])
                print "changing linelimit to",linelimit
                continue
            with codecs.open(fname,"r","utf-8") as stream:
                for lineno,line in enumerate(safe_readlines(stream)):
                    assert type(line)==unicode
                    if lineno<lineskip: continue
                    if lineno>=linelimit+lineskip: break
                    # drop the trailing newline
                    line = line[:-1]
                    if len(line)<3: continue
                    line = self.lineproc(line)
                    line = "_"*(n-1)+line+"_"*(n-1)
                    for i in range(len(line)-n):
                        sub = line[i:i+n]
                        counter[sub] += 1
        return counter
class NGraphs(NGraphsCounts):
    """A class representing n-graph models, that is
    $P(c_i | c_{i-1} ... c_{i_n})$, where the $c_i$ are
    characters.

    Despite the name, `lposteriors` maps each (N-1)-character context to
    a dict of *negative* log probabilities (costs), including a smoothed
    reject entry "~".  (Python 2 class: print statements.)
    """
    def __init__(self,*args,**kw):
        NGraphsCounts.__init__(self,*args,**kw)
    def buildFromFiles(self,fnames,n):
        """Given a set of files, build the log posteriors."""
        print "reading",len(fnames),"files"
        counter = self.computeNGraphs(fnames,n)
        print "got",sum(counter.values()),"%d-graphs"%(n,)
        self.computePosteriors(counter)
        print "done building lposteriors"
    def computePosteriors(self,counter):
        """Given a `counter` of all n-graphs, compute
        (log) conditional probabilities."""
        # infer n from the keys, then group counts by their (n-1)-prefix
        self.N = len(counter.items()[0][0])
        ngrams = defaultdict(list)
        for k,v in counter.items():
            ngrams[k[:-1]].append((k,v))
        lposteriors = {}
        for prefix in ngrams.keys():
            # add-one smoothed reject class "~"
            ps = [(k[-1],v) for k,v in ngrams[prefix]] + [("~",1)]
            total = sum([v for k,v in ps])
            total = log(total)
            # cost = log(total) - log(count) = -log(count/total)
            ps = {k : total-log(v) for k,v in ps}
            lposteriors[prefix] = ps
        self.lposteriors = lposteriors
    def sample(self,n=80,prefix=None):
        """Sample from the n-graph model. This gives a fairly
        good impression of how well the n-graph model models
        text."""
        if prefix is None:
            prefix = "_"*self.N
        for i in range(n):
            lposteriors = self.lposteriors.get(prefix[-self.N+1:])
            if lposteriors is None:
                # unknown context: fall back to a uniform random letter
                prefix += chr(ord("a")+int(rand()*26))
            else:
                # drop reject/padding classes, re-add a space candidate
                items = [(k,p) for k,p in lposteriors.items() if k not in ["~","_"]]
                items += [(" ",10.0)]
                ks = [k for k,p in items]
                ps = array([p for k,p in items],'f')
                # convert costs back to probabilities before sampling
                ps = exp(-ps)
                ps /= sum(ps)
                j = rsample(ps)
                prefix += ks[j]
        return prefix[self.N:]
    def getLogPosteriors(self,s):
        """Return a dictionary mapping characters in the given context
        to negative log posterior probabilities."""
        prefix = self.lineproc(s)[-self.N+1:]
        return self.lposteriors.get(prefix,self.missing)
    def getBestGuesses(self,s,nother=5):
        """Get guesses for what the next character might be based on the current path."""
        lposteriors = self.getLogPosteriors(s)
        # smallest costs first; drop the reject class from the guesses
        best = sorted(lposteriors.items(),key=lambda x:x[1])[:nother]
        best = [(cls,p) for cls,p in best if cls!="~"]
        return best
class ComboDict:
    """Read-only layered lookup over several dictionaries.

    The dictionaries are consulted front to back; the first value that is
    not None wins.  Note that a stored None is indistinguishable from a
    missing key.
    """

    def __init__(self, dicts):
        self.dicts = dicts

    def get(self, key, dflt=None):
        """Return the first non-None value stored under `key`, else `dflt`."""
        for layer in self.dicts:
            value = layer.get(key)
            if value is not None:
                return value
        return dflt
class NGraphsBackoff:
    """Combine two n-graph models: consult `primary` first and fall back
    to `secondary` for contexts the primary has never seen."""

    def __init__(self, primary, secondary):
        self.primary = primary
        self.secondary = secondary
        # context length is that of the longer of the two models
        self.N = max(primary.N, secondary.N)
        # cost assigned to the reject class for unknown contexts
        self.missing = {"~": 15.0}

    def lineproc(self, s):
        """Normalize `s` using the primary model's preprocessing."""
        return self.primary.lineproc(s)

    def getLogPosteriors(self, s):
        """Return a layered posterior lookup: primary first, then secondary."""
        # propagate our reject cost into both models before querying them
        for model in (self.primary, self.secondary):
            model.missing = self.missing
        layered = [self.primary.getLogPosteriors(s),
                   self.secondary.getLogPosteriors(s)]
        return ComboDict(layered)

    def getBestGuesses(self, s, nother=5):
        """Delegate best-guess ranking to the primary model."""
        return self.primary.getBestGuesses(s, nother=nother)
| brobertson/ocropus-bgr | ocropy/ocrolib/ngraphs.py | Python | apache-2.0 | 7,345 |
"""
Implementation of the RESTful endpoints for the Course About API.
"""
from rest_framework.throttling import UserRateThrottle
from rest_framework.views import APIView
from course_about import api
from rest_framework import status
from rest_framework.response import Response
from course_about.errors import CourseNotFoundError, CourseAboutError
class CourseAboutThrottle(UserRateThrottle):
    """Limit the number of requests users can make to the Course About API."""
    # TODO Limit based on expected throughput  # pylint: disable=fixme
    rate = '50/second'
class CourseAboutView(APIView):
    """ RESTful Course About API view.
    Used to retrieve JSON serialized Course About information.
    """
    # open endpoint: no authentication or permission checks
    authentication_classes = []
    permission_classes = []
    # trailing comma makes this a one-element tuple
    throttle_classes = CourseAboutThrottle,
    def get(self, request, course_id=None): # pylint: disable=unused-argument
        """Read course information.
        HTTP Endpoint for course info api.
        Args:
            Course Id = URI element specifying the course location. Course information will be
                returned for this particular course.
        Return:
            A JSON serialized representation of the course information.
            Unknown course ids map to HTTP 404; any other course-about
            failure maps to HTTP 500.
        """
        try:
            return Response(api.get_course_about_details(course_id))
        except CourseNotFoundError:
            return Response(
                status=status.HTTP_404_NOT_FOUND,
                data={
                    "message": (
                        u"An error occurred while retrieving course information"
                        u" for course '{course_id}' no course found"
                    ).format(course_id=course_id)
                }
            )
        except CourseAboutError:
            return Response(
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
                data={
                    "message": (
                        u"An error occurred while retrieving course information"
                        u" for course '{course_id}'"
                    ).format(course_id=course_id)
                }
            )
| olexiim/edx-platform | common/djangoapps/course_about/views.py | Python | agpl-3.0 | 2,124 |
import copy
import json
import github
from groundstation.gref import Tip
from groundstation.protocols.github import _identifier_, AbstractGithubAdaptor
from groundstation.objects.root_object import RootObject
from groundstation.objects.update_object import UpdateObject
from groundstation import logger
log = logger.getLogger(__name__)
class GithubWriteAdaptor(AbstractGithubAdaptor):
    """GithubAdaptor(station, gh)
    Accepts a station and a github repo object (from PyGithub)

    Serializes a GitHub issue (title, body, comments and events) into a
    linear chain of objects under the issue's gref.
    """
    protocol = _identifier_
    @property
    def repo_name(self):
        # grefs use "_" in place of "/" in repository names
        return self.repo.full_name.replace("/", "_")
    def write_issue(self, issue):
        # Stupid implementation, blindly write with no deduping or merge
        # resolution.
        parents = []
        issue_id = self._issue_id(issue.number)
        gref = self.issue_gref(issue.number)
        def _write_new_tip(obj):
            # drain `parents` into this object's parent list, write the
            # object, then leave the new oid as the sole parent
            our_parents = []
            while parents:
                our_parents.append(parents.pop())
            log.debug("Creating new object with parents: %s" % (str(our_parents)))
            oid = self.station.write(obj.as_object())
            self.station.update_gref(gref, [Tip(oid, "")], our_parents)
            parents.append(oid)
            log.debug("Setting parents to: %s" % (str(parents)))
        def _parents():
            # snapshot of the current parent list
            return copy.copy(parents)
        # Bail out if we've already written:
        if gref.exists():
            log.info("Not creating any objects, a gref already exists at: %s" % str(gref))
            return False
        # Write out a root object
        log.info(("Creating a new root_object with:\n" +
                  "id: %s\n" +
                  "channel: %s\n" +
                  "protocol: %s") % (issue_id, self.channel, self.protocol))
        root_object = RootObject(issue_id, self.channel, self.protocol)
        _write_new_tip(root_object)
        # Write out the initial state
        # Creating lots of tiny objects should make deduping easier later
        title_payload = {
            "type": "title",
            "id": None,
            "body": issue.title,
            "user": issue.user.login
        }
        update_object = UpdateObject(_parents(), json.dumps(title_payload))
        _write_new_tip(update_object)
        # Write out the body of the issue
        body_payload = {
            "type": "body",
            "id": None,
            "body": issue.body
        }
        update_object = UpdateObject(_parents(), json.dumps(body_payload))
        _write_new_tip(update_object)
        # Write out all of the comments and events
        everything = []
        everything.extend(issue.get_comments())
        everything.extend(issue.get_events())
        # interleave comments and events chronologically
        everything.sort(key=lambda x: x.created_at)
        for item in everything:
            if isinstance(item, github.IssueComment.IssueComment):
                payload = {
                    "type": "comment",
                    "id": item.id,
                    "body": item.body,
                    "user": item.user.login
                }
            elif isinstance(item, github.IssueEvent.IssueEvent):
                payload = {
                    "type": "event",
                    "id": item.id,
                    "state": item.event,
                    "user": item.actor.login
                }
            else:
                raise Exception("Unhandled item %s" % (repr(item)))
            update_object = UpdateObject(_parents(), json.dumps(payload))
            _write_new_tip(update_object)
| richo/groundstation | groundstation/protocols/github/write_adaptor.py | Python | mit | 3,653 |
import botologist.plugin
class QlredditPlugin(botologist.plugin.Plugin):
    """Canned replies for the #qlreddit channel."""

    @botologist.plugin.reply()
    def opa_opa(self, msg):
        """Link the 'opa opa' video whenever the phrase is mentioned."""
        if 'opa opa' not in msg.message.lower():
            return None
        return 'https://www.youtube.com/watch?v=Dqzrofdwi-g'

    @botologist.plugin.reply()
    def locomotion(self, msg):
        """Link the 'locomotion' video whenever the word is mentioned."""
        if 'locomotion' not in msg.message.lower():
            return None
        return 'https://www.youtube.com/watch?v=dgjc-6L0Wm4#t=5'
| x89/botologist | plugins/qlreddit.py | Python | mit | 408 |
from structure import *
from ark import *
| facepalm/kivy-colony-game | structures/__init__.py | Python | gpl-3.0 | 43 |
import sys
class ORFFinder:
  """Find the longest ORF in a given sequence.

  "seq" is a string; if "start" is not provided any codon can be the start of
  an ORF. If multiple ORFs have the longest length the first one encountered
  is kept.

  NOTE(review): this class is Python 2 only (it calls generator ``.next()``).
  """
  def __init__(self, seq):
    # Normalise to upper case so codon comparisons are case-insensitive.
    self.seq = seq.upper()
    # result: (strand, frame(1-based), orf_start, orf_end, length)
    self.result = ("+",0,0,0,0)
    # Length of the longest ORF found so far.
    self.winner = 0
  def _reverse_comp(self):
    # Reverse complement; N and X are preserved as-is.
    swap = {"A":"T", "T":"A", "C":"G", "G":"C","N":"N","X":"X"}
    return "".join(swap[b] for b in self.seq)[::-1]
  def codons(self, frame):
    """ A generator that yields DNA in one codon blocks.

    "frame" counts from 0. This function yields a tuple (triplet, index) with
    index relative to the original DNA sequence.
    """
    start = frame
    while start + 3 <= len(self.seq):
      yield (self.seq[start:start+3], start)
      start += 3
  def run_one(self, frame_number, direction,start_coden, stop_coden):
    """ Search in one reading frame, updating self.result/self.winner. """
    codon_gen = self.codons(frame_number)
    start_codens = start_coden
    stop_codens = stop_coden
    while True:
      try:
        c , index = codon_gen.next()
      except StopIteration:
        break
      # Lots of conditions here: checks if we care about looking for start
      # codon then that codon is not a stop.
      # NOTE(review): precedence is `c in start or (not start and c not in stop)`,
      # so with a non-empty start_codens list only the first clause matters.
      if c in start_codens or not start_codens and c not in stop_codens:
        orf_start = index # we'll return the result as 0-indexed
        end = False
        while True:
          try:
            c, index = codon_gen.next()
          except StopIteration:
            # Ran off the end of the sequence: the ORF extends to the last
            # full codon (c/index still hold the previous codon here).
            end = True
          if c in stop_codens:
            end = True
          if end:
            orf_end = index + 3 # because index is relative to start of codon
            L = (orf_end - orf_start)
            if L > self.winner:
              self.winner = L
              self.result = (direction, frame_number+1, orf_start, orf_end, L)
            break
  def longest_orf(self,direction,start_coden=['ATG'], stop_coden=['TAG','TAA','TGA']):
    # NOTE(review): mutable default arguments -- harmless here because the
    # lists are never mutated, but worth keeping in mind.
    if direction == "+":
      for frame in range(3):
        self.run_one(frame, direction,start_coden, stop_coden)
      return (self.result[4], self.result[1],self.seq[self.result[2]:self.result[3]]) #CDS length, coding frame, CDS sequence
    if direction == "-":
      # Search the reverse-complement strand; coordinates are relative to it.
      self.seq = self._reverse_comp()
      for frame in range(3):
        self.run_one(frame, direction,start_coden, stop_coden)
      return (self.result[4], self.result[1],self.seq[self.result[2]:self.result[3]]) #CDS length, coding frame, CDS sequence
#===================
def little_test():
  """Read a FASTA-style file named by sys.argv[1], concatenate all
  non-header lines into one sequence, and print length/frame/sequence of
  the longest ORF on the strand given by sys.argv[2] ('+' or '-')."""
  seq=''
  for line in open(sys.argv[1],'r'):
    line=line.rstrip('\n\r')
    if line.startswith('>'):
      # Skip FASTA header lines.
      continue
    seq += line
  (l,f,s) = ORFFinder(seq).longest_orf(sys.argv[2])
  print str(l) + '\t' + str(f) + '\t' + s

if __name__ == "__main__":
  little_test()
# coding: utf-8
# Copyright (C) 2018-Today: GRAP (http://www.grap.coop)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
    'name': 'CAE - Project Module',
    'version': '8.0.1.0.0',
    'category': 'CAE',
    'summary': 'Manage Cooperatives of Activities and Employment',
    'author': 'GRAP',
    'website': 'http://www.grap.coop',
    'license': 'AGPL-3',
    # Modules that must be installed for this one to load.
    'depends': [
        'base_fiscal_company',
        'project',
    ],
    # XML files loaded (in this order) on install/upgrade.
    'data': [
        'views/view_project_category.xml',
        'views/view_project_project.xml',
        'views/view_project_task.xml',
        'views/view_project_task_type.xml',
        'security/ir_rule.xml',
    ],
    'installable': True,
    # Installed automatically as soon as all dependencies are present.
    'auto_install': True,
}
| grap/odoo-addons-cis | project_fiscal_company/__openerp__.py | Python | gpl-3.0 | 793 |
#!/usr/bin/env python
# encoding: utf-8
from collections import OrderedDict
import sys, os
import waflib
from waflib import Utils
from waflib.Configure import conf
_board_classes = {}
_board = None
class BoardMeta(type):
    """Metaclass that auto-registers every concrete Board subclass.

    Creating a Board subclass adds it to the module-level _board_classes
    mapping under its 'name' attribute (or its class name), unless the
    class marks itself as abstract.
    """

    def __init__(cls, name, bases, dct):
        super(BoardMeta, cls).__init__(name, bases, dct)

        # A class is concrete unless it sets 'abstract' in its own body;
        # the flag is not inherited for this purpose.
        if 'abstract' not in cls.__dict__:
            cls.abstract = False
        if cls.abstract:
            return

        # Concrete boards default to the native toolchain.
        if not hasattr(cls, 'toolchain'):
            cls.toolchain = 'native'

        registered_name = getattr(cls, 'name', name)
        if registered_name in _board_classes:
            raise Exception('board named %s already exists' % registered_name)
        _board_classes[registered_name] = cls
class Board:
    """Base class for all build targets.

    Subclasses are registered by BoardMeta; this base carries the common
    waf configure/build logic and the shared compiler flag set.
    """
    abstract = True

    def __init__(self):
        # UAVCAN support is opt-in per board.
        self.with_uavcan = False

    def configure(self, cfg):
        """Main waf configure entry point for a board."""
        cfg.env.TOOLCHAIN = cfg.options.toolchain or self.toolchain
        cfg.env.ROMFS_FILES = []
        cfg.load('toolchain')
        cfg.load('cxx_checks')

        # Collect board-specific settings into a fresh env, then merge them
        # into cfg.env below.
        env = waflib.ConfigSet.ConfigSet()
        self.configure_env(cfg, env)

        # Setup scripting, had to defer this to allow checking board size.
        # Scripting is enabled only when not disabled explicitly and the
        # board either has no known flash size or more than 1024k.
        if ((not cfg.options.disable_scripting) and
            (not cfg.env.DISABLE_SCRIPTING) and
            ((cfg.env.BOARD_FLASH_SIZE is None) or
             (cfg.env.BOARD_FLASH_SIZE == []) or
             (cfg.env.BOARD_FLASH_SIZE > 1024))):
            env.DEFINES.update(
                ENABLE_SCRIPTING = 1,
                ENABLE_HEAP = 1,
                LUA_32BITS = 1,
            )
            env.ROMFS_FILES += [
                ('sandbox.lua', 'libraries/AP_Scripting/scripts/sandbox.lua'),
            ]
            env.AP_LIBRARIES += [
                'AP_Scripting',
                'AP_Scripting/lua/src',
            ]
            env.CXXFLAGS += [
                '-DHAL_HAVE_AP_ROMFS_EMBEDDED_H'
            ]
        else:
            cfg.options.disable_scripting = True;

        d = env.get_merged_dict()
        # Always prepend so that arguments passed in the command line get
        # the priority.
        for k, val in d.items():
            # Dictionaries (like 'DEFINES') are converted to lists to
            # conform to waf conventions.
            if isinstance(val, dict):
                keys = list(val.keys())
                if not isinstance(val, OrderedDict):
                    keys.sort()
                val = ['%s=%s' % (vk, val[vk]) for vk in keys]
            if k in cfg.env and isinstance(cfg.env[k], list):
                cfg.env.prepend_value(k, val)
            else:
                cfg.env[k] = val

        cfg.ap_common_checks()

        cfg.env.prepend_value('INCLUDES', [
            cfg.srcnode.find_dir('libraries/AP_Common/missing').abspath()
        ])

    def configure_env(self, cfg, env):
        """Populate *env* with the common flags; subclasses extend this."""
        # Use a dictionary instead of the conventional list for definitions to
        # make it easy to override them. Converted back to a list before
        # consumption (in configure above).
        env.DEFINES = {}

        env.CFLAGS += [
            '-ffunction-sections',
            '-fdata-sections',
            '-fsigned-char',
            '-Wall',
            '-Wextra',
            '-Wformat',
            '-Wpointer-arith',
            '-Wcast-align',
            '-Wundef',
            '-Wno-missing-field-initializers',
            '-Wno-unused-parameter',
            '-Wno-redundant-decls',
            '-Wno-unknown-pragmas',
            '-Wno-trigraphs',
            '-Werror=shadow',
            '-Werror=return-type',
            '-Werror=unused-result',
            '-Werror=unused-variable',
            '-Werror=narrowing',
            '-Werror=attributes',
            '-Werror=overflow',
            '-Werror=parentheses',
            '-Werror=format-extra-args',
            '-Werror=delete-non-virtual-dtor',
            '-Werror=ignored-qualifiers',
        ]

        if cfg.options.scripting_checks:
            env.DEFINES.update(
                AP_SCRIPTING_CHECKS = 1,
            )

        if 'clang' in cfg.env.COMPILER_CC:
            env.CFLAGS += [
                '-fcolor-diagnostics',
                '-Wno-gnu-designator',
                '-Wno-inconsistent-missing-override',
                '-Wno-mismatched-tags',
                '-Wno-gnu-variable-sized-type-not-at-end',
            ]

        if cfg.env.DEBUG:
            env.CFLAGS += [
                '-g',
                '-O0',
            ]

        if cfg.options.bootloader:
            # don't let bootloaders try and pull scripting in
            cfg.options.disable_scripting = True

        if cfg.options.enable_math_check_indexes:
            env.CXXFLAGS += ['-DMATH_CHECK_INDEXES']

        env.CXXFLAGS += [
            '-std=gnu++11',
            '-fdata-sections',
            '-ffunction-sections',
            '-fno-exceptions',
            '-fsigned-char',
            '-Wall',
            '-Wextra',
            '-Wformat',
            '-Wpointer-arith',
            '-Wcast-align',
            '-Wundef',
            '-Wno-unused-parameter',
            '-Wno-missing-field-initializers',
            '-Wno-reorder',
            '-Wno-redundant-decls',
            '-Wno-unknown-pragmas',
            '-Wno-expansion-to-defined',
            '-Werror=attributes',
            '-Werror=format-security',
            '-Werror=format-extra-args',
            '-Werror=enum-compare',
            '-Werror=array-bounds',
            '-Werror=uninitialized',
            '-Werror=init-self',
            '-Werror=narrowing',
            '-Werror=return-type',
            '-Werror=switch',
            '-Werror=sign-compare',
            '-Werror=type-limits',
            '-Werror=unused-result',
            '-Werror=shadow',
            '-Werror=unused-variable',
            '-Wfatal-errors',
            '-Wno-trigraphs',
            '-Werror=parentheses',
        ]

        if 'clang++' in cfg.env.COMPILER_CXX:
            env.CXXFLAGS += [
                '-fcolor-diagnostics',
                '-Werror=address-of-packed-member',
                '-Werror=inconsistent-missing-override',
                '-Werror=overloaded-virtual',
                # catch conversion issues:
                '-Werror=bitfield-enum-conversion',
                '-Werror=bool-conversion',
                '-Werror=constant-conversion',
                '-Werror=enum-conversion',
                '-Werror=int-conversion',
                '-Werror=literal-conversion',
                '-Werror=non-literal-null-conversion',
                '-Werror=null-conversion',
                '-Werror=objc-literal-conversion',
                # '-Werror=shorten-64-to-32', # ARRAY_SIZE() creates this all over the place as the caller typically takes a uint32_t not a size_t
                '-Werror=string-conversion',
                # '-Werror=sign-conversion', # can't use as we assign into AP_Int8 from uint8_ts
                '-Wno-gnu-designator',
                '-Wno-mismatched-tags',
                '-Wno-gnu-variable-sized-type-not-at-end',
            ]
        else:
            env.CXXFLAGS += [
                '-Werror=unused-but-set-variable'
            ]
            # -Wsuggest-override is only usable on gcc > 5.1 and conflicts
            # with the UAVCAN library headers.
            (major, minor, patchlevel) = cfg.env.CC_VERSION
            if int(major) >= 5 and int(minor) > 1 and not self.with_uavcan:
                env.CXXFLAGS += [
                    '-Werror=suggest-override',
                ]

        if cfg.env.DEBUG:
            env.CXXFLAGS += [
                '-g',
                '-O0',
            ]

        if cfg.env.DEST_OS == 'darwin':
            env.LINKFLAGS += [
                '-Wl,-dead_strip',
            ]
        else:
            env.LINKFLAGS += [
                '-Wl,--gc-sections',
            ]

        if self.with_uavcan:
            env.AP_LIBRARIES += [
                'AP_UAVCAN',
                'modules/uavcan/libuavcan/src/**/*.cpp'
            ]
            env.CXXFLAGS += [
                '-Wno-error=cast-align',
            ]
            env.DEFINES.update(
                UAVCAN_CPP_VERSION = 'UAVCAN_CPP03',
                UAVCAN_NO_ASSERTIONS = 1,
                UAVCAN_NULLPTR = 'nullptr'
            )
            env.INCLUDES += [
                cfg.srcnode.find_dir('modules/uavcan/libuavcan/include').abspath()
            ]

        if cfg.env.build_dates:
            env.build_dates = True

        # We always want to use PRI format macros
        cfg.define('__STDC_FORMAT_MACROS', 1)

    def pre_build(self, bld):
        '''pre-build hook that gets called before dynamic sources'''
        if bld.env.ROMFS_FILES:
            self.embed_ROMFS_files(bld)

    def build(self, bld):
        """Record git/build-date version strings for this build."""
        bld.ap_version_append_str('GIT_VERSION', bld.git_head_hash(short=True))
        import time
        ltime = time.localtime()
        if bld.env.build_dates:
            bld.ap_version_append_int('BUILD_DATE_YEAR', ltime.tm_year)
            bld.ap_version_append_int('BUILD_DATE_MONTH', ltime.tm_mon)
            bld.ap_version_append_int('BUILD_DATE_DAY', ltime.tm_mday)

    def embed_ROMFS_files(self, ctx):
        '''embed some files using AP_ROMFS'''
        import embed
        header = ctx.bldnode.make_node('ap_romfs_embedded.h').abspath()
        if not embed.create_embedded_h(header, ctx.env.ROMFS_FILES):
            ctx.fatal("Failed to created ap_romfs_embedded.h")
# Re-create Board with BoardMeta as its metaclass (the Python-2-compatible
# way of attaching a metaclass after the class body is defined).
Board = BoardMeta('Board', Board.__bases__, dict(Board.__dict__))

def add_dynamic_boards():
    '''add boards based on existence of hwdef.dat in subdirectories for ChibiOS'''
    dirname, dirlist, filenames = next(os.walk('libraries/AP_HAL_ChibiOS/hwdef'))
    for d in dirlist:
        if d in _board_classes:
            # Statically defined boards take precedence.
            continue
        hwdef = os.path.join(dirname, d, 'hwdef.dat')
        if os.path.exists(hwdef):
            # Creating the class is enough: BoardMeta registers it in
            # _board_classes as a side effect, so the result needn't be kept.
            type(d, (chibios,), {'name': d})
def get_boards_names():
    """Return every known board name (static and dynamic), case-insensitively sorted."""
    add_dynamic_boards()
    return sorted(_board_classes, key=str.lower)
def get_removed_boards():
    '''list of boards which have been removed'''
    removed = ('px4-v1', 'px4-v2', 'px4-v3', 'px4-v4', 'px4-v4pro')
    return sorted(removed)
@conf
def get_board(ctx):
    """Return the singleton Board instance selected by ctx.env.BOARD,
    instantiating it on first use. Fails the configure step for unset,
    removed, or unknown board names."""
    global _board
    if not _board:
        if not ctx.env.BOARD:
            ctx.fatal('BOARD environment variable must be set before first call to get_board()')
        if ctx.env.BOARD in get_removed_boards():
            ctx.fatal('''
The board target %s has been removed from ArduPilot with the removal of NuttX support and HAL_PX4.
Please use a replacement build as follows:
 px4-v2 Use Pixhawk1 build
 px4-v3 Use Pixhawk1 or CubeBlack builds
 px4-v4 Use Pixracer build
 px4-v4pro Use DrotekP3Pro build
''' % ctx.env.BOARD)
        boards = _board_classes.keys()
        if not ctx.env.BOARD in boards:
            ctx.fatal("Invalid board '%s': choices are %s" % (ctx.env.BOARD, ', '.join(sorted(boards, key=str.lower))))
        _board = _board_classes[ctx.env.BOARD]()
    return _board
# NOTE: Keeping all the board definitions together so we can easily
# identify opportunities to simplify common flags. In the future might
# be worthy to keep board definitions in files of their own.
class sitl(Board):
    """Software-in-the-loop simulator build (native toolchain)."""

    def configure_env(self, cfg, env):
        super(sitl, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD = 'HAL_BOARD_SITL',
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_NONE',
            AP_SCRIPTING_CHECKS = 1, # SITL should always do runtime scripting checks
        )
        env.CXXFLAGS += [
            '-Werror=float-equal'
        ]
        if not cfg.env.DEBUG:
            env.CXXFLAGS += [
                '-O3',
            ]
        env.LIB += [
            'm',
        ]
        cfg.check_librt(env)
        cfg.check_feenableexcept()
        env.LINKFLAGS += ['-pthread',]
        env.AP_LIBRARIES += [
            'AP_HAL_SITL',
            'SITL',
        ]
        # Optional SFML support for on-screen OSD / RGB LED simulation.
        if cfg.options.enable_sfml:
            if not cfg.check_SFML(env):
                cfg.fatal("Failed to find SFML libraries")
        if cfg.options.sitl_osd:
            env.CXXFLAGS += ['-DWITH_SITL_OSD','-DOSD_ENABLED=ENABLED','-DHAL_HAVE_AP_ROMFS_EMBEDDED_H']
            import fnmatch
            # Embed all OSD font binaries into the ROMFS.
            for f in os.listdir('libraries/AP_OSD/fonts'):
                if fnmatch.fnmatch(f, "font*bin"):
                    env.ROMFS_FILES += [(f,'libraries/AP_OSD/fonts/'+f)]
        if cfg.options.sitl_rgbled:
            env.CXXFLAGS += ['-DWITH_SITL_RGBLED']
        if cfg.options.enable_sfml_audio:
            if not cfg.check_SFML_Audio(env):
                cfg.fatal("Failed to find SFML Audio libraries")
            env.CXXFLAGS += ['-DWITH_SITL_TONEALARM']
        if cfg.options.sitl_flash_storage:
            env.CXXFLAGS += ['-DSTORAGE_USE_FLASH=1']
        if cfg.env.DEST_OS == 'cygwin':
            env.LIB += [
                'winmm',
            ]
        if Utils.unversioned_sys_platform() == 'cygwin':
            env.CXXFLAGS += ['-DCYGWIN_BUILD']
        if 'clang++' in cfg.env.COMPILER_CXX:
            print("Disabling SLP for clang++")
            env.CXXFLAGS += [
                '-fno-slp-vectorize' # compiler bug when trying to use SLP
            ]
class chibios(Board):
    """Abstract base for all ChibiOS (STM32) boards; concrete boards are
    generated dynamically from hwdef.dat files by add_dynamic_boards()."""
    abstract = True
    toolchain = 'arm-none-eabi'

    def configure_env(self, cfg, env):
        super(chibios, self).configure_env(cfg, env)

        cfg.load('chibios')
        env.BOARD = self.name

        env.DEFINES.update(
            CONFIG_HAL_BOARD = 'HAL_BOARD_CHIBIOS',
            HAVE_OCLOEXEC = 0,
            HAVE_STD_NULLPTR_T = 0,
        )

        env.AP_LIBRARIES += [
            'AP_HAL_ChibiOS',
        ]

        # make board name available for USB IDs
        env.CHIBIOS_BOARD_NAME = 'HAL_BOARD_NAME="%s"' % self.name
        env.CFLAGS += cfg.env.CPU_FLAGS + [
            '-Wno-cast-align',
            '-Wlogical-op',
            '-Wframe-larger-than=1300',
            '-fsingle-precision-constant',
            '-Wno-attributes',
            '-Wno-error=double-promotion',
            '-Wno-error=missing-declarations',
            '-Wno-error=float-equal',
            '-Wno-error=undef',
            '-Wno-error=cpp',
            '-fno-exceptions',
            '-Wall',
            '-Wextra',
            '-Wno-sign-compare',
            '-Wfloat-equal',
            '-Wpointer-arith',
            '-Wmissing-declarations',
            '-Wno-unused-parameter',
            '-Werror=array-bounds',
            '-Wfatal-errors',
            '-Werror=uninitialized',
            '-Werror=init-self',
            '-Wframe-larger-than=1024',
            '-Werror=unused-but-set-variable',
            '-Wno-missing-field-initializers',
            '-Wno-trigraphs',
            '-fno-strict-aliasing',
            '-fomit-frame-pointer',
            '-falign-functions=16',
            '-ffunction-sections',
            '-fdata-sections',
            '-fno-strength-reduce',
            '-fno-builtin-printf',
            '-fno-builtin-fprintf',
            '-fno-builtin-vprintf',
            '-fno-builtin-vfprintf',
            '-fno-builtin-puts',
            '-mno-thumb-interwork',
            '-mthumb',
            '--specs=nano.specs',
            '-specs=nosys.specs',
            '-DCHIBIOS_BOARD_NAME="%s"' % self.name,
        ]
        env.CXXFLAGS += env.CFLAGS + [
            '-fno-rtti',
            '-fno-threadsafe-statics',
        ]
        # C-only flag, appended after CXXFLAGS was derived from CFLAGS.
        env.CFLAGS += [
            '-std=c11'
        ]

        if Utils.unversioned_sys_platform() == 'cygwin':
            env.CXXFLAGS += ['-DCYGWIN_BUILD']

        bldnode = cfg.bldnode.make_node(self.name)
        env.BUILDROOT = bldnode.make_node('').abspath()
        env.LINKFLAGS = cfg.env.CPU_FLAGS + [
            '-fomit-frame-pointer',
            '-falign-functions=16',
            '-ffunction-sections',
            '-fdata-sections',
            '-u_port_lock',
            '-u_port_unlock',
            '-u_exit',
            '-u_kill',
            '-u_getpid',
            '-u_errno',
            '-uchThdExit',
            '-fno-common',
            '-nostartfiles',
            '-mno-thumb-interwork',
            '-mthumb',
            '-specs=nano.specs',
            '-specs=nosys.specs',
            '-L%s' % env.BUILDROOT,
            '-L%s' % cfg.srcnode.make_node('modules/ChibiOS/os/common/startup/ARMCMx/compilers/GCC/ld/').abspath(),
            '-L%s' % cfg.srcnode.make_node('libraries/AP_HAL_ChibiOS/hwdef/common/').abspath(),
            '-Wl,--gc-sections,--no-warn-mismatch,--library-path=/ld,--script=ldscript.ld,--defsym=__process_stack_size__=%s,--defsym=__main_stack_size__=%s' % (cfg.env.PROCESS_STACK, cfg.env.MAIN_STACK)
        ]

        if cfg.env.DEBUG:
            env.CFLAGS += [
                '-gdwarf-4',
                '-g3',
            ]
            env.LINKFLAGS += [
                '-gdwarf-4',
                '-g3',
            ]

        if cfg.env.ENABLE_ASSERTS:
            cfg.msg("Enabling ChibiOS asserts", "yes")
            env.CFLAGS += [ '-DHAL_CHIBIOS_ENABLE_ASSERTS' ]
            env.CXXFLAGS += [ '-DHAL_CHIBIOS_ENABLE_ASSERTS' ]
        else:
            cfg.msg("Enabling ChibiOS asserts", "no")

        env.LIB += ['gcc', 'm']

        env.GIT_SUBMODULES += [
            'ChibiOS',
        ]

        # intelhex is optional: without it only .bin (not .hex) firmware
        # can be generated.
        try:
            import intelhex
            env.HAVE_INTEL_HEX = True
            cfg.msg("Checking for intelhex module:", 'OK')
        except Exception:
            cfg.msg("Checking for intelhex module:", 'disabled', color='YELLOW')
            env.HAVE_INTEL_HEX = False

    def build(self, bld):
        super(chibios, self).build(bld)
        bld.ap_version_append_str('CHIBIOS_GIT_VERSION', bld.git_submodule_head_hash('ChibiOS', short=True))
        bld.load('chibios')

    def pre_build(self, bld):
        '''pre-build hook that gets called before dynamic sources'''
        super(chibios, self).pre_build(bld)
        from waflib.Context import load_tool
        module = load_tool('chibios', [], with_sys_path=True)
        fun = getattr(module, 'pre_build', None)
        if fun:
            fun(bld)
class linux(Board):
    """Base class for Linux-based boards (Navio, Bebop, etc.)."""

    def configure_env(self, cfg, env):
        super(linux, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD = 'HAL_BOARD_LINUX',
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NONE',
        )
        if not cfg.env.DEBUG:
            env.CXXFLAGS += [
                '-O3',
            ]
        env.LIB += [
            'm',
        ]
        # Optional system libraries; each check degrades gracefully.
        cfg.check_librt(env)
        cfg.check_lttng(env)
        cfg.check_libdl(env)
        cfg.check_libiio(env)
        env.LINKFLAGS += ['-pthread',]
        env.AP_LIBRARIES += [
            'AP_HAL_Linux',
        ]
        if self.with_uavcan:
            cfg.define('UAVCAN_EXCEPTIONS', 0)
        if cfg.options.apstatedir:
            cfg.define('AP_STATEDIR', cfg.options.apstatedir)

    def build(self, bld):
        super(linux, self).build(bld)
        # 'waf --upload' rsyncs the binary to the board after building.
        if bld.options.upload:
            waflib.Options.commands.append('rsync')
            # Avoid infinite recursion
            bld.options.upload = False
class erleboard(linux):
    # Erle-Board: ARM Linux target; only the HAL subtype differs from base linux.
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(erleboard, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ERLEBOARD',
        )
class navio(linux):
    # Emlid Navio (Raspberry Pi HAT).
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(navio, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NAVIO',
        )
class navio2(linux):
    # Emlid Navio2 (Raspberry Pi HAT).
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(navio2, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NAVIO2',
        )
class edge(linux):
    """Emlid Edge: ARM Linux board with UAVCAN enabled."""
    toolchain = 'arm-linux-gnueabihf'

    def __init__(self):
        # Run base initialisation first so any state set up by Board/linux
        # __init__ is not silently skipped.
        super(edge, self).__init__()
        self.with_uavcan = True

    def configure_env(self, cfg, env):
        super(edge, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_EDGE',
        )
class zynq(linux):
    # Xilinx Zynq SoC target.
    toolchain = 'arm-xilinx-linux-gnueabi'

    def configure_env(self, cfg, env):
        super(zynq, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ZYNQ',
        )
class ocpoc_zynq(linux):
    # Aerotenna OcPoC-Zynq.
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(ocpoc_zynq, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_OCPOC_ZYNQ',
        )
class bbbmini(linux):
    """BBBmini (BeagleBone Black cape) with UAVCAN enabled."""
    toolchain = 'arm-linux-gnueabihf'

    def __init__(self):
        # Run base initialisation first so any state set up by Board/linux
        # __init__ is not silently skipped.
        super(bbbmini, self).__init__()
        self.with_uavcan = True

    def configure_env(self, cfg, env):
        super(bbbmini, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BBBMINI',
        )
class blue(linux):
    """BeagleBone Blue with UAVCAN enabled."""
    toolchain = 'arm-linux-gnueabihf'

    def __init__(self):
        # Run base initialisation first so any state set up by Board/linux
        # __init__ is not silently skipped.
        super(blue, self).__init__()
        self.with_uavcan = True

    def configure_env(self, cfg, env):
        super(blue, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BLUE',
        )
class pocket(linux):
    """PocketPilot (PocketBeagle) with UAVCAN enabled."""
    toolchain = 'arm-linux-gnueabihf'

    def __init__(self):
        # Run base initialisation first so any state set up by Board/linux
        # __init__ is not silently skipped.
        super(pocket, self).__init__()
        self.with_uavcan = True

    def configure_env(self, cfg, env):
        super(pocket, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_POCKET',
        )
class pxf(linux):
    # PixHawk Fire cape for BeagleBone Black.
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(pxf, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_PXF',
        )
class bebop(linux):
    # Parrot Bebop / Bebop 2.
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(bebop, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BEBOP',
        )
class disco(linux):
    # Parrot Disco.
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(disco, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_DISCO',
        )
class erlebrain2(linux):
    # Erle-Brain 2.
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(erlebrain2, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ERLEBRAIN2',
        )
class bhat(linux):
    # BH hat for Raspberry Pi.
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(bhat, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BH',
        )
class dark(linux):
    # DarkWater board.
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(dark, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_DARK',
        )
class pxfmini(linux):
    # PXFmini shield for Raspberry Pi.
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(pxfmini, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_PXFMINI',
        )
class aero(linux):
    """Intel Aero (native toolchain) with UAVCAN enabled."""

    def __init__(self):
        # Run base initialisation first so any state set up by Board/linux
        # __init__ is not silently skipped.
        super(aero, self).__init__()
        self.with_uavcan = True

    def configure_env(self, cfg, env):
        super(aero, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_AERO',
        )
class rst_zynq(linux):
    # RST Zynq board.
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(rst_zynq, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_RST_ZYNQ',
        )
class SITL_static(sitl):
    # SITL variant that links statically (used for cross-compiled SITL builds).
    def configure_env(self, cfg, env):
        super(SITL_static, self).configure_env(cfg, env)
        cfg.env.STATIC_LINKING = True
class SITL_x86_64_linux_gnu(SITL_static):
    # Static SITL cross-built for x86_64 Linux.
    toolchain = 'x86_64-linux-gnu'
class SITL_arm_linux_gnueabihf(SITL_static):
    # Static SITL cross-built for ARM hard-float Linux.
    toolchain = 'arm-linux-gnueabihf'
| ethomas997/ardupilot | Tools/ardupilotwaf/boards.py | Python | gpl-3.0 | 24,527 |
# Copyright (C) 2004, 2005 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.keybase
class DNSKEY(dns.rdtypes.keybase.KEYBase):
    """DNSKEY record; all parsing/formatting behaviour is inherited from KEYBase."""
    pass
| liyongyue/dnsspider | dns/rdtypes/ANY/DNSKEY.py | Python | isc | 881 |
from peewee import *
db = SqliteDatabase("tracker.db")
class Coach(Model):
    # Login credentials.
    # NOTE(review): password appears to be stored in plain text -- consider hashing.
    username = CharField()
    password = CharField()
    # First name.
    f_name = CharField()

    class Meta:
        database = db # This means this model uses the "tracker.db" database
class Athlete(Model):
    # School grade level.
    grade = IntegerField()
    # First and last name.
    f_name = CharField()
    l_name = CharField()

    class Meta:
        # Stored in the same "tracker.db" database as Coach.
        database = db
| Acais/Tracker | models.py | Python | gpl-3.0 | 395 |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example displays all active ads your DFA user profile can see.
Only name and ID are returned.
Tags: ads.list
"""
__author__ = ('api.jimper@gmail.com (Jonathon Imperiosi)')
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to look up ads for')
def main(argv):
  """List all active ads visible to the given DFA profile, page by page."""
  # Authenticate and construct service.
  service, flags = sample_tools.init(
      argv, 'dfareporting', 'v2.0', __doc__, __file__, parents=[argparser],
      scope=['https://www.googleapis.com/auth/dfareporting',
             'https://www.googleapis.com/auth/dfatrafficking'])

  profile_id = flags.profile_id

  try:
    # Construct the request.
    request = service.ads().list(profileId=profile_id, active=True)

    while True:
      # Execute request and print response.
      response = request.execute()

      for ad in response['ads']:
        print ('Found ad with ID %s and name "%s".' % (ad['id'], ad['name']))

      # NOTE(review): this assumes 'nextPageToken' is always present in the
      # response; if the API omits it on the last page this raises KeyError.
      if response['ads'] and response['nextPageToken']:
        request = service.ads().list_next(request, response)
      else:
        break

  except client.AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')

if __name__ == '__main__':
  main(sys.argv)
| vanant/googleads-dfa-reporting-samples | python/v2.0/get_ads.py | Python | apache-2.0 | 2,080 |
#!/usr/bin/env python
#
# parser.py
# Where the magic happens
#
#
# Zack Marotta (c)
from PIL import Image#, ImageDraw
import numpy as np
import random as rnd
import colorsys
#from collections import OrderedDict
from user import *
from ops import Ops
from fns import Fns
from libpyparsing.pyparsing import * #TODO: import only what I need
#The few functions below were taken from fourFn.py and SimpleCalc.py in the examples of the pyparsing lib.
#I have commented out some of the lines and modified others. The original files can be found at the following links:
#http://pyparsing.wikispaces.com/file/view/fourFn.py/30154950/fourFn.py
#http://pyparsing.wikispaces.com/file/view/SimpleCalc.py/30112812/SimpleCalc.py
log = Log() #required for numpy exceptions
np.seterrcall(log)
np.seterr(all="log")
#ParserElement.enablePackrat() #WARNING: MIGHT BREAK STUFF
exprStack = []
def pushFirst( strg, loc, toks ):
  # pyparsing parse action: push the matched token onto the global RPN stack.
  exprStack.append( toks[0] )
def pushUMinus( strg, loc, toks ):
    # pyparsing parse action: push one 'unary -' marker onto the global RPN
    # stack for each leading '-' token; stop at the first non-'-' token.
    for token in toks:
        if token != '-':
            break
        exprStack.append( 'unary -' )
bnf = None
def BNF():
    """Build (once) and return the pyparsing grammar for the pixel algorithm.

    expop   :: '^'
    multop  :: '*' | '/'
    addop   :: '+' | '-'
    integer :: ['+' | '-'] '0'..'9'+
    atom    :: PI | E | real | fn '(' expr ')' | '(' expr ')'
    factor  :: atom [ expop factor ]*
    term    :: factor [ multop factor ]*
    expr    :: term [ addop term ]*
    """
    # The grammar is built lazily and cached in the module-level 'bnf'.
    global bnf
    if not bnf:
        point = Literal( "." )
        #~ fnumber = Combine( Word( "+-"+nums, nums ) +
        #~ Optional( point + Optional( Word( nums ) ) ) +
        #~ Optional( e + Word( "+-"+nums, nums ) ) )
        #fnumber = Regex(r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?")
        fnumber = Regex(r"[+-]?\d+(:?\.\d*)?") #disabled sci notation because I'd rather things didn't get messy
        #ident = Word(alphas, alphas+nums+"_$")
        ident = Word(alphas, alphas+nums)
        plus = Literal("+")
        minus = Literal("-")
        mult = Literal("*")
        div = Literal("/") | Literal("%")
        lpar = Literal("(").suppress()
        rpar = Literal(")").suppress()
        addop = plus | minus
        # Bitwise/logical/comparison operators; longest alternatives first so
        # e.g. "<<" is not consumed as "<".
        miscop = Keyword("<<") | Keyword(">>") | Keyword("~") | Keyword("&&") | Keyword("X||") | Keyword("||") | Keyword("AND") | Keyword("XOR") | Keyword("OR") | Keyword("NOT") | Keyword(">=") | Keyword(">") | Keyword("<=") | Keyword("<") | Keyword("==")
        multop = mult | div | miscop
        expop = Literal("^")
        expr = Forward()
        # An atom is a number, a function call, a bare identifier, or a
        # parenthesised sub-expression; parse actions push onto exprStack.
        atom = ((0,None)*minus + (fnumber | ident + lpar + expr + rpar | ident).setParseAction(pushFirst) |
                Group(lpar + expr + rpar)).setParseAction(pushUMinus)
        factor = Forward()
        # Exponentiation is right-associative via the recursive factor rule.
        factor << atom + ZeroOrMore((expop + factor).setParseAction(pushFirst))
        term = factor + ZeroOrMore((multop + factor).setParseAction(pushFirst))
        expr << term + ZeroOrMore((addop + term).setParseAction(pushFirst))
        bnf = expr
    return bnf
arithExpr = BNF()
ident = Word(alphas).setName("identifier")
assignment = ident("varname") + '=' + arithExpr
comment = Literal("#") + restOfLine
#pattern = assignment
pattern = assignment + Optional(comment)
class Parser(object):
def __init__(self, isclamp, width, height, alg, verbose, debug, fg, bg, filename):
self.isclamp = isclamp
self.width = width
self.height = height
self.bands = 1
self.alg = alg
self.verbose = verbose
self.debug = debug
self.filename = filename
if fg is not None:
self.fghue = colorsys.hls_to_rgb(fg[0]/360, fg[1], fg[2])
else:
self.fghue = colorsys.hls_to_rgb(0, 0, 1)
if bg is not None:
self.bghue = colorsys.hls_to_rgb(bg[0]/360, bg[1], bg[2])
else:
self.bghue = colorsys.hls_to_rgb(0, 0, 0)
self.constants = {"ROWS": height, "COLS": width, "MAX": width*height}
self.optvars = {"X": 0, "Y": 0, "P": 0}
self.locals = {}
self.ops = Ops()
self.fns = Fns()
def mainSequence(self):
self.newImage()
if self.verbose: vprint("Size:{}\tPixels:{}\tCustom fg/bg RGB color:({}, {})\nRequired variables: {}\nAlgorithm:\n{}\n".format("{}x{}".format(self.width, self.height), self.width*self.height, self.fghue, self.bghue, self.reqvars, self.alg))
self.crunch()
self.placePixels()
self.saveImage()
def evaluateStack(self, s):
op = s.pop()
if op == 'unary -': #negate
return -self.evaluateStack(s)
if op in self.ops.defs: #perform op
op2 = self.evaluateStack(s)
op1 = self.evaluateStack(s)
return self.ops.defs[op](op1, op2)
elif op in self.fns.defs: #perform fn
return self.fns.defs[op](self.evaluateStack(s))
elif op[0].isalpha():
if op in self.optvars:
return self.optvars[op] #return optvar
if op in self.locals:
return self.locals[op] #return local var
if op in self.constants:
return self.constants[op] #return constant
raise Exception("invalid identifier '%s'" % op)
else:
return float(op)
def crunch(self):
print("Parsing...")
global exprStack
exprStack = []
self.pixeldata = np.ndarray(shape=(self.height, self.width, self.bands), dtype=float)
maxp = self.constants["MAX"]
ws = self.alg.replace("\n", ";").split(";") #lines with leading and/or trailing whitespace
lines = map(lambda x: x.strip(), ws) #trims whitespace
for currentRow in xrange(self.height):
self.optvars["Y"] = currentRow
# 1111
# 2222
# 3333
for currentCol in xrange(self.width):
self.optvars["X"] = currentCol
pixval = []
if self.verbose: progbar(self.optvars["P"], maxp)
# 1234
# 1234
# 1234
for i, input_string in enumerate(lines):
if input_string != '':
del exprStack[:]
### IMPLEMENT WHEN RANDOM FUNCTIONS ARRIVE
#isplit = input_string.split()
#if isplit[1] != "=":
# if isplit[0][0] == "$":
# try:
# rand.seed(int(isplit[0][1:]))
# except:
# err("ParseFailure: Line 1\n{}\n ^ invalid seed value".format(input_string))
# else:
# missing("=", i, isplit)
###
#if self.verbose: vprint(input_string + "\n")#
try:
L=pattern.parseString(input_string, parseAll=True) #EXECUTE MAGIC.EXE seriously this shit is insane
except ParseException as pe:
L=['Parse Failure',input_string]
#show result of parsing the input string
#if self.verbose: print(input_string, "->", L)#
if len(L)==0 or L[0] != 'Parse Failure':
#if self.verbose: vprint("exprStack = {}\n".format(exprStack))#
try:
result=self.evaluateStack(exprStack)
except Exception as e:
err(e)
else:
self.locals[L.varname] = result
#if self.verbose: vprint("variables = {}\n".format(variables))#
else:
err("ParseFailure: Line ", more=True)
err(pe.line, more=True)
err(" "*(pe.column-1) + "^", more=True)
err(pe)
#end of current line
for v in self.reqvars:
pixval.append(self.locals[v]) #[255,255,255]
if self.debug:
print(self.pixeldata)
print("({}, {}): {}".format(currentCol, currentRow, pixval))
self.pixeldata[currentRow, currentCol] = pixval #[ ..., [255,255,255] ]
self.optvars["P"] += 1
#end of column
#end of row
#end of method
def newImage(self):
pass
def convert(p): #this will be overridden to return an RGB/CYMK tuple
pass
def placePixels(self):
print("Placing pixels...")
maxp = self.constants["MAX"]
if self.isclamp:
self.maxpvalue = 255
finaldata = clamp255(self.pixeldata)
else:
self.maxpvalue = np.amax(self.pixeldata)
if self.verbose: vprint("Creative mode range: 0-{}\n".format(self.maxpvalue))
finaldata = self.pixeldata
if self.debug: self.saveDebug(self.pixeldata)
i = 0
for y in xrange(self.height):
for x in xrange(self.width):
if self.verbose: progbar(i, maxp)
color = self.convert(finaldata[y,x]) #(y,x) when in array, (x,y) when in image. Ugh.
self.pix[x,y] = color
i += 1
    def saveImage(self):
        """Write the finished image to self.filename as a PNG."""
        self.im.save(self.filename, "png")
        print("Saving as {}".format(self.filename))
def saveDebug(self, array):
vprint("Saving debug data to debug.txt...\n")
f = open("debug.txt", "w")
for y in xrange(self.height):
for x in xrange(self.width):
f.write("(")
for z in array[y,x]:
f.write(str(z) + ",")
f.write("),")
f.write("\n")
f.close()
#http://effbot.org/imagingbook/imagedraw.htm
class ParserGray(Parser):
    """Grayscale mode: a single K band blended between bghue and fghue."""
    def newImage(self):
        # ("K") is just the string "K", not a tuple; use a real 1-tuple for
        # consistency with the other subclasses. Iteration still yields "K",
        # so existing behaviour is unchanged.
        self.reqvars = ("K",)
        self.bands = 1
        self.im = Image.new("RGB", (self.width, self.height))
        self.pix = self.im.load()
    def convert(self, p):
        """Interpolate each RGB channel between bghue and fghue by p[0]/maxpvalue."""
        r = lerp(self.bghue[0], self.fghue[0], p[0]/self.maxpvalue)*255 # linearinterpolate([0-1.0], [0-1.0], [0-255]/255)*255
        g = lerp(self.bghue[1], self.fghue[1], p[0]/self.maxpvalue)*255
        b = lerp(self.bghue[2], self.fghue[2], p[0]/self.maxpvalue)*255
        return (int(round(r)), int(round(g)), int(round(b)))
class ParserRGB(Parser):
    """RGB mode: three bands map directly onto the red, green and blue channels."""
    def newImage(self):
        self.bands = 3
        self.reqvars = ("R", "G", "B")
        self.im = Image.new("RGB", (self.width, self.height))
        self.pix = self.im.load()
    def convert(self, p):
        # Scale each band onto 0-255: a no-op when clamped (maxpvalue == 255),
        # a normalisation by the brightest value seen in creative mode.
        scaled = [band / self.maxpvalue * 255 for band in (p[0], p[1], p[2])]
        return tuple(int(round(channel)) for channel in scaled)
class ParserCMYK(Parser):
    """CMYK mode: four bands written into a CMYK image, converted to RGB on save."""
    def newImage(self):
        self.bands = 4
        self.reqvars = ("C", "M", "Y", "K")
        self.im = Image.new("CMYK", (self.width, self.height))
        self.pix = self.im.load()
    def convert(self, p):
        # Normalise all four bands onto 0-255 and round to integers.
        return tuple(int(round(band / self.maxpvalue * 255))
                     for band in (p[0], p[1], p[2], p[3]))
    def saveImage(self):
        # PNG has no CMYK support, so convert to RGB before writing.
        rgb_im = self.im.convert("RGB")
        rgb_im.save(self.filename, "png")
        print("Saving as {}".format(self.filename))
class ParserHLS(Parser):
    """HLS mode: hue/lightness/saturation bands converted to RGB via colorsys."""
    def newImage(self):
        #err("NOT IMPLEMENTED")
        self.bands = 3
        self.reqvars = ("H", "L", "S")
        self.im = Image.new("RGB", (self.width, self.height))
        self.pix = self.im.load()
    def convert(self, p):
        # Scale bands onto 0-255 first, then hand colorsys 0-1 fractions.
        h, l, s = (band / self.maxpvalue * 255 for band in (p[0], p[1], p[2]))
        rgb = colorsys.hls_to_rgb(h/255, l/255, s/255)
        # Note: channels are truncated (int()), not rounded, as before.
        return tuple(int(channel*255) for channel in rgb)
class ParserHSV(Parser):
    """HSV mode: hue/saturation/value bands converted to RGB via colorsys."""
    def newImage(self):
        #err("NOT IMPLEMENTED")
        self.bands = 3
        self.reqvars = ("H", "S", "V")
        self.im = Image.new("RGB", (self.width, self.height))
        self.pix = self.im.load()
    def convert(self, p):
        # Scale bands onto 0-255 first, then hand colorsys 0-1 fractions.
        h, s, v = (band / self.maxpvalue * 255 for band in (p[0], p[1], p[2]))
        rgb = colorsys.hsv_to_rgb(h/255, s/255, v/255)
        # Note: channels are truncated (int()), not rounded, as before.
        return tuple(int(channel*255) for channel in rgb)
class ParserYIQ(Parser): #TODO
    """YIQ mode placeholder: aborts via err() because convert() is unimplemented."""
    def newImage(self):
        err("NOT IMPLEMENTED")
        self.reqvars = ("Y", "I", "Q")
        self.bands = 3
        self.im = Image.new("RGB", (self.width, self.height))
        self.pix = self.im.load()
#Make a pull request if you want to add more modes!
| Sir-Fancy/AlgArt | libalgart/parser.py | Python | artistic-2.0 | 13,424 |
'''Module for the contacts pageset'''
from datetime import datetime
from murmeli.pages.base import PageSet, Bean
from murmeli.pagetemplate import PageTemplate
from murmeli import dbutils
from murmeli.fingerprints import FingerprintChecker
from murmeli.contactmgr import ContactManager
from murmeli import cryptoutils
class ContactsPageSet(PageSet):
    '''Contacts page server, for showing list of contacts etc'''
    def __init__(self, system):
        PageSet.__init__(self, system, "contacts")
        # Pre-load every template this page set renders
        self.list_template = PageTemplate('contactlist')
        self.details_template = PageTemplate('contactdetails')
        self.editowndetails_template = PageTemplate('editcontactself')
        self.editdetails_template = PageTemplate('editcontact')
        self.add_template = PageTemplate('addcontact')
        self.addrobot_template = PageTemplate('addrobot')
        self.fingerprintstemplate = PageTemplate('fingerprints')
    def serve_page(self, view, url, params):
        '''Serve a page to the given view'''
        print("Contacts serving page", url)
        self.require_resources(['button-addperson.png', 'button-addrobot.png',
                                'button-removerobot.png',
                                'button-drawgraph.png', 'avatar-none.jpg'])
        database = self.system.get_component(self.system.COMPNAME_DATABASE)
        crypto = self.system.get_component(self.system.COMPNAME_CRYPTO)
        dbutils.export_all_avatars(database, self.get_web_cache_dir())
        commands = self.interpret_commands(url)
        # exportkey is handled inline because it renders no page of its own,
        # only a javascript confirmation message
        if commands[0] == "exportkey":
            if self._export_key(crypto, database):
                view.page().runJavaScript("showMessage('%s')" % \
                    self.i18n('contacts.confirm.keyexported'))
            return
        contents, page_params, userid = self.make_page_contents(commands, params)
        # If we haven't got any contents yet, then do a show details
        contents = contents or self.make_list_page(do_edit=False, userid=userid,
                                                   extra_params=page_params)
        view.set_html(contents)
    def make_page_contents(self, commands, params):
        '''Make the page contents given the command and parameters

        Returns a (contents, page_params, userid) tuple; contents is None for
        actions that fall back to the standard list page.
        '''
        userid = commands[1] if len(commands) == 2 else None
        database = self.system.get_component(self.system.COMPNAME_DATABASE)
        crypto = self.system.get_component(self.system.COMPNAME_CRYPTO)
        contents = None
        page_params = {}
        if commands[0] == "add":
            contents = self.make_add_page()
        elif commands[0] == "submitadd":
            req_id = params.get('murmeliid') if params else None
            if req_id:
                disp_name = params.get('displayname', '') if params else None
                intro_msg = params.get('intromessage', '') if params else None
                ContactManager(database, crypto).handle_initiate(req_id, disp_name, intro_msg)
                # ensure that avatar is exported for this new contact
                dbutils.export_all_avatars(database, self.get_web_cache_dir())
        elif commands[0] == "addrobot":
            contents = self.make_add_robot_page()
        elif commands[0] == "submitaddrobot":
            req_id = params.get('murmeliid') if params else None
            if req_id:
                ContactManager(database, crypto).handle_initiate(req_id, "", "", True)
        elif commands[0] == "removerobot":
            ContactManager(database, crypto).handle_robot_removal()
        elif commands[0] == "edit":
            contents = self.make_list_page(do_edit=True, userid=userid)
        elif commands[0] == "submitedit":
            # status and keyid must never be editable through the web form
            assert not set(params.keys()).intersection(['status', 'keyid'])
            dbutils.update_profile(database, tor_id=userid, in_profile=params,
                                   pic_output_path=self.get_web_cache_dir())
        elif commands[0] == "checkfingerprint":
            contents = self.make_checkfinger_page(commands[1], params.get('lang'))
        elif commands[0] == "checkedfingerprint":
            given_answer = self.get_param_as_int(params, "answer", -1)
            fingers = self._make_fingerprint_checker(userid)
            # Compare with expected answer, generate appropriate page
            if given_answer == fingers.get_correct_answer():
                pending_referrals = []
                ContactManager(database, crypto, self.get_config()).key_fingerprint_checked( \
                    userid, pending_referrals)
                print("Pending referrals:", pending_referrals)
                for msg in pending_referrals:
                    self.system.invoke_call(self.system.COMPNAME_MSG_HANDLER, "receive", msg=msg)
                # Show page again
                contents = self.make_checkfinger_page(userid, params.get('lang'))
            else:
                page_params['fingerprint_check_failed'] = True
        elif commands[0] == "delete" and userid:
            ContactManager(database, None, self.get_config()).delete_contact(userid)
            userid = None
        elif commands[0] == "refer":
            intro = str(params.get('introMessage', ""))
            ContactManager(database, crypto).send_referral_messages(commands[1], commands[2],
                                                                    intro)
        return (contents, page_params, userid)
    def _export_key(self, crypto, database):
        '''Export our own public key to a file in our data directory'''
        own_keyid = dbutils.get_own_key_id(database)
        data_dir = self.get_config().get_data_dir()
        if cryptoutils.export_public_key(own_keyid, data_dir, crypto):
            print("Exported public key")
            return True
        print("FAILED to export public key")
        return False
    @staticmethod
    def interpret_commands(url):
        '''Take the url to make a list of command to execute and its parameters

        Falls through to ['show', None] for anything unrecognised.
        '''
        if url:
            command = [elem for elem in url.split("/") if elem]
            if command:
                if len(command) == 1:
                    if command[0] in ['add', 'submitadd', 'addrobot', 'submitaddrobot',
                                      'exportkey', 'removerobot']:
                        return command
                    if ContactsPageSet.looks_like_userid(command[0]):
                        return ['show', command[0]]
                elif len(command) == 2:
                    # urls are of the form <userid>/<action>; reorder to [action, userid]
                    if ContactsPageSet.looks_like_userid(command[0]):
                        if command[1] in ['edit', 'submitedit', 'delete', 'checkfingerprint',
                                          'checkedfingerprint']:
                            return [command[1], command[0]]
                elif len(command) == 3:
                    if ContactsPageSet.looks_like_userid(command[0]) and \
                            ContactsPageSet.looks_like_userid(command[2]):
                        if command[1] in ['refer', 'requestrefer']:
                            return [command[1], command[0], command[2]]
        return ['show', None]
    def make_list_page(self, do_edit=False, userid=None, extra_params=None):
        '''Generate a page for listing all the contacts and showing the details of one of them'''
        self.require_resources(['status-self.png', 'status-requested.png', 'status-untrusted.png',
                                'status-trusted.png', 'status-robot.png'])
        # Who are we showing?
        selectedprofile = self.system.invoke_call(self.system.COMPNAME_DATABASE, "get_profile",
                                                  torid=userid)
        ownprofile = self.system.invoke_call(self.system.COMPNAME_DATABASE, "get_profile",
                                             torid=None)
        # Default to our own profile if the requested one wasn't found
        if not selectedprofile:
            selectedprofile = ownprofile
        userid = selectedprofile['torid']
        # Build list of contacts
        userboxes, has_friends = self._make_user_boxes(userid)
        # build left side of page using these boxes
        lefttext = self.list_template.get_html(self.get_all_i18n(),
                                               {'webcachedir':self.get_web_cache_dir(),
                                                'contacts':userboxes,
                                                'has_friends':has_friends})
        page_props = {"webcachedir":self.get_web_cache_dir(), 'person':selectedprofile}
        # Add extra parameters if necessary
        if extra_params:
            page_props.update(extra_params)
        # See which contacts we have in common with this person
        database = self.system.get_component(self.system.COMPNAME_DATABASE)
        shared_info = ContactManager(database, None).get_shared_possible_contacts(userid)
        page_props["sharedcontacts"] = self._make_id_name_bean_list(shared_info.get_shared_ids())
        page_props["posscontactsforthem"] = self._make_id_name_bean_list( \
            shared_info.get_ids_for_them())
        page_props["posscontactsforme"] = self._make_id_name_bean_list( \
            shared_info.get_ids_for_me())
        # Work out status of this contact's robot
        robot_status = dbutils.get_robot_status(database, userid, \
            self.system.get_component(self.system.COMPNAME_CONTACTS))
        page_props['robotstatus'] = self.i18n("contacts.details.robotstatus." + robot_status)
        page_props['robotset'] = (robot_status != 'none')
        # Which template to use depends on whether we're just showing or also editing
        if do_edit:
            # Use two different details templates, one for self and one for others
            page_templ = self.editowndetails_template if userid == ownprofile['torid'] \
                else self.editdetails_template
        else:
            page_templ = self.details_template
        # Put left side and right side together
        return self.build_two_column_page({'pageTitle':self.i18n("contacts.title"),
                                           'leftColumn':lefttext,
                                           'rightColumn':page_templ.get_html(self.get_all_i18n(),
                                                                             page_props),
                                           'pageFooter':"<p>Footer</p>"})
    def _make_user_boxes(self, selected_id):
        '''Make a list of boxes for our contacts

        Returns (boxes, has_friends) where has_friends reflects whether any
        contact is in the untrusted or trusted state.
        '''
        userboxes = []
        has_friends = False
        database = self.system.get_component(self.system.COMPNAME_DATABASE)
        for profile in database.get_profiles():
            if profile['status'] in ['requested', 'untrusted', 'trusted', 'self']:
                box = Bean()
                box.set('disp_name', profile['displayName'])
                tor_id = profile['torid']
                box.set('torid', tor_id)
                tile_selected = profile['torid'] == selected_id
                box.set('tilestyle', "contacttile" + ("selected" if tile_selected else ""))
                box.set('status', profile['status'])
                is_online = self.system.invoke_call(self.system.COMPNAME_CONTACTS,
                                                    "is_online", tor_id=tor_id)
                last_time = self.system.invoke_call(self.system.COMPNAME_CONTACTS,
                                                    "last_seen", tor_id=tor_id)
                box.set('last_seen', self._make_lastseen_string(is_online, last_time))
                box.set('has_robot', dbutils.has_robot(database, tor_id))
                userboxes.append(box)
                if profile['status'] in ['untrusted', 'trusted']:
                    has_friends = True
        return (userboxes, has_friends)
    @staticmethod
    def _make_id_name_bean_list(contact_list):
        '''Make a list of Bean objects for the given contact list'''
        con_list = []
        for cid, cname in contact_list:
            pair = Bean()
            pair.set('torid', cid)
            pair.set('disp_name', cname)
            con_list.append(pair)
        return con_list
    def _make_lastseen_string(self, online, last_time):
        '''Make a string describing the online / offline status'''
        curr_time = datetime.now()
        # Only mention a timestamp if it's within the last 5 hours (18000 s);
        # [:5] trims the time-of-day string down to "HH:MM"
        if last_time and (curr_time-last_time).total_seconds() < 18000:
            token = "contacts.onlinesince" if online else "contacts.offlinesince"
            return self.i18n(token) % str(last_time.timetz())[:5]
        if online:
            return self.i18n("contacts.online")
        return ""
    def make_add_page(self):
        '''Build the form page for adding a new contact, using the template'''
        own_profile = self.system.invoke_call(self.system.COMPNAME_DATABASE, "get_profile")
        own_tor_id = own_profile.get("torid") if own_profile else None
        tokens = self.get_all_i18n()
        bodytext = self.add_template.get_html(tokens, {"owntorid":own_tor_id or ""})
        return self.build_page({'pageTitle':self.i18n("contacts.title"),
                                'pageBody':bodytext,
                                'pageFooter':"<p>Footer</p>"})
    def make_add_robot_page(self):
        '''Build the form page for adding a new robot, using the template'''
        own_profile = self.system.invoke_call(self.system.COMPNAME_DATABASE, "get_profile") or {}
        own_tor_id = own_profile.get("torid")
        robot_id = own_profile.get("robotid")
        bodytext = self.addrobot_template.get_html(self.get_all_i18n(),
                                                   {"owntorid":own_tor_id or "",
                                                    "robotid":robot_id or ""})
        return self.build_page({'pageTitle':self.i18n("contacts.title"),
                                'pageBody':bodytext,
                                'pageFooter':"<p>Footer</p>"})
    def make_checkfinger_page(self, userid, lang):
        '''Generate a page for checking the fingerprint of the given user

        Returns None if the contact's status doesn't allow fingerprint checking.
        '''
        # First, get the name of the user
        person = self.system.invoke_call(self.system.COMPNAME_DATABASE, "get_profile",
                                         torid=userid)
        disp_name = person['displayName']
        full_name = person['name']
        if disp_name != full_name:
            full_name = "%s (%s)" % (disp_name, full_name)
        # check it's ok to generate
        status = person.get('status')
        if status not in ['untrusted', 'trusted']:
            print("Not generating fingerprints page because status is", status)
            return None
        fingers = self._make_fingerprint_checker(userid)
        page_params = {"mywords":fingers.get_code_words(True, 0, lang or "en"),
                       "theirwords0":fingers.get_code_words(False, 0, lang or "en"),
                       "theirwords1":fingers.get_code_words(False, 1, lang or "en"),
                       "theirwords2":fingers.get_code_words(False, 2, lang or "en"),
                       "fullname":full_name, "shortname":disp_name, "userid":userid,
                       "language_en":"", "language_de":"",
                       "alreadychecked":status == "trusted"}
        page_params["language_" + (lang or "en")] = "selected"
        body_text = self.fingerprintstemplate.get_html(self.get_all_i18n(), page_params)
        return self.build_page({'pageTitle':self.i18n("contacts.title"),
                                'pageBody':body_text,
                                'pageFooter':"<p>Footer</p>"})
    def _make_fingerprint_checker(self, userid):
        '''Use the given userid to make a FingerprintChecker between me and them'''
        own_profile = self.system.invoke_call(self.system.COMPNAME_DATABASE, "get_profile",
                                              torid=None)
        own_fingerprint = self.system.invoke_call(self.system.COMPNAME_CRYPTO, "get_fingerprint",
                                                  key_id=own_profile['keyid'])
        person = self.system.invoke_call(self.system.COMPNAME_DATABASE, "get_profile",
                                         torid=userid)
        other_fingerprint = self.system.invoke_call(self.system.COMPNAME_CRYPTO, "get_fingerprint",
                                                    key_id=person['keyid'])
        assert own_fingerprint and other_fingerprint
        return FingerprintChecker(own_fingerprint, other_fingerprint)
| activityworkshop/Murmeli | murmeli/pages/contacts.py | Python | gpl-2.0 | 16,404 |
default_app_config = 'waldur_mastermind.google.apps.GoogleConfig'
| opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/google/__init__.py | Python | mit | 66 |
"""
11-19-15
Uses Kivy to present an interactive visualization of the DNA chain structure.
"""
from kivy.app import runTouchApp
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.properties import ListProperty, NumericProperty
from kivy.lang import Builder
from dna_chain import DNANode
from dna import DNA
DNANode.__repr__ = lambda a: str(a.name)
class NodeVis(Label):
    """A single DNA node rendered as a coloured, outlined rectangle.

    The drawing itself is done by the kv rule registered for <NodeVis>.
    """
    color = ListProperty([0, .6, .5])  # fill colour (RGB)
    padding = NumericProperty('7dp')   # inset between widget bounds and rectangle
Builder.load_string("""
<NodeVis>:
canvas.before:
Color:
rgb: self.color
Rectangle:
pos: self.x + self.padding, self.y + self.padding
size: self.width - self.padding * 2, self.height - self.padding * 2
Color:
rgb: 0, 0, 0
Line:
rectangle: self.x + self.padding, self.y + self.padding, \
self.width - self.padding * 2, self.height - self.padding * 2
width: 2.5
""")
class DNAVis(Widget):
    """Interactive widget showing the DNA chain as labelled, indented boxes.

    Commands typed into the TextInput (see kv rule) are executed with the
    dna object and crawler in scope, then the display is redrawn.
    """
    def __init__(self, **kw):
        super(DNAVis, self).__init__(**kw)
        self.dna = DNA()
        self.crawler = self.dna.spawn_crawler()
        # Redraw whenever the widget is resized or moved
        self.bind(size=self.redraw, pos=self.redraw)
    def redraw(self, *ar):
        """Rebuild all NodeVis children from the current chain state."""
        # Remove only the NodeVis children; the TextInput stays
        for w in self.children[:]:
            if isinstance(w, NodeVis):
                self.remove_widget(w)
        node_lookup = {}
        y = self.top - 100
        # A fresh crawler walks the whole chain; one box per node, 100px tall,
        # shifted right by 100px per indent level
        crawler = self.dna.spawn_crawler()
        crawler.reset()
        indent = 100
        for n, indent_no in crawler.crawl_indents():
            indent += 100 * indent_no
            nv = NodeVis(size=(100, 100), text=n.name)
            node_lookup[n] = nv
            nv.x = self.x + indent
            nv.y = y
            self.add_widget(nv)
            y -= 100
        # Draw the interactive crawler as a red box beside its current node,
        # or top-right if it isn't positioned on a node
        crawler = self.crawler
        cv = NodeVis(
            size=(100, 100),
            text='CRAWLER',
            color=[.7, .05, 0])
        if crawler.current_node is not None:
            cv.x = node_lookup[crawler.current_node].x - 100
            cv.y = node_lookup[crawler.current_node].y
        else:
            cv.y = self.top - 100
            cv.x = self.width - cv.width
        self.add_widget(cv)
    def eval_input(self, text):
        """Execute the typed command and redraw.

        NOTE(review): exec on raw user input is the whole point of this
        interactive tool, but it must never be exposed to untrusted input.
        """
        # Local names deliberately exposed to the executed command
        dna = self.dna
        crawler = self.crawler
        # Persist locals between commands so assignments survive
        if not hasattr(self, 'exec_locals'):
            self.exec_locals = locals()
        try:
            to_run = compile(text, '', 'exec')
            exec(to_run, globals(), self.exec_locals)
        except (NameError, SyntaxError, AttributeError, TypeError) as err:
            print(err)
        self.redraw()
Builder.load_string("""
#:import Clock kivy.clock.Clock
<DNAVis>:
canvas.before:
Color:
rgb: 1, .9, .8
Rectangle:
size: self.size
pos: self.pos
TextInput:
id: input
pos: root.pos
size: root.width, '35dp'
multiline: False
on_text_validate:
root.eval_input(self.text)
self.text = ''
Clock.schedule_once(lambda dt: setattr(self, 'focus', True), .1)
""")
if __name__ == '__main__':
    from kivy.clock import Clock
    dv = DNAVis()
    def the_deeds(*ar):
        # Demo setup, deferred one second so the widget is laid out first:
        # build a two-node chain and reset the crawler onto it
        n = DNANode()
        n.name = 'NODE 1'
        dv.dna.head = n
        c = dv.dna.spawn_crawler()
        n = DNANode()
        n.name = 'NODE 2'
        c.add_child(n, dv.dna.head)
        dv.crawler.reset()
        dv.redraw()
    Clock.schedule_once(the_deeds, 1.0)
    runTouchApp(dv)
| mrhubbs/dna | dna_vis.py | Python | gpl-3.0 | 3,495 |
"""Test for the new detector file"""
import sys
import pathlib
import zipfile
import io
import pytest
import serpentTools
from serpentTools.data import getFile
import serpentTools.next
# Sample Serpent detector output files bundled with serpentTools
BWR_FILE = getFile("bwr_det0.m")
HEX_FILE = getFile("hexplot_det0.m")
@pytest.fixture(scope="module")
def previousBWR():
    """Read the BWR detector file once with the established reader for comparison."""
    return serpentTools.read(BWR_FILE)
@pytest.fixture
def zippedStream(tmp_path):
    """Yield a readable zip-member stream containing the BWR detector file."""
    d = tmp_path / "next_detector"
    d.mkdir()
    filename = d / "bwr.zip"
    zipname = "bwr_det0.m"
    # Create the file in a zipped archive
    with zipfile.ZipFile(filename, mode="w") as z:
        with z.open(zipname, mode="w") as zfile, open(
            BWR_FILE, mode="rb"
        ) as sfile:
            zfile.write(sfile.read())
    # Yield the stream of binary zip data to be reader
    with zipfile.ZipFile(filename, mode="r") as z:
        with z.open(zipname, mode="r") as zstream:
            yield zstream
    # Clean up after the test
    filename.unlink()
    d.rmdir()
def compareDetector(actual, expected):
    """Assert that two detectors share type, indexes, data arrays and grids."""
    assert type(actual) is type(expected)
    assert actual.indexes == expected.indexes
    for attr in ("bins", "tallies", "errors"):
        assert getattr(actual, attr) == pytest.approx(getattr(expected, attr))
    assert set(actual.grids) == set(expected.grids)
    for key, expGrid in expected.grids.items():
        assert actual.grids[key] == pytest.approx(expGrid)
def compareDetReader(actual, expected):
    """Assert that two detector collections hold identical named detectors."""
    assert set(actual) == set(expected)
    for name in expected:
        compareDetector(actual[name], expected[name])
def test_new_det(previousBWR):
    """New DetectorFile matches the legacy reader for str, Path and stream inputs."""
    fromstr = serpentTools.next.DetectorFile.fromSerpent(BWR_FILE)
    compareDetReader(fromstr, previousBWR)
    frompath = serpentTools.next.DetectorFile.fromSerpent(
        pathlib.Path(BWR_FILE)
    )
    compareDetReader(frompath, previousBWR)
    with open(BWR_FILE, mode="r") as stream:
        fromstream = serpentTools.next.DetectorFile.fromSerpent(stream)
    compareDetReader(fromstream, previousBWR)
@pytest.mark.skipif(
    sys.version_info < (3, 6), reason="Can't write zip files with <3.6"
)
def test_from_zip(previousBWR, zippedStream):
    """New DetectorFile can read from a zip-member binary stream."""
    newfile = serpentTools.next.DetectorFile.fromSerpent(zippedStream)
    compareDetReader(previousBWR, newfile)
@pytest.fixture(scope="module")
def previousHex():
    """Read the hex-plot detector file once with the established reader."""
    return serpentTools.read(HEX_FILE)
def test_filtered(previousHex):
    """DetectorReader.names filters the read down to exactly one detector."""
    reader = serpentTools.next.DetectorReader()
    # An unfiltered read must match the legacy reader
    unfiltered = reader.read(HEX_FILE)
    compareDetReader(unfiltered, previousHex)
    # Filtering on each name in turn yields just that detector
    for wanted, reference in previousHex.items():
        reader.names.clear()
        reader.names.add(wanted)
        filtered = reader.read(HEX_FILE)
        assert len(filtered) == 1
        gotName, gotDetector = filtered.popitem()
        assert gotName == wanted
        compareDetector(gotDetector, reference)
def test_postcheck():
    """An empty stream raises in strict mode, warns otherwise, passes unchecked."""
    fakestream = io.StringIO()
    reader = serpentTools.next.DetectorReader()
    with pytest.raises(serpentTools.SerpentToolsException):
        reader.read(fakestream, postcheck=True, strict=True)
    reader.read(fakestream, postcheck=False)
    with pytest.warns(UserWarning):
        reader.read(fakestream, postcheck=True, strict=False)
| CORE-GATECH-GROUP/serpent-tools | tests/test_next_detector.py | Python | mit | 3,247 |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
"""
INFO lastFM_user Dez 14 17:35:27 Got new sessionid: '1488f34a1cbed7c9f4232f8fd563c3bd' (coherence/backends/lastfm_storage.py:60)
DEBUG lastFM_stream Dez 14 17:35:53 render <GET /da525474-5357-4d1b-a894-76b1293224c9/1005 HTTP/1.1> (coherence/backends/lastfm_storage.py:148)
command GET
rest /user/e0362c757ef49169e9a0f0970cc2d367.mp3
headers {'icy-metadata': '1', 'host': 'kingpin5.last.fm', 'te': 'trailers', 'connection': 'TE', 'user-agent': 'gnome-vfs/2.12.0.19 neon/0.24.7'}
ProxyClient handleStatus HTTP/1.1 200 OK
ProxyClient handleHeader Content-Type audio/mpeg
ProxyClient handleHeader Content-Length 4050441
ProxyClient handleHeader Cache-Control no-cache, must-revalidate
DEBUG lastFM_stream Dez 14 17:35:53 render <GET /da525474-5357-4d1b-a894-76b1293224c9/1005 HTTP/1.1> (coherence/backends/lastfm_storage.py:148)
command GET
rest /user/e0362c757ef49169e9a0f0970cc2d367.mp3
headers {'icy-metadata': '1', 'host': 'kingpin5.last.fm', 'te': 'trailers', 'connection': 'TE', 'user-agent': 'gnome-vfs/2.12.0.19 neon/0.24.7'}
ProxyClient handleStatus HTTP/1.1 403 Invalid ticket
"""
# Copyright 2007, Frank Scholz <coherence@beebits.net>
# Copyright 2007, Moritz Struebe <morty@gmx.net>
from twisted.internet import defer
from coherence.upnp.core import utils
from coherence.upnp.core.DIDLLite import classChooser, Container, Resource, DIDLElement
import coherence.extern.louie as louie
from coherence.extern.simple_plugin import Plugin
from coherence import log
from coherence.backend import BackendItem, BackendStore
from urlparse import urlsplit
try:
from hashlib import md5
except ImportError:
# hashlib is new in Python 2.5
from md5 import md5
import string
DEFAULT_NAME = 'LastFMStore'
class LastFMUser(log.Loggable):
logCategory = 'lastFM_user'
user = None
passwd = None
host = "ws.audioscrobbler.com"
basepath = "/radio"
sessionid = None
parent = None
getting_tracks = False
tracks = []
def __init__(self, user, passwd):
log.Loggable.__init__(self)
if user is None:
self.warn("No User", )
if passwd is None:
self.warn("No Passwd", )
self.user = user
self.passwd = passwd
def login(self):
if self.sessionid != None:
self.warning("Session seems to be valid", )
return
def got_page(result):
lines = result[0].split("\n")
for line in lines:
tuple = line.rstrip().split("=", 1)
if len(tuple) == 2:
if tuple[0] == "session":
self.sessionid = tuple[1]
self.info("Got new sessionid: %r", self.sessionid)
if tuple[0] == "base_url":
if(self.host != tuple[1]):
self.host = tuple[1]
self.info("Got new host: %s", self.host)
if tuple[0] == "base_path":
if(self.basepath != tuple[1]):
self.basepath = tuple[1]
self.info("Got new path: %s", self.basepath)
self.get_tracks()
def got_error(error):
self.warning("Login to LastFM Failed! %r", error)
self.debug("%r", error.getTraceback())
def hexify(s): # This function might be GPL! Found this code in some other Projects, too.
result = ""
for c in s:
result = result + ("%02x" % ord(c))
return result
password = hexify(md5(self.passwd).digest())
req = self.basepath + "/handshake.php/?version=1&platform=win&username=" + self.user + "&passwordmd5=" + password + "&language=en&player=coherence"
utils.getPage("http://" + self.host + req).addCallbacks(got_page, got_error, None, None, None, None)
def get_tracks(self):
if self.getting_tracks == True:
return
def got_page(result):
result = utils.parse_xml(result, encoding='utf-8')
self.getting_tracks = False
print self.getting_tracks
print "got Tracks"
for track in result.findall('trackList/track'):
data = {}
def get_data(name):
#print track.find(name).text.encode('utf-8')
return track.find(name).text.encode('utf-8')
#Fixme: This section needs some work
print "adding Track"
data['mimetype'] = 'audio/mpeg'
data['name'] = get_data('creator') + " - " + get_data('title')
data['title'] = get_data('title')
data['artist'] = get_data('creator')
data['creator'] = get_data('creator')
data['album'] = get_data('album')
data['duration'] = get_data('duration')
#FIXME: Image is the wrong tag.
data['image'] = get_data('image')
data['url'] = track.find('location').text.encode('utf-8')
item = self.parent.store.append(data, self.parent)
self.tracks.append(item)
def got_error(error):
self.warning("Problem getting Tracks! %r", error)
self.debug("%r", error.getTraceback())
self.getting_tracks = False
self.getting_tracks = True
req = self.basepath + "/xspf.php?sk=" + self.sessionid + "&discovery=0&desktop=1.3.1.1"
utils.getPage("http://" + self.host + req).addCallbacks(got_page, got_error, None, None, None, None)
def update(self, item):
if 0 < self.tracks.count(item):
while True:
track = self.tracks[0]
if track == item:
break
self.tracks.remove(track)
# Do not remoce so the tracks to answer the browse
# request correctly.
#track.store.remove(track)
#del track
#if len(self.tracks) < 5:
self.get_tracks()
class LFMProxyStream(utils.ReverseProxyResource, log.Loggable):
    """Reverse-proxies a LastFM stream URL and notifies the owning item on play."""
    logCategory = 'lastFM_stream'
    def __init__(self, uri, parent):
        log.Loggable.__init__(self)
        self.uri = uri
        self.parent = parent
        # Split the stream uri into host/port/path for the proxy base class
        _, host_port, path, _, _ = urlsplit(uri)
        if host_port.find(':') != -1:
            host, port = tuple(host_port.split(':'))
            port = int(port)
        else:
            host = host_port
            port = 80
        if path == '':
            path = '/'
        #print "ProxyStream init", host, port, path
        utils.ReverseProxyResource.__init__(self, host, port, path)
    def render(self, request):
        """Mark the item as played and let LastFMUser rotate its queue, then proxy."""
        self.debug("render %r", request)
        self.parent.store.LFM.update(self.parent)
        self.parent.played = True
        return utils.ReverseProxyResource.render(self, request)
class LastFMItem(log.Loggable):
    """One entry in the LastFM media tree: either the root directory or a track.

    Tracks get a proxied stream resource; directories just hold children.
    """
    logCategory = 'LastFM_item'
    def __init__(self, id, obj, parent, mimetype, urlbase, UPnPClass, update=False):
        log.Loggable.__init__(self)
        self.id = id
        self.name = obj.get('name')
        self.title = obj.get('title')
        self.artist = obj.get('artist')
        self.creator = obj.get('creator')
        self.album = obj.get('album')
        self.duration = obj.get('duration')
        self.mimetype = mimetype
        self.parent = parent
        if parent:
            parent.add_child(self, update=update)
        if parent == None:
            parent_id = -1
        else:
            parent_id = parent.get_id()
        self.item = UPnPClass(id, parent_id, self.title, False, self.creator)
        if isinstance(self.item, Container):
            self.item.childCount = 0
        self.child_count = 0
        self.children = []
        if(len(urlbase) and urlbase[-1] != '/'):
            urlbase += '/'
        if self.mimetype == 'directory':
            self.url = urlbase + str(self.id)
        else:
            # Tracks are served through our own proxy, not LastFM directly
            self.url = urlbase + str(self.id)
            self.location = LFMProxyStream(obj.get('url'), self)
            #self.url = obj.get('url')
        if self.mimetype == 'directory':
            self.update_id = 0
        else:
            # Advertise the stream with DLNA protocol info for MP3
            res = Resource(self.url, 'http-get:*:%s:%s' % (obj.get('mimetype'),
                                                           ';'.join(('DLNA.ORG_PN=MP3',
                                                                     'DLNA.ORG_CI=0',
                                                                     'DLNA.ORG_OP=01',
                                                                     'DLNA.ORG_FLAGS=01700000000000000000000000000000'))))
            res.size = -1  # None
            self.item.res.append(res)
    def remove(self):
        """Detach this item from its parent and drop the UPnP item."""
        if self.parent:
            self.parent.remove_child(self)
        del self.item
    def add_child(self, child, update=False):
        """Register a child item, keeping the UPnP childCount in sync."""
        if self.children == None:
            self.children = []
        self.children.append(child)
        self.child_count += 1
        if isinstance(self.item, Container):
            self.item.childCount += 1
        if update == True:
            self.update_id += 1
    def remove_child(self, child):
        """Unregister a child item and bump the container update id."""
        self.info("remove_from %d (%s) child %d (%s)", self.id, self.get_name(), child.id, child.get_name())
        if child in self.children:
            self.child_count -= 1
            if isinstance(self.item, Container):
                self.item.childCount -= 1
            self.children.remove(child)
            self.update_id += 1
    def get_children(self, start=0, request_count=0):
        """Return children from *start*; request_count of 0 means "all"."""
        if request_count == 0:
            return self.children[start:]
        else:
            return self.children[start:request_count]
    def get_child_count(self):
        if self.mimetype == 'directory':
            return 100  # Some Testing, with strange Numbers: 0/lots
        return self.child_count
    def get_id(self):
        return self.id
    def get_update_id(self):
        # Only directories track an update_id; tracks return None
        if hasattr(self, 'update_id'):
            return self.update_id
        else:
            return None
    def get_path(self):
        return self.url
    def get_name(self):
        return self.name
    def get_parent(self):
        return self.parent
    def get_item(self):
        return self.item
    def get_xml(self):
        return self.item.toString()
    def __repr__(self):
        return 'id: ' + str(self.id) + ' @ ' + self.url + ' ' + self.name
class LastFMStore(log.Loggable, Plugin):
    """Coherence MediaServer backend exposing a LastFM radio stream.

    Items live in self.store keyed by integer id, starting at 1000.
    NOTE(review): self.server appears to be attached by the framework after
    construction -- the ``server`` __init__ argument is unused here.
    """
    logCategory = 'lastFM_store'
    implements = ['MediaServer']
    def __init__(self, server, **kwargs):
        log.Loggable.__init__(self)
        self.next_id = 1000
        self.config = kwargs
        self.name = kwargs.get('name', DEFAULT_NAME)
        self.update_id = 0
        self.store = {}
        self.wmc_mapping = {'4': 1000}
        louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
    def __repr__(self):
        return str(self.__class__).split('.')[-1]
    def append(self, obj, parent):
        """Create a LastFMItem for *obj* under *parent* and register it."""
        if isinstance(obj, basestring):
            mimetype = 'directory'
        else:
            mimetype = obj['mimetype']
        UPnPClass = classChooser(mimetype)
        id = self.getnextID()
        update = False
        if hasattr(self, 'update_id'):
            update = True
        self.store[id] = LastFMItem(id, obj, parent, mimetype, self.urlbase,
                                    UPnPClass, update=update)
        self.store[id].store = self
        if hasattr(self, 'update_id'):
            self.update_id += 1
            if self.server:
                self.server.content_directory_server.set_variable(0, 'SystemUpdateID', self.update_id)
            if parent:
                #value = '%d,%d' % (parent.get_id(),parent_get_update_id())
                value = (parent.get_id(), parent.get_update_id())
                if self.server:
                    self.server.content_directory_server.set_variable(0, 'ContainerUpdateIDs', value)
        return self.store[id]
    def remove(self, item):
        """Remove *item* from the store and publish the UPnP update ids.

        Bug fix: the original did ``del self.store[int(id)]`` -- referencing
        the *builtin* ``id`` -- so the entry was never deleted (the TypeError
        was swallowed by a bare except).
        """
        try:
            parent = item.get_parent()
            item_id = item.get_id()
            item.remove()
            del self.store[item_id]
            if hasattr(self, 'update_id'):
                self.update_id += 1
                if self.server:
                    self.server.content_directory_server.set_variable(0, 'SystemUpdateID', self.update_id)
                #value = '%d,%d' % (parent.get_id(),parent_get_update_id())
                value = (parent.get_id(), parent.get_update_id())
                if self.server:
                    self.server.content_directory_server.set_variable(0, 'ContainerUpdateIDs', value)
        except Exception:
            # best-effort removal; items already gone are silently ignored
            pass
    def len(self):
        return len(self.store)
    def get_by_id(self, id):
        """Look up an item by (possibly 'id@suffix' string) id; None if absent."""
        if isinstance(id, basestring):
            id = id.split('@', 1)
            id = id[0]
        id = int(id)
        if id == 0:
            id = 1000  # UPnP root maps onto our root container
        try:
            return self.store[id]
        except KeyError:
            return None
    def getnextID(self):
        """Hand out the next sequential item id."""
        ret = self.next_id
        self.next_id += 1
        return ret
    def upnp_init(self):
        """Create the root container, log in to LastFM and advertise MP3 streaming."""
        self.current_connection_id = None
        parent = self.append({'name': 'LastFM', 'mimetype': 'directory'}, None)
        self.LFM = LastFMUser(self.config.get("login"), self.config.get("password"))
        self.LFM.parent = parent
        self.LFM.login()
        if self.server:
            self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
                                                               ['http-get:*:audio/mpeg:*'],
                                                               default=True)
def main():
    """Standalone smoke test: build a store and run its upnp initialisation."""
    f = LastFMStore(None)
    def got_upnp_result(result):
        # NOTE(review): defined but never used -- upnp_init returns nothing here
        print "upnp", result
    f.upnp_init()
if __name__ == '__main__':
    # Run the smoke test inside a twisted reactor loop
    from twisted.internet import reactor
    reactor.callWhenRunning(main)
    reactor.run()
| coherence-project/Coherence | coherence/backends/lastfm_storage.py | Python | mit | 14,194 |
import unittest
from charm.toolbox.symcrypto import SymmetricCryptoAbstraction,AuthenticatedCryptoAbstraction, MessageAuthenticator
from charm.toolbox.pairinggroup import PairingGroup,GT
from charm.core.math.pairing import hashPair as sha1
class SymmetricCryptoAbstractionTest(unittest.TestCase):
    """Round-trip tests for SymmetricCryptoAbstraction (AES in CBC mode)."""

    def testAESCBC(self):
        self.MsgtestAESCBC(b"hello world")

    def testAESCBCLong(self):
        self.MsgtestAESCBC(b"Lots of people working in cryptography have no deep \
concern with real application issues. They are trying to discover things \
clever enough to write papers about -- Whitfield Diffie.")

    def testAESCBC_Seperate(self):
        self.MsgTestAESCBCSeperate(b"Lots of people working in cryptography have no deep \
concern with real application issues. They are trying to discover things \
clever enough to write papers about -- Whitfield Diffie.")

    def MsgtestAESCBC(self, msg):
        # encrypt and decrypt with the very same abstraction instance
        group = PairingGroup('SS512')
        cipher = SymmetricCryptoAbstraction(sha1(group.random(GT)))
        recovered = cipher.decrypt(cipher.encrypt(msg))
        assert msg == recovered, 'o: =>%s\nm: =>%s' % (msg, recovered)

    def MsgTestAESCBCSeperate(self, msg):
        # two instances derived from the same key must interoperate
        group = PairingGroup('SS512')
        shared = group.random(GT)
        sender = SymmetricCryptoAbstraction(sha1(shared))
        ciphertext = sender.encrypt(msg)
        receiver = SymmetricCryptoAbstraction(sha1(shared))
        recovered = receiver.decrypt(ciphertext)
        assert msg == recovered, 'o: =>%s\nm: =>%s' % (msg, recovered)
class AuthenticatedCryptoAbstractionTest(unittest.TestCase):
    """Round-trip tests for AuthenticatedCryptoAbstraction (AES + MAC)."""

    def testAESCBC(self):
        self.MsgtestAESCBC(b"hello world")

    def testAESCBCLong(self):
        self.MsgtestAESCBC(b"Lots of people working in cryptography have no deep \
concern with real application issues. They are trying to discover things \
clever enough to write papers about -- Whitfield Diffie.")

    def testAESCBC_Seperate(self):
        self.MsgTestAESCBCSeperate(b"Lots of people working in cryptography have no deep \
concern with real application issues. They are trying to discover things \
clever enough to write papers about -- Whitfield Diffie.")

    def MsgtestAESCBC(self, msg):
        # encrypt and decrypt with the very same abstraction instance
        group = PairingGroup('SS512')
        cipher = AuthenticatedCryptoAbstraction(sha1(group.random(GT)))
        recovered = cipher.decrypt(cipher.encrypt(msg))
        assert msg == recovered, 'o: =>%s\nm: =>%s' % (msg, recovered)

    def MsgTestAESCBCSeperate(self, msg):
        # two instances derived from the same key must interoperate
        group = PairingGroup('SS512')
        shared = group.random(GT)
        sender = AuthenticatedCryptoAbstraction(sha1(shared))
        ciphertext = sender.encrypt(msg)
        receiver = AuthenticatedCryptoAbstraction(sha1(shared))
        recovered = receiver.decrypt(ciphertext)
        assert msg == recovered, 'o: =>%s\nm: =>%s' % (msg, recovered)
class MessageAuthenticatorTest(unittest.TestCase):
    """Tests for MessageAuthenticator MAC generation and verification."""

    def testSelfVerify(self):
        key = sha1(PairingGroup('SS512').random(GT))
        m = MessageAuthenticator(key)
        a = m.mac('hello world')
        assert m.verify(a), "expected message to verify"

    def testSeperateVerify(self):
        # a second authenticator built from the same key must verify the mac
        key = sha1(PairingGroup('SS512').random(GT))
        m = MessageAuthenticator(key)
        a = m.mac('hello world')
        m1 = MessageAuthenticator(key)
        assert m1.verify(a), "expected message to verify"

    def testTamperData(self):
        key = sha1(PairingGroup('SS512').random(GT))
        m = MessageAuthenticator(key)
        a = m.mac('hello world')
        m1 = MessageAuthenticator(key)
        a["msg"] = "tampered"
        # BUG FIX: the failure message previously read "expected message to
        # verify" even though the tamper tests assert the opposite.
        assert not m1.verify(a), "expected tampered message to fail verification"

    def testTamperMac(self):
        key = sha1(PairingGroup('SS512').random(GT))
        m = MessageAuthenticator(key)
        a = m.mac('hello world')
        m1 = MessageAuthenticator(key)
        a["digest"] = "tampered"
        assert not m1.verify(a), "expected tampered message to fail verification"

    def testTamperAlg(self):
        key = sha1(PairingGroup('SS512').random(GT))
        m = MessageAuthenticator(key)
        a = m.mac('hello world')
        m1 = MessageAuthenticator(key)
        m1._algorithm = "alg"  # bypassing the algorithm check to verify the mac is over the alg + data
        a["alg"] = "alg"
        assert not m1.verify(a), "expected tampered message to fail verification"
if __name__ == "__main__":
    # allow running this test module directly
    unittest.main()
| lferr/charm | charm/test/toolbox/symcrypto_test.py | Python | lgpl-3.0 | 4,362 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2016 sadikovi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import multiprocessing
import Queue as threadqueue
import threading
import time
import src.const as const
from src.log import logger
import src.util as util
# == Task statuses ==
# Statuses are plain strings and are meant to be compared by equality.
# Task is pending, ready to execute block
TASK_PENDING = "PENDING"
# Task is started and running on backend
TASK_STARTED = "STARTED"
# Task is cancelled
TASK_CANCELLED = "CANCELLED"
# Task is succeeded
TASK_SUCCEEDED = "SUCCEEDED"
# Task is failed to execute
TASK_FAILED = "FAILED"
# == Message statuses ==
# Action requests for executor (scheduler -> executor)
EXECUTOR_IS_ALIVE = "EXECUTOR_IS_ALIVE"
EXECUTOR_SHUTDOWN = "EXECUTOR_SHUTDOWN"
EXECUTOR_CANCEL_TASK = "EXECUTOR_CANCEL_TASK"
# Statuses of actions taken by executor (executor -> scheduler events)
EXECUTOR_TASK_STARTED = "EXECUTOR_TASK_STARTED"
EXECUTOR_TASK_SUCCEEDED = "EXECUTOR_TASK_SUCCEEDED"
EXECUTOR_TASK_FAILED = "EXECUTOR_TASK_FAILED"
EXECUTOR_TASK_CANCELLED = "EXECUTOR_TASK_CANCELLED"
def validate_timeout(value):
    """
    Validate timeout, which must be greater or equal to the 100ms minimum.
    :param value: raw timeout
    :return: validated timeout as float
    """
    parsed = float(value)
    if parsed >= 0.1:
        return parsed
    raise AttributeError("Invalid timeout %s, expected timeout >= 0.1" % parsed)
def validate_num_executors(value):
    """
    Validate number of executors created; at least 1 executor is required.
    :param value: raw number of executors
    :return: validated value as int
    """
    parsed = int(value)
    if parsed >= 1:
        return parsed
    raise AttributeError("Invalid num executors %s, expected executors >= 1" % parsed)
class Task(object):
    """
    Task class is a public API for creating executable code within executor TaskThread. Must be
    serializable (pickle-able), and must implement:
    - priority
    - run()
    - cancel()
    Task defines the running code for a unit of execution; note that `run()` is assumed to be
    blocking, though the implementation should handle asynchronous calls of `cancel()` correctly.
    """
    @property
    def priority(self):
        """
        Priority of the task. Must be one of the PRIORITY_0, PRIORITY_1, PRIORITY_2.
        :return: task priority
        """
        raise NotImplementedError("Not implemented")

    def run(self):
        """
        Main method within which Block code executes. This method should block and wait for it to
        complete. Example might be a polling status of the running process. Any failures can result
        in exception; it is recommended to fail loudly instead of silencing exceptions, because
        Task will capture failure and return correct status for code block.
        """
        raise NotImplementedError("Not implemented")

    def cancel(self):
        """
        Cancel currently running block. This should shut down the main process in `run()`, though
        this operation runs after the task is cancelled, so it is not going to block the task
        process, since the task exits before shutdown. Note that it is important that Block
        provides this method, otherwise the task will be cancelled without terminating the actual
        process.
        """
        raise NotImplementedError("Not implemented")

    def dumps(self):
        """
        Return dictionary of key-value pairs (can be nested) to display as task info. This is used
        solely for the purpose of showing in UI and will be converted into JSON. Front-end must
        know how to parse the task's generated JSON.
        :return: dictionary of key-value pairs to display, or None
        """
        return None
class InterruptedException(Exception):
    """
    Base class for interrupted exceptions; these errors are only raised when either task or
    executor is asked to terminate (for a task this means cancellation). Note that this only
    covers expected or scheduled terminations -- any other exception propagates without being
    wrapped into InterruptedException.
    """
    pass
class TaskInterruptedException(InterruptedException):
    """
    Interrupted exception for a task, raised inside TaskThread.run() when the task is cancelled.
    """
    pass
class ExecutorInterruptedException(InterruptedException):
    """
    Interrupted exception for an executor, raised when the executor receives a shutdown request.
    """
    pass
class AtomicCounter(object):
    """
    Simple process-safe counter, used to assign unique task ids within the scheduler.
    Backed by a multiprocessing.Value with its own lock.
    """
    def __init__(self, initial=0):
        """
        Create an atomic counter with the given non-negative initial value.
        :param initial: initial value for the counter
        """
        if initial < 0:
            raise AttributeError("Expected initial %s to be >= 0" % initial)
        self.val = multiprocessing.Value("I", initial, lock=True)

    def increment(self):
        """
        Atomically add 1 to the counter.
        """
        with self.val.get_lock():
            self.val.value += 1

    def get(self):
        """
        Return the current counter value.
        :return: atomic value
        """
        with self.val.get_lock():
            return self.val.value

    def getAndIncrement(self):
        """
        Atomically fetch the current value and then add 1.
        :return: value before the increment
        """
        with self.val.get_lock():
            before = self.val.value
            self.val.value = before + 1
            return before
class Message(object):
    """
    Base class for messages exchanged between scheduler and executors over a pipe. Takes a status
    string plus keyword arguments; values must be pickle-able (ideally plain strings or lists)
    because messages cross process boundaries.
    """
    def __init__(self, status, **kwargs):
        self.status = status
        self.arguments = kwargs
        # human-readable form, e.g. Message[EXECUTOR_SHUTDOWN]{...}
        self.pretty_name = u"%s[%s]%s" % (type(self).__name__, self.status, self.arguments)

    def __str__(self):
        return self.pretty_name

    def __unicode__(self):
        return self.pretty_name

    def __repr__(self):
        return self.pretty_name
class WorkerThread(threading.Thread):
    """
    Non-daemon thread that executes a single Task. Any exception raised by the task is captured
    and pushed onto the message queue so the owning TaskThread can inspect it; all communication
    with the owner happens through that queue, which is created before this thread is launched.
    """
    def __init__(self, task, msg_queue):
        """
        Create new instance of WorkerThread.
        :param task: Task instance to run
        :param msg_queue: queue used to report errors back to the owner
        """
        super(WorkerThread, self).__init__()
        self._task = task
        self._msg_queue = msg_queue

    def cancel(self):
        # delegate cancellation straight to the underlying task
        self._task.cancel()

    def run(self):
        # pylint: disable=W0703,broad-except
        try:
            self._task.run()
        except Exception as error:
            # hand the failure to the owning thread instead of dying silently
            self._msg_queue.put_nowait(error)
        # pylint: enable=W0703,broad-except
class TaskBlock(object):
    """
    Plain container that bundles a task with its scheduler-assigned id and optional
    caller-supplied tracking info.
    """
    def __init__(self, uid, task, info=None):
        """
        Create instance of Task block.
        :param uid: internal task id, assigned by scheduler
        :param task: Task instance to run
        :param info: user-defined information to track task (opaque to the scheduler)
        """
        self.uid = uid
        self.task = task
        self.info = info
class TaskThread(threading.Thread):
    """
    TaskThread is a container for task as a unit of execution. It is essentially a daemon thread
    that spawns another worker thread to execute task block. Life cycle of the task is reflected in
    set of statuses:
    TASK_PENDING -> TASK_STARTED -> TASK_FAILED/TASK_SUCCEEDED
                                 -> TASK_CANCELLED
    When task is created it is assigned TASK_PENDING status, TASK_STARTED is assigned when task is
    launched, we also start collecting metrics, TASK_FAILED/TASK_SUCCEEDED is returned when worker
    thread is finished; if there is any message in msg_queue as an exception, task is considered
    failed, otherwise succeeded. Task can also be cancelled during execution. Note that task does
    not guarantee that worker thread will exit correctly, this depends on actual implementation.
    """
    def __init__(self, task_block):
        """
        Create new instance of TaskThread. Note that task block must be serializable with pickle.
        :param task_block: TaskBlock instance containing correct task to run
        """
        super(TaskThread, self).__init__()
        # task is by definition a daemon thread
        self.daemon = True
        # refresh timeout for worker thread (seconds between liveness polls)
        self.refresh_timeout = 0.5
        if not isinstance(task_block, TaskBlock):
            raise AttributeError("Invalid task block provided: %s" % task_block)
        if not isinstance(task_block.task, Task):
            raise AttributeError("Invalid task provided: %s" % task_block.task)
        self.__uid = task_block.uid
        self.__task = task_block.task
        self.__info = task_block.info
        self.__metrics = {}
        self.__status = TASK_PENDING
        # setting up thread name, also used as the logging prefix
        self.name = "Task[%s]" % self.__uid
        # cancellation flag, set asynchronously via cancel()
        self.__cancel = threading.Event()
        # callbacks, each may be None
        # .. note:: DeveloperApi
        self.on_task_started = None
        self.on_task_cancelled = None
        self.on_task_succeeded = None
        self.on_task_failed = None

    @property
    def uid(self):
        """
        Get unique identifier.
        :return: unique identifier for this task
        """
        return self.__uid

    @property
    def task_info(self):
        """
        Get task info (optional, can be None).
        :return: task info if specified
        """
        return self.__info

    @property
    def status(self):
        """
        Get current task status.
        :return: status for this task
        """
        return self.__status

    def _set_metric(self, name, value):
        """
        Set metric value for name, this overwrites previous value.
        :param name: name for metric
        :param value: new value for metric
        """
        self.__metrics[name] = value

    def _get_metric(self, name):
        """
        Get metric value for name, or None if name is not found.
        :return: metric value or None in case of absent name
        """
        return self.__metrics[name] if name in self.__metrics else None

    def cancel(self):
        """
        Cancel current thread and potentially running task (asynchronous: the
        run loop observes the flag on its next poll).
        """
        logger.debug("%s - Requested cancellation of task", self.name)
        self.__cancel.set()

    @property
    def is_cancelled(self):
        """
        Return True, if thread is either cancelled, or has been requested to stop.
        :return: True if cancel condition is triggered, False otherwise
        """
        return self.__cancel.is_set()

    def _safe_exec(self, func, **kwargs):
        """
        Safely execute function with a list of arguments. Function is assumed not to return any
        result; callback failures are logged and swallowed so they cannot affect the task.
        :param func: function to execute (may be None)
        :param kwargs: dictionary of method parameters
        """
        # pylint: disable=W0703,broad-except
        try:
            if func:
                func(**kwargs)
        except Exception as e:
            logger.debug("%s - Failed to execute '%s(%s)', reason=%s", self.name, func, kwargs, e)
        # pylint: enable=W0703,broad-except

    def run(self):
        # update task metrics and set status
        self._set_metric("starttime", time.time())
        self._set_metric("duration", 0)
        self.__status = TASK_STARTED
        # try launching listener callback, note that failure should not affect execution of task
        self._safe_exec(self.on_task_started, uid=self.__uid, info=self.__info)
        logger.debug("%s - Started, time=%s", self.name, self._get_metric("starttime"))
        try:
            msg_queue = threadqueue.Queue()
            wprocess = WorkerThread(self.__task, msg_queue)
            wprocess.start()
            next_iteration = True
            while next_iteration:
                # poll the worker periodically; cancellation is checked first
                time.sleep(self.refresh_timeout)
                if self.is_cancelled:
                    wprocess.cancel()
                    raise TaskInterruptedException()
                if not wprocess.is_alive():
                    next_iteration = False
                    wprocess.join()
                    if not msg_queue.empty():
                        # we only care about the first exception that occurred
                        error = msg_queue.get_nowait()
                        raise error
        except TaskInterruptedException:
            # task has been cancelled or requested termination
            self.__status = TASK_CANCELLED
            self._safe_exec(self.on_task_cancelled, uid=self.__uid, info=self.__info)
        # pylint: disable=W0703,broad-except
        except Exception as e:
            # any other exception is considered a failure
            self._set_metric("reason", "%s" % e)
            logger.debug("%s - Failure reason=%s", self.name, self._get_metric("reason"))
            self.__status = TASK_FAILED
            self._safe_exec(self.on_task_failed, uid=self.__uid, info=self.__info,
                            reason=self._get_metric("reason"))
        # pylint: enable=W0703,broad-except
        else:
            self.__status = TASK_SUCCEEDED
            self._safe_exec(self.on_task_succeeded, uid=self.__uid, info=self.__info)
        finally:
            # set post-execution metrics for task
            self._set_metric("endtime", time.time())
            duration = self._get_metric("endtime") - self._get_metric("starttime")
            self._set_metric("duration", duration)
            logger.debug("%s - Finished, status=%s, time=%s, duration=%s", self.name, self.__status,
                         self._get_metric("endtime"), self._get_metric("duration"))
class Executor(multiprocessing.Process):
    """
    Executor process to run tasks and receive messages from scheduler. It represents long running
    daemon process with polling interval, all communications are done through pipe. Executor
    guarantees termination of task with termination of subprocess, assuming that task implements
    interface correctly. Takes dictionary of task queues that are mapped to priorities, higher
    priority is checked first.
    """
    def __init__(self, name, conn, task_queue_map, timeout=0.5):
        """
        Create instance of Executor.
        :param name: friendly executor name, e.g. "executor-1"
        :param conn: Connection instance to receive and send messages
        :param task_queue_map: task queue as a dict [priority: queue]
        :param timeout: polling interval in seconds
        """
        super(Executor, self).__init__()
        self.name = "%s[%s]" % (type(self).__name__, name)
        self.daemon = True
        self.conn = conn
        self.task_queue_map = task_queue_map
        self.timeout = validate_timeout(timeout)
        # we also keep reference to active task, this will be reassigned for every iteration
        self._active_task = None
        # set of task ids to cancel; a task_id is added when the cancel message arrives and
        # discarded once the corresponding task has been dealt with
        self._cancel_task_ids = set()
        # flag to indicate if executor is terminated
        self._terminated = False

    def _process_message(self, msg):
        """
        Process message and take action, e.g. terminate process, execute callback, etc. Message
        types are defined above in the package. Note that this can take actions on tasks, e.g.
        when task is cancelled, so the subsequent processing of task will work with updated state.
        :param msg: message to process
        """
        logger.debug("%s - Received message %s", self.name, msg)
        if isinstance(msg, Message):
            if msg.status == EXECUTOR_SHUTDOWN:  # pragma: no branch
                raise ExecutorInterruptedException("Executor shutdown")
            elif msg.status == EXECUTOR_CANCEL_TASK:  # pragma: no branch
                # update set of tasks to cancel
                if "task_id" in msg.arguments:  # pragma: no branch
                    task_id = msg.arguments["task_id"]
                    self._cancel_task_ids.add(task_id)
                    logger.debug("%s - Registered cancelled task %s", self.name, task_id)
            else:
                # valid but unrecognized message, no-op
                pass
        else:
            logger.info("%s - Invalid message %s is ignored", self.name, msg)

    def _respond_is_alive(self):
        """
        Send "is alive" response to the scheduler with current timestamp.
        """
        if self.conn:  # pragma: no branch
            self.conn.send(Message(EXECUTOR_IS_ALIVE, datetime=util.utcnow(), name=self.name))

    def _get_new_task(self):
        """
        Extract new task from priority list of queues. If no tasks found for priority or priority
        does not exist in dictionary, next priority is checked. If task is found, it is returned,
        otherwise None. For each task TaskThread is created to provide status and metrics updates.
        :return: new available task across priorities, or None
        """
        task_block = None
        for priority in const.PRIORITIES:
            logger.debug("%s - Searching task in queue for priority %s", self.name, priority)
            try:
                task_block = self.task_queue_map[priority].get(block=False)
            except threadqueue.Empty:
                logger.debug("%s - No tasks available for priority %s", self.name, priority)
            except KeyError:
                logger.debug("%s - Non-existent priority %s skipped", self.name, priority)
            else:
                if task_block:  # pragma: no branch
                    break
        # create thread for task
        task_thread = TaskThread(task_block) if task_block else None
        return task_thread

    def _process_task(self):
        """
        Process individual task, dispatching on its current status. One of the checks performed is
        testing the current task_id against the cancelled set, discarding the task if it has been
        marked as cancelled, or terminating a running task.
        """
        if not self._active_task:
            self._active_task = self._get_new_task()
            # BUG FIX: previously "New task registered" was logged unconditionally,
            # even when no task was available in any queue.
            if self._active_task:
                logger.info("%s - New task registered", self.name)
        # before checking statuses and proceed execution, we check if current task was
        # requested to be cancelled, if yes, we remove it from set of ids.
        if self._active_task and self._active_task.uid in self._cancel_task_ids:
            self._cancel_task_ids.discard(self._active_task.uid)
            self._cancel_active_task()
        # check general task processing
        if self._active_task:
            task_id = self._active_task.uid
            info = self._active_task.task_info
            task_status = self._active_task.status
            # perform action based on active task status
            # BUG FIX: statuses are plain strings, so compare with '==' instead of 'is';
            # identity comparison only worked by virtue of CPython string interning.
            if task_status == TASK_PENDING:
                # check if external system is available to run task (Developer API)
                if self.external_system_available():
                    self._active_task.start()
                    self.conn.send(Message(EXECUTOR_TASK_STARTED, task_id=task_id, info=info))
                    logger.info("%s - Started task %s", self.name, task_id)
                else:
                    logger.info("%s - External system is not available", self.name)
            elif task_status == TASK_STARTED:
                # task has started and running
                if self._active_task.is_alive():  # pragma: no branch
                    logger.debug("%s - Ping task %s is alive", self.name, task_id)
            elif task_status == TASK_SUCCEEDED:
                # task finished successfully
                self.conn.send(Message(EXECUTOR_TASK_SUCCEEDED, task_id=task_id, info=info))
                logger.info("%s - Finished task %s, status %s", self.name, task_id, task_status)
                self._active_task = None
            elif task_status == TASK_FAILED:
                # task failed
                self.conn.send(Message(EXECUTOR_TASK_FAILED, task_id=task_id, info=info))
                logger.info("%s - Finished task %s, status %s", self.name, task_id, task_status)
                self._active_task = None
            elif task_status == TASK_CANCELLED:
                # task has been cancelled
                if self._active_task:  # pragma: no branch
                    self._active_task = None
            else:
                logger.warning("%s - Unknown status %s for %s", self.name, task_status, task_id)
        else:
            logger.debug("%s - No active task registered", self.name)

    def _cancel_active_task(self):
        """
        Cancel current running task, if available, and notify the scheduler.
        """
        if self._active_task:
            task_id = self._active_task.uid
            info = self._active_task.task_info
            self._active_task.cancel()
            self.conn.send(Message(EXECUTOR_TASK_CANCELLED, task_id=task_id, info=info))
            logger.info("%s - Cancelled task %s", self.name, task_id)
            self._active_task = None
        else:
            logger.info("%s - No active task to cancel", self.name)

    def external_system_available(self):
        """
        .. note:: DeveloperApi
        Can be overriden to check if external system is available to run task. This can include
        system status, e.g. running, or system load, e.g. how many tasks are already queued up.
        :return: True if system can run task, False otherwise
        """
        return True

    def iteration(self):
        """
        Run single iteration, entire logic of executor should be specified in this method, unless
        there is an additional logic between iterations. Iteration is cancelled, if executor is
        terminated.
        :return: boolean flag, True - run next iteration, False - terminate
        """
        # we process special case of terminated executor in case someone would launch it again.
        if self._terminated:
            logger.warning("Executor %s has been terminated", self.name)
            return False
        logger.debug("%s - Run iteration, timeout=%s", self.name, self.timeout)
        try:
            # send response to the scheduler that this executor is up and processing tasks
            self._respond_is_alive()
            # check if there are any messages in connection, process one message per iteration
            if self.conn.poll():
                self._process_message(self.conn.recv())
            # check if there is any outstanding task to run, otherwise poll data for current task
            self._process_task()
        except ExecutorInterruptedException:
            logger.info("%s - Requested termination of executor", self.name)
            self._terminated = True
            # cancel task that is currently running and clean up state
            self._cancel_active_task()
            return False
        # pylint: disable=W0703,broad-except
        except Exception as e:
            logger.exception("%s - Unrecoverable error %s, terminating", self.name, e)
            self._terminated = True
            return False
        # pylint: enable=W0703,broad-except
        else:
            return True

    def run(self):
        """
        Method to run tasks on executor, this runs in iterations with each timeout interval. Each
        iteration polls new messages from connection and checks running task. If iteration fails we
        immediately return status False.
        """
        logger.info("Start executor %s, time=%s", self.name, time.time())
        proceed = True
        while proceed:  # pragma: no branch
            proceed = self.iteration()
            if not proceed:
                return False
            time.sleep(self.timeout)
class Scheduler(object):
"""
Scheduler class prepares and launches executors and provides means to pass and process tasks
and messages from executors. Should be one instance per application.
"""
def __init__(self, num_executors, timeout=0.5):
"""
Create new instance of Scheduler.
:param num_executors: number of executors to initialize
:param timeout: executor's timeout
:param logger: executor's logger
"""
self.num_executors = validate_num_executors(num_executors)
self.timeout = validate_timeout(timeout)
# pipe connections to send and receive messages to/from executors
self.pipe = {}
# list of executors that are initialized
self.executors = []
# list of is_alive statuses for executors
self.is_alive_statuses = {}
# initialize priority queues, the lower number means higher priority
self.task_queue_map = {
const.PRIORITY_0: multiprocessing.Queue(),
const.PRIORITY_1: multiprocessing.Queue(),
const.PRIORITY_2: multiprocessing.Queue()
}
# scheduler metrics
self.__metrics = {}
# id allocator
self.counter = AtomicCounter(0)
# callbacks
"""
.. note:: DeveloperApi
Invoked when task is started on executor.
:param messages: list of messages EXECUTOR_TASK_STARTED
"""
self.on_task_started = None
"""
.. note:: DeveloperApi
Invoked when task is cancelled on executor.
:param messages: list of messages EXECUTOR_TASK_CANCELLED
"""
self.on_task_cancelled = None
"""
.. note:: DeveloperApi
Invoked when task is finished successfully on executor.
:param messages: list of messages EXECUTOR_TASK_SUCCEEDED
"""
self.on_task_succeeded = None
"""
.. note:: DeveloperApi
Invoked when task is finished with failure on executor.
:param messages: list of messages EXECUTOR_TASK_FAILED
"""
self.on_task_failed = None
"""
.. note:: DeveloperApi
Invoked when executor sends 'is alive' reponse.
:param messages: list of messages EXECUTOR_IS_ALIVE
"""
self.on_is_alive = self._update_is_alive
def get_num_executors(self):
"""
Get requested number of executors.
:return: number of executors requested
"""
return self.num_executors
def _get_metric(self, name):
"""
Get metric for name.
:return: metric value
"""
return self.__metrics[name] if name in self.__metrics else None
def _set_metric(self, name, value):
"""
Set metric value for name. Will update previously registered value.
:param name: metric name
:param value: metric value
"""
self.__metrics[name] = value
def _increment_metric(self, name):
"""
Increment metric assuming that metric is integer value. If error occurs defaults to None.
:param name: metric name
"""
updated = 0
try:
updated = int(self._get_metric(name)) + 1
except ValueError:
updated = 0
except TypeError:
updated = None
self._set_metric(name, updated)
def get_metrics(self):
"""
Return copy of the scheduler metrics.
:return: scheduler metrics copy
"""
return self.__metrics.copy()
def _prepare_executor(self, name):
"""
Prepare single executor, this creates connection for executor and launches it as daemon
process, and appends to executors list.
:param name: executor's name (original, not final executor name)
:return: created executor (it is already added to the list of executors)
"""
main_conn, exc_conn = multiprocessing.Pipe()
clazz = self.executor_class()
if not inspect.isclass(clazz) or not issubclass(clazz, Executor):
raise TypeError("Type %s !<: Executor" % clazz)
exc = clazz(name, exc_conn, self.task_queue_map, timeout=self.timeout)
# update executor with additional options (note that this should be used if custom executor
# class is provided)
self.update_executor(exc)
# update maintenance tools
self.executors.append(exc)
self.pipe[exc.name] = main_conn
self.is_alive_statuses[exc.name] = util.utcnow()
return exc
def start(self):
"""
Start scheduler, launches executors asynchronously.
"""
logger.info("Starting %s '%s' executors", self.num_executors, self.executor_class())
# Launch executors and save pipes per each
for i in range(self.num_executors):
exc = self._prepare_executor("#%s" % i)
exc.start()
def stop(self):
"""
Stop scheduler, terminates executors, and all tasks that were running at the time.
"""
for conn in self.pipe.values():
conn.send(Message(EXECUTOR_SHUTDOWN))
# timeout to terminate processes and process remaining messages in Pipe by polling thread
logger.info("Waiting for termination...")
time.sleep(5)
for exc in self.executors:
if exc.is_alive():
exc.terminate()
exc.join()
logger.info("Terminated executors, cleaning up internal data")
self.pipe = None
self.executors = None
self.task_queue_map = None
def submit(self, task, info=None):
"""
Add task for priority provided with task.
:param task: task to add, must be instance of Task
:param info: task info to track
:return: task uid
"""
if not isinstance(task, Task):
raise TypeError("%s != Task" % type(task))
if task.priority not in self.task_queue_map:
raise KeyError("No priority %s found in queue map" % task.priority)
task_id = self.counter.getAndIncrement()
task_block = TaskBlock(task_id, task, info)
self.task_queue_map[task.priority].put_nowait(task_block)
# keep number of submitted tasks based on atomic counter
self._set_metric("submitted-tasks", self.counter.get())
return task_id
def cancel(self, task_id):
"""
Cancel task by provided task_id, this includes either termination of currently running task,
or removal of future scheduled tasks, note that this will be no-op if task that has been
already processed.
:param task_id: task id to cancel, no-op if task_id is None
"""
if task_id:
for conn in self.pipe.values():
conn.send(Message(EXECUTOR_CANCEL_TASK, task_id=task_id))
# Thread is considered to be long-lived, and is terminated when scheduler is stopped.
# Method always provides list of messages to callback or empty list, it is guaranteed to
# provide list of valid messages.
def _process_callback(self):
msg_list = {}
# sometimes thread can report that pipe is None, which might require lock before
# processing, currently we just skip iteration, if it is None.
if self.pipe is not None:
for conn in self.pipe.values():
while conn.poll():
message = conn.recv()
# ignore non-valid messages
if not isinstance(message, Message):
continue
if message.status in msg_list:
msg_list[message.status].append(message)
else:
msg_list[message.status] = [message]
if self.on_task_started and EXECUTOR_TASK_STARTED in msg_list:
self.on_task_started.__call__(msg_list[EXECUTOR_TASK_STARTED])
if self.on_task_succeeded and EXECUTOR_TASK_SUCCEEDED in msg_list:
self.on_task_succeeded.__call__(msg_list[EXECUTOR_TASK_SUCCEEDED])
if self.on_task_failed and EXECUTOR_TASK_FAILED in msg_list:
self.on_task_failed.__call__(msg_list[EXECUTOR_TASK_FAILED])
if self.on_task_cancelled and EXECUTOR_TASK_CANCELLED in msg_list:
self.on_task_cancelled.__call__(msg_list[EXECUTOR_TASK_CANCELLED])
if self.on_is_alive and EXECUTOR_IS_ALIVE in msg_list:
self.on_is_alive.__call__(msg_list[EXECUTOR_IS_ALIVE])
def _update_is_alive(self, messages):
"""
Update 'is alive' status for executors. Currently just updates datetime of message.
:param messages: list of Message instances with EXECUTOR_IS_ALIVE status
"""
for msg in messages:
if "name" in msg.arguments:
exc_name = msg.arguments["name"]
self.is_alive_statuses[exc_name] = util.utcnow()
logger.debug("Updated 'is alive' status for executor %s", exc_name)
def get_is_alive_statuses(self):
"""
Return copy of 'is alive' statuses.
:return: dictionary of 'executor -> datetime of update in UTC'
"""
return self.is_alive_statuses.copy()
def _prepare_polling_thread(self, name):
"""
Prepare maintenance thread for polling messages from Pipe. This returns None, when no
target consumer is provided.
:param name: name of the polling thread
:return: created daemon thread or None, if target is not specified
"""
def poll_messages(): # pragma: no cover
while True:
self._process_callback()
time.sleep(self.timeout)
thread = threading.Thread(name=name, target=poll_messages)
thread.daemon = True
return thread
def start_maintenance(self):
"""
Start all maintenance threads and processes.
"""
# Launch polling thread for messages
thread = self._prepare_polling_thread("Polling-1")
if thread:
thread.start()
    def executor_class(self):
        """
        .. note:: DeveloperApi

        Return executor class to launch. By default returns generic Executor
        implementation. Note that subclass implementation should take the same
        parameters as Executor.

        :return: scheduler.Executor subclass
        """
        # Subclasses override this hook to plug in a custom Executor type.
        return Executor
    def update_executor(self, executor):
        """
        .. note:: DeveloperApi

        Update executor instance with additional options.
        This should be used with custom executor class.

        Default implementation is intentionally a no-op; subclasses override
        it to attach extra options to a freshly created executor.

        :param executor: executor to update
        """
        pass
| sadikovi/queue | src/scheduler.py | Python | apache-2.0 | 35,429 |
# https://www.codewars.com/kata/permutations/train/python
def permutations(text):
    """Return all distinct permutations of `text` as a list of strings.

    Order of the result is unspecified (it comes from a set), matching the
    original implementation.
    """
    import itertools
    # Deduplicate the permutation tuples with a set comprehension, joining
    # each tuple of characters back into a string as we go.
    unique_perms = {''.join(perm) for perm in itertools.permutations(text)}
    return list(unique_perms)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
import copy
import numbers
from six import iteritems
import numpy as np
from .nodes import BaseNode, AcceptsInputNode, JointNode
class ParsedSpec(object):
    """
    Responsible for parsing the spec and constructing a tree.
    When the spec is parsed, it can transform values from multi-dimensional [0,1] hypercube to domain values.

    The spec may be an arbitrary nesting of dicts, lists/tuples, plain objects
    and hyper-parameter nodes (BaseNode subclasses). Every AcceptsInputNode
    found is assigned a stable index into the point vector that instantiate()
    consumes.
    """

    def __init__(self, spec):
        # Keep the raw spec; instantiate() deep-copies it on every call.
        self._spec = spec
        # Maps AcceptsInputNode -> its index in the input point vector.
        self._input_nodes = {}
        self._traverse_nodes(spec)

    def size(self):
        # Dimensionality of the [0,1] hypercube this spec expects.
        return len(self._input_nodes)

    def instantiate(self, points):
        """Feed one point per input node, then return a deep copy of the spec
        with every node replaced by its concrete domain value."""
        assert len(points) == self.size()
        for node, index in iteritems(self._input_nodes):
            node.set_point(points[index])
        spec_copy = copy.deepcopy(self._spec)
        spec_copy = self._traverse_and_replace(spec_copy)
        return spec_copy

    def get_names(self):
        # Index -> human-readable node name (see _set_name).
        return {index: node.name() for node, index in iteritems(self._input_nodes)}

    def _traverse_nodes(self, spec):
        # _visited guards against cycles; it only lives for one traversal.
        self._visited = set()
        self._traverse_nodes_recursive(spec)
        self._visited = None

    def _visit(self, obj):
        """Return True if obj was seen before; otherwise remember it.

        NOTE(review): only objects with a __dict__ are tracked, so plain
        values (numbers, strings, tuples) are never marked visited; the
        isinstance(obj, object) test is always true and therefore redundant.
        """
        if isinstance(obj, object) and hasattr(obj, '__dict__'):
            id_ = id(obj)
            if id_ in self._visited:
                return True
            self._visited.add(id_)
        return False

    def _traverse_nodes_recursive(self, spec, *path):
        # `path` accumulates dict keys / attribute names innermost-first and
        # is later reversed to derive a readable node name.
        if self._visit(spec):
            return
        if isinstance(spec, BaseNode):
            node = spec
            if isinstance(node, JointNode):
                # A joint node aggregates children that need their own inputs.
                for i, child in enumerate(node._children):
                    self._traverse_nodes_recursive(child, *path)
            if isinstance(node, AcceptsInputNode) and not node in self._input_nodes:
                # Assign the next free index to this input node.
                index = len(self._input_nodes)
                self._input_nodes[node] = index
                self._set_name(node, path)
            return
        if isinstance(spec, numbers.Number):
            return
        if isinstance(spec, dict):
            for key, value in iteritems(spec):
                self._traverse_nodes_recursive(value, key, *path)
            return
        if isinstance(spec, list) or isinstance(spec, tuple):
            for i, item in enumerate(spec):
                self._traverse_nodes_recursive(item, *path)
            return
        if isinstance(spec, object) and hasattr(spec, '__dict__'):
            # Arbitrary object: recurse into its non-dunder attributes.
            for key, value in iteritems(spec.__dict__):
                if not (key.startswith('__') and key.endswith('__')):
                    self._traverse_nodes_recursive(value, key, *path)
            return

    def _set_name(self, node, path):
        # Only derive a name for nodes the user did not explicitly name.
        if node._name is None:
            name = '-'.join([str(i) for i in reversed(path)])
            describe = node.describe()
            if describe:
                name = name + '-' + describe
            node._name = name

    def _traverse_and_replace(self, spec_copy):
        self._visited = set()
        spec_copy = self._traverse_and_replace_recursive(spec_copy)
        self._visited = None
        return spec_copy

    def _traverse_and_replace_recursive(self, spec_copy):
        """Replace every BaseNode in the (copied) spec with node.value().

        Lists/tuples are rebuilt; dicts and plain objects are updated in
        place. Already-visited containers are returned untouched to avoid
        infinite recursion on cyclic specs.
        """
        if isinstance(spec_copy, BaseNode):
            return spec_copy.value()
        if isinstance(spec_copy, numbers.Number):
            return spec_copy
        if self._visit(spec_copy):
            return spec_copy
        if isinstance(spec_copy, dict):
            for key, value in iteritems(spec_copy):
                spec_copy[key] = self._traverse_and_replace_recursive(spec_copy[key])
            return spec_copy
        if isinstance(spec_copy, list) or isinstance(spec_copy, tuple):
            replaced = [self._traverse_and_replace_recursive(item_copy) for item_copy in spec_copy]
            if isinstance(spec_copy, tuple):
                # Preserve the original container type for tuples.
                replaced = tuple(replaced)
            return replaced
        if isinstance(spec_copy, object) and hasattr(spec_copy, '__dict__'):
            for key, value in iteritems(spec_copy.__dict__):
                if not(key.startswith('__') and key.endswith('__')):
                    setattr(spec_copy, key, self._traverse_and_replace_recursive(value))
            return spec_copy
        return spec_copy
def get_instance(spec):
    """Sample one random point in the spec's [0,1] hypercube and return the
    corresponding concrete instantiation of the spec."""
    if isinstance(spec, ParsedSpec):
        parsed = spec
    else:
        parsed = ParsedSpec(spec)
    random_points = np.random.uniform(0, 1, size=(parsed.size(),))
    return parsed.instantiate(random_points)
| maxim5/hyper-engine | hyperengine/spec/parsed_spec.py | Python | apache-2.0 | 4,009 |
import OSIsoft.AF as AF

# Handle to the currently connected PI server; presumably assigned at runtime
# by the connection code elsewhere in the package -- confirm against callers.
CURRENT_SERVER = None
# change from pi period to pandas frequency
FREQUENCY = {'1s': 'S', '1h': 'H', '1d': 'D'}
| raphaeltimbo/PI | PI/config.py | Python | apache-2.0 | 139 |
#!/usr/bin/python
# cutIT.py
# Example cat seqs.fastq | cutIT.py
# By Simon H. Rasmussen
# Bioinformatics Centre
# University of Copenhagen
from types import *
def clearFile(fn):
import os
import sys
cwd = os.getcwd()
if fn != "":
file1 = open(cwd + "/" + fn,'r')
sin = False
else:
sin = True
i = 0
j = 0
second = False
l = '#'
while l != "":
if sin:
sys.stdin.readline()
else:
l = file1.readline()
if i % 2 == 0 and len(l.split()) > 1 or second and len(l.split()) > 1:
second = True
j = j + 1
continue
second = False
if len(l.split()) == 1:
print l,
else:
print l,
i = i + 1
print >> sys.stderr, "Number of : ", j
def cutIT_fasta(fastq,predfile,cutoff):
    """Trim 2-line records (header + sequence) at the position given in
    `predfile` (presumably the predicted adapter start -- confirm), printing
    only records whose cut position exceeds `cutoff`.

    :param fastq: input file path, or "" to read the records from stdin
    :param predfile: per-record file of "<id> <start>" lines
    :param cutoff: minimum cut position for a record to be printed
    """
    import os
    import sys
    line = ""
    if fastq != "":
        file1 = open(fastq,'r')
        lines1 = file1.readlines()
    else:
        # Empty filename means: read the sequence data from stdin instead.
        lines1 = sys.stdin.readlines()
    file2 = open(predfile,'r')
    lines2 = file2.readlines()
    numLines = len(lines2)
    for i in range(numLines):
        # Records are two lines each: header at 2*i, sequence at 2*i+1.
        full_ID = lines1[2*i]
        # IDs are numeric and live after the first '.' in both files.
        ID = int(full_ID.split(".")[1])
        seq = lines1[2*i+1]
        l = lines2[i].split()
        ID2 = int(l[0].split(".")[1])
        start = int(l[1])
        if ID == ID2:
            # Keep only the part of the sequence before the predicted cut.
            if not full_ID == "" and not seq[0:start-1] == "" and start > cutoff:
                print full_ID,
                print seq[0:start-1]
def cutIT_fastq(fastq,predfile,cutoff,prime):
    """Trim 4-line FASTQ records at positions given in `predfile`.

    When `prime` is False the sequence/quality are truncated to the first
    start-1 characters (3' trim); when True the trailing start+1 characters
    are kept instead (5' trim). The header's "length=" field is rewritten to
    match. Counts of printed/filtered records go to stderr.

    :param fastq: FASTQ path; ".gz" suffix selects gzip transparently
    :param predfile: cut file of "<id> <start>" lines, or "" to read stdin
    :param cutoff: minimum trimmed length for a record to be printed
    :param prime: True for 5' adapter trimming, False for 3'
    """
    import os
    import sys
    import gzip
    line = ""
    if predfile != "":
        file2 = open(predfile,'r')
        sin = False
    else:
        # Empty predfile means: read the cut positions from stdin.
        sin = True
    if fastq[-2:] == "gz":
        file1 = gzip.open(fastq,'r')
    else:
        file1 = open(fastq,'r')
    i = 0
    k = 0
    line1 = '#'
    # NOTE(review): `line2` is never used after this initialization.
    line2 = '#'
    # `next` controls whether a new cut line is consumed; when IDs mismatch
    # the same cut line is retried against the following FASTQ record.
    next = True
    while True:
        if sin:
            if next:
                #Read cutfile
                cutline = sys.stdin.readline()
        else:
            if next:
                #Read cutfile
                cutline = file2.readline()
        line1 = file1.readline()
        # An empty cut line signals end of the prediction stream.
        if cutline.strip() == '':
            break
        # FASTQ record: header, sequence, '+' separator, quality string.
        full_ID = line1
        ID = (full_ID.split()[0])[1:]
        line1 = file1.readline()
        seq = line1
        line1 = file1.readline()
        l3 = line1
        line1 = file1.readline()
        l4 = line1
        l = cutline.split()
        ID2 = l[0]
        start = int(l[1])
        if prime:
            # 5' mode cuts relative to the end, so the raw line length
            # (including the trailing newline) is needed.
            ll = len(seq)
        if ID == ID2:
            next = True
            if not prime and not full_ID == "" and seq[0:start-1] != "" and start > cutoff:
                # 3' trim: keep the prefix and rewrite the length field.
                if "length" in full_ID:
                    firstLine = "".join(full_ID.split("length")[0:-1]) + "length=" + str(start-1)
                else:
                    firstLine = full_ID.strip() + " length=" + str(start-1)
                print firstLine
                print seq[0:(start-1)].strip()
                print "+" + firstLine[1:]
                print l4[0:(start-1)].strip()
                i = i + 1
            elif prime and not full_ID == "" and seq[ll - start - 2:] != "" and start + 1 >= cutoff and start < ll:
                # 5' trim: keep the suffix of length start+1 (the -2 skips
                # the newline) and rewrite the length field.
                if "length" in full_ID:
                    # NOTE(review): `oldll` is computed but never used.
                    oldll = int(full_ID.split("length=")[-1].split()[0])
                    firstLine = "".join(full_ID.split("length")[0:-1]) + "length=" + str(start + 1)
                else:
                    firstLine = full_ID.strip() + " length=" + str(start+1)
                print firstLine
                print seq[ll - start - 2:].strip()
                print "+" + firstLine[1:]
                print l4[ll - start - 2:].strip()
                i = i + 1
            else:
                # Record matched but failed the cutoff/emptiness checks.
                k = k + 1
        else:
            # IDs out of sync: drop this FASTQ record, retry same cut line.
            k = k + 1
            next = False
    print >> sys.stderr,"cutIT.py: Sequences filtered out",k
    print >> sys.stderr,"cutIT.py: Sequences printed" ,i
if __name__ == "__main__":
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-f", action="store", type="string", dest="fastqF",default="", help="input fastq file")
    parser.add_option("-p", action="store", type="string", dest="predF",default="", help="input pred file")
    parser.add_option("-c", action="store", type="int", dest="cut", default=0, help="length cutoff")
    parser.add_option("-5", action="store_true", dest="prime", default=False, help="5prime adapter?")
    (options, args) = parser.parse_args()
    # Route by extension: .fastq or .gz files use the FASTQ path; everything
    # else (including "" meaning stdin) is treated as 2-line FASTA records.
    if options.fastqF[-5:] == "fastq" or options.fastqF[-2:] == "gz":
        cutIT_fastq(options.fastqF,options.predF,options.cut,options.prime)
    else:
        cutIT_fasta(options.fastqF,options.predF,options.cut)
| simras/CLAP | scripts/cutIT.py | Python | mit | 4,899 |
import pytest
import cv2
from plantcv.plantcv.morphology import segment_combine
def test_segment_combine(morphology_test_data):
    """Test for PlantCV."""
    skel = cv2.imread(morphology_test_data.skel_img, -1)
    edges = morphology_test_data.load_segments(morphology_test_data.segments_file, "edges")
    # Test with list of IDs input
    _, new_objects = segment_combine(segment_list=[0, 1], objects=edges, mask=skel)
    # Merging two segments into one shrinks the segment list by exactly one.
    assert len(new_objects) + 1 == len(edges)
def test_segment_combine_lists(morphology_test_data):
    """Test for PlantCV."""
    skel = cv2.imread(morphology_test_data.skel_img, -1)
    edges = morphology_test_data.load_segments(morphology_test_data.segments_file, "edges")
    # Test with list of lists input
    _, new_objects = segment_combine(segment_list=[[0, 1, 2], [3, 4]], objects=edges, mask=skel)
    # Merging groups of 3 and 2 segments removes 2 + 1 = 3 entries overall.
    assert len(new_objects) + 3 == len(edges)
def test_segment_combine_bad_input(morphology_test_data):
    """Test for PlantCV."""
    skel = cv2.imread(morphology_test_data.skel_img, -1)
    edges = morphology_test_data.load_segments(morphology_test_data.segments_file, "edges")
    # Fractional segment IDs are invalid and must raise a RuntimeError.
    with pytest.raises(RuntimeError):
        _ = segment_combine(segment_list=[0.5, 1.5], objects=edges, mask=skel)
| danforthcenter/plantcv | tests/plantcv/morphology/test_segment_combine.py | Python | mit | 1,237 |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Helper module for registering Analysis classes methods """
import os
import sys
from glob import glob
from inspect import isclass
from importlib import import_module
from analysis_module import AnalysisModule
# Configure logging
import logging
class AnalysisRegister(object):
    """
    Define list of supported Analysis Classes.

    On construction, scans the sibling 'analysis' directory for *.py modules,
    imports each one, and registers every AnalysisModule subclass found as an
    attribute on this object (named after the module, minus '_analysis').

    :param trace: input Trace object
    :type trace: :mod:`libs.utils.Trace`
    """

    def __init__(self, trace):
        # Add workloads dir to system path
        analysis_dir = os.path.dirname(os.path.abspath(__file__))
        analysis_dir = os.path.join(analysis_dir, 'analysis')
        logging.debug('%14s - Analysis: %s', 'Analysis', analysis_dir)
        # Prepend so analysis modules shadow same-named modules elsewhere.
        sys.path.insert(0, analysis_dir)
        logging.debug('%14s - Syspath: %s', 'Analysis', format(sys.path))

        logging.info("Registering trace analysis modules:")
        for filepath in glob(os.path.join(analysis_dir, '*.py')):
            filename = os.path.splitext(os.path.basename(filepath))[0]

            # Ignore __init__ files
            if filename.startswith('__'):
                continue

            logging.debug('%14s - Filename: %s', 'Analysis', filename)

            # Import the module for inspection
            module = import_module(filename)
            for member in dir(module):
                # Ignore the base class
                if member == 'AnalysisModule':
                    continue
                handler = getattr(module, member)
                if handler and isclass(handler) and \
                   issubclass(handler, AnalysisModule):
                    # e.g. 'frequency_analysis' registers as self.frequency.
                    module_name = module.__name__.replace('_analysis', '')
                    setattr(self, module_name, handler(trace))
                    logging.info("    %s", module_name)
# vim :set tabstop=4 shiftwidth=4 expandtab
| JaviMerino/lisa | libs/utils/analysis_register.py | Python | apache-2.0 | 2,476 |
"""Allows setting the time a Status object was last updated.
Revision ID: 58441c58e37e
Revises: 2db48f0c89c7
Create Date: 2014-03-09 15:17:01.996769
"""
# revision identifiers, used by Alembic.
revision = '58441c58e37e'
down_revision = '2db48f0c89c7'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Add a nullable 'time' column recording when a Status was last updated.
    op.add_column('status', sa.Column('time', sa.DateTime(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop the 'time' column again.
    op.drop_column('status', 'time')
    ### end Alembic commands ###
| lae/simplemona | migrations/versions/58441c58e37e_.py | Python | mit | 646 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TaskSchedulingPolicy(Model):
    """Specifies how tasks should be distributed across compute nodes.

    :param node_fill_type: How tasks should be distributed across compute
     nodes. Possible values include: 'spread', 'pack'
    :type node_fill_type: str or :class:`ComputeNodeFillType
     <azure.batch.models.ComputeNodeFillType>`
    """

    # msrest validation metadata: node_fill_type must always be supplied.
    _validation = {
        'node_fill_type': {'required': True},
    }

    # Maps the Python attribute to its wire-format key and msrest type.
    _attribute_map = {
        'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'},
    }

    def __init__(self, node_fill_type):
        self.node_fill_type = node_fill_type
| SUSE/azure-sdk-for-python | azure-batch/azure/batch/models/task_scheduling_policy.py | Python | mit | 1,137 |
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium import selenium
__version__ = "2.21.3"
| leighpauls/k2cro4 | third_party/webdriver/pylib/selenium/__init__.py | Python | bsd-3-clause | 698 |
"""
WSGI config for baldys_testbed project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "baldys_testbed.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "baldys_testbed.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()  # module-level callable WSGI servers look for

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| jmuharsky/baldys-secret-underground-lair | baldys_testbed/wsgi.py | Python | mit | 1,443 |
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse
from django.template import loader
from cyclope.core import frontend
from cyclope.apps.newsletter.models import Newsletter
from cyclope.core.collections.models import Category, Categorization
class NewsletterContentTeasers(frontend.FrontendView):
    """Teaser list for a Newsletter rendered as a table"""
    name='content_teasers_as_table'
    verbose_name=_('show content teasers linked to the website')
    is_default = True
    is_instance_view = True
    is_content_view = False
    is_region_view = False

    # Template rendering one teaser row per categorized content item.
    template = "newsletter/content_teasers.html"

    def get_response(self, request, req_context, options, content_object):
        """Render the newsletter's content category as a table of teasers.

        `content_object` is the Newsletter instance being displayed; its
        content_category's categorizations drive the inline teaser views.
        """
        newsletter = content_object
        category = newsletter.content_category
        categorizations_list = category.categorizations.all()
        req_context.update({'category': category,
                            'newsletter': newsletter,
                            'categorizations': categorizations_list,
                            'inline_view_name': 'newsletter_teaser',
                            })
        t = loader.get_template(self.template)
        return t.render(req_context)
frontend.site.register_view(Newsletter, NewsletterContentTeasers)
class NewsletterContent(NewsletterContentTeasers):
    """Full content for a Newsletter (no external links); reuses the teaser
    view's context building and only swaps the template."""
    name='content'
    verbose_name=_('show the full content (no external links)')
    is_default = False
    is_instance_view = True
    is_content_view = False
    is_region_view = False

    template = "newsletter/content.html"
frontend.site.register_view(Newsletter, NewsletterContent)
## class NewsletterHeader(frontend.FrontendView):
## """Header of a Newsletter"""
## name='header'
## verbose_name=_('Header')
## is_instance_view = True
## is_region_view = True
## def get_response(self, request, req_context, options, content_object):
## newsletter = content_object
## return newsletter.header
## frontend.site.register_view(Newsletter, NewsletterHeader)
| CodigoSur/cyclope | cyclope/apps/newsletter/frontend_views.py | Python | gpl-3.0 | 2,122 |
'''
Author: Rajmani Arya
TicTacToe Game System vs. Player
using Min Max Algorithm
Graphical User Interface Implemented in Python Tk
'''
from Tkinter import Tk, Label, Frame, Canvas, Button, ALL
def min_max_move(instance, marker):
    """Return (best_move, best_score) for `marker` via exhaustive minimax.

    Marker 2 is the maximizing player (score +1 on win), marker 1 the
    minimizing player (score -1 on win); draws score 0. `instance` must
    expose get_free_cells(), mark(), revert_last_move(), is_gameover() and
    get_score(). Ties are broken in favor of the first best move found,
    matching the two duplicated max/min branches this version consolidates.

    :param instance: game object whose board is searched (mutated and
        restored in place via mark/revert_last_move)
    :param marker: 2 (maximizer) or 1 (minimizer) to move next
    :return: (move index, minimax score) tuple
    """
    bestmove = None
    bestscore = None
    maximizing = (marker == 2)
    opponent = 1 if maximizing else 2
    for move in instance.get_free_cells():
        instance.mark(move, marker)
        if instance.is_gameover():
            score = instance.get_score()
        else:
            # Recurse for the opponent; only the score matters here.
            _, score = min_max_move(instance, opponent)
        instance.revert_last_move()
        # Strict comparison keeps the earliest best move, as before.
        if bestscore is None or (score > bestscore if maximizing else score < bestscore):
            bestscore = score
            bestmove = move
    return bestmove, bestscore
class TTT:
    '''
    main class for interface and game handling

    Builds the Tk widgets and tracks the 9-cell board (0 = empty, 2 = the
    human's circle, 1 = the computer's cross), driving the minimax opponent
    via the module-level min_max_move().
    '''
    def __init__(self, master):
        self.frame = Frame(master)
        self.frame.pack(fill="both", expand=True)
        self.label = Label(self.frame, text='Tic Tac Toe Game', height=2, font="Arial 14", bg='black', fg='blue')
        self.label.pack(fill="both", expand=True)
        self.canvas = Canvas(self.frame, width=300, height=300)
        self.canvas.pack(fill="both", expand=True)
        self.status = Label(self.frame, text='Start Game', height=2, font="Arial 14", bg='white', fg='black')
        self.status.pack(fill="both", expand=True)
        # NOTE(review): this assignment shadows the reset() method with the
        # Button widget. The command was resolved to the bound method before
        # the assignment completed, so the button still works, but afterwards
        # self.reset refers to the widget, not the method.
        self.reset = Button(self.frame, text="Reset Game", command=self.reset)
        self.reset.pack(fill="both", expand=True)
        self.__board()
        self.canvas.bind("<ButtonPress-1>", self.handler)
        self.board = [0 for x in range(0, 9)]
        self.winner = None
        self.lastmoves = []

    def get_free_cells(self):
        # Indices of all empty cells, in ascending order.
        moves = []
        for i,v in enumerate(self.board):
            if v == 0:
                moves.append(i)
        return moves

    def mark(self,pos, marker):
        # Place marker and remember the move so minimax can undo it.
        self.board[pos] = marker
        self.lastmoves.append(pos)

    def revert_last_move(self):
        self.board[self.lastmoves.pop()] = 0
        self.winner = None

    def is_gameover(self):
        # Check every row, column and diagonal for a three-in-a-row.
        win_positions = [(0,1,2), (3,4,5), (6,7,8), (0,3,6),(1,4,7),(2,5,8), (0,4,8), (2,4,6)]
        for i,j,k in win_positions:
            if self.board[i] == self.board[j] and self.board[j] == self.board[k] and self.board[i] != 0:
                self.winner = self.board[i]
                return True
        if 0 not in self.board:
            # Full board with no winning line: a draw, encoded as winner 0.
            self.winner = 0
            return True
        return False

    def get_score(self):
        # Minimax score from marker 2's perspective: +1 win, -1 loss, 0 draw.
        if self.is_gameover():
            if self.winner == 2:
                return 1 # Won
            elif self.winner == 1:
                return -1
        return 0

    def get_cell_value(self,pos):
        return self.board[pos]

    def __board(self):
        # Draw the 3x3 grid as overlapping rectangles on the 300x300 canvas.
        self.canvas.create_rectangle(0, 0, 300, 300, outline="black")
        self.canvas.create_rectangle(100, 300, 200, 0, outline="black")
        self.canvas.create_rectangle(0, 100, 300, 200, outline="black")

    def reset(self):
        # Clear the canvas and all game state, then rebind the click handler
        # (a finished game unbinds it in handler()).
        self.canvas.delete(ALL)
        self.__board()
        self.changeStatus('Start Game')
        self.canvas.bind("<ButtonPress-1>", self.handler)
        self.board = [0 for x in range(0, 9)]
        self.winner = None
        self.lastmoves = []

    def changeStatus(self, status):
        self.status['text'] = status

    def markFinal(self, pos, marker):
        # Convert the linear index to grid coordinates, draw O (marker 2) or
        # X (marker 1), update the status line, and commit the move.
        x = pos%3
        y = int(pos/3)
        # print pos, marker
        if marker == 2:
            X = 100 * (x + 1)
            Y = 100 * (y + 1)
            self.canvas.create_oval(X - 25, Y - 25, X - 75, Y - 75, width=4, outline="green")
            self.changeStatus("X's Move !")
        else:
            X = 100 * x
            Y = 100 * y
            self.canvas.create_line(X + 25, Y + 25, X + 75, Y + 75, width=4, fill="red")
            self.canvas.create_line(X + 25, Y + 75, X + 75, Y + 25, width=4, fill="red")
            self.changeStatus("O's Move !")
        self.board[pos] = marker

    def handler(self, event):
        '''
        handle mouse click event on the board

        Places the human's mark (2) on the clicked cell if empty, checks for
        game over, then lets the minimax opponent (1) reply and checks again.
        '''
        x = int(event.x / 100)
        y = int(event.y / 100)
        if self.board[y*3+x] == 0:
            self.markFinal(y*3+x, 2)
            if self.is_gameover():
                # Unbind clicks so a finished game cannot be altered.
                self.canvas.unbind("<ButtonPress-1>")
                if self.winner == 2:
                    self.changeStatus("O Won the Game !")
                elif self.winner == 1:
                    self.changeStatus("X Won the Game !")
                else:
                    self.changeStatus("Game Draw !")
                return
            # Computer's reply via exhaustive minimax search.
            pos, score = min_max_move(self, 1)
            self.markFinal(pos, 1);
            if self.is_gameover():
                self.canvas.unbind("<ButtonPress-1>")
                if self.winner == 2:
                    self.changeStatus("O Won the Game !")
                elif self.winner == 1:
                    self.changeStatus("X Won the Game !")
                else:
                    self.changeStatus("Game Draw !")
# Program Starts Here
root = Tk()
app = TTT(root)
root.mainloop()  # blocks here until the window is closed
| rajmani1995/TicTacToe-Automated | AutoTicTacToe.py | Python | mit | 5,548 |
# -*- coding: utf-8 -*-
from shellstreaming import api
from shellstreaming.istream import RandInt
from shellstreaming.operator import CountWindow, Sort
from shellstreaming.ostream import LocalFile
OUTPUT_FILE = '/tmp/04_Sort.txt'
NUM_RECORDS = 10000
def main():
    """Build the streaming pipeline: random ints -> count windows -> sort -> file."""
    # Source: stream of random ints in [0, 100), capped at NUM_RECORDS.
    randint_stream = api.IStream(RandInt, 0, 100, max_records=NUM_RECORDS)
    # Tumbling count window of 3 records (slide == size), pinned to localhost.
    randint_win = api.Operator([randint_stream], CountWindow, 3, slide_size=3, fixed_to=['localhost'])
    # Sort each window by the 'num' column.
    sorted_win = api.Operator([randint_win], Sort, 'num')
    # Sink: dump the sorted windows to OUTPUT_FILE as JSON lines.
    api.OStream(sorted_win, LocalFile, OUTPUT_FILE, output_format='json', fixed_to=['localhost'])
def test():
    """Verify OUTPUT_FILE contains consecutive non-decreasing triples."""
    import json
    num_lines = 0
    with open(OUTPUT_FILE) as f:
        while True:
            try:
                first = int(json.loads(next(f))['num'])
                second = int(json.loads(next(f))['num'])
                third = int(json.loads(next(f))['num'])
                # Each 3-record window must have been emitted in sorted order.
                assert(first <= second <= third)
                num_lines += 3
            except StopIteration:
                break
    # 10000 records in tumbling windows of 3 leave one trailing record that
    # never fills a window, hence 9999 (= NUM_RECORDS - 1) lines expected.
    assert(num_lines == NUM_RECORDS - 1)
| laysakura/shellstreaming | example/04_Sort.py | Python | apache-2.0 | 1,067 |
#!/usr/bin/python2
'''
genPOI.py
Scans regionsets for TileEntities and Entities, filters them, and writes out
POI/marker info.
A markerSet is list of POIs to display on a tileset. It has a display name,
and a group name.
markersDB.js holds a list of POIs in each group
markers.js holds a list of which markerSets are attached to each tileSet
'''
import os
import logging
import json
import sys
from optparse import OptionParser
from overviewer_core import logger
from overviewer_core import nbt
from overviewer_core import configParser, world
def replaceBads(s):
    "Replaces bad characters with good characters!"
    # Spaces and parentheses are unsafe in generated JS identifiers;
    # substitute each occurrence with an underscore.
    cleaned = s
    for bad in (" ", "(", ")"):
        cleaned = cleaned.replace(bad, "_")
    return cleaned
def handleEntities(rset, outputdir, render, rname):
    """Populate rset._pois with every TileEntity and Entity found in the
    regionset's chunks. The result is cached on the regionset, so calling
    this again for the same rset is a no-op.

    `outputdir`, `render` and `rname` are currently unused but kept for
    interface compatibility with the other handle* helpers.
    """
    # if we've already handled the POIs for this regionset, do nothing
    if hasattr(rset, "_pois"):
        return
    logging.info("Looking for entities in %r", rset)
    # (removed: unused local `filters = render['markers']`)
    rset._pois = dict(TileEntities=[], Entities=[])
    for (x,z,mtime) in rset.iterate_chunks():
        data = rset.get_chunk(x,z)
        rset._pois['TileEntities'] += data['TileEntities']
        rset._pois['Entities'] += data['Entities']
    logging.info("Done.")
def handlePlayers(rset, render, worldpath):
    """Append player-position and player-spawn POIs to rset._pois['Players'].

    Reads <worldpath>/players/*.dat for multiplayer worlds, or level.dat for
    single-player, keeping only players whose Dimension matches the
    regionset's type. Runs at most once per regionset.
    """
    if not hasattr(rset, "_pois"):
        rset._pois = dict(TileEntities=[], Entities=[])
    # only handle this region set once
    if 'Players' in rset._pois:
        return
    # Map regionset type to the Dimension value stored in player NBT data.
    dimension = {None: 0,
                 'DIM-1': -1,
                 'DIM1': 1}[rset.get_type()]
    playerdir = os.path.join(worldpath, "players")
    if os.path.isdir(playerdir):
        playerfiles = os.listdir(playerdir)
        playerfiles = [x for x in playerfiles if x.endswith(".dat")]
        isSinglePlayer = False
    else:
        playerfiles = [os.path.join(worldpath, "level.dat")]
        isSinglePlayer = True
    rset._pois['Players'] = []
    for playerfile in playerfiles:
        # BUGFIX: single-player entries are already complete paths; the old
        # code joined them onto playerdir, which produced a bogus nested path
        # (e.g. world/players/world/level.dat) whenever worldpath was
        # relative. Absolute worldpaths only worked because os.path.join
        # discards the left side when the right side is absolute.
        if isSinglePlayer:
            datpath = playerfile
        else:
            datpath = os.path.join(playerdir, playerfile)
        try:
            data = nbt.load(datpath)[1]
            if isSinglePlayer:
                # level.dat nests the player under Data/Player.
                data = data['Data']['Player']
        except IOError:
            logging.warning("Skipping bad player dat file %r", playerfile)
            continue
        playername = playerfile.split(".")[0]
        if isSinglePlayer:
            playername = 'Player'
        if data['Dimension'] == dimension:
            # Position at last logout
            data['id'] = "Player"
            data['EntityId'] = playername
            data['x'] = int(data['Pos'][0])
            data['y'] = int(data['Pos'][1])
            data['z'] = int(data['Pos'][2])
            rset._pois['Players'].append(data)
        if "SpawnX" in data and dimension == 0:
            # Spawn position (bed or main spawn)
            spawn = {"id": "PlayerSpawn",
                     "EntityId": playername,
                     "x": data['SpawnX'],
                     "y": data['SpawnY'],
                     "z": data['SpawnZ']}
            rset._pois['Players'].append(spawn)
def handleManual(rset, manualpois):
    """Store the configured manual POIs on the regionset's POI cache."""
    # Lazily create the POI cache, mirroring handleEntities/handlePlayers.
    if not hasattr(rset, "_pois"):
        rset._pois = dict(TileEntities=[], Entities=[])
    manual = []
    if manualpois:
        manual.extend(manualpois)
    rset._pois['Manual'] = manual
def main():
    """Entry point: parse the config, scan every render's regionset for POIs
    (entities, tile entities, players, manual markers), apply the configured
    filter functions and write markersDB.js / markers.js / baseMarkers.js
    into the output directory. Returns 1 on configuration errors."""
    if os.path.basename(sys.argv[0]) == """genPOI.py""":
        helptext = """genPOI.py
%prog --config=<config file> [--quiet]"""
    else:
        helptext = """genPOI
%prog --genpoi --config=<config file> [--quiet]"""
    logger.configure()
    parser = OptionParser(usage=helptext)
    parser.add_option("--config", dest="config", action="store", help="Specify the config file to use.")
    parser.add_option("--quiet", dest="quiet", action="count", help="Reduce logging output")

    options, args = parser.parse_args()
    if not options.config:
        parser.print_help()
        return

    # NOTE(review): options.quiet defaults to None; `None > 0` is False under
    # Python 2, which this file targets (print statements elsewhere).
    if options.quiet > 0:
        logger.configure(logging.WARN, False)

    # Parse the config file
    mw_parser = configParser.MultiWorldParser()
    mw_parser.parse(options.config)
    try:
        config = mw_parser.get_validated_config()
    except Exception:
        logging.exception("An error was encountered with your configuration. See the info below.")
        return 1

    destdir = config['outputdir']
    # saves us from creating the same World object over and over again
    worldcache = {}

    markersets = set()
    markers = dict()

    for rname, render in config['renders'].iteritems():
        try:
            worldpath = config['worlds'][render['world']]
        except KeyError:
            logging.error("Render %s's world is '%s', but I could not find a corresponding entry in the worlds dictionary.",
                          rname, render['world'])
            return 1
        render['worldname_orig'] = render['world']
        render['world'] = worldpath

        # find or create the world object
        if render['world'] not in worldcache:
            w = world.World(render['world'])
            worldcache[render['world']] = w
        else:
            w = worldcache[render['world']]

        rset = w.get_regionset(render['dimension'][1])
        if rset == None:  # indicates no such dimension was found
            # BUGFIX: this error path referenced the undefined name
            # 'render_name', raising a NameError instead of logging.
            logging.error("Sorry, you requested dimension '%s' for %s, but I couldn't find it", render['dimension'][0], rname)
            return 1

        for f in render['markers']:
            markersets.add(((f['name'], f['filterFunction']), rset))
            # Unique, JS-safe group name derived from filter and regionset.
            name = replaceBads(f['name']) + hex(hash(f['filterFunction']))[-4:] + "_" + hex(hash(rset))[-4:]
            to_append = dict(groupName=name,
                             displayName=f['name'],
                             icon=f.get('icon', 'signpost_icon.png'),
                             createInfoWindow=f.get('createInfoWindow', True),
                             checked=f.get('checked', False))
            try:
                l = markers[rname]
                l.append(to_append)
            except KeyError:
                markers[rname] = [to_append]

        handleEntities(rset, os.path.join(destdir, rname), render, rname)
        handlePlayers(rset, render, worldpath)
        handleManual(rset, render['manualpois'])

    logging.info("Done scanning regions")
    logging.info("Writing out javascript files")

    def _build_marker(poi, x, y, z, result):
        """Build one marker dict from a filter result (a plain string or a
        (hovertext, text) tuple); returns None for unsupported result types.
        Consolidates the four identical loops of the original code."""
        if isinstance(result, basestring):
            d = dict(x=x, y=y, z=z, text=result, hovertext=result)
        elif type(result) == tuple:
            d = dict(x=x, y=y, z=z, text=result[1], hovertext=result[0])
        else:
            # Previously a non-str/tuple truthy result reused a stale dict
            # from an earlier iteration; skip such results explicitly.
            return None
        if "icon" in poi:
            d.update({"icon": poi['icon']})
        if "createInfoWindow" in poi:
            d.update({"createInfoWindow": poi['createInfoWindow']})
        return d

    markerSetDict = dict()
    for (flter, rset) in markersets:
        # generate a unique name for this markerset. it will not be user visible
        filter_name = flter[0]
        filter_function = flter[1]
        name = replaceBads(filter_name) + hex(hash(filter_function))[-4:] + "_" + hex(hash(rset))[-4:]
        markerSetDict[name] = dict(created=False, raw=[], name=filter_name)
        # Entities store their position under 'Pos'; tile entities, players
        # and manual markers all use plain x/y/z keys.
        for poi in rset._pois['Entities']:
            result = filter_function(poi)
            if result:
                d = _build_marker(poi, poi['Pos'][0], poi['Pos'][1], poi['Pos'][2], result)
                if d is not None:
                    markerSetDict[name]['raw'].append(d)
        for category in ('TileEntities', 'Players', 'Manual'):
            for poi in rset._pois[category]:
                result = filter_function(poi)
                if result:
                    d = _build_marker(poi, poi['x'], poi['y'], poi['z'], result)
                    if d is not None:
                        markerSetDict[name]['raw'].append(d)

    with open(os.path.join(destdir, "markersDB.js"), "w") as output:
        output.write("var markersDB=")
        json.dump(markerSetDict, output, indent=2)
        output.write(";\n")
    with open(os.path.join(destdir, "markers.js"), "w") as output:
        output.write("var markers=")
        json.dump(markers, output, indent=2)
        output.write(";\n")
    with open(os.path.join(destdir, "baseMarkers.js"), "w") as output:
        output.write("overviewer.util.injectMarkerScript('markersDB.js');\n")
        output.write("overviewer.util.injectMarkerScript('markers.js');\n")
        output.write("overviewer.collections.haveSigns=true;\n")
    logging.info("Done")
if __name__ == "__main__":
main()
| eminence/Minecraft-Overviewer | overviewer_core/aux_files/genPOI.py | Python | gpl-3.0 | 10,189 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api, SUPERUSER_ID
import sys
import pytz
from ast import literal_eval
from datetime import datetime
from dateutil import relativedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import logging
_logger = logging.getLogger(__name__)
class action(models.Model):
    """ETL action.

    Each action maps one external source model to one target model and
    holds the field mappings used to migrate its records (see
    ``run_action``). Actions are processed in ``sequence`` order.
    """
    _name = 'etl.action'
    _description = 'action'
    _order = "sequence"
    # Manually blocked by the user; blocked mappings/actions are preserved
    # by match_fields and skipped by batch runs.
    blocked = fields.Boolean(
        string='Blocked',
        copy=False,
    )
    sequence = fields.Integer(
        string='Sequence'
    )
    state = fields.Selection(
        [(u'to_analyze', 'to_analyze'), (u'enabled', 'enabled'), (u'disabled', 'disabled'), (u'no_records', 'no_records')],
        string='State',
        required=True
    )
    name = fields.Char(
        string='Name',
        required=True
    )
    # Domain (as a Python literal string) applied when exporting source
    # records; extended automatically when the source has an 'active' field.
    source_domain = fields.Char(
        string='Source Domain',
        required=True,
        default='[]'
    )
    log = fields.Text(
        string='Log'
    )
    note = fields.Html(
        string='Notes'
    )
    # True when at least one field mapping is in state 'on_repeating'
    # (see _get_repeating_action).
    repeating_action = fields.Boolean(
        string='Repeating Action?',
        store=True,
        compute='_get_repeating_action',
    )
    # Expression exported from the source model to build the record id.
    source_id_exp = fields.Char(
        string='source_id_exp',
        required=True,
        default='id'
    )
    # How target external ids are produced: reuse the source external id,
    # or build one from target_id_prefix + database id.
    target_id_type = fields.Selection(
        [(u'source_id', 'source_id'), (u'builded_id', 'builded_id')],
        string='Target ID Type',
        required=True,
        related='manager_id.target_id_type'
    )
    # Optional source-id range limits; 0 means "no limit" (see run_action).
    from_rec_id = fields.Integer(
        string='From Record'
    )
    to_rec_id = fields.Integer(
        string='To Record'
    )
    target_id_prefix = fields.Char(
        string='target_id_prefix',
        compute='_get_action_prefix'
    )
    manager_id = fields.Many2one(
        'etl.manager',
        ondelete='cascade',
        string='Manager',
        required=True
    )
    field_mapping_ids = fields.One2many(
        'etl.field_mapping',
        'action_id',
        string='Fields Mapping',
        copy=False,
    )
    source_model_id = fields.Many2one(
        'etl.external_model',
        string='Source Model',
        required=True,
        ondelete='cascade',
    )
    target_model_id = fields.Many2one(
        'etl.external_model',
        string='Target Model',
        ondelete='cascade',
    )
    source_records = fields.Integer(
        related='source_model_id.records',
        readonly=True,
        string='Source Records',
    )
    target_records = fields.Integer(
        related='target_model_id.records',
        readonly=True,
        string='Target Records',
    )
    # No old-API constraints defined; kept for backward compatibility.
    _constraints = [
    ]
@api.one
@api.depends(
'source_model_id.model','target_id_type'
)
def _get_action_prefix(self):
value = False
if self.target_id_type == 'builded_id':
value = self.manager_id.name + '_' + self.source_model_id.model.replace('.','_')
self.target_id_prefix = value
@api.one
@api.depends(
'field_mapping_ids.state'
)
def _get_repeating_action(self):
repeating_action = False
repeating_field_mapps = self.field_mapping_ids.search([
('state', '=', 'on_repeating'),
('action_id', '=', self.id),
])
if repeating_field_mapps:
repeating_action = True
self.repeating_action = repeating_action
@api.multi
def action_block(self):
return self.write({'blocked': True})
    @api.one
    def match_fields(self):
        """Build (or rebuild) the field mapping of this action.

        For every usable field of the source model, look for a target
        field with the same name, choose a mapping type (field /
        expression / value_mapping / date_adapt / reference) and an
        initial state, then bulk-load the ``etl.field_mapping`` records.
        Mappings the user has blocked are left untouched. Also sets an
        active-record-aware ``source_domain`` when the source model has
        an ``active`` field.
        """
        _logger.info("Matching fields on action %s" % self.name)
        migrator_field = self.env['etl.field']
        field_mapping = self.env['etl.field_mapping']
        # Read the manager-level lists of fields to disable / analyze and
        # of prefixes that disable a field (stored as Python literals).
        field_disable_default = []
        field_analyze_default = []
        field_disable_words = []
        if self.manager_id.field_disable_default:
            field_disable_default = literal_eval(
                self.manager_id.field_disable_default)
        if self.manager_id.field_analyze_default:
            field_analyze_default = literal_eval(
                self.manager_id.field_analyze_default)
        if self.manager_id.field_disable_words:
            field_disable_words = literal_eval(
                self.manager_id.field_disable_words)
        # Get source fields that are not one2many and not function fields.
        # Function in False or in '_fnct_read' (apparently '_fnct_read' is
        # used for related fields, and we do want to map those).
        source_domain = [
            ('model_id.id', '=', self.source_model_id.id),
            ('ttype', 'not in', ['one2many']),
            '|', ('function', 'in', [False, '_fnct_read']),
            ('required', '=', 'True')]
        source_fields = migrator_field.search(source_domain)
        mapping_data = []
        action_has_active_field = False
        for field in source_fields:
            # If nothing more specific asserts below, fall back to an
            # expression mapping.
            mapping_type = 'expression'
            # Relational fields are exported through their external id.
            source_field_name = field.name
            if field.ttype in ['many2one', 'many2many']:
                source_field_name += '/id'
            # look for a target field with the same name
            target_domain = [
                ('model_id.id', '=', self.target_model_id.id),
                ('name', '=', field.name)]
            target_fields = migrator_field.search(target_domain)
            # Initial state: analyze when flagged or when no target field
            # exists; disabled wins over everything else.
            state = 'enabled'
            if field.name in field_analyze_default or not target_fields:
                state = 'to_analyze'
            if field.name in field_disable_default:
                state = 'disabled'
            else:
                for field_disable_word in field_disable_words:
                    if field.name.find(field_disable_word) == 0:
                        state = 'disabled'
            # remember whether the source model has an 'active' field
            if field.name == 'active':
                action_has_active_field = True
            # depending on the target field properties, set some other values
            target_field = ''
            target_field_name = False
            if target_fields:
                mapping_type = 'field'
                target_field = target_fields[0]
                target_field_name = target_field.name
                if target_field.ttype in ['many2one', 'many2many']:
                    target_field_name += '/id'
                if target_field.ttype == 'many2many':
                    # m2m values can only be loaded if the related model is
                    # migrated by an earlier action of the same manager.
                    relation = target_field.relation
                    previus_actions = self.search([
                        ('manager_id', '=', self.manager_id.id),
                        ('sequence', '<', self.sequence),
                        ('target_model_id.model', '=', relation)])
                    if not previus_actions:
                        state = 'other_class'
                elif field.ttype == 'datetime' and target_field.ttype == 'date' or field.ttype == 'date' and target_field.ttype == 'datetime':
                    mapping_type = 'date_adapt'
                elif field.ttype == 'reference':
                    mapping_type = 'reference'
            # Check if there is any value mapping for the current field
            value_mapping_field = False
            value_mappings = self.env['etl.value_mapping_field'].search([
                ('source_model_id.model', '=', field.relation),
                ('manager_id', '=', self.manager_id.id)])
            if value_mappings:
                mapping_type = 'value_mapping'
                value_mapping_field = value_mappings[0]
            # 'state' fields are loaded on a repeating pass so all related
            # data is already in place when they are written.
            if field.name == 'state':
                state = 'on_repeating'
            # Row layout must match mapping_fields below.
            vals = [
                'field_mapping_' + str(self.id) + '_' + str(field.id),
                state,
                field.id,
                source_field_name,
                mapping_type,
                target_field and target_field.id or False,
                target_field_name,
                self.id,
                value_mapping_field and value_mapping_field.id or False]
            # Skip fields whose mapping was blocked by the user (matched by
            # external id).
            blocked_fields = field_mapping.search([
                ('blocked', '=', True),
                ('action_id', '=', self.id)])
            blocked_field_ext_ids = blocked_fields.export_data(
                ['id'])['datas']
            if [vals[0]] in blocked_field_ext_ids:
                continue
            mapping_data.append(vals)
        # bulk-load all mapping rows in one call
        mapping_fields = [
            'id',
            'state',
            'source_field_id/.id',
            'source_field',
            'type',
            'target_field_id/.id',
            'target_field',
            'action_id/.id',
            'value_mapping_field_id/.id']
        _logger.info("Loading mapping fields for action %s" % self.name)
        import_result = field_mapping.load(mapping_fields, mapping_data)
        vals = {'log': import_result}
        if action_has_active_field and self.source_domain == '[]':
            vals['source_domain'] = "['|',('active','=',False),('active','=',True)]"
        # write log and domain if active field exists
        self.write(vals)
        # TODO: if something gets slow or misbehaves, delete this. The m2o
        # depends check cannot run before the actions are ordered.
        # return self.check_m2o_depends(cr, uid, ids, context=context)
        # return True
    @api.one
    def check_m2o_depends(self):
        """Move many2one mappings that depend on models migrated by a
        later (or this same) action into the 'on_repeating' state, so
        they are loaded in a second pass once their targets exist. The
        stored ``repeating_action`` flag follows automatically via its
        compute method.
        """
        data = []
        # Enabled / to-analyze actions of this manager whose sequence is
        # >= ours: their source models are "future" dependencies.
        future_actions = self.search([
            ('manager_id', '=', self.manager_id.id),
            ('sequence', '>=', self.sequence),
            ('state', 'in', ['enabled', 'to_analyze'])])
        future_models = []
        for future_action in future_actions:
            future_models.append(future_action.source_model_id.model)
        # Active many2one field mappings of this action
        field_mapping_domain = [
            ('blocked', '!=', True),
            ('action_id', '=', self.id),
            ('source_field_id.ttype', '=', 'many2one'),
            ('state', 'in', ['enabled', 'to_analyze', 'on_repeating']),
            ('type', '=', 'field')]
        field_mappings = self.env['etl.field_mapping'].search(
            field_mapping_domain)
        # If a mapping points to a future model, flag it 'on_repeating'
        # (matched by the deterministic external id built in match_fields).
        for mapping in field_mappings:
            dependency = mapping.source_field_id.relation
            if dependency in future_models:
                state = 'on_repeating'
                vals = [
                    'field_mapping_%s_%s' % (
                        str(self.id),
                        str(mapping.source_field_id.id)),
                    state]
                data.append(vals)
        fields = ['id', 'state']
        # bulk-update the mappings and keep the load() result as log
        import_result = self.env['etl.field_mapping'].load(fields, data)
        vals = {
            'log': import_result,
        }
        self.write(vals)
@api.one
def updata_records_number(
self, source_connection=False, target_connection=False):
if not source_connection or not target_connection:
(source_connection, target_connection) = self.manager_id.open_connections()
self.source_model_id.get_records(source_connection)
self.target_model_id.get_records(target_connection)
@api.multi
def run_repeated_action(
self, source_connection=False, target_connection=False,
repeated_action=True):
return self.run_action(repeated_action=True)
    @api.multi
    def read_source_model(
            self, source_connection=False, target_connection=False,
            repeated_action=False, context=None):
        """Export every record id of each action's source model once.

        Used to warm up / exercise the source side before a migration.
        ``repeated_action`` and ``context`` are accepted for signature
        compatibility but unused here.
        """
        # avoid reading the same source model twice across the recordset
        readed_model = []
        for action in self:
            if action.source_model_id.id in readed_model:
                continue
            _logger.info('Reading model %s' % action.source_model_id.model)
            if not source_connection:
                (source_connection, target_connection) = action.manager_id.open_connections()
            source_model_obj = source_connection.model(action.source_model_id.model)
            domain = []
            # include archived records when the model has an 'active' field
            active_field = action.env['etl.field'].search([
                ('model_id', '=', action.source_model_id.id),
                ('name', '=', 'active'),
            ], limit=1)
            if active_field:
                domain = [('active', 'in', [True, False])]
            source_model_ids = source_model_obj.search(domain)
            # export_data(['id']) forces external-id creation on the source
            source_model_obj.export_data(source_model_ids, ['id'])
            readed_model.append(action.source_model_id.id)
@api.one
def run_action(
self, source_connection=False, target_connection=False,
repeated_action=False):
_logger.info('Actions to run: %i' % len(self.ids))
action_obj = self.env['etl.action']
model_obj = self.env['etl.external_model']
field_mapping_obj = self.env['etl.field_mapping']
value_mapping_field_detail_obj = self.env['etl.value_mapping_field_detail']
value_mapping_field_obj = self.env['etl.value_mapping_field']
if not source_connection or not target_connection:
(source_connection, target_connection) = self.manager_id.open_connections()
# add language to connections context
source_connection.context = {'lang': self.manager_id.source_lang}
target_connection.context = {'lang': self.manager_id.target_lang}
_logger.info('Running action external_model_id.type %s' % self.name)
domain = literal_eval(self.source_domain)
if self.from_rec_id > 0:
domain.append(('id', '>=', self.from_rec_id))
if self.to_rec_id > 0:
domain.append(('id', '<=', self.to_rec_id))
source_model_obj = source_connection.model(self.source_model_id.model)
target_model_obj = target_connection.model(self.target_model_id.model)
source_model_ids = source_model_obj.search(domain)
_logger.info('Records to import %i' % len(source_model_ids))
_logger.info('Building source data...')
# Empezamos con los campos que definimos como id
source_fields = ['.id', self.source_id_exp]
target_fields = ['id']
if repeated_action:
state = 'on_repeating'
else:
state = 'enabled'
# source fields = enabled (or repeating) and type field
source_fields.extend([x.source_field for x in self.field_mapping_ids if x.state==state and x.type == 'field' and x.source_field_id.ttype != 'many2many' and x.source_field_id.ttype != 'many2one'])
#print source_fields
# target fields = enabled and field then expression then migrated_id
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'field' and x.source_field_id.ttype != 'many2many' and x.source_field_id.ttype != 'many2one'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'field' and x.source_field_id.ttype == 'many2one'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'field' and x.source_field_id.ttype == 'many2many'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'value_mapping'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'date_adapt'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'expression'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'migrated_id'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'reference'])
# Read and append source values of type 'field' and type not m2m
_logger.info('Building none m2m field mapping...')
source_model_data = source_model_obj.export_data(
source_model_ids, source_fields)['datas']
_logger.info('Building m2o field mapping...')
# Read and append source values of type 'field' and type m2m
source_fields_m2o = [x.id for x in self.field_mapping_ids if x.state==state and x.type == 'field' and x.source_field_id.ttype == 'many2one']
for field_id in source_fields_m2o:
field = field_mapping_obj.browse(field_id)
field_model = field.source_field_id.relation
model_id = model_obj.search([('model','=',field_model),('type','ilike','source')])
field_action = False
if model_id:
field_action = action_obj.search([('source_model_id','=',model_id[0].id)])
if field_action:
field_action = field_action[0]
for source_data_record in source_model_data:
source_data_m2o = source_model_obj.export_data([int(source_data_record[0])], ['.id', field.source_field, field.source_field.replace('/','.')])['datas']
new_field_value = False
if field_action.target_id_type == 'source_id' and source_data_m2o[0][1]:
new_field_value = source_data_m2o[0][1]
elif field_action.target_id_type == 'builded_id' and source_data_m2o[0][2]:
new_field_value = '%s_%s' % (field_action.target_id_prefix, str(source_data_m2o[0][2]))
source_data_record.append(new_field_value)
_logger.info('Building m2m field mapping...')
# Read and append source values of type 'field' and type m2m
source_fields_m2m = [x.id for x in self.field_mapping_ids if x.state==state and x.type == 'field' and x.source_field_id.ttype == 'many2many']
for field_id in source_fields_m2m:
field = field_mapping_obj.browse(field_id)
field_model = field.source_field_id.relation
model_id = model_obj.search([('model','=',field_model),('type','ilike','source')])
field_action = False
if model_id:
field_action = action_obj.search([('source_model_id','=',model_id[0].id)])
if field_action:
field_action = field_action[0]
model_data_obj = source_connection.model('ir.model.data')
for source_data_record in source_model_data:
source_data_m2m = source_model_obj.export_data([int(source_data_record[0])], ['.id', field.source_field])['datas']
new_field_value = False
for readed_record in source_data_m2m:
if readed_record[1]:
for value in readed_record[1].split(','):
value_id = model_data_obj.search([('model','ilike',field.source_field_id.relation),('name','ilike',value.split('.')[-1])])
if value_id:
value_id = model_data_obj.export_data([value_id[0]], ['.id', 'res_id'])['datas']
value_id = value_id[0][1]
if field_action.target_id_type == 'source_id' and value:
new_field_value = value
elif field_action.target_id_type == 'builded_id' and value_id:
if new_field_value:
new_field_value = new_field_value + ',' + '%s_%s' % (field_action.target_id_prefix, str(value_id))
else:
new_field_value = '%s_%s' % (field_action.target_id_prefix, str(value_id))
source_data_record.append(new_field_value)
_logger.info('Building value mapping mapping...')
# Read and append source values of type 'value_mapping'
source_fields_value_mapping = [x.source_field for x in self.field_mapping_ids if x.state==state and x.type == 'value_mapping']
#print 'source_fields_value_mapping', source_fields_value_mapping
source_data_value_mapping = source_model_obj.export_data(source_model_ids, source_fields_value_mapping)['datas']
#print 'source_data_value_mapping', source_data_value_mapping
source_value_mapping_id = [x.value_mapping_field_id.id for x in self.field_mapping_ids if x.state==state and x.type == 'value_mapping']
#print 'source_value_mapping_id', source_value_mapping_id
for readed_record, source_data_record in zip(source_data_value_mapping, source_model_data):
target_record = []
for field_value, value_mapping_id in zip(readed_record, source_value_mapping_id):
new_field_value = False
value_mapping = value_mapping_field_obj.browse(
value_mapping_id)
# TODO mejorar esta cosa horrible, no hace falta guardar en dos clases separadas, deberia usar una sola para selection y para id
if value_mapping.type == 'id':
new_field = value_mapping_field_detail_obj.search([
('source_external_model_record_id.ext_id', '=', field_value),
('value_mapping_field_id', '=', value_mapping_id)],
limit=1)
# if new_fields:
new_field_value = new_field.target_external_model_record_id.ext_id
elif value_mapping.type == 'selection':
new_field = value_mapping_field_detail_obj.search([
('source_value_id.ext_id', '=', field_value),
('value_mapping_field_id', '=', value_mapping_id)],
limit=1)
new_field_value = new_field.target_value_id.ext_id
# Convertimos a false todos aquellos mapeos al que no se les asigno pareja
# Si el modelo permite valores false va a andar bien, si no va a dar el error y debera mapearse
if new_field_value is None:
new_field_value = False
target_record.append(new_field_value)
source_data_record.extend(target_record)
_logger.info('Building date adapt...')
# Read and append source values of type 'date_adapt'
source_fields_date_adapt = [x.source_field for x in self.field_mapping_ids if x.state==state and x.type == 'date_adapt']
source_data_date_adapt = source_model_obj.export_data(source_model_ids, source_fields_date_adapt)['datas']
source_mapping_date_adapt = [x for x in self.field_mapping_ids if x.state==state and x.type == 'date_adapt']
for readed_record, source_data_record in zip(source_data_date_adapt, source_model_data):
target_record = []
for field_value, source_mapping in zip(readed_record, source_mapping_date_adapt):
if source_mapping.source_field_id.ttype == 'datetime' and field_value:
if source_mapping.target_field_id.ttype == 'date':
# TODO, no estoy seguro si esta forma de truncarlo funciona bien
field_value = field_value[:10]
if source_mapping.source_field_id.ttype == 'date' and field_value:
if source_mapping.target_field_id.ttype == 'datetime':
field_value = self.date_to_datetime(field_value)
target_record.append(field_value)
source_data_record.extend(target_record)
_logger.info('Building expressions...')
field_mapping_expression_ids = [x.id for x in self.field_mapping_ids if x.state==state and x.type == 'expression']
if field_mapping_expression_ids:
for rec in source_model_data:
rec_id = rec[0]
expression_results = field_mapping_obj.browse(
field_mapping_expression_ids).run_expressions(
int(rec_id),
source_connection,
target_connection)
rec.extend(expression_results)
_logger.info('Building migrated ids...')
field_mapping_migrated_id_ids = [x.id for x in self.field_mapping_ids if x.state==state and x.type == 'migrated_id']
if field_mapping_migrated_id_ids:
for rec in source_model_data:
rec_id = rec[0]
migrated_id_results = field_mapping_obj.browse(
field_mapping_migrated_id_ids).get_migrated_id(
int(rec_id),
source_connection,
target_connection)
rec.extend(migrated_id_results)
_logger.info('Building reference fields...')
field_mapping_reference_ids = [x.id for x in self.field_mapping_ids if x.state==state and x.type == 'reference']
if field_mapping_reference_ids:
for rec in source_model_data:
rec_id = rec[0]
reference_results = field_mapping_obj.browse(
field_mapping_reference_ids).get_reference(
int(rec_id), source_connection, target_connection)
_logger.info('Reference_results: %s' % reference_results)
rec.extend(reference_results)
_logger.info('Removing auxliaria .id')
target_model_data = []
#print source_model_data
#print ''
for record in source_model_data:
if self.target_id_type == 'source_id':
target_model_data.append(record[1:])
elif self.target_id_type == 'builded_id':
target_model_data.append(['%s_%s' % (
self.target_id_prefix, str(record[0]))] + record[2:])
try:
_logger.info('Loadding Data...')
import_result = target_model_obj.load(
target_fields, target_model_data)
vals = {'log': import_result}
except:
error = sys.exc_info()
print error
vals = {'log': error}
self.write(vals)
self.target_model_id.get_records(target_connection)
    @api.multi
    def order_actions(self, exceptions=None):
        """Order the actions so that each runs after the actions that
        migrate its many2one dependencies.

        Models listed in ``exceptions`` are pulled out first and given
        sequences 10, 20, ...; the rest are topologically sorted (bounded
        by ``2 * len(self)`` passes, so cyclic dependencies terminate)
        and given sequences 510, 520, ...

        :param exceptions: list of source model names to sequence first
        :return: ``[unordered_ids, ordered_ids]`` — ids that could not be
            ordered (unresolved/cyclic dependencies) and the ordered ids
        """
        _logger.info('Lines to order %i' % len(self.ids))
        if exceptions is None:
            exceptions = []
        # field_mapping_obj = self.pool.get('etl.field_mapping')
        ordered_actions = []
        ordered_ids = []
        # We exclude the exceptions
        unordered_ids = self.search([
            ('id', 'in', self.ids),
            ('source_model_id.model', 'not in', exceptions)]).ids
        _logger.info('Request IDS: %s' % str(self.ids))
        _logger.info('Request IDS without exceptions: %s' % str(unordered_ids))
        actions_to_order = [
            x.source_model_id.model for x in self.browse(unordered_ids)]
        _logger.info('Actions_to_order %s' % actions_to_order)
        # Bounded fixed-point loop: repeatedly take the head of the queue
        # and either emit it (all dependencies already ordered) or push it
        # back to the tail.
        count = 0
        count_max = len(self) * 2
        while unordered_ids and (count < count_max):
            count += 1
            rec = self.browse(unordered_ids[0])
            # many2one relations this action depends on (self-references
            # excluded, only models that are part of this ordering run)
            action_clean_dependecies = []
            many2one_mappings = self.env['etl.field_mapping'].search([
                ('action_id', '=', rec.id),
                ('source_field_id.ttype', '=', 'many2one'),
                ('state', 'in', ['to_analyze', 'enabled', 'on_repeating'])])
            for mapping in many2one_mappings:
                if (mapping.source_field_id.relation not in action_clean_dependecies) and (mapping.source_field_id.relation in actions_to_order):
                    if not(mapping.source_field_id.relation == rec.source_model_id.model):
                        action_clean_dependecies.append(mapping.source_field_id.relation)
                # else:
                    # TODO use this data for something! e.g. to mark the class
            _logger.info('Model: %s, depenencias: %s' % (
                rec.source_model_id.model, action_clean_dependecies))
            dependecies_ok = True
            for action_dependecy in action_clean_dependecies:
                if (action_dependecy not in ordered_actions) and (action_dependecy not in exceptions):
                    dependecies_ok = False
                    break
            unordered_ids.remove(rec.id)
            if dependecies_ok:
                _logger.info('Dependency ok!')
                ordered_ids.append(rec.id)
                ordered_actions.append(rec.source_model_id.model)
            else:
                _logger.info('Break, dependency false!')
                unordered_ids.append(rec.id)
        _logger.info('Unordered Models: %s' % str(unordered_ids))
        _logger.info('New Order: %s' % str(ordered_actions))
        # Add sequence to exception actions
        sequence = 0
        for exception in exceptions:
            exception_action_ids = self.search([
                ('id', 'in', self.ids),
                ('source_model_id.model', '=', exception)])
            sequence += 10
            vals = {
                'sequence': sequence,
            }
            exception_action_ids.write(vals)
        # Add sequence to ordered actions
        sequence = 500
        for ordered_action in self.browse(ordered_ids):
            sequence += 10
            vals = {
                'sequence': sequence,
            }
            ordered_action.write(vals)
        return [unordered_ids, ordered_ids]
@api.model
def date_to_datetime(self, userdate):
""" Convert date values expressed in user's timezone to
server-side UTC timestamp, assuming a default arbitrary
time of 12:00 AM - because a time is needed.
:param str userdate: date string in in user time zone
:return: UTC datetime string for server-side use
"""
# TODO: move to fields.datetime in server after 7.0
user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATE_FORMAT)
context = self._context
if context and context.get('tz'):
tz_name = context['tz']
else:
tz_name = self.env['res.users'].browse(SUPERUSER_ID).tz
print tz_name
#tz_name = tz_name[0]
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
#user_datetime = user_date + relativedelta(hours=12.0)
local_timestamp = context_tz.localize(user_date, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ingadhoc/odoo-etl | etl/action.py | Python | agpl-3.0 | 31,818 |
# Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from threading import Timer
from oslo_log import log as logging
from networking_vsphere._i18n import _LI
from networking_vsphere.utils.rpc_translator import update_rules
from neutron.agent import securitygroups_rpc
LOG = logging.getLogger(__name__)
class DVSSecurityGroupRpc(securitygroups_rpc.SecurityGroupAgentRpc):
    """Security-group RPC handler for the DVS agent.

    Refresh requests are coalesced: device ids are collected in
    ``_devices_to_update`` and flushed 2 seconds later by a deferred
    ``_refresh_ports`` call scheduled on a ``threading.Timer``.
    """

    def __init__(self, context, plugin_rpc,
                 defer_refresh_firewall=False):
        self.context = context
        self.plugin_rpc = plugin_rpc
        # Device ids queued for the next deferred firewall refresh.
        self._devices_to_update = set()
        self.init_firewall(defer_refresh_firewall)

    def prepare_devices_filter(self, device_ids):
        """Create firewall filters for the given devices."""
        if not device_ids:
            return
        LOG.info(_LI("Preparing filters for devices %s"), device_ids)
        if self.use_enhanced_rpc:
            devices_info = self.plugin_rpc.security_group_info_for_devices(
                self.context, list(device_ids))
            devices = update_rules(devices_info)
        else:
            devices = self.plugin_rpc.security_group_rules_for_devices(
                self.context, list(device_ids))
        self.firewall.prepare_port_filter(devices.values())

    def remove_devices_filter(self, device_ids):
        """Remove firewall filters for the given devices."""
        if not device_ids:
            return
        LOG.info(_LI("Remove device filter for %r"), device_ids)
        self.firewall.remove_port_filter(device_ids)

    def _refresh_ports(self):
        """Flush the pending device set and refresh their port filters.

        Runs on a Timer thread (see refresh_firewall).
        """
        # Fix: snapshot with a *copy*. The previous code aliased the set
        # (``device_ids = self._devices_to_update``), so the subtraction
        # below was always empty and devices queued by another thread
        # between the two statements were silently dropped.
        device_ids = set(self._devices_to_update)
        self._devices_to_update = self._devices_to_update - device_ids
        if not device_ids:
            return
        if self.use_enhanced_rpc:
            devices_info = self.plugin_rpc.security_group_info_for_devices(
                self.context, list(device_ids))
            devices = update_rules(devices_info)
        else:
            devices = self.plugin_rpc.security_group_rules_for_devices(
                self.context, list(device_ids))
        self.firewall.update_port_filter(devices.values())

    def refresh_firewall(self, device_ids=None):
        """Queue devices for a deferred (2 s) firewall rule refresh."""
        LOG.info(_LI("Refresh firewall rules"))
        if not device_ids:
            # Fix: guard before the set union — the default ``None`` used
            # to raise TypeError on ``set |= None``.
            return
        self._devices_to_update.update(device_ids)
        Timer(2, self._refresh_ports).start()
| VTabolin/networking-vsphere | networking_vsphere/agent/firewalls/dvs_securitygroup_rpc.py | Python | apache-2.0 | 2,782 |
# Copyright (C) 2013 Kai Willadsen <kai.willadsen@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Pango
from gi.repository import GtkSource
import meld.conf
import meld.filters
MELD_SCHEMA = 'org.gnome.meld'
class MeldSettings(GObject.GObject):
    """Handler for settings that can't easily be bound to object properties"""

    __gsignals__ = {
        'file-filters-changed': (GObject.SignalFlags.RUN_FIRST, None, ()),
        'text-filters-changed': (GObject.SignalFlags.RUN_FIRST, None, ()),
        'changed': (GObject.SignalFlags.RUN_FIRST, None, (str,)),
    }

    def __init__(self):
        GObject.GObject.__init__(self)
        # Prime the derived state from the current GSettings values, then
        # track further changes via the 'changed' signal.
        self.on_setting_changed(settings, 'filename-filters')
        self.on_setting_changed(settings, 'text-filters')
        self.on_setting_changed(settings, 'use-system-font')
        self.style_scheme = self._style_scheme_from_gsettings()
        settings.connect('changed', self.on_setting_changed)

    def on_setting_changed(self, settings, key):
        """Recompute derived state for *key* and re-emit a Meld signal."""
        if key == 'filename-filters':
            self.file_filters = self._filters_from_gsetting(
                'filename-filters', meld.filters.FilterEntry.SHELL)
            self.emit('file-filters-changed')
        elif key == 'text-filters':
            self.text_filters = self._filters_from_gsetting(
                'text-filters', meld.filters.FilterEntry.REGEX)
            self.emit('text-filters-changed')
        elif key in ('use-system-font', 'custom-font'):
            self.font = self._current_font_from_gsetting()
            self.emit('changed', 'font')
        elif key == 'style-scheme':
            # Fix: was ``key in ('style-scheme')`` — without a comma the
            # parentheses don't make a tuple, so this was a *substring*
            # test (e.g. key 'scheme' would have matched).
            self.style_scheme = self._style_scheme_from_gsettings()
            self.emit('changed', 'style-scheme')

    def _style_scheme_from_gsettings(self):
        """Resolve the configured GtkSourceView style scheme."""
        manager = GtkSource.StyleSchemeManager.get_default()
        return manager.get_scheme(settings.get_string('style-scheme'))

    def _filters_from_gsetting(self, key, filt_type):
        """Build FilterEntry objects of *filt_type* from the GSettings key."""
        filter_params = settings.get_value(key)
        filters = [
            meld.filters.FilterEntry.new_from_gsetting(params, filt_type)
            for params in filter_params
        ]
        return filters

    def _current_font_from_gsetting(self, *args):
        """Return the effective editor font (system monospace or custom)."""
        if settings.get_boolean('use-system-font'):
            font_string = interface_settings.get_string('monospace-font-name')
        else:
            font_string = settings.get_string('custom-font')
        return Pango.FontDescription(font_string)
def find_schema():
    """Look up the Meld GSettings schema in the bundled data directory,
    falling back to the default schema source for parent lookups. Used
    when running uninstalled."""
    default_source = Gio.SettingsSchemaSource.get_default()
    schema_source = Gio.SettingsSchemaSource.new_from_directory(
        meld.conf.DATADIR, default_source, False)
    return schema_source.lookup(MELD_SCHEMA, False)
def check_backend():
    """Return the GSettings backend to use, or None for the default.

    A flat-file (keyfile) backend is requested by creating
    ``$XDG_CONFIG_HOME/meld/use-rc-prefs``, but is not supported yet, so
    this currently always returns None.
    """
    marker = os.path.join(GLib.get_user_config_dir(), 'meld', 'use-rc-prefs')
    if os.path.exists(marker):
        # TODO: Use GKeyfileSettingsBackend once available (see bgo#682702)
        print("Using a flat-file settings backend is not yet supported")
    return None
def create_settings(uninstalled=False):
    """Initialise the module-level settings singletons.

    :param uninstalled: load the Meld schema from the source tree instead
        of the installed system schemas (for running from a checkout)
    """
    global settings, interface_settings, meldsettings
    # NOTE: check_backend() currently always returns None, so the
    # new_with_backend branch is effectively unreachable — confirm before
    # relying on it.
    backend = check_backend()
    if uninstalled:
        schema = find_schema()
        settings = Gio.Settings.new_full(schema, backend, None)
    elif backend:
        settings = Gio.Settings.new_with_backend(MELD_SCHEMA, backend)
    else:
        settings = Gio.Settings.new(MELD_SCHEMA)
    interface_settings = Gio.Settings.new('org.gnome.desktop.interface')
    meldsettings = MeldSettings()
def bind_settings(obj):
    """Bind *obj* properties to GSettings keys.

    The object declares its bindings as an iterable of
    ``(settings_key, property_name)`` pairs in ``__gsettings_bindings__``.
    """
    global settings
    bindings = getattr(obj, '__gsettings_bindings__', ())
    for settings_id, property_id in bindings:
        settings.bind(
            settings_id, obj, property_id, Gio.SettingsBindFlags.DEFAULT)
# Module-level singletons, populated by create_settings() at startup and
# used directly throughout Meld.
settings = None
interface_settings = None
meldsettings = None
| Spitfire1900/meld | meld/settings.py | Python | gpl-2.0 | 4,587 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-01-28 14:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import shopifier.admin.models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9): adds the CollectionImage model and
    # repoints CustomCollection.image at it via a cascading FK.

    dependencies = [
        ('shopifier_admin', '0016_auto_20170128_1650'),
    ]

    operations = [
        migrations.CreateModel(
            name='CollectionImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('src', models.ImageField(upload_to=shopifier.admin.models.normalization_img_collection_file_name)),
                ('created_at', models.DateTimeField(default=shopifier.admin.models.now)),
            ],
        ),
        migrations.AlterField(
            model_name='customcollection',
            name='image',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='collection', to='shopifier_admin.CollectionImage'),
        ),
    ]
| vkuryachenko/Django-Shopy | shopifier/admin/migrations/0017_auto_20170128_2045.py | Python | bsd-3-clause | 1,063 |
# Fixture data for SoftLayer_Virtual_Guest_Block_Device_Template_Group
# API calls, as returned to the test suite.


def _image(guid, image_id, name):
    # Build one image template record; each call returns fresh nested
    # containers so tests can mutate fixtures independently.
    return {
        'accountId': 1234,
        'blockDevices': [],
        'createDate': '2013-12-05T21:53:03-06:00',
        'globalIdentifier': guid,
        'id': image_id,
        'name': name,
        'parentId': '',
        'publicFlag': True,
    }


IMAGES = [
    _image('0B5DEAF4-643D-46CA-A695-CECBE8832C9D', 100, 'test_image'),
    _image('EB38414C-2AB3-47F3-BBBD-56A5F689620B', 101, 'test_image2'),
]

getObject = IMAGES[0]
getPublicImages = IMAGES
deleteObject = {}
editObject = True
setTags = True
createFromExternalSource = [{
    'createDate': '2013-12-05T21:53:03-06:00',
    'globalIdentifier': '0B5DEAF4-643D-46CA-A695-CECBE8832C9D',
    'id': 100,
    'name': 'test_image',
}]
copyToExternalSource = True
| skraghu/softlayer-python | SoftLayer/fixtures/SoftLayer_Virtual_Guest_Block_Device_Template_Group.py | Python | mit | 820 |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 1 14:27:42 2016
@author: xunil
"""
import Simulation as s
# Run the simulation once; presumably [1000] is a list of sample sizes to
# simulate — TODO confirm against Simulation.sim_normal's signature.
s.sim_normal([1000])
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libev(AutotoolsPackage):
    """A full-featured and high-performance event loop that is loosely modelled
    after libevent, but without its limitations and bugs."""

    homepage = "http://software.schmorp.de/pkg/libev.html"
    url = "http://dist.schmorp.de/libev/libev-4.24.tar.gz"
    # Older releases are archived under Attic/.
    list_url = "http://dist.schmorp.de/libev/Attic/"

    version('develop', git='https://github.com/enki/libev')
    version('4.24', '94459a5a22db041dec6f98424d6efe54')

    # The git checkout ships no configure script, so the @develop version
    # needs the full autotools chain to bootstrap the build.
    depends_on('autoconf', type='build', when='@develop')
    depends_on('automake', type='build', when='@develop')
    depends_on('libtool', type='build', when='@develop')
    depends_on('m4', type='build', when='@develop')
#!/usr/bin/env python
import unittest
import sys
import os
import socket
sys.path.append("..")
import test_helpers
from hook_script_test_case import hook_script_test_case
class test_script_ping_url(hook_script_test_case):
    """Integration test: a git push should make the hook HTTP-GET a
    configured URL.  The test forks: the parent plays a one-shot HTTP
    server on port 4891 while the child performs the push.
    """

    def test_notification(self):
        # Install the hook configured to ping http://localhost:4891/.
        test_helpers.deployHookKit('test_script_ping_url_config.json')
        os.system(('echo foo >> ' + test_helpers.repo_checkout +
                   '/testfile.py'))
        test_helpers.gitCommitWithMessage("Testing notifications")
        # Fork a listening socket for the hook to push into
        pid = os.fork()
        if pid:
            # Parent: accept exactly one connection, capture the request
            # line, answer 200 so the hook completes cleanly.
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.bind(('', 4891))
            s.listen(1)
            conn, addr = s.accept()
            server_data_received = conn.recv(1024)
            conn.sendall('HTTP/1.1 200 OK\r\n')
            conn.close()
            # make sure the child process gets cleaned up
            os.waitpid(pid, 0)
        else:
            # Child: the push triggers the hook; _exit avoids running the
            # parent's unittest teardown in the forked process.
            self.assertTrue(test_helpers.gitPush(),
                            "Pushing a commit and receiving a url ping")
            os._exit(0)
        # Only the parent reaches here; verify the hook issued a GET /.
        self.assertEqual('GET / HTTP/1.1', server_data_received.split('\r')[0])
| jesper/hookkit | tests/test_script_ping_url.py | Python | gpl-2.0 | 1,312 |
#!/usr/bin/env python
import numpy as np
import os
import pyhdf.SD
import tempfile
from nose.tools import eq_
from pyhdf.SD import SDC
def test_long_varname():
    """Regression test: a 255-character SDS variable name survives a
    write/read round trip through an HDF4 file.
    """
    sds_name = 'a'*255
    fd, path = tempfile.mkstemp(suffix='.hdf', prefix='pyhdf_')
    # mkstemp returns an *open* OS-level file descriptor; close it right
    # away so it is not leaked — the HDF library reopens the file by path.
    os.close(fd)
    try:
        # create a file with a long variable name
        sd = pyhdf.SD.SD(path, SDC.WRITE|SDC.CREATE|SDC.TRUNC)
        sds = sd.create(sds_name, SDC.FLOAT32, (3,))
        sds[:] = range(10, 13)
        sds.endaccess()
        sd.end()

        # check we can read the variable name back unchanged
        sd = pyhdf.SD.SD(path)
        sds = sd.select(sds_name)
        name, _, _, _, _ = sds.info()
        sds.endaccess()
        sd.end()
        eq_(sds_name, name)
    finally:
        # Always remove the temporary file, even on assertion failure.
        os.unlink(path)
| ryfeus/lambda-packs | HDF4_H5_NETCDF/source2.7/pyhdf/test_SD.py | Python | mit | 752 |
# -*- coding: utf-8 -*-
"""Read data from 'Harvard Library Open Metadata'.
Records: ~12 Million
Size: 12.8 GigaByte (Unpacked)
Info: http://library.harvard.edu/open-metadata
Data: https://s3.amazonaws.com/hlom/harvard.tar.gz
Instructions:
Download datafile and run `tar xvf harvard.tar.gz` to extract marc21 files.
After moving the .mrc files to the /data/harvard folder you should be able
to run this script and see log output of parsed data.
"""
import os
import logging
import isbnlib
from pymarc import MARCReader
from iscc_bench import DATA_DIR, MetaData
log = logging.getLogger(__name__)
HARVARD_DATA = os.path.join(DATA_DIR, "harvard")
def harvard(path=HARVARD_DATA):
    """Return a generator that iterates over all harvard records with complete metadata.

    :param str path: path to directory with harvard .mrc files
    :return: Generator[:class:`MetaData`] (filtered for records that have ISBNs)
    """
    for meta in marc21_dir_reader(path):
        # Keep only records with isbn, title and author, where the ISBN is valid.
        if all((meta.isbn, meta.title, meta.author)) and not isbnlib.notisbn(meta.isbn):
            # Basic cleanup
            try:
                # Normalize to ISBN-13; strip MARC trailing "/" punctuation
                # and drop any subtitle after " : ".
                isbn = isbnlib.to_isbn13(meta.isbn)
                title = meta.title.strip("/").strip().split(" : ")[0]
                cleaned = MetaData(isbn, title, meta.author)
            except Exception:
                # Malformed record — log with traceback and keep going.
                log.exception("Error parsing data")
                continue
            log.debug(cleaned)
            yield cleaned
def marc21_dir_reader(path=HARVARD_DATA):
    """Yield parsed MetaData objects from every harvard marc21 file in a
    directory.

    :param str path: path to directory with harvard .mrc files
    :return: Generator[:class:`MetaData`]
    """
    for entry_name in os.listdir(path):
        entry_path = os.path.join(path, entry_name)
        log.info("Reading harvard marc21 file: {}".format(entry_name))
        # Delegate per-file parsing; each file yields many records.
        yield from marc21_file_reader(entry_path)
def marc21_file_reader(file_path):
    """Return a generator that yields parsed MetaData records from a harvard marc21 file.

    :param str file_path: path to harvard marc21 file
    :return: Generator[:class:`MetaData`]
    """
    with open(file_path, "rb") as mf:
        reader = MARCReader(mf, utf8_handling="ignore")
        # Manual next() loop (instead of `for record in reader`) so a
        # UnicodeDecodeError raised while parsing one record can be logged
        # and skipped without aborting the rest of the file.
        while True:
            try:
                record = next(reader)
                yield MetaData(record.isbn(), record.title(), record.author())
            except UnicodeDecodeError as e:
                log.error(e)
                continue
            except StopIteration:
                # End of file — stop the generator cleanly.
                break
if __name__ == "__main__":
    """Demo usage."""
    # logging.basicConfig(level=logging.DEBUG)
    # Stream all cleaned harvard records and print each one.
    for entry in harvard():
        # Do something with entry (MetaData object)
        print(entry)
| coblo/isccbench | iscc_bench/readers/harvard.py | Python | bsd-2-clause | 2,886 |
#!/usr/bin/python
# Copyright (c) 2014-2017 Ansible Project
# Copyright (c) 2017, 2018 Will Thames
# Copyright (c) 2017, 2018 Michael De La Rue
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: rds_snapshot_info
version_added: "2.6"
short_description: obtain information about one or more RDS snapshots
description:
- obtain information about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora)
- Aurora snapshot information may be obtained if no identifier parameters are passed or if one of the cluster parameters are passed.
- This module was called C(rds_snapshot_facts) before Ansible 2.9. The usage did not change.
options:
db_snapshot_identifier:
description:
- Name of an RDS (unclustered) snapshot. Mutually exclusive with I(db_instance_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
required: false
aliases:
- snapshot_name
db_instance_identifier:
description:
- RDS instance name for which to find snapshots. Mutually exclusive with I(db_snapshot_identifier), I(db_cluster_identifier),
I(db_cluster_snapshot_identifier)
required: false
db_cluster_identifier:
description:
- RDS cluster name for which to find snapshots. Mutually exclusive with I(db_snapshot_identifier), I(db_instance_identifier),
I(db_cluster_snapshot_identifier)
required: false
db_cluster_snapshot_identifier:
description:
- Name of an RDS cluster snapshot. Mutually exclusive with I(db_instance_identifier), I(db_snapshot_identifier), I(db_cluster_identifier)
required: false
snapshot_type:
description:
- Type of snapshot to find. By default both automated and manual
snapshots will be returned.
required: false
choices: ['automated', 'manual', 'shared', 'public']
requirements:
- "python >= 2.6"
- "boto3"
author:
- "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Get information about an snapshot
- rds_snapshot_info:
db_snapshot_identifier: snapshot_name
register: new_database_info
# Get all RDS snapshots for an RDS instance
- rds_snapshot_info:
db_instance_identifier: helloworld-rds-master
'''
RETURN = '''
snapshots:
description: List of non-clustered snapshots
returned: When cluster parameters are not passed
type: complex
contains:
allocated_storage:
description: How many gigabytes of storage are allocated
returned: always
type: int
sample: 10
availability_zone:
description: The availability zone of the database from which the snapshot was taken
returned: always
type: str
sample: us-west-2b
db_instance_identifier:
description: Database instance identifier
returned: always
type: str
sample: hello-world-rds
db_snapshot_arn:
description: Snapshot ARN
returned: always
type: str
sample: arn:aws:rds:us-west-2:111111111111:snapshot:rds:hello-world-rds-us1-2018-05-16-04-03
db_snapshot_identifier:
description: Snapshot name
returned: always
type: str
sample: rds:hello-world-rds-us1-2018-05-16-04-03
encrypted:
description: Whether the snapshot was encrypted
returned: always
type: bool
sample: true
engine:
description: Database engine
returned: always
type: str
sample: postgres
engine_version:
description: Database engine version
returned: always
type: str
sample: 9.5.10
iam_database_authentication_enabled:
description: Whether database authentication through IAM is enabled
returned: always
type: bool
sample: false
instance_create_time:
description: Time the Instance was created
returned: always
type: str
sample: '2017-10-10T04:00:07.434000+00:00'
kms_key_id:
description: ID of the KMS Key encrypting the snapshot
returned: always
type: str
sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-1234-aaaa-0000-1234567890ab
license_model:
description: License model
returned: always
type: str
sample: postgresql-license
master_username:
description: Database master username
returned: always
type: str
sample: dbadmin
option_group_name:
description: Database option group name
returned: always
type: str
sample: default:postgres-9-5
percent_progress:
description: Percent progress of snapshot
returned: always
type: int
sample: 100
snapshot_create_time:
description: Time snapshot was created
returned: always
type: str
sample: '2018-05-16T04:03:33.871000+00:00'
snapshot_type:
description: Type of snapshot
returned: always
type: str
sample: automated
status:
description: Status of snapshot
returned: always
type: str
sample: available
storage_type:
description: Storage type of underlying DB
returned: always
type: str
sample: gp2
tags:
description: Snapshot tags
returned: always
type: complex
contains: {}
vpc_id:
description: ID of VPC containing the DB
returned: always
type: str
sample: vpc-abcd1234
cluster_snapshots:
description: List of cluster snapshots
returned: always
type: complex
contains:
allocated_storage:
description: How many gigabytes of storage are allocated
returned: always
type: int
sample: 1
availability_zones:
description: The availability zones of the database from which the snapshot was taken
returned: always
type: list
sample:
- ca-central-1a
- ca-central-1b
cluster_create_time:
description: Date and time the cluster was created
returned: always
type: str
sample: '2018-05-17T00:13:40.223000+00:00'
db_cluster_identifier:
description: Database cluster identifier
returned: always
type: str
sample: test-aurora-cluster
db_cluster_snapshot_arn:
description: ARN of the database snapshot
returned: always
type: str
sample: arn:aws:rds:ca-central-1:111111111111:cluster-snapshot:test-aurora-snapshot
db_cluster_snapshot_identifier:
description: Snapshot identifier
returned: always
type: str
sample: test-aurora-snapshot
engine:
description: Database engine
returned: always
type: str
sample: aurora
engine_version:
description: Database engine version
returned: always
type: str
sample: 5.6.10a
iam_database_authentication_enabled:
description: Whether database authentication through IAM is enabled
returned: always
type: bool
sample: false
kms_key_id:
description: ID of the KMS Key encrypting the snapshot
returned: always
type: str
sample: arn:aws:kms:ca-central-1:111111111111:key/abcd1234-abcd-1111-aaaa-0123456789ab
license_model:
description: License model
returned: always
type: str
sample: aurora
master_username:
description: Database master username
returned: always
type: str
sample: shertel
percent_progress:
description: Percent progress of snapshot
returned: always
type: int
sample: 0
port:
description: Database port
returned: always
type: int
sample: 0
snapshot_create_time:
description: Date and time when the snapshot was created
returned: always
type: str
sample: '2018-05-17T00:23:23.731000+00:00'
snapshot_type:
description: Type of snapshot
returned: always
type: str
sample: manual
status:
description: Status of snapshot
returned: always
type: str
sample: creating
storage_encrypted:
description: Whether the snapshot is encrypted
returned: always
type: bool
sample: true
tags:
description: Tags of the snapshot
returned: always
type: complex
contains: {}
vpc_id:
description: VPC of the database
returned: always
type: str
sample: vpc-abcd1234
'''
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
try:
import botocore
except Exception:
pass # caught by imported HAS_BOTO3
def common_snapshot_info(module, conn, method, prefix, params):
    """Shared implementation for instance and cluster snapshot lookups.

    :param module: AnsibleAWSModule instance (used for failure reporting)
    :param conn: boto3 RDS client
    :param str method: paginator method name, e.g. 'describe_db_snapshots'
    :param str prefix: result-key prefix, e.g. 'DBSnapshot' (results live
        under '<prefix>s', the ARN under '<prefix>Arn', etc.)
    :param dict params: keyword arguments forwarded to the paginator
    :return: list of snake_case snapshot dicts, each with a 'tags' mapping
    """
    paginator = conn.get_paginator(method)
    try:
        results = paginator.paginate(**params).build_full_result()['%ss' % prefix]
    except is_boto3_error_code('%sNotFound' % prefix):
        # A named snapshot that does not exist is not an error — report none.
        results = []
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, "trying to get snapshot information")

    # Tags are not included in the describe output; fetch them per snapshot.
    for snapshot in results:
        try:
            snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix],
                                                                                          aws_retry=True)['TagList'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix])

    return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results]
def cluster_snapshot_info(module, conn):
    """Return information about RDS cluster (Aurora) snapshots.

    Builds the DescribeDBClusterSnapshots parameters from the module
    arguments and delegates to common_snapshot_info().
    """
    snapshot_name = module.params.get('db_cluster_snapshot_identifier')
    snapshot_type = module.params.get('snapshot_type')
    # BUG FIX: the module argument is 'db_cluster_identifier' (see
    # argument_spec in main()); the previous key
    # 'db_cluster_instance_identifier' never exists in module.params, so
    # filtering by cluster silently returned everything.
    cluster_name = module.params.get('db_cluster_identifier')

    params = dict()
    if snapshot_name:
        params['DBClusterSnapshotIdentifier'] = snapshot_name
    if cluster_name:
        # boto3's DescribeDBClusterSnapshots parameter is DBClusterIdentifier
        # (DBClusterInstanceIdentifier is not a valid API parameter).
        params['DBClusterIdentifier'] = cluster_name
    if snapshot_type:
        params['SnapshotType'] = snapshot_type
    if snapshot_type == 'public':
        # boto3 spells these IncludePublic/IncludeShared, not IsPublic/IsShared.
        params['IncludePublic'] = True
    elif snapshot_type == 'shared':
        params['IncludeShared'] = True

    return common_snapshot_info(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params)
def standalone_snapshot_info(module, conn):
    """Return information about unclustered RDS instance snapshots.

    Builds the DescribeDBSnapshots parameters from the module arguments
    and delegates to common_snapshot_info().
    """
    snapshot_name = module.params.get('db_snapshot_identifier')
    snapshot_type = module.params.get('snapshot_type')
    instance_name = module.params.get('db_instance_identifier')

    params = dict()
    if snapshot_name:
        params['DBSnapshotIdentifier'] = snapshot_name
    if instance_name:
        params['DBInstanceIdentifier'] = instance_name
    if snapshot_type:
        params['SnapshotType'] = snapshot_type
    if snapshot_type == 'public':
        # BUG FIX: boto3's DescribeDBSnapshots accepts IncludePublic and
        # IncludeShared; IsPublic/IsShared are not valid parameters and
        # would raise a ParamValidationError.
        params['IncludePublic'] = True
    elif snapshot_type == 'shared':
        params['IncludeShared'] = True

    return common_snapshot_info(module, conn, 'describe_db_snapshots', 'DBSnapshot', params)
def main():
    """Module entry point: parse arguments, query RDS, and exit with results.

    Returns 'snapshots' (instance snapshots) unless cluster parameters were
    given, and 'cluster_snapshots' unless instance parameters were given —
    so with no identifier parameters both lists are returned.
    """
    argument_spec = dict(
        db_snapshot_identifier=dict(aliases=['snapshot_name']),
        db_instance_identifier=dict(),
        db_cluster_identifier=dict(),
        db_cluster_snapshot_identifier=dict(),
        snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public'])
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # The four identifier parameters are mutually exclusive.
        mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']]
    )
    # Deprecated alias kept for backward compatibility with <2.9 playbooks.
    if module._name == 'rds_snapshot_facts':
        module.deprecate("The 'rds_snapshot_facts' module has been renamed to 'rds_snapshot_info'", version='2.13')

    conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
    results = dict()
    if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']:
        results['snapshots'] = standalone_snapshot_info(module, conn)
    if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']:
        results['cluster_snapshots'] = cluster_snapshot_info(module, conn)

    module.exit_json(changed=False, **results)
| thaim/ansible | lib/ansible/modules/cloud/amazon/rds_snapshot_info.py | Python | mit | 12,675 |
"""
Django forms for accounts
"""
from django import forms
from django.core.exceptions import ValidationError
class RetirementQueueDeletionForm(forms.Form):
    """
    Admin form to facilitate learner retirement cancellation
    """
    # Must be explicitly checked by the admin to confirm the cancellation.
    cancel_retirement = forms.BooleanField(required=True)

    def save(self, retirement):
        """
        When the form is POSTed we double-check the retirment status
        and perform the necessary steps to cancel the retirement
        request.

        :param retirement: UserRetirementStatus-like object with
            current_state, original_username, original_email, and user —
            TODO confirm exact model against the admin view that calls this.
        :raises ValidationError: if the retirement is not in PENDING state.
        """
        if retirement.current_state.state_name != 'PENDING':
            self.add_error(
                None,
                # Translators: 'current_state' is a string from an enumerated list indicating the learner's retirement
                # state. Example: FORUMS_COMPLETE
                "Retirement requests can only be cancelled for users in the PENDING state."
                " Current request state for '{original_username}': {current_state}".format(
                    original_username=retirement.original_username,
                    current_state=retirement.current_state.state_name
                )
            )
            raise ValidationError('Retirement is in the wrong state!')

        # Load the user record using the retired email address -and- change the email address back.
        retirement.user.email = retirement.original_email
        retirement.user.save()

        # Delete the user retirement status record.
        # No need to delete the accompanying "permanent" retirement request record - it gets done via Django signal.
        retirement.delete()
| ahmedaljazzar/edx-platform | openedx/core/djangoapps/user_api/accounts/forms.py | Python | agpl-3.0 | 1,596 |
import sys
import argparse
from builder.args import addLoggingParams, addEarlyStop, addSupDataParams
from builder.profiler import setupLogging
import numpy as np
'''This is a simple batch generator for lenet5Trainer.py. All tweak-able values
in lenet5Trainer have min, max and step here. This generates a dense matrix
of processing parameters sets into a batch file for the appropriate OS.
'''
if __name__ == '__main__' :
    # Parse the min/max/step (or explicit list) for every tweak-able
    # lenet5Trainer parameter, then emit one command line per permutation.
    parser = argparse.ArgumentParser()
    addLoggingParams(parser)
    parser.add_argument('--learnC', dest='learnC', type=float, nargs='+',
                        default=[.1,1,.25],
                        help='Rate of learning on Convolutional Layers.')
    parser.add_argument('--learnF', dest='learnF', type=float, nargs='+',
                        default=[.1,1,.25],
                        help='Rate of learning on Fully-Connected Layers.')
    parser.add_argument('--momentum', dest='momentum', type=float, nargs='+',
                        default=[.1,1,.4],
                        help='Momentum rate all layers.')
    parser.add_argument('--dropout', dest='dropout', type=bool,
                        nargs='+', default=[False, True],
                        help='Enable dropout throughout the network. Dropout '\
                             'percentages are based on optimal reported '\
                             'results. NOTE: Networks using dropout need to '\
                             'increase both neural breadth and learning rates')
    parser.add_argument('--kernel', dest='kernel', type=int, nargs='+',
                        default=[20,80,20],
                        help='Number of Convolutional Kernels in each Layer.')
    parser.add_argument('--neuron', dest='neuron', type=int, nargs='+',
                        default=[200,500,150],
                        help='Number of Neurons in Hidden Layer.')
    addEarlyStop(parser)
    addSupDataParams(parser, 'batchGen')
    options = parser.parse_args()

    # setup the logger
    log, prof = setupLogging(options, 'batchGen')

    def genSteps(args) :
        '''Expand a [min, max, step] triple into a range; pass explicit
           lists of any other length through unchanged.'''
        return np.arange(args[0], args[1], args[2]) if len(args) == 3 else args

    def permute(learnC, learnF, momentum, dropout,
                kernel, neuron, limit, stop, batch) :
        '''Generate all possible permutations of the parameters.'''
        import itertools
        params = [learnC, learnF, momentum, dropout,
                  kernel, neuron, limit, stop, batch]
        paramSets = [genSteps(x) for x in params]
        return list(itertools.product(*paramSets))

    permutations = permute(options.learnC, options.learnF, options.momentum,
                           options.dropout, options.kernel, options.neuron,
                           options.limit, options.stop, options.batchSize)

    # BUG FIX: the conditional expression must be parenthesized — without
    # parentheses this parsed as ('batch' + '.bat') if win32 else '.sh',
    # so on non-Windows the batch file was literally named '.sh'.
    filename = 'batch' + ('.bat' if sys.platform == 'win32' else '.sh')
    with open(filename, 'w') as f :
        for perm in permutations :
            perm = [str(x) for x in perm]
            # BUG FIX: drop the Windows-only '.\' path prefix (which also
            # relied on '\l' being a non-escape) so the generated command
            # works from both .bat and .sh scripts.
            cmd = 'python lenet5Trainer.py'
            cmd += ' --learnC ' + perm[0]
            cmd += ' --learnF ' + perm[1]
            cmd += ' --momentum ' + perm[2]
            cmd += ' --dropout ' + perm[3]
            cmd += ' --kernel ' + perm[4]
            cmd += ' --neuron ' + perm[5]
            cmd += ' --limit ' + perm[6]
            cmd += ' --stop ' + perm[7]
            cmd += ' --batch ' + perm[8]
            if options.synapse is not None :
                cmd += ' --syn ' + options.synapse
            if options.data is not None :
                cmd += ' ' + options.data
            f.write(cmd + '\n')
#
# ParamSet.py -- Groups of widgets holding parameters
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.misc import Widgets, Callback, Bunch
class ParamSet(Callback.Callbacks):
    """A group of GUI entry widgets bound to a dict of parameters.

    Emits a 'changed' callback whenever any entry is edited.
    """

    def __init__(self, logger, params):
        super(ParamSet, self).__init__()
        self.logger = logger
        # List of parameter descriptors (Bunch-like) set by build_params().
        self.paramlst = []
        # name -> current value cache, shared with the caller.
        self.params = params
        # name -> entry widget, populated by build_params().
        self.widgets = {}

        for name in ('changed', ):
            self.enable_callback(name)

    def build_params(self, paramlst, orientation='vertical'):
        """Build label/entry widget pairs for each descriptor in paramlst.

        Returns the container widget; caches the widget map internally.
        """
        # construct a set of widgets for the parameters
        captions = []
        for param in paramlst:
            # NOTE(review): 'time' looks like a typo for 'title' — as written
            # the label falls back to param.name unless a 'time' key exists;
            # confirm against the descriptors passed by callers.
            title = param.get('time', param.name)
            captions.append((title+':', 'label', param.name, 'entry'))

        w, b = Widgets.build_info(captions, orientation=orientation)

        # fill with default values and tool tips
        for param in paramlst:
            name = param.name

            # if we have a cached value for the parameter, use it
            if name in self.params:
                value = self.params[name]
                b[name].set_text(str(value))

            # otherwise initialize to the default value, if available
            elif 'default' in param:
                value = param.default
                b[name].set_text(str(value))
                self.params[name] = value

            if 'description' in param:
                b[name].set_tooltip(param.description)

            b[name].add_callback('activated', self._value_changed_cb)

        self.paramlst = paramlst
        self.widgets = b

        return w

    def _get_params(self):
        # Pull current text out of each entry, coercing via the descriptor's
        # 'type' callable when present, and refresh the params cache.
        for param in self.paramlst:
            w = self.widgets[param.name]
            value = w.get_text()
            if 'type' in param:
                value = param.type(value)
            self.params[param.name] = value

    def sync_params(self):
        """Push cached parameter values back into the entry widgets."""
        for param in self.paramlst:
            key = param.name
            w = self.widgets[key]
            if key in self.params:
                value = self.params[key]
                w.set_text(str(value))

    def get_params(self):
        """Return the params dict, refreshed from the current widget text."""
        self._get_params()
        return self.params

    def _value_changed_cb(self, w):
        # Entry 'activated' handler: refresh the cache and notify listeners.
        self._get_params()
        self.make_callback('changed', self.params)
| bsipocz/ginga | ginga/misc/ParamSet.py | Python | bsd-3-clause | 2,493 |
from sos.plugins import DebianPlugin
from sos.policies import PackageManager, LinuxPolicy
import os
class DebianPolicy(LinuxPolicy):
    """sos policy for Debian systems: detection, package queries and PATH."""
    distro = "Debian"
    vendor = "the Debian project"
    vendor_url = "http://www.debian.org/"
    report_name = ""
    ticket_number = ""
    # Query dpkg for "name|version" pairs, one package per line.
    package_manager = PackageManager(
        "dpkg-query -W -f='${Package}|${Version}\\n' \*")
    valid_subclasses = [DebianPlugin]
    PATH = "/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games" \
        + ":/usr/local/sbin:/usr/local/bin"

    def __init__(self, sysroot=None):
        super(DebianPolicy, self).__init__(sysroot=sysroot)
        self.report_name = ""
        self.ticket_number = ""
        self.package_manager = PackageManager(
            "dpkg-query -W -f='${Package}|${Version}\\n' \*")
        self.valid_subclasses = [DebianPlugin]

    @classmethod
    def check(cls):
        """This method checks to see if we are running on Debian.
        It returns True or False."""
        return os.path.isfile('/etc/debian_version')

    def dist_version(self):
        """Return the major Debian release number, or False if unknown.

        Only recognizes the wheezy (6) and jessie (7) development aliases
        recorded in /etc/lsb-release.
        """
        try:
            with open('/etc/lsb-release', 'r') as fp:
                rel_string = fp.read()
                if "wheezy/sid" in rel_string:
                    return 6
                elif "jessie/sid" in rel_string:
                    return 7
            return False
        # BUG FIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only file-access failures are expected here.
        except (IOError, OSError):
            return False
# vim: set et ts=4 sw=4 :
| alexandrujuncu/sos | sos/policies/debian.py | Python | gpl-2.0 | 1,429 |
"""
Object model and helper classes used in the generation of classes from
an OME XML (http://www.ome-xml.org) XSD document.
"""
#
# Copyright (C) 2009 - 2016 Open Microscopy Environment. All rights reserved.
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from util import odict
import logging
from ome.modeltools.object import OMEModelObject
from ome.modeltools.property import OMEModelProperty
from ome.modeltools.exceptions import ModelProcessingError
from ome.modeltools import config
class ReferenceDelegate(object):
    """
    A "virtual" property delegate used by "reference" OMEModelProperty
    instances.  It implements just enough of the generateDS delegate
    interface (an "attribute" or an "element") for back references to be
    processed like real schema members.
    """
    def __init__(self, name, dataType, plural):
        # Back references are named after the referencing element.
        self.name = '%s_BackReference' % name
        self.dataType = dataType
        self.plural = plural
        # No enumeration values; keeps enumeration-aware property code
        # functional when it inspects this delegate.
        self.values = None
        # A back reference is optional and effectively unbounded.
        self.maxOccurs = 9999
        self.minOccurs = 0

    def getValues(self):
        """Enumeration values — always None for a back reference."""
        return self.values

    def getMaxOccurs(self):
        """Upper cardinality bound (9999, i.e. unbounded)."""
        return self.maxOccurs

    def getMinOccurs(self):
        """Lower cardinality bound (0, i.e. optional)."""
        return self.minOccurs

    def getType(self):
        """The referenced data type name."""
        return self.dataType

    def getName(self):
        """The generated back-reference name."""
        return self.name

    def isComplex(self):
        """Back references always model complex content."""
        return True
class OMEModel(object):
    def __init__(self, opts):
        """Initialize an empty model.

        :param opts: generator options (provides the target language helper)
        """
        self.opts = opts
        # element name -> model object, for lookups by name.
        self.elementNameObjectMap = dict()
        # schema element -> model object, insertion-ordered.
        self.objects = odict()
        # element name -> list of parent element names.
        self.parents = dict()
        # A mapping of abstract substitution groups with the abstract element
        self.substitutionElement_map = dict()
    def addObject(self, element, obj):
        """Register *obj* as the model object for schema *element*.

        Raises ModelProcessingError if the element was already processed;
        silently skips (with a warning) duplicate element names, except the
        EmissionFilterRef/ExcitationFilterRef pair which legitimately share
        a name.
        """
        elementName = element.getName()
        if element in self.objects:
            raise ModelProcessingError(
                "Element %s has been processed!" % element)
        if elementName in self.elementNameObjectMap:
            if (elementName == "EmissionFilterRef" or
                    elementName == "ExcitationFilterRef"):
                pass
            else:
                logging.warn(
                    "Element %s has duplicate object with same name,"
                    " skipping!" % element)
            return
        self.elementNameObjectMap[element.getName()] = obj
        self.objects[element] = obj
def getObject(self, element):
try:
return self.objects[element]
except KeyError:
return None
def getObjectByName(self, name):
try:
return self.elementNameObjectMap[name]
except KeyError:
return None
def getTopLevelSimpleType(self, name):
"""
Returns the simpleType that has a given name from the list of top
level simple types for this model.
"""
for simpleType in self.topLevelSimpleTypes:
if simpleType.name == name:
return simpleType
return None
def getAllHeaders(self):
headers = set()
for o in self.objects.values():
h = o.header_dependencies
if h is not None:
headers.union(h)
return sorted(headers)
def getEnumHeaders(self):
headers = set()
for obj in self.objects.values():
for prop in obj.properties.values():
if not prop.isEnumeration:
continue
h = prop.header
if h is not None:
headers.add(h)
return sorted(headers)
def getObjectHeaders(self):
headers = set()
for obj in self.objects.values():
h = obj.header
if h is not None:
headers.add(h)
return sorted(headers)
    def processAttributes(self, element):
        """
        Process all the attributes for a given element (a leaf).

        Adds each attribute, then each child element, to the model object
        previously registered for *element* via addObject().
        """
        attributes = element.getAttributeDefs()
        obj = self.getObject(element)
        length = len(attributes)
        for i, key in enumerate(attributes):
            logging.debug("Processing attribute: %s %d/%d"
                          % (key, i + 1, length))
            attribute = attributes[key]
            logging.debug("Dump: %s" % attribute.__dict__)
            obj.addAttribute(attribute)

        children = element.getChildren()
        length = len(children)
        for i, child in enumerate(children):
            logging.debug("Processing child: %s %d/%d"
                          % (child, i + 1, length))
            logging.debug("Dump: %s" % child.__dict__)
            obj.addElement(child)
    def processLeaf(self, element, parent):
        """
        Process an element (a leaf).

        Records the parent relationship, filters out elements that should
        not become model objects (implicit defines, mixed-content extension
        errors, non-concrete types), then creates and registers an
        OMEModelObject and processes its attributes.
        """
        e = element
        logging.debug("Processing leaf (topLevel? %s): (%s) --> (%s)"
                      % (e.topLevel, parent, e))
        e_name = e.getName()
        e_type = e.getType()
        if parent is not None:
            # Track every parent this element name appears under.
            if e_name not in self.parents:
                self.parents[e_name] = list()
            self.parents[e_name].append(parent.getName())
        if (not e.isExplicitDefine() and
                (e_name not in config.EXPLICIT_DEFINE_OVERRIDE and
                 not e.topLevel)):
            logging.info(
                "Element %s.%s not an explicit define, skipping."
                % (parent, e))
            return
        if e.getMixedExtensionError():
            logging.error(
                "Element %s.%s extension chain contains mixed and non-mixed"
                " content, skipping." % (parent, e))
            return
        if e_type != e_name and e_name not in config.EXPLICIT_DEFINE_OVERRIDE:
            # Type name differs from element name: not a concrete type.
            logging.info(
                "Element %s.%s is not a concrete type (%s != %s), skipping."
                % (parent, e, e_type, e_name))
            return
        obj = OMEModelObject(e, parent, self)
        self.addObject(e, obj)
        self.processAttributes(e)
    def processTree(self, elements, parent=None):
        """
        Recursively processes a tree of elements.

        Substitution-group members are skipped at the top level; elements
        registered in substitutionElement_map are swapped for their abstract
        element when encountered below the root.
        """
        length = len(elements)
        for i, element in enumerate(elements):
            if self.opts.lang.hasSubstitutionGroup(element.getName()):
                continue
            if (element.getName() in self.substitutionElement_map.keys()):
                if parent is not None:
                    element = self.substitutionElement_map[element.getName()]
                if parent is None:
                    continue
            logging.info("Processing element: %s %d/%d"
                         % (element, i + 1, length))
            self.processLeaf(element, parent)
            children = element.getChildren()
            if children:
                self.processTree(children, element)
def calculateMaxOccurs(self, o, prop):
if prop.isReference:
return 9999
return 1
def calculateMinOccurs(self, o, prop):
if prop.isReference or prop.isSettings:
return 0
return 1
def postProcessReferences(self):
    """
    Examines the list of objects in the model for instances that conform
    to the OME XML Schema referential object naming conventions and
    injects properties into referenced objects to provide back links.
    """
    # Maps referenced type short name -> list of reference descriptors.
    references = dict()
    # Pass 1: every concrete *Settings object gets a mandatory <X>Ref
    # property pointing back at the object it configures.
    for o in self.objects.values():
        if o.isSettings and not o.isAbstract:
            shortName = o.name.replace('Settings', '')
            ref = '%sRef' % (shortName)
            delegate = ReferenceDelegate(ref, ref, None)
            # Override back reference naming and default cardinality. Also
            # set the namespace to be the same
            delegate.name = ref
            delegate.minOccurs = 1
            delegate.maxOccurs = 1
            delegate.namespace = o.namespace
            prop = OMEModelProperty.fromReference(delegate, o, self)
            o.properties[ref] = prop
    # Pass 2: collect, per referenced type, every (object, property)
    # pair that references it, so back links can be injected in pass 3.
    for o in self.objects.values():
        for prop in o.properties.values():
            if self.opts.lang.hasAbstractType(prop.name):
                # Rewrite the delegate to the language-level abstract
                # type name before anything else inspects it.
                abstractName = self.opts.lang.abstractType(prop.name)
                prop.delegate.name = abstractName
                prop.delegate.type = abstractName
                prop.delegate.unmappedCleanName = abstractName
                prop.delegate.cleanName = abstractName
            # Only multi-valued reference-style properties (and not those
            # on OME itself or abstract proprietary types) get back links.
            if not prop.isReference and (
                    prop.isAttribute or prop.maxOccurs == 1 or
                    o.name == 'OME' or o.isAbstractProprietary):
                continue
            # Strip the referential suffix to recover the referenced
            # type's short name.
            shortName = config.REF_REGEX.sub('', prop.type)
            try:
                # Config may exempt specific (type, referrer) pairs.
                if o.name in config.BACK_REFERENCE_OVERRIDE[shortName]:
                    continue
            except KeyError:
                pass
            if shortName not in references:
                references[shortName] = list()
            v = {'data_type': o.name, 'property_name': prop.methodName,
                 'plural': prop.plural,
                 'maxOccurs': self.calculateMaxOccurs(o, prop),
                 'minOccurs': self.calculateMinOccurs(o, prop),
                 'isOrdered': prop.isOrdered,
                 'isChildOrdered': prop.isChildOrdered,
                 'isParentOrdered': prop.isParentOrdered,
                 'isInjected': prop.isInjected}
            references[shortName].append(v)
    logging.debug("Model references: %s" % references)
    # Pass 3: inject a back-reference property into each referenced
    # object for every descriptor collected above.
    for o in self.objects.values():
        if o.name not in references:
            continue
        for ref in references[o.name]:
            key = '%s.%s' % (ref['data_type'], ref['property_name'])
            delegate = ReferenceDelegate(
                ref['data_type'], ref['data_type'], ref['plural'])
            delegate.minOccurs = ref['minOccurs']
            delegate.maxOccurs = ref['maxOccurs']
            prop = OMEModelProperty.fromReference(delegate, o, self)
            prop.key = key
            prop.isChildOrdered = ref['isChildOrdered']
            prop.isParentOrdered = ref['isParentOrdered']
            prop.isOrdered = ref['isOrdered']
            prop.isInjected = ref['isInjected']
            o.properties[key] = prop
@classmethod
def process(klass, contentHandler, opts):
    """
    Main process entry point. All instantiations of this class should be
    made through this class method unless you really know what you are
    doing.

    :param contentHandler: parsed-schema handler exposing ``getRoot()``
        and ``topLevelSimpleTypes``.
    :param opts: generation options (must provide a ``lang`` attribute).
    :return: a fully populated model instance.
    """
    # Modernized from the legacy ``process = classmethod(process)``
    # post-assignment to the @classmethod decorator; behavior identical.
    elements = contentHandler.getRoot().getChildren()
    model = klass(opts)
    model.populateSubstitutionGroups(elements)
    model.topLevelSimpleTypes = contentHandler.topLevelSimpleTypes
    model.processTree(elements)
    model.postProcessReferences()
    return model
def resolve_parents(self, element_name):
    """
    Resolves the parents of an element and returns them as an ordered list.

    Each direct parent name maps to its own recursively resolved
    parents; elements with no recorded parents yield ``None``.
    """
    my_parents = self.parents.get(element_name)
    if my_parents is None:
        return None
    return dict((name, self.resolve_parents(name)) for name in my_parents)
def _get_header_deps(self):
deps = set()
for o in self.objects.values():
dep = o.header
if dep is not None:
deps.add(dep)
deps.update(o.header_dependencies)
return sorted(deps)
header_dependencies = property(
_get_header_deps,
doc="""The model's dependencies for include/import in headers.""")
def populateSubstitutionGroups(self, elements):
    """
    Creates a mapping between substitution group elements and their type elements

    First records, for every element declaring a ``substitutionGroup``
    attribute, the group-to-base and base-to-group mappings on the
    language options; then maps each substitution group name to the
    element that provides it.
    """
    # Cleanup: the original enumerated both loops but never used the
    # index, called getAttrs() twice per element, and ended with a
    # dead ``continue``.
    for element in elements:
        attrs = element.getAttrs()
        if 'substitutionGroup' in attrs:
            substitutionGroup = attrs['substitutionGroup']
            base = element.getBase()
            self.opts.lang.abstract_type_map[substitutionGroup] = base
            self.opts.lang.substitutionGroup_map[base] = substitutionGroup
    for element in elements:
        if self.opts.lang.hasSubstitutionGroup(element.getName()):
            substitutionGroupName = self.opts.lang.substitutionGroup(
                element.getName())
            self.substitutionElement_map[substitutionGroupName] = element
| imunro/bioformats | components/xsd-fu/python/ome/modeltools/model.py | Python | gpl-2.0 | 13,488 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functions for working with the Inventory service
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import os
import re
from marshmallow import Schema, fields
import requests
import dgparse
log = logging.getLogger(__name__)
class CreateInstructionSchema(Schema):
    """Marshmallow schema for the CRUD "create" request envelope."""
    # Name of the inventory object type being created.
    object = fields.String(required=True, attribute="object")
    # List of raw record dictionaries to create.
    create = fields.List(fields.Raw, required=True)
class AuthenticationError(Exception):
    """Raised when the Inventory service rejects a login request."""
    pass
class InventoryClient(object):
    """
    Represents the Inventory Service and its public methods.
    """
    # Shared validator for the CRUD "create" request envelope.
    crudreq_schema = CreateInstructionSchema()

    def __init__(self, target_server):
        # NOTE(review): os.path.join is used to build URLs; that works on
        # POSIX but would emit backslashes on Windows -- consider
        # urllib.parse.urljoin. Left unchanged here.
        self.target_server = target_server
        self.crud_url = os.path.join(self.target_server, 'api/inventory/crud')
        self.upload_url = os.path.join(self.target_server, 'api/inventory/upload')
        self.login_url = os.path.join(self.target_server, 'api/inventory/authenticate')
        self.session = requests.Session()

    def set_credentials(self, username, password):
        # HTTP basic-auth credentials applied to every request on the session.
        self.session.auth = (username, password)

    def _get_schema(self, record_type):
        # Look up the dgparse validator schema for this record type.
        return dgparse.VALIDATORS[record_type]

    def login(self, username, password):
        """Authenticate and store the returned session cookie.

        Raises AuthenticationError when the server response is not OK.
        """
        self.set_credentials(username, password)
        resp = self.session.post(self.login_url, {})
        # Copy Session Token
        if resp.ok:
            self.session.cookies['session'] = resp.cookies['session']
        else:
            raise AuthenticationError

    def create(self, object_, record):
        """Creates a record"""
        schema = self._get_schema(object_)
        # marshmallow 2.x API: dump()/validate() return (data, errors).
        data, errors = schema.dump(record)
        errors.update(schema.validate(record))
        if errors:
            # Validation failed locally; return the errors without a request.
            return record, errors
        instruction = {
            'object': object_,
            'create': data if isinstance(data, list) else [data],
        }
        # validate instruction
        body, errors = self.crudreq_schema.dump(instruction)
        resp = self.session.post(self.crud_url, json=body)
        if resp.ok:
            # Server echoes the created records under the "create" key.
            return resp.json()['create'], {}
        else:
            try:
                return record, resp.json()
            except ValueError:
                # Body was not JSON; surface the raw text as the error.
                return record, {'create': resp.text}
def construct_service(config):
    """Build an InventoryClient from a configuration object."""
    return InventoryClient(config.target_server)
def parse_file_name(path):
    """
    Parse file names in accession, name, and format
    :param path:
    :return dictionary of matched items:

    A matching path looks like ".../AB123 Some name.gb", where "AB123"
    is the accession, "Some name" the display name and "gb" the format.
    Paths that do not match fall back to the basename as the accession.
    """
    # Fix: raw string for the pattern -- the original non-raw string
    # relied on Python passing \d, \s and \w through unchanged, which
    # raises DeprecationWarning (and eventually SyntaxError) on Python 3.
    matched = re.search(r"([A-Z]{2,}\d+)[\s\_]([^/]+)\.(\w+)$", path)
    if matched:
        return {
            'accession': matched.group(1),
            'name': matched.group(2),
            'format': matched.group(3),
        }
    else:
        return {'accession': os.path.basename(path)}
| DeskGen/dgcli | dgcli/inventory.py | Python | gpl-2.0 | 2,921 |
# Elpy, the Emacs Lisp Python Environment
# Copyright (C) 2013 Jorgen Schaefer
# Author: Jorgen Schaefer <contact@jorgenschaefer.de>
# URL: http://github.com/jorgenschaefer/elpy
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The Emacs Lisp Python Environment.
Elpy is a mode for Emacs to support writing Python code. This package
provides the backend within Python to support auto-completion,
documentation extraction, and navigation.
Emacs will start the protocol by running the module itself, like so:
python -m elpy
This will emit a greeting string on a single line, and then wait for
the protocol to start. Details of the protocol can be found in
elpy.rpc.
This package is unlikely to be useful on its own.
"""
__author__ = "Jorgen Schaefer"
__version__ = "1.5.1"
__license__ = "GPL"
| ProfessorX/Emacs-Laptop | elpa/elpy-20140810.7/elpy/__init__.py | Python | gpl-2.0 | 1,388 |
try:
from future_builtins import filter
except ImportError:
pass
from copy import deepcopy
###{standalone
from collections import OrderedDict
class Meta:
    """Container for a tree node's position metadata (filled in lazily)."""
    def __init__(self):
        # True until position attributes (line, column, ...) are assigned.
        self.empty = True
class Tree(object):
    """The main tree class.

    Creates a new tree, and stores "data" and "children" in attributes of the same name.
    Trees can be hashed and compared.

    Parameters:
        data: The name of the rule or alias
        children: List of matched sub-rules and terminals
        meta: Line & Column numbers (if ``propagate_positions`` is enabled).
            meta attributes: line, column, start_pos, end_line, end_column, end_pos
    """
    def __init__(self, data, children, meta=None):
        self.data = data
        self.children = children
        # Stored privately; a Meta instance is only created on first access.
        self._meta = meta

    @property
    def meta(self):
        # Lazily allocate the Meta container so trees without position
        # info stay cheap.
        if self._meta is None:
            self._meta = Meta()
        return self._meta

    def __repr__(self):
        return 'Tree(%r, %r)' % (self.data, self.children)

    def _pretty_label(self):
        # Hook for subclasses to customize the label used by pretty().
        return self.data

    def _pretty(self, level, indent_str):
        # A single non-tree child is rendered inline with the label.
        if len(self.children) == 1 and not isinstance(self.children[0], Tree):
            return [indent_str*level, self._pretty_label(), '\t', '%s' % (self.children[0],), '\n']

        l = [indent_str*level, self._pretty_label(), '\n']
        for n in self.children:
            if isinstance(n, Tree):
                l += n._pretty(level+1, indent_str)
            else:
                l += [indent_str*(level+1), '%s' % (n,), '\n']

        return l

    def pretty(self, indent_str='  '):
        """Returns an indented string representation of the tree.

        Great for debugging.
        """
        return ''.join(self._pretty(0, indent_str))

    def __eq__(self, other):
        # EAFP: any object lacking data/children simply compares unequal.
        try:
            return self.data == other.data and self.children == other.children
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Consistent with __eq__: hash over (data, children).
        return hash((self.data, tuple(self.children)))

    def iter_subtrees(self):
        """Depth-first iteration.

        Iterates over all the subtrees, never returning to the same node twice (Lark's parse-tree is actually a DAG).
        """
        # The queue grows while being iterated; OrderedDict keyed on id()
        # both deduplicates (DAG nodes) and preserves discovery order.
        queue = [self]
        subtrees = OrderedDict()
        for subtree in queue:
            subtrees[id(subtree)] = subtree
            queue += [c for c in reversed(subtree.children)
                      if isinstance(c, Tree) and id(c) not in subtrees]

        del queue
        return reversed(list(subtrees.values()))

    def find_pred(self, pred):
        """Returns all nodes of the tree that evaluate pred(node) as true."""
        return filter(pred, self.iter_subtrees())

    def find_data(self, data):
        """Returns all nodes of the tree whose data equals the given data."""
        return self.find_pred(lambda t: t.data == data)

###}

    def expand_kids_by_index(self, *indices):
        """Expand (inline) children at the given indices"""
        for i in sorted(indices, reverse=True):  # reverse so that changing tail won't affect indices
            kid = self.children[i]
            self.children[i:i+1] = kid.children

    def scan_values(self, pred):
        # Yield every non-Tree leaf value for which pred(value) is true.
        for c in self.children:
            if isinstance(c, Tree):
                for t in c.scan_values(pred):
                    yield t
            else:
                if pred(c):
                    yield c

    def iter_subtrees_topdown(self):
        """Breadth-first iteration.

        Iterates over all the subtrees, return nodes in order like pretty() does.
        """
        stack = [self]
        while stack:
            node = stack.pop()
            if not isinstance(node, Tree):
                continue
            yield node
            # Reversed so children are popped (and yielded) left-to-right.
            for n in reversed(node.children):
                stack.append(n)

    def __deepcopy__(self, memo):
        # Children are deep-copied; meta is shared by reference.
        return type(self)(self.data, deepcopy(self.children, memo), meta=self._meta)

    def copy(self):
        # Shallow copy: the children list object is shared.
        return type(self)(self.data, self.children)

    def set(self, data, children):
        self.data = data
        self.children = children

    # XXX Deprecated! Here for backwards compatibility <0.6.0
    @property
    def line(self):
        return self.meta.line
    @property
    def column(self):
        return self.meta.column
    @property
    def end_line(self):
        return self.meta.end_line
    @property
    def end_column(self):
        return self.meta.end_column
class SlottedTree(Tree):
    """Memory-efficient Tree variant with a fixed attribute set (__slots__)."""
    __slots__ = 'data', 'children', 'rule', '_meta'
def pydot__tree_to_png(tree, filename, rankdir="LR", **kwargs):
    """Render *tree* as a PNG image written to *filename* (requires pydot)."""
    pydot__tree_to_graph(tree, rankdir, **kwargs).write_png(filename)
def pydot__tree_to_dot(tree, filename, rankdir="LR", **kwargs):
    """Render *tree* in DOT format written to *filename* (requires pydot)."""
    pydot__tree_to_graph(tree, rankdir, **kwargs).write(filename)
def pydot__tree_to_graph(tree, rankdir="LR", **kwargs):
    """Creates a colorful image that represents the tree (data+children, without meta)

    Possible values for `rankdir` are "TB", "LR", "BT", "RL", corresponding to
    directed graphs drawn from top to bottom, from left to right, from bottom to
    top, and from right to left, respectively.

    `kwargs` can be any graph attribute (e. g. `dpi=200`). For a list of
    possible attributes, see https://www.graphviz.org/doc/info/attrs.html.
    """
    # Imported locally so pydot stays an optional dependency.
    import pydot
    graph = pydot.Dot(graph_type='digraph', rankdir=rankdir, **kwargs)

    # One-element list as a mutable counter: the nested functions mutate
    # it in place (this file supports Python 2, which has no `nonlocal`).
    i = [0]

    def new_leaf(leaf):
        # Leaves are labeled with their repr().
        node = pydot.Node(i[0], label=repr(leaf))
        i[0] += 1
        graph.add_node(node)
        return node

    def _to_pydot(subtree):
        # Deterministic color derived from the node's data; OR-ing with
        # 0x808080 keeps every RGB channel in the light half.
        color = hash(subtree.data) & 0xffffff
        color |= 0x808080

        subnodes = [_to_pydot(child) if isinstance(child, Tree) else new_leaf(child)
                    for child in subtree.children]
        node = pydot.Node(i[0], style="filled", fillcolor="#%x" % color, label=subtree.data)
        i[0] += 1
        graph.add_node(node)

        for subnode in subnodes:
            graph.add_edge(pydot.Edge(node, subnode))

        return node

    _to_pydot(tree)
    return graph
| erezsh/lark | lark/tree.py | Python | mit | 6,212 |
import os, glob, sys, argparse
import intf_tools as it
# NOTE: this script is Python 2 (print statement below).

# converts from digitizer units to V/m
Epd = -30./2**15
# threshold for identifying flashes
Thresh = 1 #V/m
# slack on either side to process
Slack = 50 #ms

parser = argparse.ArgumentParser(description="Plot processed DITF data")
parser.add_argument('--version', action='version', version='%(prog)s 1.0.0111')
parser.add_argument('input_files', nargs='+',
                    help='Path to input FA files')
arguments = parser.parse_args(sys.argv[1:])
arguments.input_files.sort()

# Scan each raw waveform file for samples exceeding the flash threshold
# and print the file together with its bracketing time window.
for fileS in arguments.input_files:
    head = it.RawHeader( fileS )
    # the number of samples to read in
    S = (head.preTrigger+head.postTrigger)*head.samplesPerRecord
    # this will take a while
    wave = it.read_raw_waveform_file_data( fileS, head, 0, S )
    # Convert raw counts to field strength: remove the 2**15 mid-scale
    # offset, then scale by Epd (see above).
    wave[1] -= 2**15
    wave[1] *= Epd
    # Boolean mask of samples whose magnitude exceeds the threshold.
    m = abs(wave[1]) > Thresh
    if len( wave[0,m] ) == 0:
        # No sample crossed the threshold: no flash in this file.
        continue
    # Window start/end, padded by Slack on both sides (ms per comments above).
    t0 = round( wave[0,m][0]-Slack )
    t1 = round( wave[0,m][-1]+Slack )
    print fileS, str(t0).rjust(9), str(t1).rjust(9)
| mikestock/intf-tools | intf_find_flashes.py | Python | gpl-2.0 | 997 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import functools
import sys
import traceback

import fixtures
import netaddr

from nova.compute import manager
from nova import exception
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import utils as test_utils
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova.virt import event as virtevent
from nova.virt import fake
LOG = logging.getLogger(__name__)
def catch_notimplementederror(f):
    """Decorator to simplify catching drivers raising NotImplementedError

    If a particular call makes a driver raise NotImplementedError, we
    log it so that we can extract this information afterwards to
    automatically generate a hypervisor/feature support matrix.
    """
    # functools.wraps replaces the manual __name__/__doc__ copying and
    # additionally preserves __module__, __dict__ and __wrapped__.
    @functools.wraps(f)
    def wrapped_func(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except NotImplementedError:
            # frame[2] is the name of the function that raised.
            frame = traceback.extract_tb(sys.exc_info()[2])[-1]
            LOG.error('%(driver)s does not implement %(method)s' % {
                'driver': type(self.connection),
                'method': frame[2]})
    return wrapped_func
class _FakeDriverBackendTestCase(object):
    """Mixin that swaps the real libvirt backend for in-process fakes.

    Lets driver tests run without a libvirt daemon, a real image service
    or the default instances_path. The monkeypatch/stub ordering below
    is load-bearing: fakelibvirt must be installed in sys.modules before
    nova.virt.libvirt.driver is imported.
    """

    def _setup_fakelibvirt(self):
        # So that the _supports_direct_io does the test based
        # on the current working directory, instead of the
        # default instances_path which doesn't exist
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # Put fakelibvirt in place
        if 'libvirt' in sys.modules:
            self.saved_libvirt = sys.modules['libvirt']
        else:
            self.saved_libvirt = None

        import nova.tests.virt.libvirt.fake_imagebackend as fake_imagebackend
        import nova.tests.virt.libvirt.fake_libvirt_utils as fake_libvirt_utils
        import nova.tests.virt.libvirt.fakelibvirt as fakelibvirt

        # Must happen before importing nova.virt.libvirt.* so those
        # modules bind to the fake instead of the real libvirt.
        sys.modules['libvirt'] = fakelibvirt
        import nova.virt.libvirt.driver
        import nova.virt.libvirt.firewall

        # Patch every module-level alias through which driver code
        # reaches libvirt or the image backend.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.imagebackend',
            fake_imagebackend))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt',
            fakelibvirt))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.firewall.libvirt',
            fakelibvirt))

        self.flags(rescue_image_id="2",
                   rescue_kernel_id="3",
                   rescue_ramdisk_id=None,
                   libvirt_snapshots_directory='./')

        # No-op stand-ins for operations that would touch disk or
        # perform a real migration.
        def fake_extend(image, size):
            pass

        def fake_migrateToURI(*a):
            pass

        def fake_make_drive(_self, _path):
            pass

        def fake_get_instance_disk_info(_self, instance, xml=None,
                                        block_device_info=None):
            return '[]'

        def fake_delete_instance_files(_self, _instance):
            pass

        self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
                       'get_instance_disk_info',
                       fake_get_instance_disk_info)

        self.stubs.Set(nova.virt.libvirt.driver.disk,
                       'extend', fake_extend)

        self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
                       '_delete_instance_files',
                       fake_delete_instance_files)

        # Like the existing fakelibvirt.migrateToURI, do nothing,
        # but don't fail for these tests.
        self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain,
                       'migrateToURI', fake_migrateToURI)

        # We can't actually make a config drive v2 because ensure_tree has
        # been faked out
        self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
                       'make_drive', fake_make_drive)

    def _teardown_fakelibvirt(self):
        # Restore libvirt
        if self.saved_libvirt:
            sys.modules['libvirt'] = self.saved_libvirt

    def setUp(self):
        super(_FakeDriverBackendTestCase, self).setUp()
        # TODO(sdague): it would be nice to do this in a way that only
        # the relevant backends where replaced for tests, though this
        # should not harm anything by doing it for all backends
        fake_image.stub_out_image_service(self.stubs)
        self._setup_fakelibvirt()

    def tearDown(self):
        fake_image.FakeImageService_reset()
        self._teardown_fakelibvirt()
        super(_FakeDriverBackendTestCase, self).tearDown()
class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
    """Test that ComputeManager can successfully load both
    old style and new style drivers and end up with the correct
    final class.
    """

    # if your driver supports being tested in a fake way, it can go here
    #
    # both long form and short form drivers are supported
    new_drivers = {
        'nova.virt.fake.FakeDriver': 'FakeDriver',
        'nova.virt.libvirt.LibvirtDriver': 'LibvirtDriver',
        'fake.FakeDriver': 'FakeDriver',
        'libvirt.LibvirtDriver': 'LibvirtDriver'
    }

    def test_load_new_drivers(self):
        """Every supported driver path must resolve to the expected class."""
        for cls, driver in self.new_drivers.iteritems():
            self.flags(compute_driver=cls)
            # NOTE(sdague) the try block is to make it easier to debug a
            # failure by knowing which driver broke
            try:
                cm = manager.ComputeManager()
            except Exception as e:
                self.fail("Couldn't load driver %s - %s" % (cls, e))

            # Fix: failure message typo ("Could't" -> "Couldn't").
            self.assertEqual(cm.driver.__class__.__name__, driver,
                             "Couldn't load driver %s" % cls)

    def test_fail_to_load_new_drivers(self):
        """A bogus driver path must abort ComputeManager startup."""
        self.flags(compute_driver='nova.virt.amiga')

        def _fake_exit(error):
            raise test.TestingException()

        self.stubs.Set(sys, 'exit', _fake_exit)
        self.assertRaises(test.TestingException, manager.ComputeManager)
class _VirtDriverTestCase(_FakeDriverBackendTestCase):
def setUp(self):
super(_VirtDriverTestCase, self).setUp()
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.connection = importutils.import_object(self.driver_module,
fake.FakeVirtAPI())
self.ctxt = test_utils.get_test_admin_context()
self.image_service = fake_image.FakeImageService()
def _get_running_instance(self):
instance_ref = test_utils.get_test_instance()
network_info = test_utils.get_test_network_info()
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
image_info = test_utils.get_test_image_info(None, instance_ref)
self.connection.spawn(self.ctxt, instance_ref, image_info,
[], 'herp', network_info=network_info)
return instance_ref, network_info
@catch_notimplementederror
def test_init_host(self):
self.connection.init_host('myhostname')
@catch_notimplementederror
def test_list_instances(self):
self.connection.list_instances()
@catch_notimplementederror
def test_list_instance_uuids(self):
self.connection.list_instance_uuids()
@catch_notimplementederror
def test_spawn(self):
instance_ref, network_info = self._get_running_instance()
domains = self.connection.list_instances()
self.assertIn(instance_ref['name'], domains)
num_instances = self.connection.get_num_instances()
self.assertEqual(1, num_instances)
@catch_notimplementederror
def test_snapshot_not_running(self):
instance_ref = test_utils.get_test_instance()
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
self.assertRaises(exception.InstanceNotRunning,
self.connection.snapshot,
self.ctxt, instance_ref, img_ref['id'],
lambda *args, **kwargs: None)
@catch_notimplementederror
def test_snapshot_running(self):
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
instance_ref, network_info = self._get_running_instance()
self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'],
lambda *args, **kwargs: None)
@catch_notimplementederror
def test_live_snapshot_not_running(self):
instance_ref = test_utils.get_test_instance()
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
self.assertRaises(exception.InstanceNotRunning,
self.connection.live_snapshot,
self.ctxt, instance_ref, img_ref['id'],
lambda *args, **kwargs: None)
@catch_notimplementederror
def test_live_snapshot_running(self):
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
instance_ref, network_info = self._get_running_instance()
self.connection.live_snapshot(self.ctxt, instance_ref, img_ref['id'],
lambda *args, **kwargs: None)
@catch_notimplementederror
def test_reboot(self):
reboot_type = "SOFT"
instance_ref, network_info = self._get_running_instance()
self.connection.reboot(self.ctxt, instance_ref, network_info,
reboot_type)
@catch_notimplementederror
def test_get_host_ip_addr(self):
host_ip = self.connection.get_host_ip_addr()
# Will raise an exception if it's not a valid IP at all
ip = netaddr.IPAddress(host_ip)
# For now, assume IPv4.
self.assertEquals(ip.version, 4)
@catch_notimplementederror
def test_set_admin_password(self):
instance_ref, network_info = self._get_running_instance()
self.connection.set_admin_password(instance_ref, 'p4ssw0rd')
@catch_notimplementederror
def test_inject_file(self):
instance_ref, network_info = self._get_running_instance()
self.connection.inject_file(instance_ref,
base64.b64encode('/testfile'),
base64.b64encode('testcontents'))
@catch_notimplementederror
def test_resume_state_on_host_boot(self):
instance_ref, network_info = self._get_running_instance()
self.connection.resume_state_on_host_boot(self.ctxt, instance_ref,
network_info)
@catch_notimplementederror
def test_rescue(self):
instance_ref, network_info = self._get_running_instance()
self.connection.rescue(self.ctxt, instance_ref, network_info, None, '')
@catch_notimplementederror
def test_unrescue_unrescued_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.unrescue(instance_ref, network_info)
@catch_notimplementederror
def test_unrescue_rescued_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.rescue(self.ctxt, instance_ref, network_info, None, '')
self.connection.unrescue(instance_ref, network_info)
@catch_notimplementederror
def test_poll_rebooting_instances(self):
instances = [self._get_running_instance()]
self.connection.poll_rebooting_instances(10, instances)
@catch_notimplementederror
def test_migrate_disk_and_power_off(self):
instance_ref, network_info = self._get_running_instance()
instance_type_ref = test_utils.get_test_instance_type()
self.connection.migrate_disk_and_power_off(
self.ctxt, instance_ref, 'dest_host', instance_type_ref,
network_info)
@catch_notimplementederror
def test_power_off(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
@catch_notimplementederror
def test_power_on_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_on(self.ctxt, instance_ref,
network_info, None)
@catch_notimplementederror
def test_power_on_powered_off(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
self.connection.power_on(self.ctxt, instance_ref, network_info, None)
@catch_notimplementederror
def test_soft_delete(self):
instance_ref, network_info = self._get_running_instance()
self.connection.soft_delete(instance_ref)
@catch_notimplementederror
def test_restore_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.restore(instance_ref)
@catch_notimplementederror
def test_restore_soft_deleted(self):
instance_ref, network_info = self._get_running_instance()
self.connection.soft_delete(instance_ref)
self.connection.restore(instance_ref)
@catch_notimplementederror
def test_pause(self):
instance_ref, network_info = self._get_running_instance()
self.connection.pause(instance_ref)
@catch_notimplementederror
def test_unpause_unpaused_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.unpause(instance_ref)
@catch_notimplementederror
def test_unpause_paused_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.pause(instance_ref)
self.connection.unpause(instance_ref)
@catch_notimplementederror
def test_suspend(self):
instance_ref, network_info = self._get_running_instance()
self.connection.suspend(instance_ref)
@catch_notimplementederror
def test_resume_unsuspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.resume(self.ctxt, instance_ref, network_info)
@catch_notimplementederror
def test_resume_suspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.suspend(instance_ref)
self.connection.resume(self.ctxt, instance_ref, network_info)
@catch_notimplementederror
def test_destroy_instance_nonexistent(self):
fake_instance = {'id': 42, 'name': 'I just made this up!',
'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00'}
network_info = test_utils.get_test_network_info()
self.connection.destroy(fake_instance, network_info)
@catch_notimplementederror
def test_destroy_instance(self):
instance_ref, network_info = self._get_running_instance()
self.assertIn(instance_ref['name'],
self.connection.list_instances())
self.connection.destroy(instance_ref, network_info)
self.assertNotIn(instance_ref['name'],
self.connection.list_instances())
@catch_notimplementederror
def test_get_volume_connector(self):
result = self.connection.get_volume_connector({'id': 'fake'})
self.assertTrue('ip' in result)
self.assertTrue('initiator' in result)
self.assertTrue('host' in result)
@catch_notimplementederror
def test_attach_detach_volume(self):
instance_ref, network_info = self._get_running_instance()
connection_info = {
"driver_volume_type": "fake",
"serial": "fake_serial",
}
self.connection.attach_volume(None, connection_info, instance_ref,
'/dev/sda')
self.connection.detach_volume(connection_info, instance_ref,
'/dev/sda')
@catch_notimplementederror
def test_swap_volume(self):
instance_ref, network_info = self._get_running_instance()
self.connection.attach_volume(None, {'driver_volume_type': 'fake'},
instance_ref,
'/dev/sda')
self.connection.swap_volume({'driver_volume_type': 'fake'},
{'driver_volume_type': 'fake'},
instance_ref,
'/dev/sda')
@catch_notimplementederror
def test_attach_detach_different_power_states(self):
instance_ref, network_info = self._get_running_instance()
connection_info = {
"driver_volume_type": "fake",
"serial": "fake_serial",
}
self.connection.power_off(instance_ref)
self.connection.attach_volume(None, connection_info, instance_ref,
'/dev/sda')
bdm = {
'root_device_name': None,
'swap': None,
'ephemerals': [],
'block_device_mapping': [{
'instance_uuid': instance_ref['uuid'],
'connection_info': {'driver_volume_type': 'fake'},
'mount_device': '/dev/sda',
'delete_on_termination': False,
'virtual_name': None,
'snapshot_id': None,
'volume_id': 'abcdedf',
'volume_size': None,
'no_device': None
}]
}
self.connection.power_on(self.ctxt, instance_ref, network_info, bdm)
self.connection.detach_volume(connection_info,
instance_ref,
'/dev/sda')
@catch_notimplementederror
def test_get_info(self):
instance_ref, network_info = self._get_running_instance()
info = self.connection.get_info(instance_ref)
self.assertIn('state', info)
self.assertIn('max_mem', info)
self.assertIn('mem', info)
self.assertIn('num_cpu', info)
self.assertIn('cpu_time', info)
@catch_notimplementederror
def test_get_info_for_unknown_instance(self):
self.assertRaises(exception.NotFound,
self.connection.get_info,
{'name': 'I just made this name up'})
@catch_notimplementederror
def test_get_diagnostics(self):
instance_ref, network_info = self._get_running_instance()
self.connection.get_diagnostics(instance_ref)
@catch_notimplementederror
def test_block_stats(self):
instance_ref, network_info = self._get_running_instance()
stats = self.connection.block_stats(instance_ref['name'], 'someid')
self.assertEquals(len(stats), 5)
@catch_notimplementederror
def test_interface_stats(self):
instance_ref, network_info = self._get_running_instance()
stats = self.connection.interface_stats(instance_ref['name'], 'someid')
self.assertEquals(len(stats), 8)
@catch_notimplementederror
def test_get_console_output(self):
fake_libvirt_utils.files['dummy.log'] = ''
instance_ref, network_info = self._get_running_instance()
console_output = self.connection.get_console_output(instance_ref)
self.assertTrue(isinstance(console_output, basestring))
@catch_notimplementederror
def test_get_vnc_console(self):
instance_ref, network_info = self._get_running_instance()
vnc_console = self.connection.get_vnc_console(instance_ref)
self.assertIn('internal_access_path', vnc_console)
self.assertIn('host', vnc_console)
self.assertIn('port', vnc_console)
@catch_notimplementederror
def test_get_spice_console(self):
instance_ref, network_info = self._get_running_instance()
spice_console = self.connection.get_spice_console(instance_ref)
self.assertIn('internal_access_path', spice_console)
self.assertIn('host', spice_console)
self.assertIn('port', spice_console)
self.assertIn('tlsPort', spice_console)
@catch_notimplementederror
def test_get_console_pool_info(self):
instance_ref, network_info = self._get_running_instance()
console_pool = self.connection.get_console_pool_info(instance_ref)
self.assertIn('address', console_pool)
self.assertIn('username', console_pool)
self.assertIn('password', console_pool)
@catch_notimplementederror
def test_refresh_security_group_rules(self):
# FIXME: Create security group and add the instance to it
instance_ref, network_info = self._get_running_instance()
self.connection.refresh_security_group_rules(1)
@catch_notimplementederror
def test_refresh_security_group_members(self):
# FIXME: Create security group and add the instance to it
instance_ref, network_info = self._get_running_instance()
self.connection.refresh_security_group_members(1)
    @catch_notimplementederror
    def test_refresh_provider_fw_rules(self):
        """Smoke test: refreshing provider firewall rules must not raise."""
        instance_ref, network_info = self._get_running_instance()
        self.connection.refresh_provider_fw_rules()
    @catch_notimplementederror
    def test_ensure_filtering_for_instance(self):
        """Filtering rules can be set up for a (non-running) test instance."""
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.ensure_filtering_rules_for_instance(instance_ref,
                                                            network_info)
    @catch_notimplementederror
    def test_unfilter_instance(self):
        """Filtering rules can be torn down for a test instance."""
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.unfilter_instance(instance_ref, network_info)
    @catch_notimplementederror
    def test_live_migration(self):
        """Smoke test live migration with no-op success/recover callbacks."""
        instance_ref, network_info = self._get_running_instance()
        self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
                                       lambda *a: None, lambda *a: None)
    @catch_notimplementederror
    def _check_available_resouce_fields(self, host_status):
        """Assert host_status carries every standard resource-reporting key.

        NOTE(review): the name keeps the historical 'resouce' typo because
        other tests in this class call it under that exact name.
        """
        keys = ['vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
                'memory_mb_used', 'hypervisor_type', 'hypervisor_version',
                'hypervisor_hostname', 'cpu_info', 'disk_available_least']
        for key in keys:
            self.assertIn(key, host_status)
    @catch_notimplementederror
    def test_get_host_stats(self):
        """Host stats expose all standard resource fields."""
        host_status = self.connection.get_host_stats()
        self._check_available_resouce_fields(host_status)
    @catch_notimplementederror
    def test_get_available_resource(self):
        """Available-resource report exposes all standard resource fields."""
        available_resource = self.connection.get_available_resource(
            'myhostname')
        self._check_available_resouce_fields(available_resource)
    @catch_notimplementederror
    def test_set_host_enabled(self):
        """Smoke test: enabling the host must not raise."""
        self.connection.set_host_enabled('a useless argument?', True)
    @catch_notimplementederror
    def test_get_host_uptime(self):
        """Smoke test: querying host uptime must not raise."""
        self.connection.get_host_uptime('a useless argument?')
    @catch_notimplementederror
    def test_host_power_action_reboot(self):
        """Smoke test: the 'reboot' host power action must not raise."""
        self.connection.host_power_action('a useless argument?', 'reboot')
    @catch_notimplementederror
    def test_host_power_action_shutdown(self):
        """Smoke test: the 'shutdown' host power action must not raise."""
        self.connection.host_power_action('a useless argument?', 'shutdown')
    @catch_notimplementederror
    def test_host_power_action_startup(self):
        """Smoke test: the 'startup' host power action must not raise."""
        self.connection.host_power_action('a useless argument?', 'startup')
    @catch_notimplementederror
    def test_add_to_aggregate(self):
        """Smoke test: adding a host to an aggregate must not raise."""
        self.connection.add_to_aggregate(self.ctxt, 'aggregate', 'host')
    @catch_notimplementederror
    def test_remove_from_aggregate(self):
        """Smoke test: removing a host from an aggregate must not raise."""
        self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')
    def test_events(self):
        """Lifecycle events emitted by the driver reach a registered
        listener, in emission order."""
        got_events = []

        def handler(event):
            got_events.append(event)

        self.connection.register_event_listener(handler)
        event1 = virtevent.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            virtevent.EVENT_LIFECYCLE_STARTED)
        event2 = virtevent.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            virtevent.EVENT_LIFECYCLE_PAUSED)
        self.connection.emit_event(event1)
        self.connection.emit_event(event2)
        want_events = [event1, event2]
        self.assertEqual(want_events, got_events)
        # A second batch must append after the first, preserving order.
        event3 = virtevent.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            virtevent.EVENT_LIFECYCLE_RESUMED)
        event4 = virtevent.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            virtevent.EVENT_LIFECYCLE_STOPPED)
        self.connection.emit_event(event3)
        self.connection.emit_event(event4)
        want_events = [event1, event2, event3, event4]
        self.assertEqual(want_events, got_events)
    def test_event_bad_object(self):
        """Emitting a non-Event object raises ValueError."""
        # Passing in something which does not inherit
        # from virtevent.Event
        def handler(event):
            pass

        self.connection.register_event_listener(handler)
        badevent = {
            "foo": "bar"
        }
        self.assertRaises(ValueError,
                          self.connection.emit_event,
                          badevent)
    def test_event_bad_callback(self):
        """A listener exception must be swallowed by emit_event."""
        # Check that if a callback raises an exception,
        # it does not propagate back out of the
        # 'emit_event' call
        def handler(event):
            raise Exception("Hit Me!")

        self.connection.register_event_listener(handler)
        event1 = virtevent.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            virtevent.EVENT_LIFECYCLE_STARTED)
        self.connection.emit_event(event1)
class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
    """Runs the common driver tests against the abstract base driver."""

    def setUp(self):
        self.driver_module = "nova.virt.driver.ComputeDriver"
        super(AbstractDriverTestCase, self).setUp()
class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
    """Runs the common driver tests against the fake driver."""

    def setUp(self):
        self.driver_module = 'nova.virt.fake.FakeDriver'
        # 'myhostname' matches the node name other tests query against.
        fake.set_nodes(['myhostname'])
        super(FakeConnectionTestCase, self).setUp()
class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
    """Runs the common driver tests against the libvirt driver."""

    def setUp(self):
        # Point _VirtDriverTestCase at the right module
        self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
        super(LibvirtConnTestCase, self).setUp()

    def test_force_hard_reboot(self):
        # A zero soft-reboot window forces the hard-reboot code path.
        self.flags(libvirt_wait_soft_reboot_seconds=0)
        self.test_reboot()

    def test_migrate_disk_and_power_off(self):
        # there is lack of fake stuff to execute this method. so pass.
        self.skipTest("Test nothing, but this method"
                      " needed to override superclass.")
| TieWei/nova | nova/tests/virt/test_virt_drivers.py | Python | apache-2.0 | 27,963 |
import datetime
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
MIN_PHOTO_DIMENSION = 5
MAX_PHOTO_DIMENSION = 1000
# Option codes for comment-form hidden fields.
PHOTOS_REQUIRED = 'pr'
PHOTOS_OPTIONAL = 'pa'
RATINGS_REQUIRED = 'rr'
RATINGS_OPTIONAL = 'ra'
IS_PUBLIC = 'ip'
# What users get if they don't have any karma.
DEFAULT_KARMA = 5
KARMA_NEEDED_BEFORE_DISPLAYED = 3
class CommentManager(models.Manager):
    """Manager for Comment with form-hash, rating-parsing and karma helpers."""

    def get_security_hash(self, options, photo_options, rating_options, target):
        """
        Returns the MD5 hash of the given options (a comma-separated string such as
        'pa,ra') and target (something like 'lcom.eventtimes:5157'). Used to
        validate that submitted form options have not been tampered-with.
        """
        # The md5 module is deprecated since Python 2.5; hashlib.md5()
        # produces byte-identical digests to md5.new().
        import hashlib
        return hashlib.md5(options + photo_options + rating_options + target + settings.SECRET_KEY).hexdigest()

    def get_rating_options(self, rating_string):
        """
        Given a rating_string, this returns a tuple of (rating_range, options).
        >>> s = "scale:1-10|First_category|Second_category"
        >>> Comment.objects.get_rating_options(s)
        ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], ['First category', 'Second category'])
        """
        # rating_range looks like "scale:1-10"; everything after the first
        # '|' is the list of category names.
        rating_range, options = rating_string.split('|', 1)
        rating_range = range(int(rating_range[6:].split('-')[0]), int(rating_range[6:].split('-')[1]) + 1)
        choices = [c.replace('_', ' ') for c in options.split('|')]
        return rating_range, choices

    def get_list_with_karma(self, **kwargs):
        """
        Returns a list of Comment objects matching the given lookup terms, with
        _karma_total_good and _karma_total_bad filled.
        """
        extra_kwargs = {}
        extra_kwargs.setdefault('select', {})
        extra_kwargs['select']['_karma_total_good'] = 'SELECT COUNT(*) FROM comments_karmascore, comments_comment WHERE comments_karmascore.comment_id=comments_comment.id AND score=1'
        extra_kwargs['select']['_karma_total_bad'] = 'SELECT COUNT(*) FROM comments_karmascore, comments_comment WHERE comments_karmascore.comment_id=comments_comment.id AND score=-1'
        return self.filter(**kwargs).extra(**extra_kwargs)

    def user_is_moderator(self, user):
        """True if user is a superuser or in the configured moderators group."""
        if user.is_superuser:
            return True
        for g in user.groups.all():
            if g.id == settings.COMMENTS_MODERATORS_GROUP:
                return True
        return False
class Comment(models.Model):
    """A comment by a registered user."""
    user = models.ForeignKey(User)
    content_type = models.ForeignKey(ContentType)
    object_id = models.IntegerField(_('object ID'))
    headline = models.CharField(_('headline'), max_length=255, blank=True)
    comment = models.TextField(_('comment'), max_length=3000)
    rating1 = models.PositiveSmallIntegerField(_('rating #1'), blank=True, null=True)
    rating2 = models.PositiveSmallIntegerField(_('rating #2'), blank=True, null=True)
    rating3 = models.PositiveSmallIntegerField(_('rating #3'), blank=True, null=True)
    rating4 = models.PositiveSmallIntegerField(_('rating #4'), blank=True, null=True)
    rating5 = models.PositiveSmallIntegerField(_('rating #5'), blank=True, null=True)
    rating6 = models.PositiveSmallIntegerField(_('rating #6'), blank=True, null=True)
    rating7 = models.PositiveSmallIntegerField(_('rating #7'), blank=True, null=True)
    rating8 = models.PositiveSmallIntegerField(_('rating #8'), blank=True, null=True)
    # This field designates whether to use this row's ratings in aggregate
    # functions (summaries). We need this because people are allowed to post
    # multiple reviews on the same thing, but the system will only use the
    # latest one (with valid_rating=True) in tallying the reviews.
    valid_rating = models.BooleanField(_('is valid rating'))
    submit_date = models.DateTimeField(_('date/time submitted'), auto_now_add=True)
    is_public = models.BooleanField(_('is public'))
    ip_address = models.IPAddressField(_('IP address'), blank=True, null=True)
    is_removed = models.BooleanField(_('is removed'), help_text=_('Check this box if the comment is inappropriate. A "This comment has been removed" message will be displayed instead.'))
    site = models.ForeignKey(Site)
    objects = CommentManager()

    class Meta:
        verbose_name = _('comment')
        verbose_name_plural = _('comments')
        ordering = ('-submit_date',)

    def __unicode__(self):
        return "%s: %s..." % (self.user.username, self.comment[:100])

    def get_absolute_url(self):
        """URL of the commented object with an anchor to this comment."""
        try:
            return self.get_content_object().get_absolute_url() + "#c" + str(self.id)
        except AttributeError:
            # Content object gone, or it defines no get_absolute_url().
            return ""

    def get_crossdomain_url(self):
        return "/r/%d/%d/" % (self.content_type_id, self.object_id)

    def get_flag_url(self):
        return "/comments/flag/%s/" % self.id

    def get_deletion_url(self):
        return "/comments/delete/%s/" % self.id

    def get_content_object(self):
        """
        Returns the object that this comment is a comment on. Returns None if
        the object no longer exists.
        """
        from django.core.exceptions import ObjectDoesNotExist
        try:
            return self.content_type.get_object_for_this_type(pk=self.object_id)
        except ObjectDoesNotExist:
            return None
    get_content_object.short_description = _('Content object')

    def _fill_karma_cache(self):
        """Helper function that populates good/bad karma caches."""
        good, bad = 0, 0
        # Bug fix: a reverse relation is a manager, not an iterable;
        # iterate its queryset via .all().
        for k in self.karmascore_set.all():
            if k.score == -1:
                bad += 1
            elif k.score == 1:
                good += 1
        self._karma_total_good, self._karma_total_bad = good, bad

    def get_good_karma_total(self):
        if not hasattr(self, "_karma_total_good"):
            self._fill_karma_cache()
        return self._karma_total_good

    def get_bad_karma_total(self):
        if not hasattr(self, "_karma_total_bad"):
            self._fill_karma_cache()
        return self._karma_total_bad

    def get_karma_total(self):
        if not hasattr(self, "_karma_total_good") or not hasattr(self, "_karma_total_bad"):
            self._fill_karma_cache()
        return self._karma_total_good + self._karma_total_bad

    def get_as_text(self):
        """Plain-text rendering of the comment, used in moderation e-mails."""
        return _('Posted by %(user)s at %(date)s\n\n%(comment)s\n\nhttp://%(domain)s%(url)s') % \
            {'user': self.user.username, 'date': self.submit_date,
             'comment': self.comment, 'domain': self.site.domain, 'url': self.get_absolute_url()}
class FreeComment(models.Model):
    """A comment by a non-registered user."""
    content_type = models.ForeignKey(ContentType)
    object_id = models.IntegerField(_('object ID'))
    comment = models.TextField(_('comment'), max_length=3000)
    person_name = models.CharField(_("person's name"), max_length=50)
    submit_date = models.DateTimeField(_('date/time submitted'), auto_now_add=True)
    is_public = models.BooleanField(_('is public'))
    ip_address = models.IPAddressField(_('ip address'))
    # TODO: Change this to is_removed, like Comment
    approved = models.BooleanField(_('approved by staff'))
    site = models.ForeignKey(Site)

    class Meta:
        verbose_name = _('free comment')
        verbose_name_plural = _('free comments')
        ordering = ('-submit_date',)

    def __unicode__(self):
        return "%s: %s..." % (self.person_name, self.comment[:100])

    def get_absolute_url(self):
        # URL of the commented object with an anchor to this comment;
        # empty when the target object is gone or has no URL.
        try:
            return self.get_content_object().get_absolute_url() + "#c" + str(self.id)
        except AttributeError:
            return ""

    def get_content_object(self):
        """
        Returns the object that this comment is a comment on. Returns None if
        the object no longer exists.
        """
        from django.core.exceptions import ObjectDoesNotExist
        try:
            return self.content_type.get_object_for_this_type(pk=self.object_id)
        except ObjectDoesNotExist:
            return None
    get_content_object.short_description = _('Content object')
class KarmaScoreManager(models.Manager):
    """Manager with helpers for recording and displaying karma votes."""

    def vote(self, user_id, comment_id, score):
        """Record user_id's karma score for comment_id, creating or updating."""
        try:
            karma = self.get(comment__pk=comment_id, user__pk=user_id)
        except self.model.DoesNotExist:
            # First positional arg is the primary key (None = auto-assign).
            karma = self.model(None, user_id=user_id, comment_id=comment_id, score=score, scored_date=datetime.datetime.now())
            karma.save()
        else:
            karma.score = score
            karma.scored_date = datetime.datetime.now()
            karma.save()

    def get_pretty_score(self, score):
        """
        Given a score between -1 and 1 (inclusive), returns the same score on a
        scale between 1 and 10 (inclusive), as an integer.
        """
        if score is None:
            return DEFAULT_KARMA
        return int(round((4.5 * score) + 5.5))
class KarmaScore(models.Model):
    """A single user's karma vote (-1 or +1) on a single comment."""
    user = models.ForeignKey(User)
    comment = models.ForeignKey(Comment)
    score = models.SmallIntegerField(_('score'), db_index=True)
    scored_date = models.DateTimeField(_('score date'), auto_now=True)
    objects = KarmaScoreManager()

    class Meta:
        verbose_name = _('karma score')
        verbose_name_plural = _('karma scores')
        # One vote per (user, comment) pair.
        unique_together = (('user', 'comment'),)

    def __unicode__(self):
        return _("%(score)d rating by %(user)s") % {'score': self.score, 'user': self.user}
class UserFlagManager(models.Manager):
    """Manager for creating user flags on comments."""

    def flag(self, comment, user):
        """
        Flags the given comment by the given user. If the comment has already
        been flagged by the user, or it was a comment posted by the user,
        nothing happens.
        """
        if int(comment.user_id) == int(user.id):
            return # A user can't flag his own comment. Fail silently.
        try:
            f = self.get(user__pk=user.id, comment__pk=comment.id)
        except self.model.DoesNotExist:
            from django.core.mail import mail_managers
            # First positional arg is the primary key (None = auto-assign);
            # only a newly-created flag triggers the notification mail.
            f = self.model(None, user.id, comment.id, None)
            message = _('This comment was flagged by %(user)s:\n\n%(text)s') % {'user': user.username, 'text': comment.get_as_text()}
            mail_managers('Comment flagged', message, fail_silently=True)
            f.save()
class UserFlag(models.Model):
    """Record of a user flagging a comment as inappropriate."""
    user = models.ForeignKey(User)
    comment = models.ForeignKey(Comment)
    flag_date = models.DateTimeField(_('flag date'), auto_now_add=True)
    objects = UserFlagManager()

    class Meta:
        verbose_name = _('user flag')
        verbose_name_plural = _('user flags')
        # A user may flag a given comment only once.
        unique_together = (('user', 'comment'),)

    def __unicode__(self):
        return _("Flag by %r") % self.user
class ModeratorDeletion(models.Model):
    """Audit record of a moderator deleting a comment."""
    user = models.ForeignKey(User, verbose_name='moderator')
    comment = models.ForeignKey(Comment)
    deletion_date = models.DateTimeField(_('deletion date'), auto_now_add=True)

    class Meta:
        verbose_name = _('moderator deletion')
        verbose_name_plural = _('moderator deletions')
        unique_together = (('user', 'comment'),)

    def __unicode__(self):
        return _("Moderator deletion by %r") % self.user
# Register the admin options for these models.
# TODO: Maybe this should live in a separate module admin.py, but how would we
# ensure that module was loaded?
from django.contrib import admin
class CommentAdmin(admin.ModelAdmin):
    """Admin options for registered-user comments."""
    fieldsets = (
        (None, {'fields': ('content_type', 'object_id', 'site')}),
        ('Content', {'fields': ('user', 'headline', 'comment')}),
        ('Ratings', {'fields': ('rating1', 'rating2', 'rating3', 'rating4', 'rating5', 'rating6', 'rating7', 'rating8', 'valid_rating')}),
        ('Meta', {'fields': ('is_public', 'is_removed', 'ip_address')}),
    )
    list_display = ('user', 'submit_date', 'content_type', 'get_content_object')
    list_filter = ('submit_date',)
    date_hierarchy = 'submit_date'
    search_fields = ('comment', 'user__username')
    # raw_id widget avoids rendering a huge <select> of all users.
    raw_id_fields = ('user',)
class FreeCommentAdmin(admin.ModelAdmin):
    """Admin options for anonymous (free) comments."""
    fieldsets = (
        (None, {'fields': ('content_type', 'object_id', 'site')}),
        ('Content', {'fields': ('person_name', 'comment')}),
        ('Meta', {'fields': ('is_public', 'ip_address', 'approved')}),
    )
    list_display = ('person_name', 'submit_date', 'content_type', 'get_content_object')
    list_filter = ('submit_date',)
    date_hierarchy = 'submit_date'
    search_fields = ('comment', 'person_name')
# Make both comment models manageable through the Django admin.
admin.site.register(Comment, CommentAdmin)
admin.site.register(FreeComment, FreeCommentAdmin)
| rawwell/django | django/contrib/comments/models.py | Python | bsd-3-clause | 12,879 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import copy
from item import Item
from itemgroup import Itemgroup, Itemgroups
from shinken.property import BoolProp, IntegerProp, StringProp
from shinken.log import logger
# It change from hostgroup Class because there is no members
# properties, just the realm_members that we rewrite on it.
class Realm(Itemgroup):
    """A realm: a distributed-monitoring zone grouping satellites
    (schedulers, pollers, reactionners, brokers, receivers) and,
    optionally, sub-realms listed in realm_members.
    """
    id = 1  # zero is always a little bit special... like in database
    my_type = 'realm'

    properties = Itemgroup.properties.copy()
    properties.update({
        'id': IntegerProp(default=0, fill_brok=['full_status']),
        'realm_name': StringProp(fill_brok=['full_status']),
        'realm_members': StringProp(default=''),  # No status_broker_name because it put hosts, not host_name
        'higher_realms': StringProp(default=''),
        'default': BoolProp(default='0'),
        'broker_complete_links': BoolProp(default='0'),
        #'alias': {'required': True, 'fill_brok': ['full_status']},
        #'notes': {'required': False, 'default':'', 'fill_brok': ['full_status']},
        #'notes_url': {'required': False, 'default':'', 'fill_brok': ['full_status']},
        #'action_url': {'required': False, 'default':'', 'fill_brok': ['full_status']},
    })

    running_properties = Item.running_properties.copy()
    running_properties.update({
        'serialized_confs': StringProp(default={}),
    })

    macros = {
        'REALMNAME': 'realm_name',
        'REALMMEMBERS': 'members',
    }

    def get_name(self):
        """Return the realm's name."""
        return self.realm_name

    def get_realms(self):
        """Return the (raw) realm_members value."""
        return self.realm_members

    def add_string_member(self, member):
        # realm_members is still a comma-separated string at this point.
        self.realm_members += ',' + member

    def get_realm_members(self):
        """Return realm_members as a list of stripped names ([] if unset)."""
        if self.has('realm_members'):
            return [r.strip() for r in self.realm_members.split(',')]
        else:
            return []

    # Use to make python properties
    # TODO: change itemgroup function pythonize?
    def pythonize(self):
        cls = self.__class__
        for prop, tab in cls.properties.items():
            try:
                old_val = getattr(self, prop)
                new_val = tab.pythonize(old_val)
                #print "Changing ", old_val, "by", new_val
                setattr(self, prop, new_val)
            except AttributeError, exp:
                pass # Will be catch at the is_correct moment

    # We fillfull properties with template ones if need
    # Because hostgroup we call may not have it's members
    # we call get_hosts_by_explosion on it
    def get_realms_by_explosion(self, realms):
        # First we tag the hg so it will not be explode
        # if a son of it already call it
        self.already_explode = True
        # Now the recursive part
        # rec_tag is set to False every HG we explode
        # so if True here, it must be a loop in HG
        # calls... not GOOD!
        if self.rec_tag:
            err = "Error: we've got a loop in realm definition %s" % self.get_name()
            self.configuration_errors.append(err)
            if self.has('members'):
                return self.members
            else:
                return ''
        # Ok, not a loop, we tag it and continue
        self.rec_tag = True
        p_mbrs = self.get_realm_members()
        for p_mbr in p_mbrs:
            p = realms.find_by_name(p_mbr.strip())
            if p is not None:
                value = p.get_realms_by_explosion(realms)
                if value is not None:
                    self.add_string_member(value)
        if self.has('members'):
            return self.members
        else:
            return ''

    def get_all_subs_satellites_by_type(self, sat_type):
        """Return our satellites of the given type plus those of all
        sub-realms, recursively."""
        r = copy.copy(getattr(self, sat_type))
        for p in self.realm_members:
            tmps = p.get_all_subs_satellites_by_type(sat_type)
            for s in tmps:
                r.append(s)
        return r

    def count_reactionners(self):
        # Count non-spare reactionners here, plus those of higher realms
        # that are configured to manage sub-realms.
        self.nb_reactionners = 0
        for reactionner in self.reactionners:
            if not reactionner.spare:
                self.nb_reactionners += 1
        for realm in self.higher_realms:
            for reactionner in realm.reactionners:
                if not reactionner.spare and reactionner.manage_sub_realms:
                    self.nb_reactionners += 1

    def count_pollers(self):
        # Same counting rule as count_reactionners, for pollers.
        self.nb_pollers = 0
        for poller in self.pollers:
            if not poller.spare:
                self.nb_pollers += 1
        for realm in self.higher_realms:
            for poller in realm.pollers:
                if not poller.spare and poller.manage_sub_realms:
                    self.nb_pollers += 1

    def count_brokers(self):
        # Same counting rule as count_reactionners, for brokers.
        self.nb_brokers = 0
        for broker in self.brokers:
            if not broker.spare:
                self.nb_brokers += 1
        for realm in self.higher_realms:
            for broker in realm.brokers:
                if not broker.spare and broker.manage_sub_realms:
                    self.nb_brokers += 1

    def count_receivers(self):
        # Same counting rule as count_reactionners, for receivers.
        self.nb_receivers = 0
        for receiver in self.receivers:
            if not receiver.spare:
                self.nb_receivers += 1
        for realm in self.higher_realms:
            for receiver in realm.receivers:
                if not receiver.spare and receiver.manage_sub_realms:
                    self.nb_receivers += 1

    # Return the list of satellites of a certain type
    # like reactionner -> self.reactionners
    # NOTE(review): method name keeps the historical 'satellties' typo;
    # it is part of the public interface.
    def get_satellties_by_type(self, type):
        if hasattr(self, type + 's'):
            return getattr(self, type + 's')
        else:
            logger.debug("[realm] do not have this kind of satellites: %s", type)
            return []

    def fill_potential_satellites_by_type(self, sat_type):
        """Build self.potential_<sat_type>: our own satellites plus the
        sub-realm-managing ones from higher realms."""
        setattr(self, 'potential_%s' % sat_type, [])
        for satellite in getattr(self, sat_type):
            getattr(self, 'potential_%s' % sat_type).append(satellite)
        for realm in self.higher_realms:
            for satellite in getattr(realm, sat_type):
                if satellite.manage_sub_realms:
                    getattr(self, 'potential_%s' % sat_type).append(satellite)

    # Return the list of potentials satellites of a certain type
    # like reactionner -> self.potential_reactionners
    def get_potential_satellites_by_type(self, type):
        if hasattr(self, 'potential_' + type + 's'):
            return getattr(self, 'potential_' + type + 's')
        else:
            logger.debug("[realm] do not have this kind of satellites: %s", type)
            return []

    # Return the list of potentials satellites of a certain type
    # like reactionner -> self.nb_reactionners
    def get_nb_of_must_have_satellites(self, type):
        if hasattr(self, 'nb_' + type + 's'):
            return getattr(self, 'nb_' + type + 's')
        else:
            logger.debug("[realm] do not have this kind of satellites: %s", type)
            return 0

    # Fill dict of realms for managing the satellites confs
    def prepare_for_satellites_conf(self):
        self.to_satellites = {}
        self.to_satellites['reactionner'] = {}
        self.to_satellites['poller'] = {}
        self.to_satellites['broker'] = {}
        self.to_satellites['receiver'] = {}

        self.to_satellites_need_dispatch = {}
        self.to_satellites_need_dispatch['reactionner'] = {}
        self.to_satellites_need_dispatch['poller'] = {}
        self.to_satellites_need_dispatch['broker'] = {}
        self.to_satellites_need_dispatch['receiver'] = {}

        self.to_satellites_managed_by = {}
        self.to_satellites_managed_by['reactionner'] = {}
        self.to_satellites_managed_by['poller'] = {}
        self.to_satellites_managed_by['broker'] = {}
        self.to_satellites_managed_by['receiver'] = {}

        # Refresh counts and potential lists for every satellite kind.
        self.count_reactionners()
        self.fill_potential_satellites_by_type('reactionners')
        self.count_pollers()
        self.fill_potential_satellites_by_type('pollers')
        self.count_brokers()
        self.fill_potential_satellites_by_type('brokers')
        self.count_receivers()
        self.fill_potential_satellites_by_type('receivers')

        s = "%s: (in/potential) (schedulers:%d) (pollers:%d/%d) (reactionners:%d/%d) (brokers:%d/%d) (receivers:%d/%d)" % \
            (self.get_name(),
             len(self.schedulers),
             self.nb_pollers, len(self.potential_pollers),
             self.nb_reactionners, len(self.potential_reactionners),
             self.nb_brokers, len(self.potential_brokers),
             self.nb_receivers, len(self.potential_receivers)
             )
        logger.info(s)

    # TODO: find a better name...
    # TODO: and if he goes active?
    def fill_broker_with_poller_reactionner_links(self, broker):
        # First we create/void theses links
        broker.cfg['pollers'] = {}
        broker.cfg['reactionners'] = {}
        broker.cfg['receivers'] = {}

        # First our own level
        for p in self.pollers:
            cfg = p.give_satellite_cfg()
            broker.cfg['pollers'][p.id] = cfg

        for r in self.reactionners:
            cfg = r.give_satellite_cfg()
            broker.cfg['reactionners'][r.id] = cfg

        for b in self.receivers:
            cfg = b.give_satellite_cfg()
            broker.cfg['receivers'][b.id] = cfg

        # Then sub if we must to it
        if broker.manage_sub_realms:
            # Now pollers
            for p in self.get_all_subs_satellites_by_type('pollers'):
                cfg = p.give_satellite_cfg()
                broker.cfg['pollers'][p.id] = cfg

            # Now reactionners
            for r in self.get_all_subs_satellites_by_type('reactionners'):
                cfg = r.give_satellite_cfg()
                broker.cfg['reactionners'][r.id] = cfg

            # Now receivers
            for r in self.get_all_subs_satellites_by_type('receivers'):
                cfg = r.give_satellite_cfg()
                broker.cfg['receivers'][r.id] = cfg

    # Get a conf package of satellites links that can be useful for
    # a scheduler
    def get_satellites_links_for_scheduler(self):
        cfg = {}

        # First we create/void theses links
        cfg['pollers'] = {}
        cfg['reactionners'] = {}

        # First our own level
        for p in self.pollers:
            c = p.give_satellite_cfg()
            cfg['pollers'][p.id] = c

        for r in self.reactionners:
            c = r.give_satellite_cfg()
            cfg['reactionners'][r.id] = c

        #print "***** Preparing a satellites conf for a scheduler", cfg
        return cfg
class Realms(Itemgroups):
    """Collection of Realm objects with name-to-object linking and
    recursive member expansion."""
    name_property = "realm_name"  # is used for finding hostgroups
    inner_class = Realm

    def get_members_by_name(self, pname):
        """Return realm_members of the realm named pname ([] if unknown)."""
        realm = self.find_by_name(pname)
        if realm is None:
            return []
        return realm.get_realms()

    def linkify(self):
        self.linkify_p_by_p()

        # prepare list of satellites and confs
        for p in self:
            p.pollers = []
            p.schedulers = []
            p.reactionners = []
            p.brokers = []
            p.receivers = []
            p.packs = []
            p.confs = {}

    # We just search for each realm the others realms
    # and replace the name by the realm
    def linkify_p_by_p(self):
        for p in self.items.values():
            mbrs = p.get_realm_members()
            # The new member list, in id
            new_mbrs = []
            for mbr in mbrs:
                new_mbr = self.find_by_name(mbr)
                if new_mbr is not None:
                    new_mbrs.append(new_mbr)
            # We find the id, we replace the names
            p.realm_members = new_mbrs

        # Now put higher realm in sub realms
        # So after they can
        for p in self.items.values():
            p.higher_realms = []

        for p in self.items.values():
            for sub_p in p.realm_members:
                sub_p.higher_realms.append(p)

    # Use to fill members with hostgroup_members
    def explode(self):
        # We do not want a same hg to be explode again and again
        # so we tag it
        for tmp_p in self.items.values():
            tmp_p.already_explode = False
        for p in self:
            if p.has('realm_members') and not p.already_explode:
                # get_hosts_by_explosion is a recursive
                # function, so we must tag hg so we do not loop
                for tmp_p in self:
                    tmp_p.rec_tag = False
                p.get_realms_by_explosion(self)

        # We clean the tags
        for tmp_p in self.items.values():
            if hasattr(tmp_p, 'rec_tag'):
                del tmp_p.rec_tag
            del tmp_p.already_explode

    def get_default(self):
        """Return the realm flagged as default, or None if there is none."""
        for r in self:
            if getattr(r, 'default', False):
                return r
        return None

    def prepare_for_satellites_conf(self):
        # Delegate per-realm preparation to each Realm.
        for r in self:
            r.prepare_for_satellites_conf()
| h4wkmoon/shinken | shinken/objects/realm.py | Python | agpl-3.0 | 13,987 |
import math
from typing import List, Optional, Callable
import numpy as np
import stp.play as play
import stp.role as role
import stp.role.constraint as constraint
import stp.role.cost as cost
import stp.skill as skill
import stp.tactic as tactic
import stp.testing as testing
from stp import action as action
from stp.rc import Ball, Robot, WorldState
import stp.rc as rc
from stp.role import Priority
from stp.role.assignment import FlatRoleRequests, RoleId
from stp.role.assignment.naive import NaiveRoleAssignment, SortedRequests
class AssignCostFn(role.CostFn):
    """Cost function that always prefers assigning a robot (-1) over
    leaving the role unassigned (9999)."""

    def __call__(
        self,
        robot: rc.Robot,
        prev_result: Optional[role.RoleResult],
        world_state: rc.WorldState,
    ) -> float:
        return -1

    def unassigned_cost_fn(
        self,
        prev_result: Optional[role.RoleResult],
        world_state: rc.WorldState,
    ) -> float:
        # TODO: Implement real unassigned cost function
        return 9999
class UnassignCostFn(role.CostFn):
    """Cost function that always prefers leaving the role unassigned (-1)
    over assigning any robot (9999) — the inverse of AssignCostFn."""

    def __call__(
        self,
        robot: rc.Robot,
        prev_result: Optional[role.RoleResult],
        world_state: rc.WorldState,
    ) -> float:
        return 9999

    def unassigned_cost_fn(
        self,
        prev_result: Optional[role.RoleResult],
        world_state: rc.WorldState,
    ) -> float:
        # TODO: Implement real unassigned cost function
        return -1
class TestCostFn(role.CostFn):
    """Cost function that delegates assignment cost to an arbitrary callable
    supplied at construction time; unassigned cost is fixed at 9999."""

    def __init__(
        self,
        fn: Callable[[rc.Robot, Optional[role.RoleResult], rc.WorldState], float],
    ) -> None:
        self.fn = fn

    def __call__(
        self,
        robot: rc.Robot,
        prev_result: Optional[role.RoleResult],
        world_state: rc.WorldState,
    ) -> float:
        return self.fn(robot, prev_result, world_state)

    def unassigned_cost_fn(
        self, prev_result: Optional[role.RoleResult], world_state: rc.WorldState
    ) -> float:
        return 9999
class SkillBase(skill.ISkill):
    """No-op skill base for tests; provides a LOW-priority, required
    role request built around AssignCostFn."""

    def define(self):
        pass

    def tick(self):
        pass

    def create_request(self) -> role.RoleRequest:
        """Create a required LOW-priority request using AssignCostFn."""
        assign_cost_fn = AssignCostFn()
        return role.RoleRequest(Priority.LOW, required=True, cost_fn=assign_cost_fn)

    def __repr__(self) -> str:
        return "<{} object>".format(self.__class__.__name__)
class SkillA(SkillBase):
    """Trivial concrete skill used to distinguish roles in tests."""

    ...
class SkillB(SkillBase):
    """Trivial concrete skill used to distinguish roles in tests."""

    ...
class SkillC(SkillBase):
    """Trivial concrete skill used to distinguish roles in tests."""

    ...
class BallSkill(SkillBase):
    """Skill whose request is constrained to the robot that has the ball."""

    ...
class Skills(tactic.SkillsEnum):
    """Skill slots used by TacticBase: two each of A/B/C plus a
    ball-constrained skill."""
    A1 = tactic.SkillEntry(SkillA)
    A2 = tactic.SkillEntry(SkillA)
    B1 = tactic.SkillEntry(SkillB)
    B2 = tactic.SkillEntry(SkillB)
    C1 = tactic.SkillEntry(SkillC)
    C2 = tactic.SkillEntry(SkillC)
    BALL_SKILL = tactic.SkillEntry(BallSkill)
class TacticBase(tactic.ITactic[None]):
    """Propless test tactic that requests seven roles at mixed priorities,
    one of them constrained to the ball carrier."""

    def __init__(self, ctx: tactic.Ctx):
        self.skills = Skills(ctx.skill_factory)
        # Expose each skill entry as an attribute for direct access in tests.
        self.A1 = self.skills.A1
        self.A2 = self.skills.A2
        self.B1 = self.skills.B1
        self.B2 = self.skills.B2
        self.C1 = self.skills.C1
        self.C2 = self.skills.C2
        self.BALL_SKILL = self.skills.BALL_SKILL

    def compute_props(self, prev_props: None) -> None:
        """This tactic carries no props."""
        return None

    def tick(
        self, role_results: tactic.RoleResults, props: None
    ) -> List[action.IAction]:
        # Dummy tick function doesn't return any actions.
        return []

    def get_requests(self, world_state: WorldState, props: None) -> tactic.RoleRequests:
        """Build one request per skill entry; BALL_SKILL additionally
        requires the assigned robot to have the ball."""
        role_requests: tactic.RoleRequests = {
            self.A1: [self.A1.skill.create_request().with_priority(Priority.LOW)],
            self.A2: [self.A2.skill.create_request().with_priority(Priority.MEDIUM)],
            self.B1: [self.B1.skill.create_request().with_priority(Priority.MEDIUM)],
            self.B2: [self.B2.skill.create_request().with_priority(Priority.HIGH)],
            self.C1: [self.C1.skill.create_request().with_priority(Priority.LOW)],
            self.C2: [self.C2.skill.create_request().with_priority(Priority.MEDIUM)],
            self.BALL_SKILL: [
                self.BALL_SKILL.skill.create_request()
                .with_priority(Priority.HIGH)
                .with_constraint_fn(constraint.has_ball())
            ],
        }
        return role_requests

    def create_request(self):
        pass
def get_simple_role_ids() -> List[RoleId]:
    """Creates and returns a list of role ids with skills SkillA, SkillB and SkillC for
    TacticBase.

    :return: List of role ids with skills SkillA, SkillB and SkillC for TacticBase.
    """
    skill_entries = [
        tactic.SkillEntry(SkillA),
        tactic.SkillEntry(SkillB),
        tactic.SkillEntry(SkillC),
        tactic.SkillEntry(BallSkill),
    ]
    skill_instances = [SkillA(), SkillB(), SkillC(), BallSkill()]
    # Give each entry a distinct index and attach its concrete skill instance.
    for idx, (skill_entry, skill_instance) in enumerate(
        zip(skill_entries, skill_instances)
    ):
        skill_entry.set_idx(idx)
        skill_entry.skill = skill_instance

    # A RoleId is (tactic class, skill entry, instance number).
    return [(TacticBase, skill_entry, 0) for skill_entry in skill_entries]
def test_get_sorted_requests_simple():
    """Manually create a Requests and check that get_sorted_requests returns a list of
    three dictionaries, one for each priority level.
    """
    # The ball role id is unpacked but unused; only A/B/C are requested here.
    role_id_a, role_id_b, role_id_c, role_id_ball = get_simple_role_ids()
    switch_cost = 0.0
    constant_cost = cost.constant(0.5, switch_cost)
    requests: FlatRoleRequests = {
        role_id_a: role.RoleRequest(
            Priority.HIGH, required=True, cost_fn=constant_cost
        ),
        role_id_b: role.RoleRequest(Priority.LOW, required=True, cost_fn=constant_cost),
        role_id_c: role.RoleRequest(
            Priority.MEDIUM, required=True, cost_fn=constant_cost
        ),
    }
    # Get the sorted requests.
    sorted_requests = NaiveRoleAssignment.get_sorted_requests(requests)
    # Check that the lengths of the sorted_requests is correct.
    assert len(sorted_requests) == 3
    # NOTE(review): integer indexing below assumes Priority maps LOW/MEDIUM/
    # HIGH onto 0/1/2 (IntEnum-like) -- confirm against the Priority type.
    assert len(sorted_requests[0]) == 1
    assert len(sorted_requests[1]) == 1
    assert len(sorted_requests[2]) == 1
    # Check that A is in high priority, B is in low priority, C is in medium priority.
    assert role_id_a in sorted_requests[Priority.HIGH]
    assert role_id_b in sorted_requests[Priority.LOW]
    assert role_id_c in sorted_requests[Priority.MEDIUM]
    # Check that each of the role requests are equal.
    assert sorted_requests[Priority.LOW][role_id_b] == requests[role_id_b]
    assert sorted_requests[Priority.MEDIUM][role_id_c] == requests[role_id_c]
    assert sorted_requests[Priority.HIGH][role_id_a] == requests[role_id_a]
def get_tactic_ctx() -> tactic.Ctx:
    """Creates a simple tactic context for convenience.

    :return: Tactic context containing SkillA, SkillB and SkillC.
    """
    registry = skill.Registry()
    # Register one instance of every skill class used by TacticBase.
    for skill_cls in (SkillA, SkillB, SkillC, BallSkill):
        registry[skill_cls] = skill_cls()
    return tactic.Ctx(skill.Factory(registry))
def test_get_sorted_requests_multiple() -> None:
    """Tests get_sorted_requests with a more complicated example."""
    tactic_ctx = get_tactic_ctx()
    tactic_instance = TacticBase(tactic_ctx)
    world_state: WorldState = testing.generate_test_worldstate()
    requests: play.RoleRequests = {
        TacticBase: tactic_instance.get_requests(world_state, None)
    }
    flat_requests: FlatRoleRequests = play.flatten_requests(requests)
    # Flat requests contains:
    #   A1: LOW,    A2: MEDIUM
    #   B1: MEDIUM, B2: HIGH
    #   C1: LOW,    C2: MEDIUM
    #   BALL_SKILL: HIGH
    sorted_requests: SortedRequests = NaiveRoleAssignment.get_sorted_requests(
        flat_requests
    )
    # Check the lengths of each dictionary.
    # (These lists hold SkillEntry objects, despite the *_tactics names.)
    low_tactics = [tactic_instance.A1, tactic_instance.C1]
    med_tactics = [tactic_instance.A2, tactic_instance.B1, tactic_instance.C2]
    hi_tactics = [tactic_instance.B2, tactic_instance.BALL_SKILL]
    assert len(sorted_requests) == 3
    assert len(sorted_requests[Priority.LOW]) == len(low_tactics)
    assert len(sorted_requests[Priority.MEDIUM]) == len(med_tactics)
    assert len(sorted_requests[Priority.HIGH]) == len(hi_tactics)
    # Each (tactic, entry, 0) key must land in the bucket matching the
    # priority assigned in TacticBase.get_requests, with the request intact.
    for low_tactic in low_tactics:
        assert (TacticBase, low_tactic, 0) in sorted_requests[Priority.LOW]
        assert (
            sorted_requests[Priority.LOW][TacticBase, low_tactic, 0]
            == requests[TacticBase][low_tactic][0]
        )
    for med_tactic in med_tactics:
        assert (TacticBase, med_tactic, 0) in sorted_requests[Priority.MEDIUM]
        assert (
            sorted_requests[Priority.MEDIUM][TacticBase, med_tactic, 0]
            == requests[TacticBase][med_tactic][0]
        )
    for hi_tactic in hi_tactics:
        assert (TacticBase, hi_tactic, 0) in sorted_requests[Priority.HIGH]
        assert (
            sorted_requests[Priority.HIGH][TacticBase, hi_tactic, 0]
            == requests[TacticBase][hi_tactic][0]
        )
def test_compute_costs_matrix() -> None:
    """Tests the compute_costs_matrix function.

    Costs:
        A: Dist to (0, 0)
        B: Dist to (1, 1)
        C: Dist to (2, 2)

    The expected matrix has one row per robot plus one sentinel row of 9999s,
    and one column per request.
    """
    # Get the three roles (the ball role is not requested in this test).
    role_id_a, role_id_b, role_id_c, _role_id_ball = get_simple_role_ids()
    # Create the cost functions.
    switch_cost = 0.0
    cost_a = cost.distance_to_pt(np.array([0, 0]), math.sqrt(8), switch_cost)
    cost_b = cost.distance_to_pt(np.array([1, 1]), math.sqrt(8), switch_cost)
    cost_c = cost.distance_to_pt(np.array([2, 2]), math.sqrt(8), switch_cost)
    # Create CostFns
    cost_fn_a = TestCostFn(cost_a)
    cost_fn_b = TestCostFn(cost_b)
    cost_fn_c = TestCostFn(cost_c)
    # Create the requests of same priority.
    requests: FlatRoleRequests = {
        role_id_a: role.RoleRequest(Priority.LOW, required=True, cost_fn=cost_fn_a),
        role_id_b: role.RoleRequest(Priority.LOW, required=True, cost_fn=cost_fn_b),
        role_id_c: role.RoleRequest(Priority.LOW, required=True, cost_fn=cost_fn_c),
    }
    # Create the robots at (0, 0), (1, 1), (2, 2), (3, 3)
    free_robots = np.array(
        [
            testing.generate_test_robot(robot_id=1, pose=np.array([0, 0, 0])),
            testing.generate_test_robot(robot_id=2, pose=np.array([1, 1, 0])),
            testing.generate_test_robot(robot_id=3, pose=np.array([2, 2, 0])),
            testing.generate_test_robot(robot_id=4, pose=np.array([3, 3, 0])),
        ]
    )
    # Construct the world state. (Fixed: renamed the misspelled "out_robots"
    # local and dropped the unused their_robots/ball locals.)
    our_robots: List[Robot] = list(free_robots)
    world_state: WorldState = testing.generate_test_worldstate(our_robots=our_robots)
    prev_results = {}
    # Compute the cost matrix.
    costs_matrix: np.ndarray = NaiveRoleAssignment.compute_costs_matrix(
        free_robots, requests, world_state, prev_results
    )
    # Check that the cost matrix is of the right size, ie.
    # (num_robots + 1 sentinel row, num_requests).
    assert costs_matrix.shape == (5, 3)
    # fmt: off
    expected_costs_matrix = np.array(
        [[0.0, math.sqrt(2), math.sqrt(8)],
         [math.sqrt(2), 0.0, math.sqrt(2)],
         [math.sqrt(8), math.sqrt(2), 0.0],
         [math.sqrt(8), math.sqrt(8), math.sqrt(2)],
         [9999, 9999, 9999]]
    )
    # fmt: on
    # costs_matrix should be equal to expected_costs_matrix.
    assert np.allclose(costs_matrix, expected_costs_matrix)
def test_assign_prioritized_roles() -> None:
    """Tests that for the role requests and free robots above that role assignment
    returns the expected result.
    """
    # Get the three roles (the ball role id is unused here).
    role_id_a, role_id_b, role_id_c, role_id_ball = get_simple_role_ids()
    # Create the cost functions.
    switch_cost = 0.0
    cost_a = cost.distance_to_pt(np.array([0, 0]), math.sqrt(8), switch_cost)
    cost_b = cost.distance_to_pt(np.array([1, 1]), math.sqrt(8), switch_cost)
    cost_c = cost.distance_to_pt(np.array([2, 2]), math.sqrt(8), switch_cost)
    # Create CostFns
    cost_fn_a = TestCostFn(cost_a)
    cost_fn_b = TestCostFn(cost_b)
    cost_fn_c = TestCostFn(cost_c)
    # Create the requests of same priority.
    requests: FlatRoleRequests = {
        role_id_a: role.RoleRequest(Priority.LOW, required=True, cost_fn=cost_fn_a),
        role_id_b: role.RoleRequest(Priority.LOW, required=True, cost_fn=cost_fn_b),
        role_id_c: role.RoleRequest(Priority.LOW, required=True, cost_fn=cost_fn_c),
    }
    # Create the robots at (0, 0), (1, 1), (2, 2), (3, 3)
    # NOTE(review): all four robots share robot_id=0; the assertions below
    # compare robot objects, not ids -- confirm the duplicate ids are intended.
    free_robots = np.array(
        [
            testing.generate_test_robot(robot_id=0, pose=np.array([0, 0, 0])),
            testing.generate_test_robot(robot_id=0, pose=np.array([1, 1, 0])),
            testing.generate_test_robot(robot_id=0, pose=np.array([2, 2, 0])),
            testing.generate_test_robot(robot_id=0, pose=np.array([3, 3, 0])),
        ]
    )
    # Construct the world state.
    our_bots: List[Robot] = list(free_robots)
    their_bots: List[Robot] = []
    world_state: WorldState = testing.generate_test_worldstate(
        our_robots=our_bots, their_robots=their_bots
    )
    # Assign the roles.
    results, new_free_robots = NaiveRoleAssignment.assign_prioritized_roles(
        requests, world_state, free_robots, {}
    )
    # Check that the three role ids are assigned.
    assert len(results) == 3
    assert role_id_a in results
    assert role_id_b in results
    assert role_id_c in results
    # Check that A->0, B->1, C->2.
    assert results[role_id_a].role.robot == free_robots[0]
    assert results[role_id_b].role.robot == free_robots[1]
    assert results[role_id_c].role.robot == free_robots[2]
    # Check that the costs for each role result are 0.
    assert results[role_id_a].cost == 0.0
    assert results[role_id_b].cost == 0.0
    assert results[role_id_c].cost == 0.0
    # Check that new_free_robots is length 1 and contains free_robots[3].
    assert len(new_free_robots) == 1
    assert new_free_robots[0] == free_robots[3]
def test_assign_roles() -> None:
    """Tests that NaiveRoleAssignment.assign_roles assigns HIGH, then MEDIUM, then LOW
    priority. This is tested by having a MEDIUM priority role request that has a lower
    cost than a HIGH priority role request, and expecting that the HIGH role request is
    fulfilled first.
    """
    # Get the three roles (the ball role id is unused here).
    role_id_a, role_id_b, role_id_c, role_id_ball = get_simple_role_ids()
    # Create the cost functions.
    switch_cost = 0.0
    cost_a = cost.distance_to_pt(np.array([0, 0]), math.sqrt(8), switch_cost)
    cost_b = cost.distance_to_pt(np.array([1, 1]), math.sqrt(8), switch_cost)
    cost_c = cost.distance_to_pt(np.array([2, 2]), math.sqrt(8), switch_cost)
    # Create CostFns
    cost_fn_a = TestCostFn(cost_a)
    cost_fn_b = TestCostFn(cost_b)
    cost_fn_c = TestCostFn(cost_c)
    # Create the requests in descending priority.
    requests: FlatRoleRequests = {
        role_id_a: role.RoleRequest(Priority.HIGH, required=True, cost_fn=cost_fn_a),
        role_id_b: role.RoleRequest(Priority.MEDIUM, required=True, cost_fn=cost_fn_b),
        role_id_c: role.RoleRequest(Priority.LOW, required=True, cost_fn=cost_fn_c),
    }
    # Create the robots at (1, 1), (2, 2), (3, 3), (4, 4).
    free_robots = np.array(
        [
            testing.generate_test_robot(robot_id=0, pose=np.array([1, 1, 0])),
            testing.generate_test_robot(robot_id=1, pose=np.array([2, 2, 0])),
            testing.generate_test_robot(robot_id=2, pose=np.array([3, 3, 0])),
            testing.generate_test_robot(robot_id=3, pose=np.array([4, 4, 0])),
        ]
    )
    # Construct the world state.
    # NOTE(review): "out_robots" is presumably a typo for "our_robots".
    out_robots: List[Robot] = list(free_robots)
    their_robots: List[Robot] = []
    world_state: WorldState = testing.generate_test_worldstate(
        our_robots=out_robots, their_robots=their_robots
    )
    # Assign the roles.
    results = NaiveRoleAssignment.assign_roles(requests, world_state, {})
    # Check that the three role ids are assigned.
    assert len(results) == 3
    assert role_id_a in results
    assert role_id_b in results
    assert role_id_c in results
    # Check that A->0, B->1, C->2, even though A has a higher cost than B for robot 0.
    assert results[role_id_a].role.robot == free_robots[0]
    assert results[role_id_b].role.robot == free_robots[1]
    assert results[role_id_c].role.robot == free_robots[2]
    # Check that the costs for each role result are sqrt(2).
    assert results[role_id_a].cost == math.sqrt(2)
    assert results[role_id_b].cost == math.sqrt(2)
    assert results[role_id_c].cost == math.sqrt(2)
def test_assign_roles_constrained() -> None:
    """Tests that NaiveRoleAssignment.assign_roles respects constraints, ie. even though
    role_id_a and role_id_ball both are HIGH priority, the robot at (0, 0) has the ball
    and thus is assigned BALL_SKILL.

    This test will fail as has_ball has been removed from Robot thus breaking the ball
    constraint.
    """
    # Create role assigner.
    # NOTE(review): this instance is unused -- assign_roles is invoked on the
    # class below, unlike test_unassigned_role which uses the instance.
    role_assigner = NaiveRoleAssignment()
    # Get the four roles.
    role_id_a, role_id_b, role_id_c, role_id_ball = get_simple_role_ids()
    # Create the cost functions.
    switch_cost = 0.0
    cost_a = cost.distance_to_pt(np.array([0, 0]), math.sqrt(8), switch_cost)
    cost_b = cost.distance_to_pt(np.array([1, 1]), math.sqrt(8), switch_cost)
    cost_c = cost.distance_to_pt(np.array([2, 2]), math.sqrt(8), switch_cost)
    cost_ball = cost.distance_to_pt(np.array([2, 2]), math.sqrt(8), switch_cost)
    # Create CostFns
    cost_fn_a = TestCostFn(cost_a)
    cost_fn_b = TestCostFn(cost_b)
    cost_fn_c = TestCostFn(cost_c)
    cost_fn_ball = TestCostFn(cost_ball)
    # Create the requests in descending priority.
    requests: FlatRoleRequests = {
        role_id_a: role.RoleRequest(Priority.HIGH, required=False, cost_fn=cost_fn_a),
        role_id_ball: role.RoleRequest(
            Priority.HIGH,
            required=False,
            cost_fn=cost_fn_ball,
            constraint_fn=constraint.has_ball(),
        ),
        role_id_b: role.RoleRequest(Priority.MEDIUM, required=False, cost_fn=cost_fn_b),
        role_id_c: role.RoleRequest(Priority.LOW, required=False, cost_fn=cost_fn_c),
    }
    # Create the robots at (0, 0) (1, 1) and (2, 2); only robot 0 has ball sense.
    free_robots = np.array(
        [
            testing.generate_test_robot(
                robot_id=0, pose=np.array([0, 0, 0]), has_ball_sense=True
            ),
            testing.generate_test_robot(robot_id=1, pose=np.array([1, 1, 0])),
            testing.generate_test_robot(robot_id=2, pose=np.array([2, 2, 0])),
        ]
    )
    # Construct the world state.
    out_robots: List[Robot] = list(free_robots)
    their_robots: List[Robot] = []
    world_state: WorldState = testing.generate_test_worldstate(
        our_robots=out_robots, their_robots=their_robots
    )
    # Assign the roles.
    results = NaiveRoleAssignment.assign_roles(requests, world_state, {})
    # Check that all roles have been assigned.
    assert len(results) == 4
    assert role_id_a in results
    assert role_id_ball in results
    assert role_id_b in results
    assert role_id_c in results
    # Check that A->1, BALL->0, B->2 even though A has a lower cost than BALL for 0.
    assert results[role_id_a].role.robot == free_robots[1]
    assert results[role_id_ball].role.robot == free_robots[0]
    assert results[role_id_b].role.robot == free_robots[2]
    # Check that C's role request is unfilled due to being low priority.
    assert not results[role_id_c].is_filled()
def test_unassigned_role() -> None:
    """role_id_d's request uses UnassignCostFn, so it must come back unfilled
    while the other three requests are still present in the results."""
    # Get the four roles.
    role_id_a, role_id_b, role_id_c, role_id_d = get_simple_role_ids()
    # Create role assigner
    role_assigner = NaiveRoleAssignment()
    assign_cost_fn = AssignCostFn()
    unassigned_cost_fn = UnassignCostFn()
    # All requests share HIGH priority; only the cost functions differ.
    requests: FlatRoleRequests = {
        role_id_a: role.RoleRequest(
            Priority.HIGH, required=False, cost_fn=assign_cost_fn
        ),
        role_id_b: role.RoleRequest(
            Priority.HIGH,
            required=False,
            cost_fn=assign_cost_fn,
            constraint_fn=constraint.has_ball(),
        ),
        role_id_c: role.RoleRequest(
            Priority.HIGH, required=False, cost_fn=assign_cost_fn
        ),
        role_id_d: role.RoleRequest(
            Priority.HIGH, required=False, cost_fn=unassigned_cost_fn
        ),
    }
    # Create five robots; only robot 0 has ball sense (for role_id_b's constraint).
    free_robots = np.array(
        [
            testing.generate_test_robot(
                robot_id=0, pose=np.array([0, 0, 0]), has_ball_sense=True
            ),
            testing.generate_test_robot(robot_id=1, pose=np.array([1, 1, 0])),
            testing.generate_test_robot(robot_id=2, pose=np.array([2, 2, 0])),
            testing.generate_test_robot(robot_id=3, pose=np.array([2, 1, 0])),
            testing.generate_test_robot(robot_id=4, pose=np.array([1, 2, 0])),
        ]
    )
    # Construct the world state.
    out_robots: List[Robot] = list(free_robots)
    their_robots: List[Robot] = []
    world_state: WorldState = testing.generate_test_worldstate(
        our_robots=out_robots, their_robots=their_robots
    )
    # Assign the roles.
    results = role_assigner.assign_roles(requests, world_state, {})
    # Check that all roles are returned in results.
    assert len(results) == 4
    assert role_id_a in results
    assert role_id_b in results
    assert role_id_c in results
    assert role_id_d in results
    # Check that D's role request is unfilled due to unassigned cost function.
    assert not results[role_id_d].is_filled()
def test_unassigned_roles() -> None:
    """Every request uses UnassignCostFn, so all four results must be
    returned but none may be filled."""
    # Get the four roles.
    role_id_a, role_id_b, role_id_c, role_id_d = get_simple_role_ids()
    # Create role assigner
    role_assigner = NaiveRoleAssignment()
    # Create cost functions
    assign_cost_fn = AssignCostFn()
    unassigned_cost_fn = UnassignCostFn()
    # Create role requests -- every one carries the unassignable cost fn.
    requests: FlatRoleRequests = {
        role_id_a: role.RoleRequest(
            Priority.HIGH, required=False, cost_fn=unassigned_cost_fn
        ),
        role_id_b: role.RoleRequest(
            Priority.HIGH,
            required=False,
            cost_fn=unassigned_cost_fn,
            constraint_fn=constraint.has_ball(),
        ),
        role_id_c: role.RoleRequest(
            Priority.HIGH, required=False, cost_fn=unassigned_cost_fn
        ),
        role_id_d: role.RoleRequest(
            Priority.HIGH, required=False, cost_fn=unassigned_cost_fn
        ),
    }
    # Create five robots; only robot 0 has ball sense.
    free_robots = np.array(
        [
            testing.generate_test_robot(
                robot_id=0, pose=np.array([0, 0, 0]), has_ball_sense=True
            ),
            testing.generate_test_robot(robot_id=1, pose=np.array([1, 1, 0])),
            testing.generate_test_robot(robot_id=2, pose=np.array([2, 2, 0])),
            testing.generate_test_robot(robot_id=3, pose=np.array([2, 1, 0])),
            testing.generate_test_robot(robot_id=4, pose=np.array([1, 2, 0])),
        ]
    )
    # Construct the world state.
    out_robots: List[Robot] = list(free_robots)
    their_robots: List[Robot] = []
    world_state: WorldState = testing.generate_test_worldstate(
        our_robots=out_robots, their_robots=their_robots
    )
    # Assign the roles.
    results = role_assigner.assign_roles(requests, world_state, {})
    # Check that all roles are returned in results.
    assert len(results) == 4
    assert role_id_a in results
    assert role_id_b in results
    assert role_id_c in results
    assert role_id_d in results
    # Check that no role requests are filled due to the cost function.
    assert not results[role_id_a].is_filled()
    assert not results[role_id_b].is_filled()
    assert not results[role_id_c].is_filled()
    assert not results[role_id_d].is_filled()
| RoboJackets/robocup-software | rj_gameplay/tests/stp/role/test_naive_assignment.py | Python | apache-2.0 | 24,116 |
#!/usr/bin/env python3
import json
import os
import unittest
from npoapi import Pages
from npoapi.data.api import PagesForm, PagesSearchType, TextMatcherListType, TextMatcherType
# Target API environment for this integration test (acceptance).
ENV = "acc"
# Client credentials/config are looked up two levels above this file.
CONFIG_DIR = os.path.dirname(os.path.dirname(__file__))
# Toggle verbose client logging.
DEBUG = False
class PagesTest(unittest.TestCase):
    """Integration test for the NPO Pages search API (hits the ENV backend)."""

    def test_search(self):
        """Search pages of type HOME against the 'vpro' profile and check the
        response is valid JSON."""
        client = self.get_client()
        form = PagesForm()
        form.searches = PagesSearchType()
        form.searches.types = TextMatcherListType()
        matcher = TextMatcherType()
        matcher.value = "HOME"
        form.searches.types.matcher = [matcher]
        result = client.search(form=form, profile="vpro")
        # json.loads is the idiomatic spelling of json.JSONDecoder().decode,
        # and the test now actually asserts on the decoded payload.
        result = json.loads(result)
        self.assertIsNotNone(result)

    def get_client(self):
        """Build a Pages client configured from CONFIG_DIR for ENV.

        (Removed a stray debug print of the test directory.)
        """
        return Pages().configured_login(config_dir=CONFIG_DIR).env(ENV).debug(DEBUG)
| npo-poms/pyapi | tests/integration/npoapi_pages_test.py | Python | gpl-3.0 | 864 |
# coding=utf-8
'''
###########################################
Approach:

Step 1 -- datagram spoofing: continuously broadcast our own ARP reply packets
to the local network, advertising our MAC address for the target IP so that
traffic is redirected to us. arp_attack.py performs this and can run in its
own process/thread.

Step 2 -- handle the packets lured to us: for every packet arriving on the
network interface, check the source and destination addresses; packets whose
destination IP does not match our own are forwarded on.
#############################################
'''
import socket
import multi_process_test
from ARP_attack import arp_attack
from packet_investigate import main_start
import argparse
import os
from multiprocessing import Process
from utils import start_threading
if __name__ == '__main__':
    # Run the ARP spoofer and the packet processor concurrently, each on its
    # own thread (start_threading comes from utils).
    start_threading(arp_attack, ())
    start_threading(main_start, ())
    # NOTE(review): the disabled multiprocessing variant below is buggy as
    # written -- Process(target=arp_attack()) CALLS the function immediately
    # instead of passing the callable (it should be target=arp_attack).
    '''
    p_1 = Process(target=arp_attack())
    p_1.start()
    p_2 = Process(target=main_start())
    p_2.start()
    '''
| Great-Li-Xin/PythonDev | ViolentPython/FKTCT/macOS&&LinuxPlatform/start.py | Python | mit | 926 |
#!/usr/bin/env python3
import os
import sys
import argparse
import ctypes
import pandas
# Type tags for Variant.field_type.
# These are hardcoded from fsti_type in fsti-defs.h
LONG = 8
DBL = 13
STR = 15
# Modes for fsti_py_config_set.
# These are defined in fsti-pythoncalls.c
NEW = 0
REPLACE = 1
NEW_REPLACE = 2
class Value(ctypes.Union):
    # Mirrors the C union used by libfaststi config variants: exactly one of
    # the three members is meaningful, selected by Variant.field_type.
    _fields_ = [
        ("longint", ctypes.c_long,),
        ("dbl", ctypes.c_double,),
        ("string", ctypes.c_char_p),
    ]
class Variant(ctypes.Structure):
    # Tagged value returned by fsti_py_config_get; field_type holds one of
    # the LONG/DBL/STR codes defined above (from fsti-defs.h).
    _fields_ = [
        ("value", Value,),
        ("field_type", ctypes.c_int),
    ]
class Config:
    """Wrapper for loading a faststi configuration file into a C simset."""

    def __init__(self, dll, simset, filename=None):
        # dll: the loaded libfaststi; simset: opaque pointer obtained from
        # fsti_py_simset_init.
        self.dll = dll
        self._parameters = {}
        self._simset = simset
        if filename:
            self.load(filename)

    def load(self, filename):
        """Load *filename* into the underlying C simulation set."""
        load_config_file = self.dll.fsti_simset_load_config_file
        load_config_file.argtypes = [ctypes.c_void_p, ctypes.c_char_p,]
        load_config_file(self._simset, filename.encode('utf-8'))
class Simulation:
    """Handle to a single simulation owned by libfaststi.

    The C library owns the memory at ``self._address``; a zero address means
    the simulation has been run (consumed) or freed.
    """

    def __init__(self, dll, address):
        self.dll = dll
        self._address = address

    def get_id(self):
        """Return the C-side simulation id, or -1 when this handle is dead."""
        if self._address:
            self.dll.fsti_py_simulation_id.argtypes = [ctypes.c_void_p, ]
            return self.dll.fsti_py_simulation_id(self._address)
        else:
            return -1

    def get_parm(self, key, index=0):
        """Fetch configuration entry *key*[*index*] as a Python value.

        Returns None for field types other than LONG/DBL/STR.
        """
        self.dll.fsti_py_config_get.argtypes = [ctypes.c_void_p,
                                                ctypes.c_char_p,
                                                ctypes.c_size_t, ]
        self.dll.fsti_py_config_get.restype = ctypes.POINTER(Variant)
        var = self.dll.fsti_py_config_get(self._address,
                                          key.encode('utf-8'),
                                          index)[0]
        # These types are encoded in fsti_type in fsti-defs.h
        if var.field_type == LONG:
            return var.value.longint
        elif var.field_type == DBL:
            return var.value.dbl
        elif var.field_type == STR:
            return var.value.string.decode("utf-8")

    def set_parm(self, key, values):
        """Replace configuration entry *key* with the sequence *values*
        (joined with ';' as the C API expects)."""
        self.dll.fsti_py_config_set.argtypes = [ctypes.c_void_p,
                                                ctypes.c_char_p,
                                                ctypes.c_size_t,
                                                ctypes.c_char_p,
                                                ctypes.c_int,
                                                ctypes.c_char_p, ]
        vals = ";".join(str(i) for i in values).encode("utf-8")
        self.dll.fsti_py_config_set(self._address,
                                    key.encode("utf-8"),
                                    len(values),
                                    vals,
                                    REPLACE,
                                    "".encode("utf-8"))

    def run(self):
        """Execute this single simulation.

        Fixed: the original body referenced the undefined name ``n`` and the
        nonexistent attribute ``self._id`` (NameError/AttributeError on any
        call); it now builds the one-element address array the C entry point
        expects, mirroring SimulationSet.run.
        """
        self.dll.fsti_py_simulations_exec.argtypes = \
            [ctypes.c_int, ctypes.c_void_p * 1]
        arr = (ctypes.c_void_p * 1)(self._address)
        self.dll.fsti_py_simulations_exec(1, arr)
        # The library writes the (possibly freed) address back into the array.
        self._address = arr[0]

    def address(self):
        """Raw C pointer value for this simulation (0 when dead)."""
        return self._address

    def is_alive(self):
        """True while the C library still holds this simulation."""
        if self._address:
            return True
        else:
            return False

    def set_freed(self):
        """Mark the handle dead after the library freed the simulation."""
        self._address = 0

    def __del__(self):
        # Only free when the C side still owns the memory.
        if self._address:
            self.dll.fsti_py_simulation_free.argtypes = [ctypes.c_void_p, ]
            self.dll.fsti_py_simulation_free(self._address)
class SimulationSet:
    """Loads libfaststi, owns a set of Simulation handles, and runs them."""

    def __init__(self, config_filename=None, libname="libfaststi.so",
                 datapath=None):
        if datapath:
            os.environ["FSTI_DATA"] = datapath
        self.dll = ctypes.CDLL(libname)
        self.dataset = None
        self._simset = self.dll.fsti_py_simset_init()
        self.config = Config(self.dll, self._simset, config_filename)
        self.dll.fsti_py_simulations_get.argtypes = [ctypes.c_void_p, ]
        self.dll.fsti_py_simulations_get.restype = \
            ctypes.POINTER(ctypes.c_void_p)
        sim_arr = self.dll.fsti_py_simulations_get(self._simset)
        # Element 0 of the returned array holds its length.
        self.simulations = [Simulation(self.dll, sim_arr[i])
                            for i in range(1, sim_arr[0] + 1)]

    def results_as_dataframe(self,
                             filename="results.csv",
                             sort=True,
                             sep=";"):
        """Normalize the CSV results file in place and load it as a DataFrame.

        Sorting pushes the header row (marked by the word "header") to the
        end of the file; it is moved back to the front, and a default header
        is inserted when none is present.

        Fixes: files are closed via ``with``; the stray class-level
        ``global lines`` statement was removed; an empty results file no
        longer raises IndexError; ``sep`` is passed to pandas by keyword,
        as required since pandas 2.0.
        """
        with open(filename) as results:
            lines = results.readlines()
        header = "name" + sep
        if sort:
            lines.sort()
        if lines and "header" in lines[-1]:
            # Move the real header row from the end back to the front.
            lines.insert(0, lines.pop())
        if not lines or header not in lines[0]:
            lines.insert(0, "name;sim;num;date;description;value\n")
        with open(filename, "w") as results:
            results.writelines(lines)
        return pandas.read_csv(filename, sep=sep)

    def run(self, lo=0, hi=None):
        """Run simulations[lo:hi] through the C library and collect results.

        Returns the results DataFrame (also stored on self.dataset), or None
        when there is nothing alive to run.
        """
        if hi is None:
            hi = len(self.simulations)
        n = hi - lo
        if n > 0 and self.simulations[lo].is_alive():
            # Read output settings before the simulations are consumed.
            sep = self.simulations[lo].get_parm("csv_delimiter")
            filename = self.simulations[lo].get_parm("results_file")
            self.dll.fsti_py_simulations_exec.argtypes = \
                [ctypes.c_int, ctypes.c_void_p * n]
            addresses = [s.address() for s in self.simulations[lo:hi]]
            arr = (ctypes.c_void_p * n)(*addresses)
            self.dll.fsti_py_simulations_exec(n, arr)
            # The C library freed these simulations; mark the handles dead.
            for i in range(lo, hi):
                self.simulations[i].set_freed()
            if filename:
                self.dataset = self.results_as_dataframe(filename, True, sep)
            return self.dataset

    def __del__(self):
        self.dll.fsti_py_simset_free(self._simset)
def main():
    """Command-line entry point: parse --config/--data_path, build a
    SimulationSet, and run every simulation in it."""
    parser = argparse.ArgumentParser(description="Run simulations of "
                                     "sexually transmitted infection "
                                     "epidemics.")
    parser.add_argument('--config', '-c', required=True,
                        help='Simulation set configuration file')
    parser.add_argument('--data_path', '-d',
                        help='Path to data files')
    args = parser.parse_args()
    simset = SimulationSet(config_filename=args.config,
                           datapath=args.data_path)
    simset.run()


if __name__ == '__main__':
    main()
| nathangeffen/faststi | scripts/faststi.py | Python | gpl-3.0 | 6,539 |
# Microsoft Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
import crypt
import random
import string
import struct
import xml.dom.minidom as minidom
import sys
from distutils.version import LooseVersion
def parse_doc(xml_text):
    """
    Parse xml document from string.
    """
    # The minidom lib has some issue with unicode in python2, so the string
    # is encoded to utf-8 before parsing.
    return minidom.parseString(xml_text.encode('utf-8'))
def findall(root, tag, namespace=None):
    """
    Get all nodes by tag and namespace under Node root.

    A None root yields an empty list.
    """
    if root is None:
        return []
    return (root.getElementsByTagName(tag) if namespace is None
            else root.getElementsByTagNameNS(namespace, tag))
def find(root, tag, namespace=None):
    """
    Get first node by tag and namespace under Node root.

    Returns None when no matching node exists.
    """
    nodes = findall(root, tag, namespace=namespace)
    if nodes is not None and len(nodes) >= 1:
        return nodes[0]
    else:
        return None
def gettext(node):
    """
    Get node text.

    Returns the data of the first TEXT_NODE child, or None when the node is
    None or has no text children.
    """
    if node is None:
        return None
    text_children = (child.data for child in node.childNodes
                     if child.nodeType == child.TEXT_NODE)
    return next(text_children, None)
def findtext(root, tag, namespace=None):
    """
    Get text of node by tag and namespace under Node root.

    Returns None when the node is missing or has no text child.
    """
    node = find(root, tag, namespace=namespace)
    return gettext(node)
def getattrib(node, attr_name):
    """
    Get attribute of xml node.

    Returns None for a None node; per the DOM API a missing attribute comes
    back as the empty string.
    """
    return node.getAttribute(attr_name) if node is not None else None
def unpack(buf, offset, index_range):
    """
    Unpack bytes into python values.

    :param buf: bytes or str buffer to read from
    :param offset: base offset into *buf*
    :param index_range: byte offsets (relative to *offset*) listed from most
        to least significant

    Fixed: the third parameter was named ``range``, shadowing the builtin;
    both in-file callers pass it positionally, so the rename is safe.
    """
    result = 0
    for i in index_range:
        result = (result << 8) | str_to_ord(buf[offset + i])
    return result
def unpack_little_endian(buf, offset, length):
    """
    Unpack little endian bytes into python values.
    """
    # Walk the indices high-to-low so the last byte is most significant.
    return unpack(buf, offset, list(range(length - 1, -1, -1)))
def unpack_big_endian(buf, offset, length):
    """
    Unpack big endian bytes into python values.
    """
    # Walk the indices low-to-high so the first byte is most significant.
    return unpack(buf, offset, list(range(0, length)))
def hex_dump3(buf, offset, length):
    """
    Dump range of buf in formatted hex (two uppercase digits per byte,
    no separators).
    """
    return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]])
def hex_dump2(buf):
    """
    Dump buf in formatted hex (whole buffer; see hex_dump3).
    """
    return hex_dump3(buf, 0, len(buf))
def is_in_range(a, low, high):
    """
    Return True if low <= a <= high.
    """
    return low <= a <= high
def is_printable(ch):
    """
    Return True if character is displayable.

    Note: only ASCII letters and digits count as printable here; punctuation
    and whitespace do not.
    """
    return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z'))
            or is_in_range(ch, str_to_ord('a'), str_to_ord('z'))
            or is_in_range(ch, str_to_ord('0'), str_to_ord('9')))
def hex_dump(buffer, size):
    """
    Return Hex formated dump of a 'buffer' of 'size'.

    Emits 16 bytes per line: a 6-digit offset, the hex bytes (with an extra
    gap after the eighth byte), then an ASCII column with '.' standing in for
    non-printable bytes. A negative size means "the whole buffer".
    NOTE(review): the parameter 'buffer' shadows the Python 2 builtin.
    """
    if size < 0:
        size = len(buffer)
    result = ""
    for i in range(0, size):
        if (i % 16) == 0:
            result += "%06X: " % i
        byte = buffer[i]
        if type(byte) == str:
            byte = ord(byte.decode('latin1'))
        result += "%02X " % byte
        if (i & 15) == 7:
            # Mid-row gap after the eighth byte.
            result += " "
        if ((i + 1) % 16) == 0 or (i + 1) == size:
            # End of a row (or of the data): pad a short final row so the
            # ASCII column lines up, then emit the ASCII rendering.
            j = i
            while ((j + 1) % 16) != 0:
                result += "   "
                if (j & 7) == 7:
                    result += " "
                j += 1
            result += " "
            for j in range(i - (i % 16), i + 1):
                byte = buffer[j]
                if type(byte) == str:
                    byte = str_to_ord(byte.decode('latin1'))
                k = '.'
                if is_printable(byte):
                    k = chr(byte)
                result += k
            if (i + 1) != size:
                result += "\n"
    return result
def str_to_ord(a):
    """
    Allows indexing into a string or an array of integers transparently.
    Generic utility function.
    """
    # Length-1 str/bytes values are converted; integers pass through.
    return ord(a) if type(a) in (type(b''), type(u'')) else a
def compare_bytes(a, b, start, length):
    """Return True when a[start:start+length] equals b over the same range,
    comparing per-byte values via str_to_ord (so str and bytes mix)."""
    for offset in range(start, start + length):
        if str_to_ord(a[offset]) != str_to_ord(b[offset]):
            return False
    return True
def int_to_ip4_addr(a):
    """
    Build DHCP request string.

    Renders a 32-bit integer as a dotted-quad IPv4 address string.
    """
    octets = [(a >> shift) & 0xFF for shift in (24, 16, 8, 0)]
    return "%u.%u.%u.%u" % tuple(octets)
def hexstr_to_bytearray(a):
    """
    Return hex string packed into a binary struct.

    A trailing odd nibble is ignored, matching the pair-wise walk of the
    input string.
    """
    pairs = (a[i:i + 2] for i in range(0, 2 * (len(a) // 2), 2))
    return b"".join(struct.pack("B", int(pair, 16)) for pair in pairs)
def set_ssh_config(config, name, val):
    """Set option *name* to *val* in the sshd config line list.

    Every existing line starting with *name* is replaced in place.
    Otherwise the new option is inserted before the first "Match" block
    (Match blocks must stay at the end of sshd_config), or appended when
    there is none.

    Fixes: the original raised NameError on an empty config list (loop
    variable never bound) and, when the option was absent and no Match block
    existed, inserted before the last line instead of appending.

    :param config: list of config lines, modified in place
    :param name: option name (matched by str.startswith)
    :param val: option value
    :return: the (mutated) config list
    """
    setting = "{0} {1}".format(name, val)
    found = False
    insert_at = len(config)
    for i in range(0, len(config)):
        if config[i].startswith(name):
            config[i] = setting
            found = True
        elif config[i].startswith("Match"):
            # Match block must be put in the end of sshd config
            insert_at = i
            break
    if not found:
        config.insert(insert_at, setting)
    return config
def remove_bom(c):
    """Strip a 3-byte UTF-8 BOM-like prefix (three bytes > 128) from *c*.

    Fixed: inputs shorter than 3 bytes previously raised IndexError; they are
    now returned unchanged.
    """
    if len(c) >= 3 and str_to_ord(c[0]) > 128 and str_to_ord(c[1]) > 128 and \
            str_to_ord(c[2]) > 128:
        c = c[3:]
    return c
def gen_password_hash(password, crypt_id, salt_len):
    """Return a crypt(3) hash of *password* using a "$id$salt" setting.

    :param password: clear-text password to hash
    :param crypt_id: crypt method id (e.g. 6 for SHA-512)
    :param salt_len: number of salt characters to generate

    Fixed: the salt is now drawn from random.SystemRandom (the OS CSPRNG)
    instead of the seedable, predictable Mersenne Twister, which is not
    suitable for security-sensitive values.
    """
    collection = string.ascii_letters + string.digits
    rng = random.SystemRandom()
    salt = ''.join(rng.choice(collection) for _ in range(salt_len))
    salt = "${0}${1}".format(crypt_id, salt)
    return crypt.crypt(password, salt)
def get_bytes_from_pem(pem_str):
    """
    Return the base64 payload of a PEM blob: every line except the
    ----BEGIN/END---- armor lines, concatenated without separators.
    """
    return "".join(line for line in pem_str.split('\n') if "----" not in line)
Version = LooseVersion
| nathanleclaire/WALinuxAgent | azurelinuxagent/utils/textutil.py | Python | apache-2.0 | 6,454 |
# -*- coding: utf-8 -*-
import os
import threading
import types
import time # used to eval time.strftime expressions
from datetime import datetime, timedelta
import logging
from copy import deepcopy
import openerp.pooler as pooler
import openerp.sql_db as sql_db
import misc
from config import config
import yaml_tag
import yaml
import re
from lxml import etree
from openerp import SUPERUSER_ID
# YAML import needs both safe and unsafe eval, but let's
# default to /safe/.
unsafe_eval = eval
from safe_eval import safe_eval as eval
import assertion_report
_logger = logging.getLogger(__name__)
class YamlImportException(Exception):
    """Raised when a YAML datum is malformed or references unknown data."""
    pass

class YamlImportAbortion(Exception):
    """Raised to abort processing of the current YAML file entirely."""
    pass
def _is_yaml_mapping(node, tag_constructor):
    """Return True when *node* is a single-key dict whose only key is an
    instance of *tag_constructor*, i.e. a YAML custom-tag mapping.

    NOTE: relies on the Python 2-only ``types.DictionaryType``.
    """
    value = isinstance(node, types.DictionaryType) \
            and len(node.keys()) == 1 \
            and isinstance(node.keys()[0], tag_constructor)
    return value
# Predicates classifying a parsed YAML node by its custom tag type (see
# yaml_tag). Nodes appear either as bare tag instances or as single-key
# mappings keyed by a tag instance; _is_yaml_mapping covers the latter.

def is_comment(node):
    """Bare strings are treated as comments by the interpreter."""
    return isinstance(node, types.StringTypes)

def is_assert(node):
    """Node is an !assert tag (bare or mapping form)."""
    return isinstance(node, yaml_tag.Assert) \
           or _is_yaml_mapping(node, yaml_tag.Assert)

def is_record(node):
    """Node is a !record mapping."""
    return _is_yaml_mapping(node, yaml_tag.Record)

def is_python(node):
    """Node is a !python mapping."""
    return _is_yaml_mapping(node, yaml_tag.Python)

def is_menuitem(node):
    """Node is a !menuitem tag (bare or mapping form)."""
    return isinstance(node, yaml_tag.Menuitem) \
           or _is_yaml_mapping(node, yaml_tag.Menuitem)

def is_function(node):
    """Node is a !function tag (bare or mapping form)."""
    return isinstance(node, yaml_tag.Function) \
           or _is_yaml_mapping(node, yaml_tag.Function)

def is_report(node):
    """Node is a !report tag."""
    return isinstance(node, yaml_tag.Report)

def is_workflow(node):
    """Node is a !workflow tag."""
    return isinstance(node, yaml_tag.Workflow)

def is_act_window(node):
    """Node is an !act_window tag."""
    return isinstance(node, yaml_tag.ActWindow)

def is_delete(node):
    """Node is a !delete tag."""
    return isinstance(node, yaml_tag.Delete)

def is_context(node):
    """Node is a !context tag."""
    return isinstance(node, yaml_tag.Context)

def is_url(node):
    """Node is a !url tag."""
    return isinstance(node, yaml_tag.Url)

def is_eval(node):
    """Node is an !eval tag."""
    return isinstance(node, yaml_tag.Eval)

def is_ref(node):
    """Node is a !ref tag (bare or mapping form)."""
    return isinstance(node, yaml_tag.Ref) \
           or _is_yaml_mapping(node, yaml_tag.Ref)

def is_ir_set(node):
    """Node is an !ir_set mapping."""
    return _is_yaml_mapping(node, yaml_tag.IrSet)

def is_string(node):
    """Node is a plain (possibly unicode) string."""
    return isinstance(node, basestring)
class RecordDictWrapper(dict):
    """
    Used to pass a record as locals in eval:
    records do not strictly behave like dict, so we force them to.
    """
    def __init__(self, record):
        # record: a browse-record-like object whose fields shadow dict entries
        self.record = record
    def __getitem__(self, key):
        # Fields present on the record win over plain dict entries.
        if key in self.record:
            return self.record[key]
        return dict.__getitem__(self, key)
class YamlInterpreter(object):
    def __init__(self, cr, module, id_map, mode, filename, report=None, noupdate=False, loglevel=logging.DEBUG):
        """Interpreter for one YAML data/test file.

        :param cr: database cursor
        :param module: name of the module that owns the YAML file
        :param id_map: cache mapping xml_id -> database id, shared across files
        :param mode: import mode, forwarded to record creation
        :param filename: path of the YAML file (used in error reports)
        :param report: assertion_report collecting assert successes/failures
        :param noupdate: default noupdate flag for processed nodes
        :param loglevel: level used by self._log
        """
        self.cr = cr
        self.module = module
        self.id_map = id_map
        self.mode = mode
        self.filename = filename
        if report is None:
            report = assertion_report.assertion_report()
        self.assertion_report = report
        self.noupdate = noupdate
        self.loglevel = loglevel
        self.pool = pooler.get_pool(cr.dbname)
        self.uid = 1
        self.context = {} # openerp context
        # Names available to expressions evaluated from the YAML file.
        self.eval_context = {'ref': self._ref(),
                             '_ref': self._ref(), # added '_ref' so that record['ref'] is possible
                             'time': time,
                             'datetime': datetime,
                             'timedelta': timedelta}
    def _log(self, *args, **kwargs):
        """Log at the interpreter's configured level."""
        _logger.log(self.loglevel, *args, **kwargs)
    def _ref(self):
        # Return a ref(xml_id) callable for use inside evaluated expressions.
        return lambda xml_id: self.get_id(xml_id)
    def get_model(self, model_name):
        """Return the model object from the pool, asserting that it exists."""
        model = self.pool.get(model_name)
        assert model, "The model %s does not exist." % (model_name,)
        return model
    def validate_xml_id(self, xml_id):
        """Sanity-check an external id: at most one dot (module.record_id
        form), a referenced module must be installed, and overlong local ids
        are logged as errors."""
        # NOTE: 'id' shadows the builtin; kept for historical consistency.
        id = xml_id
        if '.' in xml_id:
            module, id = xml_id.split('.', 1)
            assert '.' not in id, "The ID reference '%s' must contains maximum one dot.\n" \
                    "It is used to refer to other modules ID, in the form: module.record_id" \
                    % (xml_id,)
            if module != self.module:
                module_count = self.pool.get('ir.module.module').search_count(self.cr, self.uid, \
                        ['&', ('name', '=', module), ('state', 'in', ['installed'])])
                assert module_count == 1, 'The ID "%s" refers to an uninstalled module.' % (xml_id,)
        if len(id) > 64: # TODO where does 64 come from (DB is 128)? should be a constant or loaded form DB
            _logger.error('id: %s is to long (max: 64)', id)
    def get_id(self, xml_id):
        """Resolve an xml id (or pass through an integer id) to a database id.

        Resolution order: False/None passthrough, literal int, local id_map
        cache, then ir.model.data lookup (cached into id_map on success).
        Raises ValueError when the reference cannot be resolved.
        """
        if xml_id is False or xml_id is None:
            return False
        #if not xml_id:
        #    raise YamlImportException("The xml_id should be a non empty string.")
        elif isinstance(xml_id, types.IntType):
            id = xml_id
        elif xml_id in self.id_map:
            id = self.id_map[xml_id]
        else:
            if '.' in xml_id:
                module, checked_xml_id = xml_id.split('.', 1)
            else:
                module = self.module
                checked_xml_id = xml_id
            try:
                _, id = self.pool.get('ir.model.data').get_object_reference(self.cr, self.uid, module, checked_xml_id)
                self.id_map[xml_id] = id
            except ValueError:
                raise ValueError("""%s not found when processing %s.
This Yaml file appears to depend on missing data. This often happens for
tests that belong to a module's test suite and depend on each other.""" % (checked_xml_id, self.filename))
        return id
    def get_context(self, node, eval_dict):
        """Return a copy of the current context, updated with the node's
        evaluated `context` attribute (if any)."""
        context = self.context.copy()
        if node.context:
            context.update(eval(node.context, eval_dict))
        return context
    def isnoupdate(self, node):
        # node-level noupdate flag; the interpreter-wide flag wins either way
        return self.noupdate or node.noupdate or False
def _get_first_result(self, results, default=False):
if len(results):
value = results[0]
if isinstance(value, types.TupleType):
value = value[0]
else:
value = default
return value
    def process_comment(self, node):
        """A YAML comment node is returned as-is (it is logged by the caller)."""
        return node
    def _log_assert_failure(self, msg, *args):
        """Record a failed assertion in the test report and log it as error."""
        from openerp.modules import module # cannot be made before (loop)
        basepath = module.get_module_path(self.module)
        self.assertion_report.record_failure(
            details=dict(module=self.module,
                         testfile=os.path.relpath(self.filename, basepath),
                         msg=msg,
                         msg_args=deepcopy(args)))
        _logger.error(msg, *args)
    def _get_assertion_id(self, assertion):
        """Return the record ids an !assert node targets (explicit id or search)."""
        if assertion.id:
            ids = [self.get_id(assertion.id)]
        elif assertion.search:
            q = eval(assertion.search, self.eval_context)
            ids = self.pool.get(assertion.model).search(self.cr, self.uid, q, context=assertion.context)
        else:
            raise YamlImportException('Nothing to assert: you must give either an id or a search criteria.')
        return ids
    def process_assert(self, node):
        """Process an !assert node: resolve the target record(s), evaluate
        each expression against them, and record success or failure in the
        assertion report."""
        if isinstance(node, dict):
            assertion, expressions = node.items()[0]
        else:
            assertion, expressions = node, []
        if self.isnoupdate(assertion) and self.mode != 'init':
            _logger.warning('This assertion was not evaluated ("%s").', assertion.string)
            return
        model = self.get_model(assertion.model)
        ids = self._get_assertion_id(assertion)
        if assertion.count is not None and len(ids) != assertion.count:
            msg = 'assertion "%s" failed!\n' \
                  ' Incorrect search count:\n' \
                  ' expected count: %d\n' \
                  ' obtained count: %d\n'
            args = (assertion.string, assertion.count, len(ids))
            self._log_assert_failure(msg, *args)
        else:
            context = self.get_context(assertion, self.eval_context)
            for id in ids:
                record = model.browse(self.cr, self.uid, id, context)
                for test in expressions:
                    try:
                        success = unsafe_eval(test, self.eval_context, RecordDictWrapper(record))
                    except Exception, e:
                        _logger.debug('Exception during evaluation of !assert block in yaml_file %s.', self.filename, exc_info=True)
                        raise YamlImportAbortion(e)
                    if not success:
                        msg = 'Assertion "%s" FAILED\ntest: %s\n'
                        args = (assertion.string, test)
                        # pretty-print both sides of the first comparison
                        # operator found in the failing expression
                        for aop in ('==', '!=', '<>', 'in', 'not in', '>=', '<=', '>', '<'):
                            if aop in test:
                                left, right = test.split(aop,1)
                                lmsg = ''
                                rmsg = ''
                                try:
                                    lmsg = unsafe_eval(left, self.eval_context, RecordDictWrapper(record))
                                except Exception, e:
                                    lmsg = '<exc>'
                                try:
                                    rmsg = unsafe_eval(right, self.eval_context, RecordDictWrapper(record))
                                except Exception, e:
                                    rmsg = '<exc>'
                                msg += 'values: ! %s %s %s'
                                args += ( lmsg, aop, rmsg )
                                break
                        self._log_assert_failure(msg, *args)
                        return
            else: # all tests were successful for this assertion tag (no break)
                self.assertion_report.record_success()
def _coerce_bool(self, value, default=False):
if isinstance(value, types.BooleanType):
b = value
if isinstance(value, types.StringTypes):
b = value.strip().lower() not in ('0', 'false', 'off', 'no')
elif isinstance(value, types.IntType):
b = bool(value)
else:
b = default
return b
    def create_osv_memory_record(self, record, fields):
        """Create a transient (osv_memory) record; these are not tracked in
        ir.model.data, only in the local id_map cache."""
        model = self.get_model(record.model)
        context = self.get_context(record, self.eval_context)
        record_dict = self._create_record(model, fields)
        id_new = model.create(self.cr, self.uid, record_dict, context=context)
        self.id_map[record.id] = int(id_new)
        return record_dict
def process_record(self, node):
import openerp.osv as osv
record, fields = node.items()[0]
model = self.get_model(record.model)
view_id = record.view
if view_id and (view_id is not True) and isinstance(view_id, basestring):
module = self.module
if '.' in view_id:
module, view_id = view_id.split('.',1)
view_id = self.pool.get('ir.model.data').get_object_reference(self.cr, SUPERUSER_ID, module, view_id)[1]
if model.is_transient():
record_dict=self.create_osv_memory_record(record, fields)
else:
self.validate_xml_id(record.id)
try:
self.pool.get('ir.model.data')._get_id(self.cr, SUPERUSER_ID, self.module, record.id)
default = False
except ValueError:
default = True
if self.isnoupdate(record) and self.mode != 'init':
id = self.pool.get('ir.model.data')._update_dummy(self.cr, SUPERUSER_ID, record.model, self.module, record.id)
# check if the resource already existed at the last update
if id:
self.id_map[record] = int(id)
return None
else:
if not self._coerce_bool(record.forcecreate):
return None
#context = self.get_context(record, self.eval_context)
#TOFIX: record.context like {'withoutemployee':True} should pass from self.eval_context. example: test_project.yml in project module
context = record.context
view_info = False
if view_id:
varg = view_id
if view_id is True: varg = False
view_info = model.fields_view_get(self.cr, SUPERUSER_ID, varg, 'form', context)
record_dict = self._create_record(model, fields, view_info, default=default)
_logger.debug("RECORD_DICT %s" % record_dict)
id = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, record.model, \
self.module, record_dict, record.id, noupdate=self.isnoupdate(record), mode=self.mode, context=context)
self.id_map[record.id] = int(id)
if config.get('import_partial'):
self.cr.commit()
    def _create_record(self, model, fields, view_info=False, parent={}, default=True):
        """This function processes the !record tag in yaml files. It simulates the record creation through an xml
        view (either specified on the !record tag or the default one for this object), including the calls to
        on_change() functions, and sending only values for fields that aren't set as readonly.
        :param model: model instance
        :param fields: dictionary mapping the field names and their values
        :param view_info: result of fields_view_get() called on the object
        :param parent: dictionary containing the values already computed for the parent, in case of one2many fields
        :param default: if True, the default values must be processed too or not
        :return: dictionary mapping the field names and their values, ready to use when calling the create() function
        :rtype: dict
        """
        def _get_right_one2many_view(fg, field_name, view_type):
            one2many_view = fg[field_name]['views'].get(view_type)
            # if the view is not defined inline, we call fields_view_get()
            if not one2many_view:
                one2many_view = self.pool.get(fg[field_name]['relation']).fields_view_get(self.cr, SUPERUSER_ID, False, view_type, self.context)
            return one2many_view
        def process_val(key, val):
            # NOTE(review): the final readonly check below reads `el`, a free
            # variable bound by the view-walk loop further down; this assumes
            # process_val is only called while `el` is the current <field>
            # element -- confirm before reusing this helper elsewhere.
            if fg[key]['type'] == 'many2one':
                if type(val) in (tuple,list):
                    val = val[0]
            elif fg[key]['type'] == 'one2many':
                if val and isinstance(val, (list,tuple)) and isinstance(val[0], dict):
                    # we want to return only the fields that aren't readonly
                    # For that, we need to first get the right tree view to consider for the field `key´
                    one2many_tree_view = _get_right_one2many_view(fg, key, 'tree')
                    arch = etree.fromstring(one2many_tree_view['arch'].encode('utf-8'))
                    for rec in val:
                        # make a copy for the iteration, as we will alter `rec´
                        rec_copy = rec.copy()
                        for field_key in rec_copy:
                            # if field is missing in view or has a readonly modifier, drop it
                            field_elem = arch.xpath("//field[@name='%s']" % field_key)
                            if field_elem and (field_elem[0].get('modifiers', '{}').find('"readonly": true') >= 0):
                                # TODO: currently we only support if readonly is True in the modifiers. Some improvement may be done in
                                # order to support also modifiers that look like {"readonly": [["state", "not in", ["draft", "confirm"]]]}
                                del rec[field_key]
                    # now that unwanted values have been removed from val, we can encapsulate it in a tuple as returned value
                    val = map(lambda x: (0,0,x), val)
            elif fg[key]['type'] == 'many2many':
                if val and isinstance(val,(list,tuple)) and isinstance(val[0], (int,long)):
                    val = [(6,0,val)]
            # we want to return only the fields that aren't readonly
            if el.get('modifiers', '{}').find('"readonly": true') >= 0:
                # TODO: currently we only support if readonly is True in the modifiers. Some improvement may be done in
                # order to support also modifiers that look like {"readonly": [["state", "not in", ["draft", "confirm"]]]}
                return False
            return val
        if view_info:
            # NOTE(review): decode('utf-8') here vs encode('utf-8') in
            # process_val above -- presumably arch is a unicode or byte
            # string depending on the source; confirm.
            arch = etree.fromstring(view_info['arch'].decode('utf-8'))
            view = arch if len(arch) else False
        else:
            view = False
        fields = fields or {}
        if view is not False:
            fg = view_info['fields']
            # gather the default values on the object. (Can't use `fields´ as parameter instead of {} because we may
            # have references like `base.main_company´ in the yaml file and it's not compatible with the function)
            defaults = default and model._add_missing_default_values(self.cr, self.uid, {}, context=self.context) or {}
            # copy the default values in record_dict, only if they are in the view (because that's what the client does)
            # the other default values will be added later on by the create().
            record_dict = dict([(key, val) for key, val in defaults.items() if key in fg])
            # Process all on_change calls
            nodes = [view]
            while nodes:
                el = nodes.pop(0)
                if el.tag=='field':
                    field_name = el.attrib['name']
                    assert field_name in fg, "The field '%s' is defined in the form view but not on the object '%s'!" % (field_name, model._name)
                    if field_name in fields:
                        one2many_form_view = None
                        if (view is not False) and (fg[field_name]['type']=='one2many'):
                            # for one2many fields, we want to eval them using the inline form view defined on the parent
                            one2many_form_view = _get_right_one2many_view(fg, field_name, 'form')
                        field_value = self._eval_field(model, field_name, fields[field_name], one2many_form_view or view_info, parent=record_dict, default=default)
                        #call process_val to not update record_dict if values were given for readonly fields
                        val = process_val(field_name, field_value)
                        if val:
                            record_dict[field_name] = val
                        #if (field_name in defaults) and defaults[field_name] == field_value:
                        #    print '*** You can remove these lines:', field_name, field_value
                    #if field_name has a default value or a value is given in the yaml file, we must call its on_change()
                    elif field_name not in defaults:
                        continue
                    if not el.attrib.get('on_change', False):
                        continue
                    match = re.match("([a-z_1-9A-Z]+)\((.*)\)", el.attrib['on_change'])
                    assert match, "Unable to parse the on_change '%s'!" % (el.attrib['on_change'], )
                    # creating the context
                    class parent2(object):
                        def __init__(self, d):
                            self.d = d
                        def __getattr__(self, name):
                            return self.d.get(name, False)
                    ctx = record_dict.copy()
                    ctx['context'] = self.context
                    ctx['uid'] = SUPERUSER_ID
                    ctx['parent'] = parent2(parent)
                    for a in fg:
                        if a not in ctx:
                            ctx[a] = process_val(a, defaults.get(a, False))
                    # Evaluation args
                    args = map(lambda x: eval(x, ctx), match.group(2).split(','))
                    result = getattr(model, match.group(1))(self.cr, self.uid, [], *args)
                    for key, val in (result or {}).get('value', {}).items():
                        if key in fg:
                            record_dict[key] = process_val(key, val)
                        else:
                            _logger.debug("The returning field '%s' from your on_change call '%s'"
                                            " does not exist either on the object '%s', either in"
                                            " the view '%s'",
                                            key, match.group(1), model._name, view_info['name'])
                else:
                    # not a field: push children to keep walking the view tree
                    nodes = list(el) + nodes
        else:
            # no view available: evaluate fields directly, without defaults
            # or on_change simulation
            record_dict = {}
            for field_name, expression in fields.items():
                if field_name in record_dict:
                    continue
                field_value = self._eval_field(model, field_name, expression, default=False)
                record_dict[field_name] = field_value
        return record_dict
def process_ref(self, node, column=None):
assert node.search or node.id, '!ref node should have a `search` attribute or `id` attribute'
if node.search:
if node.model:
model_name = node.model
elif column:
model_name = column._obj
else:
raise YamlImportException('You need to give a model for the search, or a column to infer it.')
model = self.get_model(model_name)
q = eval(node.search, self.eval_context)
ids = model.search(self.cr, self.uid, q)
if node.use:
instances = model.browse(self.cr, self.uid, ids)
value = [inst[node.use] for inst in instances]
else:
value = ids
elif node.id:
value = self.get_id(node.id)
else:
value = None
return value
    def process_eval(self, node):
        """Evaluate a !eval node's expression in the interpreter context."""
        return eval(node.expression, self.eval_context)
    def _eval_field(self, model, field_name, expression, view_info=False, parent={}, default=True):
        """Convert a YAML field expression into a value suitable for create().

        Relational columns are turned into the ORM command tuples
        ((0,0,vals) / (6,0,ids)); string date/datetime values are validated
        against the server formats; !eval expressions are evaluated.
        Raises KeyError when the field does not exist on the model.
        """
        # TODO this should be refactored as something like model.get_field() in bin/osv
        if field_name in model._columns:
            column = model._columns[field_name]
        elif field_name in model._inherit_fields:
            column = model._inherit_fields[field_name][2]
        else:
            raise KeyError("Object '%s' does not contain field '%s'" % (model, field_name))
        if is_ref(expression):
            elements = self.process_ref(expression, column)
            if column._type in ("many2many", "one2many"):
                value = [(6, 0, elements)]
            else: # many2one
                if isinstance(elements, (list,tuple)):
                    value = self._get_first_result(elements)
                else:
                    value = elements
        elif column._type == "many2one":
            value = self.get_id(expression)
        elif column._type == "one2many":
            other_model = self.get_model(column._obj)
            value = [(0, 0, self._create_record(other_model, fields, view_info, parent, default=default)) for fields in expression]
        elif column._type == "many2many":
            ids = [self.get_id(xml_id) for xml_id in expression]
            value = [(6, 0, ids)]
        elif column._type == "date" and is_string(expression):
            # enforce ISO format for string date values, to be locale-agnostic during tests
            time.strptime(expression, misc.DEFAULT_SERVER_DATE_FORMAT)
            value = expression
        elif column._type == "datetime" and is_string(expression):
            # enforce ISO format for string datetime values, to be locale-agnostic during tests
            time.strptime(expression, misc.DEFAULT_SERVER_DATETIME_FORMAT)
            value = expression
        else: # scalar field
            if is_eval(expression):
                value = self.process_eval(expression)
            else:
                value = expression
            # raise YamlImportException('Unsupported column "%s" or value %s:%s' % (field_name, type(expression), expression))
        return value
    def process_context(self, node):
        """Process a !context node: replace the current context and honour
        optional `uid` / `noupdate` overrides."""
        self.context = node.__dict__
        if node.uid:
            self.uid = self.get_id(node.uid)
        if node.noupdate:
            self.noupdate = node.noupdate
    def process_python(self, node):
        """Process a !python block: compile and execute its statements, with
        the model bound as both `model` and (legacy) `self`. AssertionError
        is recorded as a test failure; other exceptions abort."""
        python, statements = node.items()[0]
        model = self.get_model(python.model)
        statements = statements.replace("\r\n", "\n")
        code_context = { 'model': model, 'cr': self.cr, 'uid': self.uid, 'log': self._log, 'context': self.context }
        code_context.update({'self': model}) # remove me when no !python block test uses 'self' anymore
        try:
            code_obj = compile(statements, self.filename, 'exec')
            unsafe_eval(code_obj, {'ref': self.get_id}, code_context)
        except AssertionError, e:
            self._log_assert_failure('AssertionError in Python code %s: %s', python.name, e)
            return
        except Exception, e:
            _logger.debug('Exception during evaluation of !python block in yaml_file %s.', self.filename, exc_info=True)
            raise
        else:
            self.assertion_report.record_success()
    def process_workflow(self, node):
        """Process a !workflow node: send a workflow signal to the record
        resolved from `ref`, or from a child model/eval node."""
        workflow, values = node.items()[0]
        if self.isnoupdate(workflow) and self.mode != 'init':
            return
        if workflow.ref:
            id = self.get_id(workflow.ref)
        else:
            if not values:
                raise YamlImportException('You must define a child node if you do not give a ref.')
            if not len(values) == 1:
                raise YamlImportException('Only one child node is accepted (%d given).' % len(values))
            value = values[0]
            if not 'model' in value and (not 'eval' in value or not 'search' in value):
                raise YamlImportException('You must provide a "model" and an "eval" or "search" to evaluate.')
            value_model = self.get_model(value['model'])
            local_context = {'obj': lambda x: value_model.browse(self.cr, self.uid, x, context=self.context)}
            local_context.update(self.id_map)
            id = eval(value['eval'], self.eval_context, local_context)
        if workflow.uid is not None:
            uid = workflow.uid
        else:
            uid = self.uid
        # reject signals that no transition in the database knows about
        self.cr.execute('select distinct signal from wkf_transition')
        signals=[x['signal'] for x in self.cr.dictfetchall()]
        if workflow.action not in signals:
            raise YamlImportException('Incorrect action %s. No such action defined' % workflow.action)
        import openerp.netsvc as netsvc
        wf_service = netsvc.LocalService("workflow")
        wf_service.trg_validate(uid, workflow.model, id, workflow.action, self.cr)
    def _eval_params(self, model, params):
        """Evaluate the positional parameters of a !function node; nested
        lists are evaluated recursively, dicts support the XML search/eval
        syntax."""
        args = []
        for i, param in enumerate(params):
            if isinstance(param, types.ListType):
                value = self._eval_params(model, param)
            elif is_ref(param):
                value = self.process_ref(param)
            elif is_eval(param):
                value = self.process_eval(param)
            elif isinstance(param, types.DictionaryType): # supports XML syntax
                param_model = self.get_model(param.get('model', model))
                if 'search' in param:
                    q = eval(param['search'], self.eval_context)
                    ids = param_model.search(self.cr, self.uid, q)
                    value = self._get_first_result(ids)
                elif 'eval' in param:
                    local_context = {'obj': lambda x: param_model.browse(self.cr, self.uid, x, self.context)}
                    local_context.update(self.id_map)
                    value = eval(param['eval'], self.eval_context, local_context)
                else:
                    raise YamlImportException('You must provide either a !ref or at least a "eval" or a "search" to function parameter #%d.' % i)
            else:
                value = param # scalar value
            args.append(value)
        return args
def process_function(self, node):
function, params = node.items()[0]
if self.isnoupdate(function) and self.mode != 'init':
return
model = self.get_model(function.model)
if function.eval:
args = self.process_eval(function.eval)
else:
args = self._eval_params(function.model, params)
method = function.name
getattr(model, method)(self.cr, self.uid, *args)
def _set_group_values(self, node, values):
if node.groups:
group_names = node.groups.split(',')
groups_value = []
for group in group_names:
if group.startswith('-'):
group_id = self.get_id(group[1:])
groups_value.append((3, group_id))
else:
group_id = self.get_id(group)
groups_value.append((4, group_id))
values['groups_id'] = groups_value
def process_menuitem(self, node):
self.validate_xml_id(node.id)
if not node.parent:
parent_id = False
self.cr.execute('select id from ir_ui_menu where parent_id is null and name=%s', (node.name,))
res = self.cr.fetchone()
values = {'parent_id': parent_id, 'name': node.name}
else:
parent_id = self.get_id(node.parent)
values = {'parent_id': parent_id}
if node.name:
values['name'] = node.name
try:
res = [ self.get_id(node.id) ]
except: # which exception ?
res = None
if node.action:
action_type = node.type or 'act_window'
icons = {
"act_window": 'STOCK_NEW',
"report.xml": 'STOCK_PASTE',
"wizard": 'STOCK_EXECUTE',
"url": 'STOCK_JUMP_TO',
}
values['icon'] = icons.get(action_type, 'STOCK_NEW')
if action_type == 'act_window':
action_id = self.get_id(node.action)
self.cr.execute('select view_type,view_mode,name,view_id,target from ir_act_window where id=%s', (action_id,))
ir_act_window_result = self.cr.fetchone()
assert ir_act_window_result, "No window action defined for this id %s !\n" \
"Verify that this is a window action or add a type argument." % (node.action,)
action_type, action_mode, action_name, view_id, target = ir_act_window_result
if view_id:
self.cr.execute('SELECT type FROM ir_ui_view WHERE id=%s', (view_id,))
# TODO guess why action_mode is ir_act_window.view_mode above and ir_ui_view.type here
action_mode = self.cr.fetchone()
self.cr.execute('SELECT view_mode FROM ir_act_window_view WHERE act_window_id=%s ORDER BY sequence LIMIT 1', (action_id,))
if self.cr.rowcount:
action_mode = self.cr.fetchone()
if action_type == 'tree':
values['icon'] = 'STOCK_INDENT'
elif action_mode and action_mode.startswith('tree'):
values['icon'] = 'STOCK_JUSTIFY_FILL'
elif action_mode and action_mode.startswith('graph'):
values['icon'] = 'terp-graph'
elif action_mode and action_mode.startswith('calendar'):
values['icon'] = 'terp-calendar'
if target == 'new':
values['icon'] = 'STOCK_EXECUTE'
if not values.get('name', False):
values['name'] = action_name
elif action_type == 'wizard':
action_id = self.get_id(node.action)
self.cr.execute('select name from ir_act_wizard where id=%s', (action_id,))
ir_act_wizard_result = self.cr.fetchone()
if (not values.get('name', False)) and ir_act_wizard_result:
values['name'] = ir_act_wizard_result[0]
else:
raise YamlImportException("Unsupported type '%s' in menuitem tag." % action_type)
if node.sequence:
values['sequence'] = node.sequence
if node.icon:
values['icon'] = node.icon
self._set_group_values(node, values)
pid = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, \
'ir.ui.menu', self.module, values, node.id, mode=self.mode, \
noupdate=self.isnoupdate(node), res_id=res and res[0] or False)
if node.id and parent_id:
self.id_map[node.id] = int(parent_id)
if node.action and pid:
action_type = node.type or 'act_window'
action_id = self.get_id(node.action)
action = "ir.actions.%s,%d" % (action_type, action_id)
self.pool.get('ir.model.data').ir_set(self.cr, SUPERUSER_ID, 'action', \
'tree_but_open', 'Menuitem', [('ir.ui.menu', int(parent_id))], action, True, True, xml_id=node.id)
    def process_act_window(self, node):
        """Process an !act_window node: create/update an
        ir.actions.act_window record and, when `src_model` is given, bind it
        as a related sidebar action."""
        assert getattr(node, 'id'), "Attribute %s of act_window is empty !" % ('id',)
        assert getattr(node, 'name'), "Attribute %s of act_window is empty !" % ('name',)
        assert getattr(node, 'res_model'), "Attribute %s of act_window is empty !" % ('res_model',)
        self.validate_xml_id(node.id)
        view_id = False
        if node.view:
            view_id = self.get_id(node.view)
        if not node.context:
            node.context={}
        context = eval(str(node.context), self.eval_context)
        values = {
            'name': node.name,
            'type': node.type or 'ir.actions.act_window',
            'view_id': view_id,
            'domain': node.domain,
            'context': context,
            'res_model': node.res_model,
            'src_model': node.src_model,
            'view_type': node.view_type or 'form',
            'view_mode': node.view_mode or 'tree,form',
            'usage': node.usage,
            'limit': node.limit,
            'auto_refresh': node.auto_refresh,
            'multi': getattr(node, 'multi', False),
        }
        self._set_group_values(node, values)
        if node.target:
            values['target'] = node.target
        id = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, \
                'ir.actions.act_window', self.module, values, node.id, mode=self.mode)
        self.id_map[node.id] = int(id)
        if node.src_model:
            keyword = 'client_action_relate'
            value = 'ir.actions.act_window,%s' % id
            replace = node.replace or True
            self.pool.get('ir.model.data').ir_set(self.cr, SUPERUSER_ID, 'action', keyword, \
                    node.id, [node.src_model], value, replace=replace, noupdate=self.isnoupdate(node), isobject=True, xml_id=node.id)
        # TODO add remove ir.model.data
def process_delete(self, node):
assert getattr(node, 'model'), "Attribute %s of delete tag is empty !" % ('model',)
if self.pool.get(node.model):
if node.search:
ids = self.pool.get(node.model).search(self.cr, self.uid, eval(node.search, self.eval_context))
else:
ids = [self.get_id(node.id)]
if len(ids):
self.pool.get(node.model).unlink(self.cr, self.uid, ids)
else:
self._log("Record not deleted.")
    def process_url(self, node):
        """Process a !url node: create/update an ir.actions.act_url record
        and optionally bind it to the client UI via ir.values."""
        self.validate_xml_id(node.id)
        res = {'name': node.name, 'url': node.url, 'target': node.target}
        id = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, \
                "ir.actions.act_url", self.module, res, node.id, mode=self.mode)
        self.id_map[node.id] = int(id)
        # ir_set
        if (not node.menu or eval(node.menu)) and id:
            keyword = node.keyword or 'client_action_multi'
            value = 'ir.actions.act_url,%s' % id
            replace = node.replace or True
            self.pool.get('ir.model.data').ir_set(self.cr, SUPERUSER_ID, 'action', \
                    keyword, node.url, ["ir.actions.act_url"], value, replace=replace, \
                    noupdate=self.isnoupdate(node), isobject=True, xml_id=node.id)
    def process_ir_set(self, node):
        """Process an !ir_set node (only honoured in 'init' mode): evaluate
        its fields and store them through ir.values."""
        if not self.mode == 'init':
            return False
        _, fields = node.items()[0]
        res = {}
        for fieldname, expression in fields.items():
            if is_eval(expression):
                value = eval(expression.expression, self.eval_context)
            else:
                value = expression
            res[fieldname] = value
        self.pool.get('ir.model.data').ir_set(self.cr, SUPERUSER_ID, res['key'], res['key2'], \
                res['name'], res['models'], res['value'], replace=res.get('replace',True), \
                isobject=res.get('isobject', False), meta=res.get('meta',None))
    def process_report(self, node):
        """Process a !report node: create/update an ir.actions.report.xml
        record and optionally register it in the client's print menu."""
        values = {}
        for dest, f in (('name','string'), ('model','model'), ('report_name','name')):
            values[dest] = getattr(node, f)
            assert values[dest], "Attribute %s of report is empty !" % (f,)
        for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),('attachment','attachment'),('attachment_use','attachment_use')):
            if getattr(node, field):
                values[dest] = getattr(node, field)
        if node.auto:
            values['auto'] = eval(node.auto)
        if node.sxw:
            # embed the .sxw template content directly in the record
            sxw_file = misc.file_open(node.sxw)
            try:
                sxw_content = sxw_file.read()
                values['report_sxw_content'] = sxw_content
            finally:
                sxw_file.close()
        if node.header:
            values['header'] = eval(node.header)
        values['multi'] = node.multi and eval(node.multi)
        xml_id = node.id
        self.validate_xml_id(xml_id)
        self._set_group_values(node, values)
        id = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, "ir.actions.report.xml", \
                self.module, values, xml_id, noupdate=self.isnoupdate(node), mode=self.mode)
        self.id_map[xml_id] = int(id)
        if not node.menu or eval(node.menu):
            keyword = node.keyword or 'client_print_multi'
            value = 'ir.actions.report.xml,%s' % id
            replace = node.replace or True
            self.pool.get('ir.model.data').ir_set(self.cr, SUPERUSER_ID, 'action', \
                    keyword, values['name'], [values['model']], value, replace=replace, isobject=True, xml_id=xml_id)
    def process_none(self):
        """
        Empty node or commented node should not pass silently.
        """
        self._log_assert_failure("You have an empty block in your tests.")
    def process(self, yaml_string):
        """
        Processes a Yaml string. Custom tags are interpreted by 'process_' instance methods.
        """
        yaml_tag.add_constructors()
        is_preceded_by_comment = False
        # NOTE(review): yaml.load() with custom constructors executes code
        # paths chosen by the file; acceptable here because YAML files ship
        # with installed modules, never from end users -- confirm.
        for node in yaml.load(yaml_string):
            is_preceded_by_comment = self._log_node(node, is_preceded_by_comment)
            try:
                self._process_node(node)
            except Exception, e:
                _logger.exception(e)
                raise
    def _process_node(self, node):
        """Dispatch a parsed YAML node to its process_* handler."""
        if is_comment(node):
            self.process_comment(node)
        elif is_assert(node):
            self.process_assert(node)
        elif is_record(node):
            self.process_record(node)
        elif is_python(node):
            self.process_python(node)
        elif is_menuitem(node):
            self.process_menuitem(node)
        elif is_delete(node):
            self.process_delete(node)
        elif is_url(node):
            self.process_url(node)
        elif is_context(node):
            self.process_context(node)
        elif is_ir_set(node):
            self.process_ir_set(node)
        elif is_act_window(node):
            self.process_act_window(node)
        elif is_report(node):
            self.process_report(node)
        elif is_workflow(node):
            # scalar shorthand is normalized to the mapping form
            if isinstance(node, types.DictionaryType):
                self.process_workflow(node)
            else:
                self.process_workflow({node: []})
        elif is_function(node):
            if isinstance(node, types.DictionaryType):
                self.process_function(node)
            else:
                self.process_function({node: []})
        elif node is None:
            self.process_none()
        else:
            raise YamlImportException("Can not process YAML block: %s" % node)
    def _log_node(self, node, is_preceded_by_comment):
        """Log the node; return whether the *next* node is already described
        by this comment node (suppressing its default log line)."""
        if is_comment(node):
            is_preceded_by_comment = True
            self._log(node)
        elif not is_preceded_by_comment:
            if isinstance(node, types.DictionaryType):
                msg = "Creating %s\n with %s"
                args = node.items()[0]
                self._log(msg, *args)
            else:
                self._log(node)
        else:
            is_preceded_by_comment = False
        return is_preceded_by_comment
def yaml_import(cr, module, yamlfile, kind, idref=None, mode='init', noupdate=False, report=None):
    """Module entry point: interpret the YAML data/test file `yamlfile` for
    `module`. Test files (`kind == 'test'`) are logged at TEST level."""
    if idref is None:
        idref = {}
    loglevel = logging.TEST if kind == 'test' else logging.DEBUG
    yaml_string = yamlfile.read()
    yaml_interpreter = YamlInterpreter(cr, module, idref, mode, filename=yamlfile.name, report=report, noupdate=noupdate, loglevel=loglevel)
    yaml_interpreter.process(yaml_string)
# keeps convention of convert.py
convert_yaml_import = yaml_import
def threaded_yaml_import(db_name, module_name, file_name, delay=0):
    """Run a YAML import for `module_name` in a background thread, after an
    optional `delay` in seconds; cursor and file are always closed."""
    def _run():
        time.sleep(delay)
        cr = None
        fp = None
        try:
            cr = sql_db.db_connect(db_name).cursor()
            fp = misc.file_open(file_name)
            convert_yaml_import(cr, module_name, fp, {}, 'update', True)
        finally:
            if cr:
                cr.close()
            if fp:
                fp.close()
    threading.Thread(target=_run).start()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| camptocamp/ngo-addons-backport | openerp/tools/yaml_import.py | Python | agpl-3.0 | 42,846 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import data_utils, FairseqDataset
def collate(samples, pad_idx, eos_idx):
    """Merge a list of sample dicts into a padded mini-batch dict."""
    if not samples:
        return {}

    def merge(key, is_list=False):
        # Pad and stack the `key` field across samples; when the field is a
        # list of tensors (multiple targets), collate each position on its own.
        if not is_list:
            return data_utils.collate_tokens(
                [s[key] for s in samples], pad_idx, eos_idx, left_pad=False,
            )
        collated = []
        for i in range(len(samples[0][key])):
            collated.append(data_utils.collate_tokens(
                [s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False,
            ))
        return collated

    src_tokens = merge('source')
    first_target = samples[0]['target']
    if first_target is not None:
        target = merge('target', is_list=isinstance(first_target, list))
    else:
        # language modeling without explicit targets: predict the source
        target = src_tokens

    return {
        'id': torch.LongTensor([s['id'] for s in samples]),
        'nsentences': len(samples),
        'ntokens': sum(len(s['source']) for s in samples),
        'net_input': {
            'src_tokens': src_tokens,
            'src_lengths': torch.LongTensor([
                s['source'].numel() for s in samples
            ]),
        },
        'target': target,
    }
class MonolingualDataset(FairseqDataset):
    """
    A wrapper around torch.utils.data.Dataset for monolingual data.
    Args:
        dataset (torch.utils.data.Dataset): dataset to wrap
        sizes (List[int]): sentence lengths
        vocab (~fairseq.data.Dictionary): vocabulary
        shuffle (bool, optional): shuffle the elements before batching
            (default: True).
    """
    def __init__(self, dataset, sizes, src_vocab, tgt_vocab, add_eos_for_other_targets, shuffle,
                 targets=None, add_bos_token=False):
        self.dataset = dataset
        self.sizes = np.array(sizes)
        # source-side dictionary; tgt_vocab may be a (smaller) output dictionary
        self.vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.add_eos_for_other_targets = add_eos_for_other_targets
        self.shuffle = shuffle
        self.add_bos_token = add_bos_token
        assert targets is None or all(t in {'self', 'future', 'past'} for t in targets), \
            "targets must be none or one of 'self', 'future', 'past'"
        if targets is not None and len(targets) == 0:
            # an empty target list means "no targets at all"
            targets = None
        self.targets = targets
    def __getitem__(self, index):
        """Return `{'id', 'source', 'target'}` for the example at `index`;
        `target` is None when no targets are configured."""
        if self.targets is not None:
            # *future_target* is the original sentence
            # *source* is shifted right by 1 (maybe left-padded with eos)
            # *past_target* is shifted right by 2 (left-padded as needed)
            #
            # Left-to-right language models should condition on *source* and
            # predict *future_target*.
            # Right-to-left language models should condition on *source* and
            # predict *past_target*.
            source, future_target, past_target = self.dataset[index]
            source, target = self._make_source_target(source, future_target, past_target)
        else:
            source = self.dataset[index]
            target = None
        source, target = self._maybe_add_bos(source, target)
        return {'id': index, 'source': source, 'target': target}
    def __len__(self):
        """Number of examples in the wrapped dataset."""
        return len(self.dataset)
    def _make_source_target(self, source, future_target, past_target):
        """Assemble the target(s) listed in self.targets; returns the
        (possibly eos-extended) source and a tensor or list of tensors."""
        if self.targets is not None:
            target = []
            if self.add_eos_for_other_targets and (('self' in self.targets) or ('past' in self.targets)) \
                    and source[-1] != self.vocab.eos():
                # append eos at the end of source
                source = torch.cat([source, source.new([self.vocab.eos()])])
                if 'future' in self.targets:
                    future_target = torch.cat([future_target, future_target.new([self.vocab.pad()])])
                if 'past' in self.targets:
                    # first token is before the start of sentence which is only used in "none" break mode when
                    # add_eos_for_other_targets is False
                    past_target = torch.cat([past_target.new([self.vocab.pad()]), past_target[1:], source[-2, None]])
            for t in self.targets:
                if t == 'self':
                    target.append(source)
                elif t == 'future':
                    target.append(future_target)
                elif t == 'past':
                    target.append(past_target)
                else:
                    raise Exception('invalid target ' + t)
            if len(target) == 1:
                # single target: unwrap the list for the common case
                target = target[0]
        else:
            target = future_target
        return source, self._filter_vocab(target)
def _maybe_add_bos(self, source, target):
if self.add_bos_token:
source = torch.cat([source.new([self.vocab.bos()]), source])
if target is not None:
target = torch.cat([target.new([self.tgt_vocab.bos()]), target])
return source, target
def _filter_vocab(self, target):
if len(self.tgt_vocab) != len(self.vocab):
def _filter(target):
mask = target.ge(len(self.tgt_vocab))
if mask.any():
target[mask] = self.tgt_vocab.unk()
return target
if isinstance(target, list):
return [_filter(t) for t in target]
return _filter(target)
return target
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch with the following keys:
- `id` (LongTensor): example IDs in the original input order
- `ntokens` (int): total number of tokens in the batch
- `net_input` (dict): the input to the Model, containing keys:
- `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
the source sentence of shape `(bsz, src_len)`. Padding will
appear on the right.
- `target` (LongTensor): a padded 2D Tensor of tokens in the
target sentence of shape `(bsz, tgt_len)`. Padding will appear
on the right.
"""
return collate(samples, self.vocab.pad(), self.vocab.eos())
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return self.sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)
@property
def supports_prefetch(self):
return getattr(self.dataset, 'supports_prefetch', False)
def prefetch(self, indices):
self.dataset.prefetch(indices)
| hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq/data/monolingual_dataset.py | Python | bsd-3-clause | 7,469 |
#! /usr/bin/env python
import os
import sys
def run_tests():
    """Configure a minimal Django environment and run the test suite.

    Returns:
        The number of failing tests, suitable for use as a process
        exit status.
    """
    import django
    from django.conf import settings

    settings.configure(
        DATABASES={
            'default': {
                # sqlite3 keeps the test run self-contained.
                'ENGINE': 'django.db.backends.sqlite3',
            },
        },
    )

    # Make the project root importable so the 'tests' package can be found.
    # sys.path entries must be directories; appending the file itself (as
    # the previous code did with abspath(__file__)) has no effect.
    sys.path.append(os.path.dirname(os.path.abspath(__file__)))

    # django.setup() exists from Django 1.7 onwards.
    if hasattr(django, 'setup'):
        django.setup()

    from django_nose import NoseTestSuiteRunner

    test_runner = NoseTestSuiteRunner(verbosity=1)
    return test_runner.run_tests([
        'tests',
    ])
def main():
    """Entry point: run the test suite and exit with the failure count."""
    sys.exit(run_tests())


if __name__ == '__main__':
    main()
| MattBlack85/django-query-logger | runtests.py | Python | mit | 644 |
import pygame
from pygame.locals import * # pour les constantes touches...
from constantes import *
from fichiers import *
from general import *
from aide import *
def edit(screen, levelNumber ,mode, lang, langu, levelFinal):
    """Interactive Sokoban level editor loop.

    Lets the user paint objects on the map with the mouse (keys 1-5 select
    the object, right click erases), switch levels with PageUp/PageDown,
    save with S, open the help screen with H/A and quit with Escape.

    :param screen: pygame display surface to draw on
    :param levelNumber: index of the level to load initially
    :param mode: display mode flags, forwarded to the help screen
    :param lang: current language constant (FR or EN)
    :param langu: language data forwarded to the help screen
    :param levelFinal: index of the last existing level (PageUp past it
        creates a fresh, wall-filled map)
    """
    motionX = 0
    motionY = 0
    alsoMario = 0  # number of Mario sprites currently placed (0 or 1)
    carte = [[int for lgn in range(NB_BLOCS_HAUTEUR)]for col in range(NB_BLOCS_LARGEUR)]
    restMario = 0
    levelWord = ''
    clicGaucheEnCours = False
    clicDroitEnCours = False
    saved = False
    objectPos = pygame.Rect(0,0,0,0)
    exemplePos = pygame.Rect(0,0,0,0)

    # load the sprites (full size plus 50% "cursor preview" variants)
    mur = pygame.image.load(SOURCE_IMG + 'mur.jpg').convert()
    mur50 = pygame.image.load(SOURCE_IMG + 'mur50.jpg').convert()
    caisse = pygame.image.load(SOURCE_IMG + 'caisse.jpg').convert()
    caisse50 = pygame.image.load(SOURCE_IMG + 'caisse50.jpg').convert()
    caisse_ok = pygame.image.load(SOURCE_IMG + 'caisse_ok.jpg').convert()
    caisse_ok50 = pygame.image.load(SOURCE_IMG + 'caisse_ok50.jpg').convert()
    objectif = pygame.image.load(SOURCE_IMG + 'objectif.png').convert_alpha()
    objectif50 = pygame.image.load(SOURCE_IMG + 'objectif50.png').convert_alpha()
    mario = pygame.image.load(SOURCE_IMG + 'mario_bas.gif').convert_alpha()
    mario50 = pygame.image.load(SOURCE_IMG + 'mario_bas50.gif').convert_alpha()
    quadrillage = pygame.image.load(SOURCE_IMG + 'quadrillage.png').convert_alpha()

    # object painted by default
    objet = MUR

    # load map
    chargeCarte(carte, levelNumber)

    # search for a Mario already placed on the loaded map
    for i in range(NB_BLOCS_LARGEUR):
        for j in range(NB_BLOCS_HAUTEUR):
            if carte[i][j] ==MARIO:
                alsoMario += 1

    # white status bar at the bottom of the window
    whiteBar = pygame.Surface((screen.get_width(), 60), screen.get_flags())
    whiteBar.fill(WHITE)

    # font
    police = pygame.font.Font('angelina.ttf', 20)

    # define sourceFile default
    pathFile = printLang(lang) # 'fr' or 'en'
    sourceFile = SOURCE_FILE + pathFile + '/edit.lvl' # './files/<fr|en>/edit.lvl'
    # H: Help Level: Saved ESC: Exit ou H: Aide Niveau: Sauve ESC: Quitter

    # number of lines of status text
    lignes = compteLignes(sourceFile)
    tableau = [Text() for i in range(lignes)]
    # initialise the text table in fr or en
    initialiseEditTable(sourceFile,lignes,tableau)
    levelWord = tableau[1].data
    tableau[1].data = levelWord + ' ' + str(levelNumber)
    tableau[1].partie = police.render(tableau[1].data, True, BLUE)

    # main event loop
    continuer = True
    while(continuer):
        # check if there is mario on the map if not initialize the boolean
        if(objet == MARIO and alsoMario != 0):
            for i in range(NB_BLOCS_LARGEUR):
                # NOTE(review): inner bound uses NB_BLOCS_LARGEUR where
                # NB_BLOCS_HAUTEUR looks intended (harmless only when the
                # map is square) -- confirm against constantes.py.
                for j in range(NB_BLOCS_LARGEUR):
                    if carte[i][j]==MARIO:
                        restMario += 1
            if restMario == 0:
                alsoMario = 0
            restMario=0

        for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                continuer = False # leave the main loop
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    continuer = False
                elif event.key == K_1 or event.key == K_KP1:
                    objet = MUR
                elif event.key == K_2 or event.key == K_KP2:
                    objet = CAISSE
                elif event.key == K_3 or event.key == K_KP3:
                    objet = OBJECTIF
                elif event.key == K_4 or event.key == K_KP4:
                    objet = MARIO
                elif event.key == K_5 or event.key == K_KP5:
                    objet = CAISSE_OK
                elif event.key == K_h and lang == EN:
                    aide(screen,mode,lang,langu)
                elif event.key == K_a and lang == FR:
                    aide(screen,mode,lang,langu)
                elif event.key == K_s:
                    saved = True
                    sauveCarte(carte,levelNumber)
                elif event.key == K_PAGEUP:
                    if levelNumber <= levelFinal:
                        levelNumber += 1
                        if levelNumber == levelFinal+ 1:
                            # past the last level: start a fresh wall-filled map
                            carte = [[MUR for lgn in range(NB_BLOCS_HAUTEUR)]for col in range(NB_BLOCS_LARGEUR)]
                            tableau[1].data = levelWord + ' ' + str(levelNumber)
                            tableau[1].partie = police.render(tableau[1].data, True, BLUE)
                            break
                        else:
                            # add level number to tableau[1]
                            tableau[1].data = levelWord + ' ' + str(levelNumber)
                            tableau[1].partie = police.render(tableau[1].data, True, BLUE)
                            chargeCarte(carte, levelNumber)
                elif event.key == K_PAGEDOWN:
                    if levelNumber > 1:
                        levelNumber -=1
                        # add level number to tableau[1]
                        tableau[1].data = levelWord + ' ' + str(levelNumber)
                        tableau[1].partie = police.render(tableau[1].data, True, BLUE)
                        chargeCarte(carte, levelNumber)
            if event.type == MOUSEBUTTONDOWN:
                # note the swapped unpacking: motionX receives the y pixel
                motionY, motionX = event.pos
                if motionX <= 408 and motionY <= 408:
                    if event.button == RIGHT:
                        clicDroitEnCours = True
                        carte[motionX // TAILLE_BLOC][motionY // TAILLE_BLOC] = VIDE
                    if event.button == LEFT:
                        clicGaucheEnCours = True
                        if objet == MARIO and alsoMario != 0: # mario can be put only once.
                            continue
                        else:
                            carte[motionX // TAILLE_BLOC][motionY // TAILLE_BLOC] = objet
                            if objet == MARIO:
                                alsoMario +=1
            if event.type == MOUSEBUTTONUP:
                if event.button == LEFT:
                    clicGaucheEnCours = False
                elif event.button == RIGHT:
                    clicDroitEnCours = False
            if event.type == MOUSEMOTION:
                motionX, motionY = event.pos
                # preview sprite trails slightly below/right of the cursor
                exemplePos.x = motionX + 20
                exemplePos.y = motionY + 20

        # redraw the whole screen
        screen.fill(BLACK) # clear to black

        # draw the map
        for lgn in range (NB_BLOCS_HAUTEUR):
            for col in range (NB_BLOCS_LARGEUR):
                objectPos.x = col * TAILLE_BLOC
                objectPos.y = lgn * TAILLE_BLOC
                if carte[lgn][col] == MUR:
                    screen.blit(mur, objectPos)
                elif carte[lgn][col] == CAISSE:
                    screen.blit(caisse,objectPos)
                elif carte[lgn][col] == CAISSE_OK:
                    screen.blit(caisse_ok,objectPos)
                elif carte[lgn][col] == OBJECTIF:
                    screen.blit(objectif,objectPos)
                elif carte[lgn][col] == MARIO:
                    screen.blit(mario, objectPos)
        screen.blit(quadrillage, (0, 0))

        # status bar background
        objectPos.x = 0
        objectPos.y = screen.get_height() - whiteBar.get_height()
        screen.blit(whiteBar,objectPos)

        # status bar text
        objectPos.x = 10
        objectPos.y = (screen.get_height() - whiteBar.get_height()) + 5
        screen.blit(tableau[0].partie,objectPos)
        objectPos.x = 100
        screen.blit(tableau[1].partie,objectPos)
        if saved:
            objectPos.x = 200
            screen.blit(tableau[2].partie,objectPos)
        objectPos.x = (screen.get_width() - tableau[3].partie.get_width()) - 10
        screen.blit(tableau[3].partie,objectPos)

        # draw the preview of the selected object next to the cursor
        if objet == MUR:
            screen.blit(mur50, exemplePos)
        elif objet == CAISSE:
            screen.blit(caisse50, exemplePos)
        elif objet == CAISSE_OK:
            screen.blit(caisse_ok50, exemplePos)
        elif objet == OBJECTIF:
            screen.blit(objectif50, exemplePos)
        elif objet == MARIO:
            screen.blit(mario50, exemplePos)

        # refresh the display ---------------------
        pygame.display.flip()
        if saved:
            # keep the "saved" message visible for two seconds, then redraw
            # the status bar without it
            pygame.time.delay(2000)
            objectPos.x = 10
            objectPos.y = (screen.get_height() - whiteBar.get_height()) + 5
            screen.blit(tableau[0].partie, objectPos)
            objectPos.x = 100
            screen.blit(tableau[1].partie, objectPos)
            objectPos.x = (screen.get_width() - tableau[3].partie.get_width())-10
            screen.blit(tableau[3].partie, objectPos)
            saved = False
| litzler/marioSokoBan | edit.py | Python | gpl-3.0 | 8,847 |
# -*- coding: utf-8 -*-
def test(list1, list2):
"""
Returns True if list1 is a permutation of list2
"""
result = (len(list1) == len(list2))
if result is True:
for item in list1:
if not item in list2:
result = False
break
return result | collective/ECSpooler | backends/python/permTest.py | Python | gpl-2.0 | 322 |
# Copyright 2018-2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from __future__ import division
from vdsm.network import dns
class TestNetworkDnsIntegration(object):
    """Integration checks for reading DNS configuration from the host."""

    def test_read_dns_entries_from_resolv_conf(self):
        # The host running the tests is assumed to have at least one DNS
        # entry configured.
        resolvers = dns.get_host_nameservers()
        assert resolvers, 'No DNS entries detected'
| nirs/vdsm | tests/network/integration/dns_test.py | Python | gpl-2.0 | 1,198 |
#!/usr/bin/env python
"""
Simple plots for the recurrence model
"""
import numpy as np
import matplotlib.pyplot as plt
from openquake.hmtk.plotting.seismicity.catalogue_plots import \
(get_completeness_adjusted_table, _save_image)
from openquake.hmtk.seismicity.occurrence.utils import get_completeness_counts
from openquake.hazardlib.mfd.truncated_gr import TruncatedGRMFD
from openquake.hazardlib.mfd.evenly_discretized import EvenlyDiscretizedMFD
from openquake.hazardlib.mfd.youngs_coppersmith_1985 import\
YoungsCoppersmith1985MFD
def _check_recurrence_model_type(input_model):
    """
    Verify that the input model is one of the supported magnitude-frequency
    distributions, raising a ValueError otherwise.

    :param input_model:
        Candidate recurrence model instance
    :raises ValueError:
        If the model is not a TruncatedGRMFD, EvenlyDiscretizedMFD or
        YoungsCoppersmith1985MFD
    """
    # isinstance accepts a tuple of types, which replaces the previous
    # manual loop over the candidate classes.
    supported_models = (TruncatedGRMFD,
                        EvenlyDiscretizedMFD,
                        YoungsCoppersmith1985MFD)
    if not isinstance(input_model, supported_models):
        raise ValueError('Recurrence model not recognised')
def _get_recurrence_model(input_model):
    """
    Returns the incremental (annual) and cumulative recurrence rates
    predicted by the recurrence model.

    :returns:
        Tuple of (annual_rates, cumulative_rates) where annual_rates is an
        (n, 2) array of [magnitude, annual rate] rows and cumulative_rates
        gives, for each magnitude, the summed rate at or above it.
    """
    _check_recurrence_model_type(input_model)

    # Incremental model rates as an (n, 2) array of [magnitude, rate]
    pairs = input_model.get_annual_occurrence_rates()
    annual_rates = np.array([[pair[0], pair[1]] for pair in pairs])

    # Cumulative rate at each magnitude: sum of the incremental rates of
    # that magnitude bin and every larger one
    cumulative_rates = np.array([annual_rates[idx:, 1].sum()
                                 for idx in range(len(annual_rates))])
    return annual_rates, cumulative_rates
def _check_completeness_table(completeness, catalogue):
"""
Generates the completeness table according to different instances
"""
if isinstance(completeness, np.ndarray) and np.shape(completeness)[1] == 2:
return completeness
elif isinstance(completeness, float):
return np.array([[float(np.min(catalogue.data['year'])),
completeness]])
elif completeness is None:
return np.array([[float(np.min(catalogue.data['year'])),
np.min(catalogue.data['magnitude'])]])
else:
raise ValueError('Completeness representation not recognised')
def plot_recurrence_model(input_model, catalogue, completeness, dmag,
                          figure_size=(10, 8), filename=None, filetype='png', dpi=300):
    """
    Plot a calculated recurrence model over an observed catalogue, adjusted for
    time-varying completeness.

    :param input_model:
        Recurrence model (TruncatedGRMFD, EvenlyDiscretizedMFD or
        YoungsCoppersmith1985MFD)
    :param catalogue:
        Observed earthquake catalogue
    :param completeness:
        Completeness table passed to get_completeness_counts
    :param dmag:
        Magnitude bin width (defaults to 0.1 when None)
    :param figure_size:
        Matplotlib figure size tuple
    :param filename:
        Output file; when None the figure is left on screen (_save_image)
    """
    if figure_size is None:
        figure_size=(10, 8)
    if dmag is None:
        dmag = 0.1
    annual_rates, cumulative_rates = _get_recurrence_model(input_model)

    # Get observed annual recurrence (counts normalised by the observation
    # period of each completeness window)
    if not catalogue.end_year:
        catalogue.update_end_year()
    cent_mag, t_per, n_obs = get_completeness_counts(catalogue,
                                                     completeness,
                                                     dmag)
    obs_rates = n_obs / t_per
    # Cumulative observed rate at or above each magnitude bin
    cum_obs_rates = np.array([np.sum(obs_rates[i:])
                              for i in range(len(obs_rates))])

    # Create plot: blue = incremental, red = cumulative; markers for the
    # observations, lines for the model
    plt.figure(figsize=figure_size)
    plt.semilogy(cent_mag, obs_rates, 'bo')
    plt.semilogy(annual_rates[:, 0], annual_rates[:, 1], 'b-')
    plt.semilogy(cent_mag, cum_obs_rates, 'rs')
    plt.semilogy(annual_rates[:, 0], cumulative_rates, 'r-')
    plt.grid(which='both')
    plt.xlabel('Magnitude', fontsize='16')
    plt.ylabel('Annual Rate', fontsize='16')
    plt.legend(['Observed Incremental Rate',
                'Model Incremental Rate',
                'Observed Cumulative Rate',
                'Model Cumulative Rate'], fontsize=14)
    plt.tick_params(labelsize=12)
    _save_image(filename, filetype, dpi)
def plot_trunc_gr_model(aval, bval, min_mag, max_mag, dmag, catalogue=None,
                        completeness=None, figure_size=None, filename=None, filetype='png',
                        dpi=300):
    """
    Plots a Gutenberg-Richter model.

    :param aval:
        Gutenberg-Richter a-value
    :param bval:
        Gutenberg-Richter b-value
    :param min_mag:
        Minimum magnitude of the truncated distribution
    :param max_mag:
        Maximum magnitude of the truncated distribution
    :param dmag:
        Magnitude bin width
    :param catalogue:
        Optional observed catalogue; when given the model is plotted over
        the completeness-adjusted observations
    """
    input_model = TruncatedGRMFD(min_mag, max_mag, dmag, aval, bval)
    if not catalogue:
        # Plot only the modelled recurrence
        annual_rates, cumulative_rates = _get_recurrence_model(input_model)
        plt.semilogy(annual_rates[:, 0], annual_rates[:, 1], 'b-')
        plt.semilogy(annual_rates[:, 0], cumulative_rates, 'r-')
        plt.xlabel('Magnitude', fontsize='large')
        plt.ylabel('Annual Rate', fontsize='large')
        plt.legend(['Incremental Rate', 'Cumulative Rate'])
        _save_image(filename, filetype, dpi)
    else:
        # Normalise the completeness representation before overlaying the
        # model on the observed catalogue
        completeness = _check_completeness_table(completeness, catalogue)
        plot_recurrence_model(input_model,
                              catalogue,
                              completeness,
                              input_model.bin_width,
                              figure_size,
                              filename,
                              filetype,
                              dpi)
| gem/oq-hazardlib | openquake/hmtk/plotting/seismicity/occurrence/recurrence_plot.py | Python | agpl-3.0 | 4,871 |
from unittest import TestCase
from wtforms.fields import TextField
from wtforms.ext.csrf import SecureForm
from wtforms.ext.csrf.session import SessionSecureForm
import hashlib
import hmac
class DummyPostData(dict):
    """Minimal stand-in for a multi-dict of POSTed form values."""

    def getlist(self, key):
        # Form data always exposes values as sequences; wrap scalars.
        value = self[key]
        if isinstance(value, (list, tuple)):
            return value
        return [value]
class InsecureForm(SecureForm):
    """SecureForm variant whose token is whatever context is supplied."""

    a = TextField()

    def generate_csrf_token(self, csrf_context):
        # Echo the context back so tests can control the token directly.
        return csrf_context
class FakeSessionRequest(object):
    """Request stub exposing only a ``session`` attribute."""

    def __init__(self, session):
        self.session = session
class SecureFormTest(TestCase):
    """Tests for the base SecureForm CSRF token behaviour."""

    def test_base_class(self):
        # SecureForm.generate_csrf_token is abstract: direct use fails.
        self.assertRaises(NotImplementedError, SecureForm)

    def test_basic_impl(self):
        """A form without posted data must fail CSRF validation."""
        form = InsecureForm(csrf_context=42)
        self.assertEqual(form.csrf_token.current_token, 42)
        self.assert_(not form.validate())
        self.assertEqual(len(form.csrf_token.errors), 1)
        self.assertEqual(form.csrf_token._value(), 42)
        # Make sure csrf_token is taken out from .data
        self.assertEqual(form.data, {'a': None})

    def test_with_data(self):
        """A matching posted token validates; a mismatching one does not."""
        post_data = DummyPostData(csrf_token=u'test', a='hi')
        form = InsecureForm(post_data, csrf_context=u'test')
        self.assert_(form.validate())
        self.assertEqual(form.data, {'a': u'hi'})

        form = InsecureForm(post_data, csrf_context=u'something')
        self.assert_(not form.validate())
        # Make sure that value is still the current token despite
        # the posting of a different value
        self.assertEqual(form.csrf_token._value(), u'something')

    def test_with_missing_token(self):
        """Omitting the token from the POST must fail validation."""
        post_data = DummyPostData(a='hi')
        form = InsecureForm(post_data, csrf_context=u'test')
        self.assert_(not form.validate())
        self.assertEqual(form.csrf_token.data, u'')
        self.assertEqual(form.csrf_token._value(), u'test')
class SessionSecureFormTest(TestCase):
    """Tests for SessionSecureForm's HMAC-based session CSRF tokens."""

    class SSF(SessionSecureForm):
        # Default TIME_LIMIT applies: tokens carry a timestamp.
        SECRET_KEY = 'abcdefghijklmnop'.encode('ascii')

    class NoTimeSSF(SessionSecureForm):
        # TIME_LIMIT=None produces tokens of the form '##<hexdigest>'.
        SECRET_KEY = 'abcdefghijklmnop'.encode('ascii')
        TIME_LIMIT = None

    def test_basic(self):
        """Instantiating without a csrf_context fails; with one, a csrf
        value is stored in the session."""
        self.assertRaises(TypeError, self.SSF)
        session = {}
        form = self.SSF(csrf_context=FakeSessionRequest(session))
        assert 'csrf' in session

    def test_timestamped(self):
        """A forged timestamped token must be rejected."""
        session = {}
        postdata = DummyPostData(csrf_token=u'fake##fake')
        form = self.SSF(postdata, csrf_context=session)
        assert 'csrf' in session
        assert form.csrf_token._value()
        # The rendered value embeds a timestamp, so it differs from the
        # raw session key.
        assert form.csrf_token._value() != session['csrf']
        assert not form.validate()
        self.assertEqual(form.csrf_token.errors[0], u'CSRF failed')
        # TODO: More stringent test with timestamps and all that

    def test_notime(self):
        """Without a time limit the token is '##' + HMAC-SHA1 of the
        session's csrf key."""
        session = {}
        form = self.NoTimeSSF(csrf_context=session)
        hmacced = hmac.new(form.SECRET_KEY, session['csrf'].encode('utf8'), digestmod=hashlib.sha1)
        self.assertEqual(form.csrf_token._value(), '##%s' % hmacced.hexdigest())
        assert not form.validate()
        self.assertEqual(form.csrf_token.errors[0], u'CSRF token missing')

        # Test with pre-made values
        session = {'csrf': u'00e9fa5fe507251ac5f32b1608e9282f75156a05'}
        postdata = DummyPostData(csrf_token=u'##d21f54b7dd2041fab5f8d644d4d3690c77beeb14')
        form = self.NoTimeSSF(postdata, csrf_context=session)
        assert form.validate()
| mfa/wtforms-clone | tests/ext_csrf.py | Python | bsd-3-clause | 3,530 |
"""Make a mapping from body part words to categories.
Make mapping <body part word> -> [historic words] based on Inger Leemans'
clustering.
Usage: python make_body_part_mapping.py
Requires files body_part_clusters_renaissance.csv,
body_part_clusters_classisism.csv, and body_part_clusters_enlightenment.csv to
be in the current directory.
Writes body_part_mapping.json to the current directory.
"""
import codecs
import json
import argparse
import os
def csv2mapping(file_name):
    """Parse a body-part cluster csv into {label: [historic words]}.

    Each line is ';'-separated: column 0 is the (lowercased) cluster
    label and columns 2 onwards hold tab-separated entries whose first
    field is the historic word to keep.
    """
    mapping = {}
    with codecs.open(file_name, 'rb', 'utf-8') as f:
        for line in f:
            columns = line.split(';')
            label = columns[0].lower()
            if columns[2] == '':
                continue
            if not mapping.get(label):
                mapping[label] = []
            for cell in columns[2:]:
                # skip empty cells and the bare trailing newline
                if cell and cell != '\n':
                    mapping[label].append(cell.split('\t')[0])
    return mapping
def merge_mappings(m1, m2):
    """Merge mapping m2 into m1 in place and return m1.

    Word lists for labels present in both mappings are concatenated
    (m1's words first); labels only in m2 are copied across.
    """
    # dict.items() instead of the Python-2-only iteritems(): it behaves
    # the same here and keeps the helper working on Python 3 as well.
    for label, words in m2.items():
        if not m1.get(label):
            m1[label] = words
        else:
            m1[label] = m1[label] + words
    return m1
# Command-line entry: merge the per-period cluster csv files from the given
# directory into one mapping and dump it as json.
parser = argparse.ArgumentParser()
parser.add_argument('dir', help='directory containing the body part cluster '
                    'csv files (<embem_data_dir>/dict).')
parser.add_argument('json_out', help='name of file to write the mapping to '
                    '(json file).')
args = parser.parse_args()

dr = args.dir

# One mapping per historical period
mapping_r = csv2mapping(os.path.join(dr, 'body_part_clusters_renaissance.csv'))
mapping_c = csv2mapping(os.path.join(dr, 'body_part_clusters_classisism.csv'))
mapping_e = csv2mapping(os.path.join(dr,
                                     'body_part_clusters_enlightenment.csv'))

mapping = merge_mappings(mapping_r, mapping_c)
mapping = merge_mappings(mapping, mapping_e)

# De-duplicate each word list (items() rather than the Python-2-only
# iteritems() so the script also runs on Python 3).
for k, v in mapping.items():
    mapping[k] = list(set(mapping[k]))

with codecs.open(args.json_out, 'wb', 'utf-8') as f:
    json.dump(mapping, f, indent=2)
| NLeSC/embodied-emotions-scripts | embem/bodyparts/make_body_part_mapping.py | Python | apache-2.0 | 2,023 |
#!/usr/bin/env python
################################################################################
#
# make_aims.py
#
# Creates a standard control.in template file using a specified geometry.in
# and species directory.
#
################################################################################
#
# Copyright 2013 Kane O'Donnell
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
#
# NOTES
#
# 1.
#
################################################################################
from __future__ import division
import argparse
import os.path
import os
import esc_lib as el
# This is the file template.
# control.in template written verbatim; species settings are appended below.
control = """
#-----------------------------------------------------------------------------------------
#
# control.in generated by make_aims.py
#
#-----------------------------------------------------------------------------------------
# Physics
xc pbe
charge 0.0
spin none
#default_initial_moment hund
relativistic none
# SCF
sc_accuracy_eev 1.0e-3
sc_accuracy_rho 1.0e-5
sc_accuracy_etot 1.0e-6
sc_iter_limit 200
# Geometry optimization
#hessian_to_restart_geometry .false.
relax_geometry bfgs 5e-3
"""

DEBUG=0

# Parse the geometry file and write the control.in template.
parser = argparse.ArgumentParser(description="Create FHI-aims control.in template from geometry")
parser.add_argument('inputfile', help="Input geometry.in file.")
parser.add_argument('species', help="Directory of species defaults.")
args = parser.parse_args()

geom = el.Atoms(args.inputfile, "aims,geometry")

f = open("control.in", 'w')
f.write(control)
f.close()

# check the species directory is actually a directory.
if not os.path.isdir(args.species):
    print "The path you specified for the species directory is not actually a directory!\n\nSpecies settings will not be added to the control.in file."
else:
    # Append the species-default file of every element present in the
    # geometry; species files are named '<Z>_...' so match on the leading
    # atomic number.
    files = os.listdir(args.species)
    species = el.uniqify(geom.species[0])
    print "Species to search for: ", species
    for spec_file in files:
        if int(spec_file.split("_")[0]) in species:
            os.system("cat %s >> control.in" % os.path.join(args.species, spec_file))
| kaneod/physics | python/make_aims.py | Python | gpl-3.0 | 2,885 |
# hgversion.py - Version information for Mercurial
#
# Copyright 2009 Steve Borho <steve@borho.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.
import re
try:
    # Mercurial > 1.1.2 exposes the version through mercurial.util
    from mercurial import util
    hgversion = util.version()
except AttributeError:
    # Mercurial <= 1.1.2: fall back to the old mercurial.version module
    from mercurial import version
    hgversion = version.get_version()
testedwith = '3.6 3.7'

def checkhgversion(v):
    """range check the Mercurial version"""
    supported = testedwith.split()
    base = v.split('+')[0]
    # Unknown versions and hashes (12+ chars) give no basis for a decision.
    if not base or base == 'unknown' or len(base) >= 12:
        return
    parts = re.split(r'\.|-', base)[:2]
    if len(parts) < 2:
        return
    if '.'.join(parts) in supported:
        return
    return ('This version of TortoiseHg requires Mercurial version %s.n to '
            '%s.n, but found %s') % (supported[0], supported[-1], base)
"""SCons.Platform.win32
Platform-specific initialization for Win32 systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/win32.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import os
import os.path
import sys
import tempfile
from SCons.Platform.posix import exitvalmap
from SCons.Platform import TempFileMunge
import SCons.Util
try:
    import msvcrt
    import win32api
    import win32con

    # Probe the attributes we rely on; a stale pywin32 raises AttributeError.
    msvcrt.get_osfhandle
    win32api.SetHandleInformation
    win32con.HANDLE_FLAG_INHERIT
except ImportError:
    parallel_msg = \
        "you do not seem to have the pywin32 extensions installed;\n" + \
        "\tparallel (-j) builds may not work reliably with open Python files."
except AttributeError:
    parallel_msg = \
        "your pywin32 extensions do not support file handle operations;\n" + \
        "\tparallel (-j) builds may not work reliably with open Python files."
else:
    parallel_msg = None

    # Wrap the builtin file/open so every file handle is marked
    # non-inheritable: spawned children then cannot keep our files open.
    _builtin_file = file
    _builtin_open = open

    class _scons_file(_builtin_file):
        def __init__(self, *args, **kw):
            _builtin_file.__init__(self, *args, **kw)
            win32api.SetHandleInformation(msvcrt.get_osfhandle(self.fileno()),
                                          win32con.HANDLE_FLAG_INHERIT, 0)

    def _scons_open(*args, **kw):
        fp = _builtin_open(*args, **kw)
        win32api.SetHandleInformation(msvcrt.get_osfhandle(fp.fileno()),
                                      win32con.HANDLE_FLAG_INHERIT,
                                      0)
        return fp

    file = _scons_file
    open = _scons_open
try:
    import threading
    spawn_lock = threading.Lock()

    # This locked version of spawnve works around a Windows
    # MSVCRT bug, because its spawnve is not thread-safe.
    # Without this, python can randomly crash while using -jN.
    # See the python bug at http://bugs.python.org/issue6476
    # and SCons issue at
    # http://scons.tigris.org/issues/show_bug.cgi?id=2449
    def spawnve(mode, file, args, env):
        """Thread-safe os.spawnve: serialize the spawn, then wait outside
        the lock when P_WAIT semantics were requested."""
        spawn_lock.acquire()
        try:
            if mode == os.P_WAIT:
                ret = os.spawnve(os.P_NOWAIT, file, args, env)
            else:
                ret = os.spawnve(mode, file, args, env)
        finally:
            spawn_lock.release()
        if mode == os.P_WAIT:
            pid, status = os.waitpid(ret, 0)
            # Exit status is in the high byte of the waitpid status word.
            ret = status >> 8
        return ret
except ImportError:
    # Use the unsafe method of spawnve.
    # Please, don't try to optimize this try-except block
    # away by assuming that the threading module is always present.
    # In the test test/option-j.py we intentionally call SCons with
    # a fake threading.py that raises an import exception right away,
    # simulating a non-existent package.
    def spawnve(mode, file, args, env):
        return os.spawnve(mode, file, args, env)
# The upshot of all this is that, if you are using Python 1.5.2,
# you had better have cmd or command.com in your PATH when you run
# scons.

def piped_spawn(sh, escape, cmd, args, env, stdout, stderr):
    """Spawn *args* via the interpreter *sh*, capturing the child's
    stdout/stderr into the given streams when the command line does not
    already redirect them.
    """
    # There is no direct way to do that in python. What we do
    # here should work for most cases:
    # In case stdout (stderr) is not redirected to a file,
    # we redirect it into a temporary file tmpFileStdout
    # (tmpFileStderr) and copy the contents of this file
    # to stdout (stderr) given in the argument
    if not sh:
        sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
        return 127
    else:
        # one temporary file for stdout and stderr
        tmpFileStdout = os.path.normpath(tempfile.mktemp())
        tmpFileStderr = os.path.normpath(tempfile.mktemp())

        # check if output is redirected
        stdoutRedirected = 0
        stderrRedirected = 0
        for arg in args:
            # are there more possibilities to redirect stdout ?
            if (arg.find( ">", 0, 1 ) != -1 or
                arg.find( "1>", 0, 2 ) != -1):
                stdoutRedirected = 1
            # are there more possibilities to redirect stderr ?
            if arg.find( "2>", 0, 2 ) != -1:
                stderrRedirected = 1

        # redirect output of non-redirected streams to our tempfiles
        if stdoutRedirected == 0:
            args.append(">" + str(tmpFileStdout))
        if stderrRedirected == 0:
            args.append("2>" + str(tmpFileStderr))

        # actually do the spawn
        try:
            args = [sh, '/C', escape(' '.join(args)) ]
            ret = spawnve(os.P_WAIT, sh, args, env)
        except OSError, e:
            # catch any error
            try:
                ret = exitvalmap[e[0]]
            except KeyError:
                sys.stderr.write("scons: unknown OSError exception code %d - %s: %s\n" % (e[0], cmd, e[1]))
            if stderr is not None:
                stderr.write("scons: %s: %s\n" % (cmd, e[1]))

        # copy child output from tempfiles to our streams
        # and do clean up stuff
        if stdout is not None and stdoutRedirected == 0:
            try:
                stdout.write(open( tmpFileStdout, "r" ).read())
                os.remove( tmpFileStdout )
            except (IOError, OSError):
                pass
        if stderr is not None and stderrRedirected == 0:
            try:
                stderr.write(open( tmpFileStderr, "r" ).read())
                os.remove( tmpFileStderr )
            except (IOError, OSError):
                pass
        return ret
def exec_spawn(l, env):
    """Spawn the command line *l* (a list, l[0] is the program) with
    environment *env*, mapping OS errors to shell-style exit codes."""
    try:
        result = spawnve(os.P_WAIT, l[0], l, env)
    except OSError, e:
        try:
            result = exitvalmap[e[0]]
            sys.stderr.write("scons: %s: %s\n" % (l[0], e[1]))
        except KeyError:
            # Unknown errno: report it with a truncated command line and
            # fall back to the conventional 127 "command not found" code.
            result = 127
            if len(l) > 2:
                if len(l[2]) < 1000:
                    command = ' '.join(l[0:3])
                else:
                    command = l[0]
            else:
                command = l[0]
            sys.stderr.write("scons: unknown OSError exception code %d - '%s': %s\n" % (e[0], command, e[1]))
    return result
def spawn(sh, escape, cmd, args, env):
    """Spawn *args* through the command interpreter *sh*.

    Returns the child's exit status, or 127 when no interpreter is
    available.
    """
    if sh:
        command_line = [sh, '/C', escape(' '.join(args))]
        return exec_spawn(command_line, env)
    sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
    return 127
# Windows does not allow special characters in file names anyway, so no
# need for a complex escape function, we will just quote the arg, except
# that "cmd /c" requires that if an argument ends with a backslash it
# needs to be escaped so as not to interfere with closing double quote
# that we add.
def escape(x):
    """Quote *x* for use on a cmd.exe command line.

    A trailing backslash is doubled so it cannot escape the closing
    double quote that we append.
    """
    # endswith() is safe on the empty string, where the previous x[-1]
    # indexing raised IndexError.
    if x.endswith('\\'):
        x = x + '\\'
    return '"' + x + '"'
# Get the windows system directory name
_system_root = None  # cached result of get_system_root()

def get_system_root():
    """Return the Windows system root directory, consulting the registry
    when possible and caching the result."""
    global _system_root
    if _system_root is not None:
        return _system_root

    # A reasonable default if we can't read the registry
    val = os.environ.get('SystemRoot', "C:\\WINDOWS")

    if SCons.Util.can_read_reg:
        try:
            # Look for Windows NT system root
            k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                      'Software\\Microsoft\\Windows NT\\CurrentVersion')
            val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
        except SCons.Util.RegError:
            try:
                # Okay, try the Windows 9x system root
                k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                          'Software\\Microsoft\\Windows\\CurrentVersion')
                val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
            except KeyboardInterrupt:
                raise
            # Deliberate best-effort: any other registry failure falls
            # back to the environment-derived default.
            except:
                pass
    _system_root = val
    return val
# Get the location of the program files directory
def get_program_files_dir():
    """Return the Windows "Program Files" directory, preferring the
    registry value and falling back to a path derived from the system
    root."""
    # Now see if we can look in the registry...
    val = ''
    if SCons.Util.can_read_reg:
        try:
            # Look for Windows Program Files directory
            k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                      'Software\\Microsoft\\Windows\\CurrentVersion')
            val, tok = SCons.Util.RegQueryValueEx(k, 'ProgramFilesDir')
        except SCons.Util.RegError:
            val = ''
            pass

    if val == '':
        # A reasonable default if we can't read the registry
        # (Actually, it's pretty reasonable even if we can :-)
        val = os.path.join(os.path.dirname(get_system_root()),"Program Files")

    return val
# Determine which windows CPU we're running on.
class ArchDefinition(object):
    """
    A class for defining architecture-specific settings and logic.
    """
    def __init__(self, arch, synonyms=None):
        """*arch* is the canonical name; *synonyms* other names that map to it.

        The default is ``None`` rather than ``[]`` to avoid the shared
        mutable-default-argument pitfall (every no-synonym instance
        would otherwise alias one list object).
        """
        self.arch = arch
        self.synonyms = synonyms if synonyms is not None else []

SupportedArchitectureList = [
    ArchDefinition(
        'x86',
        ['i386', 'i486', 'i586', 'i686'],
    ),

    ArchDefinition(
        'x86_64',
        ['AMD64', 'amd64', 'em64t', 'EM64T', 'x86_64'],
    ),

    ArchDefinition(
        'ia64',
        ['IA64'],
    ),
]

# Map every canonical name and every synonym to its definition.
SupportedArchitectureMap = {}
for a in SupportedArchitectureList:
    SupportedArchitectureMap[a.arch] = a
    for s in a.synonyms:
        SupportedArchitectureMap[s] = a
def get_architecture(arch=None):
    """Returns the definition for the specified architecture string.

    If no string is specified, the system default is returned (as defined
    by the PROCESSOR_ARCHITEW6432 or PROCESSOR_ARCHITECTURE environment
    variables).
    """
    lookup = arch
    if lookup is None:
        # WOW64 exposes the real architecture via PROCESSOR_ARCHITEW6432.
        lookup = (os.environ.get('PROCESSOR_ARCHITEW6432')
                  or os.environ.get('PROCESSOR_ARCHITECTURE'))
    return SupportedArchitectureMap.get(lookup, ArchDefinition('', ['']))
def generate(env):
    """Populate construction environment *env* with win32 platform defaults.

    Locates a command interpreter (cmd.exe for the WinNT family via the
    registry, command.com for Win9x, with PATH-based fallbacks), imports
    a small whitelist of variables from the external environment into
    env['ENV'], and installs the standard Windows prefixes/suffixes,
    spawn functions, shell, escape helper and host os/arch settings.
    """
    # Attempt to find cmd.exe (for WinNT/2k/XP) or
    # command.com for Win9x
    cmd_interp = ''
    # First see if we can look in the registry...
    if SCons.Util.can_read_reg:
        try:
            # Look for Windows NT system root
            k = SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                        'Software\\Microsoft\\Windows NT\\CurrentVersion')
            val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
            cmd_interp = os.path.join(val, 'System32\\cmd.exe')
        except SCons.Util.RegError:
            try:
                # Okay, try the Windows 9x system root
                k = SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                            'Software\\Microsoft\\Windows\\CurrentVersion')
                val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
                cmd_interp = os.path.join(val, 'command.com')
            except KeyboardInterrupt:
                raise
            except:
                pass

    # For the special case of not having access to the registry, we
    # use a temporary path and pathext to attempt to find the command
    # interpreter. If we fail, we try to find the interpreter through
    # the env's PATH. The problem with that is that it might not
    # contain an ENV and a PATH.
    if not cmd_interp:
        systemroot = get_system_root()
        tmp_path = systemroot + os.pathsep + \
                   os.path.join(systemroot, 'System32')
        tmp_pathext = '.com;.exe;.bat;.cmd'
        if 'PATHEXT' in os.environ:
            tmp_pathext = os.environ['PATHEXT']
        cmd_interp = SCons.Util.WhereIs('cmd', tmp_path, tmp_pathext)
        if not cmd_interp:
            cmd_interp = SCons.Util.WhereIs('command', tmp_path, tmp_pathext)

    if not cmd_interp:
        cmd_interp = env.Detect('cmd')
        if not cmd_interp:
            cmd_interp = env.Detect('command')

    if 'ENV' not in env:
        env['ENV'] = {}

    # Import things from the external environment to the construction
    # environment's ENV. This is a potential slippery slope, because we
    # *don't* want to make builds dependent on the user's environment by
    # default. We're doing this for SystemRoot, though, because it's
    # needed for anything that uses sockets, and seldom changes, and
    # for SystemDrive because it's related.
    #
    # Weigh the impact carefully before adding other variables to this list.
    import_env = ['SystemDrive', 'SystemRoot', 'TEMP', 'TMP']
    for var in import_env:
        v = os.environ.get(var)
        if v:
            env['ENV'][var] = v

    if 'COMSPEC' not in env['ENV']:
        v = os.environ.get("COMSPEC")
        if v:
            env['ENV']['COMSPEC'] = v

    # Fixed: the original '\System32' relied on '\S' not being a
    # recognized escape sequence; '\\System32' is the same string,
    # spelled without the deprecated invalid-escape syntax.
    env.AppendENVPath('PATH', get_system_root() + '\\System32')

    env['ENV']['PATHEXT'] = '.COM;.EXE;.BAT;.CMD'
    env['OBJPREFIX'] = ''
    env['OBJSUFFIX'] = '.obj'
    env['SHOBJPREFIX'] = '$OBJPREFIX'
    env['SHOBJSUFFIX'] = '$OBJSUFFIX'
    env['PROGPREFIX'] = ''
    env['PROGSUFFIX'] = '.exe'
    env['LIBPREFIX'] = ''
    env['LIBSUFFIX'] = '.lib'
    env['SHLIBPREFIX'] = ''
    env['SHLIBSUFFIX'] = '.dll'
    env['LIBPREFIXES'] = ['$LIBPREFIX']
    env['LIBSUFFIXES'] = ['$LIBSUFFIX']
    env['PSPAWN'] = piped_spawn
    env['SPAWN'] = spawn
    env['SHELL'] = cmd_interp
    env['TEMPFILE'] = TempFileMunge
    env['TEMPFILEPREFIX'] = '@'
    env['MAXLINELENGTH'] = 2048
    env['ESCAPE'] = escape

    env['HOST_OS'] = 'win32'
    env['HOST_ARCH'] = get_architecture().arch
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| xiaohaidao007/pandoraBox-SDK-mt7620 | staging_dir/host/lib/scons-2.5.0/SCons/Platform/win32.py | Python | gpl-2.0 | 14,950 |
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
""" Application specific tags. """
import django_tables2 as tables
from django import template
from karaage.people.tables import PersonTable
from ..views.base import get_state_machine
register = template.Library()
@register.simple_tag(takes_context=True)
def application_state(context, application):
    """ Render current state of application, verbose. """
    state_template = template.loader.get_template(
        'kgapplications/%s_common_state.html' % application.type)
    return state_template.render({
        'roles': context['roles'],
        'org_name': context['org_name'],
        'application': application,
    })
@register.simple_tag(takes_context=True)
def application_request(context, application):
    """ Render current detail of application, verbose. """
    request_template = template.loader.get_template(
        'kgapplications/%s_common_request.html' % application.type)
    return request_template.render({
        'roles': context['roles'],
        'org_name': context['org_name'],
        'application': application,
    })
@register.simple_tag(takes_context=True)
def application_simple_state(context, application):
    """ Render current state of application, verbose. """
    machine = get_state_machine(application)
    return machine.get_state(application).name
@register.inclusion_tag(
    'kgapplications/common_actions.html', takes_context=True)
def application_actions(context):
    """ Render actions available. """
    tag_context = {'extra': ""}
    tag_context['roles'] = context['roles']
    tag_context['actions'] = context['actions']
    return tag_context
@register.tag(name="application_actions_plus")
def do_application_actions_plus(parser, token):
    """ Render actions available with extra text. """
    # Collect everything up to the closing tag as the "extra" content.
    inner_nodes = parser.parse(('end_application_actions',))
    parser.delete_first_token()
    return ApplicationActionsPlus(inner_nodes)
class ApplicationActionsPlus(template.Node):
    """ Node for rendering actions available with extra text. """

    def __init__(self, nodelist):
        super(ApplicationActionsPlus, self).__init__()
        self.nodelist = nodelist

    def render(self, context):
        """Render the wrapped nodes, then embed the result as ``extra``."""
        rendered_extra = self.nodelist.render(context)
        actions_template = template.loader.get_template(
            'kgapplications/common_actions.html')
        return actions_template.render({
            'roles': context['roles'],
            'extra': rendered_extra,
            'actions': context['actions'],
        })
@register.simple_tag(takes_context=True)
def get_similar_people_table(context, applicant):
    """Build a paginated table of people similar to *applicant*."""
    table = PersonTable(
        applicant.similar_people(),
        empty_text="(No potential duplicates found, please check manually)")
    tables.RequestConfig(
        context['request'], paginate={"per_page": 5}).configure(table)
    return table
| brianmay/karaage | karaage/plugins/kgapplications/templatetags/applications.py | Python | gpl-3.0 | 3,713 |
from functools import wraps
from django.core.exceptions import PermissionDenied
from django.views.decorators.csrf import csrf_exempt
from canvas import util, knobs, browse
from canvas.api_decorators import json_service
from canvas.exceptions import ServiceError
from canvas.metrics import Metrics
from canvas.redis_models import RateLimit
def short_id(id):
    """Return the base-36 (URL-friendly) string form of a numeric *id*."""
    return util.base36encode(id)
def long_id(short_id):
    """Decode a base-36 string back into its numeric id."""
    return util.base36decode(short_id)
def check_rate_limit(request):
    """Return True iff the requesting IP is within the public API rate limit."""
    limit_key = 'apicall:' + request.META['REMOTE_ADDR']
    return RateLimit(limit_key, knobs.PUBLIC_API_RATE_LIMIT).allowed()
def public_api_method(f):
    """Decorator wrapping a view as a rate-limited public JSON API endpoint.

    Enforces the per-IP rate limit and the per-request 'ids' item cap,
    records a metric for every outcome, and serves the wrapped
    function's docstring when it returns a falsy result (documentation
    request).
    """
    @csrf_exempt
    @json_service
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            request = kwargs.get('request') or args[0]
            if not check_rate_limit(request):
                Metrics.api_rate_limited.record(request)
                raise ServiceError("Slow down there, cowboy!")
            payload = request.JSON
            ids = payload.get('ids')
            if ids and len(ids) > knobs.PUBLIC_API_MAX_ITEMS:
                Metrics.api_items_limited.record(request)
                raise ServiceError("Max items per query limited to {0}".format(knobs.PUBLIC_API_MAX_ITEMS))
            kwargs['payload'] = payload
            ret = f(*args, **kwargs)
            Metrics.api_successful_request.record(request)
            if not ret:
                # No payload from the view: treat as a documentation request.
                Metrics.api_documentation.record(request)
                return {'documentation': f.__doc__}
            else:
                return ret
        except ServiceError:
            Metrics.api_failed_request.record(request)
            # Bare re-raise preserves the original traceback; the
            # previous 'raise se' truncated it under Python 2.
            raise
    return wrapper
| canvasnetworks/canvas | website/apps/public_api/util.py | Python | bsd-3-clause | 1,688 |
# Copyright (C) 2003 by Martin Pool <mbp@samba.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Symbolic names for unicode characters.
Defines symbolic names for a few UNICODE characters, to make test
source code more readable on machines that don't have all the
necessary fonts.
"""
# U+00D1 is the actual LATIN CAPITAL LETTER N WITH TILDE; the previous
# value u'\u004e' was plain "N" (U+004E), contradicting the name.
LATIN_CAPITAL_LETTER_N_WITH_TILDE = u'\u00d1'
LATIN_CAPITAL_LETTER_O_WITH_DIARESIS = u'\u00d6'
LATIN_SMALL_LETTER_O_WITH_DIARESIS = u'\u00f6'
KATAKANA_LETTER_A = u'\u30a2'
| sathieu/samba | python/samba/tests/unicodenames.py | Python | gpl-3.0 | 1,100 |
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from xml.auto_gen import XMLDAOListBase
from sql.auto_gen import SQLDAOListBase
from vistrails.core.system import get_elementtree_library
from vistrails.db import VistrailsDBException
from vistrails.db.versions.v1_0_3 import version as my_version
from vistrails.db.versions.v1_0_3.domain import DBGroup, DBWorkflow, DBVistrail, DBLog, \
DBRegistry, DBMashuptrail
# Entity types that may be stored/loaded as top-level (root) objects.
root_set = {DBVistrail.vtType, DBWorkflow.vtType,
            DBLog.vtType, DBRegistry.vtType, DBMashuptrail.vtType}

ElementTree = get_elementtree_library()
class DAOList(dict):
    """Registry of data-access-object collections, keyed by backend.

    Maps 'xml' and 'sql' to per-entity DAO collections and provides the
    load/save entry points for whole VisTrails entities (vistrails,
    workflows, logs, registries, mashup trails).

    NOTE: this module is Python 2 code (``iteritems``, ``xrange``,
    ``dict.values()[0]``, ``except SyntaxError, e``).
    """

    def __init__(self):
        self['xml'] = XMLDAOListBase()
        self['sql'] = SQLDAOListBase()

    def parse_xml_file(self, filename):
        """Parse *filename* and return an ElementTree tree."""
        return ElementTree.parse(filename)

    def write_xml_file(self, filename, tree):
        """Pretty-print *tree* in place and write it to *filename*."""
        def indent(elem, level=0):
            # Recursively adjust .text/.tail whitespace so the serialized
            # output is indented one step per nesting level.
            i = "\n" + level*" "
            if len(elem):
                if not elem.text or not elem.text.strip():
                    elem.text = i + " "
                if not elem.tail or not elem.tail.strip():
                    elem.tail = i
                # Deliberately rebinds the loop variable over the children.
                for elem in elem:
                    indent(elem, level+1)
                if not elem.tail or not elem.tail.strip():
                    elem.tail = i
            else:
                if level and (not elem.tail or not elem.tail.strip()):
                    elem.tail = i
        indent(tree.getroot())
        tree.write(filename)

    def read_xml_object(self, vtType, node):
        """Deserialize one object of type *vtType* from XML *node*."""
        return self['xml'][vtType].fromXML(node)

    def write_xml_object(self, obj, node=None):
        """Serialize *obj* to XML, optionally reusing *node*; return the node."""
        res_node = self['xml'][obj.vtType].toXML(obj, node)
        return res_node

    def open_from_xml(self, filename, vtType, tree=None):
        """open_from_xml(filename) -> DBVistrail"""
        if tree is None:
            tree = self.parse_xml_file(filename)
        vistrail = self.read_xml_object(vtType, tree.getroot())
        return vistrail

    def save_to_xml(self, obj, filename, tags, version=None):
        """save_to_xml(obj : object, filename: str, tags: dict,
        version: str) -> None

        Serializes *obj* to *filename*, stamping the schema *version*
        (defaults to this package's version) and any extra *tags* as
        attributes on the root element.
        """
        root = self.write_xml_object(obj)
        if version is None:
            version = my_version
        root.set('version', version)
        for k, v in tags.iteritems():
            root.set(k, v)
        tree = ElementTree.ElementTree(root)
        self.write_xml_file(filename, tree)

    def open_from_db(self, db_connection, vtType, id=None, lock=False,
                     global_props=None):
        """Load a single entity of *vtType* (and all its children) from the DB.

        Raises VistrailsDBException when zero or more than one matching
        row exists.  Child SELECTs are batched into one grouped
        execution for performance; nested DBGroup workflows are loaded
        recursively.
        """
        all_objects = {}
        if global_props is None:
            global_props = {}
        if id is not None:
            global_props['id'] = id
        # Fetch the root entity's own columns first.
        res_objects = self['sql'][vtType].get_sql_columns(db_connection,
                                                          global_props,
                                                          lock)
        if len(res_objects) > 1:
            raise VistrailsDBException("More than object of type '%s' and "
                                       "id '%s' exist in the database" %
                                       (vtType, id))
        elif len(res_objects) <= 0:
            raise VistrailsDBException("No objects of type '%s' and "
                                       "id '%s' exist in the database" %
                                       (vtType, id))
        all_objects.update(res_objects)
        res = res_objects.values()[0]
        global_props = {'entity_id': res.db_id,
                        'entity_type': res.vtType}

        # collect all commands so that they can be executed together
        # daoList should contain (dao_type, dao, dbCommand) values
        daoList = []
        # dbCommandList should contain dbCommand values
        dbCommandList = []

        # generate SELECT statements
        for dao_type, dao in self['sql'].iteritems():
            if dao_type in root_set:
                continue
            daoList.append([dao_type, dao, None])
            dbCommand = dao.get_sql_select(db_connection, global_props, lock)
            dbCommandList.append(dbCommand)

        # Execute all select statements as one group.
        results = self['sql'][vtType].executeSQLGroup(db_connection,
                                                      dbCommandList, True)
        # add result to correct dao
        for i in xrange(len(daoList)):
            daoList[i][2] = results[i]

        # process results
        for dao_type, dao, data in daoList:
            current_objs = dao.process_sql_columns(data, global_props)
            all_objects.update(current_objs)

            if dao_type == DBGroup.vtType:
                # Groups embed a whole sub-workflow: load it recursively.
                for key, obj in current_objs.iteritems():
                    new_props = {'parent_id': key[1],
                                 'entity_id': global_props['entity_id'],
                                 'entity_type': global_props['entity_type']}
                    res_obj = self.open_from_db(db_connection,
                                                DBWorkflow.vtType,
                                                None, lock, new_props)
                    res_dict = {}
                    res_dict[(res_obj.vtType, res_obj.db_id)] = res_obj
                    all_objects.update(res_dict)

        # Wire children into their parents (skip the root itself).
        for key, obj in all_objects.iteritems():
            if key[0] == vtType and key[1] == id:
                continue
            self['sql'][obj.vtType].from_sql_fast(obj, all_objects)
        # Freshly loaded objects are clean.
        for obj in all_objects.itervalues():
            obj.is_dirty = False
            obj.is_new = False
        return res

    def open_many_from_db(self, db_connection, vtType, ids, lock=False):
        """ Loads multiple objects. They need to be loaded as one single
        multiple select statement command for performance reasons.
        """
        log_dao = self['sql'][vtType]
        # loop through ids and build SELECT statements
        selects = [log_dao.get_sql_select(db_connection, {'id': id}, lock)
                   for id in ids]
        # Execute all SELECT statements for main objects
        results = log_dao.executeSQLGroup(db_connection, selects, True)
        # list of final objects
        objects = []
        # list of selects
        selects = []
        # mapping of children id -> all_objects dict for that entity
        all_objects_dict = {}
        # process each result and extract child SELECTS
        # daoList should contain (id, dao_type, dao, result) values
        daoList = []
        # selects should contain dbCommand values (reset before reuse)
        selects = []
        global_props = {}
        for id, data in zip(ids, results):
            res_objects = log_dao.process_sql_columns(data, global_props)
            if len(res_objects) > 1:
                raise VistrailsDBException("More than object of type '%s' and "
                                           "id '%s' exist in the database" %
                                           (vtType, id))
            elif len(res_objects) <= 0:
                raise VistrailsDBException("No objects of type '%s' and "
                                           "id '%s' exist in the database" %
                                           (vtType, id))
            all_objects = {}
            all_objects_dict[id] = all_objects
            all_objects.update(res_objects)
            objects.append(res_objects.values()[0])
            # collect all commands so that they can be executed together
            # generate SELECT statements for children
            for dao_type, dao in self['sql'].iteritems():
                if dao_type in root_set:
                    continue
                daoList.append([id, dao_type, dao, None])
                dbCommand = dao.get_sql_select(db_connection, global_props, lock)
                selects.append(dbCommand)
        # Execute all child select statements
        results = self['sql'][vtType].executeSQLGroup(db_connection,
                                                      selects, True)
        for i in xrange(len(daoList)):
            daoList[i][3] = results[i]

        # process results
        for id, dao_type, dao, data in daoList:
            all_objects = all_objects_dict[id]
            current_objs = dao.process_sql_columns(data, global_props)
            all_objects.update(current_objs)

            if dao_type == DBGroup.vtType:
                # Recursively load each group's embedded workflow.
                for key, obj in current_objs.iteritems():
                    new_props = {'parent_id': key[1],
                                 'entity_id': global_props['entity_id'],
                                 'entity_type': global_props['entity_type']}
                    res_obj = self.open_from_db(db_connection,
                                                DBWorkflow.vtType,
                                                None, lock, new_props)
                    res_dict = {}
                    res_dict[(res_obj.vtType, res_obj.db_id)] = res_obj
                    all_objects.update(res_dict)

        # Wire children into parents, per entity (skip each root itself).
        for id, all_objects in all_objects_dict.iteritems():
            for key, obj in all_objects.iteritems():
                if key[0] == vtType and key[1] == id:
                    continue
                self['sql'][obj.vtType].from_sql_fast(obj, all_objects)
        # Freshly loaded objects are clean.
        for id, dao_type, dao, data in daoList:
            all_objects = all_objects_dict[id]
            for obj in all_objects.itervalues():
                obj.is_dirty = False
                obj.is_new = False

        return objects

    def save_to_db(self, db_connection, obj, do_copy=False, global_props=None):
        """Persist *obj* and its children to the database.

        do_copy semantics: 'with_ids' keeps existing ids while copying;
        any other truthy value clears db_id so new ids are assigned.
        Inserts/updates for the children are batched into one grouped
        execution.
        """
        if do_copy == 'with_ids':
            do_copy = True
        elif do_copy and obj.db_id is not None:
            obj.db_id = None
        children = obj.db_children()
        children.reverse()
        if global_props is None:
            global_props = {'entity_type': obj.vtType}
        # assumes not deleting entire thing
        child = children[0][0]
        self['sql'][child.vtType].set_sql_columns(db_connection, child,
                                                  global_props, do_copy)
        self['sql'][child.vtType].to_sql_fast(child, do_copy)

        global_props = {'entity_id': child.db_id,
                        'entity_type': child.vtType}

        # do deletes
        if not do_copy:
            for (child, _, _) in children:
                for c in child.db_deleted_children(True):
                    self['sql'][c.vtType].delete_sql_column(db_connection,
                                                            c,
                                                            global_props)
        child = children.pop(0)[0]
        child.is_dirty = False
        child.is_new = False

        if not len(children):
            return

        # list of all children
        dbCommandList = []
        writtenChildren = []
        # process remaining children
        for (child, _, _) in children:
            dbCommand = self['sql'][child.vtType].set_sql_command(
                db_connection, child, global_props, do_copy)
            if dbCommand is not None:
                dbCommandList.append(dbCommand)
                writtenChildren.append(child)
            self['sql'][child.vtType].to_sql_fast(child, do_copy)

        # Debug version of Execute all insert/update statements
        #results = [self['sql'][children[0][0].vtType].executeSQL(
        #    db_connection, c, False) for c in dbCommandList]
        # Execute all insert/update statements
        results = self['sql'][children[0][0].vtType].executeSQLGroup(
            db_connection,
            dbCommandList, False)
        resultDict = dict(zip(writtenChildren, results))
        # process remaining children
        for (child, _, _) in children:
            if child in resultDict:
                lastId = resultDict[child]
                self['sql'][child.vtType].set_sql_process(child,
                                                          global_props,
                                                          lastId)
            self['sql'][child.vtType].to_sql_fast(child, do_copy)
            if child.vtType == DBGroup.vtType:
                if child.db_workflow:
                    # Groups carry a nested workflow: save it recursively.
                    new_props = {'entity_id': global_props['entity_id'],
                                 'entity_type': global_props['entity_type']}
                    # Preserve the dirty flag across the entity-type write.
                    is_dirty = child.db_workflow.is_dirty
                    child.db_workflow.db_entity_type = DBWorkflow.vtType
                    child.db_workflow.is_dirty = is_dirty
                    self.save_to_db(db_connection, child.db_workflow, do_copy,
                                    new_props)

    def save_many_to_db(self, db_connection, objList, do_copy=False):
        """Persist many entities at once, batching all SQL into grouped calls.

        Same semantics as save_to_db (including the 'with_ids' copy
        mode), but the main-object writes and the child writes are each
        executed as a single grouped statement for performance.
        """
        if do_copy == 'with_ids':
            do_copy = True
        if not len(objList):
            return
        childrenDict = {}
        global_propsDict = {}
        dbCommandList = []
        writtenChildren = []
        for obj in objList:
            if do_copy and obj.db_id is not None:
                obj.db_id = None
            children = obj.db_children()
            children.reverse()
            global_props = {'entity_type': obj.vtType}
            child = children[0][0]
            dbCommand = self['sql'][child.vtType].set_sql_command(
                db_connection, child, global_props, do_copy)
            if dbCommand is not None:
                dbCommandList.append(dbCommand)
                writtenChildren.append(child)
            childrenDict[child] = children
            global_propsDict[child] = global_props

        # Execute all insert/update statements for the main objects
        results = self['sql'][children[0][0].vtType].executeSQLGroup(
            db_connection,
            dbCommandList, False)
        resultDict = dict(zip(writtenChildren, results))

        dbCommandList = []
        writtenChildren = []
        for child, children in childrenDict.iteritems():
            # process objects
            if child in resultDict:
                lastId = resultDict[child]
                self['sql'][child.vtType].set_sql_process(
                    child, global_propsDict[child], lastId)
            self['sql'][child.vtType].to_sql_fast(child, do_copy)

            # process children
            global_props = {'entity_id': child.db_id,
                            'entity_type': child.vtType}
            global_propsDict[child] = global_props

            # do deletes
            if not do_copy:
                for (child, _, _) in childrenDict[child]:
                    for c in child.db_deleted_children(True):
                        self['sql'][c.vtType].delete_sql_column(db_connection,
                                                                c,
                                                                global_props)
            child = children.pop(0)[0]
            child.is_dirty = False
            child.is_new = False

            # list of all children
            # process remaining children
            for (child, _, _) in children:
                dbCommand = self['sql'][child.vtType].set_sql_command(
                    db_connection, child, global_props, do_copy)
                if dbCommand is not None:
                    dbCommandList.append(dbCommand)
                    writtenChildren.append(child)
                self['sql'][child.vtType].to_sql_fast(child, do_copy)

        # Execute all child insert/update statements
        results = self['sql'][children[0][0].vtType].executeSQLGroup(
            db_connection,
            dbCommandList, False)
        resultDict = dict(zip(writtenChildren, results))

        for child, children in childrenDict.iteritems():
            global_props = global_propsDict[child]
            # process remaining children
            for (child, _, _) in children:
                if child in resultDict:
                    lastId = resultDict[child]
                    self['sql'][child.vtType].set_sql_process(child,
                                                              global_props,
                                                              lastId)
                self['sql'][child.vtType].to_sql_fast(child, do_copy)
                if child.vtType == DBGroup.vtType:
                    if child.db_workflow:
                        # Nested group workflow: save it recursively.
                        new_props = {'entity_id': global_props['entity_id'],
                                     'entity_type': global_props['entity_type']}
                        is_dirty = child.db_workflow.is_dirty
                        child.db_workflow.db_entity_type = DBWorkflow.vtType
                        child.db_workflow.is_dirty = is_dirty
                        self.save_to_db(db_connection, child.db_workflow, do_copy,
                                        new_props)

    def delete_from_db(self, db_connection, type, obj_id):
        """Delete the root entity *obj_id* of *type* plus all its child rows.

        Only entity types in root_set may be deleted.
        """
        if type not in root_set:
            raise VistrailsDBException("Cannot delete entity of type '%s'"
                                       % type)

        id_str = str(obj_id)
        # Delete every child-table row tagged with this entity first...
        for (dao_type, dao) in self['sql'].iteritems():
            if dao_type not in root_set:
                db_cmd = \
                    self['sql'][type].createSQLDelete(dao.table,
                                                      {'entity_type': type,
                                                       'entity_id': id_str})
                self['sql'][type].executeSQL(db_connection, db_cmd, False)
        # ...then the root row itself.
        db_cmd = self['sql'][type].createSQLDelete(self['sql'][type].table,
                                                   {'id': id_str})
        self['sql'][type].executeSQL(db_connection, db_cmd, False)

    def serialize(self, object):
        """Return *object* serialized as an XML string."""
        root = self.write_xml_object(object)
        return ElementTree.tostring(root)

    def unserialize(self, str, obj_type):
        """Parse an XML string back into an object of *obj_type*.

        All (nested) objects are marked dirty/new so a later save writes
        them out.  Raises VistrailsDBException on malformed input.
        (NOTE: the parameter shadows the builtin ``str``; kept for
        interface compatibility.)
        """
        def set_dirty(obj):
            for child, _, _ in obj.db_children():
                if child.vtType == DBGroup.vtType:
                    if child.db_workflow:
                        set_dirty(child.db_workflow)
                child.is_dirty = True
                child.is_new = True
        try:
            root = ElementTree.fromstring(str)
            obj = self.read_xml_object(obj_type, root)
            set_dirty(obj)
            return obj
        except SyntaxError, e:
            # ElementTree raises ParseError (a SyntaxError subclass) on
            # malformed XML; re-wrap it as a database exception.
            msg = "Invalid VisTrails serialized object %s" % str
            raise VistrailsDBException(msg)
        return None
| VisTrails/VisTrails | vistrails/db/versions/v1_0_3/persistence/__init__.py | Python | bsd-3-clause | 21,040 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.