repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Silmathoron/PyNeurActiv | analysis/__init__.py | Python | gpl-3.0 | 1,164 | 0.003436 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the PyNeurActiv project, which aims at providing tools
# to study and model the activity of neuronal cultures.
# Copyright (C) 2017 SENeC Initiative
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the Li | cense, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A P | ARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
===============
Analysis module
===============
Tools to work on simulated or recorded data.
Content
=======
"""
from . import activity_properties as _ap
from . import array_searching as _as
from .activity_properties import *
from .array_searching import *
__all__ = _ap.__all__ + _as.__all__
|
kyleconroy/vogeltron | tests/test_sports.py | Python | mit | 6,742 | 0 | import mock
from datetime import datetime, timezone
import pytz
from nose.tools import assert_equals, assert_raises
from vogeltron import baseball
from bs4 import BeautifulSoup
YEAR = datetime.today().year
def date_for_month(month, day, hour, minute):
    """Build a US/Pacific-localized datetime in the current year."""
    pacific = pytz.timezone('US/Pacific')
    naive = datetime(YEAR, month, day, hour, minute)
    return pacific.localize(naive)
def april(day, hour, minute):
    """Shortcut: a Pacific-localized April datetime in the current year."""
    return date_for_month(4, day, hour, minute)
def june(day, hour, minute):
    """Shortcut: a Pacific-localized June datetime in the current year."""
    return date_for_month(6, day, hour, minute)
game = baseball.Result('LA Dodgers', april(1, 20, 5), False, False, '4-0')
def test_game_date():
    """pretty_date renders as 'Month D' without the year."""
    assert_equals('April 1', game.pretty_date)
def test_game_time():
    """pretty_time renders in zero-padded 12-hour format."""
    assert_equals('08:05PM', game.pretty_time)
def test_game_description():
    """An away game is described as 'at <opponent>'."""
    assert_equals('at LA Dodgers', game.description)
@mock.patch('requests.get')
def test_all_teams(_get):
    """The teams fixture page yields all 30 MLB franchises."""
    # Use a context manager so the fixture file handle is closed promptly
    # (the original open(...).read() leaked the handle).
    with open('tests/fixtures/teams.html') as fixture:
        _get().content = fixture.read()
    teams = baseball.teams()
    assert_equals(len(teams), 30)
@mock.patch('requests.get')
def test_first_teams(_get):
    """Spot-check the fields of the first parsed team entry."""
    # Context manager avoids leaking the fixture file handle.
    with open('tests/fixtures/teams.html') as fixture:
        _get().content = fixture.read()
    team = baseball.teams()[0]
    assert_equals(team['name'], 'Baltimore Orioles')
    assert_equals(team['league'], 'AMERICAN')
    assert_equals(team['division'], 'EAST')
    assert_equals(team['links']['schedule'],
                  'http://espn.go.com/mlb/teams/schedule?team=bal')
@mock.patch('requests.get')
def test_results(_get):
    """Completed games are parsed into Result tuples from the schedule."""
    # Context manager avoids leaking the fixture file handle.
    with open('tests/fixtures/schedule.html') as fixture:
        _get().content = fixture.read()
    results, _ = baseball.schedule('WEST', 'http://example.com')
    assert_equals(results, [
        baseball.Result('LA Dodgers', april(1, 13, 5), False, False, '4-0'),
        baseball.Result('LA Dodgers', april(2, 13, 5), False, True, '3-0'),
        baseball.Result('LA Dodgers', april(3, 13, 5), False, True, '5-3'),
        baseball.Result('St. Louis', april(5, 13, 5), True, True, '1-0'),
    ])
@mock.patch('requests.get')
def test_no_next_game(_get):
    """next_game still resolves a game id from the current-game fixture."""
    # Context manager avoids leaking the fixture file handle.
    with open('tests/fixtures/schedule_current_game.html') as fixture:
        _get().content = fixture.read()
    game_time, game_id = baseball.next_game('http://example.com')
    assert_equals(game_id, '330406126')
@mock.patch('requests.get')
def test_next_game_against_bluejays(_get):
    """next_game copes with a schedule that contains a double header."""
    # Context manager avoids leaking the fixture file handle.
    with open('tests/fixtures/bluejays_with_double_header.html') as fixture:
        _get().content = fixture.read()
    game_time, game_id = baseball.next_game('http://example.com')
    assert game_time is not None
    assert_equals('330604126', game_id)
@mock.patch('requests.get')
def test_next_game(_get):
    """next_game returns both the game id and the localized start time."""
    # Context manager avoids leaking the fixture file handle.
    with open('tests/fixtures/schedule.html') as fixture:
        _get().content = fixture.read()
    game_time, game_id = baseball.next_game('http://example.com')
    assert_equals(game_id, '330406126')
    assert_equals(game_time, april(6, 13, 5))
@mock.patch('requests.get')
def test_upcoming(_get):
    """Unplayed games come back with a '0-0' score and a None result flag."""
    # Context manager avoids leaking the fixture file handle.
    with open('tests/fixtures/schedule.html') as fixture:
        _get().content = fixture.read()
    _, upcoming = baseball.schedule('WEST', 'http://example.com')
    assert_equals(upcoming, [
        baseball.Result('St. Louis', april(6, 13, 5), True, None, '0-0'),
        baseball.Result('St. Louis', april(7, 13, 5), True, None, '0-0'),
        baseball.Result('Colorado', april(8, 19, 15), True, None, '0-0'),
        baseball.Result('Colorado', april(9, 19, 15), True, None, '0-0'),
        baseball.Result('Colorado', april(10, 12, 45), True, None, '0-0'),
    ])
@mock.patch('requests.get')
def test_upcoming_with_skipped(_get):
    """A double header in the schedule does not derail the upcoming list."""
    # Context manager avoids leaking the fixture file handle.
    with open('tests/fixtures/bluejays_with_double_header.html') as fixture:
        _get().content = fixture.read()
    _, upcoming = baseball.schedule('WEST', 'http://example.com')
    print(upcoming[0].opponent)
    assert_equals(upcoming, [
        baseball.Result('Toronto', june(4, 19, 15), True, None, '0-0'),
        baseball.Result('Toronto', june(5, 12, 45), True, None, '0-0'),
        baseball.Result('Arizona', june(7, 18, 40), False, None, '0-0'),
        baseball.Result('Arizona', june(8, 19, 10), False, None, '0-0'),
        baseball.Result('Arizona', june(9, 13, 10), False, None, '0-0'),
    ])
@mock.patch('requests.get')
def test_standings(_get):
    """Division standings are parsed into Standing tuples in rank order."""
    # Context manager avoids leaking the fixture file handle.
    with open('tests/fixtures/standings.html') as fixture:
        _get().content = fixture.read()
    standings = baseball.current_standings('NATIONAL', 'WEST')
    examples = [
        baseball.Standing('San Francisco', 'SF', 3, 1, .75, 0.0, 'Won 3'),
        baseball.Standing('Colorado', 'COL', 3, 1, .75, 0.0, 'Won 3'),
        baseball.Standing('Arizona', 'ARI', 2, 1, .667, 0.5, 'Won 1'),
        baseball.Standing('LA Dodgers', 'LAD', 1, 2, .333, 1.5, 'Lost 2'),
        baseball.Standing('San Diego', 'SD', 1, 3, .250, 2.0, 'Lost 1'),
    ]
    assert_equals(standings, examples)
def test_parse_gametime_tba():
    """A 'TBA' start time still parses to a UTC datetime."""
    expected = pytz.utc.localize(datetime(YEAR, 4, 1, 20, 5))
    assert_equals(expected, baseball.parse_gametime("Mon, Apr 1", "TBA"))
def test_parse_gametime_postponed():
    """A 'POSTPONED' start time still parses to a UTC datetime."""
    expected = pytz.utc.localize(datetime(YEAR, 4, 1, 20, 5))
    assert_equals(expected, baseball.parse_gametime("Mon, Apr 1", "POSTPONED"))
def test_parse_gametime():
    """A local '4:05 PM' Pacific start time converts to 20:05 UTC."""
    expected = pytz.utc.localize(datetime(YEAR, 4, 1, 20, 5))
    assert_equals(expected, baseball.parse_gametime("Mon, Apr 1", "4:05 PM"))
def test_no_team_info():
    """Looking up a bogus team name raises."""
    bogus_name = 'Giantssjk'
    with assert_raises(Exception):
        baseball.team_info(bogus_name)
def test_team_info():
    """A nickname resolves to the full franchise record."""
    giants = baseball.team_info('Giants')
    assert_equals(giants['name'], 'San Francisco Giants')
def test_normalize():
    """normalize upcases its input and drops spaces and hyphens."""
    cases = [
        ('Giants', 'GIANTS'),
        ('Francisco Giants', 'FRANCISCOGIANTS'),
        ('Red-Sox', 'REDSOX'),
    ]
    for raw, expected in cases:
        assert_equals(baseball.normalize(raw), expected)
def test_preview_weather():
    """Weather text is scraped from the in-game preview page."""
    # Context manager closes the fixture handle once BeautifulSoup has
    # consumed it (the original BeautifulSoup(open(...)) leaked it).
    with open('tests/fixtures/preview_during.html') as fixture:
        soup = BeautifulSoup(fixture)
    assert_equals(baseball.parse_weather(soup), '40° Broken Clouds')
def test_preview_gametime():
    """The preview page's first-pitch time is parsed as an aware UTC datetime."""
    # Context manager avoids leaking the fixture file handle.
    with open('tests/fixtures/preview_during.html') as fixture:
        soup = BeautifulSoup(fixture)
    assert_equals(baseball.parse_game_time(soup),
                  datetime(2013, 4, 13, 17, 5, tzinfo=timezone.utc))
def test_preview_teamname():
    """Team name and win-loss record are read from the preview header."""
    # Context manager avoids leaking the fixture file handle.
    with open('tests/fixtures/preview_during.html') as fixture:
        soup = BeautifulSoup(fixture)
    name, record = baseball.parse_team_info(soup, 0)
    assert_equals(name, "Giants")
    assert_equals(record, "7-4")
def test_preview_pitcher():
    """Starting pitcher name, ERA, and record are extracted from the preview."""
    # Context manager avoids leaking the fixture file handle.
    with open('tests/fixtures/preview_during.html') as fixture:
        soup = BeautifulSoup(fixture)
    pitcher = baseball.parse_starting_pitcher(soup, 0)
    assert_equals(pitcher.name, "Bumgarner")
    assert_equals(pitcher.era, 0.96)
    assert_equals(pitcher.record, '2-0')
def test_preview_lineup():
    """The full nine-man starting lineup is parsed in batting order."""
    # Context manager avoids leaking the fixture file handle.
    with open('tests/fixtures/preview_during.html') as fixture:
        soup = BeautifulSoup(fixture)
    lineup = baseball.parse_starting_lineup(soup, 0)
    leadoff = lineup[0]
    assert_equals(len(lineup), 9)
    assert_equals(leadoff.name, 'Blanco')
    assert_equals(leadoff.position, 'CF')
|
keen99/SickRage | tornado/test/gen_test.py | Python | gpl-3.0 | 38,133 | 0.000367 | from __future__ import absolute_import, division, print_function, with_statement
import contextlib
import datetime
import functools
import sys
import textwrap
import time
import platform
import weakref
from tornado.concurrent import return_future, Future
from tornado.escape import url_escape
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest, skipOnTravis
from tornado.web import Application, RequestHandler, asynchronous, HTTPError
from tornado import gen
try:
from concurrent import futures
except ImportError:
futures = None
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 not available')
skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
'Not CPython implementation')
class GenEngineTest(AsyncTestCase):
    def setUp(self):
        """Reset the per-test stack of named context labels."""
        super(GenEngineTest, self).setUp()
        self.named_contexts = []
def named_context(self, name):
@contextlib.contextmanager
def context():
sel | f.named_contexts.append(name)
try:
yield
finally:
self.assertEqual(self.named_contexts.pop(), name)
return context
    def run_gen(self, f):
        """Kick off a gen.engine-decorated callable and block until stop()."""
        f()
        return self.wait()
def delay_callback(self, iterations, callback, arg):
"""Runs callback(arg) after a number of IOLoop iterations."""
if iterations == 0:
callba | ck(arg)
else:
self.io_loop.add_callback(functools.partial(
self.delay_callback, iterations - 1, callback, arg))
    @return_future
    def async_future(self, result, callback):
        """Deliver `result` via `callback` on the next IOLoop iteration."""
        self.io_loop.add_callback(callback, result)
def test_no_yield(self):
@gen.engine
def f():
self.stop()
self.run_gen(f)
def test_inline_cb(self):
@gen.engine
def f():
(yield gen.Callback("k1"))()
res = yield gen.Wait("k1")
self.assertTrue(res is None)
self.stop()
self.run_gen(f)
def test_ioloop_cb(self):
@gen.engine
def f():
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.stop()
self.run_gen(f)
def test_exception_phase1(self):
@gen.engine
def f():
1 / 0
self.assertRaises(ZeroDivisionError, self.run_gen, f)
def test_exception_phase2(self):
@gen.engine
def f():
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
1 / 0
self.assertRaises(ZeroDivisionError, self.run_gen, f)
def test_exception_in_task_phase1(self):
def fail_task(callback):
1 / 0
@gen.engine
def f():
try:
yield gen.Task(fail_task)
raise Exception("did not get expected exception")
except ZeroDivisionError:
self.stop()
self.run_gen(f)
def test_exception_in_task_phase2(self):
# This is the case that requires the use of stack_context in gen.engine
def fail_task(callback):
self.io_loop.add_callback(lambda: 1 / 0)
@gen.engine
def f():
try:
yield gen.Task(fail_task)
raise Exception("did not get expected exception")
except ZeroDivisionError:
self.stop()
self.run_gen(f)
def test_with_arg(self):
@gen.engine
def f():
(yield gen.Callback("k1"))(42)
res = yield gen.Wait("k1")
self.assertEqual(42, res)
self.stop()
self.run_gen(f)
def test_with_arg_tuple(self):
@gen.engine
def f():
(yield gen.Callback((1, 2)))((3, 4))
res = yield gen.Wait((1, 2))
self.assertEqual((3, 4), res)
self.stop()
self.run_gen(f)
def test_key_reuse(self):
@gen.engine
def f():
yield gen.Callback("k1")
yield gen.Callback("k1")
self.stop()
self.assertRaises(gen.KeyReuseError, self.run_gen, f)
def test_key_reuse_tuple(self):
@gen.engine
def f():
yield gen.Callback((1, 2))
yield gen.Callback((1, 2))
self.stop()
self.assertRaises(gen.KeyReuseError, self.run_gen, f)
def test_key_mismatch(self):
@gen.engine
def f():
yield gen.Callback("k1")
yield gen.Wait("k2")
self.stop()
self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
def test_key_mismatch_tuple(self):
@gen.engine
def f():
yield gen.Callback((1, 2))
yield gen.Wait((2, 3))
self.stop()
self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
def test_leaked_callback(self):
@gen.engine
def f():
yield gen.Callback("k1")
self.stop()
self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
def test_leaked_callback_tuple(self):
@gen.engine
def f():
yield gen.Callback((1, 2))
self.stop()
self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
def test_parallel_callback(self):
@gen.engine
def f():
for k in range(3):
self.io_loop.add_callback((yield gen.Callback(k)))
yield gen.Wait(1)
self.io_loop.add_callback((yield gen.Callback(3)))
yield gen.Wait(0)
yield gen.Wait(3)
yield gen.Wait(2)
self.stop()
self.run_gen(f)
def test_bogus_yield(self):
@gen.engine
def f():
yield 42
self.assertRaises(gen.BadYieldError, self.run_gen, f)
def test_bogus_yield_tuple(self):
@gen.engine
def f():
yield (1, 2)
self.assertRaises(gen.BadYieldError, self.run_gen, f)
def test_reuse(self):
@gen.engine
def f():
self.io_loop.add_callback((yield gen.Callback(0)))
yield gen.Wait(0)
self.stop()
self.run_gen(f)
self.run_gen(f)
def test_task(self):
@gen.engine
def f():
yield gen.Task(self.io_loop.add_callback)
self.stop()
self.run_gen(f)
def test_wait_all(self):
@gen.engine
def f():
(yield gen.Callback("k1"))("v1")
(yield gen.Callback("k2"))("v2")
results = yield gen.WaitAll(["k1", "k2"])
self.assertEqual(results, ["v1", "v2"])
self.stop()
self.run_gen(f)
def test_exception_in_yield(self):
@gen.engine
def f():
try:
yield gen.Wait("k1")
raise Exception("did not get expected exception")
except gen.UnknownKeyError:
pass
self.stop()
self.run_gen(f)
def test_resume_after_exception_in_yield(self):
@gen.engine
def f():
try:
yield gen.Wait("k1")
raise Exception("did not get expected exception")
except gen.UnknownKeyError:
pass
(yield gen.Callback("k2"))("v2")
self.assertEqual((yield gen.Wait("k2")), "v2")
self.stop()
self.run_gen(f)
def test_orphaned_callback(self):
@gen.engine
def f():
self.orphaned_callback = yield gen.Callback(1)
try:
self.run_gen(f)
raise Exception("did not get expected exception")
except gen.LeakedCallbackError:
pass
self.orphaned_callback()
def test_multi(self):
@gen.engine
def f():
(yield gen.Callback("k1"))("v1")
(yield gen.Callback("k2"))("v2")
|
debugchannel/debugchannel-python-client | test/DebugChannelTest.py | Python | mit | 891 | 0.004489 | from DebugChannel import DebugChannel
from unittest import TestCase
from model.Person import Person
class DebugChannelTest(TestCase):
def setUp(self):
self.d = DebugChannel('http://192.168.2.17', '1025', 'hello/world')
def testLogStringDoesNotThrowException(self):
self.d.log("hello")
def testLogIntDoesNotThrowException(self):
self.d.log(44)
def testLogNullDoesNotThrowException(self):
self.d.log(None)
def testLogObjectDoesNotThrowException(self):
peterGriffin = Person('Peter Griffin', 45)
chrisGriffin = Person('Chris Griffin', 15, | peterGriffin)
self.d.log(chrisGriffin)
def testLogRecursionDoesNotThrowException(se | lf):
class Node(object): pass
n1, n2 = Node(), Node()
n1.name, n2.name = "NODE 1", "NODE 2"
n1.neighbour, n2.neighbour = n2, n1
self.d.log(n1)
|
dmlc/tvm | python/tvm/topi/vision/ssd/multibox.py | Python | apache-2.0 | 10,269 | 0.000974 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, undefined-variable
"""SSD multibox operators"""
import tvm
from tvm.te import hybrid
from tvm.tir import exp, sqrt
from tvm import topi
from ..nms import non_max_suppression
@hybrid.script
def hybrid_multibox_prior(data, sizes, ratios, steps, offsets):
"""Hybrid routing for multibox_prior operator.
Parameters
----------
data : tvm.te.Tensor or numpy NDArray
4-D tensor with shape [batch, channel, height, width]]
sizes : tvm ConsExpr
Sizes for anchor boxes.
ratios : tvm ConsExpr
Ratios for anchor boxes.
steps : tvm ConsExpr
Priorbox step across y and x, -1 for auto calculation.
offsets : tvm ConsExpr
Priorbox center offsets, y and x respectively.
Returns
-------
output : tvm.te.Tensor or numpy NDArray
3-D tensor with shape [1, h_in * w_in * (num_sizes + num_ratios - 1), 4]
"""
in_height = data.shape[2]
in_width = data.shape[3]
num_sizes = len(sizes)
num_ratios = len(ratios)
num_boxes = in_height * in_width * (num_sizes + num_ratios - 1)
output = output_tensor((1, num_boxes, 4), "float32")
steps_h = steps[0] * 1.0 if steps[0] > 0 else 1.0 / in_height
steps_w = steps[1] * 1.0 if steps[1] > 0 else 1.0 / in_width
offset_h = offsets[0]
offset_w = offsets[1]
# Need to define var out of const_range + if
w = 0.0
h = 0.0
for i in parallel(in_height):
center_h = (i + offset_h) * steps_h
for j in range(in_width):
center_w = (j + offset_w) * steps_w
for k in const_range(num_sizes + num_ratios - 1):
if k < num_sizes:
w = float32(sizes[k] * in_height) / in_width / 2.0
h = sizes[k] / 2.0
| else:
w = (
float32(sizes[0] * in_height)
/ in_width
* sqrt(ratios[k - num_sizes + 1] * 1.0)
/ 2.0
)
h = sizes[0] / sqrt(ratios[k - num_sizes + 1] * 1.0) / 2.0
count = (
i * in_width * (num_sizes + num_ratios - 1)
| + j * (num_sizes + num_ratios - 1)
+ k
)
output[0, count, 0] = center_w - w
output[0, count, 1] = center_h - h
output[0, count, 2] = center_w + w
output[0, count, 3] = center_h + h
return output
def multibox_prior(data, sizes=(1,), ratios=(1,), steps=(-1, -1), offsets=(0.5, 0.5), clip=False):
    """Generate prior(anchor) boxes from data, sizes and ratios.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, c_in, h_in, w_in]]

    sizes : tuple of float
        Tuple of sizes for anchor boxes.

    ratios : tuple of float
        Tuple of ratios for anchor boxes.

    steps : Tuple of float
        Priorbox step across y and x, -1 for auto calculation.

    offsets : tuple of int
        Priorbox center offsets, y and x respectively.

    clip : boolean
        Whether to clip out-of-boundary boxes.

    Returns
    -------
    out : tvm.te.Tensor
        3-D tensor with shape [1, h_in * w_in * (num_sizes + num_ratios - 1), 4]
    """
    # The hybrid kernel expects TVM runtime containers, not Python tuples.
    converted = [tvm.runtime.convert(arg) for arg in (sizes, ratios, steps, offsets)]
    anchors = hybrid_multibox_prior(data, *converted)
    if clip:
        # Clamp anchor coordinates into the normalized [0, 1] image frame.
        anchors = topi.clip(anchors, 0, 1)
    return anchors
@hybrid.script
def _hybridy_transform_loc(box, pred_loc, variance, clip):
    """Transform prior anchor box to output box through location predictions."""
    # Anchor corner coordinates: left, top, right, bottom.
    al = box[0]
    at = box[1]
    ar = box[2]
    ab = box[3]
    # Predicted offsets: center x/y and log-scale width/height.
    px = pred_loc[0]
    py = pred_loc[1]
    pw = pred_loc[2]
    ph = pred_loc[3]
    # Per-coordinate decoding variances.
    vx = variance[0]
    vy = variance[1]
    vw = variance[2]
    vh = variance[3]
    output = output_tensor((4,), pred_loc.dtype)
    # Anchor width/height and center point.
    aw = ar - al
    ah = ab - at
    ax = (al + ar) / 2.0
    ay = (at + ab) / 2.0
    # Decode: shift the center by variance-scaled offsets; recover half the
    # box size via exp() of the variance-scaled log-size predictions.
    ox = px * vx * aw + ax
    oy = py * vy * ah + ay
    ow = exp(pw * vw) * aw / 2.0
    oh = exp(ph * vh) * ah / 2.0
    # Emit corner coordinates, optionally clamped to the [0, 1] frame.
    output[0] = max(0.0, min(1.0, ox - ow)) if clip else ox - ow
    output[1] = max(0.0, min(1.0, oy - oh)) if clip else oy - oh
    output[2] = max(0.0, min(1.0, ox + ow)) if clip else ox + ow
    output[3] = max(0.0, min(1.0, oy + oh)) if clip else oy + oh
    return output
@hybrid.script
def hybrid_multibox_transform_loc(cls_prob, loc_pred, anchor, clip, threshold, variances):
    """Hybrid routing for transform location in multibox_detection operator.

    Parameters
    ----------
    cls_prob : tvm.te.Tensor or numpy NDArray
        3-D tensor of class probabilities.

    loc_pred : tvm.te.Tensor or numpy NDArray
        2-D tensor of location regression predictions.

    anchor : tvm.te.Tensor or numpy NDArray
        3-D tensor of prior anchor boxes.

    clip : tvm.tir.const
        Whether to clip out-of-boundary boxes.

    threshold : tvm.tir.const
        Threshold to be a positive prediction.

    variances : tvm.nd.NDArray
        Variances to be decoded from box regression output.

    Returns
    -------
    out_loc : tvm.te.Tensor or numpy NDArray
        3-D tensor of transformed location.

    valid_count : tvm.te.Tensor or numpy NDArray
        1_d tensor of valid counts for boxes.
    """
    batch_size = cls_prob.shape[0]
    num_classes = cls_prob.shape[1]
    num_anchors = cls_prob.shape[2]
    # Scratch buffers holding one anchor box and its location prediction.
    box_coord = allocate((4,), loc_pred.dtype)
    pred_coord = allocate((4,), loc_pred.dtype)
    # Each output row is [class_id, score, xmin, ymin, xmax, ymax].
    out_loc = output_tensor((batch_size, num_anchors, 6), loc_pred.dtype)
    valid_count = output_tensor((batch_size,), "int32")
    for i in parallel(batch_size):
        valid_count[i] = 0
        for j in range(num_anchors):
            # Find the predicted class id and probability
            score = -1.0
            cls_id = 0
            for k in range(num_classes):
                # k == 0 is the background class and is never selected here.
                if k > 0:
                    temp = cls_prob[i, k, j]
                    cls_id = k if temp > score else cls_id
                    score = max(temp, score)
            # Demote low-confidence detections back to background.
            if cls_id > 0 and score < threshold:
                cls_id = 0
            # [id, prob, xmin, ymin, xmax, ymax]
            # Remove background, restore original id
            if cls_id > 0:
                out_loc[i, valid_count[i], 0] = cls_id - 1.0
                out_loc[i, valid_count[i], 1] = score
                for l in range(4):
                    box_coord[l] = anchor[0, j, l]
                    pred_coord[l] = loc_pred[i, j * 4 + l]
                # Decode the anchor plus regression offsets into box corners.
                out_coord = _hybridy_transform_loc(box_coord, pred_coord, variances, clip)
                out_loc[i, valid_count[i], 2] = out_coord[0]
                out_loc[i, valid_count[i], 3] = out_coord[1]
                out_loc[i, valid_count[i], 4] = out_coord[2]
                out_loc[i, valid_count[i], 5] = out_coord[3]
                valid_count[i] += 1
    return out_loc, valid_count
def multibox_transform_loc(
cls_prob, loc_pred, anchor, clip=True, threshold=0.01, variances=(0.1, 0.1, 0.2, 0.2)
):
"""Location transformation for multibox detection
Parameters
----------
cls_prob : tvm.te.Tensor
Class probabili |
niosus/EasyClangComplete | tests/test_thread_pool.py | Python | mit | 3,316 | 0 | """Test delayed thread pool."""
import time
from unittest import TestCase
import EasyClangComplete.plugin.utils.thread_pool
import EasyClangComplete.plugin.utils.thread_job
ThreadPool = EasyClangComplete.plugin.utils.thread_pool.ThreadPool
ThreadJob = EasyClangComplete.plugin.utils.thread_job.ThreadJob
def run_me(result):
    """Fake an asynchronous job: sleep briefly, then echo the argument."""
    delay = 0.2
    time.sleep(delay)
    return result
TIMEOUT = 5.0
class TestContainer():
    """Collects futures delivered through job-done callbacks."""

    def __init__(self):
        """Start with no completed futures recorded."""
        self.futures = []

    def on_job_done(self, future):
        """Record one finished (or cancelled) future."""
        self.futures.append(future)

    def wait_until_got_number_of_callbacks(self, number):
        """Poll until `number` callbacks have arrived or TIMEOUT elapses."""
        waited = 0.0
        step = 0.1
        while len(self.futures) != number and waited < TIMEOUT:
            time.sleep(step)
            waited += step
class TestThreadPool(TestCase):
    """Test thread pool."""

    def test_single_job(self):
        """A lone job runs to completion and reports its result."""
        container = TestContainer()
        job = ThreadJob(name="test_job",
                        callback=container.on_job_done,
                        function=run_me,
                        args=[True])
        pool = ThreadPool()
        pool.new_job(job)
        container.wait_until_got_number_of_callbacks(1)
        self.assertGreater(len(container.futures), 0)
        first = container.futures[0]
        self.assertFalse(first.cancelled())
        self.assertTrue(first.result())

    def test_override_job(self):
        """Queueing same-named jobs cancels whichever one is still waiting."""
        container = TestContainer()
        jobs = [ThreadJob(name="test_job",
                          function=run_me,
                          callback=container.on_job_done,
                          args=[tag])
                for tag in ("job_1", "job_2", "job_3")]
        pool = ThreadPool()
        for job in jobs:
            pool.new_job(job)
        container.wait_until_got_number_of_callbacks(3)
        self.assertEqual(len(container.futures), 3)
        # job_1 starts immediately, so job_2 sits in the queue until job_3
        # arrives and cancels it; the cancelled future fires its callback
        # right away, then job_1 finishes, then job_3 runs and finishes.
        cancelled, finished_first, finished_last = container.futures
        self.assertTrue(cancelled.cancelled())
        self.assertFalse(finished_first.cancelled())
        self.assertEqual(finished_first.result(), "job_1")
        self.assertFalse(finished_last.cancelled())
        self.assertEqual(finished_last.result(), "job_3")
|
sstjohn/thundergate | py/cdpserver.py | Python | gpl-3.0 | 22,448 | 0.005034 | '''
ThunderGate - an open source toolkit for PCI bus exploration
Copyright (C) 2015-2016 Saul St. John
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import json
import sys
import platform
import traceback
import functools
import struct
from collections import namedtuple
# Pick the debug-adapter line separator for the host platform.
if platform.system() == "Windows":
    LINE_SEP = "\n"
else:
    LINE_SEP = "\r\n"
from image import Image
from monitor import ExecutionMonitor
from datamodel import model_registers, model_memory, get_data_value, GenericModel
from blocks.cpu import mips_regs
try:
from capstone import *
from capstone.mips import *
if cs_version()[0] < 3:
print "[-] capstone outdated - disassembly unavailable"
_no_capstone = True
else:
_no_capstone = False
md_mode = CS_MODE_MIPS32 + CS_MODE_BIG_ENDIAN
md = Cs(CS_ARCH_MIPS, md_mode)
md.detail = True
md.skipdata = True
def _disassemble_word(word):
i = struct.pack(">I", word)
r = md.disasm(i, 4).next()
return "%s %s" % (r.mnemonic, r.op_str)
except:
print "[-] capstone not present - disassembly unavailable"
_no_capstone = True
class ScopeModel(GenericModel):
    """A named variable scope; identical in behavior to GenericModel."""
    pass
class Var_Tracker(object):
    """Assigns and resolves debug-protocol 'variablesReference' ids.

    Scopes come in two flavors: fixed scopes registered once at startup,
    and dynamic scopes that are discarded between debugger stops.
    """

    def __init__(self):
        self._references = []            # objects indexed by (variablesReference - 1)
        self._scopes = []                # fixed scopes first, then dynamic ones
        self._fixed_reference_end = None  # len(_references) when dynamics began
        self._fixed_scope_end = None      # len(_scopes) when dynamics began
        self._known_frame_levels = []

    def _assign_variablesReference(self, v):
        """Give `v` the next 1-based reference id and remember it."""
        self._references.append(v)
        v.variablesReference = len(self._references)

    def _add_variables_references(self, v):
        """Recursively assign reference ids to every child-bearing node."""
        if hasattr(v, "children") and isinstance(v.children, list) and len(v.children) > 0:
            self._assign_variablesReference(v)
            for c in v.children:
                c.scope = v.scope
                self._add_variables_references(c)

    def add_fixed_scope(self, s, fl=-1):
        """Register a permanent scope; only legal before any dynamic scope."""
        if self._fixed_scope_end:
            raise Exception("fixed scopes cannot be added while dynamic scopes are present")
        self._add_scope(s, fl)

    def _add_scope(self, s, fl=0):
        # print(...) with one argument is valid Python 2 and 3; the old
        # print-statement form made this module unparsable under Python 3.
        print("adding scope %s" % s.name)
        s.fl = fl
        self._assign_variablesReference(s)
        self._scopes += [s]
        for c in s.children:
            c.scope = s
            self._add_variables_references(c)
        if fl not in self._known_frame_levels:
            self._known_frame_levels += [fl]

    def add_dynamic_scope(self, s, fl=0):
        """Register a scope that lives until clear_dynamic_scopes()."""
        if not self._fixed_scope_end:
            # First dynamic scope: remember where the fixed entries end.
            self._fixed_scope_end = len(self._scopes)
            self._fixed_reference_end = len(self._references)
        self._add_scope(s, fl)

    def clear_dynamic_scopes(self):
        """Drop every dynamic scope and its references; keep fixed ones."""
        self._scopes = self._scopes[:self._fixed_scope_end]
        self._references = self._references[:self._fixed_reference_end]
        self._fixed_scope_end = None
        self._fixed_reference_end = None
        self._known_frame_levels = []

    def get_scopes(self, fl=0):
        """Scopes visible at frame level `fl` (fl == -1 means every frame)."""
        return [s for s in self._scopes if s.fl == fl or s.fl == -1]

    def dereference(self, ref_no):
        """Resolve a 1-based variablesReference back to its object."""
        return self._references[ref_no - 1]
class CDPServer(object):
def __init__(self, dev, di, do):
self.data_in = di
self.data_out = do
self.dev = dev
self._monitor = ExecutionMonitor(dev, functools.partial(CDPServer._evt_stopped, self))
self.__dispatch_setup()
self._register_model = model_registers(dev)
self._register_model.accessor = functools.partial(get_data_value, mroot=self._register_model)
self._memory_model = model_memory(dev)
self._memory_model.accessor = functools.partial(get_data_value, mroot=self._register_model)
self._vt = Var_Tracker()
self._vt.add_fixed_scope(self._register_model)
self._vt.add_fixed_scope(self._memory_model)
for c in self._register_model.children:
if c.name == "rxcpu":
s = ScopeModel("rxcpu registers")
s2 = ScopeModel("rxcpu state")
for r in c.children:
if r.name[0] == 'r' and r.name[1:].isdigit():
reg_no = int(r.name[1:])
reg_name = mips_regs.inv[reg_no]
reg_for_display = GenericModel(r.name, r.parent)
reg_for_display.display_name = reg_name
s.children += [reg_for_display]
if r.name == "pc":
ss = GenericModel(r.name, r.parent)
ss.display_name = "program counter"
ss.accessor = lambda r=r:self._register_model.accessor(r)
s2.children += [ss]
if r.name == "i | r":
ss = GenericModel(r.name, r.parent)
ss.display_name = "instruction register"
ss | .accessor = lambda r=r:self._register_model.accessor(r)
s2.children += [ss]
if not _no_capstone:
ss = GenericModel(r.name, r.parent)
ss.display_name = "instruction register (decoded)"
ss.accessor = lambda r=r:_disassemble_word(self._register_model.accessor(r))
s2.children += [ss]
if r.name in ["mode", "status"]:
ss = GenericModel(r.name, r.parent)
ss.accessor = lambda r=r:self._register_model.accessor(r)
for b in r.children:
cc = GenericModel(b.name, b.parent)
cc.accessor = lambda b=b:self._register_model.accessor(b)
ss.children += [cc]
s2.children += [ss]
s.accessor = self._register_model.accessor
self._vt.add_fixed_scope(s, fl=1)
s2.accessor = lambda x: x.accessor()
self._vt.add_fixed_scope(s2, fl=1)
self._breakpoints = {}
self._bp_replaced_insn = {}
def __enter__(self):
return self
def __exit__(self, t, v, traceback):
pass
def __dispatch_setup(self):
self.__dispatch_tbl = {}
for i in self.__class__.__dict__:
if len(i) > 5 and i[0:5] == "_cmd_":
self.__dispatch_tbl[unicode(i[5:])] = getattr(self, i)
def _dispatch_cmd(self, cmd):
try:
fncall = self.__dispatch_tbl[cmd["command"]]
except:
fncall = self._default_cmd
fncall(cmd)
def _cmd_initialize(self, cmd):
self._seq = 1
ex = {}
ex["supportsConfigurationDoneRequest"] = True
ex["supportEvaluateForHovers"] = False
self._respond(cmd, True, ex=ex)
def _cmd_launch(self, cmd):
try:
stop_now = cmd["arguments"]["stopOnEntry"]
except:
stop_now = False
program = cmd["arguments"]["program"]
self._image = Image(program)
self.dev.rxcpu.reset()
#self.dev.ma.mode.fast_ath_read_disable = 1
#self.dev.ma.mode.cpu_pipeline_request_disable = 1
#self.dev.ma.mode.low_latency_enable = 1
self.dev.rxcpu.image_load(*self._image.executable)
self._respond(cmd, True)
if stop_now:
b = {}
b["reason"] = "launch"
b["threadId"] = 1
self._event("stopped", body = b)
else:
|
BrandonY/gsutil | gslib/tests/test_util.py | Python | apache-2.0 | 14,742 | 0.002171 | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Tests for gsutil utility functions."""
from __future__ import | absolute_import
from gslib import util
import gslib.tests.testcase as testcase
from gslib.tests.util import SetEnvironmentForTest
from gslib.tests.util import TestParams
from gslib.util import CompareVersions
fro | m gslib.util import DecimalShort
from gslib.util import HumanReadableWithDecimalPlaces
from gslib.util import PrettyTime
import httplib2
import mock
class TestUtil(testcase.GsUtilUnitTestCase):
"""Tests for utility functions."""
def test_MakeHumanReadable(self):
"""Tests converting byte counts to human-readable strings."""
self.assertEqual(util.MakeHumanReadable(0), '0 B')
self.assertEqual(util.MakeHumanReadable(1023), '1023 B')
self.assertEqual(util.MakeHumanReadable(1024), '1 KiB')
self.assertEqual(util.MakeHumanReadable(1024 ** 2), '1 MiB')
self.assertEqual(util.MakeHumanReadable(1024 ** 3), '1 GiB')
self.assertEqual(util.MakeHumanReadable(1024 ** 3 * 5.3), '5.3 GiB')
self.assertEqual(util.MakeHumanReadable(1024 ** 4 * 2.7), '2.7 TiB')
self.assertEqual(util.MakeHumanReadable(1024 ** 5), '1 PiB')
self.assertEqual(util.MakeHumanReadable(1024 ** 6), '1 EiB')
def test_MakeBitsHumanReadable(self):
"""Tests converting bit counts to human-readable strings."""
self.assertEqual(util.MakeBitsHumanReadable(0), '0 bit')
self.assertEqual(util.MakeBitsHumanReadable(1023), '1023 bit')
self.assertEqual(util.MakeBitsHumanReadable(1024), '1 Kibit')
self.assertEqual(util.MakeBitsHumanReadable(1024 ** 2), '1 Mibit')
self.assertEqual(util.MakeBitsHumanReadable(1024 ** 3), '1 Gibit')
self.assertEqual(util.MakeBitsHumanReadable(1024 ** 3 * 5.3), '5.3 Gibit')
self.assertEqual(util.MakeBitsHumanReadable(1024 ** 4 * 2.7), '2.7 Tibit')
self.assertEqual(util.MakeBitsHumanReadable(1024 ** 5), '1 Pibit')
self.assertEqual(util.MakeBitsHumanReadable(1024 ** 6), '1 Eibit')
def test_HumanReadableToBytes(self):
"""Tests converting human-readable strings to byte counts."""
self.assertEqual(util.HumanReadableToBytes('1'), 1)
self.assertEqual(util.HumanReadableToBytes('15'), 15)
self.assertEqual(util.HumanReadableToBytes('15.3'), 15)
self.assertEqual(util.HumanReadableToBytes('15.7'), 16)
self.assertEqual(util.HumanReadableToBytes('1023'), 1023)
self.assertEqual(util.HumanReadableToBytes('1k'), 1024)
self.assertEqual(util.HumanReadableToBytes('2048'), 2048)
self.assertEqual(util.HumanReadableToBytes('1 k'), 1024)
self.assertEqual(util.HumanReadableToBytes('1 K'), 1024)
self.assertEqual(util.HumanReadableToBytes('1 KB'), 1024)
self.assertEqual(util.HumanReadableToBytes('1 KiB'), 1024)
self.assertEqual(util.HumanReadableToBytes('1 m'), 1024 ** 2)
self.assertEqual(util.HumanReadableToBytes('1 M'), 1024 ** 2)
self.assertEqual(util.HumanReadableToBytes('1 MB'), 1024 ** 2)
self.assertEqual(util.HumanReadableToBytes('1 MiB'), 1024 ** 2)
self.assertEqual(util.HumanReadableToBytes('1 g'), 1024 ** 3)
self.assertEqual(util.HumanReadableToBytes('1 G'), 1024 ** 3)
self.assertEqual(util.HumanReadableToBytes('1 GB'), 1024 ** 3)
self.assertEqual(util.HumanReadableToBytes('1 GiB'), 1024 ** 3)
self.assertEqual(util.HumanReadableToBytes('1t'), 1024 ** 4)
self.assertEqual(util.HumanReadableToBytes('1T'), 1024 ** 4)
self.assertEqual(util.HumanReadableToBytes('1TB'), 1024 ** 4)
self.assertEqual(util.HumanReadableToBytes('1TiB'), 1024 ** 4)
self.assertEqual(util.HumanReadableToBytes('1\t p'), 1024 ** 5)
self.assertEqual(util.HumanReadableToBytes('1\t P'), 1024 ** 5)
self.assertEqual(util.HumanReadableToBytes('1\t PB'), 1024 ** 5)
self.assertEqual(util.HumanReadableToBytes('1\t PiB'), 1024 ** 5)
self.assertEqual(util.HumanReadableToBytes('1e'), 1024 ** 6)
self.assertEqual(util.HumanReadableToBytes('1E'), 1024 ** 6)
self.assertEqual(util.HumanReadableToBytes('1EB'), 1024 ** 6)
self.assertEqual(util.HumanReadableToBytes('1EiB'), 1024 ** 6)
def test_CompareVersions(self):
"""Tests CompareVersions for various use cases."""
# CompareVersions(first, second) returns (g, m), where
# g is True if first known to be greater than second, else False.
# m is True if first known to be greater by at least 1 major version,
(g, m) = CompareVersions('3.37', '3.2')
self.assertTrue(g)
self.assertFalse(m)
(g, m) = CompareVersions('7', '2')
self.assertTrue(g)
self.assertTrue(m)
(g, m) = CompareVersions('3.32', '3.32pre')
self.assertTrue(g)
self.assertFalse(m)
(g, m) = CompareVersions('3.32pre', '3.31')
self.assertTrue(g)
self.assertFalse(m)
(g, m) = CompareVersions('3.4pre', '3.3pree')
self.assertTrue(g)
self.assertFalse(m)
(g, m) = CompareVersions('3.2', '3.37')
self.assertFalse(g)
self.assertFalse(m)
(g, m) = CompareVersions('2', '7')
self.assertFalse(g)
self.assertFalse(m)
(g, m) = CompareVersions('3.32pre', '3.32')
self.assertFalse(g)
self.assertFalse(m)
(g, m) = CompareVersions('3.31', '3.32pre')
self.assertFalse(g)
self.assertFalse(m)
(g, m) = CompareVersions('3.3pre', '3.3pre')
self.assertFalse(g)
self.assertFalse(m)
(g, m) = CompareVersions('foobar', 'baz')
self.assertFalse(g)
self.assertFalse(m)
(g, m) = CompareVersions('3.32', 'baz')
self.assertFalse(g)
self.assertFalse(m)
(g, m) = CompareVersions('3.4', '3.3')
self.assertTrue(g)
self.assertFalse(m)
(g, m) = CompareVersions('3.3', '3.4')
self.assertFalse(g)
self.assertFalse(m)
(g, m) = CompareVersions('4.1', '3.33')
self.assertTrue(g)
self.assertTrue(m)
(g, m) = CompareVersions('3.10', '3.1')
self.assertTrue(g)
self.assertFalse(m)
def _AssertProxyInfosEqual(self, pi1, pi2):
self.assertEqual(pi1.proxy_type, pi2.proxy_type)
self.assertEqual(pi1.proxy_host, pi2.proxy_host)
self.assertEqual(pi1.proxy_port, pi2.proxy_port)
self.assertEqual(pi1.proxy_rdns, pi2.proxy_rdns)
self.assertEqual(pi1.proxy_user, pi2.proxy_user)
self.assertEqual(pi1.proxy_pass, pi2.proxy_pass)
def testMakeMetadataLine(self):
test_params = (
TestParams(args=('AFairlyShortKey', 'Value'),
expected=' AFairlyShortKey: Value'),
TestParams(args=('', 'Value'),
expected=' : Value'),
TestParams(args=('AnotherKey', 'Value'),
kwargs={'indent': 2},
expected=' AnotherKey: Value'),
TestParams(args=('AKeyMuchLongerThanTheLast', 'Value'),
expected=(' AKeyMuchLongerThanTheLast:Value')))
for params in test_params:
line = util.MakeMetadataLine(*(params.args), **(params.kwargs))
self.assertEqual(line, params.expected)
def test_ProxyInfoFromEnvironmentVar(self):
"""Tests ProxyInfoFromEnvironmentVar for various cases."""
valid_variables = ['http_proxy', 'h |
gregnordin/micropython_pyboard | 150729_pyboard_to_pyqtgraph/serial_pyboard_to_python.py | Python | mit | 9,779 | 0.015339 | #HV Control &
#Read and Plot from the PMT
#This code is to record the data that is received into the Teensy's ADC.
#Includes the HV control and replotting the results at the end.
#See CSV Dataplot notebook to plot old experiment data.
from __future__ import division
from __future__ import print_function
from pyqtgraph import QtGui, QtCore #Provides usage of PyQt4's libraries which aids in UI design
import pyqtgraph as pg #Initiation of plotting code
import serial #Communication with the serial port is done using the pySerial 2.7 package
from datetime import datetime #Allows us to look at current date and time
#import dataprocessing #code for plotting the data from the CSV
## Always start by initializing Qt (only once per application)
app = QtGui.QApplication([])
## Define a top-level widget to hold everything (a window)
w = QtGui.QWidget()
w.resize(1000,600)
w.setWindowTitle('Voltage Plots')
startBtnClicked = False
quitBtnClicked = False
firstupdate = 0
## This function contains the behavior we want to see when the start button is clicked
def startButtonClicked():
global startBtnClicked
global startBtn
if (startBtnClicked == False):
teensySerialData.flushInput() #empty serial buffer for input from the teensy
startBtnClicked = True
startBtn.setText('Stop')
elif (startBtnClicked == True):
startBtnClicked = False
startBtn.setText('Start')
## Below at the end of the update function we check the value of quitBtnClicked
def quitButtonClicked():
global quitBtnClicked
quitBtnClicked = True
## Buttons to control the High Voltage
def HVoffButtonClicked():
teensySerialData.write('0')
print("HV Off")
def HVonButtonClicked():
teensySerialData.write('1')
print("HV On")
def insertionButtonClicked():
teensySerialData.write('3')
print("Insertion")
def separationButtonClicked():
teensySerialData.write('2')
print("Separation")
#Start Recording in Widget
## Create widgets to be placed inside
startBtn = QtGui.QPushButton('Start')
startBtn.setToolTip('Click to begin graphing') #This message appears while hovering mouse over button
quitBtn = QtGui.QPushButton('Quit')
quitBtn.setToolTip('Click to quit program')
HVonBtn = QtGui.QPushButton("HV on")
HVonBtn.setToolTip('Click to turn the high voltage on')
HVoffBtn = QtGui.QPushButton("HV off")
HVoffBtn.setToolTip('Click to turn the high voltage off')
insBtn = QtGui.QPushButton("Insertion")
insBtn.setToolTip('Click to start insertion (#3)')
sepBtn = QtGui.QPushButton("Separation")
sepBtn.setToolTip('Click to start separation (#2)')
## Functions in parantheses are to be called when buttons are clicked
startBtn.clicked.connect(startButtonClicked)
quitBtn.clicked.connect(quitButtonClicked)
HVonBtn.clicked.connect(HVonButtonClicked)
HVoffBtn.clicked.connect(HVoffButtonClicked)
insBtn.clicked.connect(insertionButtonClicked)
sepBtn.clicked.connect(separationButtonClicked)
## xSamples is the maximum amount of samples we want graphed at a time
xSamples = 300
## Create plot widget for peak detector plot
pmtPlotWidget = pg.PlotWidget()
pmtPlotWidget.setYRange(0, 4096)
pmtPlotWidget.setXRange(0, xSamples)
pmtPlotWidget.setLabel('top', text = "PMT") #Title to appear at top of widget
## Create a grid layout to manage the widgets size and position
## The grid layout allows us to place a widget in a given column and row
layout = QtGui.QGridLayout()
w.setLayout(layout)
## Add widgets to the layout in their proper positions
## The first number in parantheses is the row, the second is the column
layout.addWidget(quitBtn, 0, 0)
layout.addWidget(startBtn, 2, 0)
layout.addWidget(HVonBtn, 0, 2)
layout.addWidget(insBtn, 2, 2)
layout.addWidget(sepBtn, 3, 2)
layout.addWidget(HVoffBtn, 4, 2)
layout.addWidget(pmtPlotWidget, 1, 1)
## Display the widget as a new window
w.show()
## Initialize all global variables
## Whenever we plot a range of samples, xLeftIndex is the x value on the
## PlotWidget where we start plotting the samples, xRightIndex is where we stop
## These values will reset when they reach the value of xSamples
xRightIndex = 0
xLeftIndex = 0
## These arrays will hold the unplotted voltage values from the pmt
## and the peak detector until we are able to update the plot
pmtData = []
## Used to determine how often we plot a range of values
graphCount = 0
## Time values in microseconds read from the teensy are stored in these variables
## Before timeElapsed is updated, we store its old value in timeElapsedPrev
timeElapsed = 0
timeElapsedPrev = 0
## Determines if we are running through the update loop for the first time
firstRun = True
## Create new file, with the na | me being | today's date and current time and write headings to file in CSV format
i = datetime.now()
fileName = str(i.year) + str(i.month) + str(i.day) + "_" + str(i.hour) + str(i.minute) + str(i.second) + ".csv"
## File is saved to Documents/IPython Notebooks/RecordedData
#f = open('RecordedData\\' + fileName, 'a')
#f.write("#Data from " + str(i.year) + "-" + str(i.month) + "-" + str(i.day) + " at " + str(i.hour) + ":" + str(i.minute) + ":" + str(i.second) + '\n')
#f.write("Timestamp,PMT\n")
## Initialize the container for our voltage values read in from the teensy
## IMPORTANT NOTE: The com port value needs to be updated if the com value
## changes. It's the same number that appears on the bottom right corner of the
## window containing the TeensyDataWrite.ino code
teensySerialData = serial.Serial("/dev/tty.usbmodem1452", 115200)
def update():
## Set global precedence to previously defined values
global xSamples
global xRightIndex
global xLeftIndex
global pmtData
global graphCount
global timeElapsed
global timeElapsedPrev
global firstRun
global firstupdate
if firstupdate == 0:
teensySerialData.flushInput()
firstupdate += 1
## The number of bytes currently waiting to be read in.
## We want to read these values as soon as possible, because
## we will lose them if the buffer fills up
bufferSize = teensySerialData.inWaiting()
runCount = bufferSize//8 # since we write 8 bytes at a time, we similarly want to read them 8 at a time
#print(bufferSize, runCount)
while (runCount > 0):
if (startBtnClicked == True):
#Read in time (int) and PMT output (float with up to 5 decimal places)
temp = []
temp.append(teensySerialData.readline().strip().split(',') )
print(bufferSize, runCount, temp[-1][0], temp[-1][1])
timeElapsedPrev = timeElapsed
timeElapsed = int (temp[0][0])
if (firstRun == True):
## Only run once to ensure buffer is completely flushed
firstRun = False
teensySerialData.flushInput()
break
# We'll add all our values to this string until we're ready to exit the loop, at which point it will be written to a file
stringToWrite = str(timeElapsed) + ","
## This difference calucalted in the if statement is the amount of time in microseconds since the last value
## we read in and wrote to a file. If this value is significantly greater than 100, we know we have missed some
## values, probably as a result of the buffer filling up and scrapping old values to make room for new values.
## The number we print out will be the approximate number of values we failed to read in.
## This is useful to determine if your code is running too slow
#if (timeElapsed - timeElapsedPrev > 8000):
#print(str((timeElapsed-timeElapsedPrev)/7400))
numData = float (temp[0][1])
pmtData.append(numData)
stringToWrite = stringToWrite + str(numData) + '\n'
#f.write(stringToWrite)
graphCount = graphCount + 1
xRightIndex = xRightIndex + 1
runCount = runCount - |
dhuang/incubator-airflow | tests/models/test_dagparam.py | Python | apache-2.0 | 3,838 | 0.001563 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# | regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See | the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import timedelta
from airflow.decorators import task
from airflow.models.dag import DAG
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from tests.test_utils.db import clear_db_runs
class TestDagParamRuntime(unittest.TestCase):
DEFAULT_ARGS = {
"owner": "test",
"depends_on_past": True,
"start_date": timezone.utcnow(),
"retries": 1,
"retry_delay": timedelta(minutes=1),
}
VALUE = 42
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
def tearDown(self):
super().tearDown()
clear_db_runs()
def test_dag_param_resolves(self):
"""Test dagparam resolves on operator execution"""
with DAG(dag_id="test_xcom_pass_to_op", default_args=self.DEFAULT_ARGS) as dag:
value = dag.param('value', default=self.VALUE)
@task
def return_num(num):
return num
xcom_arg = return_num(value)
dr = dag.create_dagrun(
run_id=DagRunType.MANUAL.value,
start_date=timezone.utcnow(),
execution_date=self.DEFAULT_DATE,
state=State.RUNNING,
)
xcom_arg.operator.run(start_date=self.DEFAULT_DATE, end_date=self.DEFAULT_DATE)
ti = dr.get_task_instances()[0]
assert ti.xcom_pull() == self.VALUE
def test_dag_param_overwrite(self):
"""Test dag param is overwritten from dagrun config"""
with DAG(dag_id="test_xcom_pass_to_op", default_args=self.DEFAULT_ARGS) as dag:
value = dag.param('value', default=self.VALUE)
@task
def return_num(num):
return num
xcom_arg = return_num(value)
assert dag.params['value'] == self.VALUE
new_value = 2
dr = dag.create_dagrun(
run_id=DagRunType.MANUAL.value,
start_date=timezone.utcnow(),
execution_date=self.DEFAULT_DATE,
state=State.RUNNING,
conf={'value': new_value},
)
xcom_arg.operator.run(start_date=self.DEFAULT_DATE, end_date=self.DEFAULT_DATE)
ti = dr.get_task_instances()[0]
assert ti.xcom_pull() == new_value
def test_dag_param_default(self):
"""Test dag param is overwritten from dagrun config"""
with DAG(
dag_id="test_xcom_pass_to_op", default_args=self.DEFAULT_ARGS, params={'value': 'test'}
) as dag:
value = dag.param('value')
@task
def return_num(num):
return num
xcom_arg = return_num(value)
dr = dag.create_dagrun(
run_id=DagRunType.MANUAL.value,
start_date=timezone.utcnow(),
execution_date=self.DEFAULT_DATE,
state=State.RUNNING,
)
xcom_arg.operator.run(start_date=self.DEFAULT_DATE, end_date=self.DEFAULT_DATE)
ti = dr.get_task_instances()[0]
assert ti.xcom_pull() == 'test'
|
daniestevez/jupyter_notebooks | december2021_eclipse/make_waterfalls.py | Python | gpl-3.0 | 1,664 | 0 | #!/usr/bin/env python3
import argparse
import pathlib
import numpy as np
def waterfall(input_filename, output_filename):
fs = 200
nfft = 8192
w = np.blackman(nfft)
x = np.fromfile(input_filename, 'int16')
x = (x[::2] + | 1j*x[1::2])/2**15
freq_span = 5
| nbins = round(freq_span / fs * nfft)
# In these recordings the internal reference was used, so there
# is a frequency offset
freq_offset = 11.6 if '2021-12-08T12:57:25' in input_filename.name else 0
band = int(input_filename.name.split('_')[-2].replace('kHz', ''))
# 1.6 Hz offset is at 10 MHz
freq_offset *= band / 10000
bin_offset = round(freq_offset / fs * nfft)
freq_sel = slice(nfft//2-nbins+bin_offset, nfft//2+nbins+1+bin_offset)
x = x[:x.size//nfft*nfft]
f = np.fft.fftshift(
np.fft.fft(w * x.reshape(-1, nfft)),
axes=1)
f = np.abs(f[:, freq_sel])**2
np.save(output_filename, f.astype('float32'))
def parse_args():
parser = argparse.ArgumentParser(
description='Make waterfalls from the December 2021 eclipse IQ data')
parser.add_argument('input_folder',
help='Input folder')
parser.add_argument('output_folder',
help='Output folder')
return parser.parse_args()
def main():
args = parse_args()
input_files = pathlib.Path(args.input_folder).glob('*.sigmf-data')
output_path = pathlib.Path(args.output_folder)
for f_in in input_files:
f_out_name = f_in.name.replace('.sigmf-data', '_waterfall.npy')
f_out = output_path / f_out_name
waterfall(f_in, f_out)
if __name__ == '__main__':
main()
|
GhostshipSoftware/avaloria | src/commands/cmdhandler.py | Python | bsd-3-clause | 20,996 | 0.002286 | """
Command handler
This module contains the infrastructure for accepting commands on the
command line. The process is as follows:
1) The calling object (caller) inputs a string and triggers the command parsing system.
2) The system checks the state of the caller - loggedin or not
3) If no command string was supplied, we search the merged cmdset for system command CMD_NOINPUT
and branches to execute that. --> Finished
4) Cmdsets are gathered from different sources (in order of dropping priority):
channels - all available channel names are auto-created into a cmdset, to allow
for giving the channel name and have the following immediately
sent to the channel. The sending is performed by the CMD_CHANNEL
system command.
object cmdsets - all objects at caller's location are scanned for non-empty
cmdsets. This includes cmdsets on exits.
caller - the caller is searched for its own currently active cmdset.
player - lastly the cmdsets defined on caller.player are added.
5) All the gathered cmdsets (if more than one) are merged into one using the cmdset priority rules.
6) If merged cmdset is empty, raise NoCmdSet exception (this should not happen, at least the
player should have a default cmdset available at all times). --> Finished
7) The raw input string is parsed using the parser defined by settings.COMMAND_PARSER. It
uses the available commands from the merged cmdset to know which commands to look for and
returns one or many matches.
8) If match list is empty, branch to system command CMD_NOMATCH --> Finished
9) If match list has more than one element, branch to system command CMD_MULTIMATCH --> Finished
10) A single match was found. If this is a channel-command (i.e. the command name is that of a channel),
branch to CMD_CHANNEL --> Finished
11) At this point we have found a normal command. We assign useful variables to it that
will be available to the command coder at run-time.
12) We have a unique cmdobject, primed for use. Call all hooks:
at_pre_cmd(), cmdobj.parse(), cmdobj.func() and finally at_post_cmd().
"""
from weakref import WeakValueDictionary
from copy impor | t copy
from traceback import format_exc
from twisted.internet.defer import inlineCallbacks, returnValue
from django.conf import settings
from src.comms.channelhandler import CHANNELHANDLER
from src.utils import logger, utils
from src.commands.cmdparser import at_multimatch_cmd
from src.utils.utils impor | t string_suggestions, to_unicode
from django.utils.translation import ugettext as _
__all__ = ("cmdhandler",)
_GA = object.__getattribute__
_CMDSET_MERGE_CACHE = WeakValueDictionary()
# This decides which command parser is to be used.
# You have to restart the server for changes to take effect.
_COMMAND_PARSER = utils.variable_from_module(*settings.COMMAND_PARSER.rsplit('.', 1))
# System command names - import these variables rather than trying to
# remember the actual string constants. If not defined, Evennia
# hard-coded defaults are used instead.
# command to call if user just presses <return> with no input
CMD_NOINPUT = "__noinput_command"
# command to call if no command match was found
CMD_NOMATCH = "__nomatch_command"
# command to call if multiple command matches were found
CMD_MULTIMATCH = "__multimatch_command"
# command to call if found command is the name of a channel
CMD_CHANNEL = "__send_to_channel_command"
# command to call as the very first one when the user connects.
# (is expected to display the login screen)
CMD_LOGINSTART = "__unloggedin_look_command"
# custom Exceptions
class NoCmdSets(Exception):
"No cmdsets found. Critical error."
pass
class ExecSystemCommand(Exception):
"Run a system command"
def __init__(self, syscmd, sysarg):
self.args = (syscmd, sysarg) # needed by exception error handling
self.syscmd = syscmd
self.sysarg = sysarg
# Helper function
@inlineCallbacks
def get_and_merge_cmdsets(caller, session, player, obj,
callertype, sessid=None):
"""
Gather all relevant cmdsets and merge them.
callertype is one of "session", "player" or "object" dependin
on which level the cmdhandler is invoked. Session includes the
cmdsets available to Session, Player and its eventual puppeted Object.
Player-level include cmdsets on Player and Object, while calling
the handler on an Object only includes cmdsets on itself.
The cdmsets are merged in order generality, so that the Object's
cmdset is merged last (and will thus take precedence over
same-named and same-prio commands on Player and Session).
Note that this function returns a deferred!
"""
local_obj_cmdsets = [None]
@inlineCallbacks
def _get_channel_cmdsets(player, player_cmdset):
"Channel-cmdsets"
# Create cmdset for all player's available channels
channel_cmdset = None
if not player_cmdset.no_channels:
channel_cmdset = yield CHANNELHANDLER.get_cmdset(player)
returnValue(channel_cmdset)
@inlineCallbacks
def _get_local_obj_cmdsets(obj, obj_cmdset):
"Object-level cmdsets"
# Gather cmdsets from location, objects in location or carried
local_obj_cmdsets = [None]
try:
location = obj.location
except Exception:
location = None
if location and not obj_cmdset.no_objs:
# Gather all cmdsets stored on objects in the room and
# also in the caller's inventory and the location itself
local_objlist = yield (location.contents_get(exclude=obj.dbobj) +
obj.contents +
[location])
for lobj in local_objlist:
try:
# call hook in case we need to do dynamic changing to cmdset
_GA(lobj, "at_cmdset_get")()
except Exception:
logger.log_trace()
# the call-type lock is checked here, it makes sure a player
# is not seeing e.g. the commands on a fellow player (which is why
# the no_superuser_bypass must be True)
local_obj_cmdsets = \
yield [lobj.cmdset.current for lobj in local_objlist
if (lobj.cmdset.current and
lobj.locks.check(caller, 'call', no_superuser_bypass=True))]
for cset in local_obj_cmdsets:
#This is necessary for object sets, or we won't be able to
# separate the command sets from each other in a busy room.
cset.old_duplicates = cset.duplicates
cset.duplicates = True
returnValue(local_obj_cmdsets)
@inlineCallbacks
def _get_cmdset(obj):
"Get cmdset, triggering all hooks"
try:
yield obj.at_cmdset_get()
except Exception:
logger.log_trace()
try:
returnValue(obj.cmdset.current)
except AttributeError:
returnValue(None)
if callertype == "session":
# we are calling the command from the session level
report_to = session
session_cmdset = yield _get_cmdset(session)
cmdsets = [session_cmdset]
if player: # this automatically implies logged-in
player_cmdset = yield _get_cmdset(player)
channel_cmdset = yield _get_channel_cmdsets(player, player_cmdset)
cmdsets.extend([player_cmdset, channel_cmdset])
if obj:
obj_cmdset = yield _get_cmdset(obj)
local_obj_cmdsets = yield _get_local_obj_cmdsets(obj, obj_cmdset)
cmdsets.extend([obj_cmdset] + local_obj_cmdsets)
elif callertype == "player":
# we are calling the command from the player level
report_to = player
player_cmdset = yield _get_cmdset(player)
channel_cmdset = yield _get_channel_cmdsets(player, player_cmdset)
cmdsets = [player_cmdset, channel_cmdset]
if obj:
obj_cmdset = yield _get_ |
InspectorIncognito/visualization | login/apps.py | Python | gpl-3.0 | 150 | 0 | # -*- coding: utf-8 -*-
| from __future__ import unicode_literals
from django.apps import AppConfig
class LoginConfig(AppConfig):
n | ame = 'login'
|
infoxchange/messagemedia-python | mmsoap/client.py | Python | apache-2.0 | 13,370 | 0.002917 | #
# Copyright 2014-2016 MessageMedia
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import suds
from cache import ExtendedObjectCache
from suds.transport.http import HttpTransport as SudsHttpTransport
from .exceptions import *
class MMSoapClient(object):
"""
Message Media SOAP Client.
This class is a wrapper for the Suds SOAP client and implements the operations
found in the following wsdl: http://soap.m4u.com.au/?wsdl'.
"""
WSDL_URL = "https://soap.m4u.com.au/?wsdl"
def __init__(self, userId=None, password=None, **kwargs):
"""
Initialise the SOAP client.
:param userId: User ID for authenticating requests. Defaults to None.
:param password: Password for authenticating requests. Defaults to None.
:param kwargs: Additional arguments to set on the client. Currently only 'cache_location' is supported.
"""
object_cache = ExtendedObjectCache()
object_cache.setduration(days=10)
if 'cache_location' in kwargs:
object_cache.setlocation(kwargs["cache_location"])
self.client = suds.client.Client(self.WSDL_URL,
cache=object_cache,
transport=WellBehavedHttpTransport())
self.authentication = self.create('AuthenticationType')
if userId and password:
self.authentication.userId = userId
self.authentication.password = password
def create(self, object_name):
"""Short-hand for creating WSDL objects."""
return self.client.factory.create(object_name)
def set_userId(self, userId):
""" Set user id on the authentication element. """
self.authentication.userId = userId
def set_password(self, password):
""" Set password on the authentication element. """
self.authentication.password = password
def check_user(self):
"""
Check user account details.
:return: The account details including used and remaining credit limits.
"""
return self.client.service.checkUser(self.authentication).accountDetails
def send_messages(self, recipients, content, send_mode='normal', scheduled=None, report=None, seq=None):
"""
Send a single message to `recipients`.
:param recipients: Iterable of recipient numbers
:param content: Message content
:param send_mode: Used for testing. Should be one of:
* normal (default) -- send as normal.
* dropAll -- drop (not send) the requested messages, return a mix of errors and successes.
* dropAllWithErrors -- drop the requested messages, return all as errors.
* dropAllWithSuccess -- drop the requested messages, return all as success.
:param scheduled: Scheduled date/time of the message in UTC format.
:param report: Request a delivery report for this message.
:return: Response containing count of sent, scheduled, failed and any associated error code types.
"""
recipients_type = self.create('RecipientsType')
for recipient in recipients:
this_recipient = self.create('RecipientType')
this_recipient.value = recipient
# add uid here
recipients_type.recipient.append(this_recipient)
message = self.create('MessageType')
message.recipients = recipients_type
message.content = content
if seq:
message._sequenceNumber = seq
if scheduled:
message.scheduled = scheduled
if report:
message.deliveryReport = report
message_list = self.create('MessageListType')
message_list.message = message
message_list._sendMode = send_mode
request_body = self.create('SendMessagesBodyType')
request_body.messages = message_list
response = self.client.service.sendMessages(self.authentication, request_body)
self.raise_for_response(response)
return response
def raise_for_response(self, response):
"""
Raise an exception for a given error code in a SOAP response.
:param response: SOAP Response containing the error code.
:raises An exception matching the error code.
"""
try:
code = response.errors[0][0]._code
if code == 'invalidRecipient':
raise InvalidRecipientException()
elif code == 'recipientBlocked':
raise RecipientBlockedException()
elif code == 'emptyMessageContent':
raise EmptyMessageContentException()
elif code == 'other':
raise OtherMMSOAPException()
else:
pass
except AttributeError:
pass
def check_replies(self, maximum_replies=None):
"""
Check replies to any sent messages.
:param maximum_replies: Limits the number of replies returned in the response.
Default is to return all if this value isn't supplied.
:return: Iterable containing the replies, never null.
"""
request_body = self.create('CheckRepliesBodyType')
if maximum_replies is int and maximum_replies >= 0:
request_body.maximumReplies = maximum_replies
response = self.client.service.checkReplies(self.authentication, request_body)
return response.reply if 'reply' in response else []
def confirm_replies(self, message_receipt_ids):
"""
Confirm replies which were previously retrieved with the check_replies function.
:param message_receipt_ids: Iterable containing the receipt id's of the replies to confirm.
These must correspond to id's previously retrieved using the
check_replies function.
:return: Response containing the number of replies confirmed.
"""
confirm_reply_list = self.create('ConfirmReplyListType')
for receipt_id in message_receipt_ids:
this_confirm_item = self.create('ConfirmItemType')
this_confirm_item._receiptId = receipt_id
confirm_reply_list.rep | ly.append(this_confirm_item)
request_body = self.create('ConfirmRepliesBodyType')
request_body.replies = confirm_reply_list
return self.client.service.confirmReplies(self.authentication, request_body)
def check_reports(self, maximum_reports=None):
"""
Check delivery reports to any sent messages.
| :param maximum_reports: Limits the number of reports returned in the response.
Default is to return all if this value isn't supplied.
:return: Iterable containing the reports, never null.
"""
request_body = self.create('CheckReportsBodyType')
if maximum_reports is int and maximum_reports >= 0:
request_body.maximumReports = maximum_reports
response = self.client.service.checkReports(self.authentication, request_body)
return response.report if 'report' in response else []
def confirm_reports(self, delivery_report_ids):
"""
Confirm reports which were previously retrieved with the check_reports function.
:param delivery_report_ids: Iterable containing the id's of the reports to confirm.
These must correspond to id's previously retrieved using the
check_reports function.
|
scalyr/scalyr-agent-2 | tests/unit/url_monitor_test.py | Python | apache-2.0 | 4,851 | 0.001237 | # Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: Saurabh Jain <saurabh@scalyr.com>
from __future__ import unicode_literals
from __future__ import absolute_import
__author__ = "saurabh@scalyr.com"
import unittest
try:
from __scalyr__ import SCALYR_VERSION
except ImportError:
from scalyr_agent.__scalyr__ import SCALYR_VERSION
from scalyr_agent.builtin_monitors.url_monitor import UrlMonitor
from scalyr_agent.scalyr_monitor import MonitorConfig
from scalyr_agent.json_lib.objects import JsonArray, JsonObject
import six
import mock
# Headers every monitor request is expected to carry: a single User-agent
# entry identifying the agent version and the monitor module issuing the
# request.  Stored as a list of (name, value) pairs for easy concatenation
# with per-test header expectations.
EXPECTED_BASE_HEADERS = list(
    {"User-agent": "scalyr-agent-%s;monitor=url_monitor" % (SCALYR_VERSION)}.items()
)
class UrlMonitorTestRequest(unittest.TestCase):
    """
    Tests the formation of the request for monitoring a URL
    """

    def setUp(self):
        super(UrlMonitorTestRequest, self).setUp()
        # Two well-formed header entries reused by the "with headers" tests.
        self.legit_headers = JsonArray()
        self.legit_headers.add(JsonObject({"header": "header_foo", "value": "foo"}))
        self.legit_headers.add(JsonObject({"header": "header_bar", "value": "bar"}))
        self.module = "scalyr_agent.builtin_monitors.url_monitor"

    def tearDown(self):
        pass

    def _build_request(self, method, data, headers):
        # Helper: construct a UrlMonitor from a config dict and return the
        # request object it builds.
        config = MonitorConfig(content={
            "url": "http://fooUrl",
            "request_method": method,
            "request_data": data,
            "request_headers": headers,
            "module": self.module,
        })
        monitor = UrlMonitor(monitor_config=config, logger=mock.MagicMock())
        return monitor.build_request()

    def test_get_request_no_headers(self):
        request = self._build_request("GET", None, [])
        self.assertEqual(request.get_method(), "GET")
        self.assertIsNone(request.data)
        self.assertEqual(request.header_items(), EXPECTED_BASE_HEADERS)

    def test_get_request_with_headers(self):
        request = self._build_request("GET", None, self.legit_headers)
        self.assertEqual(request.get_method(), "GET")
        self.assertIsNone(request.data)
        expected = [("Header_foo", "foo"), ("Header_bar", "bar")] + EXPECTED_BASE_HEADERS
        self.assertEqual(sorted(request.header_items()), sorted(expected))

    def test_post_request_with_data(self):
        request = self._build_request(
            "POST", "{fakejsonthatisnotlegit}", self.legit_headers)
        self.assertEqual(request.get_method(), "POST")
        # urllib stores POST bodies as bytes on Python 3, str on Python 2.
        if six.PY3:
            self.assertEqual(request.data, b"{fakejsonthatisnotlegit}")
        else:
            self.assertEqual(request.data, "{fakejsonthatisnotlegit}")
        expected = [("Header_foo", "foo"), ("Header_bar", "bar")] + EXPECTED_BASE_HEADERS
        self.assertEqual(sorted(request.header_items()), sorted(expected))

    def test_malformed_headers(self):
        # A non-list "request_headers" value must make monitor construction fail.
        config_data = {
            "url": "fooUrl",
            "request_method": "POST",
            "request_data": "{fakejsonthatisnotlegit}",
            "request_headers": "not legit headers",
            "module": self.module,
        }
        if six.PY3:
            config_data["request_data"] = b"{fakejsonthatisnotlegit}"
        config = MonitorConfig(content=config_data)
        self.assertRaises(
            Exception,
            lambda: UrlMonitor(monitor_config=config, logger=mock.MagicMock()),
        )
|
TheTimmy/spack | var/spack/repos/builtin/packages/r-leaflet/package.py | Python | lgpl-2.1 | 2,218 | 0.000451 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://gi | thub.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free s | oftware; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RLeaflet(RPackage):
    """Create and customize interactive maps using the 'Leaflet' JavaScript
    library and the 'htmlwidgets' package. These maps can be used directly from
    the R console, from 'RStudio', in Shiny apps and R Markdown documents."""

    # Upstream project page and CRAN source tarball location.
    homepage = "http://rstudio.github.io/leaflet/"
    url = "https://cran.r-project.org/src/contrib/leaflet_1.0.1.tar.gz"

    # Second argument is the md5 checksum of the release tarball.
    version('1.0.1', '7f3d8b17092604d87d4eeb579f73d5df')

    # R package dependencies; each is needed both at build and at run time.
    depends_on('r-base64enc', type=('build', 'run'))
    depends_on('r-htmlwidgets', type=('build', 'run'))
    depends_on('r-htmltools', type=('build', 'run'))
    depends_on('r-magrittr', type=('build', 'run'))
    depends_on('r-markdown', type=('build', 'run'))
    depends_on('r-png', type=('build', 'run'))
    depends_on('r-rcolorbrewer', type=('build', 'run'))
    depends_on('r-raster', type=('build', 'run'))
    depends_on('r-scales', type=('build', 'run'))
    depends_on('r-sp', type=('build', 'run'))
|
Hiestaa/RLViz | experiments/gym_test_2.py | Python | mit | 7,015 | 0 | #! .env/bin/python
# -*- coding: utf8 -*-
from __future__ import unicode_literals
# import time
import random
import itertools
# from collections import d | efaultdict
import gym
import numpy as np
# implementation of the sarsa algorithm on the mountain ca | r using values
# rounding for value function approximation
# Note: discarded because matplotlib was a bitch.
# def plot(Q):
# """
# Plot the mountain car action value function.
# This only account for the two first dimensions of the state space.
# This plots in a 2 dimensional space circle for each action that is
# bigger the higher the action value function is for this action.
# Assumes all states have the same action set.
# """
# x, y = zip(*Q.keys())
# allXs, allYs, allAreas, allColors = [], [], [], []
# ACTION_COLORS = [
# (1, 0, 0),
# (0, 1, 0),
# (0, 0, 1)
# ]
# areas = defaultdict(dict)
# SEP = 1
# for a in Q[(x[0], y[0])]:
# for xi, yi in zip(x, y):
# bounds = (min(Q[(xi, yi)].values()),
# max(Q[(xi, yi)].values()))
# areas[(xi, yi)][a] = \
# np.pi * SEP * Q[(xi, yi)][a] / (bounds[1] - bounds[0])
# for xi, yi in zip(x, y):
# order = sorted(
# range(Q[(xi, yi)].keys()),
# key=lambda a: Q[(xi, yi)][a])
# for a in order:
# allXs.append(xi)
# allYs.append(yi)
# allAreas.append(areas[(xi, yi)][a])
# allColors.append(tuple(ACTION_COLORS[a]))
# plt.scatter(allXs, allYs, s=allAreas, c=allColors, alpha=0.5)
# plt.show()
class Sarsa(object):
    """Tabular SARSA(0) agent with an epsilon-greedy behaviour policy.

    The action-value table Q is a dict of dicts:
    ``Q[state][action] -> value``, initialised to 0 for every pair.
    """
    def __init__(self, allStates, allActions):
        """
        Sarsa performs in discrete action space and requires the
        action state value function table to be initialized arbitrarily
        for each state and action.

        :param allStates: list of all possible states, each state a tuple
                          of floats, all of the same length
        :param allActions: list of possible actions
        """
        super(Sarsa, self).__init__()
        self._Q = {
            state: {action: 0 for action in allActions}
            for state in allStates
        }
        self._e = 0.2  # epsilon, for the epsilon-greedy policy
        self._a = 1    # alpha, learning rate
        self._g = 0.5  # gamma, discount factor

    def pickAction(self, state, episodeI=None):
        """
        Return an action for `state` under the epsilon-greedy policy.

        If `episodeI` (the episode index) is given, epsilon is annealed to
        1/episodeI so exploration decays as training progresses.
        """
        def pickMax():
            # Greedy choice: first action whose value attains the maximum.
            best = max(self._Q[state].values())
            for action in self._Q[state]:
                if self._Q[state][action] == best:
                    return action

        def pickRandom():
            # Uniform random action.  Uses random.choice(list(...)):
            # the original indexed dict.keys() directly, which only works
            # on Python 2 (dict_keys is not subscriptable on Python 3).
            return random.choice(list(self._Q[state]))

        if episodeI is not None:
            self._e = 1.0 / (episodeI or 1)
        if random.random() > self._e:
            return pickMax()
        return pickRandom()

    def train(self, oldState, newState, action, reward, episodeI):
        """
        TD(0) policy improvement (the SARSA update).

        Returns the next action to take.
        """
        # Sample the next action following the (annealed) e-greedy policy.
        newAction = self.pickAction(newState, episodeI=episodeI)
        # Q(s,a) <- Q(s,a) + alpha * (r + gamma * Q(s',a') - Q(s,a))
        self._Q[oldState][action] = self._Q[oldState][action] + self._a * (
            reward
            + self._g * self._Q[newState][newAction]
            - self._Q[oldState][action])
        return newAction
class RoundingSarsa(object):
    """
    Rounding sarsa dummily uses sarsa on a discretized space.

    Each continuous observation is snapped to the nearest point of a regular
    grid (``self._precision`` points per dimension) and the resulting tuple
    keys into a plain tabular Sarsa agent.  This makes no assumption of the
    relationship there may exist between two states prior to visit.

    Requires a discrete action space.
    Observation space is assumed to be continuous, a gym Box.
    """
    def __init__(self, observationSpace, actionSpace, d=2):
        """
        :param observationSpace: gym Box; only its `low`/`high` bounds are read.
        :param actionSpace: discrete space; only its `n` attribute is read.
        :param d: number of leading observation dimensions to discretize.
        """
        super(RoundingSarsa, self).__init__()
        self._precision = 100
        self._os = observationSpace
        # One uniform grid per dimension; retstep=True also yields the grid
        # step, kept for the rounding done in _threshold().
        values, self._steps = zip(*[
            np.linspace(
                observationSpace.low[x],
                observationSpace.high[x],
                self._precision,
                retstep=True)
            # range works identically on Python 2 and 3 (was xrange, py2-only)
            for x in range(d)
        ])
        allStates = list(itertools.product(*values))
        allActions = range(actionSpace.n)
        self.sarsa = Sarsa(allStates, allActions)

    def _threshold(self, val, step, dim):
        # Snap `val` to the nearest grid point of dimension `dim`.
        # warning: this assumes rounding started at 0 which may not be the case
        return round(float(val - self._os.low[dim]) / step) * step + \
            self._os.low[dim]

    def _round(self, observations):
        # NOTE(review): assumes len(observations) <= d passed to __init__;
        # extra dimensions would index past self._steps -- confirm callers.
        return tuple([
            self._threshold(observations[x], self._steps[x], x)
            for x in range(len(observations))])

    def pickAction(self, state):
        """Pick an action for a continuous state via the tabular agent."""
        return self.sarsa.pickAction(self._round(state))

    def train(self, oldState, newState, action, reward, episodeI):
        """Forward one TD(0) update to the tabular agent on rounded states."""
        return self.sarsa.train(
            self._round(oldState),
            self._round(newState),
            action, reward, episodeI)
RENDER_EPISODES_SKIP = 1000
# load the environment
env = gym.make('MountainCar-v0')
agent = RoundingSarsa(env.observation_space, env.action_space)
for i_episode in range(1, 20001):
# reset the enviroment at the beginning of each episode
observation = env.reset()
# import ipdb; ipdb.set_trace()
action = agent.pickAction(observation)
done = False
episodeReturn = 0
# up to a 100 steps
t = 0
for t in xrange(1000):
if (i_episode - 1) % RENDER_EPISODES_SKIP == 0:
env.render() # render the environment
# print(observation)
# take action, get back the reward and the observations
newObservation, reward, done, info = env.step(action)
episodeReturn += reward
action = agent.train(
observation, newObservation, action, reward, i_episode)
observation = newObservation
if done: # the episode is terminated (we 'lost'/'won')
break
# plot(agent.sarsa._Q)
print("Episode %d finished after %d timesteps" % (i_episode, t + 1))
print "Episode %d Return: " % i_episode, episodeReturn
while True:
observation = env.reset()
agent.pickAction(observation)
done = False
while not done:
env.render() # render the environment
observation, reward, done, info = env.step(action)
action = agent.pickAction(observation)
if done:
break
# Using the following line, gym can record the execution of the environment
# env.monitor.start('/tmp/experiment-name-1')
|
certain/certain | certain/StoreServer/__init__.py | Python | agpl-3.0 | 325 | 0 | """StoreServer provides a number of plugins which can provide a store
service on a server.
There are currently 2 plugins available: ``webserver`` and ``gitdaemon``.
These can be used to simplify provision of a store, e.g using the
``webserver` | ` StoreServer instead of installing a 3rd party webserver
suc | h as Apache.
"""
|
sourcepole/qgis-openlayers-plugin | openlayers/openlayers_overview.py | Python | gpl-2.0 | 2,585 | 0.000387 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Openlayers Overview - A QGIS plugin to show map in browser(google maps and others)
-------------------
begin : 2011-03-01
copyright : (C) 2011 by Luiz Motta
author : Luiz P. Motta
email : motta _dot_ luiz _at_ gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software | ; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
************** | *************************************************************/
"""
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QApplication, QDockWidget
from .openlayers_ovwidget import OpenLayersOverviewWidget
class OLOverview(object):
    """Owns the dockable "OpenLayers Overview" widget and toggles it.

    The dock widget is created lazily on the first setVisible(True) and torn
    down again on setVisible(False), so only one instance ever exists.
    """
    def __init__(self, iface, olLayerTypeRegistry):
        self._iface = iface
        self._olLayerTypeRegistry = olLayerTypeRegistry
        self._dockwidget = None  # None <=> the overview is currently hidden
        self._oloWidget = None

    # Private

    def _setDocWidget(self):
        # Build the dock widget and embed the overview widget inside it.
        self._dockwidget = QDockWidget(QApplication.translate(
            "OpenLayersOverviewWidget", "OpenLayers Overview"),
            self._iface.mainWindow())
        self._dockwidget.setObjectName("dwOpenlayersOverview")
        self._oloWidget = OpenLayersOverviewWidget(self._iface,
                                                   self._dockwidget,
                                                   self._olLayerTypeRegistry)
        self._dockwidget.setWidget(self._oloWidget)

    def _initGui(self):
        self._setDocWidget()
        self._iface.addDockWidget(Qt.LeftDockWidgetArea, self._dockwidget)

    def _unload(self):
        self._dockwidget.close()
        self._iface.removeDockWidget(self._dockwidget)
        # ROBUSTNESS FIX: reset the reference instead of `del self._oloWidget`;
        # deleting the attribute made any later access raise AttributeError,
        # whereas None keeps the attribute in a well-defined "unloaded" state.
        self._oloWidget = None
        self._dockwidget = None

    # Public

    def setVisible(self, visible):
        """Show (creating if needed) or hide/tear down the overview dock."""
        if visible:
            if self._dockwidget is None:
                self._initGui()
        else:
            if self._dockwidget is not None:
                self._unload()
|
karlproject/karl.saml2 | karl/saml2/identity.py | Python | gpl-2.0 | 473 | 0 | from pyramid.view import view_config
@view_config(name='sso', renderer='templates/login.pt')
def sign_on(context, request):
    """Render the SAML2 SSO login template.

    The body only echoes the request's query-string parameters into the
    template as hidden form fields.  The full SSO dance described below is
    NOT performed here (NOTE(review): presumably handled elsewhere or still
    TODO -- confirm):

    - If the request already has valid credentials, process the 'SAMLRequest'
      query string value and return a POSTing redirect.
    - If processing the POSTed login form, authenticate.
    - If no authenticated user is known, display the login form.
    """
    return {'hidden': request.GET.items()}
|
pap/rethinkdb | scripts/generate_serialize_macros.py | Python | agpl-3.0 | 9,043 | 0.003096 | #!/usr/bin/env python
# Copyright 2010-2014 RethinkDB, all rights reserved.
import sys
"""This script is used to generate the RDB_MAKE_SERIALIZABLE_*() and
RDB_MAKE_ME_SERIALIZABLE_*() macro definitions. Because there are so
many variations, and because they are so similar, it's easier to just
have a Python s | cript to generate them.
This script is meant to be run as follows (assuming you are in the
"rethinkdb/src" directory) | :
$ ../scripts/generate_serialize_macros.py > rpc/serialize_macros.hpp
"""
def generate_make_serializable_macro(nfields):
    """Print (to stdout) the C++ macro definitions for `nfields` fields.

    Emits the bodies of:
      - RDB_MAKE_SERIALIZABLE_<n> and RDB_MAKE_SERIALIZABLE_<n>_FOR_CLUSTER
      - RDB_IMPL_SERIALIZABLE_<n> plus its _FOR_CLUSTER / _SINCE_* variants
      - RDB_MAKE_ME_SERIALIZABLE_<n> (friend-function flavour, usable from
        inside a class body)
    """
    # ", field1, field2, ..." -- the trailing macro parameter list.
    fields = "".join(", field%d" % (i + 1) for i in xrange(nfields))
    # With zero fields the wm/s/thing parameters go unused; mark them UNUSED.
    zeroarg = ("UNUSED " if nfields == 0 else "")
    # --- RDB_MAKE_SERIALIZABLE_<n>: templated serialize()/deserialize() ---
    print "#define RDB_MAKE_SERIALIZABLE_%d(type_t%s) \\" % \
        (nfields, fields)
    print "    template <cluster_version_t W> \\"
    print "    void serialize(%swrite_message_t *wm, %sconst type_t &thing) { \\" % (zeroarg, zeroarg)
    for i in xrange(nfields):
        print "        serialize<W>(wm, thing.field%d); \\" % (i + 1)
    print "    } \\"
    print "    template <cluster_version_t W> \\"
    print "    archive_result_t deserialize(%sread_stream_t *s, %stype_t *thing) { \\" % (zeroarg, zeroarg)
    print "        archive_result_t res = archive_result_t::SUCCESS; \\"
    for i in xrange(nfields):
        print "        res = deserialize<W>(s, deserialize_deref(thing->field%d)); \\" % (i + 1)
        print "        if (bad(res)) { return res; } \\"
    print "        return res; \\"
    print "    } \\"
    print "    extern int dont_use_RDB_MAKE_SERIALIZABLE_within_a_class_body"
    print
    # --- RDB_MAKE_SERIALIZABLE_<n>_FOR_CLUSTER: CLUSTER-only specializations ---
    print "#define RDB_MAKE_SERIALIZABLE_%d_FOR_CLUSTER(type_t%s) \\" % \
        (nfields, fields)
    print "    template <> \\"
    print "    void serialize<cluster_version_t::CLUSTER>( \\"
    print "            %swrite_message_t *wm, %sconst type_t &thing) { \\" % (zeroarg, zeroarg)
    for i in xrange(nfields):
        print "        serialize<cluster_version_t::CLUSTER>(wm, thing.field%d); \\" % (i + 1)
    print "    } \\"
    print "    template <> \\"
    print "    archive_result_t deserialize<cluster_version_t::CLUSTER>( \\"
    print "            %sread_stream_t *s, %stype_t *thing) { \\" % (zeroarg, zeroarg)
    print "        archive_result_t res = archive_result_t::SUCCESS; \\"
    for i in xrange(nfields):
        print "        res = deserialize<cluster_version_t::CLUSTER>( \\"
        print "                s, deserialize_deref(thing->field%d)); \\" % (i + 1)
        print "        if (bad(res)) { return res; } \\"
    print "        return res; \\"
    print "    } \\"
    print "    extern int dont_use_RDB_MAKE_SERIALIZABLE_FOR_CLUSTER_within_a_class_body"
    print
    # --- RDB_IMPL_SERIALIZABLE_<n> and its instantiation variants ---
    # See the note in the comment below.
    print "#define RDB_IMPL_SERIALIZABLE_%d(type_t%s) RDB_MAKE_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields, nfields, fields)
    print
    print "#define RDB_IMPL_SERIALIZABLE_%d_FOR_CLUSTER(type_t%s) \\" % (nfields, fields)
    print "    RDB_MAKE_SERIALIZABLE_%d_FOR_CLUSTER(type_t%s); \\" % (nfields, fields)
    print "    INSTANTIATE_SERIALIZABLE_FOR_CLUSTER(type_t);"
    print
    print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v1_13(type_t%s) \\" % (nfields, fields)
    print "    RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
    print "    INSTANTIATE_SERIALIZABLE_SINCE_v1_13(type_t)"
    print
    print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v1_16(type_t%s) \\" % (nfields, fields)
    print "    RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
    print "    INSTANTIATE_SERIALIZABLE_SINCE_v1_16(type_t)"
    print
    print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v2_1(type_t%s) \\" % (nfields, fields)
    print "    RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
    print "    INSTANTIATE_SERIALIZABLE_SINCE_v2_1(type_t)"
    # --- RDB_MAKE_ME_SERIALIZABLE_<n>: friend functions, for class bodies ---
    print "#define RDB_MAKE_ME_SERIALIZABLE_%d(type_t%s) \\" % \
        (nfields, fields)
    print "    template <cluster_version_t W> \\"
    print "    friend void serialize(%swrite_message_t *wm, %sconst type_t &thing) { \\" % (zeroarg, zeroarg)
    for i in xrange(nfields):
        print "        serialize<W>(wm, thing.field%d); \\" % (i + 1)
    print "    } \\"
    print "    template <cluster_version_t W> \\"
    print "    friend archive_result_t deserialize(%sread_stream_t *s, %stype_t *thing) { \\" % (zeroarg, zeroarg)
    print "        archive_result_t res = archive_result_t::SUCCESS; \\"
    for i in xrange(nfields):
        print "        res = deserialize<W>(s, deserialize_deref(thing->field%d)); \\" % (i + 1)
        print "        if (bad(res)) { return res; } \\"
    print "        return res; \\"
    print "    }"
if __name__ == "__main__":
print "// Copyright 2010-2014 RethinkDB, all rights reserved."
print "#ifndef RPC_SERIALIZE_MACROS_HPP_"
print "#define RPC_SERIALIZE_MACROS_HPP_"
print
print "/* This file is automatically generated by '%s'." % " ".join(sys.argv)
print "Please modify '%s' instead of modifying this file.*/" % sys.argv[0]
print
print "#include <type_traits>"
print
print "#include \"containers/archive/archive.hpp\""
print "#include \"containers/archive/versioned.hpp\""
print "#include \"errors.hpp\""
print "#include \"version.hpp\""
print
print """
/* The purpose of these macros is to make it easier to serialize and
unserialize data types that consist of a simple series of fields, each
of which is serializable. Suppose we have a type "struct point_t {
int32_t x, y; }" that we want to be able to serialize. To make it
serializable automatically, either write
RDB_MAKE_SERIALIZABLE_2(point_t, x, y) at the global scope, or write
RDB_MAKE_ME_SERIALIZABLE_2(point_t, x, y) within the body of the
point_t type.
The _FOR_CLUSTER variants of the macros exist to indicate that a type
can only be serialized for use within the cluster, thus should not be
serialized to disk.
The _SINCE_v1_13 variants of the macros exist to make the conversion to
versioned serialization easier. They must only be used for types which
serialization format has not changed since version 1.13.0.
Once the format changes, you can still use the macros without
the _SINCE_v1_13 suffix and instantiate the serialize() and deserialize()
functions explicitly for a certain version.
We use dummy "extern int" declarations to force a compile error in
macros that should not be used inside of class bodies. */
""".strip()
print "namespace helper {"
print
print "/* When a `static_assert` is used within a templated class or function,"
print " * but does not depend on any template parameters the C++ compiler is free"
print " * to evaluate the assert even before instantiating that template. This"
print " * helper class allows a `static_assert(false, ...)` to depend on the"
print " * `cluster_version_t` template parameter."
print " * Also see http://stackoverflow.com/a/14637534. */"
print "template <cluster_version_t W>"
print "struct always_false"
print " : std::false_type { };"
print
print "} // namespace helper"
print
print "#define RDB_DECLARE_SERIALIZABLE(type_t) \\"
print " template <cluster_version_t W> \\"
print " void serialize(write_message_t *, const type_t &); \\"
print " template <cluster_version_t W> \\"
print " archive_result_t deserialize(read_stream_t *s, type_t *thing); \\"
print " extern int dont_use_RDB_DECLARE_SERIALIZABLE_within_a_class_body"
print
print "#define RDB_DECLARE_SERIALIZABLE_FOR_CLUSTER(type_t) \\"
print " template <cluster_version_t W> \\"
print " void serialize(write_message_t *, const type_t &) { \\"
print " static_assert(helper::always_false<W>::value, \\"
print " \"This type is only serializable for cluster.\"); \\"
print " unreachable(); \\"
print " } \\"
print " template <> \\"
print " void serialize<cluster_version_t::CLUSTER>( \\"
print " write_message_t *, const type_t &); \\"
print " template <cluster_version_t W> \\"
print " archive_result_t deserialize(read_stream_t *, type_t |
justb4/stetl | docs/conf.py | Python | gpl-3.0 | 7,939 | 0.00718 | # -*- coding: utf-8 -*-
#
# Stetl documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 2 11:01:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# indicate Sphinx is building (to replace @Config decorators)
os.environ['SPHINX_BUILD'] = '1'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# Make the project package (one level above docs/) importable for autodoc.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the | project.
project = u'Stetl'
copyright = u'20 | 13+, Just van den Broecke'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2-dev'
# The full version, including alpha/beta/rc tags.
release = '1.2-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Stetldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Stetl.tex', u'Stetl Documentation',
u'Just van den Broecke', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'stetl', u'Stetl Documentation',
[u'Just van den Broecke'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Stetl', u'Stetl Documentation',
u'Just van den Broecke', 'Stetl', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
sdiazpier/nest-simulator | testsuite/summarize_tests.py | Python | gpl-2.0 | 3,966 | 0.001513 | # -*- coding: utf-8 -*-
#
# summarize_tests.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Invoke this script as
#
# python3 parse_test_output.py <path to test output>.xml
#
# It will print on a single line
#
# <No of tests run> <No of skipped tests> <No of failed tests> <No of errored tests> <List of unsuccessful tests>
import junitparser as jp
import glob
import os
import sys
import xml
assert int(jp.version.split('.')[0]) >= 2, 'junitparser version must be >= 2'
def parse_result_file(fname):
    """Parse one JUnit XML result file and summarize its testsuite.

    Returns a dict with the counts (Tests, Skipped, Failures, Errors),
    the total runtime and the names of the unsuccessful (non-skipped)
    test cases.
    """
    suite = jp.JUnitXml.fromfile(fname)
    if isinstance(suite, jp.junitparser.JUnitXml):
        # special case for pytest, which wraps all once more
        contained = list(suite)
        assert len(contained) == 1, "JUnit XML files may only contain results from a single testsuite."
        suite = contained[0]

    assert all(len(case.result) == 1 for case in suite if case.result), 'Case result has unexpected length > 1'

    unsuccessful = []
    for case in suite:
        if case.result and not isinstance(case.result[0], jp.junitparser.Skipped):
            unsuccessful.append('.'.join((case.classname, case.name)))

    return {'Tests': suite.tests,
            'Skipped': suite.skipped,
            'Failures': suite.failures,
            'Errors': suite.errors,
            'Time': suite.time,
            'Failed tests': unsuccessful}
if __name__ == '__main__':
assert len(sys.argv) == 2, 'summarize_tests must be called with TEST_OUTDIR.'
test_outdir = sys.argv[1]
results = {}
totals = {'Tests': 0, 'Skipped': 0,
'Failures': 0, 'Errors': 0,
'Time': 0, 'Failed tests': []}
for pfile in sorted(glob.glob(os.path.join(test_outdir, '*.xml'))):
ph_name = os.path.splitext(os.path.split(pfile)[1])[0].replace('_', ' ')
ph_res = parse_result_file(pfile)
results[ph_name] = ph_res
for k, v in ph_res.items():
totals[k] += v
cols = ['Tests', 'Skipped', 'Failures', 'Errors', 'Time']
tline = '-' * (len(cols) * 10 + 20)
print()
print()
print(tline)
print('NEST Testsuite Results')
print(tline)
print('{:<20s}'.format('Phase'), end='')
for c in cols:
print('{:>10s}'.format(c), end='')
print()
print(tline)
for pn, pr in results.items():
print('{:<20s}'.format(pn), end='')
for c in cols:
fstr = '{:10.1f}' if c == 'Time' else '{:10d}'
print(fstr.format(pr[c]), end='')
print()
print(tline)
print('{:<20s}'.format('Total'), end='')
for c in cols:
fstr = '{:10.1f}' if c == 'Time' else '{:10d}'
print(fstr.format(totals[c]), end='')
print()
print(tline)
print()
if totals['Failures'] + totals['Errors'] > 0:
print('THE NEST TESTSUITE DISCOVERED PROBLEMS')
print(' The following tests failed')
for t in totals['Failed tests']:
print(f' | {t}') # | marks line for parsing
print()
print( | ' Please report test failures by creating an issue at')
print(' | ERROR: type should be string, got " https://github.com/nest/nest_simulator/issues')\n print()\n print(tline)\n print()\n\n sys.exit(1)\n else:\n print('The NEST Testsuite passed successfully.')\n print()\n print(tline)\n print()\n" |
guillaume-chevalier/HAR-stacked-residual-bidir-LSTMs | data/preprocess_data.py | Python | apache-2.0 | 13,571 | 0.002432 | # Adapted from: https://github.com/sussexwearlab/DeepConvLSTM
__author__ = 'fjordonez, gchevalier'
from signal_filtering import filter_opportunity_datasets_accelerometers
import os
import zipfile
import argparse
import numpy as np
import cPickle as cp
from io import BytesIO
from pandas import Series
# Hardcoded number of sensor channels employed in the OPPORTUNITY challenge
NB_SENSOR_CHANNELS = 113
NB_SENSOR_CHANNELS_WITH_FILTERING = 149 # =77 gyros +36*2 accelerometer channels
# Hardcoded names of the files defining the OPPORTUNITY challenge data. As named in the original data.
OPPORTUNITY_DATA_FILES_TRAIN = [
'OpportunityUCIDataset/dataset/S1-Drill.dat',
'OpportunityUCIDataset/dataset/S1-ADL1.dat',
'OpportunityUCIDataset/dataset/S1-ADL2.dat',
'OpportunityUCIDataset/dataset/S1-ADL3.dat',
'OpportunityUCIDataset/dataset/S1-ADL4.dat',
'OpportunityUCIDataset/dataset/S1-ADL5.dat',
'OpportunityUCIDataset/dataset/S2-Drill.dat',
'OpportunityUCIDataset/dataset/S2-ADL1.dat',
'OpportunityUCIDataset/dataset/S2-ADL2.dat',
'OpportunityUCIDatase | t/dataset/S2-ADL3.dat',
'OpportunityUCIDataset/dataset/S3-Drill.dat',
'OpportunityUCIDataset/dataset/S3-ADL1.dat',
'OpportunityUCIDataset/dataset/S3-ADL2.dat',
'OpportunityUCIDataset/dataset/S3-ADL3.dat'
]
OPPORTUNITY_DATA_FILES_TEST = [
' | OpportunityUCIDataset/dataset/S2-ADL4.dat',
'OpportunityUCIDataset/dataset/S2-ADL5.dat',
'OpportunityUCIDataset/dataset/S3-ADL4.dat',
'OpportunityUCIDataset/dataset/S3-ADL5.dat'
]
def select_columns_opp(data):
    """Selection of the 113 columns employed in the OPPORTUNITY challenge

    :param data: numpy integer matrix
        Sensor data (all features)
    :return: tuple((numpy integer 2D matrix, numpy integer 1D matrix))
        (Selection of features (N, f), feature_is_accelerometer (f,) one-hot)
    """
    # Columns to drop, in terms of column_names.txt's ranges:
    # excluded-included (here 0-indexed).
    # BUGFIX: this block used to be duplicated verbatim (computed twice with
    # identical results); the dead duplicate has been removed.
    features_delete = np.arange(46, 50)
    features_delete = np.concatenate([features_delete, np.arange(59, 63)])
    features_delete = np.concatenate([features_delete, np.arange(72, 76)])
    features_delete = np.concatenate([features_delete, np.arange(85, 89)])
    features_delete = np.concatenate([features_delete, np.arange(98, 102)])
    features_delete = np.concatenate([features_delete, np.arange(134, 243)])
    features_delete = np.concatenate([features_delete, np.arange(244, 249)])

    # Accelerometer columns, in terms of column_names.txt's ranges:
    # excluded-included
    features_acc = np.arange(1, 37)
    features_acc = np.concatenate([features_acc, np.arange(134, 194)])
    features_acc = np.concatenate([features_acc, np.arange(207, 231)])

    # One-hot for everything that is an accelerometer
    is_accelerometer = np.zeros([243])
    is_accelerometer[features_acc] = 1

    # Deleting some signals to keep only the 113 of the challenge
    data = np.delete(data, features_delete, 1)
    # BUGFIX: some indices in features_delete (244..248) are out of range for
    # the 243-long one-hot vector.  Old NumPy silently ignored them (with a
    # DeprecationWarning); NumPy >= 1.13 raises IndexError, so filter them
    # out explicitly to keep the original (ignore) behavior.
    in_range = features_delete[features_delete < is_accelerometer.size]
    is_accelerometer = np.delete(is_accelerometer, in_range, 0)

    # Shape `(N, f), (f, )`
    # where N is number of timesteps and f is the retained feature count
    return data, is_accelerometer
def normalize(x):
    """Normalize every sensor channel.

    Each channel is centred on its mean and divided by twice its standard
    deviation (a tiny epsilon guards against division by zero).

    :param x: numpy integer matrix
        Sensor data
    :return:
        Normalized sensor data (float32)
    """
    values = np.array(x, dtype=np.float32)
    values -= np.mean(values, axis=0)
    scale = np.std(values, axis=0) + 0.000001
    values /= scale * 2  # the factor 2 keeps the resulting values small
    return values
def split_data_into_time_gyros_accelerometers(data, is_accelerometer):
    """Split ``data`` into its time, gyroscope and accelerometer parts.

    Index 0 of the features is reserved for time and is excluded from both
    sensor groups.  ``is_accelerometer`` is the one-hot vector flagging
    which columns belong to accelerometers.
    """
    # Map the one-hot to signs: 0 -> -1 (gyro), 1 -> +1 (accelerometer).
    signs = np.array(is_accelerometer * 2 - 1, dtype=np.int32)
    signed_idx = np.arange(len(signs)) * signs
    # Column 0 (time) lands in both deletion lists, so it is dropped from
    # both sensor groups.
    accel_columns = [e for e in signed_idx if e >= 0]
    gyro_columns = [-e for e in signed_idx if e <= 0]

    time = data[:, 0]
    gyros = np.delete(data, accel_columns, 1)
    accms = np.delete(data, gyro_columns, 1)
    return time, gyros, accms
def divide_x_y(data, label, filter_accelerometers):
    """Segments each sample into (time+features) and (label)

    :param data: numpy integer matrix
        Sensor data
    :param label: string, ['gestures' (default), 'locomotion']
        Type of activities to be recognized
    :return: numpy integer matrix, numpy integer array
        Features encapsulated into a matrix and labels as an array
    """
    if filter_accelerometers:
        features = data[:, :114]   # keep the time column
    else:
        features = data[:, 1:114]  # drop the time column

    # Choose labels type for y
    if label not in ['locomotion', 'gestures']:
        raise RuntimeError("Invalid label: '%s'" % label)
    label_column = 114 if label == 'locomotion' else 115
    return features, data[:, label_column]
def adjust_idx_labels(data_y, label):
    """Transforms original labels into the range [0, nb_labels-1]

    :param data_y: numpy integer array
        Sensor labels
    :param label: string, ['gestures' (default), 'locomotion']
        Type of activities to be recognized
    :return: numpy integer array
        Modified sensor labels (modified in place and returned)
    """
    if label == 'locomotion':
        remap = [(4, 3), (5, 4)]
    elif label == 'gestures':
        remap = [(406516, 1), (406517, 2), (404516, 3), (404517, 4),
                 (406520, 5), (404520, 6), (406505, 7), (404505, 8),
                 (406519, 9), (404519, 10), (406511, 11), (404511, 12),
                 (406508, 13), (404508, 14), (408512, 15), (407521, 16),
                 (405506, 17)]
    else:
        remap = []
    # Apply the substitutions in the original (sequential) order.
    for original, adjusted in remap:
        data_y[data_y == original] = adjusted
    return data_y
def check_data(data_set):
    """Try to access to the file and checks if dataset is in the data directory
    In case the file is not found try to download it from original location

    NOTE(review): Python 2 only (`print` statements, `urllib.urlretrieve`).

    :param data_set:
        Path with original OPPORTUNITY zip file
    :return:
        Directory part of the (possibly rewritten) dataset path
    """
    print 'Checking dataset {0}'.format(data_set)
    data_dir, data_file = os.path.split(data_set)
    # When a directory is not provided, check if dataset is in the data directory
    if data_dir == "" and not os.path.isfile(data_set):
        new_path = os.path.join(os.path.split(__file__)[0], "data", data_set)
        if os.path.isfile(new_path) or data_file == 'OpportunityUCIDataset.zip':
            data_set = new_path
    # When dataset not found, try to download it from UCI repository
    if (not os.path.isfile(data_set)) and data_file == 'OpportunityUCIDataset.zip':
        print '... dataset path {0} not found'.format(data_set)
        import urllib
        origin = (
            'https://archive.ics.uci.edu/ml/machine-learning-databases/00226/OpportunityUCIDataset.zip'
        )
        if not os.path.exists(data_dir):
            print '... creating directory {0}'.format(data_dir)
            os.makedirs(data_dir)
        print '... downloading data from {0}'.format(origin)
        urllib.urlretrieve(origin, data_set)
    return data_dir
def process_dataset |
google/tf-quant-finance | tf_quant_finance/experimental/pricing_platform/framework/core/models.py | Python | apache-2.0 | 1,474 | 0.005427 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under | the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WA | RRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supported pricing models."""
import enum
from typing import Any
import dataclasses
class InterestRateModelType(enum.Enum):
  """Models for pricing interest rate derivatives.

  LOGNORMAL_RATE: Lognormal model for the underlying rate.
  NORMAL_RATE: Normal model for the underlying rate
  LOGNORMAL_SMILE_CONSISTENT_REPLICATION: Smile consistent replication
    (lognormal vols).
  NORMAL_SMILE_CONSISTENT_REPLICATION: Smile consistent replication
    (normal vols).
  HULL_WHITE_ONE_FACTOR: Hull-White single factor model of short rate.
  """
  # Enum values are arbitrary identifiers; only identity matters to callers.
  LOGNORMAL_RATE = 1
  NORMAL_RATE = 2
  LOGNORMAL_SMILE_CONSISTENT_REPLICATION = 3
  NORMAL_SMILE_CONSISTENT_REPLICATION = 4
  HULL_WHITE_ONE_FACTOR = 5
@dataclasses.dataclass(frozen=True)
class HullWhite1FactorConfig:
  """Immutable parameter bundle for the Hull-White one-factor model."""
  # Mean-reversion speed parameter; type is opaque here (Any) — presumably a
  # scalar, curve or tensor, TODO confirm against consumers.
  mean_reversion: Any
  # Short-rate volatility parameter; same typing caveat as above.
  volatility: Any
__all__ = ["InterestRateModelType", "HullWhite1FactorConfig"]
|
IskyN/submeter-bill-generator | scratch/testing_requests.py | Python | apache-2.0 | 1,093 | 0.000915 | import requests
# Endpoints of the submeter-data site: a login form and a CSV export.
site_url = "http://meterdata.submetersolutions.com"
login_url = "/login.php"
file_url = "/consumption_csv.php"

# Credentials are collected interactively, never hard-coded.
username = input("Enter username: ")
password = input("Enter password: ")

# Thanks to tigerFinch @ http://stackoverflow.com/a/17633072
# Fill in your details here to be posted to the login form.
login_payload = {"txtUserName": username,
                 "txtPassword": password,
                 "btnLogin": "Login"}

# Report parameters for the CSV download (site and date range).
query_string = {"SiteID": "128",
                "FromDate": "02/01/2017",
                "ToDate": "02/28/2017",
                "SiteName": "Brimley Plaza"}

# Use 'with' to ensure the session context is closed after use.
# The session keeps the login cookie for the follow-up download request.
with requests.Session() as s:
    p = s.post(site_url + login_url, data=login_payload)
    # print the html returned or something more intelligent to see if it's a successful login page.
    # print(p.text)

    # An authorised request: stream the CSV and save it locally.
    r = s.get(site_url + file_url, params=query_string)
    with open("testfile.csv", 'wb') as f:
        f.write(r.content)
    # print(r.text)
|
walterbender/turtleconfusion | plugins/rfid/rfidrweusb.py | Python | mit | 6,437 | 0.000777 | from device import RFIDDevice
from serial import Serial
import dbus
from dbus.mainloop.glib import DBusGMainLoop
import gobject
from time import sleep
import utils
HAL_SERVICE = 'org.freedesktop.Hal'
HAL_MGR_PATH = '/org/freedesktop/Hal/Manager'
HAL_MGR_IFACE = 'org.freedesktop.Hal.Manager'
HAL_DEV_IFACE = 'org.freedesktop.Hal.Device'
REGEXP_SERUSB = '\/org\/freedesktop\/Hal\/devices\/usb_device['\
'a-z,A-Z,0-9,_]*serial_usb_[0-9]'
VERSIONS = ['301']
class RFIDReader(RFIDDevice):
    """
    RFIDRW-E-W interface.

    Wraps an RFIDRW-E-USB serial reader discovered through HAL/D-Bus,
    polls it for animal tags and emits the "tag-read"/"disconnected"
    signals inherited from RFIDDevice.
    """

    def __init__(self):
        RFIDDevice.__init__(self)
        self.last_tag = ""       # last tag read, as a hex string
        self.tags = []           # raw reader responses already reported
        self.ser = Serial()
        self.device = ''         # device file, e.g. /dev/ttyUSB0
        self.device_path = ''    # HAL udi of the device
        self._connected = False
        loop = DBusGMainLoop()
        self.bus = dbus.SystemBus(mainloop=loop)
        hmgr_iface = dbus.Interface(
            self.bus.get_object(HAL_SERVICE, HAL_MGR_PATH), HAL_MGR_IFACE)
        # Watch hot-unplug events so we can emit "disconnected".
        hmgr_iface.connect_to_signal('DeviceRemoved', self._device_removed_cb)

    def get_present(self):
        """
        Checks if RFID-RW-USB device is present.
        Returns True if so (recording its device file and udi), False otherwise.
        """
        hmgr_if = dbus.Interface(
            self.bus.get_object(HAL_SERVICE, HAL_MGR_PATH), HAL_MGR_IFACE)
        serialusb_devices = set(
            hmgr_if.FindDeviceStringMatch('serial.type', 'usb')) & set(
            hmgr_if.FindDeviceStringMatch('info.subsystem', 'tty'))

        for udi in serialusb_devices:
            serialusb_if = dbus.Interface(
                self.bus.get_object(HAL_SERVICE, udi), HAL_DEV_IFACE)
            if not serialusb_if.PropertyExists('info.parent'):
                continue
            parent_udi = str(serialusb_if.GetProperty('info.parent'))
            parent = dbus.Interface(
                self.bus.get_object(HAL_SERVICE, parent_udi), HAL_DEV_IFACE)
            if not (parent.PropertyExists('info.linux.driver') and
                    str(parent.GetProperty('info.linux.driver')) == 'ftdi_sio'):
                continue

            device = str(serialusb_if.GetProperty('linux.device_file'))
            ser = Serial(device, 9600, timeout=0.1)
            try:
                ser.read(100)  # drain any stale input
                # "ver" command, CR-terminated
                ser.write('v')
                ser.write('e')
                ser.write('r')
                ser.write('\x0D')
                resp = ser.read(4)
            finally:
                # BUGFIX: the probe port used to be left open, leaking the
                # handle and potentially interfering with do_connect().
                ser.close()
            if resp[0:-1] in VERSIONS:
                self.device = device
                self.device_path = udi
                return True
        return False

    def do_connect(self):
        """
        Connects to the device.
        Returns True if successfull, False otherwise.
        """
        retval = False
        if self.get_present():
            try:
                self.ser = Serial(self.device, 9600, timeout=0.1)
                self._connected = True
                # BUGFIX: this used to test the bound method object
                # (`if self._select_animal_tag:`), which is always truthy,
                # so the reader was never actually switched to animal-tag
                # mode.  Call it and honor its result.
                if self._select_animal_tag():
                    # gobject.idle_add(self._loop)
                    gobject.timeout_add(1000, self._loop)
                    retval = True
            except BaseException:
                self._connected = False
        return retval

    def do_disconnect(self):
        """
        Disconnect from the device.
        """
        self.ser.close()
        self._connected = False

    def read_tag(self):
        """
        Returns the last read value.
        """
        return self.last_tag

    def _select_animal_tag(self):
        """
        Sends the "Select Tag 2" (animal tag) command to the device.
        Returns True when the reader acknowledges with "OK".
        """
        self.ser.read(100)
        self.ser.write('s')
        self.ser.write('t')
        self.ser.write('2')
        self.ser.write('\x0d')
        resp = self.ser.read(3)[0:-1]
        return resp == 'OK'

    def get_version(self):
        """
        Sends the version command to the device and returns
        a string with the device version.
        """
        ver = "???"
        self.ser.read(100)
        self.ser.write('v')
        self.ser.write('e')
        self.ser.write('r')
        self.ser.write('\x0d')
        resp = self.ser.read(4)[0:-1]
        if resp in VERSIONS:
            return "RFIDRW-E-USB " + resp
        return ver

    def _device_removed_cb(self, path):
        """
        Called when a device is removed.
        Checks if the removed device is itself and emits the "disconnected"
        signal if so.
        """
        if path == self.device_path:
            self.device_path = ''
            self.ser.close()
            self._connected = False
            self.tags = []
            self.emit("disconnected", "RFID-RW-USB")

    def _loop(self):
        """
        Periodic poll ("rat" command) for a tag, scheduled via gobject.
        Returns True to stay scheduled, False to stop once disconnected.
        """
        if not self._connected:
            return False
        self.ser.read(100)
        self.ser.write('r')
        self.ser.write('a')
        self.ser.write('t')
        self.ser.write('\x0d')
        resp = self.ser.read(33)[0:-1].split('_')
        # BUGFIX: was `resp.__len__() is not 6` — an *identity* comparison
        # against an int, whose result is implementation-defined.
        if len(resp) != 6 or resp in self.tags:
            return True
        self.tags.append(resp)

        # Assemble the tag bit string from the reader fields.
        # NOTE(review): the layout (animal bit, 14 reserved bits, data bit,
        # 10-bit country code, id padded to 10 bits) is kept exactly as the
        # original; the 10-bit id padding looks short for ISO animal tags —
        # confirm against the reader protocol before changing.
        anbit_bin = utils.dec2bin(int(resp[2]))
        reserved_bin = '00000000000000'
        databit_bin = utils.dec2bin(int(resp[3]))
        country_bin = utils.dec2bin(int(resp[0]))
        while len(country_bin) < 10:
            country_bin = '0' + country_bin
        id_bin = utils.dec2bin(int(resp[1]))
        while len(id_bin) < 10:
            id_bin = '0' + id_bin
        tag_bin = anbit_bin + reserved_bin + databit_bin + country_bin + id_bin
        data = utils.bin2hex(tag_bin)
        self.emit("tag-read", data)
        self.last_tag = data
        return True
return True
# Testing
# if __name__ == '__main__':
# def handler(device, idhex):
# """
# Handler for "tag-read" signal.
# Prints the tag id.
# """
# print "ID: ", idhex
#
# dev = RFIDReader()
# if dev.get_present():
# print "SIPI!"
# dev.do_connect()
# dev.connect('tag-read', handler)
# else:
# print "Not connected"
#
# mloop = gobject.MainLoop()
# mloop.run()
|
Christophe-Foyer/Naive_Bayes_Price_Prediction | Old files and backups/Naive Bayes Classifier - Copie.py | Python | gpl-3.0 | 7,147 | 0.019169 | #!/usr/bin/env python
# Wheat price prediction using Baysian classification.
# Version 1.0
# Christophe Foyer - 2016
from xlrd import open_workbook
import random
import math
#set filename:
filename = 'Wheat-price-data.xlsx'
#import wheat price data (will automate downloading later, probably a different script that writes to the excel file)
def importExcel(filename):
    """Read the price spreadsheet into a row-major list of floats.

    Column layout of each output row: [0] excel day serial, [1-3] year/
    month/day from excelDate, [4] price rounded to the nearest 10, [5+]
    the remaining sheet columns shifted right by 3.
    """
    #this function is a very ugly, and not that effecient. but it should work...
    excel = open_workbook(filename)
    #extract data from excel sheet
    for sheet in excel.sheets():
        number_of_rows = sheet.nrows
        number_of_columns = sheet.ncols
        # NOTE(review): dataset is re-created for every sheet, so only the
        # last sheet's data survives the loop — confirm this is intended.
        dataset = [[0.0 for x in range(number_of_columns + 3)] for y in range(number_of_rows)]
        date = []
        date_string = []
        price = []
        rows = []
        for row in range(1, number_of_rows):
            #excel stores dates as the number of days since 1900-Jan-0 (not sure if that means january 1st or december 31st but that won't matter much in our case)
            #new method: substract number of days in year until negative
            date_string = str(sheet.cell(row,0).value)
            days = float(date_string)
            dataset[row-1][0] = float(days)
            [dataset[row-1][1], dataset[row-1][2], dataset[row-1][3]] = excelDate(days)
            value = (sheet.cell(row,1).value)
            try:
                value = str(int(value))
                dataset[row-1][4] = float(value)
            except ValueError:
                pass
            finally:
                # NOTE(review): this `finally` runs in all cases and
                # overwrites the assignment above; if `value` is not
                # numeric, float(value) raises here *uncaught*.
                dataset[row-1][4] = round(float(value)/10,0)*10
            #now the rest of the data
            for col in range(2, number_of_columns):
                value = (sheet.cell(row,col).value)
                try:
                    dataset[row-1][col + 3] = float(value)
                except ValueError:
                    pass
    #now all the data should be accessible from the "dataset" array
    # Drop the trailing all-zero row left over by the 1-based row loop.
    del dataset[-1]
    #print dataset
    return dataset
def excelDate(days):
    """Convert an Excel day serial into [years-since-1900, month, day].

    Excel counts days from 1900-Jan-0 and, like Lotus 1-2-3, treats 1900
    as a leap year; the hardcoded leap-year list reproduces that quirk.
    """
    month_day_count = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    leap_years = [1900, 1904, 1908, 1912, 1916, 1920, 1924, 1928, 1932, 1936, 1940, 1944, 1948, 1952, 1956, 1960, 1964, 1968, 1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, 2024, 2028, 2032, 2036, 2040, 2044, 2048, 2052, 2056, 2060, 2064, 2068, 2072, 2076, 2080, 2084, 2088, 2092, 2096]

    # Strip whole years off the day count.  Note: the loop condition uses
    # the leap flag of the *previous* iteration, exactly as the original
    # implementation did.
    year = 0
    prev_leap = 0
    while days >= 365 + prev_leap:
        prev_leap = 1 if (year + 1900) in leap_years else 0
        days -= 365 + prev_leap
        year += 1

    # Walk the months of the remaining partial year.
    month = 1
    for idx, length in enumerate(month_day_count[:11], start=1):
        feb_leap = 1 if idx == 2 and (year + 1900) in leap_years else 0
        if days <= length + feb_leap:
            break
        days -= length + feb_leap
        month = idx + 1

    #now we should have the exact date seperated in day, month and year
    return [year, month, days]
def splitDataset(dataset, splitRatio):
    """Randomly partition *dataset* into [train, test].

    The training set holds ``int(len(dataset) * splitRatio)`` rows drawn
    without replacement; the remainder becomes the test set.
    """
    target_size = int(len(dataset) * splitRatio)
    pool = list(dataset)
    training = []
    while len(training) < target_size:
        pick = random.randrange(len(pool))
        training.append(pool.pop(pick))
    return [training, pool]
def separateByClass(dataset):
    """Group the rows of *dataset* by their class value (column index 4)."""
    by_class = {}
    for row in dataset:
        by_class.setdefault(row[4], []).append(row)
    return by_class
def mean(numbers):
    """Arithmetic mean of *numbers* (float division, Python 2 safe)."""
    total = float(sum(numbers))
    return total / len(numbers)
def stdev(numbers):
    """Sample standard deviation (ddof=1); 0 for fewer than two values."""
    if len(numbers) <= 1:
        return 0
    # Mean inlined (sum / float(len)) to keep this self-contained.
    avg = sum(numbers) / float(len(numbers))
    variance = sum(pow(v - avg, 2) for v in numbers) / float(len(numbers) - 1)
    return math.sqrt(variance)
def summarize(dataset):
    """Per-attribute (mean, stdev) pairs, excluding the class column (index 4)."""
    stats = [(mean(column), stdev(column)) for column in zip(*dataset)]
    del stats[4]
    return stats
def summarizeByClass(dataset):
    """Compute per-class attribute summaries for a labelled dataset.

    NOTE: Python 2 only (`print` statement, dict.iteritems()).
    """
    separated = separateByClass(dataset)
    print separated  # NOTE(review): debug output left in; consider removing
    summaries = {}
    for classValue, instances in separated.iteritems():
        summaries[classValue] = summarize(instances)
    return summaries
def calculateProbability(x, mean, stdev):
    """Gaussian density of *x* under N(mean, stdev); 1 when stdev is 0.

    The stdev == 0 case deliberately contributes a neutral factor to the
    naive-Bayes product.
    """
    if stdev == 0:
        return 1
    exponent = math.exp(-math.pow(x - mean, 2) / (2 * math.pow(stdev, 2)))
    return exponent / (math.sqrt(2 * math.pi) * stdev)
def calculateClassProbabilities(summaries, inputVector):
    """Per-class product of per-attribute Gaussian likelihoods.

    NOTE: Python 2 only (dict.iteritems()).
    """
    probabilities = {}
    for classValue, classSummaries in summaries.iteritems():
        probabilities[classValue] = 1
        for i in range(len(classSummaries)):
            mean, stdev = classSummaries[i]  # shadows the module-level helpers
            x = inputVector[i]
            probabilities[classValue] *= calculateProbability(x, mean, stdev)
    return probabilities
def predict(summaries, inputVector):
    """Return the class with the highest naive-Bayes score for *inputVector*.

    NOTE: Python 2 only (dict.iteritems()).
    """
    probabilities = calculateClassProbabilities(summaries, inputVector)
    bestLabel, bestProb = None, -1
    for classValue, probability in probabilities.iteritems():
        if bestLabel is None or probability > bestProb:
            bestProb = probability
            bestLabel = classValue
    return bestLabel
def getPredictions(summaries, testSet):
    """Predict a class label for every row in *testSet*."""
    return [predict(summaries, row) for row in testSet]
def getAccuracy(testSet, predictions):
    """Percentage of rows whose prediction matches the true label (column 4)."""
    correct = sum(1 for i in range(len(testSet))
                  if testSet[i][4] == predictions[i])
    return (correct / float(len(testSet))) * 100.0
def reorganizeData(dataset):
    """Augment each row with the rows of the preceding days.

    For every row i (column 0 = excel day serial, column 4 = bucketed
    price, per importExcel), walk backwards one day at a time and append
    the full row recorded for each earlier day, or "unknown" placeholders
    when no row exists for that day.  Each result row starts with the
    row's own label (column 4), then the concatenated history.

    NOTE(review): the repeated `[l[0] for l in dataset]` scans make this
    O(n^2) per row; presumably acceptable for small spreadsheets — confirm.
    """
    reorganizedData = [["unknown"] for x in range(len(dataset))]
    for i in range(len(dataset)):
        # Look back over every day between this row and the oldest row.
        for j in range(1, int(dataset[i][0]-min([l[0] for l in dataset]))):
            reorganizedData[i][0] = dataset[i][4]
            if (dataset[i][0]-j) in ([l[0] for l in dataset]):
                index = [l[0] for l in dataset].index(dataset[i][0]-j)
                for k in range(0, len(dataset[index])):
                    reorganizedData[i].append(dataset[index][k])
            else:
                # Missing day: pad with placeholders of the same row width.
                for k in range(0, len(dataset[i])):
                    reorganizedData[i].append("unknown")
    return reorganizedData
def main():
    """Load the spreadsheet, train the classifier and report accuracy.

    NOTE: Python 2 only (`print` statements).
    """
    splitRatio = 0.67  # 67% training / 33% test
    dataset = importExcel(filename)
    #reorganise data to include past days
    dataset = reorganizeData(dataset)
    print dataset
    print('Loaded data file {0} with {1} rows').format(filename, len(dataset))
    trainingSet, testSet = splitDataset(dataset, splitRatio)
    print('Split {0} rows into train={1} and test={2} rows').format(len(dataset), len(trainingSet), len(testSet))
    # prepare model
    summaries = summarizeByClass(trainingSet)
    # test model
    predictions = getPredictions(summaries, testSet)
    accuracy = getAccuracy(testSet, predictions)
    print('Accuracy: {0}%').format(accuracy)
main()
|
KeepSafe/ks-email-parser | setup.py | Python | apache-2.0 | 1,170 | 0.004274 | import os
from setuptools import setup, find_packages
version = '0.3.2'
# Runtime dependencies (pinned loosely to known-compatible majors).
install_requires = [
    'Markdown < 3',
    'beautifulsoup4 < 5',
    'inlinestyler==0.2.1',
    'pystache < 0.6',
    'parse < 2'
]
# Extra dependencies for running the test suite.
tests_require = [
    'nose',
    'flake8==2.5.4',
    'coverage',
]
# Extra dependencies for building and publishing releases.
devtools_require = [
    'twine',
    'build',
]
def read(f):
    """Return the stripped contents of file *f*, resolved relative to
    this script's directory.

    BUGFIX: use a context manager so the file handle is closed promptly
    (the previous one-liner leaked the open handle until GC).
    """
    with open(os.path.join(os.path.dirname(__file__), f)) as fh:
        return fh.read().strip()
setup(
    name='ks-email-parser',
    version=version,
    description=('A command line tool to render HTML and text emails of markdown content.'),
    # NOTE(review): classifiers advertise a BSD license while `license`
    # below says Apache — one of the two is wrong; confirm and align.
    classifiers=[
        'License :: OSI Approved :: BSD License', 'Intended Audience :: Developers', 'Programming Language :: Python'
    ],
    author='Keepsafe',
    author_email='support@getkeepsafe.com',
    url='https://github.com/KeepSafe/ks-email-parser',
    license='Apache',
    packages=find_packages(),
    install_requires=install_requires,
    tests_require=tests_require,
    # Optional dependency groups: `pip install ks-email-parser[tests]` etc.
    extras_require={
        'tests': tests_require,
        'devtools': devtools_require,
    },
    # Installs the `ks-email-parser` console command.
    entry_points={'console_scripts': ['ks-email-parser = email_parser.cmd:main']},
    include_package_data=True)
|
Microsoft/ApplicationInsights-Python | django_tests/tests.py | Python | mit | 16,940 | 0.004073 | import os
import logging
import django
from django.test import TestCase, Client, modify_settings, override_settings
from applicationinsights import TelemetryClient
from applicationinsights.channel import TelemetryChannel, SynchronousQueue, SenderBase, NullSender, AsynchronousSender
from applicationinsights.channel.SenderBase import DEFAULT_ENDPOINT_URL as DEFAULT_ENDPOINT
from applicationinsights.django import common
# The middleware setting name depends on the Django version (see check below).
if django.VERSION > (1, 10):
    MIDDLEWARE_NAME = "MIDDLEWARE"
else:
    MIDDLEWARE_NAME = "MIDDLEWARE_CLASSES"
# Fake instrumentation key and ingestion endpoint used only by these tests.
TEST_IKEY = '12345678-1234-5678-9012-123456789abc'
TEST_ENDPOINT = 'https://test.endpoint/v2/track'
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
class AITestCase(TestCase):
    """Base TestCase that captures Application Insights telemetry in memory."""

    def plug_sender(self):
        """Replace the telemetry channel with one backed by a MockSender."""
        # Reset saved objects
        common.saved_clients = {}
        common.saved_channels = {}
        # Create a client and mock out the sender
        mock = MockSender()
        telemetry_client = common.create_client()
        telemetry_client._channel = TelemetryChannel(None, SynchronousQueue(mock))
        self.events = mock.events
        self.channel = telemetry_client.channel

    def get_events(self, count):
        """Flush the channel and return exactly *count* captured events.

        Returns the single event when count == 1, else the full list.
        """
        self.channel.flush()
        self.assertEqual(len(self.events), count, "Expected %d event(s) in queue (%d actual)" % (count, len(self.events)))
        return self.events[0] if count == 1 else self.events
@modify_settings(**{MIDDLEWARE_NAME: {'append': 'applicationinsights.django.ApplicationInsightsMiddleware'}})
@override_settings(
APPLICATION_INSIGHTS={'ikey': TEST_IKEY},
# Templates for 1.7
TEMPLATE_DIRS=(PROJECT_ROOT,),
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
# Templates for 1.8 and up
TEMPLATES=[{'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [PROJECT_ROOT]}])
class MiddlewareTests(AITestCase):
def setUp(self):
self.plug_sender()
def test_basic_request(self):
"""Tests that hitting a simple view generates a telemetry item with the correct properties"""
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
event = self.get_events(1)
tags = event['tags']
data = event['data']['baseData']
self.assertEqual(event['name'], 'Microsoft.ApplicationInsights.Request', "Event type")
self.assertEqual(event['iKey'], TEST_IKEY)
self.assertEqual(tags['ai.operation.name'], 'GET /', "Operation name")
self.assertEqual(data['name'], 'GET /', "Request name")
self.assertEqual(data['responseCode'], 200, "Status code")
self.assertEqual(data['success'], True, "Success value")
self.assertEqual(data['url'], 'http://testserver/', "Request url")
def test_logger(self):
"""Tests that traces logged from inside of a view are submitted and parented to the request telemetry item"""
response = self.client.get('/logger')
self.assertEqual(response.status_code, 200)
logev, reqev = self.get_events(2)
# Check request event (minimal, since we validate this elsewhere)
tags = reqev['tags']
data = reqev['data']['baseData']
reqid = tags['ai.operation.id']
self.assertEqual(reqev['name'], 'Microsoft.ApplicationInsights.Request', "Event type")
self.assertEqual(data['id'], reqid, "Request id")
self.assertEqual(data['name'], 'GET /logger', "Operation name")
self.assertEqual(data['url'], 'http://testserver/logger', "Request url")
self.assertTrue(reqid, "Request id not empty")
# Check log event
tags = logev['tags']
data = logev['data']['baseData']
self.assertEqual(logev['name'], 'Microsoft.ApplicationInsights.Message', "Event type")
self.assertEqual(logev['iKey'], TEST_IKEY)
self.assertEqual(tags['ai.operation.parentId'], reqid, "Parent id")
self.assertEqual(data['message'], 'Logger message', "Log message")
self.assertEqual(data['properties']['property'], 'value', "Property=value")
def test_thrower(self):
"""Tests that unhandled exceptions generate an exception telemetry item parented to the request telemetry item"""
with self.assertRaises(ValueError):
self.client.get('/thrower')
errev, reqev = self.get_events(2)
# Check request event
tags = reqev['tags']
data = reqev['data']['baseData']
reqid = tags['ai.operation.id']
self.assertEqual(reqev['name'], 'Microsoft.ApplicationInsights.Request', "Event type")
self.assertEqual(reqev['iKey'], TEST_IKEY)
self.assertEqual(data['id'], reqid, "Request id")
self.assertEqual(data['responseCode'], 500, "Response code")
self.assertEqual(data['success'], False, "Success value")
self.assertEqual(data['name'], 'GET /thrower', "Request name")
self.assertEqual(data['url'], 'http://testserver/thrower', "Request url")
self.assertTrue(reqid, "Request id not empty")
# Check exception event
tags = errev['tags']
data = errev['data']['baseData']
self.assertEqual(errev['name'], 'Microsoft.ApplicationInsights.Exception', "Event type")
self.assertEqual(tags['ai.operation.parentId'], reqid, "Exception parent id")
self.assertEqual(len(data['exceptions']), 1, "Exception count")
exc = data['exceptions'][0]
self.assertEqual(exc['typeName'], 'ValueError', "Exception type")
self.assertEqual(exc['hasFullStack'], True, "Has full stack")
self.assertEqual(exc['parsedStack'][0]['method'], 'thrower', "Stack frame method name")
def test_error(self):
"""Tests that Http404 exception does not generate an exception event
and the request telemetry item properly logs the failure"""
response = self.client.get("/errorer")
self.assertEqual(response.status_code, 404)
event = self.get_events(1)
tags = event['tags']
data = event['data']['baseData']
self.assertEqual(event['name'], 'Microsoft.ApplicationInsights.Request', "Event type")
self.assertEqual(tags['ai.operation.name'], 'GET /errorer', "Operation name")
self.assertEqua | l(data['responseCode'], 404, "Status code")
self.assertEqual(data['success'], False, "Success value")
self.assertEqual(data['url'], 'http://testserver/errorer', "Request url")
def test_template(self):
"""Tests that views using templates operate correctly and that template data is logged"""
response = self.client.get("/templater/ctx")
self.assertEqual(response.status_code, 200)
event = se | lf.get_events(1)
data = event['data']['baseData']
self.assertEqual(event['name'], 'Microsoft.ApplicationInsights.Request', "Event type")
self.assertEqual(data['success'], True, "Success value")
self.assertEqual(data['responseCode'], 200, "Status code")
self.assertEqual(data['properties']['template_name'], 'template.html', "Template name")
def test_no_view_arguments(self):
"""Tests that view id logging is off by default"""
self.plug_sender()
response = self.client.get('/getid/24')
self.assertEqual(response.status_code, 200)
event = self.get_events(1)
data = event['data']['baseData']
self.assertEqual(event['name'], 'Microsoft.ApplicationInsights.Request', "Event type")
self.assertTrue('properties' not in data or 'view_arg_0' not in data['properties'])
def test_no_view(self):
"""Tests that requests to URLs not backed by views are still logged"""
response = self.client.get('/this/view/does/not/exist')
self.assertEqual(response.status_code, 404)
event = self.get_events(1)
tags = event['tags']
data = event['data']['baseData']
self.assertEqual(event['name'], 'Microsoft.ApplicationInsights.Request', "Event type")
self.assertEqual(data['responseCode'], 404, "Status code")
self.assertEqual(data['success'], False, "Success value")
self.assertEqual(data['name'], 'GET /this/view/does/not/exist |
cloudera/hue | apps/impala/src/impala/dbms.py | Python | apache-2.0 | 8,037 | 0.009332 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from desktop.conf import CLUSTER_ID, has_connectors
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_str
from desktop.models import Cluster
from beeswax.design import hql_query
from beeswax.models import QUERY_TYPES
from beeswax.server import dbms
from beeswax.server.dbms import HiveServer2Dbms, QueryS | erverException, QueryServerTimeoutException, \
get_query_server_config as beeswax_query_server_config, get_query_server_config_via_connector
from impala import conf
from impala.impala_flags imp | ort get_hs2_http_port
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
def get_query_server_config(connector=None):
    """Return the connection-settings dict for the Impala query server.

    When connectors are enabled and one is supplied, the settings come from
    the connector; otherwise they are assembled from the Impala section of
    the Hue configuration. The settings (with the password redacted to a
    boolean) are logged at DEBUG level before being returned.
    """
    if connector and has_connectors():
        query_server = get_query_server_config_via_connector(connector)
    else:
        if conf.USE_THRIFT_HTTP.get() and not conf.PROXY_ENDPOINT.get():
            server_port = get_hs2_http_port()
        else:
            server_port = conf.SERVER_PORT.get()
        scheme = 'https' if conf.SSL.ENABLED.get() else 'http'
        http_url = '%s://%s:%s%s' % (
            scheme, conf.SERVER_HOST.get(), server_port, conf.PROXY_ENDPOINT.get())
        query_server = {
            'server_name': 'impala',
            'dialect': 'impala',
            'server_host': conf.SERVER_HOST.get(),
            'server_port': server_port,
            'principal': conf.IMPALA_PRINCIPAL.get(),
            'http_url': http_url,
            'impersonation_enabled': conf.IMPERSONATION_ENABLED.get(),
            'querycache_rows': conf.QUERYCACHE_ROWS.get(),
            'QUERY_TIMEOUT_S': conf.QUERY_TIMEOUT_S.get(),
            'SESSION_TIMEOUT_S': conf.SESSION_TIMEOUT_S.get(),
            'auth_username': conf.AUTH_USERNAME.get(),
            'auth_password': conf.AUTH_PASSWORD.get(),
            'use_sasl': conf.USE_SASL.get(),
            'transport_mode': 'http' if conf.USE_THRIFT_HTTP.get() else 'socket',
        }

    # Log a redacted copy: the password is replaced by a "was it set" flag.
    redacted = dict(query_server)
    redacted['auth_password_used'] = bool(redacted.pop('auth_password'))
    LOG.debug("Query Server: %s" % redacted)

    return query_server
class ImpalaDbms(HiveServer2Dbms):
    """Impala-specific Dbms layer: nested-type SELECT helpers, metadata
    invalidation/refresh, histogram queries, and query profile retrieval."""

    @classmethod
    def get_nested_select(cls, database, table, column, nested=None):
        """
        Given a column or nested type, return the corresponding SELECT and
        FROM clauses in Impala's nested-type syntax.

        ``nested`` is a '/'-separated path into the type, e.g.
        'addresses/item/street'; the reserved tokens key/value/item mark a
        map/array traversal and shift the path into the FROM clause.
        """
        select_tokens = [column]
        from_tokens = [database, table]
        if nested:
            nested_tokens = nested.strip('/').split('/')
            while nested_tokens:
                token = nested_tokens.pop(0)
                if token not in ['key', 'value', 'item']:
                    select_tokens.append(token)
                else:
                    # if we encounter a reserved keyword, move current
                    # select_tokens to from_tokens and reset the select_tokens
                    from_tokens.extend(select_tokens)
                    select_tokens = []
                    # if the reserved keyword is the last token, make it the
                    # only select_token, otherwise we ignore it and continue
                    if not nested_tokens:
                        select_tokens = [token]
        select_clause = '.'.join(select_tokens)
        from_clause = '.'.join('`%s`' % token.strip('`') for token in from_tokens)
        return select_clause, from_clause

    @classmethod
    def get_histogram_query(cls, database, table, column, nested=None):
        """Return a ``SELECT histogram(...)`` statement for the given column
        or nested type."""
        select_clause, from_clause = cls.get_nested_select(database, table, column, nested)
        return 'SELECT histogram(%s) FROM %s' % (select_clause, from_clause)

    # Deprecated
    def invalidate(self, database=None, table=None, flush_all=False):
        """Run INVALIDATE METADATA: globally, for one table, or for the
        tables that differ between the Hive and Impala catalogs.

        Timeouts and PopupExceptions propagate unchanged; everything else is
        wrapped in QueryServerException.
        """
        handle = None
        try:
            if flush_all or database is None:
                hql = "INVALIDATE METADATA"
                query = hql_query(hql, query_type=QUERY_TYPES[1])
                handle = self.execute_and_wait(query, timeout_sec=10.0)
            elif table is None:
                if not Cluster(self.client.user).get_app_config().get_hive_metastore_interpreters():
                    raise PopupException(_("Hive and HMS not configured. Please do a full refresh"))
                diff_tables = self._get_different_tables(database)
                if len(diff_tables) > 10:
                    raise PopupException(_("Too many tables (%d) to invalidate. Please do a full refresh") % len(diff_tables))
                else:
                    for diff_table in diff_tables:
                        hql = "INVALIDATE METADATA `%s`.`%s`" % (database, diff_table)
                        query = hql_query(hql, query_type=QUERY_TYPES[1])
                        handle = self.execute_and_wait(query, timeout_sec=10.0)
                        # Close every handle as we go: previously only the
                        # last handle of the loop was released in `finally`,
                        # leaking the earlier ones.
                        self.close(handle)
                        handle = None
            else:
                hql = "INVALIDATE METADATA `%s`.`%s`" % (database, table)
                query = hql_query(hql, query_type=QUERY_TYPES[1])
                handle = self.execute_and_wait(query, timeout_sec=10.0)
        except QueryServerTimeoutException as e:
            # Allow timeout exceptions to propagate
            raise e
        except PopupException as e:
            raise e
        except Exception as e:
            msg = 'Failed to invalidate `%s`: %s' % (database or 'databases', e)
            raise QueryServerException(msg)
        finally:
            if handle:
                self.close(handle)

    def refresh_table(self, database, table):
        """Run REFRESH on `database`.`table` to pick up new data files."""
        handle = None
        try:
            hql = "REFRESH `%s`.`%s`" % (database, table)
            query = hql_query(hql, database, query_type=QUERY_TYPES[1])
            handle = self.execute_and_wait(query, timeout_sec=10.0)
        except Exception as e:
            # Include the underlying error: `e` was previously captured but
            # dropped from the message (inconsistent with invalidate()),
            # which made failures impossible to diagnose.
            msg = 'Failed to refresh `%s`.`%s`: %s' % (database, table, e)
            raise QueryServerException(msg)
        finally:
            if handle:
                self.close(handle)

    def get_histogram(self, database, table, column, nested=None):
        """
        Returns the results of an Impala SELECT histogram() FROM query for a
        given column or nested type.

        Assumes that the column/nested type is scalar. Returns a list of the
        unique values found, or an empty list on failure.
        """
        results = []
        hql = self.get_histogram_query(database, table, column, nested)
        query = hql_query(hql)
        handle = self.execute_and_wait(query, timeout_sec=5.0)
        if handle:
            result = self.fetch(handle)
            try:
                histogram = list(result.rows())[0][0]  # actual histogram results is in first-and-only result row
                unique_values = set(histogram.split(', '))
                results = list(unique_values)
            except IndexError as e:
                LOG.warning('Failed to get histogram results, result set has unexpected format: %s' % smart_str(e))
            finally:
                self.close(handle)
        return results

    def get_exec_summary(self, query_handle, session_handle):
        """Return Impala's exec summary for the given query."""
        return self.client._client.get_exec_summary(query_handle, session_handle)

    def get_runtime_profile(self, query_handle, session_handle):
        """Return Impala's runtime profile for the given query."""
        return self.client._client.get_runtime_profile(query_handle, session_handle)

    def _get_beeswax_tables(self, database):
        # Ask the first configured Hive (beeswax) interpreter for its tables.
        beeswax_query_server = dbms.get(
            user=self.client.user,
            query_server=beeswax_query_server_config(
                name=Cluster(self.client.user).get_app_config().get_hive_metastore_interpreters()[0]
            )
        )
        return beeswax_query_server.get_tables(database=database)

    def _get_different_tables(self, database):
        # Tables present in exactly one of the Hive/Impala catalogs.
        beeswax_tables = self._get_beeswax_tables(database)
        impala_tables = self.get_tables(database=database)
        return set(beeswax_tables).symmetric_difference(impala_tables)
|
theskumar-archive/flask-api | flask_api/tests/test_app.py | Python | bsd-2-clause | 4,692 | 0.001279 | # coding: utf8
from __future__ import unicode_literals
from flask import abort, make_response, request
from flask_api.decorators import set_renderers
from flask_api import exceptions, renderers, status, FlaskAPI
import json
import unittest
# Application under test; TESTING makes errors propagate to the test client.
app = FlaskAPI(__name__)
app.config['TESTING'] = True
class JSONVersion1(renderers.JSONRenderer):
    """JSON renderer whose media type carries api-version="1.0"."""
    media_type = 'application/json; api-version="1.0"'
class JSONVersion2(renderers.JSONRenderer):
    """JSON renderer whose media type carries api-version="2.0"."""
    media_type = 'application/json; api-version="2.0"'
@app.route('/set_status_and_headers/')
def set_status_and_headers():
    """Return a (body, status, headers) tuple; all three end up on the response."""
    return (
        {'example': 'content'},
        status.HTTP_201_CREATED,
        {'Location': 'http://example.com/456'},
    )
@app.route('/set_headers/')
def set_headers():
    """Return a (body, headers) pair; the status defaults to 200."""
    return {'example': 'content'}, {'Location': 'http://example.com/456'}
@app.route('/make_response_view/')
def make_response_view():
    """Build the response explicitly with make_response, then attach a header."""
    resp = make_response({'example': 'content'})
    resp.headers['Location'] = 'http://example.com/456'
    return resp
@app.route('/api_exception/')
def api_exception():
    """Raise a Flask-API exception; rendered as a JSON 403 error."""
    raise exceptions.PermissionDenied()
@app.route('/abort_view/')
def abort_view():
    """Abort with a bare 403 via flask.abort."""
    abort(status.HTTP_403_FORBIDDEN)
@app.route('/options/')
def options_view():
    """Empty JSON body; used to exercise the implicit OPTIONS handling."""
    return {}
@app.route('/accepted_media_type/')
@set_renderers([JSONVersion2, JSONVersion1])
def accepted_media_type():
    """Echo the media type negotiated from the Accept header.

    JSONVersion2 is listed first, so it is the default renderer.
    """
    return {'accepted_media_type': str(request.accepted_media_type)}
class AppTests(unittest.TestCase):
    """End-to-end tests for the example Flask-API views defined above."""

    def test_set_status_and_headers(self):
        """A (body, status, headers) tuple sets all three on the response."""
        with app.test_client() as client:
            response = client.get('/set_status_and_headers/')
            self.assertEqual(response.status_code, status.HTTP_201_CREATED)
            self.assertEqual(response.headers['Location'], 'http://example.com/456')
            self.assertEqual(response.content_type, 'application/json')
            # Repaired: this literal was corrupted ("c | ontent") in the source.
            expected = '{"example": "content"}'
            self.assertEqual(response.get_data().decode('utf8'), expected)

    def test_set_headers(self):
        """A (body, headers) pair keeps the default 200 status."""
        with app.test_client() as client:
            response = client.get('/set_headers/')
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            self.assertEqual(response.headers['Location'], 'http://example.com/456')
            self.assertEqual(response.content_type, 'application/json')
            expected = '{"example": "content"}'
            # Repaired: the method name was corrupted ("asser | tEqual") in the source.
            self.assertEqual(response.get_data().decode('utf8'), expected)

    def test_make_response(self):
        """make_response() output passes through with its extra header."""
        with app.test_client() as client:
            response = client.get('/make_response_view/')
            self.assertEqual(response.content_type, 'application/json')
            self.assertEqual(response.headers['Location'], 'http://example.com/456')
            self.assertEqual(response.content_type, 'application/json')
            expected = '{"example": "content"}'
            self.assertEqual(response.get_data().decode('utf8'), expected)

    def test_api_exception(self):
        """APIException subclasses render as JSON error bodies."""
        with app.test_client() as client:
            response = client.get('/api_exception/')
            self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
            self.assertEqual(response.content_type, 'application/json')
            expected = '{"message": "You do not have permission to perform this action."}'
            self.assertEqual(response.get_data().decode('utf8'), expected)

    def test_abort_view(self):
        """flask.abort() produces the expected status code."""
        with app.test_client() as client:
            response = client.get('/abort_view/')
            self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_options_view(self):
        """OPTIONS responses must have a readable (non-None) body."""
        with app.test_client() as client:
            response = client.options('/options/')
            # Errors if `response.response` is `None`
            response.get_data()

    def test_accepted_media_type_property(self):
        """request.accepted_media_type reflects content negotiation."""
        with app.test_client() as client:
            # Explicitly request the "api-version 1.0" renderer.
            headers = {'Accept': 'application/json; api-version="1.0"'}
            response = client.get('/accepted_media_type/', headers=headers)
            data = json.loads(response.get_data().decode('utf8'))
            expected = {'accepted_media_type': 'application/json; api-version="1.0"'}
            self.assertEqual(data, expected)

            # Request the default renderer, which is "api-version 2.0".
            headers = {'Accept': '*/*'}
            response = client.get('/accepted_media_type/', headers=headers)
            data = json.loads(response.get_data().decode('utf8'))
            expected = {'accepted_media_type': 'application/json; api-version="2.0"'}
            self.assertEqual(data, expected)
|
rackerlabs/django-DefectDojo | dojo/db_migrations/0063_jira_refactor.py | Python | bsd-3-clause | 2,280 | 0.002632 | # Generated by Django 2.2.16 on 2020-11-07 11:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """JIRA refactor: drop obsolete models, rename JIRA_PKey/JIRA_Conf to
    JIRA_Project/JIRA_Instance, and add issue timestamps and links."""

    dependencies = [
        ('dojo', '0062_add_vuln_id_from_tool'),
    ]

    operations = [
        migrations.DeleteModel(
            name='JIRA_Clone',
        ),
        migrations.DeleteModel(
            name='JIRA_Details_Cache',
        ),
        migrations.RenameModel(
            old_name='JIRA_PKey',
            new_name='JIRA_Project',
        ),
        migrations.AddField(
            model_name='jira_issue',
            name='jira_change',
            field=models.DateTimeField(help_text='The date the linked Jira issue was last modified.', null=True, verbose_name='Jira last update'),
        ),
        migrations.AddField(
            model_name='jira_issue',
            name='jira_creation',
            field=models.DateTimeField(help_text='The date a Jira issue was created from this finding.', null=True, verbose_name='Jira creation'),
        ),
        migrations.RenameModel(
            old_name='JIRA_Conf',
            new_name='JIRA_Instance',
        ),
        migrations.RenameField(
            model_name='jira_project',
            old_name='conf',
            new_name='jira_instance',
        ),
        migrations.AddField(
            model_name='jira_issue',
            name='jira_project',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='dojo.JIRA_Project'),
        ),
        migrations.AddField(
            model_name='JIRA_Project',
            name='engagement',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dojo.Engagement'),
        ),
        migrations.AlterField(
            model_name='JIRA_Project',
            name='product',
            # Repaired: a stray '|' corrupted this line in the source.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='dojo.Product'),
        ),
        migrations.AlterField(
            model_name='jira_project',
            name='jira_instance',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dojo.JIRA_Instance', verbose_name='JIRA Instance'),
        ),
    ]
|
unioslo/cerebrum | Cerebrum/modules/no/uio/randsone_ldif.py | Python | gpl-2.0 | 1,562 | 0 | # -*- coding: utf-8 -*-
#
# Copyright 2017-2020 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Mixin for contrib/no/uio/generate_randsone_ldif.py."""
from Cerebrum.modules.no.OrgLDIF import norEduLDIFMixin
class RandsoneOrgLdif(norEduLDIFMixin):  # noqa: N801
    """OrgLDIF mixin that restricts the OU tree to the subtree rooted at
    ``self.root_ou_id`` and exports no contact/title/address details."""

    def init_ou_structure(self):
        # Change from original: Drop OUs outside self.root_ou_id subtree.
        super(RandsoneOrgLdif, self).init_ou_structure()
        ous, tree = [self.root_ou_id], self.ou_tree
        # Breadth-first walk: extending `ous` while iterating it is deliberate.
        for ou in ous:
            ous.extend(tree.get(ou, ()))
        self.ou_tree = dict((ou, tree[ou]) for ou in ous if ou in tree)

    def init_attr2id2contacts(self):
        # Export no contact attributes for this subtree.
        self.attr2id2contacts = {}
        self.id2labeledURI = {}

    def init_person_titles(self):
        self.person_titles = {}

    def init_person_addresses(self):
        # Repaired: a stray '|' corrupted this assignment in the source.
        self.addr_info = {}
|
andrewjw/airplay2sonos | airplay2sonos/__init__.py | Python | gpl-2.0 | 689 | 0.001451 | # airplay2sonos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
from main import main
|
Tarrasch/luigi | luigi/tools/deps.py | Python | apache-2.0 | 4,717 | 0.00318 | #!/usr/bin/env python
# Finds all tasks and task outputs on the dependency paths from the given downstream task T
# up to the given source/upstream task S (optional). If the upstream task is not given,
# all upstream tasks on all dependancy paths of T will be returned.
# Terms:
# if the execution of Task T depends on the output of task S on a dependancy graph,
# T is called a downstream/sink task, S is called an upstream/source task.
# This is useful and practical way to find all upstream tasks of task T.
# For example suppose you have a daily computation that starts with a task named Daily.
# And suppose you have another task named Aggregate. Daily triggers a few tasks
# which eventually trigger Aggregate. Now, suppose you find a bug in Aggregate.
# You fixed the bug and now you want to rerun it, including all it's upstream deps.
#
# To do that you run:
# bin/deps.py --module daily_module Aggregate --daily-param1 xxx --upstream-family Daily
#
# This will output all the tasks on the dependency path between Daily and Aggregate. In
# effect, this is how you find all upstream tasks for Aggregate. Now you can delete its
# output and run Aggregate again. Daily will eventually trigget Aggregate and all tasks on
# the way.
#
# The same code here might be used as a CLI tool as well as a python module.
# In python, invoke find_deps(task, upstream_name) to get a set of all task instances on the
# paths between task T and upstream task S. You can then use the task instances to delete their output or
# perform other computation based on that.
#
# Example:
#
# PYTHONPATH=$PYTHONPATH:/path/to/your/luigi/tasks bin/deps.py \
# --module my.tasks MyDownstreamTask
# --downstream_task_param1 123456
# [--upstream-family MyUpstreamTask]
#
from __future__ import print_function
import luigi.interface
from luigi.contrib.ssh import RemoteTarget
from luigi.contrib.postgres import PostgresTarget
from luigi.contrib.s3 import S3Target
from luigi.target import FileSystemTarget
from luigi.task import flatten
from luigi import parameter
import sys
from luigi.cmdline_parser import CmdlineParser
import collections
def get_task_requires(task):
    """Return the direct requirements of *task*, flattened into a set."""
    required = flatten(task.requires())
    return set(required)
def dfs_paths(start_task, goal_task_family, path=None):
    """Depth-first search over the dependency graph.

    Yields every task on every path from ``start_task`` to a task whose
    family is ``goal_task_family``; when the goal is None, every reachable
    upstream task is yielded. Tasks on several paths are yielded repeatedly.
    """
    if path is None:
        path = [start_task]
    if start_task.task_family == goal_task_family or goal_task_family is None:
        for item in path:
            yield item
    # `path` doubles as the visited set, so cycles are not re-entered.
    # (`child` replaces the original loop name, which shadowed builtin `next`,
    # and the recursive call line was corrupted by a stray '|' in the source.)
    for child in get_task_requires(start_task) - set(path):
        for t in dfs_paths(child, goal_task_family, path + [child]):
            yield t
class upstream(luigi.task.Config):
    '''
    Used to provide the parameter upstream-family
    '''
    # Task family of the upstream/source task S; None means "no filter",
    # i.e. walk every upstream path of the downstream task.
    family = parameter.Parameter(default=None)
def find_deps(task, upstream_task_family):
    '''
    Finds all dependencies that start with the given task and have a path
    to upstream_task_family

    Returns all deps on all paths between task and upstream
    '''
    # set() consumes the generator directly; the intermediate list
    # comprehension of the original was unnecessary.
    return set(dfs_paths(task, upstream_task_family))
def find_deps_cli():
    '''
    Finds all tasks on all paths from provided CLI task
    '''
    cmdline_args = sys.argv[1:]
    # Build the downstream task object from the CLI arguments, then walk its
    # dependencies up to the (optional) --upstream-family task.
    with CmdlineParser.global_instance(cmdline_args) as cp:
        return find_deps(cp.get_task_obj(), upstream().family)
def get_task_output_description(task_output):
    '''
    Returns a task's output as a string
    '''
    # Order matters: S3Target must be tested before the more general
    # FileSystemTarget, mirroring the original isinstance chain.
    if isinstance(task_output, RemoteTarget):
        return "[SSH] {0}:{1}".format(task_output._fs.remote_context.host, task_output.path)
    if isinstance(task_output, S3Target):
        return "[S3] {0}".format(task_output.path)
    if isinstance(task_output, FileSystemTarget):
        return "[FileSystem] {0}".format(task_output.path)
    if isinstance(task_output, PostgresTarget):
        return "[DB] {0}:{1}".format(task_output.host, task_output.table)
    return "to be determined"
def main():
    """CLI entry point: print every task (and its outputs) on the
    dependency paths of the task given on the command line."""
    # `collections.Iterable` was removed in Python 3.10; import the ABC from
    # collections.abc with a fallback for Python 2.
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2
        from collections import Iterable
    deps = find_deps_cli()
    for task in deps:
        task_output = task.output()
        if isinstance(task_output, dict):
            output_descriptions = [get_task_output_description(output) for label, output in task_output.items()]
        elif isinstance(task_output, Iterable):
            output_descriptions = [get_task_output_description(output) for output in task_output]
        else:
            output_descriptions = [get_task_output_description(task_output)]
        print(" TASK: {0}".format(task))
        for desc in output_descriptions:
            print(" : {0}".format(desc))
main()
|
dgoldin/snakebite | doc/source/conf.py | Python | apache-2.0 | 8,202 | 0.007071 | # -*- coding: utf-8 -*-
#
# snakebite documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 30 11:39:44 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
import snakebite.version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directori | es to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../snakebite'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.ifconfig', 'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'snakebite'
copyright = u'2013 - 2014, Spotify AB'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = snakebite.version.version()
# The full version, including alpha/beta/rc tags.
release = snakebite.version.version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'snakebitedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'snakebite.tex', u'snakebite Documentation',
u'Wouter de Bie', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'snakebite', u'snakebite Documentation',
[u'Wouter de Bie'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'snakebite', u'snakebite Documentation',
u'Wouter de Bie', 'snakebite', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autoc |
VTabolin/vmware-dvs | vmware_dvs/agent/dvs_neutron_agent.py | Python | apache-2.0 | 17,391 | 0.000115 | # Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import signal
import sys
import time
import itertools
from neutron import context
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.agent.common import polling
from neutron.common import config as common_config
from neutron.common import constants as n_const
from neutron.common import utils
from neutron.common import topics
from neutron._i18n import _, _LE, _LI
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
import oslo_messaging
from vmware_dvs.utils import dvs_util
from vmware_dvs.common import constants as dvs_const, exceptions
from vmware_dvs.api import dvs_agent_rpc_api
from vmware_dvs.agent.firewalls import dvs_securitygroup_rpc as dvs_rpc
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'vmware_dvs.common.config')
class DVSPluginApi(agent_rpc.PluginApi):
    """Agent-side RPC proxy to the Neutron plugin; no DVS-specific additions."""
    pass
class DVSAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
dvs_agent_rpc_api.ExtendAPI):
target = oslo_messaging.Target(version='1.2')
    def __init__(self, vsphere_hostname, vsphere_login, vsphere_password,
                 bridge_mappings, polling_interval, quitting_rpc_timeout=None):
        """Set up agent state reporting, RPC plumbing, the security-group
        agent and the physnet->DVS controller map.

        :param vsphere_hostname: vCenter host, reported in the agent state
        :param vsphere_login: vCenter user (not used directly here;
            presumably consumed via configuration — confirm)
        :param vsphere_password: vCenter password (likewise unused here)
        :param bridge_mappings: physnet bridge mappings, reported upstream
        :param polling_interval: seconds between daemon-loop iterations
        :param quitting_rpc_timeout: RPC timeout applied while shutting down
        """
        super(DVSAgent, self).__init__()
        self.agent_state = {
            'binary': 'neutron-dvs-agent',
            'host': cfg.CONF.host,
            'topic': n_const.L2_AGENT_TOPIC,
            'configurations': {'bridge_mappings': bridge_mappings,
                               'vsphere_hostname': vsphere_hostname,
                               'log_agent_heartbeats':
                                   cfg.CONF.AGENT.log_agent_heartbeats},
            'agent_type': 'DVS agent',
            'start_flag': True}
        report_interval = cfg.CONF.AGENT.report_interval
        self.polling_interval = polling_interval
        # Security group agent support
        self.context = context.get_admin_context_without_session()
        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
        self.sg_agent = dvs_rpc.DVSSecurityGroupRpc(self.context,
            self.sg_plugin_rpc, defer_refresh_firewall=True)
        self.setup_rpc()
        # Periodically push self.agent_state to the plugin.
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)
        self.run_daemon_loop = True
        self.iter_num = 0
        self.fullsync = True
        self.quitting_rpc_timeout = quitting_rpc_timeout
        # physical network name -> DVS controller object.
        self.network_map = dvs_util.create_network_map_from_config(
            cfg.CONF.ML2_VMWARE, pg_cache=True)
        # Port-id bookkeeping sets driving the processing loop.
        self.updated_ports = set()
        self.deleted_ports = set()
        self.known_ports = set()
        self.added_ports = set()
        self.booked_ports = set()
        # The initialization is complete; we can start receiving messages
        # (self.connection is presumably created inside setup_rpc() — confirm).
        self.connection.consume_in_threads()
    @dvs_util.wrap_retry
    def create_network_precommit(self, current, segment):
        """Create the DVS network backing *current* on *segment*.

        Best effort: a missing physnet->DVS mapping or an invalid network is
        logged/ignored instead of failing the ML2 operation.
        """
        try:
            dvs = self._lookup_dvs_for_context(segment)
        except exceptions.NoDVSForPhysicalNetwork as e:
            # NOTE(review): `e.message` is Python 2 style — confirm the
            # exception class still exposes it on Python 3.
            LOG.info(_LI('Network %(id)s not created. Reason: %(reason)s') % {
                'id': current['id'],
                'reason': e.message})
        except exceptions.InvalidNetwork:
            # Deliberately ignored: nothing to create for invalid networks.
            pass
        else:
            dvs.create_network(current, segment)
@dvs_util.wrap_retry
def delete_network_postcommit(self, current, segment):
try:
dvs = self._lookup_dvs_for_context(segment)
except exceptions.NoDVSForPhysicalNetwork as e:
LOG.info(_LI('Network %(id)s not deleted. Reason: %(reason)s') % {
'id': current['id'],
'reason': e.message})
except exceptions.InvalidNetwork:
pass
else:
dvs. | delete_network(current)
@dvs_util.wrap_retry
def update_network_precommit(self, current, segment, original):
try:
dvs = self._lookup_dvs_for_context(segment)
except (exceptions.NoDVSForPhysicalNetwork) as e:
LOG.info(_LI('Network %(id)s not updated. Reason: %(reason)s') % {
'id': current['id'],
'reason': e.message})
except exceptio | ns.InvalidNetwork:
pass
else:
dvs.update_network(current, original)
    @dvs_util.wrap_retry
    def book_port(self, current, network_segments, network_current):
        """Reserve a DVS port for port *current* on the segment whose
        physical network matches the network's; return the port or None.

        If several segments share the physnet, the last match wins.
        """
        physnet = network_current['provider:physical_network']
        dvs = None
        dvs_segment = None
        for segment in network_segments:
            if segment['physical_network'] == physnet:
                dvs = self._lookup_dvs_for_context(segment)
                dvs_segment = segment
        if dvs:
            port = dvs.book_port(network_current, current['id'],
                dvs_segment, current.get('portgroup_name'))
            # Remember the reservation so update_port_postcommit can promote
            # it to an "added" port.
            self.booked_ports.add(current['id'])
            return port
        return None
@dvs_util.wrap_retry
def update_port_postcommit(self, current, original, segment):
try:
dvs = self._lookup_dvs_for_context(segment)
if current['id'] in self.booked_ports:
self.added_ports.add(current['id'])
self.booked_ports.discard(current['id'])
except exceptions.NoDVSForPhysicalNetwork:
raise exceptions.InvalidSystemState(details=_(
'Port %(port_id)s belong to VMWare VM, but there is '
'no mapping from network to DVS.') % {'port_id': current['id']}
)
else:
self._update_admin_state_up(dvs, original, current)
def delete_port_postcommit(self, current, original, segment):
try:
dvs = self._lookup_dvs_for_context(segment)
except exceptions.NoDVSForPhysicalNetwork:
raise exceptions.InvalidSystemState(details=_(
'Port %(port_id)s belong to VMWare VM, but there is '
'no mapping from network to DVS.') % {'port_id': current['id']}
)
else:
if sg_rpc.is_firewall_enabled():
key = current.get(
'binding:vif_details', {}).get('dvs_port_key')
if key:
dvs.remove_block(key)
else:
dvs.release_port(current)
def _lookup_dvs_for_context(self, segment):
physical_network = segment['physical_network']
try:
return self.network_map[physical_network]
except KeyError:
LOG.debug('No dvs mapped for physical '
'network: %s' % physical_network)
raise exceptions.NoDVSForPhysicalNetwork(
physical_network=physical_network)
def _update_admin_state_up(self, dvs, original, current):
try:
original_admin_state_up = original['admin_state_up']
except KeyError:
pass
else:
current_admin_state_up = current['admin_state_up']
perform = current_admin_state_up != original_admin_state_up
if perform:
dvs.switch_port_blocked_state(current)
def _report_state(self):
try:
agent_status = self.state_rpc.report_state(self.context,
self.agent_state,
True)
if agent_status == n_const.AGENT_REVIVED:
|
leapcode/soledad | tests/benchmarks/test_crypto.py | Python | gpl-3.0 | 3,238 | 0 | """
Benchmarks for crypto operations.
If you don't want to stress your local machine too much, you can pass the
SIZE_LIMT environment variable.
For instance, to keep the maximum payload at 1MB:
SIZE_LIMIT=1E6 py.test -s tests/perf/test_crypto.py
"""
import pytest
import os
import json
from uuid import uuid4
from leap.soledad.common.document import SoledadDocument
from leap.soledad.client import _crypto
LIMIT = int(float(os.environ.get('SIZE_LIMIT', 50 * 1000 * 1000)))
def create_doc_encryption(size):
@pytest.mark.benchmark(group="test_crypto_encrypt_doc")
@pytest.inlineCallbacks |
def test_doc_encryption(soledad_client, txbenchmark, payload):
"""
Encrypt a document of a given size.
"""
crypto = soledad_client | ()._crypto
DOC_CONTENT = {'payload': payload(size)}
doc = SoledadDocument(
doc_id=uuid4().hex, rev='rev',
json=json.dumps(DOC_CONTENT))
yield txbenchmark(crypto.encrypt_doc, doc)
return test_doc_encryption
# TODO this test is really bullshit, because it's still including
# the json serialization.
def create_doc_decryption(size):
@pytest.inlineCallbacks
@pytest.mark.benchmark(group="test_crypto_decrypt_doc")
def test_doc_decryption(soledad_client, txbenchmark, payload):
"""
Decrypt a document of a given size.
"""
crypto = soledad_client()._crypto
DOC_CONTENT = {'payload': payload(size)}
doc = SoledadDocument(
doc_id=uuid4().hex, rev='rev',
json=json.dumps(DOC_CONTENT))
encrypted_doc = yield crypto.encrypt_doc(doc)
doc.set_json(encrypted_doc)
yield txbenchmark(crypto.decrypt_doc, doc)
return test_doc_decryption
def create_raw_encryption(size):
@pytest.mark.benchmark(group="test_crypto_raw_encrypt")
def test_raw_encrypt(monitored_benchmark, payload):
"""
Encrypt raw payload using default mode from crypto module.
"""
key = payload(32)
monitored_benchmark(_crypto.encrypt_sym, payload(size), key)
return test_raw_encrypt
def create_raw_decryption(size):
@pytest.mark.benchmark(group="test_crypto_raw_decrypt")
def test_raw_decrypt(monitored_benchmark, payload):
"""
Decrypt raw payload using default mode from crypto module.
"""
key = payload(32)
iv, ciphertext = _crypto.encrypt_sym(payload(size), key)
monitored_benchmark(_crypto.decrypt_sym, ciphertext, key, iv)
return test_raw_decrypt
# Create the TESTS in the global namespace, they'll be picked by the benchmark
# plugin.
encryption_tests = [
('10k', 1E4),
('100k', 1E5),
('500k', 5E5),
('1M', 1E6),
('10M', 1E7),
('50M', 5E7),
]
for name, size in encryption_tests:
if size < LIMIT:
sz = int(size)
globals()['test_encrypt_doc_' + name] = create_doc_encryption(sz)
globals()['test_decrypt_doc_' + name] = create_doc_decryption(sz)
for name, size in encryption_tests:
if size < LIMIT:
sz = int(size)
globals()['test_encrypt_raw_' + name] = create_raw_encryption(sz)
globals()['test_decrypt_raw_' + name] = create_raw_decryption(sz)
|
anchore/anchore-engine | anchore_engine/decorators.py | Python | apache-2.0 | 1,604 | 0.002494 | """
Generic decorators for use in all parts of the system
"""
def delegate_to_callable(fn, err_msg=None):
"""
Delegate a call of the same name to the object returned by the fn() invocation. Useful for lazy initializers and functions
that return a common singleton. Can be used to hoist a singleton objects functions to module level
Example usage:
singleton = None
class A(object):
def test():
return 'abc'
def get_singleton():
global singleton
if singleton is None:
singleton = A()
return singleton
@delegate_to_callable(get_singleton)
def test():
pass
:param fn:
:return:
"""
def outer_wrapper(f):
def inner_wrapper(*args, **kwargs):
obj = fn()
if obj is None:
raise Exception(
"Delegate object not available. Err: {}".format(err_msg)
)
if not hasattr(obj, f.__name__):
raise Exception(
"Cannot dele | gate {} to {}, no attribute to delegate to".format(
| f.__name__, str(obj)
)
)
delegated_attr = getattr(obj, f.__name__)
if not callable(delegated_attr):
raise Exception(
"Cannot delegate {} to {} due to not a callable attribute".format(
f.__name__, delegated_attr.__name__
)
)
return delegated_attr(*args, **kwargs)
return inner_wrapper
return outer_wrapper
|
asoliveira/NumShip | scripts/plot/leme-velo-u-cg-plt.py | Python | gpl-3.0 | 2,013 | 0.028957 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#É adimensional?
adi = False
#É para salvar as figuras(True|False)?
save = True
#Caso seja para salvar, qual é o formato desejado?
formato = 'jpg'
#Caso seja para salvar, qual é o diretório que devo salvar?
dircg = 'fig-sen'
#Caso seja para salvar, qual é o nome do arquivo?
nome = 'leme-velo-u-cg'
#Qual título colocar no gráficos?
titulo = ''#'Curva de Giro'
#Qual a cor dos gráficos?
pc = 'k'
r1c = 'b'
r2c = 'y'
r3c = 'r'
#Estilo de linha
ps = '-'
r1s = '-'
r2s = '-'
r3s = '-'
import os
import scipy as sp
import matplotlib.pyplot as plt
from libplot import *
acehis = sp.genfromtxt('../entrada/padrao/CurvaGiro/velo.dat')
acehis2 = sp.genfromtxt('../entrada/leme/saida1.1/CurvaGiro/velo.dat')
acehis3 = sp.genfromtxt('../entrada/leme/saida1.2/CurvaGiro/velo.dat')
acehis4 = sp.genfromtxt('../entrada/leme/saida1.3/CurvaGiro/velo.dat')
axl = [0, 1000, 1, 9]
#Plotando a Curva de Giro
if adi:
ylabel = r'$t\prime$'
xveloabel = r'$u \prime$'
else:
ylabel = r'$u \quad m/s$'
xveloabel = r'$t \quad segundos$'
plt.subplot2grid((1,4),(0,0), colspan=3)
#Padrao
plt.plot(acehis[:, 0], acehis[:, 1], color = pc, linestyle = ps,
linewidth = 1, label=ur'padrão')
plt.plot(acehis2[:, 0], acehis2[:, 1], color = r1c,linestyle = r1s,
linewidth = 1, label=ur'1.1leme')
plt.plot(acehis3[:, 0], acehis3[:, 1], color = r2c, linewidth = 1,
linestyle = r2s, label=ur'1.2leme')
plt.plot | (acehis4[:, 0], acehis4[:, 1],color = r3c, linestyle = r3s,
linewidth = 1, label=ur'1.3leme')
plt.title(titulo)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xveloabel)
plt.axis(axl)
plt.grid(True)
if save:
if not os.path.exists(dircg):
os.makedirs(dircg)
if os.path.exists(dircg + '/' + nome + '.' + formato):
os.remove(dircg + '/' + nome + '.' + formato)
plt.savefig(dircg | + '/' + nome + '.' + formato , format=formato)
else:
plt.show()
|
jfburkhart/shyft | shyft/tests/pyapi/test_selector_ts.py | Python | lgpl-3.0 | 7,317 | 0.00287 |
import math
import socket
import tempfile
import unittest
from contextlib import closing
import numpy as np
from shyft.api import (
Calendar, UtcPeriod,
DtsServer, DtsClient,
TimeAxis, TimeSeries, POINT_AVERAGE_VALUE, POINT_INSTANT_VALUE
)
from shyft.pyapi import fixed_tsv, windowed_percentiles_tsv, period_percentiles_tsv, selector_ts
def find_free_port() -> int:
"""
from SO https://stackoverflow.com/questions/1365265/on-localhost-how-to-pick-a-free-port-number
:return: available port number for use
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
class SelectorTsTestCase(unittest.TestCase):
def setUp(self) -> None:
self.port = find_free_port()
self.server = DtsServe | r()
self.server.set_listening_port(self.port)
self.server.start_async()
self.client = DtsClient(rf'localhost:{self.port}')
def tearDown(self) -> None:
self.server.clear()
del self.server
del self.port
def test_fixed_tsv_empty(self) -> None:
"""Test that an empty TsVector is generated by fixed_tsv when given an empty sequence of values."""
cal = Calendar()
period = UtcPeriod(cal.time(2017, 1, | 1), cal.time(2018, 1, 1))
tsv = fixed_tsv(period, [])
self.assertEqual(len(tsv), 0)
def test_fixed_tsv_values(self) -> None:
"""Test that a TsVector with fixed constant values is generated by fixed_tsv when given
a sequence of values."""
cal = Calendar()
period = UtcPeriod(cal.time(2017, 1, 1), cal.time(2018, 1, 1))
values = [12, 15.5]
tsv = fixed_tsv(period, values)
self.assertEqual(len(tsv), 2)
for v, ts in zip(values, tsv):
for ts_v in ts.values:
self.assertEqual(ts_v, v)
def test_windowed_percentiles_tsv_empty(self) -> None:
"""Test that an empty TsVector is generated by windowed_percentiles_tsv
when given an empty sequence of percentiles."""
cal = Calendar()
period = UtcPeriod(cal.time(2017, 1, 1), cal.time(2018, 1, 1))
data = np.linspace(-2, 2, 24*7)
data_ts = TimeSeries(TimeAxis(0, Calendar.HOUR, len(data)), data, POINT_INSTANT_VALUE)
# compute
tsv = windowed_percentiles_tsv(
data_ts, period,
Calendar.HOUR, Calendar.HOUR,
[],
self.client, cal
)
self.assertEqual(len(tsv), 0)
def test_windowed_percentiles_tsv_values(self) -> None:
"""Test that a TsVector is generated by windowed_percentiles_tsv with time-series
fulfilling some properties of being percentiles of the data ts."""
cal = Calendar()
period = UtcPeriod(cal.time(2017, 1, 1), cal.time(2018, 1, 1))
data = np.linspace(-2, 2, 24*7)
data_ts = TimeSeries(TimeAxis(0, Calendar.HOUR, len(data)), data, POINT_INSTANT_VALUE)
# compute
percentiles = [0, 10, 50, 90, 100]
tsv = windowed_percentiles_tsv(
data_ts, period,
3*Calendar.HOUR, 12*Calendar.HOUR,
percentiles,
self.client, cal
)
self.assertEqual(len(tsv), 5)
# assert that the time-series have the correct properties for being percentile series
for i in range(len(tsv[0])):
prev_v = tsv[0].values[i]
for j in range(len(percentiles)-1):
v = tsv[j+1].values[i]
# both values will be NaN at the end - that is ok
if math.isnan(prev_v) and math.isnan(v):
continue
# check that no larger percentile have values greater than lower percentiles
self.assertLessEqual(prev_v, v)
prev_v = v
def test_period_percentiles_tsv_empty(self) -> None:
"""Test that an empty TsVector is generated by period_percentiles_tsv
when given an empty sequence of percentiles."""
cal = Calendar()
period = UtcPeriod(cal.time(2017, 1, 1), cal.time(2018, 1, 1))
data = np.linspace(-2, 2, 24*7)
data_ts = TimeSeries(TimeAxis(0, Calendar.HOUR, len(data)), data, POINT_INSTANT_VALUE)
# compute
tsv = period_percentiles_tsv(
data_ts, period,
3*Calendar.HOUR, period,
[],
self.client, cal
)
self.assertEqual(len(tsv), 0)
def test_period_percentiles_tsv_values(self) -> None:
"""Test that a TsVector is generated by period_percentiles_tsv with time-series
fulfilling some properties of being percentiles of the data ts."""
cal = Calendar()
period = UtcPeriod(cal.time(2017, 1, 1), cal.time(2018, 1, 1))
data = np.linspace(-2, 2, 24*7)
data_ts = TimeSeries(TimeAxis(0, Calendar.HOUR, len(data)), data, POINT_INSTANT_VALUE)
# compute
percentiles = [0, 10, 50, 90, 100]
tsv = period_percentiles_tsv(
data_ts, period,
3*Calendar.HOUR, period,
percentiles,
self.client, cal
)
self.assertEqual(len(tsv), 5)
# assert that the time-series have the correct properties for being percentile series
for i in range(len(tsv[0])):
prev_v = tsv[0].values[i]
for j in range(len(percentiles)-1):
v = tsv[j+1].values[i]
# both values will be NaN at the end - that is ok
if math.isnan(prev_v) and math.isnan(v):
continue
# check that no larger percentile have values greater than lower percentiles
self.assertLessEqual(prev_v, v)
prev_v = v
def test_selector_ts(self) -> None:
"""Test that selector_ts constructs a time-series selects data from different time-series correctly."""
n = 24
cal = Calendar()
period = UtcPeriod(0, n*Calendar.HOUR)
data_ts = TimeSeries(TimeAxis(0, Calendar.HOUR, n), np.linspace(-10, 10, n), POINT_INSTANT_VALUE)
source_tss = [
TimeSeries(TimeAxis(0, Calendar.HOUR, n), 1.00*np.ones(n), POINT_INSTANT_VALUE),
TimeSeries(TimeAxis(0, Calendar.HOUR, n), 10.0*np.ones(n), POINT_INSTANT_VALUE),
TimeSeries(TimeAxis(0, Calendar.HOUR, n), 100.*np.ones(n), POINT_INSTANT_VALUE),
]
threshold_1 = -5
threshold_2 = 5
threshold_tss = [
TimeSeries(TimeAxis(0, Calendar.HOUR, n), threshold_1*np.ones(n), POINT_INSTANT_VALUE),
TimeSeries(TimeAxis(0, Calendar.HOUR, n), threshold_2*np.ones(n), POINT_INSTANT_VALUE),
]
ts = selector_ts(
data_ts, period, 2*Calendar.HOUR,
threshold_tss, source_tss,
POINT_AVERAGE_VALUE,
self.client, cal
)
self.assertEqual(len(data_ts), len(ts))
for dv, rv in zip(data_ts.values, ts.values):
if dv < threshold_1:
self.assertEqual(rv, source_tss[0].values[0])
elif threshold_1 <= dv < threshold_2:
self.assertEqual(rv, source_tss[1].values[0])
else:
self.assertEqual(rv, source_tss[2].values[0])
|
cherrishes/weilai | xingxing/protobuf/python/lib/Python3.4/google/protobuf/pyext/descriptor_cpp2_test.py | Python | apache-2.0 | 2,506 | 0.001995 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests | for google.protobuf.pyext behavior."""
__author__ = 'anuraag@google.com (Anuraag Agrawal)'
import os
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION'] = '2'
# We must set the implementation version above before the google3 imports.
# pylint: disable=g-import-not-at-top
from google.apputils import basetest
from google.protobuf.internal import api_implementation
# Run all tests from the original module by putting them in our namespace.
# pylint: disable=wildcard-import
from google.protobuf.internal.descriptor_test import *
class ConfirmCppApi2Test(basetest.TestCase):
def testImplementationSetting(self):
self.assertEqual('cpp', api_implementation.Type())
self.assertEqual(2, api_implementation.Version())
if __name__ == '__main__':
basetest.main()
|
antoinecarme/pyaf | tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_Lag1Trend_Seasonal_Hour_LSTM.py | Python | bsd-3-clause | 154 | 0.051948 | import tests.model_control.test_ozone_custom_models_enabled as testmod |
testmod.build_model( [ | 'Fisher'] , ['Lag1Trend'] , ['Seasonal_Hour'] , ['LSTM'] ); |
unphased/Vim-IndentFinder | test_many_files.py | Python | bsd-2-clause | 2,378 | 0.037006 | #!/usr/bin/env pyt | hon
#
# Indentation finder, by Philippe Fremy <phil at freehackers dot org>
# Copyright 2002,2005 Philippe Fremy
#
# This program is distributed under the BSD license. You should have received
# a copy of t | he file LICENSE.txt along with this software.
#
import indent_finder
import os, glob
import unittest
from pprint import pprint
TEST_DEFAULT_RESULT=('',0)
class Test_many_files( unittest.TestCase ):
def check_file( self, fname, result, expected_vim_result ):
ifi = indent_finder.IndentFinder( TEST_DEFAULT_RESULT )
indent_finder.DEFAULT_TAB_WIDTH = 13
ifi.parse_file( fname )
res = str(ifi)
self.assertEquals( res, result )
self.assertEquals( expected_vim_result, ifi.vim_output() )
def test_file_space4( self ):
l = []
l += glob.glob( 'test_files/space4/*.py' )
l += glob.glob( 'test_files/space4/*.java' )
l += glob.glob( 'test_files/space4/*.vim' )
for f in l:
print 'checking: ', f
self.check_file( f , 'space 4',
'set sts=4 | set tabstop=4 | set expandtab | set shiftwidth=4 " (space 4)' )
def test_file_space2( self ):
l = []
l += glob.glob( 'test_files/space2/*.cpp' )
for f in l:
print 'checking: ', f
self.check_file( f , 'space 2',
'set sts=2 | set tabstop=2 | set expandtab | set shiftwidth=2 " (space 2)' )
def test_file_tab( self ):
l = []
l += glob.glob( 'test_files/tab/*.c' )
l += glob.glob( 'test_files/tab/*.cpp' )
l += glob.glob( 'test_files/tab/*.py' )
for f in l:
print 'checking: ', f
self.check_file( f , 'tab %d' % indent_finder.DEFAULT_TAB_WIDTH,
'set sts=0 | set tabstop=%d | set noexpandtab | set shiftwidth=%d " (tab)'%
(indent_finder.DEFAULT_TAB_WIDTH,
indent_finder.DEFAULT_TAB_WIDTH) )
def test_file_mixed4( self ):
l = []
l += glob.glob( 'test_files/mixed4/*.c' )
for f in l:
print 'checking: ', f
self.check_file( f, 'mixed tab 8 space 4',
'set sts=4 | set tabstop=8 | set noexpandtab | set shiftwidth=4 " (mixed 4)' )
if __name__ == "__main__":
unittest.main( testRunner = unittest.TextTestRunner( verbosity = 2 ) )
|
Jaza/url-for-s3 | setup.py | Python | apache-2.0 | 1,123 | 0 | import os
import setuptools
module_path = os.path.join(os.path.dirname(__file__), 'url_for_s3.py')
version_line = [line for line in open(module_path)
if line.startswith('__version__')][0]
__version__ = version_line.split('__version__ = ')[-1][1:][:-2]
setuptools.setup(
name="url-for-s3",
version=__version__,
url="https://github.com/Jaza/url-for-s3",
author="Jeremy Epstein",
author_email="jazepstein@gmail.com",
description="Python function that generates a URL to a given S3 resource.",
long | _description=open('README.rst').read(),
py_modules=['url_for_s3'],
zip_safe=False,
platforms='any',
install_requires=[],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming La | nguage :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
)
|
kingsdigitallab/tvof-django | tvof/kiln/urls.py | Python | mit | 232 | 0 | from django.urls import re_path
from .views import process
urlpatterns = [
re_path(
r'biblio | graphy/?$', process,
{'kiln_url': | 'bibliography', 'page_title': 'Bibliography'},
name='bibliography'
),
]
|
vlegoff/tsunami | src/primaires/information/__init__.py | Python | bsd-3-clause | 13,007 | 0.00085 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le module primaire aide."""
try:
import psutil
except ImportError:
psutil = None
from abstraits.module import *
from primaires.format.fonctions import *
from primaires.information.config import cfg_info
from primaires.information import commandes
from .editeurs.hedit import EdtHedit
from .editeurs.nledit import EdtNledit
from .roadmap import Roadmap
from .newsletter import Newsletter
from .sujet import SujetAide
from .tips import Tips
from .versions import Versions
from .annonces import Annonces
from primaires.information.reboot import Reboot
class Module(BaseModule):
"""Cette classe représente le module primaire information.
Ce module gère l'aide in-game, c'est-à-dire les sujets d'aide
(fichier ./sujet.py), ainsi que le système de versions.
"""
def __init__(self, importeur):
"""Constructeur du module"""
BaseModule.__init__(self, importeur, "information", "primaire")
self.__sujets = []
self.tips = None
self.versions = None
self.annonces = None
self.newsletters = []
self.roadmaps = []
self.logger = importeur.man_logs.creer_logger(
"information", "information")
self.reboot = None
def config(self):
"""Configuration du module"""
self.cfg_info = type(self.importeur).anaconf.get_config("config_info",
"information/config.cfg", "config information", cfg_info)
BaseModule.config(self)
def init(self):
"""Initialisation du module.
On récupère les sujets d'aide enregistrés, les versions,
les newsletters et la roadmap.
"""
sujets = self.importeur.supenr.charger_groupe(SujetAide)
self.__sujets = sujets
nb_sujets = len(sujets)
self.logger.info(format_nb(nb_sujets, "{nb} sujet{s} d'aide " \
"récupéré{s}")) |
versions = self.importeur.supenr.charger_unique(Versions)
if versions is None:
versions = Versions()
self.versions = versions
annonces = self.importeur.supenr.charger_unique(Annonces)
if annonces is None:
annonces = Annonces()
self.annonces = annonces
tips = self.importeur.supenr.charger_unique(Tips)
if not tips:
| tips = Tips()
self.tips = tips
self.newsletters = sorted(importeur.supenr.charger_groupe(Newsletter),
key=lambda nl: nl.date_creation)
self.roadmaps = sorted(importeur.supenr.charger_groupe(Roadmap),
key=lambda r: r.no)
# On lie la méthode joueur_connecte avec l'hook joueur_connecte
# La méthode joueur_connecte sera ainsi appelée quand un joueur
# se connecte
self.importeur.hook["joueur:connecte"].ajouter_evenement(
self.joueur_connecte)
# diffact pour psutil
if psutil:
importeur.diffact.ajouter_action("ver_mem", 60, self.surveiller_memoire)
BaseModule.init(self)
def ajouter_commandes(self):
"""Ajout des commandes dans l'interpréteur"""
self.commandes = [
commandes.aide.CmdAide(),
commandes.annonces.CmdAnnonces(),
commandes.hedit.CmdHedit(),
commandes.newsletter.CmdNewsletter(),
commandes.reboot.CmdReboot(),
commandes.roadmap.CmdRoadmap(),
commandes.tips.CmdTips(),
commandes.versions.CmdVersions(),
]
for cmd in self.commandes:
self.importeur.interpreteur.ajouter_commande(cmd)
# Ajout des éditeurs 'hedit' et 'nledit'
self.importeur.interpreteur.ajouter_editeur(EdtHedit)
self.importeur.interpreteur.ajouter_editeur(EdtNledit)
def __getitem__(self, titre):
"""Retourne le sujet portant ce titre.
Si le titre n'est pas trouvé, lève l'exception KeyError.
La recherche du sujet se fait sans tenir compte des accents ni de
la casse.
"""
titre = supprimer_accents(titre).lower()
for sujet in self.__sujets:
if supprimer_accents(sujet.titre).lower() == titre:
return sujet
raise KeyError("le titre {} n'a pas pu être trouvé".format(titre))
def __delitem__(self, titre):
"""Détruit un sujet d'aide de manière définitive."""
titre = supprimer_accents(titre).lower()
for sujet in self.__sujets:
if supprimer_accents(sujet.titre).lower() == titre:
titre = sujet
break
self.__sujets.remove(titre)
titre.vider()
titre.detruire()
def get_sujet_par_mot_cle(self, mot):
"""Retourne le sujet correspondant à ce mot-clé."""
mot = supprimer_accents(mot.lower())
for sujet in self.__sujets:
if mot in [supprimer_accents(m) for m in sujet.mots_cles]:
return sujet
return None
def get_sujet(self, nom_sujet):
"""Retourne un sujet ou None si le sujet recherché n'existe pas.
Contrairement à __getitem__ et get_sujet_par_mot_cle, le sujet est
renvoyé indifféremment en fonction de son nom ou d'un mot-clé.
"""
sujet = None
try:
sujet = self[nom_sujet]
except KeyError:
sujet = self.get_sujet_par_mot_cle(nom_sujet)
return sujet
@property
def sujets(self):
"""Retourne un dictionnaire {cle: sujet} des sujets existants."""
dic = {}
for sujet in self.__sujets:
dic[sujet.cle] = sujet
return dic
def ajouter_sujet(self, cle):
"""Ajoute un sujet à la liste des sujets d'aide.
La clé du sujet doit être fournie en paramètre.
Si la clé est déjà utilisée, lève une exception. Sinon, retourne
le sujet nouvellement créé.
"""
if cle in self.sujets.keys():
raise ValueError("la clé {} est déjà utilisée".format(cle))
sujet = SujetAide(cle)
self.__sujets.append(sujet)
return sujet
def creer_newsletter(self, sujet):
"""Créée et retourne la News Letter."""
newsletter = Newsletter(sujet)
self.newsletters.append(newsletter)
return newsletter
def creer_roadmap(self, texte):
"""Crée un nouvel élément de la roadmap.
Le texte contient le titre sous la forme "titre : texte".
Par exemple, "exploration : 500 salles ouvrables". Si le
deux point ne peut être trouvé, l'élément n'a pas de texte,
juste un titre.
"" |
sheplu/Anthill | Python/utils/Simulation.py | Python | mit | 130 | 0.046154 | #!/user/bin/ | python
# coding: utf8
class Simulation:
"""docstring for Simulation"""
def __init__(self, arg): |
self.arg = arg
|
mk8310/im_demo | views/index.py | Python | gpl-3.0 | 405 | 0 | #!/usr/bin/env python
# -*-coding:utf-8-*- |
'''
Author : ming
date : 2016/11/27 上午12:20
role : Version Update
'''
from tornado import web
from tornado.web import HTTPError
class IndexHandler(web.RequestHandler):
def get(self, room):
| if room == 'get':
raise HTTPError(500)
self.room = room
self.render('index.html', room=self.room, host=self.request.host)
|
prat0318/bravado-core | tests/validate/validate_array_test.py | Python | bsd-3-clause | 3,505 | 0.000571 | from jsonschema.exceptions import ValidationError
import pytest
from bravado_core.validate import validate_array
from tests.validate.conftest import email_address_format
@pytest.fixture
def int_array_spec():
return {
'type': 'array',
'items': | {
'type': 'integer',
}
}
def test_minItems_success(minimal_swagger_spec, in | t_array_spec):
int_array_spec['minItems'] = 2
validate_array(minimal_swagger_spec, int_array_spec, [1, 2, 3])
def test_minItems_failure(minimal_swagger_spec, int_array_spec):
int_array_spec['minItems'] = 2
with pytest.raises(ValidationError) as excinfo:
validate_array(minimal_swagger_spec, int_array_spec, [1])
assert 'is too short' in str(excinfo)
def test_maxItems_success(minimal_swagger_spec, int_array_spec):
int_array_spec['maxItems'] = 2
validate_array(minimal_swagger_spec, int_array_spec, [1])
def test_maxItems_failure(minimal_swagger_spec, int_array_spec):
int_array_spec['maxItems'] = 2
with pytest.raises(ValidationError) as excinfo:
validate_array(minimal_swagger_spec, int_array_spec, [1, 2, 3, 4])
assert 'is too long' in str(excinfo)
def test_unqiueItems_true_success(minimal_swagger_spec, int_array_spec):
int_array_spec['uniqueItems'] = True
validate_array(minimal_swagger_spec, int_array_spec, [1, 2, 3])
def test_uniqueItems_true_failure(minimal_swagger_spec, int_array_spec):
int_array_spec['uniqueItems'] = True
with pytest.raises(ValidationError) as excinfo:
validate_array(minimal_swagger_spec, int_array_spec, [1, 2, 1, 4])
assert 'has non-unique elements' in str(excinfo)
def test_uniqueItems_false(minimal_swagger_spec, int_array_spec):
int_array_spec['uniqueItems'] = False
validate_array(minimal_swagger_spec, int_array_spec, [1, 2, 3])
validate_array(minimal_swagger_spec, int_array_spec, [1, 2, 1, 4])
@pytest.fixture
def email_address_array_spec():
return {
'type': 'array',
'items': {
'type': 'string',
'format': 'email_address',
}
}
def test_user_defined_format_success(minimal_swagger_spec,
email_address_array_spec):
request_body = ['foo@bar.com']
minimal_swagger_spec.register_format(email_address_format)
# No exception thrown == success
validate_array(minimal_swagger_spec, email_address_array_spec, request_body)
def test_user_defined_format_failure(minimal_swagger_spec,
email_address_array_spec):
request_body = ['i_am_not_a_valid_email_address']
minimal_swagger_spec.register_format(email_address_format)
with pytest.raises(ValidationError) as excinfo:
validate_array(minimal_swagger_spec, email_address_array_spec,
request_body)
assert "'i_am_not_a_valid_email_address' is not a 'email_address'" in \
str(excinfo.value)
def test_builtin_format_still_works_when_user_defined_format_used(
minimal_swagger_spec):
ipaddress_array_spec = {
'type': 'array',
'items': {
'type': 'string',
'format': 'ipv4',
}
}
request_body = ['not_an_ip_address']
minimal_swagger_spec.register_format(email_address_format)
with pytest.raises(ValidationError) as excinfo:
validate_array(minimal_swagger_spec, ipaddress_array_spec, request_body)
assert "'not_an_ip_address' is not a 'ipv4'" in str(excinfo.value)
|
walchko/soccer2 | pygecko_old/Example.py | Python | mit | 4,227 | 0.023657 | #!/usr/bin/env python
##############################################
# The MIT License (MIT)
# Copyright (c) 2016 Kevin Walchko
# see LICENSE for full details
##############################################
"""
http://www.howtogeek.com/225487/what-is-the-difference-between-127.0.0.1-and-0.0.0.0/
What is the Difference Between 127.0.0.1 and 0.0.0.0?
* 127.0.0.1 is th | e loopback address (also known as localhost).
* 0.0.0.0 is a non-routable meta-address used to designate an invalid, unknown,
or non-applicable target (a no particular address place holder).
In the con | text of a route entry, it usually means the default route.
In the context of servers, 0.0.0.0 means all IPv4 addresses on the local
machine. If a host has two IP addresses, 192.168.1.1 and 10.1.2.1, and a server
running on the host listens on 0.0.0.0, it will be reachable at both of those
IPs.
"""
from __future__ import print_function
from __future__ import division
import time
from pygecko.ZmqClass import Sub as zmqSub
from pygecko.ZmqClass import Pub as zmqPub
from pygecko import Messages as Msg
from quadruped import Engine
from quadruped import DiscreteRippleGait
from quadruped import AHRS # attitude and heading reference system
import multiprocessing as mp
from quadruped import MCP3208, SPI
##########################
class pyGeckoQuadruped(mp.Process):
    """Quadruped robot control process.

    Subscribes to twist commands ('cmd') and PS4 joystick input ('js') over
    zmq, feeds the resulting (x, y, rz) commands to the ripple gait, and
    publishes AHRS and IR-range telemetry.
    """

    # zmq endpoints; 0.0.0.0 binds every local IPv4 interface.
    sub_js = ('0.0.0.0', 9100)         # PS4 joystick messages
    sub_cmd = ('0.0.0.0', 9110)        # twist command messages
    telemetry_pub = ('0.0.0.0', 9120)  # outgoing telemetry

    def __init__(self, data=None):
        """data: optional Engine configuration dict (see quadruped.Engine)."""
        mp.Process.__init__(self)
        # Fixed: the mutable default argument ``data={}`` was shared across
        # all instances; use None as the sentinel instead.
        if data is None:
            data = {}
        # self.ahrs = AHRS()  # compass sensor -- constructed in run() instead
        self.robot = Engine(data)
        leg = self.robot.getFoot0(0)
        self.crawl = DiscreteRippleGait(45.0, leg, self.robot.moveFoot)

    def handleMsg(self, topic, msg):
        """Convert an incoming message to an (x, y, rz) command tuple.

        Unknown topics yield the neutral command (0, 0, 0).  The PS4
        <share> button terminates the process.
        """
        cmd = (0, 0, 0)
        # Fixed: topics were compared with ``is``, which only worked by
        # accident of CPython string interning; use == for string equality.
        if topic == 'cmd':
            # twist message
            x, y, _ = msg.linear
            _, _, z = msg.angular
            cmd = (x, y, z)
            print('>> got a command', cmd)
        elif topic == 'js':
            ps4 = msg
            x, y = ps4.axes.leftStick
            rz = ps4.axes.rightStick[1]
            cmd = (x, y, rz)
            print('>> got a command', cmd)
            stop = ps4.buttons.share
            if stop:
                print('You hit <share> ... bye!')
                exit()
        return cmd

    def read_ir(self):
        """Sample all 8 ADC channels and pack them into a Range message."""
        mcp = self.mcp
        adc = [0] * 8
        for i in range(8):
            adc[i] = mcp.read_adc(i)
        msg = Msg.Range()
        msg.fov = 20.0  # sensor field of view, degrees -- TODO confirm units
        msg.range = adc
        return msg

    def read_compass(self):
        """Read roll/pitch/heading (degrees) from the AHRS into a Compass msg."""
        roll, pitch, heading = self.ahrs.read(deg=True)
        msg = Msg.Compass(units=Msg.Compass.COMPASS_DEGREES)
        msg.roll = roll
        msg.pitch = pitch
        msg.heading = heading
        # Fixed: ``(-90.0 > roll > 90.0)`` can never be true (a value cannot
        # be below -90 and above 90 at once); flag roll/pitch outside +/-90.
        if not (-90.0 <= roll <= 90.0) or not (-90.0 <= pitch <= 90.0):
            print('Crap we flipped!!!', msg)
        return msg

    def run(self):
        """Process entry point: wire up zmq, the ADC and the AHRS, then loop
        forever handling commands and publishing telemetry."""
        print('>> Subscribe to cmd on {}:{}'.format(*self.sub_cmd))
        print('>> Subscribe to js on {}:{}'.format(*self.sub_js))
        print('>> Publishing telemetry on {}:{}'.format(*self.telemetry_pub))
        cmd_sub = zmqSub(['cmd'], self.sub_cmd)  # twist
        js_sub = zmqSub(['js'], self.sub_js)  # ps4 ... why? just make all cmd/twist?
        telemetry_pub = zmqPub(self.telemetry_pub)

        # ADC: hardware SPI configuration.
        SPI_PORT = 0
        SPI_DEVICE = 0
        self.mcp = MCP3208(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))

        # Compass / attitude sensor.
        self.ahrs = AHRS()

        sub = [cmd_sub, js_sub]
        while True:
            for s in sub:
                topic, msg = s.recv()
                if msg:
                    cmd = self.handleMsg(topic, msg)
                    self.crawl.command(cmd)

            # read Compass -----
            msg = self.read_compass()
            telemetry_pub.pub('ahrs', msg)

            # read IR ----------
            msg = self.read_ir()
            telemetry_pub.pub('ir', msg)

            time.sleep(0.01)
def run():
    """Construct the quadruped process, start it, and wait for it to finish."""
    # Engine configuration -- angles are always [min, max]; xl-320 servos.
    config = {
        # 'serialPort': '/dev/serial0',  # real robot
        # 'legLengths': {
        #     'coxaLength': 45,
        #     'femurLength': 55,
        #     'tibiaLength': 104
        # },
        # 'legAngleLimits': [[-90, 90], [-90, 90], [-150, 0]],
        # 'legOffset': [150, 150, 150+90],
        # 'port': 9020
    }
    robot = pyGeckoQuadruped(config)
    # robot.daemon = True
    robot.start()
    print('pid', robot.pid)
    robot.join()
    print('Nothing to see here ... move along, move along')


if __name__ == "__main__":
    run()
|
michaelnt/doorstop | doorstop/core/tests/test_builder.py | Python | lgpl-3.0 | 2,052 | 0.000487 | # SPDX-License-Identifier: LGPL-3.0-only
"""Unit tests for the doorstop.core.builder module."""
import unittest
from unittest.mock import Mock, patch
from doorstop.core.builder import _clear_tree, build, find_document, find_item
from doorstop.core.tests import EMPTY, FILES, Mock | DocumentNoSkip, MockDocumentSkip
from doorstop.core.tree import Tree
class TestModule(unittest.TestCase):
    """Unit tests for the doorstop.core.builder module."""

    @patch('doorstop.core.vcs.find_root', Mock(return_value=EMPTY))
    def test_run_empty(self):
        """Verify an empty directory is an empty hierarchy."""
        tree = build(EMPTY)
        self.assertEqual(0, len(tree))

    # Fixed: the patch target string was garbled ('doorstop. core.vcs...').
    @patch('doorstop.core.document.Document', MockDocumentNoSkip)
    @patch('doorstop.core.vcs.find_root', Mock(return_value=FILES))
    def test_build(self):
        """Verify a tree can be built."""
        tree = build(FILES)
        self.assertEqual(4, len(tree))

    @patch('doorstop.core.document.Document', MockDocumentSkip)
    @patch('doorstop.core.vcs.find_root', Mock(return_value=FILES))
    def test_build_with_skips(self):
        """Verify documents can be skipped while building a tree."""
        tree = build(FILES)
        self.assertEqual(0, len(tree))

    @patch('doorstop.core.builder.build', Mock(return_value=Tree(Mock())))
    @patch('doorstop.core.tree.Tree.find_document')
    def test_find_document(self, mock_find_document):  # pylint: disable=R0201
        """Verify documents can be found using a convenience function."""
        _clear_tree()  # force the convenience API to rebuild (mocked) tree
        prefix = 'req'
        find_document(prefix)
        mock_find_document.assert_called_once_with(prefix)

    @patch('doorstop.core.builder.build', Mock(return_value=Tree(Mock())))
    @patch('doorstop.core.tree.Tree.find_item')
    def test_find_item(self, mock_find_item):  # pylint: disable=R0201
        """Verify items can be found using a convenience function."""
        _clear_tree()
        uid = 'req1'
        find_item(uid)
        mock_find_item.assert_called_once_with(uid)
|
jchaffraix/ConfigMisc | install.py | Python | bsd-2-clause | 1,293 | 0.022428 | import os
import subprocess
import sys
GITHUB="git@github.com:jchaffraix/ConfigMisc.git"
# TODO: Allow customization.
# This is hardcoded to match the bash_profile file for now.
PATH="~/Tools/Scripts"
def install_config(path, config, copy=False):
    """Install a single config file into *path*.

    config: source path of the config file; its basename becomes the
        destination filename.
    copy: if True copy the file, otherwise create a symlink.

    If the destination already exists, the user is asked whether to
    Overwrite it, back it up (Copy), Skip this file, or Exit.
    """
    # Check if the path exists.
    name = config.split("/")[-1]
    dst = path + "/" + name
    print("Config %s, destination: %s" % (config, dst))
    if os.path.exists(dst):
        # Decide what to do.
        while True:
            print("File exist %s: Overwrite/Copy/Skip/Exit [ocsE]: " % dst)
            # Fixed: raw_input() is Python 2 only; the rest of this file
            # (makedirs(exist_ok=...)) requires Python 3.
            answer = input()
            if answer in ["e", "E"]:
                sys.exit(-1)
            if answer in ["s", "S"]:
                return
            if answer in ["c", "C"]:
                # Back up the existing file, then install.
                # Fixed: `dest` was an undefined name (NameError), and this
                # branch fell through to "Unknown input" instead of breaking.
                os.rename(dst, dst + ".bak")
                break
            if answer in ["o", "O"]:
                # Fixed: remove the existing file first; os.symlink() fails
                # if the destination already exists.
                os.remove(dst)
                break
            print("Unknown input")
    if copy:
        subprocess.check_call(["cp", config, dst])
    else:
        os.symlink(config, dst)
def install(path):
    """Clone the config repository into *path* and install the dotfiles."""
    # Fixed: expand '~' -- os.makedirs/os.symlink do not understand shell
    # tildes, so the previous code created a literal './~' directory.
    path = os.path.expanduser(path)
    # Create the root.
    os.makedirs(path, exist_ok=True)
    # Fetch this repository.
    print("Fetching the config")
    # TODO(Python3): subprocess.run.
    subprocess.check_call(["git", "clone", GITHUB, path])
    # Install the different configuration.
    install_config(os.path.expanduser("~"),
                   "/".join([path, "config", ".bash_profile"]))


if __name__ == "__main__":
    install(PATH)
|
TRECVT/vigir_footstep_planning_basics | vigir_footstep_planning_lib/src/vigir_footstep_planning_lib/qt_helper.py | Python | gpl-3.0 | 2,044 | 0.009295 | #!/usr/bin/env python
import rospy
from python_qt_binding.QtCore import Qt
from python_qt_binding.QtGui import QHBoxLayout, QGroupBox, QTextEdit, QDoubleSpinBox, QColor
# generic helper to generate quickly QDoubleSpinBox
# generic helper to generate quickly QDoubleSpinBox
def generate_q_double_spin_box(default_val, range_min, range_max, decimals, single_step):
    """Build a QDoubleSpinBox configured with value, bounds, precision and step."""
    box = QDoubleSpinBox()
    # Keep the original call order: the value is applied first, then the
    # range (which clamps the value if it falls outside the new bounds).
    box.setValue(default_val)
    box.setRange(range_min, range_max)
    box.setDecimals(decimals)
    box.setSingleStep(single_step)
    return box
# adds a layout with frame and text to parent widget
# adds a layout with frame and text to parent widget
def add_layout_with_frame(parent, layout, text = ""):
    """Wrap *layout* in a titled, rounded group box and add it to *parent*."""
    wrapper = QHBoxLayout()
    wrapper.addLayout(layout)

    frame = QGroupBox()
    frame.setStyleSheet("QGroupBox { border: 1px solid gray; border-radius: 4px; margin-top: 0.5em; } QGroupBox::title { subcontrol-origin: margin; left: 10px; padding: 0 3px 0 3px; }")
    frame.setTitle(text)
    frame.setLayout(wrapper)

    parent.addWidget(frame)
# adds a widget with frame and text to parent widget
# adds a widget with frame and text to parent widget
def add_widget_with_frame(parent, widget, text = ""):
    """Wrap *widget* in a titled, rounded group box and add it to *parent*."""
    wrapper = QHBoxLayout()
    wrapper.addWidget(widget)

    frame = QGroupBox()
    frame.setStyleSheet("QGroupBox { border: 1px solid gray; border-radius: 4px; margin-top: 0.5em; } QGroupBox::title { subcontrol-origin: margin; left: 10px; padding: 0 3px 0 3px; }")
    frame.setTitle(text)
    frame.setLayout(wrapper)

    parent.addWidget(frame)
# outputs message with given color at a QTextEdit
def output_message(text_edit, msg, color):
    """Append *msg* to the QTextEdit, rendered in *color*."""
    text_edit.setTextColor(color)
    text_edit.append(msg)
# outputs error_status msg at QTextEdit field
# outputs error_status msg at QTextEdit field
def output_status(text_edit, error_status):
    """Show an error_status's error (red) and warning (orange) messages."""
    if error_status.error != 0:
        output_message(text_edit, error_status.error_msg, Qt.red)
    if error_status.warning != 0:
        output_message(text_edit, error_status.warning_msg, QColor(255, 165, 0))
|
LordSputnik/beets | beetsplug/types.py | Python | mit | 1,775 | 0 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of c | harge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies | or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from beets.plugins import BeetsPlugin
from beets.dbcore import types
from beets.util.confit import ConfigValueError
from beets import library
class TypesPlugin(BeetsPlugin):
    """Expose user-configured flexible-field types for items and albums."""

    @property
    def item_types(self):
        return self._types()

    @property
    def album_types(self):
        return self._types()

    def _types(self):
        """Build a {field: dbcore type} dict from the plugin's config.

        Raises ConfigValueError for unknown type names.
        """
        if not self.config.exists():
            return {}

        mytypes = {}
        for key, value in self.config.items():
            # Fixed: read the config value once; the error message previously
            # interpolated the confit view object instead of its value.
            type_name = value.get()
            if type_name == 'int':
                mytypes[key] = types.INTEGER
            elif type_name == 'float':
                mytypes[key] = types.FLOAT
            elif type_name == 'bool':
                mytypes[key] = types.BOOLEAN
            elif type_name == 'date':
                mytypes[key] = library.DateType()
            else:
                raise ConfigValueError(
                    u"unknown type '{0}' for the '{1}' field"
                    .format(type_name, key))
        return mytypes
|
Brocade-OpenSource/OpenStack-DNRM-Nova | nova/tests/virt/baremetal/db/test_bm_node.py | Python | apache-2.0 | 6,918 | 0.000578 | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Bare-Metal DB testcase for BareMetalNode
"""
from nova import exception
from nova.tests.virt.baremetal.db import base
from nova.tests.virt.baremetal.db import utils
from nova.virt.baremetal import db
class BareMetalNodesTestCase(base.BMDBTestCase):
    """CRUD, query and allocation tests for the bare-metal node DB API.

    Uses the non-deprecated unittest assertion spellings (assertEqual,
    assertIsNone) instead of assertEquals / assertTrue(x is None).
    """

    def _create_nodes(self):
        """Insert six nodes with varying resources; record ids in self.ids."""
        nodes = [
            utils.new_bm_node(pm_address='0', service_host="host1",
                              memory_mb=100000, cpus=100, local_gb=10000),
            utils.new_bm_node(pm_address='1', service_host="host2",
                              instance_uuid='A',
                              memory_mb=100000, cpus=100, local_gb=10000),
            utils.new_bm_node(pm_address='2', service_host="host2",
                              memory_mb=1000, cpus=1, local_gb=1000),
            utils.new_bm_node(pm_address='3', service_host="host2",
                              memory_mb=1000, cpus=2, local_gb=1000),
            utils.new_bm_node(pm_address='4', service_host="host2",
                              memory_mb=2000, cpus=1, local_gb=1000),
            utils.new_bm_node(pm_address='5', service_host="host2",
                              memory_mb=2000, cpus=2, local_gb=1000),
        ]
        self.ids = []
        for n in nodes:
            ref = db.bm_node_create(self.context, n)
            self.ids.append(ref['id'])

    def test_get_all(self):
        r = db.bm_node_get_all(self.context)
        self.assertEqual(r, [])

        self._create_nodes()

        r = db.bm_node_get_all(self.context)
        self.assertEqual(len(r), 6)

    def test_get(self):
        self._create_nodes()

        r = db.bm_node_get(self.context, self.ids[0])
        self.assertEqual(r['pm_address'], '0')

        r = db.bm_node_get(self.context, self.ids[1])
        self.assertEqual(r['pm_address'], '1')

        self.assertRaises(
            exception.NodeNotFound,
            db.bm_node_get,
            self.context, -1)

    def test_get_by_service_host(self):
        self._create_nodes()

        r = db.bm_node_get_all(self.context, service_host=None)
        self.assertEqual(len(r), 6)

        r = db.bm_node_get_all(self.context, service_host="host1")
        self.assertEqual(len(r), 1)
        self.assertEqual(r[0]['pm_address'], '0')

        r = db.bm_node_get_all(self.context, service_host="host2")
        self.assertEqual(len(r), 5)
        pmaddrs = [x['pm_address'] for x in r]
        self.assertIn('1', pmaddrs)
        self.assertIn('2', pmaddrs)
        self.assertIn('3', pmaddrs)
        self.assertIn('4', pmaddrs)
        self.assertIn('5', pmaddrs)

        r = db.bm_node_get_all(self.context, service_host="host3")
        self.assertEqual(r, [])

    def test_get_associated(self):
        self._create_nodes()

        # Only node '1' was created with an instance_uuid.
        r = db.bm_node_get_associated(self.context, service_host=None)
        self.assertEqual(len(r), 1)
        self.assertEqual(r[0]['pm_address'], '1')

        r = db.bm_node_get_unassociated(self.context, service_host=None)
        self.assertEqual(len(r), 5)
        pmaddrs = [x['pm_address'] for x in r]
        self.assertIn('0', pmaddrs)
        self.assertIn('2', pmaddrs)
        self.assertIn('3', pmaddrs)
        self.assertIn('4', pmaddrs)
        self.assertIn('5', pmaddrs)

    def test_destroy(self):
        self._create_nodes()

        db.bm_node_destroy(self.context, self.ids[0])

        self.assertRaises(
            exception.NodeNotFound,
            db.bm_node_get,
            self.context, self.ids[0])

        r = db.bm_node_get_all(self.context)
        self.assertEqual(len(r), 5)

    def test_destroy_with_interfaces(self):
        self._create_nodes()

        if_a_id = db.bm_interface_create(self.context, self.ids[0],
                                         'aa:aa:aa:aa:aa:aa', None, None)
        if_b_id = db.bm_interface_create(self.context, self.ids[0],
                                         'bb:bb:bb:bb:bb:bb', None, None)
        if_x_id = db.bm_interface_create(self.context, self.ids[1],
                                         '11:22:33:44:55:66', None, None)

        db.bm_node_destroy(self.context, self.ids[0])

        # Destroying the node cascades to its interfaces.
        self.assertRaises(
            exception.NovaException,
            db.bm_interface_get,
            self.context, if_a_id)

        self.assertRaises(
            exception.NovaException,
            db.bm_interface_get,
            self.context, if_b_id)

        # Another node's interface is not affected
        if_x = db.bm_interface_get(self.context, if_x_id)
        self.assertEqual(self.ids[1], if_x['bm_node_id'])

        self.assertRaises(
            exception.NodeNotFound,
            db.bm_node_get,
            self.context, self.ids[0])

        r = db.bm_node_get_all(self.context)
        self.assertEqual(len(r), 5)

    def test_find_free(self):
        self._create_nodes()
        fn = db.bm_node_find_free(self.context, 'host2')
        self.assertEqual(fn['pm_address'], '2')

        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=500, cpus=2, local_gb=100)
        self.assertEqual(fn['pm_address'], '3')

        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=1001, cpus=1, local_gb=1000)
        self.assertEqual(fn['pm_address'], '4')

        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=2000, cpus=1, local_gb=1000)
        self.assertEqual(fn['pm_address'], '4')

        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=2000, cpus=2, local_gb=1000)
        self.assertEqual(fn['pm_address'], '5')

        # check memory_mb
        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=2001, cpus=2, local_gb=1000)
        self.assertIsNone(fn)

        # check cpus
        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=2000, cpus=3, local_gb=1000)
        self.assertIsNone(fn)

        # check local_gb
        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=2000, cpus=2, local_gb=1001)
        self.assertIsNone(fn)
|
klanestro/vortaro | words/tools.py | Python | gpl-3.0 | 1,001 | 0.005994 | import random
# generate a random bit order
# you'll need to save this mapping permanently, perhaps just hardcode it
# map how ever many bits you need to represent your integer space
# Fixed: wrap in list() -- on Python 3, range objects have no .reverse().
mapping = list(range(34))
mapping.reverse()

# alphabet for changing from base 10
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# shuffle the bits
# shuffle the bits
def encode(n):
    """Shuffle the bits of *n* according to `mapping`, then re-base."""
    shuffled = 0
    for src_pos, dst_pos in enumerate(mapping):
        if n & (1 << src_pos):
            shuffled |= 1 << dst_pos
    return enbase(shuffled)
# unshuffle the bits
# unshuffle the bits
def decode(n):
    """Invert encode(): re-base *n* back to an integer, then unshuffle its bits."""
    # Fixed: the input must be converted from the custom base *before* the
    # bit operations.  The previous code applied debase() to the shuffled
    # bits instead, and would TypeError on encode()'s string output anyway.
    x = debase(n)
    result = 0
    for i, b in enumerate(mapping):
        if x & (1 << b):
            result |= 1 << i
    return result
# change the base
# change the base
def enbase(x):
    """Convert a non-negative base-10 integer to a string in base len(chars)."""
    n = len(chars)
    if x < n:
        return chars[x]
    # Fixed: use // -- true division (/) yields a float on Python 3 and
    # breaks both the recursion and the chars[] lookup.
    return enbase(x // n) + chars[x % n]
# go back to base 10
# go back to base 10
def debase(x):
    """Convert a string produced by enbase() back to a base-10 integer."""
    base = len(chars)
    total = 0
    for power, digit in enumerate(reversed(x)):
        total += chars.index(digit) * (base ** power)
    return total
|
vsajip/django | django/contrib/gis/db/backends/postgis/operations.py | Python | bsd-3-clause | 25,275 | 0.003521 | import re
from decimal import Decimal
from django.conf import settings
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.util import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.base import DatabaseOperations
from django.db.utils import DatabaseError
from django.utils import six
#### Classes used in constructing PostGIS spatial SQL ####
class PostGISOperator(SpatialOperation):
    "For PostGIS operators (e.g. `&&`, `~`)."

    def __init__(self, operator):
        # A pure SQL operator; no function call is involved.
        super(PostGISOperator, self).__init__(operator=operator)
class PostGISFunction(SpatialFunction):
    "For PostGIS function calls (e.g., `ST_Contains(table, geom)`)."

    def __init__(self, prefix, function, **kwargs):
        # `prefix` is '' or 'ST_' depending on the PostGIS version in use.
        super(PostGISFunction, self).__init__(prefix + function, **kwargs)
class PostGISFunctionParam(PostGISFunction):
    "For PostGIS functions that take another parameter (e.g. DWithin, Relate)."
    # The trailing %%s placeholder receives the extra SQL parameter.
    sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class PostGISDistance(PostGISFunction):
    "For PostGIS distance operations."
    dist_func = 'Distance'
    # Compares the computed distance against the supplied value (%%s).
    sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'

    def __init__(self, prefix, operator):
        super(PostGISDistance, self).__init__(prefix, self.dist_func,
                                              operator=operator)
class PostGISSpheroidDistance(PostGISFunction):
    "For PostGIS spherical distance operations (using the spheroid)."
    dist_func = 'distance_spheroid'
    # Two placeholders: the spheroid string and the comparison value.
    sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s) %(operator)s %%s'

    def __init__(self, prefix, operator):
        # An extra parameter in `end_subst` is needed for the spheroid string.
        super(PostGISSpheroidDistance, self).__init__(prefix, self.dist_func,
                                                      operator=operator)
class PostGISSphereDistance(PostGISDistance):
    "For PostGIS spherical distance operations."
    # Same SQL shape as PostGISDistance, only the function name differs.
    dist_func = 'distance_sphere'
class PostGISRelate(PostGISFunctionParam):
    "For PostGIS Relate(<geom>, <pattern>) calls."
    # DE-9IM intersection matrix: nine cells, each one of 0/1/2/T/F/*.
    pattern_regex = re.compile(r'^[012TF\*]{9}$')

    def __init__(self, prefix, pattern):
        if not self.pattern_regex.match(pattern):
            raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
        super(PostGISRelate, self).__init__(prefix, 'Relate')
class PostGISOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.models.sql.compiler'
name = 'postgis'
postgis = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
valid_aggregates = dict([(k, None) for k in
('Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union')])
Adapter = PostGISAdapter
Adaptor = Adapter # Backwards-compatibility alias.
def __init__(self, connection):
super(PostGISOperations, self).__init__(connection)
# Trying to get the PostGIS version because the function
# signatures will depend on the version used. The cost
# here is a database query to determine the version, which
# can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
# comprising user-supplied values for the major, minor, and
# subminor revision of PostGIS.
try:
if hasattr(settings, 'POSTGIS_VERSION'):
vtup = settings.POSTGIS_VERSION
if len(vtup) == 3:
# The user-supplied PostGIS version.
version = vtup
else:
# This was the old documented way, but it's stupid to
# include the string.
version = vtup[1:4]
else:
vtup = self.postgis_version_tuple()
version = vtup[1:]
# Getting the prefix -- even though we don't officially support
# PostGIS 1.2 anymore, keeping it anyway in case a prefix change
# for something else is necessary.
if version >= (1, 2, 2):
prefix = 'ST_'
else:
prefix = ''
self.geom_func_prefix = prefix
self.spatial_version = version
except DatabaseError:
raise ImproperlyConfigured('Cannot determine PostGIS version for database "%s". '
'GeoDjango requires at least PostGIS version 1.3. '
'Was the database created from a spatial database '
'template?' % self.connection.settings_dict['NAME']
)
# TODO: Raise helpful exceptions as they become known.
# PostGIS-specific operators. The commented descriptions of these
# operators come from Section 7.6 of the PostGIS 1.4 documentation.
self.geometry_operators = {
# The "&<" operator returns true if A's bounding box overlaps or
# is to the left of B's bounding box.
'overlaps_left' : PostGISOperator('&<'),
# The "&>" operator returns true if A's bounding box overlaps or
# is to the right of B's bounding box.
'overlaps_right' : PostGISOperator('&>'),
# The "<<" operator returns true if A's bounding box is strictly
# to the left of B's bounding box.
'left' : PostGISOperator('<<'),
# The ">>" operator returns true if A's bounding box is strictly
# to the right of B's bounding box.
'right' : PostGISOperator('>>'),
# The "&<|" operator returns true if A's bounding box overlaps or
# is below B's bounding box.
| 'overlaps_below' : PostGISOperator('&<|'),
# The "|&>" operator returns true if A's bounding box overlaps or
# is above B's bounding box.
'overlaps_above' : PostGISOperator('|&>'),
# The "<<|" operator returns true if A's bounding box is strictly
# below B | 's bounding box.
'strictly_below' : PostGISOperator('<<|'),
# The "|>>" operator returns true if A's bounding box is strictly
# above B's bounding box.
'strictly_above' : PostGISOperator('|>>'),
# The "~=" operator is the "same as" operator. It tests actual
# geometric equality of two features. So if A and B are the same feature,
# vertex-by-vertex, the operator returns true.
'same_as' : PostGISOperator('~='),
'exact' : PostGISOperator('~='),
# The "@" operator returns true if A's bounding box is completely contained
# by B's bounding box.
'contained' : PostGISOperator('@'),
# The "~" operator returns true if A's bounding box completely contains
# by B's bounding box.
'bbcontains' : PostGISOperator('~'),
# The "&&" operator returns true if A's bounding box overlaps
# B's bounding box.
'bboverlaps' : PostGISOperator('&&'),
}
self.geometry_functions = {
'equals' : PostGISFunction(prefix, 'Equals'),
'disjoint' : PostGISFunction(prefix, 'Disjoint'),
'touches' : PostGISFunction(prefix, 'Touches'),
'crosses' : PostGISFunction(prefix, 'Crosses'),
'within' : PostGISFunction(prefix, 'Within'),
'overlaps' : PostGISFunction(prefix, 'Overlaps'),
'contains' : PostGISFunction(prefix, 'Contains'),
'intersects' : PostGISFunction(prefix, 'Intersects'),
'relate' : (PostGISRelate, six.string_types),
'coveredby' : PostGISFunction(prefix, 'CoveredBy'),
'covers' : PostGISFunction(prefix, 'Covers'),
}
# Valid distan |
eneasz/RasHeating-Control | custom.py | Python | gpl-3.0 | 2,070 | 0.016425 | #!/usr/bin/python
#This is where you can set up your own schedules by providing the hour and minute when you want to start your task and a duration that says how long to run it for. This is divided by day of the week, so you can run a different schedule each day. It is possible to extend this with another column, for example a task, so that it can run different tasks for each setting.
# Weekly heating schedule: for each weekday, a list of start times
# (hour/minute, 24-hour clock) with run durations in minutes.
week={
'Monday':[{'hour': 10, 'minute': 19, 'duration': 460},
{'hour': 12, 'minute': 20, 'duration': 2},
{'hour': 18, 'minute': 0, 'duration': 362},
{'hour': 20, 'minute': 30, 'duration': 20},
],
'Tuesday':[{'hour': 8, 'minute': 6, 'duration': 360},
{'hour': 12, 'minute': 20, 'duration': 2},
{'hour': 15, 'minute': 6, 'duration': 2},
{'hour': 21, 'minute': 38, 'duration': 360},
],
'Wednesday':[{'hour': 10, 'minute': 56, 'duration': 360},
{'hour': 15, 'minute': 30, 'duration': 360},
],
'Thursday':[{'hour': 8, 'minute': 30, 'duration': 360},
{'hour': 12, 'minute': 20, 'duration': 2},
{'hour': 15, 'minute': 20, 'duration': 2},
{'hour': 22, 'minute': 18, 'duration': 320},
{'hour': 23, 'minute': 14, 'duration': 1},
],
'Friday':[{'hour': 6, 'minute': 20, 'duration': 360},
{'hour': 9, 'minute': 10, 'duration': 362},
{'hour': 16, 'minute': 35, 'duration': 360},
{'hour': 23, 'minute': 55, 'duration': 1},
],
'Saturday':[{'hour': 11, 'minute': 44, 'duration': 1},
{'hour': 12, 'minute': 3, 'duration': 2},
{'hour': 12, 'minute': 8, 'duration': 2},
{'hour': 15, 'minute': 53, 'duration': 461},
],
'Sunday':[{'hour': 7, 'minute': 58, 'duration': 550},
{'hour': 20, 'minute': 30, 'duration': 520},
]
}
|
makeralchemy/stop-action-movie-maker | delete_frame_set.py | Python | mit | 4,978 | 0.003415 | #!/usr/bin/env python
# delete_frame_set.py
""" deletes the frame files associated with a stop action movie """
import argparse
import os
FILE_TYPE = '.png' # images will be saved as .png files
COUNT_TYPE = '.count' # file type for the file containing the image count
READ_ONLY = 'r' # open files in read only mode
SUCCESS_CODE = 0 # successful processing
CMD_ERROR_CODE = 1 # error with command parameters
DFS_ERROR_CODE = 2 # error occurred in delete_frame_set
def debug(program_name, print_msgs, display_text):
    """
    print debug messages prefixed by the program name
    """
    # Only emit when the caller enabled debug output (the -d flag).
    # NOTE: Python 2 print statement -- this script targets Python 2.
    if print_msgs:
        print program_name + ':', display_text
    return
def delete_frame_set(target_movie_name, prog_name, print_dm):
    """
    Delete every frame file and the frame-count file for *target_movie_name*.

    Returns a (status_code, message) tuple: (SUCCESS_CODE, ...) on success,
    (DFS_ERROR_CODE, ...) if the count file or any frame file is missing.
    """
    # The count file records how many frames belong to the movie.
    target_count_file_name = target_movie_name + COUNT_TYPE
    debug(prog_name, print_dm, 'target count file name is ' + \
          target_count_file_name)

    # Guard clause: bail out early if the frame count file does not exist.
    if not os.path.isfile(target_count_file_name):
        error_message = 'can not open target frame count file ' + \
                        target_count_file_name
        return DFS_ERROR_CODE, error_message

    # Use a context manager so the file is closed even if the read fails.
    with open(target_count_file_name, READ_ONLY) as tcf:
        tcf_count = int(tcf.read())
    debug(prog_name, print_dm, 'target frame count is ' + str(tcf_count))

    # Delete the frame files <movie>.001<ext> ... <movie>.NNN<ext>.
    for i in range(1, tcf_count + 1):
        target_frame_sequence_num = str(i).zfill(3)
        target_file_name = target_movie_name + "." + \
                           target_frame_sequence_num + \
                           FILE_TYPE
        if not os.path.isfile(target_file_name):
            error_message = target_file_name + ' does not exist'
            return DFS_ERROR_CODE, error_message
        debug(prog_name, print_dm, "deleting " + target_file_name)
        os.remove(target_file_name)

    # delete the target count file
    debug(prog_name, print_dm, "deleting " + target_count_file_name)
    os.remove(target_count_file_name)

    success_message = 'frame set ' + target_movie_name + ' deleted'
    return SUCCESS_CODE, success_message
def main():
    """
    main python program
    """
    # extract the parameters specified on the command line
    parser = argparse.ArgumentParser(description='Delete frame set')
    parser.add_argument('target_movie_name',
                        help='name of the stop action movie frame set to delete')
    parser.add_argument('-x', '--deletewithoutconfirmation',
                        dest='delete_without_confirmation', action='store_true',
                        help='delete without asking for confirmation')
    parser.add_argument('-d', '--debug', dest='debug_switch',
                        action='store_true', help='display debugging messages')
    args = parser.parse_args()

    # remove the .py from the program name for use in debug messages
    prog_name = parser.prog.rsplit(".", 1)[0]
    print_dm = args.debug_switch

    # display the command parameters if debug is turned on
    debug(prog_name, print_dm, 'target movie name is ' + args.target_movie_name)
    debug(prog_name, print_dm, 'delete without confirmation is set to ' + \
          str(args.delete_without_confirmation))
    debug(prog_name, print_dm, 'debug is set to ' + str(print_dm))

    if args.delete_without_confirmation:
        delete_files = True
    else:
        # ask for confirmation to delete the files
        # NOTE: raw_input -- this script targets Python 2.
        delete_confirmation = raw_input('delete frame files for ' + \
                                        args.target_movie_name + \
                                        '? Type YES to confirm: ')
        delete_files = delete_confirmation == 'YES'

    if delete_files:
        # Status code is intentionally ignored; only the message is shown.
        _, return_message = delete_frame_set(args.target_movie_name,
                                             prog_name,
                                             print_dm)
        print return_message
    else:
        print "files will not be deleted"


# command line execution starts here
if __name__ == "__main__":
    main()
|
largetalk/tenbagger | capital/reactor/reactor/urls.py | Python | mit | 861 | 0 | """reactor URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
urlpatterns = [
    path('admin/', admin.site.urls),        # Django admin site
    path('cc/', include('cc.urls')),        # routes for the cc app
    path('quant/', include('quant.urls')),  # routes for the quant app
]
|
odeke-em/restAssured | auth/urls.py | Python | mit | 477 | 0.002096 | from | django.conf.urls import patterns, include, url
import views
urlpatterns = patterns('',
url(r'^logout', views.lo | gout, name='logout'),
url(r'^newUser', views.newUser, name='newUser'),
url(r'^appHandler', views.appHandler, name='appHandler'),
url(r'^passToLogin', views.loginByPassword, name='passToLogin'),
url(r'^signToLogin', views.loginBySignature, name='signToLogin'),
url(r'^authUserHandler', views.authUserHandler, name='authUserHandler'),
)
|
caioariede/sorl-thumbnail | tests/thumbnail_tests/tests.py | Python | bsd-3-clause | 29,267 | 0.001846 | # coding=utf-8
from __future__ import unicode_literals, division
import sys
import logging
from subprocess import Popen, PIPE
import shutil
import os
import re
from os.path import join as pjoin
from PIL import Image |
from django.utils.six import StringIO
from django.core import management
from django.core.files.storage import default_storage
from django.template.loader import render_to_string
from django.test.client import Client
from django.test import TestCase
from django.test.utils import override_settings
from sorl.thumbnail import default, get_thumbnail, delete
from sorl.thumbnail.conf import settings
from sorl.thumbn | ail.engines.pil_engine import Engine as PILEngine
from sorl.thumbnail.helpers import get_module_class, ThumbnailError
from sorl.thumbnail.images import ImageFile
from sorl.thumbnail.log import ThumbnailLogHandler
from sorl.thumbnail.parsers import parse_crop, parse_geometry
from sorl.thumbnail.templatetags.thumbnail import margin
from sorl.thumbnail.base import ThumbnailBackend
from .models import Item
from .storage import MockLoggingHandler
from .compat import unittest
from .utils import same_open_fd_count, override_custom_settings
# Shorthand aliases for the compat unittest decorators.
skip = unittest.skip
skipIf = unittest.skipIf

# Route sorl.thumbnail ERROR logs through a ThumbnailLogHandler so errors
# raised during thumbnailing are surfaced while the tests run.
handler = ThumbnailLogHandler()
handler.setLevel(logging.ERROR)
logging.getLogger('sorl.thumbnail').addHandler(handler)
class BaseStorageTestCase(unittest.TestCase):
    """Common fixture: writes a 100x100 grayscale image into MEDIA_ROOT and
    captures the mock storage's debug log for inspection by subclasses.

    Subclasses must define ``name`` (the image filename).
    """
    im = None  # ImageFile for the generated test image; set in setUp

    def setUp(self):
        os.makedirs(settings.MEDIA_ROOT)
        fn = pjoin(settings.MEDIA_ROOT, self.name)
        Image.new('L', (100, 100)).save(fn)
        self.im = ImageFile(self.name)
        # Capture the mock storage's debug messages in self.log so tests can
        # assert on the exact sequence of storage operations performed.
        logger = logging.getLogger('slog')
        logger.setLevel(logging.DEBUG)
        handler = MockLoggingHandler(level=logging.DEBUG)
        logger.addHandler(handler)
        self.log = handler.messages['debug']

    def tearDown(self):
        shutil.rmtree(settings.MEDIA_ROOT)
class StorageTestCase(BaseStorageTestCase):
    """Traces the storage calls made when creating and reusing a thumbnail.

    NOTE: the ``test_a_`` / ``test_b_`` / ``test_c_`` prefixes are
    deliberate — unittest runs methods in alphabetical order within a
    class, and test_b/test_c rely on the thumbnail created by test_a
    already being present in the key-value store.  Do not rename.
    """

    name = 'org.jpg'

    def test_a_new(self):
        # First thumbnail request: must open the source and save the result.
        get_thumbnail(self.im, '50x50')
        actions = [
            'open: org.jpg', # open the original for thumbnailing
            # save the file
            'save: test/cache/ca/1a/ca1afb02b7250c125d8830c0e8a492ad.jpg',
            # check for filename
            'get_available_name: test/cache/ca/1a/ca1afb02b7250c125d8830c0e8a492ad.jpg',
            # called by get_available_name
            'exists: test/cache/ca/1a/ca1afb02b7250c125d8830c0e8a492ad.jpg',
        ]
        self.assertEqual(self.log, actions)

    def test_b_cached(self):
        # Same request again: served from the kvstore, no storage traffic.
        get_thumbnail(self.im, '50x50')
        self.assertEqual(self.log, []) # now this should all be in cache

    def test_c_safe_methods(self):
        # Accessing url/x/y on a cached image must not hit the storage.
        im = default.kvstore.get(self.im)
        im.url, im.x, im.y
        self.assertEqual(self.log, [])
class AlternativeResolutionsTest(BaseStorageTestCase):
    """Checks that high-DPI (@1.5x / @2x) thumbnail variants are generated.

    setUp/tearDown mutate the global THUMBNAIL_ALTERNATIVE_RESOLUTIONS
    setting, so the base-class fixture must run between the two mutations
    (hence the explicit super() calls on either side).
    """

    name = 'retina.jpg'

    def setUp(self):
        # Enable retina variants before the base fixture creates the image.
        settings.THUMBNAIL_ALTERNATIVE_RESOLUTIONS = [1.5, 2]
        super(AlternativeResolutionsTest, self).setUp()

    def tearDown(self):
        super(AlternativeResolutionsTest, self).tearDown()
        # Restore the default so other test classes are unaffected.
        settings.THUMBNAIL_ALTERNATIVE_RESOLUTIONS = []

    def test_retina(self):
        get_thumbnail(self.im, '50x50')
        actions = [
            # save regular resolution, same as in StorageTestCase
            'open: retina.jpg',
            'save: test/cache/19/10/1910dc350bbe9ee55fd9d8d3d5e38e19.jpg',
            'get_available_name: test/cache/19/10/1910dc350bbe9ee55fd9d8d3d5e38e19.jpg',
            'exists: test/cache/19/10/1910dc350bbe9ee55fd9d8d3d5e38e19.jpg',
            # save the 1.5x resolution version
            'save: test/cache/19/10/1910dc350bbe9ee55fd9d8d3d5e38e19@1.5x.jpg',
            'get_available_name: test/cache/19/10/1910dc350bbe9ee55fd9d8d3d5e38e19@1.5x.jpg',
            'exists: test/cache/19/10/1910dc350bbe9ee55fd9d8d3d5e38e19@1.5x.jpg',
            # save the 2x resolution version
            'save: test/cache/19/10/1910dc350bbe9ee55fd9d8d3d5e38e19@2x.jpg',
            'get_available_name: test/cache/19/10/1910dc350bbe9ee55fd9d8d3d5e38e19@2x.jpg',
            'exists: test/cache/19/10/1910dc350bbe9ee55fd9d8d3d5e38e19@2x.jpg'
        ]
        self.assertEqual(self.log, actions)
        # The @1.5x file must really be 50 * 1.5 = 75px square.
        with open(pjoin(settings.MEDIA_ROOT, 'test/cache/19/10/1910dc350bbe9ee55fd9d8d3d5e38e19@1.5x.jpg')) as fp:
            engine = PILEngine()
            self.assertEqual(engine.get_image_size(engine.get_image(ImageFile(file_=fp))), (75, 75))
class UrlStorageTestCase(unittest.TestCase):
    """Checks URL normalization of non-ASCII filenames by UrlStorage."""

    def test_encode_utf8_filenames(self):
        storage_cls = get_module_class('sorl.thumbnail.images.UrlStorage')
        normalized = storage_cls().normalize_url(
            'El jovencito emponzoñado de whisky, qué figura exhibe')
        self.assertEqual(
            normalized,
            'El%20jovencito%20emponzoado%20de%20whisky%2C%20qu%20figura%20exhibe')
class ParsersTestCase(unittest.TestCase):
    """Exercises the crop-specification and geometry-string parsers."""

    def test_alias_crop(self):
        self.assertEqual(parse_crop('center', (500, 500), (400, 400)), (50, 50))
        self.assertEqual(parse_crop('right', (500, 500), (400, 400)), (100, 50))

    def test_percent_crop(self):
        self.assertEqual(parse_crop('50% 0%', (500, 500), (400, 400)), (50, 0))
        self.assertEqual(parse_crop('10% 80%', (500, 500), (400, 400)), (10, 80))

    def test_px_crop(self):
        self.assertEqual(parse_crop('200px 33px', (500, 500), (400, 400)), (100, 33))

    def test_bad_crop(self):
        # Negative pixel offsets are rejected by the parser.
        self.assertRaises(ThumbnailError, parse_crop, '-200px', (500, 500), (400, 400))

    def test_geometry(self):
        for spec, expected in (('222x30', (222, 30)),
                               ('222', (222, None)),
                               ('x999', (None, 999))):
            self.assertEqual(parse_geometry(spec), expected)
class SimpleTestCaseBase(unittest.TestCase):
    """Base fixture that builds backend/engine/kvstore plus sample images.

    setUp instantiates the configured thumbnail backend, engine and
    key-value store, writes three grayscale JPEGs of known sizes into
    MEDIA_ROOT, and registers each as an Item row so tests can fetch them
    through the ORM.
    """

    # Populated in setUp() from the THUMBNAIL_* settings.
    backend = None
    engine = None
    kvstore = None

    def setUp(self):
        self.backend = get_module_class(settings.THUMBNAIL_BACKEND)()
        self.engine = get_module_class(settings.THUMBNAIL_ENGINE)()
        self.kvstore = get_module_class(settings.THUMBNAIL_KVSTORE)()
        if not os.path.exists(settings.MEDIA_ROOT):
            os.makedirs(settings.MEDIA_ROOT)
        # Source images named after their dimensions, e.g. '500x500.jpg'.
        dims = [
            (500, 500),
            (100, 100),
            (200, 100),
        ]
        for dim in dims:
            name = '%sx%s.jpg' % dim
            fn = pjoin(settings.MEDIA_ROOT, name)
            im = Image.new('L', dim)
            im.save(fn)
            # get_or_create: setUp may run many times against the same DB.
            Item.objects.get_or_create(image=name)

    def tearDown(self):
        shutil.rmtree(settings.MEDIA_ROOT)
class SimpleTestCase(SimpleTestCaseBase):
def test_simple(self):
item = Item.objects.get(image='500x500.jpg')
t = self.backend.get_thumbnail(item.image, '400x300', crop='center')
self.assertEqual(t.x, 400)
self.assertEqual(t.y, 300)
t = self.backend.get_thumbnail(item.image, '1200x900', crop='13% 89%')
self.assertEqual(t.x, 1200)
self.assertEqual(t.y, 900)
def test_upscale(self):
item = Item.objects.get(image='100x100.jpg')
t = self.backend.get_thumbnail(item.image, '400x300', upscale=False)
self.assertEqual(t.x, 100)
self.assertEqual(t.y, 100)
t = self.backend.get_thumbnail(item.image, '400x300', upscale=True)
self.assertEqual(t.x, 300)
self.assertEqual(t.y, 300)
def test_upscale_and_crop(self):
item = Item.objects.get(image='200x100.jpg')
t = self.backend.get_thumbnail(item.image, '400x300', crop='center', upscale=False)
self.assertEqual(t.x, 200)
self.assertEqual(t.y, 100)
t = self.backend.get_thumbnail(item.image, '400x300', crop='center', upscale=True)
self.assertEqual(t.x, 400)
self.assertEqual(t.y, 300)
def test_kvstore(self):
im = ImageFile(Item.objects.get(image='500x500.jpg').image)
self.kvstore.delete_thumbnails(im)
th1 = self.backend.get_thumbnail(im, '50')
th2 = self.backend.get_thumbnail(im, 'x50')
t |
chromakey/django-salesforce | salesforce/backend/subselect.py | Python | mit | 4,158 | 0.006013 | import re
from unittest import TestCase
def mark_quoted_strings(sql):
    """Mark all quoted strings in the SOQL by '@' and get them as params,
    with respect to all escaped backslashes and quotes.

    Returns a tuple ``(marked_sql, params)``: every single-quoted literal
    in ``sql`` is replaced by the placeholder character '@' and ``params``
    lists the literal contents (with backslash escapes resolved) in order
    of appearance.

    Raises AssertionError when the text outside the quoted strings
    contains characters not expected in a SOQL skeleton (the inverse
    function subst_quoted_strings relies on '@' not appearing there).
    """
    # A quoted literal: runs of ordinary chars with \\ or \' escape pairs.
    pm_pattern = re.compile(r"'[^\\']*(?:\\[\\'][^\\']*)*'")
    # One escape pair inside a literal; group 1 is the escaped character.
    bs_pattern = re.compile(r"\\([\\'])")
    # Whitelist for text outside the literals.  Raw string: \w and \s must
    # reach the regex engine, and in a plain string they are invalid string
    # escapes (SyntaxWarning on modern Python, an error in the future).
    out_pattern = re.compile(r"^[-!()*+,.:<=>\w\s]*$")
    start = 0
    out = []
    params = []
    for match in pm_pattern.finditer(sql):
        out.append(sql[start:match.start()])
        assert out_pattern.match(sql[start:match.start()])
        # Strip the surrounding quotes and unescape the body.
        params.append(bs_pattern.sub(r'\1', sql[match.start() + 1:match.end() - 1]))
        start = match.end()
    out.append(sql[start:])
    assert out_pattern.match(sql[start:])
    return '@'.join(out), params
def subst_quoted_strings(sql, params):
    """Reverse operation to mark_quoted_strings - substitutes '@' by params.

    Each '@' placeholder in *sql* is replaced by the corresponding entry
    of *params*, re-quoted and with backslashes and apostrophes escaped
    again, so that round-tripping through mark_quoted_strings is exact.
    """
    pieces = sql.split('@')
    assert len(pieces) == len(params) + 1
    # Re-escape and re-quote every parameter up front.
    quoted = ["'%s'" % value.replace('\\', '\\\\').replace("'", "\\'")
              for value in params]
    result = [pieces[0]]
    for literal, tail in zip(quoted, pieces[1:]):
        result.append(literal)
        result.append(tail)
    return ''.join(result)
def find_closing_parenthesis(sql, startpos):
    """Find the pair of opening and closing parentheses.

    Starts search at the position startpos.
    Returns tuple of positions (opening, closing) if search succeeds,
    otherwise None.  ``closing`` is the index just past the ')'.
    Raises AssertionError if an unmatched ')' is met at depth zero.
    """
    paren_re = re.compile(r'[()]')
    depth = 0
    opening = 0
    for m in paren_re.finditer(sql, startpos):
        if m.group() == '(':
            if depth == 0:
                opening = m.start()
            depth += 1
        else:  # ')'
            assert depth > 0
            depth -= 1
            if depth == 0:
                return opening, m.end()
    # Fell off the end without closing the first '(' -> implicit None.
def transform_except_subselect(sql, func):
    """Call a func on every part of SOQL query except nested (SELECT ...)"""
    pieces = []
    cursor = 0
    while True:
        select_at = sql.find('(SELECT', cursor)
        if select_at < 0:
            break
        # Transform the text before the sub-select ...
        pieces.append(func(sql[cursor:select_at]))
        # ... and copy the whole parenthesized sub-select verbatim
        # (nested sub-selects inside it are skipped along with it).
        opening, closing = find_closing_parenthesis(sql, select_at)
        pieces.append(sql[opening:closing])
        cursor = closing
    pieces.append(func(sql[cursor:]))
    return ''.join(pieces)
class TestSubSelectSearch(TestCase):
    """Unit tests for parenthesis matching and sub-select skipping."""

    def test_parenthesis(self):
        sql = '() (() (())) ()'
        self.assertEqual(find_closing_parenthesis(sql, 0), (0, 2))
        self.assertEqual(find_closing_parenthesis(sql, 2), (3, 12))
        self.assertEqual(find_closing_parenthesis(sql, 3), (3, 12))
        self.assertEqual(find_closing_parenthesis(sql, 6), (7, 11))
        self.assertEqual(find_closing_parenthesis(sql, 13), (13, 15))
        # Starting inside '()' hits an unmatched ')' first.
        self.assertRaises(AssertionError, find_closing_parenthesis, sql, 1)

    def test_subselect(self):
        func = lambda fragment: '*transfomed*'
        sql = "SELECT a, (SELECT x FROM y) FROM b WHERE (c IN (SELECT p FROM q WHERE r = %s) AND c = %s)"
        expected = "*transfomed*(SELECT x FROM y)*transfomed*(SELECT p FROM q WHERE r = %s)*transfomed*"
        self.assertEqual(transform_except_subselect(sql, func), expected)

    def test_nested_subselect(self):
        func = lambda fragment: '*transfomed*'
        sql = "SELECT a, (SELECT x, (SELECT p FROM q) FROM y) FROM b"
        expected = "*transfomed*(SELECT x, (SELECT p FROM q) FROM y)*transfomed*"
        self.assertEqual(transform_except_subselect(sql, func), expected)
class ReplaceQuotedStringsTest(TestCase):
    """Round-trip tests for mark_quoted_strings / subst_quoted_strings."""

    def test_subst_quoted_strings(self):
        cases = [
            ("where x=''", ("where x=@", [''])),
            ("a'bc'd", ("a@d", ['bc'])),
            (r"a'bc\\'d", ("a@d", ['bc\\'])),
            (r"a'\'\\'b''''", ("a@b@@", ['\'\\', '', ''])),
        ]
        for sql, expected in cases:
            marked = mark_quoted_strings(sql)
            self.assertEqual(marked, expected)
            # Substituting back must reproduce the original SOQL exactly.
            self.assertEqual(subst_quoted_strings(*marked), sql)
        # Text outside the literals must pass the whitelist check.
        for bad in (r"a'bc'\\d", "a'bc''d"):
            self.assertRaises(AssertionError, mark_quoted_strings, bad)
|
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/appflow/appflowcollector.py | Python | apache-2.0 | 10,726 | 0.035614 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appflowcollector(base_resource) :
""" Configuration for AppFlow collector resource. """
def __init__(self) :
self._name = ""
self._ipaddress = ""
self._port = 0
self._netprofile = ""
self._newname = ""
self.___count = 0
@property
def name(self) :
"""Name for the collector. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at
(@), equals (=), and hyphen (-) characters.
Only four collectors can be configured.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow collector" or 'my appflow collector').<br/>Minimum length = 1<br/>Maximum length = 127.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the collector. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at
(@), equals (=), and hyphen (-) characters.
Only four collectors can be configured.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow collector" or 'my appflow collector').<br/>Minimum length = 1<br/>Maximum length = 127
"""
try :
self._name = name
except Exception as e:
raise e
@property
def ipaddress(self) :
"""IPv4 address of the collector.
"""
try :
return self._ipaddress
except Exception as e:
raise e
@ipaddress.setter
def ipaddress(self, ipaddress) :
"""IPv4 address of the collector.
"""
try :
self._ipaddress = ipaddress
except Exception as e:
raise e
@property
def port(self) :
"""UDP port on which the collector listens.<br/>Default value: 4739.
"""
try :
return self._port
except Exception as e:
raise e
@port.setter
def port(self, port) :
"""UDP port on which the collector listens.<br/>Default value: 4739
"""
try :
self._port = port
except Exception as e:
raise e
@property
def netprofile(self) :
"""Netprofile to associate with the collector. The IP address defined in the profile is used as the source IP address for AppFlow traffic for this collector. If you do not set this parameter, the NetScaler IP (NSIP) address is used as the source IP address.<br/>Maximum length = 128.
"""
try :
return self._netprofile
except Exception as e:
raise e
@netprofile.setter
def netprofile(self, netprofile) :
"""Netprofile to associate with the collector. The IP address defined in the profile is used as the source IP address for AppFlow traffic for this collector. If you do not set this parameter, the NetScaler IP (NSIP) address is used as the source IP address.<br/>Maximum length = 128
"""
try :
self._netprofile = netprofile
except Exception as e:
raise e
@property
def newname(self) :
"""New name for the collector. Must begin with an ASCII alphabetic or underscore (_) character, and must
contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at(@), equals (=), and hyphen (-) characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow coll" or 'my appflow coll').<br/>Minimum length = 1.
"""
try :
return self._newname
except Exception as e:
raise e
@newname.setter
def newname(self, newname) :
"""New name for the collector. Must begin with an ASCII alphabetic or underscore (_) character, and must
contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at(@), equals (=), and hyphen (-) characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow coll" or 'my appflow coll').<br/>Minimum length = 1
"""
try :
self._newname = newname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appflowcollector_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appflowcollecto | r
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try | :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add appflowcollector.
"""
try :
if type(resource) is not list :
addresource = appflowcollector()
addresource.name = resource.name
addresource.ipaddress = resource.ipaddress
addresource.port = resource.port
addresource.netprofile = resource.netprofile
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ appflowcollector() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].ipaddress = resource[i].ipaddress
addresources[i].port = resource[i].port
addresources[i].netprofile = resource[i].netprofile
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete appflowcollector.
"""
try :
if type(resource) is not list :
deleteresource = appflowcollector()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ appflowcollector() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ appflowcollector() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def rename(cls, client, resource, new_name) :
""" Use this API to rename a appflowcollector resource.
"""
try :
renameresource = appflowcollector()
if type(res |
ebt-hpc/cca | cca/scripts/outline_for_survey_fortran.py | Python | apache-2.0 | 29,483 | 0.005054 | #!/usr/bin/env python3
'''
A script for outlining Fortran programs
Copyright 2013-2018 RIKEN
Copyright 2018-2020 Chiba Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__author__ = 'Masatomo Hashimoto <m.hashimoto@stair.center>'
import os
import pathsetup
import dp
from cca_config import PROJECTS_DIR
import sparql
from factutils.entity import SourceCodeEntity
from sourcecode_metrics_for_survey_fortran import get_proj_list, get_lver, Metrics
import sourcecode_metrics_for_survey_fortran as metrics
from search_topic_for_survey import search
from siteconf import GIT_REPO_BASE
from virtuoso import VIRTUOSO_PW, VIRTUOSO_PORT
from outline_for_survey_base import QN_SEP, remove_leading_digits, Exit, norm_callee_name
from outline_for_survey_base import NodeBase, OutlineBase, tbl_get_list, tbl_get_set, tbl_get_dict
from outlining_queries_fortran import OMITTED, SUBPROGS, LOOPS, CALLS, TYPE_TBL, QUERY_TBL, get_root_entities
###
METRICS_ROW_HEADER = list(metrics.abbrv_tbl.keys()) + metrics.META_KEYS + ['nid','root_file']
class Node(NodeBase):
    """Outline-tree node for a Fortran program unit, loop, call or directive.

    Extends the generic NodeBase with Fortran-specific attributes: the
    enclosing program name (``prog``), the subprogram name (``sub``), and
    the (virtual) program-unit names ``pu_name``/``vpu_name``.
    """

    def __init__(self, ver, loc, uri, cat='',
                 prog=None, sub=None,
                 callee_name=None, pu_name=None, vpu_name=None,
                 all_sps=False):
        # The category sets (SUBPROGS/CALLS/LOOPS) are handed to the base
        # class so its generic classification logic uses the Fortran tables.
        NodeBase.__init__(self, ver, loc, uri, cat, callee_name, all_sps=all_sps,
                          SUBPROGS=SUBPROGS, CALLS=CALLS, LOOPS=LOOPS)
        self.prog = prog          # enclosing program name (fallback for get_pu)
        self.sub = sub            # subprogram name; returned by get_name()
        self.pu_name = pu_name    # program-unit name
        self.vpu_name = vpu_name  # virtual program-unit name (optional)

    def __str__(self):
        """Render as 'cat[lines:sub:pu:basename]' for logging/debugging."""
        pu = self.get_pu()
        sl = self.get_start_line()
        el = self.get_end_line()
        if sl == el:
            r = '%d' % sl
        else:
            r = '%d-%d' % (sl, el)
        s = '%s[%s:%s:%s:%s]' % (self.cat, r, self.sub, pu, os.path.basename(self.loc))
        return s

    def get_container(self): # subprog or main
        """Return (and cache) the subprogram or main-program containing self.

        While walking up through a single parent, the chain of ancestors
        inside the container is accumulated in ``_parents_in_container``.
        Multiple parents are tolerated only for preprocessor ('pp')
        categories; otherwise a warning is emitted and the cached value
        (possibly None) is returned unchanged.
        """
        if not self._container:
            if self.cats & SUBPROGS or 'main-program' in self.cats:
                self._container = self
            else:
                nparents = len(self._parents)
                if nparents == 1:
                    p = list(self._parents)[0]
                    self._container = p.get_container()
                    self._parents_in_container.add(p)
                    self._parents_in_container.update(p.get_parents_in_container())
                elif nparents > 1:
                    if 'pp' not in [TYPE_TBL.get(c, None) for c in self.cats]:
                        pstr = ', '.join([str(p) for p in self._parents])
                        self.warning('multiple parents:\n%s:\nparents=[%s]' % (self, pstr))
        return self._container

    def score_of_chain(self, chain):
        """Score a call chain: sum of enclosing-loop counts at each call node.

        Only chains ending at a main program get a score; anything else
        (including an empty chain) scores -1.
        """
        if chain:
            if 'main-program' in chain[-1].cats:
                score = 0
                for nd in chain:
                    if nd.cats & CALLS:
                        score += nd.count_parent_loops_in_container()
                self.debug('%d <- [%s]' % (score, ';'.join([str(x) for x in chain])))
            else:
                score = -1
        else:
            score = -1
        return score

    def is_main(self):
        # True when this node represents the Fortran main program.
        return 'main-program' in self.cats

    def get_pu(self):
        """Return the program-unit name, falling back to the program name."""
        pu = self.pu_name
        if not pu:
            pu = self.prog
        return pu

    def get_vpu(self):
        # Virtual program-unit name (may be None).
        return self.vpu_name

    def get_type(self):
        """Classify the node by its categories.

        Directive prefixes (omp-/acc-/dec-/xlf-/ocl-) win over the generic
        TYPE_TBL mapping; the first category that yields a type is used.
        """
        ty = None
        for c in self.cats:
            if c.startswith('omp-'):
                ty = 'omp'
                break
            elif c.startswith('acc-'):
                ty = 'acc'
                break
            elif c.startswith('dec-'):
                ty = 'dec'
                break
            elif c.startswith('xlf-'):
                ty = 'xlf'
                break
            elif c.startswith('ocl-'):
                ty = 'ocl'
                break
            # Not a directive: fall back to the category->type table.
            ty = TYPE_TBL.get(c, None)
            if ty:
                break
        return ty

    def is_construct(self):
        # True when any category names a Fortran construct ('*-construct').
        b = False
        for c in self.cats:
            if c.endswith('-construct'):
                b = True
                break
        return b

    def is_block(self):
        # True when any category names a block ('*-block').
        b = False
        for c in self.cats:
            if c.endswith('-block'):
                b = True
                break
        return b

    def get_block_cat(self):
        # First '*-block' category, or None when the node is not a block.
        cat = None
        for c in self.cats:
            if c.endswith('-block'):
                cat = c
                break
        return cat

    def is_constr_head(self, child):
        """True when *child* is the header line of this construct
        (same start line, and the child is neither pp nor a construct/block).
        """
        b = all([self.is_construct(),
                 self.get_start_line() == child.get_start_line(),
                 not child.is_pp(),
                 not child.is_construct(),
                 not child.is_block()])
        return b

    def is_constr_tail(self, child):
        """True when *child* is the closing line of this construct
        (same end line, and the child is not a construct/block itself).
        """
        b = all([self.is_construct(),
                 self.get_end_line() == child.get_end_line(),
                 not child.is_construct(),
                 not child.is_block()])
        return b

    def set_extra(self, d):
        # Attach the virtual program-unit name to an output dict, if any.
        vpu = self.get_vpu()
        if vpu:
            d['vpu'] = vpu

    def get_name(self):
        # The node's display name is its subprogram name.
        return self.sub

    def check_children(self, children_l):
        """Strip a leading construct-header child and a trailing
        construct-tail child from *children_l*, leaving only the body.
        """
        if children_l:
            if self.is_constr_head(children_l[0]):
                children_l = children_l[1:]
        if children_l:
            if self.is_constr_tail(children_l[-1]):
                children_l = children_l[:-1]
        return children_l
class Outline(OutlineBase):
| def __init__(self,
proj_id,
commits=['HEAD'],
method='odbc',
pw=VIRTUOSO_PW,
port=VIRTUOSO_PORT,
gitrepo=GIT_REPO_BASE,
proj_dir=PROJECTS_DIR,
ver='unknown',
simple_layout=False,
all_sps=False):
OutlineBase.__init__(self, proj_id, commits, method, pw, port, gitrepo, proj_dir, ver, simple_layout, all_sps,
SUBPROGS=SUBPROGS, CALLS=CALLS, get_root_entities=get_root_entities,
METRICS_ROW_HEADER=METRICS_ROW_HEADER, add_root=True)
self._qspn_tbl = {} # (ver * loc * start_line) -> name list
def setup_aa_tbl(self): # assumes self._node_tbl
if not self._aa_tbl:
self.message('setting up array reference table...')
tbl = {}
query = QUERY_TBL['aa_in_loop'] % {'proj':self._graph_uri}
for qvs, row in self._sparql.query(query):
ver = row['ver']
loc = row['loc']
loop = row['loop']
pn = row['pn']
pu_name = row.get('pu_name', None)
vpu_name = row.get('vpu_name', None)
dtor = row.get('dtor', None)
lver = get_lver(ver)
loop_node = self.get_node(Node(ver, loc, loop,
cat='do-construct',
pu_name=pu_name,
vpu_name=vpu_name))
pns = tbl_get_list(tbl, loop_node.get_mkey())
pn_ent = SourceCodeEntity(uri=pn)
r = pn_ent.get_range()
st = {'line':r.get_start_line(),'ch':r.get_start_col()}
ed = {'line':r.get_end_line(),'ch':r.get_end_col()}
d = {'start':st,'end':ed}
if dtor:
dtor_ent = SourceCodeEntity(uri=dtor)
dtor_fid = dtor_ent.get_file_id()
df = {'line':dtor_ent.get_ra |
bolozna/EDENetworks | netpython/dialogues.py | Python | gpl-2.0 | 64,787 | 0.029126 | """
EDENetworks, a genetic network analyzer
Copyright (C) 2011 Aalto University
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
"""
Collection of dialogue windows
"""
import pynet,os,netio,netext,visuals,eden,transforms
import random
import heapq
import string
import percolator
import shutil
from math import ceil
from Tkinter import *
import tkMessageBox
#from pylab import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg,NavigationToolbar2TkAgg
# NEW DIALOGUE WINDOWS / JS / MAY-JUNE 09
class MySimpleDialog(Toplevel):
    '''Master class for a dialog popup window.
    Functions body() and apply() to be overridden
    with whatever the dialog should be doing.'''

    def __init__(self,parent,title=None):
        # Modal Tk dialog: builds the body, grabs input, and blocks in
        # wait_window() until ok()/cancel() destroys the window.
        Toplevel.__init__(self,parent)
        self.transient(parent)
        self.title(title)
        self.parent=parent
        # Subclasses store their outcome here; read by the caller after
        # the constructor returns.
        self.result=None
        body=Frame(self)
        # body() returns the widget that should receive initial focus.
        self.initial_focus=self.body(self,body)
        body.pack(padx=5,pady=5)
        self.buttonbox()
        self.grab_set()
        # NOTE(review): calling self.initial_focus(self) when body()
        # returned nothing looks wrong (it would call None) — presumably
        # meant to fall back to focusing the dialog itself; confirm.
        if not self.initial_focus:
            self.initial_focus(self)
        self.protocol("WM_DELETE_WINDOW",self.cancel)
        # Place the dialog slightly offset from the parent window.
        self.geometry("+%d+%d" % (parent.winfo_rootx()+50,parent.winfo_rooty()+50))
        self.initial_focus.focus_set()
        self.wait_window(self)

    def body(self,masterclass,masterwindow):
        # Override: build dialog widgets; return the initial-focus widget.
        pass

    def buttonbox(self):
        """OK and Cancel buttons"""
        box=Frame(self)
        w=Button(box,text="OK",width=10,command=self.ok,default=ACTIVE)
        w.pack(side=LEFT,padx=5,pady=5)
        w=Button(box,text="Cancel",width=10,command=self.cancel)
        w.pack(side=LEFT,padx=5,pady=5)
        self.bind("<Return>",self.ok)
        # NOTE(review): "<Escape" is missing the closing '>' — the Escape
        # binding is presumably broken; verify against Tk event syntax.
        self.bind("<Escape",self.cancel)
        box.pack()

    def ok(self,event=None):
        # Validate, hide the window, apply the result, then tear down.
        if not self.validate():
            self.initial_focus.focus_set()
            return
        self.withdraw()
        self.update_idletasks()
        self.applyme()
        self.cancel()

    def cancel(self,event=None):
        # Return focus to the parent and destroy the dialog (ends wait_window).
        self.parent.focus_set()
        self.destroy()

    def validate(self):
        # Override: return non-zero when the dialog input is acceptable.
        return 1

    def applyme(self):
        # Override: compute self.result from the widgets' state.
        pass

    def displayBusyCursor(self):
        # Show a busy cursor on the parent; reset once the UI is idle.
        self.parent.configure(cursor='watch')
        self.parent.update()
        self.parent.after_idle(self.removeBusyCursor)

    def removeBusyCursor(self):
        self.parent.configure(cursor='arrow')
class WLogbinDialog(MySimpleDialog):
    """Asks for the number of bins for log binning
    and allows linear bins for 1...10"""

    def __init__(self,parent,title=None):
        # Re-implements the modal-dialog boilerplate from MySimpleDialog
        # (rather than delegating) so it can add colors and Tk variables.
        Toplevel.__init__(self,parent)
        self.configure(bg='Gray80')
        self.transient(parent)
        # NOTE(review): assigning `self.title=title` shadows the Toplevel
        # title() *method* with a string — presumably `self.title(title)`
        # was intended; confirm before relying on the window title here.
        if title:
            self.title=title
        self.parent=parent
        self.result=None
        # Tk variables backing the two inputs read in applyme().
        self.linfirst=IntVar()     # checkbox: linear bins for 1..10
        self.numbins=StringVar()   # entry: number of bins
        body=Frame(self,bg='Gray80')
        self.initial_focus=self.body(self,body)
        body.pack(padx=5,pady=5)
        self.buttonbox()
        self.grab_set()
        if not self.initial_focus:
            self.initial_focus(self)
        self.protocol("WM_DELETE_WINDOW",self.cancel)
        self.geometry("+%d+%d" % (parent.winfo_rootx()+50,parent.winfo_rooty()+50))
        self.initial_focus.focus_set()
        self.wait_window(self)

    def body(self,masterclass,masterwindow):
        # Checkbox + labeled entry; default bin count is 30.
        self.b1=Checkbutton(masterwindow,text='Use linear bins for 1..10',variable=masterclass.linfirst,state=ACTIVE,bg='Gray80')
        self.b1.grid(row=0,column=0,columnspan=2)
        Label(masterwindow,text='Number of bins:',bg='Gray80').grid(row=1,column=0)
        self.c1=Entry(masterwindow,textvariable=masterclass.numbins,bg='Gray95')
        masterclass.numbins.set('30')
        self.c1.grid(row=1,column=1)
        # The entry gets initial focus.
        return self.c1

    def applyme(self):
        # result = [use_linear_bins_flag, number_of_bins]
        self.result=[self.linfirst.get(),float(self.numbins.get())]
class LoadMatrixDialog(MySimpleDialog):
    """Modal dialog asking which distance measure produced a matrix and
    how clones were handled; result is [measuretype, clones]."""

    def __init__(self,parent,title='Please provide information:'):
        # Same modal boilerplate as MySimpleDialog, with two radio groups.
        Toplevel.__init__(self,parent)
        # self.configure(bg='Gray80')
        # self.transient(parent)
        self.title(title)
        self.parent=parent
        self.result=None
        # Tk variables backing the two radio-button groups.
        self.clones=StringVar()        # clone handling: collapsed/included/unknown
        self.measuretype=StringVar()   # distance measure: nsa/lm/ap/hybrid/other
        body=Frame(self)
        self.initial_focus=self.body(self,body)
        body.pack(padx=5,pady=5)
        self.buttonbox()
        self.grab_set()
        if not self.initial_focus:
            self.initial_focus(self)
        self.protocol("WM_DELETE_WINDOW",self.cancel)
        self.geometry("+%d+%d" % (parent.winfo_rootx()+50,parent.winfo_rooty()+50))
        self.initial_focus.focus_set()
        self.wait_window(self)

    def body(self,masterclass,masterwindow):
        # First group: which distance measure was used.
        self.c1=Label(masterwindow,text='What distance measure has been used?',bg='DarkOliveGreen2',anchor=W)
        self.c1.grid(row=0,column=0)
        r1=Radiobutton(masterwindow,text='Non-shared alleles',value='nsa',variable=masterclass.measuretype)
        r2=Radiobutton(masterwindow,text='Linear Manhattan',value='lm',variable=masterclass.measuretype)
        r3=Radiobutton(masterwindow,text='Allele parsimony',value='ap',variable=masterclass.measuretype)
        r4=Radiobutton(masterwindow,text='Hybrid',value="hybrid",variable=masterclass.measuretype)
        r5=Radiobutton(masterwindow,text='Other',value="other",variable=masterclass.measuretype)
        r1.grid(row=1,column=0,sticky=W)
        r2.grid(row=2,column=0,sticky=W)
        r3.grid(row=3,column=0,sticky=W)
        r4.grid(row=4,column=0,sticky=W)
        r5.grid(row=5,column=0,sticky=W)
        # Second group: how clones were handled in the data.
        self.c2=Label(masterwindow,text='How have clones been handled?',bg='DarkOliveGreen2',anchor=W)
        self.c2.grid(row=6,column=0)
        r6=Radiobutton(masterwindow,text='Removed',value='collapsed',variable=masterclass.clones)
        r7=Radiobutton(masterwindow,text='Kept',value='included',variable=masterclass.clones)
        r8=Radiobutton(masterwindow,text='Unknown',value='unknown',variable=masterclass.clones)
        r6.grid(row=7,column=0,sticky=W)
        r7.grid(row=8,column=0,sticky=W)
        r8.grid(row=9,column=0,sticky=W)
        # Defaults: 'other' measure, 'unknown' clone handling.
        masterclass.measuretype.set('other')
        masterclass.clones.set('unknown')
        return self.c1

    def applyme(self):
        # result = [measure_type, clone_handling]
        self.result=[self.measuretype.get(),self.clones.get()]
class MetaHelpWindow(MySimpleDialog):
def __init__(self,parent,title=None,datatype='msat'):
Toplevel.__init__(self,parent)
self.configure(bg='Gray80')
self.transient(parent)
self.datatype=datatype
if title:
self.title=title
self.parent=parent
self.result=None
self.linfirst=IntVar()
self.numbins=StringVar()
body=Frame(self,bg='Gray80')
self.initial_focus=self.body(self,body)
body.pack(padx=5,pady=5)
self.buttonbox()
self.grab_set()
if not self.initial_focus:
self.initial_focus(self)
self.protocol("WM_DELETE_WINDOW",self.cancel)
self.geometry("+%d+%d" % (parent.winfo_rootx()+50,parent.winfo_rooty()+50))
self.initial_focus.focus_set()
self.wait_window(self)
def body(self,masterclass,masterwindow):
self.text=Text(self,bg='Gray90')
self.text.pack(expand=YES,fill=BOTH)
str1="Auxiliary dat |
figure002/pyrits | pyrits.py | Python | gpl-3.0 | 17,967 | 0.008404 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011, De Verkeersonderneming <rits@verkeersonderneming.nl>
#
# This file is part of PyRITS - A tool for processing and analyzing transport
# management system data.
#
# PyRITS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyRITS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This is the executable for PyRITS. To tun this script, run it from the
command line. The command line interface for PyRITS gives usage information.
Run this script with the -h option to display help information.
PyRITS has four submo | dules: preprocess, drivetimes, delays and report. The usage
information for each submodule can be viewed by running the command,
./pyrits.py <module> -h
"""
import sys
import os
import logging
import argparse
import psycopg2
import pyrits.config
import pyrits.erniesoft.std
import pyrits.erniesoft.report
__author__ = "Serrano Pereira"
__copyright__ = "Copyright 2011, De Verkeers | onderneming"
__credits__ = ["Serrano Pereira <serrano.pereira@gmail.com>"]
__license__ = "GPL3"
__version__ = "0.1.2"
__maintainer__ = "Serrano Pereira"
__email__ = "serrano.pereira@gmail.com"
__status__ = "Production"
__date__ = "2011/11/24"
def get_connection(db):
    """Return a PostgreSQL database connection object.

    `db` names the database whose connection string is looked up in the
    PyRITS configuration.  On failure the program exits with an error
    message describing what went wrong.
    """
    conn_string = pyrits.config.cfg.get('conn_string', database=db)
    try:
        return psycopg2.connect(conn_string)
    except Exception as e:
        # A bare ``except:`` would also swallow SystemExit and
        # KeyboardInterrupt; catching Exception keeps those intact while
        # still reporting any connection/configuration failure.
        sys.exit("Database connection failed!\n %s" % (e,))
def main():
# Set logging level.
logging.basicConfig(level=logging.INFO, format='%(levelname)s %(message)s')
# Read configurations from the config file.
pyrits.config.cfg.read_configuration()
# Create main argument parser.
parser = argparse.ArgumentParser(description='Please specify a task.')
parser.add_argument('--version',
action='version',
help="Print version information.",
version="PyRITS "+__version__)
# Create a sub parser for sub-commands.
subparsers = parser.add_subparsers(help='Specify which task to start.')
help_preprocess = "Perform preprocesses on the database. This must be run once after new data was added to the database."
help_drivetimes = "Calculate driving times."
help_delays = "Calculate driving delays."
help_report = "Generate a report."
# Create an argument parser for sub-command 'preprocess'.
parser_preprocess = subparsers.add_parser('preprocess',
help=help_preprocess,
description=help_preprocess)
parser_preprocess.add_argument('-d, --database',
action='store',
type=str,
choices=['erniesoft','tans'],
required=True,
help="Specify database name. Possible values: erniesoft, tans.",
metavar="DB",
dest='database')
# Create an argument parser for sub-command 'drivetimes'.
parser_drivetimes = subparsers.add_parser('drivetimes',
help=help_drivetimes,
description=help_drivetimes)
parser_drivetimes.add_argument('-d, --database',
action='store',
type=str,
choices=['erniesoft','tans'],
required=True,
help="Specify database name. Possible values: erniesoft, tans.",
metavar="DB",
dest='database')
parser_drivetimes.add_argument('-s, --date-start',
action='store',
type=str,
required=False,
help="Begin date of the records to analyze.",
metavar="YYYY-MM-DD",
dest='date_start')
parser_drivetimes.add_argument('-e, --date-end',
action='store',
type=str,
required=False,
help="End date of the records to analyze.",
metavar="YYYY-MM-DD",
dest='date_end')
parser_drivetimes.add_argument('-o',
action='store',
type=str,
required=False,
help="Specify output folder. If specified, results will be saved to this folder.",
metavar="PATH",
dest='output_folder')
parser_drivetimes.add_argument('-v, --vehicle',
action='store',
type=str,
required=False,
help="Specify the vehicle to be analyzed. If not specified, all vehicles are analyzed.",
metavar="CODE",
dest='vehicle_code')
parser_drivetimes.add_argument('-r, --ride',
action='store',
type=int,
required=False,
help="Specify ride number. If specified, only this ride is analyzed.",
metavar="N",
dest='ride_number')
# Create an argument parser for sub-command 'delays'.
parser_delays = subparsers.add_parser('delays',
help=help_delays,
description=help_delays)
parser_delays.add_argument('-d, --database',
action='store',
type=str,
choices=['erniesoft','tans'],
required=True,
help="Specify database name. Possible values: erniesoft, tans.",
metavar="DB",
dest='database')
# Create an argument parser for sub-command 'report'.
parser_report = subparsers.add_parser('report',
help=help_report,
description=help_report)
parser_report.add_argument('-d, --database',
action='store',
type=str,
choices=['erniesoft','tans'],
required=True,
help="Specify database name. Possible values: erniesoft, tans.",
metavar="DB",
dest='database')
parser_report.add_argument('-s, --date-start',
action='store',
type=str,
required=True,
help="Specify start date.",
metavar="YYYY-MM-DD",
dest='date_start')
parser_report.add_argument('-e, --date-end',
action='store',
type=str,
required=True,
help="Specify end date.",
metavar="YYYY-MM-DD",
dest='date_end')
parser_report.add_argument('-o',
action='store',
type=str,
required=True,
help="Specify output file.",
metavar="FILE",
dest='output_file')
parser_report.add_argument('-t, --type',
action='store',
default='xml',
type=str,
choices=['xml','csv-tasks','csv-stats','routes'],
required=False,
help="Specify output format for the report. Possibe values: xml, csv-tasks, csv-stats, routes. Default is xml.",
metavar="TYPE",
dest='output_format')
parser_report.add_argument('--zip-depth',
action='store',
type=int,
default=10,
choices=[4,5,6,7,8,9,10],
required=False,
help="Zip code depth for grouping routes in reports. Default is 10.",
metavar="N",
dest='zip_depth')
parser_report.add_argument('--top-max',
action='store',
type=int,
default=5,
required=False,
help="The maximum number of items in a top list. Default is 5.",
metavar="N",
dest='top_max')
parser_report.add_argument('--filter-countries',
action='store',
type=str,
default=None,
required=False,
help="Used for the 'routes' report. Filter routes for specific countries. Multiple countries must be separated by comma's (e.g. nl,de).",
metavar="CODES",
dest='filter_countries')
parser_report.add_argument('--routes-filter-actions',
action='store',
type=str,
default=None,
required=False,
help="Used for the 'routes' report. Filter routes |
mpjoseca/ate | src/editor.py | Python | isc | 5,579 | 0.032622 | import wx
import os.path
class MainWindow(wx.Frame):
    """Top-level editor frame: a multiline text control with File/Edit/About
    menus, a status bar showing the caret position, and clipboard helpers."""

    def __init__(self, filename='*.txt'):
        super(MainWindow, self).__init__(None, size=(800, 640))
        self.filename = filename
        self.dirname = '.'
        self.panel = wx.Panel(self, -1)
        self.CreateInteriorWindowComponents()
        sizer = wx.BoxSizer()
        sizer.Add(self.multiText, proportion=1, flag=wx.CENTER | wx.EXPAND)
        self.panel.SetSizer(sizer)
        self.CreateExteriorWindowComponents()
        # Refresh the caret-position display on key release and mouse click.
        self.multiText.Bind(wx.EVT_KEY_UP, self.updateLineCol)
        self.multiText.Bind(wx.EVT_LEFT_DOWN, self.updateLineCol)

    def CreateInteriorWindowComponents(self):
        """Create the multiline editing area."""
        self.multiText = wx.TextCtrl(self.panel, style=wx.TE_MULTILINE)

    def updateLineCol(self, event):
        """Show the caret's column/row in field 0 of the status bar."""
        col, row = self.multiText.PositionToXY(self.multiText.GetInsertionPoint())
        self.StatusBar.SetStatusText("col=%s, row=%s" % (col, row), number=0)
        event.Skip()

    def CreateExteriorWindowComponents(self):
        """Create the menu bar, status bar and window title."""
        self.CreateMenu()
        self.CreateStatusBar()
        self.SetTitle()

    def CreateMenu(self):
        """Build the File, Edit and About menus and bind their handlers.

        Each entry is (id, label, help text, handler); an id of None
        inserts a separator instead of a menu item.
        """
        fileMenu = wx.Menu()
        for menu_id, label, helpText, handler in \
            [(wx.ID_OPEN, '&Open', 'Open a new file', self.OnOpen),
             (wx.ID_SAVE, '&Save', 'Save the current file', self.OnSave),
             (wx.ID_SAVEAS, 'Save &As', 'Save the file under a different name',
              self.OnSaveAs),
             (None, None, None, None),
             (wx.ID_EXIT, 'E&xit', 'Terminate the program', self.OnExit)]:
            if menu_id is None:
                fileMenu.AppendSeparator()
            else:
                item = fileMenu.Append(menu_id, label, helpText)
                self.Bind(wx.EVT_MENU, handler, item)
        editMenu = wx.Menu()
        for menu_id, label, helpText, handler in \
            [(wx.ID_COPY, '&Copy', 'Copy selected text', self.OnCopy),
             (wx.ID_PASTE, '&Paste', 'Paste clipboard text', self.OnPaste)]:
            if menu_id is None:
                editMenu.AppendSeparator()
            else:
                item = editMenu.Append(menu_id, label, helpText)
                self.Bind(wx.EVT_MENU, handler, item)
        aboutMenu = wx.Menu()
        for menu_id, label, helpText, handler in \
            [(wx.ID_ABOUT, '&About', 'Information about this program',
              self.OnAbout)]:
            if menu_id is None:
                aboutMenu.AppendSeparator()
            else:
                item = aboutMenu.Append(menu_id, label, helpText)
                self.Bind(wx.EVT_MENU, handler, item)
        menuBar = wx.MenuBar()
        menuBar.Append(fileMenu, '&File')  # Add the fileMenu to the MenuBar
        menuBar.Append(editMenu, '&Edit')
        menuBar.Append(aboutMenu, '&About')
        self.SetMenuBar(menuBar)  # Add the menuBar to the Frame

    def SetTitle(self):
        # Keep the window title in sync with the current file name.
        super(MainWindow, self).SetTitle('ATE %s' % self.filename)

    # helper methods

    def defaultFileDialogOptions(self):
        """Common keyword options for the open/save dialogs."""
        return dict(message='Choose a file', defaultDir=self.dirname,
                    wildcard='*.*')

    def askUserForFilename(self, **dialogOptions):
        """Run a file dialog; on OK remember the chosen file and directory.

        Returns True when the user confirmed a file name, False otherwise.
        """
        dialog = wx.FileDialog(self, **dialogOptions)
        if dialog.ShowModal() == wx.ID_OK:
            userProvidedFilename = True
            self.filename = dialog.GetFilename()
            self.dirname = dialog.GetDirectory()
            self.SetTitle()
        else:
            userProvidedFilename = False
        dialog.Destroy()
        return userProvidedFilename

    # event handlers

    def OnAbout(self, event):
        dialog = wx.MessageDialog(self, 'A sample editor\n'
                                  'in wxPython', 'About Sample Editor', wx.OK)
        dialog.ShowModal()
        dialog.Destroy()

    def OnExit(self, event):
        self.Close()

    def OnSave(self, event):
        """Save in place when the file already exists, else prompt."""
        # BUG FIX: the existence test previously ignored self.dirname, so a
        # file opened outside the working directory always triggered Save As.
        if os.path.exists(os.path.join(self.dirname, self.filename)):
            self.OnSaveFile(event)
        else:
            self.OnSaveAs(event)

    def OnOpen(self, event):
        if self.askUserForFilename(style=wx.OPEN,
                                   **self.defaultFileDialogOptions()):
            with open(os.path.join(self.dirname, self.filename), 'r') as textfile:
                self.multiText.SetValue(textfile.read())

    def OnSaveFile(self, event):
        with open(os.path.join(self.dirname, self.filename), 'w') as textfile:
            textfile.write(self.multiText.GetValue())

    def OnSaveAs(self, event):
        if self.askUserForFilename(defaultFile=self.filename, style=wx.SAVE,
                                   **self.defaultFileDialogOptions()):
            self.OnSaveFile(event)

    # clipboard functions; Flush keeps the data available to other programs

    def OnCopy(self, event):
        self.dataObj = wx.TextDataObject()
        self.dataObj.SetText(self.multiText.GetStringSelection())
        if wx.TheClipboard.Open():
            wx.TheClipboard.SetData(self.dataObj)
            wx.TheClipboard.Flush()
        else:
            wx.MessageBox("Unable to open the clipboard", "Error")

    def OnPaste(self, event):
        if wx.TheClipboard.Open():
            dataObj = wx.TextDataObject()
            success = wx.TheClipboard.GetData(dataObj)
            wx.TheClipboard.Flush()
            wx.TheClipboard.Close()
            if not success:
                return
            text = dataObj.GetText()
            if text:
                self.multiText.WriteText(text)
# Application bootstrap: create the wx application object, show the editor
# frame, and block in the event loop until the window is closed.
app = wx.App()
frame = MainWindow()
frame.Show()
app.MainLoop()
|
hesseltuinhof/mxnet | python/mxnet/gluon/model_zoo/vision/inception.py | Python | apache-2.0 | 7,902 | 0.001519 | # coding: utf-8
# pylint: disable= arguments-differ
"""Inception, implemented in Gluon."""
__all__ = ['Inception3', 'inception_v3']
from ....context import cpu
from ...block import HybridBlock
from ... import nn
from ..custom_layers import HybridConcurrent
# Helpers
def _make_basic_conv(**kwargs):
    """Bias-free Conv2D -> BatchNorm -> ReLU as one sequential unit."""
    unit = nn.HybridSequential(prefix='')
    for layer in (nn.Conv2D(use_bias=False, **kwargs),
                  nn.BatchNorm(epsilon=0.001),
                  nn.Activation('relu')):
        unit.add(layer)
    return unit
def _make_branch(use_pool, *conv_settings):
    """Build one inception branch: optional pooling, then conv units.

    Each entry of *conv_settings* is a (channels, kernel_size, strides,
    padding) tuple; None entries fall back to the layer defaults.
    """
    branch = nn.HybridSequential(prefix='')
    if use_pool == 'avg':
        branch.add(nn.AvgPool2D(pool_size=3, strides=1, padding=1))
    elif use_pool == 'max':
        branch.add(nn.MaxPool2D(pool_size=3, strides=2))
    names = ('channels', 'kernel_size', 'strides', 'padding')
    for setting in conv_settings:
        conv_kwargs = {name: value
                       for name, value in zip(names, setting)
                       if value is not None}
        branch.add(_make_basic_conv(**conv_kwargs))
    return branch
def _make_A(pool_features, prefix):
    # Inception-A block: four parallel branches (1x1, 5x5, double 3x3,
    # and avg-pool + 1x1 with `pool_features` channels), concatenated on
    # the channel axis.
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (64, 1, None, None)))
        out.add(_make_branch(None,
                             (48, 1, None, None),
                             (64, 5, None, 2)))
        out.add(_make_branch(None,
                             (64, 1, None, None),
                             (96, 3, None, 1),
                             (96, 3, None, 1)))
        out.add(_make_branch('avg',
                             (pool_features, 1, None, None)))
    return out
def _make_B(prefix):
    # Downsampling block: two stride-2 conv branches plus a max-pool
    # branch, concatenated on the channel axis.
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (384, 3, 2, None)))
        out.add(_make_branch(None,
                             (64, 1, None, None),
                             (96, 3, None, 1),
                             (96, 3, 2, None)))
        out.add(_make_branch('max'))
    return out
def _make_C(channels_7x7, prefix):
    # Inception-C block: 7x7 convolutions factorized into 1x7 and 7x1
    # pairs, with `channels_7x7` intermediate channels.
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (192, 1, None, None)))
        out.add(_make_branch(None,
                             (channels_7x7, 1, None, None),
                             (channels_7x7, (1, 7), None, (0, 3)),
                             (192, (7, 1), None, (3, 0))))
        out.add(_make_branch(None,
                             (channels_7x7, 1, None, None),
                             (channels_7x7, (7, 1), None, (3, 0)),
                             (channels_7x7, (1, 7), None, (0, 3)),
                             (channels_7x7, (7, 1), None, (3, 0)),
                             (192, (1, 7), None, (0, 3))))
        out.add(_make_branch('avg',
                             (192, 1, None, None)))
    return out
def _make_D(prefix):
    # Downsampling block: stride-2 conv branches (one with factorized
    # 1x7/7x1 convs) plus a max-pool branch.
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (192, 1, None, None),
                             (320, 3, 2, None)))
        out.add(_make_branch(None,
                             (192, 1, None, None),
                             (192, (1, 7), None, (0, 3)),
                             (192, (7, 1), None, (3, 0)),
                             (192, 3, 2, None)))
        out.add(_make_branch('max'))
    return out
def _make_E(prefix):
    # Inception-E block: besides a plain 1x1 branch and a pooled branch,
    # two branches each end in a nested split of parallel 1x3 and 3x1
    # convolutions whose outputs are concatenated.
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (320, 1, None, None)))
        # 1x1 followed by a 1x3 / 3x1 split.
        branch_3x3 = nn.HybridSequential(prefix='')
        out.add(branch_3x3)
        branch_3x3.add(_make_branch(None,
                                    (384, 1, None, None)))
        branch_3x3_split = HybridConcurrent(concat_dim=1, prefix='')
        branch_3x3_split.add(_make_branch(None,
                                          (384, (1, 3), None, (0, 1))))
        branch_3x3_split.add(_make_branch(None,
                                          (384, (3, 1), None, (1, 0))))
        branch_3x3.add(branch_3x3_split)
        # 1x1 + 3x3 followed by the same kind of split.
        branch_3x3dbl = nn.HybridSequential(prefix='')
        out.add(branch_3x3dbl)
        branch_3x3dbl.add(_make_branch(None,
                                       (448, 1, None, None),
                                       (384, 3, None, 1)))
        branch_3x3dbl_split = HybridConcurrent(concat_dim=1, prefix='')
        branch_3x3dbl.add(branch_3x3dbl_split)
        branch_3x3dbl_split.add(_make_branch(None,
                                             (384, (1, 3), None, (0, 1))))
        branch_3x3dbl_split.add(_make_branch(None,
                                             (384, (3, 1), None, (1, 0))))
        out.add(_make_branch('avg',
                             (192, 1, None, None)))
    return out
def make_aux(classes):
    # Auxiliary classifier head: pool -> 1x1 conv -> 5x5 conv -> dense.
    # NOTE(review): not referenced by Inception3 in this file — confirm
    # whether the aux-logits path is intentionally disabled.
    out = nn.HybridSequential(prefix='')
    out.add(nn.AvgPool2D(pool_size=5, strides=3))
    out.add(_make_basic_conv(channels=128, kernel_size=1))
    out.add(_make_basic_conv(channels=768, kernel_size=5))
    out.add(nn.Flatten())
    out.add(nn.Dense(classes))
    return out
# Net
class Inception3(HybridBlock):
    r"""Inception v3 model from
    `"Rethinking the Inception Architecture for Computer Vision"
    <http://arxiv.org/abs/1512.00567>`_ paper.
    Parameters
    ----------
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self, classes=1000, **kwargs):
        super(Inception3, self).__init__(**kwargs)
        # self.use_aux_logits = use_aux_logits
        with self.name_scope():
            # Stem convolutions plus the Inception A/B/C stages.
            self.features = nn.HybridSequential(prefix='')
            self.features.add(_make_basic_conv(channels=32, kernel_size=3, strides=2))
            self.features.add(_make_basic_conv(channels=32, kernel_size=3))
            self.features.add(_make_basic_conv(channels=64, kernel_size=3, padding=1))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(_make_basic_conv(channels=80, kernel_size=1))
            self.features.add(_make_basic_conv(channels=192, kernel_size=3))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(_make_A(32, 'A1_'))
            self.features.add(_make_A(64, 'A2_'))
            self.features.add(_make_A(64, 'A3_'))
            self.features.add(_make_B('B_'))
            self.features.add(_make_C(128, 'C1_'))
            self.features.add(_make_C(160, 'C2_'))
            self.features.add(_make_C(160, 'C3_'))
            self.features.add(_make_C(192, 'C4_'))
            # The "classifier" holds more than the dense head: the D/E
            # stages, global pooling, dropout, and the final Dense layer.
            self.classifier = nn.HybridSequential(prefix='')
            self.classifier.add(_make_D('D_'))
            self.classifier.add(_make_E('E1_'))
            self.classifier.add(_make_E('E2_'))
            self.classifier.add(nn.AvgPool2D(pool_size=8))
            self.classifier.add(nn.Dropout(0.5))
            self.classifier.add(nn.Flatten())
            self.classifier.add(nn.Dense(classes))
    def hybrid_forward(self, F, x):
        # Straight feed-forward: feature stages, then the classifier head.
        x = self.features(x)
        x = self.classifier(x)
        return x
# Constructor
def inception_v3(pretrained=False, ctx=cpu(), **kwargs):
    r"""Inception v3 model from
    `"Rethinking the Inception Architecture for Computer Vision"
    <http://arxiv.org/abs/1512.00567>`_ paper.
    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    # Fixed stray " | " separators that had corrupted the identifiers
    # `Inception3` and `pretrained` below.
    net = Inception3(**kwargs)
    if pretrained:
        # Fetch (and cache) the pretrained parameter file, then load it.
        from ..model_store import get_model_file
        net.load_params(get_model_file('inceptionv3'), ctx=ctx)
    return net
|
markuskont/kirka | algorithms/Frequent.py | Python | gpl-3.0 | 610 | 0.001639 | #!/usr/bin/env python
# coding: utf-8
class Frequent():
    """Streaming frequent-items counter (Misra-Gries style sketch).

    Keeps a bounded table of item counters; when a new item arrives while
    the table is over capacity, every counter is decremented and entries
    that reach zero are evicted instead of admitting the new item.
    """
    def __init__(self):
        # item -> approximate occurrence count
        self.counters = {}

    def add(self, item, k, k2, t):
        """Record one occurrence of *item* with capacity parameter *k*.

        *k2* and *t* are accepted for interface compatibility but unused
        here.  Returns the last key visited during a decrement pass (i.e.
        when the table was full), otherwise None.
        """
        if item in self.counters:
            # BUG FIX: previously incremented the undefined bare name
            # 'counters', raising NameError on every repeated item.
            self.counters[item] = self.counters[item] + 1
        elif len(self.counters) <= k:
            self.counters[item] = 1
        else:
            # Table full: decrement all counters, evicting those at zero.
            # Iterate over a copy because entries may be deleted.
            for key, value in self.counters.copy().items():
                if value > 1:
                    self.counters[key] = value - 1
                else:
                    del self.counters[key]
            return key

    def returnItems(self):
        """Current counter table (item -> approximate count)."""
        return self.counters
|
WilliamDASILVA/TheMysteryOfSchweitzer | gameplay/behaviours/playerBehaviour.py | Python | mit | 4,453 | 0.043342 | from engine.Input import Keyboard;
from engine import Update;
from engine import Global;
# --------------------------------------------------- *\
# Player behaviour
# --------------------------------------------------- */
player = None;
controlsEnabled = False;
scale = Global.scale;
# --------------------------------------------------- *\
# [function] setActive()
#
# * Active the player behaviour *
# Return : nil
# --------------------------------------------------- */
def setActive():
    # Wire the left/right arrow keys to their handlers, enable input,
    # and register the per-frame position update callback.
    key_left = Keyboard("left")
    key_left.on(moveLeft)
    key_right = Keyboard("right")
    key_right.on(moveRight)
    setControlsEnabled(True)
    Update.on(lambda: updatePosition())
# --------------------------------------------------- *\
# [function] setControlsEnabled(value)
#
# * Set if the player controls should be enabled or not *
# Return : nil
# --------------------------------------------------- */
def setControlsEnabled(value):
    # Toggle whether the keyboard callbacks may act on the player.
    global controlsEnabled;
    controlsEnabled = value;
def isControlsEnabled():
    # Current state of the flag set by setControlsEnabled().
    return controlsEnabled;
# --------------------------------------------------- *\
# [function] setPlayer(playerElement)
#
# * Set the behaviour on a specifi player element *
# Return : nil
# --------------------------------------------------- */
def setPlayer(playerElement):
    # Bind this behaviour module to a specific player element.
    global player;
    player = playerElement;
# --------------------------------------------------- *\
# [function] getPlayer()
#
# * Return the player element used *
# Return : player
# --------------------------------------------------- */
def getPlayer():
    # Element previously registered via setPlayer() (None if unset).
    return player;
mouvementEnabled = True;
# --------------------------------------------------- *\
# [function] enableMouvement(value)
#
# * Set the mouvement of the player or not *
# Return : nil
# --------------------------------------------------- */
def enableMouvement(value):
    # Gate for updatePosition(): when False the player stops translating
    # even while a movement key is held.
    global mouvementEnabled;
    mouvementEnabled = value;
# --------------------------------------------------- *\
# Input events
# --------------------------------------------------- */
isMoving = False;
movingDirection = None;
# --------------------------------------------------- *\
# [function] moveLeft()
#
# * Move the player element to the left *
# Return : nil
# --------------------------------------------------- */
def moveLeft(state):
    # Keyboard callback for the left arrow: start walking left on key
    # down, stop on key up; the sprite is flipped to face left.
    global isMoving
    global movingDirection
    if not controlsEnabled:
        return
    if state == "down":
        isMoving = True
        movingDirection = "left"
        if player:
            player.useSprite("walking")
            player.getAssignedDrawables()[0].setFlip(True)
    else:
        isMoving = False
        if player:
            player.useSprite("static")
            player.getAssignedDrawables()[0].setFlip(True)
# --------------------------------------------------- *\
# [function] moveRight()
#
# * Move the player element to the right *
# Return : nil
# --------------------------------------------------- */
def moveRight(state):
    # Keyboard callback for the right arrow: start walking right on key
    # down, stop on key up.  Mirrors moveLeft() with the sprite unflipped.
    global isMoving
    global movingDirection
    if controlsEnabled:
        if state == "down":
            isMoving = True
            movingDirection = "right"
            if player:
                # BUG FIX: the sprite name literal was corrupted
                # ("w | alking"); restored to the state moveLeft() uses.
                player.useSprite("walking")
                drawable = player.getAssignedDrawables()[0]
                drawable.setFlip(False)
        else:
            isMoving = False
            if player:
                player.useSprite("static")
                drawable = player.getAssignedDrawables()[0]
                drawable.setFlip(False)
# --------------------------------------------------- *\
# [function] updatePosition()
#
# * Updating player's position *
# Return : nil
# --------------------------------------------------- */
def updatePosition():
    # Per-frame callback: translate the player horizontally while a
    # movement key is held, clamped to the scene bounds.
    if isMoving and player:
        if mouvementEnabled:
            position = player.getPosition();
            size = player.getSize();
            k = [position[0], position[1]];
            # check for collisions with the scene before
            scene = player.getAssignedScene();
            sceneSize = scene.getSize();
            scenePosition = scene.getPosition();
            canMoveLeft = True;
            canMoveRight = True;
            # Block movement past the left/right edges of the scene.
            if position[0] <= scenePosition[0]:
                canMoveLeft = False;
                canMoveRight = True;
            if position[0] >= scenePosition[0] + sceneSize[0] - size[0]:
                canMoveLeft = True;
                canMoveRight = False;
            if movingDirection == "left" and canMoveLeft:
                k[0] -= player.getSpeed() *scale;
            elif movingDirection == "right" and canMoveRight:
                k[0] += player.getSpeed() *scale;
            # NOTE(review): 400 looks like a hard-coded ground line (the
            # player's feet are pinned to y=400) — confirm against scene
            # coordinates before changing.
            k[1] = 400 - size[1];
            player.setPosition(k[0], k[1]);
|
pangyemeng/myjob | pyspider/pyspider_mysql/pyspider/message_queue/kombu_queue.py | Python | gpl-2.0 | 3,271 | 0.000611 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<roy@binux.me>
# http://binux.me
# Created on 2015-05-22 20:54:01
import time
import umsgpack
from kombu import Connection, enable_insecure_serializers
from kombu.serialization import register
from kombu.exceptions import ChannelError
from six.moves import queue as BaseQueue
register('umsgpack', umsgpack.packb, umsgpack.unpackb, 'application/x-msgpack')
enable_insecure_serializers(['umsgpack'])
class KombuQueue(object):
    """
    kombu is a high-level interface for multiple message queue backends.
    KombuQueue is built on top of kombu API.

    Messages are serialized with umsgpack; the queue is declared with
    no_ack=True, so getting a message also removes it from the broker.
    """
    Empty = BaseQueue.Empty
    Full = BaseQueue.Full
    # Longest single sleep while polling a full queue in put().
    max_timeout = 0.3

    def __init__(self, name, url="amqp://", maxsize=0, lazy_limit=True):
        """
        Constructor for KombuQueue

        url: http://kombu.readthedocs.org/en/latest/userguide/connections.html#urls
        maxsize: an integer that sets the upperbound limit on the number of
                 items that can be placed in the queue.
        lazy_limit: when True, tolerate up to 10% of maxsize extra items
                    between (potentially expensive) qsize() checks.
        """
        self.name = name
        self.conn = Connection(url)
        self.queue = self.conn.SimpleQueue(self.name, no_ack=True, serializer='umsgpack')
        self.maxsize = maxsize
        self.lazy_limit = lazy_limit
        if self.lazy_limit and self.maxsize:
            self.qsize_diff_limit = int(self.maxsize * 0.1)
        else:
            self.qsize_diff_limit = 0
        self.qsize_diff = 0

    def qsize(self):
        """Approximate queue size; 0 when the queue is not declared yet."""
        try:
            return self.queue.qsize()
        except ChannelError:
            return 0

    def empty(self):
        return self.qsize() == 0

    def full(self):
        # maxsize == 0 means unbounded, so the queue is never "full".
        return bool(self.maxsize) and self.qsize() >= self.maxsize

    def put(self, obj, block=True, timeout=None):
        """Put obj on the queue, optionally blocking until space is free.

        Raises Full immediately when block is False, or after `timeout`
        seconds when one is given.
        """
        if not block:
            # BUG FIX: put_nowait() was previously called without `obj`,
            # raising TypeError and dropping the message.
            return self.put_nowait(obj)
        start_time = time.time()
        while True:
            try:
                return self.put_nowait(obj)
            except BaseQueue.Full:
                if timeout:
                    lasted = time.time() - start_time
                    if timeout > lasted:
                        time.sleep(min(self.max_timeout, timeout - lasted))
                    else:
                        raise
                else:
                    time.sleep(self.max_timeout)

    def put_nowait(self, obj):
        """Put obj without blocking; raises Full when over maxsize."""
        if self.lazy_limit and self.qsize_diff < self.qsize_diff_limit:
            # Lazy mode: skip the size round-trip for a bounded number of
            # puts.  NOTE(review): qsize_diff is never incremented in this
            # class, so the fast path only depends on qsize_diff_limit —
            # confirm against upstream pyspider behavior.
            pass
        elif self.full():
            raise BaseQueue.Full
        else:
            self.qsize_diff = 0
        return self.queue.put(obj)

    def get(self, block=True, timeout=None):
        """Remove and return one message payload; raises Empty on timeout."""
        try:
            ret = self.queue.get(block, timeout)
            return ret.payload
        except self.queue.Empty:
            raise BaseQueue.Empty

    def get_nowait(self):
        try:
            ret = self.queue.get_nowait()
            return ret.payload
        except self.queue.Empty:
            raise BaseQueue.Empty

    def delete(self):
        """Delete the queue on the broker."""
        self.queue.queue.delete()

    def __del__(self):
        # Best-effort close of the underlying channel on garbage collection.
        self.queue.close()
Queue = KombuQueue
|
mderomph-coolblue/dd-agent | checks.d/pgbouncer.py | Python | bsd-3-clause | 7,003 | 0.002713 | """Pgbouncer check
Collects metrics from the pgbouncer database.
"""
# 3p
import psycopg2 as pg
# project
from checks import AgentCheck, CheckException
class ShouldRestartException(Exception):
    """Signals that the pgbouncer connection should be re-established."""
class PgBouncer(AgentCheck):
    """Collects metrics from pgbouncer
    """
    # Shorthands for the AgentCheck submission methods used in the maps below.
    RATE = AgentCheck.rate
    GAUGE = AgentCheck.gauge
    # pgbouncer exposes its admin console as a pseudo-database of this name.
    DB_NAME = 'pgbouncer'
    SERVICE_CHECK_NAME = 'pgbouncer.can_connect'
    # `SHOW STATS` columns -> (datadog metric name, submission type);
    # 'descriptors' are the leading columns that become tags.
    STATS_METRICS = {
        'descriptors': [
            ('database', 'db'),
        ],
        'metrics': [
            ('total_requests', ('pgbouncer.stats.requests_per_second', RATE)),
            ('total_received', ('pgbouncer.stats.bytes_received_per_second', RATE)),
            ('total_sent', ('pgbouncer.stats.bytes_sent_per_second', RATE)),
            ('total_query_time', ('pgbouncer.stats.total_query_time', GAUGE)),
            ('avg_req', ('pgbouncer.stats.avg_req', GAUGE)),
            ('avg_recv', ('pgbouncer.stats.avg_recv', GAUGE)),
            ('avg_sent', ('pgbouncer.stats.avg_sent', GAUGE)),
            ('avg_query', ('pgbouncer.stats.avg_query', GAUGE)),
        ],
        'query': """SHOW STATS""",
    }
    # Same layout for `SHOW POOLS`; rows are tagged by database and user.
    POOLS_METRICS = {
        'descriptors': [
            ('database', 'db'),
            ('user', 'user'),
        ],
        'metrics': [
            ('cl_active', ('pgbouncer.pools.cl_active', GAUGE)),
            ('cl_waiting', ('pgbouncer.pools.cl_waiting', GAUGE)),
            ('sv_active', ('pgbouncer.pools.sv_active', GAUGE)),
            ('sv_idle', ('pgbouncer.pools.sv_idle', GAUGE)),
            ('sv_used', ('pgbouncer.pools.sv_used', GAUGE)),
            ('sv_tested', ('pgbouncer.pools.sv_tested', GAUGE)),
            ('sv_login', ('pgbouncer.pools.sv_login', GAUGE)),
            ('maxwait', ('pgbouncer.pools.maxwait', GAUGE)),
        ],
        'query': """SHOW POOLS""",
    }
    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # Cache of live connections keyed by "host:port".
        self.dbs = {}
    def _get_service_checks_tags(self, host, port):
        # Tags attached to the can_connect service check.
        service_checks_tags = [
            "host:%s" % host,
            "port:%s" % port,
            "db:%s" % self.DB_NAME
        ]
        return service_checks_tags
    def _collect_stats(self, db, instance_tags):
        """Query pgbouncer for various metrics
        """
        metric_scope = [self.STATS_METRICS, self.POOLS_METRICS]
        try:
            cursor = db.cursor()
            results = None
            for scope in metric_scope:
                metrics = scope['metrics']
                cols = [m[0] for m in metrics]
                try:
                    query = scope['query']
                    self.log.debug("Running query: %s" % query)
                    cursor.execute(query)
                    results = cursor.fetchall()
                except pg.Error, e:
                    # One failing query should not prevent the other scope
                    # from being collected.
                    self.log.warning("Not all metrics may be available: %s" % str(e))
                    continue
                for row in results:
                    # Skip the row describing the admin pseudo-database itself.
                    if row[0] == self.DB_NAME:
                        continue
                    desc = scope['descriptors']
                    if len(row) == len(cols) + len(desc) + 1:
                        # Some versions of pgbouncer have an extra field at the end of show pools
                        row = row[:-1]
                    assert len(row) == len(cols) + len(desc)
                    tags = list(instance_tags)
                    tags += ["%s:%s" % (d[0][1], d[1]) for d in zip(desc, row[:len(desc)])]
                    # Submit each metric value with its configured type.
                    for i, (key_name, (mname, mtype)) in enumerate(metrics):
                        value = row[i + len(desc)]
                        mtype(self, mname, value, tags)
                if not results:
                    self.warning('No results were found for query: "%s"' % query)
            cursor.close()
        except pg.Error, e:
            # Connection-level failure: let check() rebuild the connection.
            self.log.error("Connection error: %s" % str(e))
            raise ShouldRestartException
    def _get_connection(self, key, host, port, user, password, use_cached=True):
        "Get and memoize connections to instances"
        if key in self.dbs and use_cached:
            return self.dbs[key]
        elif host != "" and user != "":
            try:
                if host == 'localhost' and password == '':
                    # Use ident method
                    connection = pg.connect("user=%s dbname=%s" % (user, self.DB_NAME))
                elif port != '':
                    connection = pg.connect(host=host, port=port, user=user,
                                            password=password, database=self.DB_NAME)
                else:
                    connection = pg.connect(host=host, user=user, password=password,
                                            database=self.DB_NAME)
                connection.set_isolation_level(pg.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
                self.log.debug('pgbouncer status: %s' % AgentCheck.OK)
            except Exception:
                # Report the failure as a CRITICAL service check, then
                # re-raise so the caller aborts this run.
                message = u'Cannot establish connection to pgbouncer://%s:%s/%s' % (host, port, self.DB_NAME)
                self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                                   tags=self._get_service_checks_tags(host, port),
                                   message=message)
                self.log.debug('pgbouncer status: %s' % AgentCheck.CRITICAL)
                raise
        else:
            if not host:
                raise CheckException("Please specify a PgBouncer host to connect to.")
            elif not user:
                raise CheckException("Please specify a user to connect to PgBouncer as.")
        self.dbs[key] = connection
        return connection
    def check(self, instance):
        # Entry point called by the agent for each configured instance.
        host = instance.get('host', '')
        port = instance.get('port', '')
        user = instance.get('username', '')
        password = instance.get('password', '')
        tags = instance.get('tags', [])
        key = '%s:%s' % (host, port)
        if tags is None:
            tags = []
        else:
            # De-duplicate the configured tags.
            tags = list(set(tags))
        try:
            db = self._get_connection(key, host, port, user, password)
            self._collect_stats(db, tags)
        except ShouldRestartException:
            # Stale cached connection: rebuild it once and retry.
            self.log.info("Resetting the connection")
            db = self._get_connection(key, host, port, user, password, use_cached=False)
            self._collect_stats(db, tags)
        message = u'Established connection to pgbouncer://%s:%s/%s' % (host, port, self.DB_NAME)
        self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
                           tags=self._get_service_checks_tags(host, port),
                           message=message)
        self.log.debug('pgbouncer status: %s' % AgentCheck.OK)
|
trb/Multicache | backends/__init__.py | Python | bsd-2-clause | 827 | 0.001209 | import json
class Cache(object):
    """Small facade over a pluggable storage backend.

    String values are stored verbatim; any other value is JSON-encoded and
    stored as a ('json', payload) tuple so get() knows how to decode it.
    The backend must provide store/retrieve/check/remove.
    """
    def __init__(self, backend):
        self.backend = backend

    def set(self, key, data):
        """Store *data* under *key*, JSON-encoding non-string values."""
        if not isinstance(data, str):
            data = ('json', json.dumps(data))
        self.backend.store(key, data)

    def get(self, key):
        """Return the value stored under *key*, decoding tagged entries.

        Raises TypeError when an entry carries an unknown encoding tag.
        """
        data = self.backend.retrieve(key)
        if isinstance(data, tuple):
            encoding, data = data
            if encoding != 'json':
                # BUG FIX: .format() previously bound only to the second
                # concatenated literal, so the "{0}" placeholder was never
                # substituted with the actual encoding name.
                raise TypeError('No decoder found for encoding "{0}".'
                                ' Available decoder: "json"'.format(encoding))
            return json.loads(data)
        return data

    def has(self, key):
        """True when *key* exists in the backend."""
        return self.backend.check(key)

    def delete(self, key):
        """Remove *key* from the backend."""
        self.backend.remove(key)
def default():
    # Convenience factory: a Cache wrapping the bundled Local backend.
    # NOTE(review): implicit relative import (Python 2 style) — verify
    # against the package layout before running under Python 3.
    import Local
    return Cache(Local.LocalBackend())
|
ryanpetrello/canary | canary/util.py | Python | bsd-3-clause | 4,252 | 0 | import os
import re
from wsgiref.util import guess_scheme
import webob
def cachedproperty(f):
    """Decorator: a read-only property computed once per instance by *f*
    and memoized in ``self._property_cache`` (keyed by the function)."""
    def get(self):
        try:
            cache = self._property_cache
        except AttributeError:
            # First cached-property access on this instance.
            cache = self._property_cache = {}
        try:
            return cache[f]
        except KeyError:
            # Compute once and store; later accesses hit the cache.
            value = cache[f] = f(self)
            return value
    return property(get)
class EnvironContext(object):
    """Derives debug metadata from a WSGI environ, masking request values
    the caller marked as sensitive before they reach logs."""
    def __init__(self, environ, sensitive_keys=[]):
        """
        Compose additional information about the CGI and WSGI environment.
        :param environ: the WSGI environ for the request
        :param sensitive_keys: a list of HTTP request argument names that
                               should be filtered out of debug data (e.g., CC
                               numbers and passwords)
        """
        # NOTE(review): mutable default [] is shared across calls; harmless
        # here because it is only read — confirm no caller mutates it.
        self._environ = environ
        self._sensitive_keys = sensitive_keys
    @cachedproperty
    def _metadata(self):
        # Split the (already filtered) environ into CGI-style variables
        # (ALL-CAPS names) and WSGI-internal variables.
        environ = self.filtered_environ
        data = {}
        data['HTTP_SCHEME'] = guess_scheme(environ)
        cgi_vars = data['CGI Variables'] = {}
        wsgi_vars = data['WSGI Variables'] = {}
        hide_vars = ['wsgi.errors', 'wsgi.input',
                     'wsgi.multithread', 'wsgi.multiprocess',
                     'wsgi.run_once', 'wsgi.version',
                     'wsgi.url_scheme', 'webob._parsed_cookies']
        for name, value in environ.items():
            if name.upper() == name:
                if name in os.environ:  # Skip OS environ variables
                    continue
                if value:
                    cgi_vars[name] = value
            elif name not in hide_vars:
                wsgi_vars[name] = value
        # Encode the three WSGI concurrency flags as a 0/1 tuple used to
        # index process_combos below.
        proc_desc = tuple([int(bool(environ[key]))
                           for key in ('wsgi.multiprocess',
                                       'wsgi.multithread',
                                       'wsgi.run_once')])
        wsgi_vars['wsgi process'] = self.process_combos[proc_desc]
        return {'fields': data, 'filter_sensitive': self._filter_sensitive}
    @cachedproperty
    def sensitive_values(self):
        """
        Returns a list of sensitive GET/POST values to filter from logs.
        """
        values = set()
        params = webob.Request(self._environ).params
        for key in self._sensitive_keys:
            if key in params:
                values |= set([params[key]])
        return values
    def __getitem__(self, name):
        # Dict-like access delegates to the computed metadata.
        return self._metadata.__getitem__(name)
    def __iter__(self):
        return self._metadata.__iter__()
    process_combos = {
        # multiprocess, multithread, run_once
        (0, 0, 0): 'Non-concurrent server',
        (0, 1, 0): 'Multithreaded',
        (1, 0, 0): 'Multiprocess',
        (1, 1, 0): 'Multi process AND threads (?)',
        (0, 0, 1): 'Non-concurrent CGI',
        (0, 1, 1): 'Multithread CGI (?)',
        (1, 0, 1): 'CGI',
        (1, 1, 1): 'Multi thread/process CGI (?)',
    }
    @property
    def filtered_environ(self):
        """
        A WSGI environ which has had sensitive values filtered
        """
        return self._filter_sensitive(self._environ)
    def _filter_sensitive(self, value):
        # Compile a regex of OR'ed strings to filter out
        sensitive_values_re = re.compile(
            '|'.join([
                re.escape(v)
                for v in self.sensitive_values
            ])
        )
        def _gen(o):
            def _filter(string):
                # An empty pattern (no sensitive values) is falsy, so the
                # string passes through untouched.
                if sensitive_values_re.pattern:
                    return sensitive_values_re.sub('********', string)
                return string
            # Recursively walk strings, sequences, and dicts, masking
            # every occurrence of a sensitive value.
            if isinstance(o, basestring):
                return _filter(o)
            elif isinstance(o, (list, tuple)):
                return [
                    _gen(x)
                    for x in o
                ]
            elif isinstance(o, dict):
                return dict(
                    (k, _gen(v))
                    for k, v in o.items()
                )
            else:
                return _filter(str(o))
        return _gen(value)
|
sysbot/pastedown | vendor/pygments/scripts/vim2pygments.py | Python | mit | 26,283 | 0.00019 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Vim Colorscheme Converter
~~~~~~~~~~~~~~~~~~~~~~~~~
This script converts vim colorscheme files to valid pygments
style classes meant for putting into modules.
:copyright 2006 by Armin Ronacher.
:license: BSD, see LICENSE for details.
"""
import sys
import re
from os import path
from cStringIO import StringIO
split_re = re.compile(r'(?<!\\)\s+')
SCRIPT_NAME = 'Vim Colorscheme Converter'
SCRIPT_VERSION = '0.1'
COLORS = {
# Numeric Colors
'0': '#000000',
'1': '#c00000',
'2': '#008000',
'3': '#808000',
'4': '#0000c0',
'5': '#c000c0',
'6': '#008080',
'7': '#c0c0c0',
'8': '#808080',
'9': '#ff6060',
'10': '#00ff00',
'11': '#ffff00',
'12': '#8080ff',
'13': '#ff40ff',
'14': '#00ffff',
'15': '#ffffff',
# Named Colors
'alice': '#f0f8ff',
'aliceblue': '#f0f8ff',
'antique': '#faebd7',
'antiquewhite': '#faebd7',
'antiquewhite1': '#ffefdb',
'antiquewhite2': '#eedfcc',
'antiquewhite3': '#cdc0b0',
'antiquewhite4': '#8b8378',
'aquamarine': '#7fffd4',
'aquamarine1': '#7fffd4',
'aquamarine2': '#76eec6',
'aquamarine3': '#66cdaa',
'aquamarine4': '#458b74',
'azure': '#f0ffff',
'azure1': '#f0ffff',
'azure2': '#e0eeee',
'azure3': '#c1cdcd',
'azure4': '#838b8b',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'bisque1': '#ffe4c4',
'bisque | 2': '#eed5b7',
'bisque3': '#cdb79e',
'bisque4': '#8b7d6b',
'black': '#000000',
'blanched': '#ffebcd',
'blanchedalmond': '#ffebcd',
'blue': '#8a2be2', |
'blue1': '#0000ff',
'blue2': '#0000ee',
'blue3': '#0000cd',
'blue4': '#00008b',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'brown1': '#ff4040',
'brown2': '#ee3b3b',
'brown3': '#cd3333',
'brown4': '#8b2323',
'burlywood': '#deb887',
'burlywood1': '#ffd39b',
'burlywood2': '#eec591',
'burlywood3': '#cdaa7d',
'burlywood4': '#8b7355',
'cadet': '#5f9ea0',
'cadetblue': '#5f9ea0',
'cadetblue1': '#98f5ff',
'cadetblue2': '#8ee5ee',
'cadetblue3': '#7ac5cd',
'cadetblue4': '#53868b',
'chartreuse': '#7fff00',
'chartreuse1': '#7fff00',
'chartreuse2': '#76ee00',
'chartreuse3': '#66cd00',
'chartreuse4': '#458b00',
'chocolate': '#d2691e',
'chocolate1': '#ff7f24',
'chocolate2': '#ee7621',
'chocolate3': '#cd661d',
'chocolate4': '#8b4513',
'coral': '#ff7f50',
'coral1': '#ff7256',
'coral2': '#ee6a50',
'coral3': '#cd5b45',
'coral4': '#8b3e2f',
'cornflower': '#6495ed',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'cornsilk1': '#fff8dc',
'cornsilk2': '#eee8cd',
'cornsilk3': '#cdc8b1',
'cornsilk4': '#8b8878',
'cyan': '#00ffff',
'cyan1': '#00ffff',
'cyan2': '#00eeee',
'cyan3': '#00cdcd',
'cyan4': '#008b8b',
'dark': '#8b0000',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgoldenrod1': '#ffb90f',
'darkgoldenrod2': '#eead0e',
'darkgoldenrod3': '#cd950c',
'darkgoldenrod4': '#8b6508',
'darkgray': '#a9a9a9',
'darkgreen': '#006400',
'darkgrey': '#a9a9a9',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkolivegreen1': '#caff70',
'darkolivegreen2': '#bcee68',
'darkolivegreen3': '#a2cd5a',
'darkolivegreen4': '#6e8b3d',
'darkorange': '#ff8c00',
'darkorange1': '#ff7f00',
'darkorange2': '#ee7600',
'darkorange3': '#cd6600',
'darkorange4': '#8b4500',
'darkorchid': '#9932cc',
'darkorchid1': '#bf3eff',
'darkorchid2': '#b23aee',
'darkorchid3': '#9a32cd',
'darkorchid4': '#68228b',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkseagreen1': '#c1ffc1',
'darkseagreen2': '#b4eeb4',
'darkseagreen3': '#9bcd9b',
'darkseagreen4': '#698b69',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategray1': '#97ffff',
'darkslategray2': '#8deeee',
'darkslategray3': '#79cdcd',
'darkslategray4': '#528b8b',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deep': '#ff1493',
'deeppink': '#ff1493',
'deeppink1': '#ff1493',
'deeppink2': '#ee1289',
'deeppink3': '#cd1076',
'deeppink4': '#8b0a50',
'deepskyblue': '#00bfff',
'deepskyblue1': '#00bfff',
'deepskyblue2': '#00b2ee',
'deepskyblue3': '#009acd',
'deepskyblue4': '#00688b',
'dim': '#696969',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodger': '#1e90ff',
'dodgerblue': '#1e90ff',
'dodgerblue1': '#1e90ff',
'dodgerblue2': '#1c86ee',
'dodgerblue3': '#1874cd',
'dodgerblue4': '#104e8b',
'firebrick': '#b22222',
'firebrick1': '#ff3030',
'firebrick2': '#ee2c2c',
'firebrick3': '#cd2626',
'firebrick4': '#8b1a1a',
'floral': '#fffaf0',
'floralwhite': '#fffaf0',
'forest': '#228b22',
'forestgreen': '#228b22',
'gainsboro': '#dcdcdc',
'ghost': '#f8f8ff',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'gold1': '#ffd700',
'gold2': '#eec900',
'gold3': '#cdad00',
'gold4': '#8b7500',
'goldenrod': '#daa520',
'goldenrod1': '#ffc125',
'goldenrod2': '#eeb422',
'goldenrod3': '#cd9b1d',
'goldenrod4': '#8b6914',
'gray': '#bebebe',
'gray0': '#000000',
'gray1': '#030303',
'gray10': '#1a1a1a',
'gray100': '#ffffff',
'gray11': '#1c1c1c',
'gray12': '#1f1f1f',
'gray13': '#212121',
'gray14': '#242424',
'gray15': '#262626',
'gray16': '#292929',
'gray17': '#2b2b2b',
'gray18': '#2e2e2e',
'gray19': '#303030',
'gray2': '#050505',
'gray20': '#333333',
'gray21': '#363636',
'gray22': '#383838',
'gray23': '#3b3b3b',
'gray24': '#3d3d3d',
'gray25': '#404040',
'gray26': '#424242',
'gray27': '#454545',
'gray28': '#474747',
'gray29': '#4a4a4a',
'gray3': '#080808',
'gray30': '#4d4d4d',
'gray31': '#4f4f4f',
'gray32': '#525252',
'gray33': '#545454',
'gray34': '#575757',
'gray35': '#595959',
'gray36': '#5c5c5c',
'gray37': '#5e5e5e',
'gray38': '#616161',
'gray39': '#636363',
'gray4': '#0a0a0a',
'gray40': '#666666',
'gray41': '#696969',
'gray42': '#6b6b6b',
'gray43': '#6e6e6e',
'gray44': '#707070',
'gray45': '#737373',
'gray46': '#757575',
'gray47': '#787878',
'gray48': '#7a7a7a',
'gray49': '#7d7d7d',
'gray5': '#0d0d0d',
'gray50': '#7f7f7f',
'gray51': '#828282',
'gray52': '#858585',
'gray53': '#878787',
'gray54': '#8a8a8a',
'gray55': '#8c8c8c',
'gray56': '#8f8f8f',
'gray57': '#919191',
'gray58': '#949494',
'gray59': '#969696',
'gray6': '#0f0f0f',
'gray60': '#999999',
'gray61': '#9c9c9c',
'gray62': '#9e9e9e',
'gray63': '#a1a1a1',
'gray64': '#a3a3a3',
'gray65': '#a6a6a6',
'gray66': '#a8a8a8',
'gray67': '#ababab',
'gray68': '#adadad',
'gray69': '#b0b0b0',
'gray7': '#121212',
'gray70': '#b3b3b3',
'gray71': '#b5b5b5',
'gray72': '#b8b8b8',
'gray73': '#bababa',
'gray74': '#bdbdbd',
'gray75': '#bfbfbf',
'gray76': '#c2c2c2',
'gray77': '#c4c4c4',
'gray78': '#c7c7c7',
'gray79': '#c9c9c9',
'gray8': '#141414',
'gray80': '#cccccc',
'gray81': '#cfcfcf',
'gray82': '#d1d1d1',
'gray83': '#d4d4d4',
'gray84': '#d6d6d6',
'gray85': '#d9d9d9',
'gray86': '#dbdbdb',
'gray87': '#dedede',
'gray88': '#e0e0e0',
'gray89': '#e3e3e3',
'gray9': '#171717',
'gray90': '#e5e5e5',
'gray91': '#e8e8e8',
'gray92': '#ebebeb',
'gray93': '#ededed',
'gray94': '#f0f0f0',
'gray95': '#f2f2f2',
'gray96': '#f5f5f5',
'gray97': '#f7f7f7',
'gray98': '#fafafa',
'gray99': '#fcfcfc',
'green': '#adff2f',
'green1': '#00ff00',
'green2': '#00ee00',
'green3': '#00cd00',
'green4': '#008b00',
'greenyellow': '#adff2f',
'grey': '#bebebe',
'grey0': |
remotesyssupport/cobbler-1 | cobbler/config.py | Python | gpl-2.0 | 8,992 | 0.022464 | """
Config.py is a repository of the Cobbler object model
Copyright 2006-2009, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os
import weakref
import time
import random
import string
import binascii
import item_distro as distro
import item_profile as profile
import item_system as system
import item_repo as repo
import item_image as image
import item_mgmtclass as mgmtclass
import item_package as package
import item_file as file
import collection_distros as distros
import collection_profiles as profiles
import collection_systems as systems
import collection_repos as repos
import collection_images as images
import collection_mgmtclasses as mgmtclasses
import collection_packages as packages
import collection_files as files
import settings
import serializer
import traceback
from utils import _
from cexceptions import *
class Config:
has_loaded = False
__shared_state = {}
def __init__(self,api):
"""
Constructor. Manages a definitive copy of all data collections with weakrefs
pointing back into the class so they can understand each other's contents
"""
self.__dict__ = Config.__shared_state
if not Config.has_loaded:
self.__load(api)
def __load(self,api):
Config.has_loaded = True
self.init_time = time.time()
self.current_id = 0
self.api = api
self._distros = distros.Distros(weakref.proxy(self))
self._repos = repos.Repos(weakref.proxy(self))
self._profiles = profiles.Profiles(weakref.proxy(self))
self._systems = systems.Systems(weakref.proxy(self))
self._images = images.Images(weakref.proxy(self))
self._mgmtclasses = mgmtclasses.Mgmtclasses(weakref.proxy(self))
self._packages = packages.Packages(weakref.proxy(self))
self._files = files.Files(weakref.proxy(self))
self._settings = settings.Settings() # not a true collection
def generate_uid(self):
"""
Cobbler itself does not use this GUID's though they are provided
to allow for easier API linkage with other applications.
Cobbler uses unique names in each collection as the object id
aka primary key
"""
data = "%s%s" % (time.time(), random.uniform(1,9999999))
return binascii.b2a_base64(data).replace("=","").strip()
def __cmp(self,a,b):
return cmp(a.name,b.name)
def distros(self):
"""
Return the definitive copy of the Distros collection
"""
return self._distros
def profiles(self):
"""
Return the definitive copy of the Profiles collection
"""
return self._profiles
def systems(self):
"""
Return the definitive copy of the Systems collection
"""
return self._systems
def settings(self):
"""
Return the definitive copy of the application settings
"""
return self._settings
def repos(self):
"""
Return the definitive copy of the Repos collection
"""
return self._repos
def images(self):
"""
Return the definitive copy of the Images collection
"""
return self._images
def mgmtclasses(self):
"""
Return the definitive copy of the Mgmtclasses collection
"""
return self._mgmtclasses
def packages(self):
"""
Return the definitive copy of the Packages collection
"""
return self._packages
def files(self):
"""
Return the definitive copy of the Files collection
"""
return self._files
def new_distro(self,is_subobject=False):
"""
Create a new distro object with a backreference to this object
"""
return distro.Distro(weakref.proxy(self),is_subobject=is_subobject)
def new_system(self,is_subobject=False):
"""
Create a new system with a backreference to this object
"""
return system.System(weakref.proxy(self),is_subobject=is_subobject)
def new_profile(self,is_subobject=False):
"""
Create a new profile with a backreference to this object
"""
return profile.Profile(weakref.proxy(self),is_subobject=is_subobject)
def new_repo(self,is_subobject=False):
"""
Create a new mirror to keep track of...
"""
return repo.Repo(weakref.proxy(self),is_subobject=is_subobject)
def new_image(self,is_subobject=False):
"""
Create a new image object...
"""
return image.Image(weakref.proxy(self),is_subobject=is_subobject)
def new_mgmtclass(self,is_subobject=False):
"""
Create a new mgmtclass object...
"""
return mgmtclass.Mgmtclass(weakref.proxy(self),is_subobject=is_subobject)
def new_package(self,is_subobject=False):
"""
Create a new package object...
"""
return package.Package(weakref.proxy(self),is_subobject=is_subobject)
def new_file(self,is_subobject=False):
"""
Create a new image object...
"""
return file.File(weakref.proxy(self),is_subobject=is_subobject)
def clear(self):
"""
Forget about all loaded configuration data
"""
self._distros.clear(),
self._repos.clear(),
self._profiles.clear(),
self._images.clear()
self._systems.clear(),
self._mgmtclasses.clear(),
self._packages.clear(),
self._files.clear(),
return True
def serialize(self):
"""
Save the object hierarchy to disk, using the filenames referenced in each object.
"""
serializer.serialize(self._distros)
serializer.serialize(self._repos)
serializer.serialize(self._profiles)
serializer.serialize(self._images)
serializer.serialize(self._systems)
serializer.serialize(self._mgmtclasses)
serializer.serialize(self._packages)
serializer.serialize(self._files)
return True
def serialize_item(self,collection,item):
"""
Save item in the collection, resaving the whole collection if needed,
but ideally just saving the item.
"""
return serializer.serialize_item(collection,item)
def serialize_delete(self,collection,item):
"""
Erase item from a storage file, if neccessary rewritting the file.
"""
return serializer.serialize_delete(collection,item)
def deserialize(self):
"""
Load the object hierachy from disk, using the filenames referenced in each object.
"""
for item in [
self._settings,
self._distros,
self._repos,
self._profiles,
self._images,
self._systems,
self._mgmtclasses,
self._packages,
self._files,
]:
try:
if not serializer.deserialize(item): raise ""
exce | pt:
raise CX("serializer: error loading collection %s. Check /etc/cobbler/modules.conf" % item.collection_type())
return True
def deserialize_raw(self,collection_type):
"""
Get obj | ect data from disk, not objects.
"""
return serializer.deserialize_raw(collection_type)
def deserialize_item_raw(self,collection_type,obj_name):
"""
Get a raw single object.
"""
return serializer.de |
Temesis/django-avatar | avatar/models.py | Python | bsd-3-clause | 6,049 | 0.000331 | import datetime
import os
from django.db import models
from django.core.files.base import ContentFile
from django.core.files.storage import get_storage_class
from django.utils.translation import ugettext as _
from django.utils.hashcompat import md5_constructor
from django.utils.encoding import smart_str
from django.db.models import signals
from django.contrib.auth.models import User
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from PIL import Image
except ImportError:
import Image
try:
from django.utils.timezone import now
except ImportError:
now = datetime.datetime.now
from avatar.util import invalidate_cache
from avatar.settings import (AVATAR_STORAGE_DIR, AVATAR_RESIZE_METHOD,
AVATAR_MAX_AVATARS_PER_USER, AVATAR_THUMB_FORMAT,
AVATAR_HASH_USERDIRNAMES, AVATAR_HASH_FILENAMES,
AVATAR_THUMB_QUALITY, AUTO_GENERATE_AVATAR_SIZES,
AVATAR_DEFAULT_SIZE, AVATAR_STORAGE,
AVATAR_CLEANUP_DELETED, AVATAR_USE_PK_IN_PATH)
avatar_storage = get_storage_class(AVATAR_STORAGE)()
def avatar_file_path(instance=None, filename=None, size=None, ext=None):
tmppath = [AVATAR_STORAGE_DIR]
dir = instance.user.username
if AVATAR_USE_PK_IN_PATH:
dir = str(instance.user.pk)
if AVATAR_HASH_USERDIRNAMES:
tmp = md5_constructor(dir).hexdigest()
tmppath.extend([tmp[0], tmp[1], dir])
else:
tmppath.append(dir)
if not filename:
# Filename already stored in database
filename = instance.avatar.name
if ext and AVATAR_HASH_FILENAMES:
# An extension was provided, probably because the thumbnail
# is in a different format than the file. Use it. Because it's
# only enabled if AVATAR_HASH_FILENAMES is true, we can trust
# it won't conflict with another filename
(root, oldext) = os.path.splitext(filename)
filename = root + "." + ext
else:
# File doesn't exist yet
if AVATAR_HASH_FILENAMES:
(root, ext) = os.path.splitext(filename)
filename = md5_constructor(smart_str(filename)).hexdigest()
filename = filename + ext
if size:
tmppath.extend(['resized', str(size)])
tmppath.append(os.path.basename(filename))
return os.path.join(*tmppath)
def find_extension(format):
format = format.lower()
if format == 'jpeg':
format = 'jpg'
return format
class Avatar(models.Model):
user = models.ForeignKey(User)
primary = models.BooleanField(default=False)
avatar = models.ImageField(max_length=1024,
upload_to=avatar_file_path,
storage=avatar_storage,
blank=True)
date_uploaded = models.DateTimeField(default=now)
def __unicode__(self):
return _(u'Avatar for %s') % self.user
def save(self, *args, **kwargs):
avatars = Avatar.objects.filter(user=self.user)
if self.pk:
avatars = avatars.exclude(pk=self.pk)
if AVATAR_MAX_AVATARS_PER_USER > 1:
if self.primary:
avatars = avatars.filter(primary=True)
avatars.update(primary=False)
else:
avatars.delete()
super(Avatar, self).save(*args, **kwargs)
def thumbnail_exists(self, size):
return self.avatar.storage.exists(self.avatar_name(size))
def create_thumbnail(self, size, quality=None):
# invalidate the cache of the thumbnail with the given size first
invalidate_cache(self.user, size)
try:
orig = self.avatar.storage.open(self.av | atar.name, 'rb').read()
image = Image.open(StringIO(orig))
quality = quality or AVATAR_THUMB_QUALITY
(w, h) = image.size
if w != siz | e or h != size:
if w > h:
diff = (w - h) / 2
image = image.crop((diff, 0, w - diff, h))
else:
diff = (h - w) / 2
image = image.crop((0, diff, w, h - diff))
if image.mode != "RGB":
image = image.convert("RGB")
image = image.resize((size, size), AVATAR_RESIZE_METHOD)
thumb = StringIO()
image.save(thumb, AVATAR_THUMB_FORMAT, quality=quality)
thumb_file = ContentFile(thumb.getvalue())
else:
thumb_file = ContentFile(orig)
thumb = self.avatar.storage.save(self.avatar_name(size), thumb_file)
except IOError:
return # What should we do here? Render a "sorry, didn't work" img?
def avatar_url(self, size):
return self.avatar.storage.url(self.avatar_name(size))
def get_absolute_url(self):
return self.avatar_url(AVATAR_DEFAULT_SIZE)
def avatar_name(self, size):
ext = find_extension(AVATAR_THUMB_FORMAT)
return avatar_file_path(
instance=self,
size=size,
ext=ext
)
def invalidate_avatar_cache(sender, instance, **kwargs):
invalidate_cache(instance.user)
def create_default_thumbnails(sender, instance, created=False, **kwargs):
invalidate_avatar_cache(sender, instance)
if created:
for size in AUTO_GENERATE_AVATAR_SIZES:
instance.create_thumbnail(size)
def remove_avatar_images(instance=None, **kwargs):
for size in AUTO_GENERATE_AVATAR_SIZES:
if instance.thumbnail_exists(size):
instance.avatar.storage.delete(instance.avatar_name(size))
instance.avatar.storage.delete(instance.avatar.name)
signals.post_save.connect(create_default_thumbnails, sender=Avatar)
signals.post_delete.connect(invalidate_avatar_cache, sender=Avatar)
if AVATAR_CLEANUP_DELETED:
signals.post_delete.connect(remove_avatar_images, sender=Avatar)
|
twaugh/docker-registry-client | tests/test_image.py | Python | apache-2.0 | 409 | 0 | from __f | uture__ import absolute_import
from docker_registry_client.Image import Image
from docker_registry_client._BaseClient import BaseClientV1
from tests.mock_registry import mock_v1_registry
class TestImage(object):
def test_init(self):
url = mock_v1_registry()
image_id = 'test_image_id'
image = Image(image_id, BaseClientV1(url))
assert image.image_id == image_id | |
googleapis/python-bigquery-datatransfer | samples/generated_samples/bigquerydatatransfer_v1_generated_data_transfer_service_get_data_source_sync.py | Python | apache-2.0 | 1,542 | 0.001946 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, sof | tware
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetDataSource
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package depen | dency, execute the following:
# python3 -m pip install google-cloud-bigquery-datatransfer
# [START bigquerydatatransfer_v1_generated_DataTransferService_GetDataSource_sync]
from google.cloud import bigquery_datatransfer_v1
def sample_get_data_source():
# Create a client
client = bigquery_datatransfer_v1.DataTransferServiceClient()
# Initialize request argument(s)
request = bigquery_datatransfer_v1.GetDataSourceRequest(
name="name_value",
)
# Make the request
response = client.get_data_source(request=request)
# Handle the response
print(response)
# [END bigquerydatatransfer_v1_generated_DataTransferService_GetDataSource_sync]
|
MayankGo/ec2-api | ec2api/api/quota.py | Python | apache-2.0 | 2,313 | 0.003026 | # Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutronclient.common import exceptions as neutron_exception
from oslo_config import cfg
from oslo_log import log as logging
from ec2api.api import clients
from ec2api.api import common
from ec2api.api import ec2utils
from ec2api import exception
from ec2api.i18n import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
"""Quota related API implementation
"""
Validator = common.Validator
de | f update_quota(context, account, resource, quota):
account = account[4:]
neutron = clients.neutron(context)
with common.OnCrashCleaner() as cleaner:
os_quota_body = {
'quota': {
resource : quota,
}
}
os_quota = neutron.update_quota(account, os_quota_body)['quota']
return {'quota-update': | _format_quota_update(context, resource, os_quota)}
def _format_quota_update(context, resource, os_quota):
return {
resource : os_quota[resource]
}
def show_quota(context, account):
account = account[4:]
neutron = clients.neutron(context)
with common.OnCrashCleaner() as cleaner:
os_quota = neutron.show_quota(account)
return {'show-quota': _format_show_quota(context, os_quota)}
def _format_show_quota(context, os_quota):
return {
'Networks': os_quota['quota']['network'],
'Subnets': os_quota['quota']['subnet'],
'Ports': os_quota['quota']['port'],
'Routers': os_quota['quota']['router'],
'FloatingIps': os_quota['quota']['floatingip'],
'SecurityGroups': os_quota['quota']['security_group'],
'SecurityGroupRules': os_quota['quota']['security_group_rule']
}
|
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/seresnext_test_base.py | Python | apache-2.0 | 2,516 | 0 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import seresnext_net
import paddle.fluid.core as core
from parallel_executor_test_base import TestParallelExecutorBase, DeviceType
from parallel_executor_test_base import Device | Type
import numpy as np
class TestResnetBase(Te | stParallelExecutorBase):
def _compare_result_with_origin_model(self,
check_func,
use_device,
delta2=1e-5,
compare_seperately=True):
if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda():
return
func_1_first_loss, func_1_last_loss = self.check_network_convergence(
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_device),
iter=seresnext_net.iter(use_device),
batch_size=seresnext_net.batch_size(use_device),
use_device=use_device,
use_reduce=False,
optimizer=seresnext_net.optimizer)
func_2_first_loss, func_2_last_loss = check_func(
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_device),
iter=seresnext_net.iter(use_device),
batch_size=seresnext_net.batch_size(use_device),
use_device=use_device)
if compare_seperately:
for loss in zip(func_1_first_loss, func_2_first_loss):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
for loss in zip(func_1_last_loss, func_2_last_loss):
self.assertAlmostEquals(loss[0], loss[1], delta=delta2)
else:
self.assertAlmostEquals(
np.mean(func_1_first_loss), func_2_first_loss[0], delta=1e-5)
self.assertAlmostEquals(
np.mean(func_1_last_loss), func_2_last_loss[0], delta=delta2)
|
zozo123/buildbot | master/contrib/coverage2text.py | Python | gpl-3.0 | 4,085 | 0.00049 | #!/usr/bin/env python
import sys
from coverage import coverage
from coverage.results import Numbers
from coverage.summary import SummaryReporter
from twisted.python import usage
# this is an adaptation of the code behind "coverage report", modified to
# display+sortby "lines uncovered", which (IMHO) is more important of a
# metric than lines covered or percentage covered. Concentrating on the files
# with the most uncovered lines encourages getting the tree and test suite
# into a state that provides full line-coverage on all files.
# much of this code was adapted from coverage/summary.py in the 'coverage'
# distribution, and is used under their BSD license.
class Options(usage.Options):
optParameters = [
("sortby", "s", "uncovered", "how to sort: uncovered, covered, name"),
]
class MyReporter(SummaryReporter):
def report(self, outfile=None, sortby="uncovered"):
self.find_code_units(None, ["/System", "/Library", "/usr/lib",
"buildbot/test", "simplejson"])
# Prepare the formatting strings
max_name = max([len(cu.name) for cu in self.code_units] + [5])
fmt_name = "%%- %ds " % max_name
fmt_err = "%s %s: %s\n"
header1 = (fmt_name % "") + " Statements "
header2 = (fmt_name % "Name") + " Uncovered Covered"
fmt_coverage = fmt_name + "%9d %7d "
if self.branches:
header1 += " Branches "
header2 += " Found Excutd"
fmt_coverage += " %6d %6d"
header1 += " Percent"
header2 += " Covered"
fmt_coverage += " %7d%%"
if self.show_missing:
header1 += " "
header2 += " Missing"
fmt_coverage += " %s"
rule = "-" * len(header1) + "\n"
header1 += "\n"
header2 += "\n"
fmt_coverage | += "\n"
if not outfile:
outfile = sys.stdout
# Write the header
outfile.write(header1)
outfile.write(header2)
outfile.write(rule)
total = Numbers()
| total_uncovered = 0
lines = []
for cu in self.code_units:
try:
analysis = self.coverage._analyze(cu)
nums = analysis.numbers
uncovered = nums.n_statements - nums.n_executed
total_uncovered += uncovered
args = (cu.name, uncovered, nums.n_executed)
if self.branches:
args += (nums.n_branches, nums.n_executed_branches)
args += (nums.pc_covered,)
if self.show_missing:
args += (analysis.missing_formatted(),)
if sortby == "covered":
sortkey = nums.pc_covered
elif sortby == "uncovered":
sortkey = uncovered
else:
sortkey = cu.name
lines.append((sortkey, fmt_coverage % args))
total += nums
except KeyboardInterrupt: # pragma: no cover
raise
except:
if not self.ignore_errors:
typ, msg = sys.exc_info()[:2]
outfile.write(fmt_err % (cu.name, typ.__name__, msg))
lines.sort()
if sortby in ("uncovered", "covered"):
lines.reverse()
for sortkey, line in lines:
outfile.write(line)
if total.n_files > 1:
outfile.write(rule)
args = ("TOTAL", total_uncovered, total.n_executed)
if self.branches:
args += (total.n_branches, total.n_executed_branches)
args += (total.pc_covered,)
if self.show_missing:
args += ("",)
outfile.write(fmt_coverage % args)
def report(o):
c = coverage()
c.load()
r = MyReporter(c, show_missing=False, ignore_errors=False)
r.report(sortby=o['sortby'])
if __name__ == '__main__':
o = Options()
o.parseOptions()
report(o)
|
4thegr8just1ce/maze_py | main.py | Python | gpl-2.0 | 2,067 | 0.068347 | import os
import time
maze = [[1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,1,0,0,0,1],
[1,0,1,0,1,1,1,0,1,0,1],
[1,0,1,0,1,0,0,0,1,0,1],
[1,0,1,0,1,1,1,0,1, | 1,1],
[1,0,1,0,0,0,0,0,0,0,1],
[1,0,1,0,1,1,1,0,1,0,1],
[1,0,1,0,1,0,0,0,1,0,1],
[1,1,1,0,1,0,1,1,1,0,1],
[1,0,0,0,1,0,1,0,0,0,0,4],
[1,1,1,1,1,1,1,1,1,1,1]]
width = 11
height = 11
x = 1
y = 1
v = 0
def draw (maze):
time.sleep(0.1)
os.system('cls')
for i in range(len(maze)):
for j in range(len(maze[0])):
if (i == y and j == x):
print("oo", end="")
conti | nue
if (maze[i][j] == 0):
print (" ", end="")
elif (maze[i][j] == 1):
print ("██", end="")
elif (maze[i][j] == 4):
print (" ", end="")
print()
step = 0
while maze[y][x]!= 4:
draw(maze)
if (v == 0):
if (maze[y][x+1] != 1):
v = 3
elif (maze[y-1][x] != 1):
v = 0
elif (maze[y][x - 1] != 1):
v = 1
else:
v = 2
elif (v == 1):
if (maze[y-1][x] != 1):
v = 0
elif (maze[y][x-1] != 1):
v = 1
elif (maze[y + 1][x] != 1):
v = 2
else:
v = 3
elif (v == 2):
if (maze[y][x - 1] != 1):
v = 1
elif (maze[y+1][x] != 1):
v = 2
elif (maze[y][x + 1] != 1):
v = 3
else:
v = 0
elif (v == 3):
if (maze[y+1][x] != 1):
v = 2
elif (maze[y][x + 1] != 1):
v = 3
elif (maze[y-1][x] != 1):
v = 0
else:
v = 1
if (v == 0):
y -= 1
step +=1
elif (v == 1):
x -= 1
step +=1
elif (v == 2):
y += 1
step +=1
else:
x += 1
step +=1
print (step)
|
alandmoore/KiLauncher | kilauncher/__init__.py | Python | gpl-3.0 | 31 | 0 | from | .app import KiLaunc | herApp
|
misaksen/umediaproxy | mediaproxy/headers.py | Python | gpl-2.0 | 3,161 | 0.002847 | # Copyright (C) 2008 AG Projects
# Author: Ruud K | laver <ruud@ag-projects | .com>
#
"""Header encoding and decoding rules for communication between the dispatcher and relay components"""
class EncodingError(Exception):
pass
class DecodingError(Exception):
pass
class MediaProxyHeaders(object):
@classmethod
def encode(cls, name, value):
func_name = "encode_%s" % name
if hasattr(cls, func_name):
return getattr(cls, func_name)(value)
else:
return value
@classmethod
def decode(cls, name, value):
func_name = "decode_%s" % name
if hasattr(cls, func_name):
return getattr(cls, func_name)(value)
else:
return value
@staticmethod
def encode_cseq(value):
return str(value)
@staticmethod
def decode_cseq(value):
try:
return int(value)
except ValueError:
raise DecodingError("Not an integer: %s" % value)
@staticmethod
def encode_type(value):
if value not in ["request", "reply"]:
raise EncodingError('"type" header should be either "request" or "reply"')
return value
@staticmethod
def decode_type(value):
if value not in ["request", "reply"]:
raise DecodingError('"type" header should be either "request" or "reply"')
return value
@staticmethod
def encode_media(value):
try:
return ','.join(':'.join([type, ip, str(port), direction] + ['%s=%s' % param for param in parameters]) for type, ip, port, direction, parameters in value)
except:
raise EncodingError("Ill-formatted media information")
@staticmethod
def decode_media(value):
try:
streams = []
for stream_data in (data for data in value.split(",") if data):
stream_data = stream_data.split(":")
type, ip, port, direction = stream_data[:4]
parameters = dict(param.split("=") for param in stream_data[4:] if param)
streams.append((type, ip, int(port), direction, parameters))
return streams
except:
raise DecodingError("Ill-formatted media header")
return retval
class CodingDict(dict):
def __init__(self, *args, **kwargs):
if not args and not kwargs:
it = []
elif kwargs:
it = kwargs.iteritems()
elif isinstance(args[0], dict):
it = args[0].iteritems()
else:
try:
it = iter(args[0])
except:
dict.__init__(self, *args, **kwargs)
return
dict.__init__(self)
for key, value in it:
self.__setitem__(key, value)
class EncodingDict(CodingDict):
def __setitem__(self, key, value):
encoded_value = MediaProxyHeaders.encode(key, value)
dict.__setitem__(self, key, encoded_value)
class DecodingDict(CodingDict):
    """Dict that decodes values via MediaProxyHeaders on insertion."""
    def __setitem__(self, key, value):
        # Decode with the header-specific decoder before storing.
        decoded_value = MediaProxyHeaders.decode(key, value)
        dict.__setitem__(self, key, decoded_value)
|
caioserra/apiAdwords | examples/adspygoogle/adwords/v201309/reporting/get_report_fields.py | Python | apache-2.0 | 1,806 | 0.008306 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an | "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets report fields.
Tags: ReportDefinitionService.getReportFields
"""
__author__ = 'api.kwinter@gmail.com (Kevin | Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
report_type = 'INSERT_REPORT_TYPE_HERE'
def main(client, report_type):
  """Fetches and prints the downloadable fields for an AdWords report type.

  Args:
    client: An initialized AdWordsClient instance.
    report_type: str, the report type whose fields should be listed.
  """
  # Initialize appropriate service.
  report_definition_service = client.GetReportDefinitionService(
      version='v201309')

  # Get report fields.
  fields = report_definition_service.GetReportFields(report_type)

  # Display results.
  print 'Report type \'%s\' contains the following fields:' % report_type
  for field in fields:
    print ' - %s (%s)' % (field['fieldName'], field['fieldType'])
    if field.get('enumValues'):
      print '      := [%s]' % ', '.join(field['enumValues'])
  print
  # Report the API quota consumed by this run.
  print ('Usage: %s units, %s operations' % (client.GetUnits(),
                                             client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, report_type)
|
TheWitchers/Team | TestingArea/Multithreading.py | Python | gpl-2.0 | 608 | 0 | __author__ = 'dvir'
import threading
import random
def Splitter(words):
    """Print the words of *words* (a whitespace-separated string) in a
    random order, space-joined on a single line.
    """
    remaining = words.split()
    shuffled = []
    # Repeatedly remove a random word until none are left.  The original
    # used the name `list` for `remaining`, shadowing the builtin.
    while remaining:
        shuffled.append(remaining.pop(random.randrange(0, len(remaining))))
    print(' '.join(shuffled))
if __name__ == '__main__':
    sen = 'Your god damn right.'
    numOfTreads = 5
    threadList = []
    print("STARTING...\n")
    # Spawn the workers; each prints its own random shuffle of the
    # sentence, so the output order is nondeterministic.
    for i in range(numOfTreads):
        t = threading.Thread(target=Splitter,
                             args=(sen,))
        t.start()
        threadList.append(t)
    print("\nTread Count: " + str(threading.activeCount()))
    # NOTE(review): the threads are collected in threadList but never
    # join()ed, so "EXITING" may print before all workers finish.
    print("EXITING...\n")
|
bryansim/Python | piglow/clock.py | Python | gpl-2.0 | 3,562 | 0.011791 | ######################################
## A binary clock using the PiGlow ##
## ##
## Example by Jason - @Boeeerb ##
######################################
from piglow import PiGlow
from time import sleep
from datetime import datetime
piglow = PiGlow()
### You can customise these settings ###
show12hr = 1 # Show 12 or 24hr clock - 0= 24hr, 1= 12hr
ledbrightness = 10 # Set brightness of LED - 1-255 (recommend 10-20, put 0 and you won't see it!)
hourflash = 1 # Choose how to flash change of hour - 1= white leds, 2= all flash
armtop = "s" # h= hour, m= minutes, s= seconds
armright = "m"
armbottom = "h"
### End of customising ###
piglow.all(0)

hourcount = 0
hourcurrent = 0


def _bits_for(selector, binhour, binmin, binsec):
    """Return the 6-char bit string for an arm configured as h/m/s."""
    if selector == "h":
        return binhour
    if selector == "m":
        return binmin
    return binsec


def _show_arm(first_led, bits):
    """Light one 6-LED arm.

    bits[5] (least significant) maps to first_led, bits[0] (most
    significant) to first_led + 5 -- same LED/value assignment as the
    original hand-unrolled code.
    """
    for offset in range(6):
        value = ledbrightness if bits[5 - offset] == "1" else 0
        piglow.led(first_led + offset, value)


while True:
    time = datetime.now().time()
    hour, minute, sec = str(time).split(":")

    # The seconds field may or may not carry a fractional part.
    try:
        str(sec).index(".")
        sec, micro = str(sec).split(".")
    except ValueError:
        sec = str(sec)
        micro = "0"

    hour = int(hour)
    if show12hr == 1:
        if hour > 12:
            hour = hour - 12
    minute = int(minute)
    sec = int(sec)

    # "%06d" % int(bin(x)[2:]) zero-pads the binary digits to 6 places.
    binhour = "%06d" % int(bin(hour)[2:])
    binmin = "%06d" % int(bin(minute)[2:])
    binsec = "%06d" % int(bin(sec)[2:])

    # When the hour changes, arm `hourcount` flashes of the hour signal.
    if hourcurrent != hour:
        hourcount = hour
        hourcurrent = hour

    # Bottom arm: LEDs 13-18, right arm: 7-12, top arm: 1-6.
    _show_arm(13, _bits_for(armbottom, binhour, binmin, binsec))
    _show_arm(7, _bits_for(armright, binhour, binmin, binsec))
    _show_arm(1, _bits_for(armtop, binhour, binmin, binsec))

    # Flash the white LEDs (or all LEDs) once per remaining hour count.
    if hourcount != 0:
        sleep(0.5)
        if hourflash == 1:
            piglow.white(ledbrightness)
        if hourflash == 2:
            piglow.all(ledbrightness)
        sleep(0.5)
        hourcount = hourcount - 1
    else:
        sleep(0.1)
|
qewerty/moto.old | tools/scons/engine/SCons/Node/Alias.py | Python | gpl-2.0 | 4,276 | 0.002105 |
"""scons.Node.Alias
Alias nodes.
This creates a hash of global Aliases (dummy targets).
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, s | ublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notic | e and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/Alias.py 4720 2010/03/24 03:14:11 jars"
import string
import UserDict
import SCons.Errors
import SCons.Node
import SCons.Util
class AliasNameSpace(UserDict.UserDict):
    """Global registry mapping alias names to Alias nodes."""

    def Alias(self, name, **kw):
        """Return the Alias node for *name*, creating and registering a
        new one if needed.  If *name* is already an Alias node it is
        returned unchanged."""
        if isinstance(name, SCons.Node.Alias.Alias):
            return name
        try:
            a = self[name]
        except KeyError:
            # apply() was removed in Python 3; a direct call with **kw is
            # equivalent on Python 2 as well.
            a = SCons.Node.Alias.Alias(name, **kw)
            self[name] = a
        return a

    def lookup(self, name, **kw):
        """Return the Alias registered under *name*, or None."""
        try:
            return self[name]
        except KeyError:
            return None
class AliasNodeInfo(SCons.Node.NodeInfoBase):
    """Serialized signature/state record for an Alias node."""
    current_version_id = 1
    field_list = ['csig']
    def str_to_node(self, s):
        # Aliases live in the global alias namespace, not the filesystem.
        return default_ans.Alias(s)
class AliasBuildInfo(SCons.Node.BuildInfoBase):
    """Build-info record stored for an Alias node."""
    current_version_id = 1
class Alias(SCons.Node.Node):
    """A phony build target identified only by its name.

    Aliases live outside the filesystem; their "contents" is the
    concatenation of the content signatures of their sources.
    """

    NodeInfo = AliasNodeInfo
    BuildInfo = AliasBuildInfo

    def __init__(self, name):
        SCons.Node.Node.__init__(self)
        self.name = name

    def str_for_display(self):
        return '"' + self.__str__() + '"'

    def __str__(self):
        return self.name

    def make_ready(self):
        self.get_csig()

    really_build = SCons.Node.Node.build
    is_up_to_date = SCons.Node.Node.children_are_up_to_date

    def is_under(self, dir):
        # Make Alias nodes get built regardless of
        # what directory scons was run from. Alias nodes
        # are outside the filesystem:
        return 1

    def get_contents(self):
        """The contents of an alias is the concatenation
        of the content signatures of all its sources."""
        # str.join on a list comprehension works on Python 2 and 3 alike,
        # unlike the old string.join(map(lambda ...)) form.
        childsigs = [n.get_csig() for n in self.children()]
        return ''.join(childsigs)

    def sconsign(self):
        """An Alias is not recorded in .sconsign files"""
        pass

    def changed_since_last_build(self, target, prev_ni):
        # A node with no previous csig (first build) is always "changed".
        cur_csig = self.get_csig()
        try:
            return cur_csig != prev_ni.csig
        except AttributeError:
            return 1

    def build(self):
        """A "builder" for aliases."""
        pass

    def convert(self):
        # Turn this alias into a buildable node by restoring the default
        # Node.build behavior saved in really_build.
        try: del self.builder
        except AttributeError: pass
        self.reset_executor()
        self.build = self.really_build

    def get_csig(self):
        """
        Generate a node's content signature, the digested signature
        of its content, caching it in the node info on first use.

        returns - the content signature
        """
        try:
            return self.ninfo.csig
        except AttributeError:
            pass

        contents = self.get_contents()
        csig = SCons.Util.MD5signature(contents)
        self.get_ninfo().csig = csig
        return csig
default_ans = AliasNameSpace()
SCons.Node.arg2nodes_lookups.append(default_ans.lookup)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
rjschwei/azure-sdk-for-python | azure-mgmt-servermanager/azure/mgmt/servermanager/models/prompt_message_response.py | Python | mit | 856 | 0.001168 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behav | ior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------- | -----------
from msrest.serialization import Model
class PromptMessageResponse(Model):
    """The response to a prompt message.

    :param response: The list of responses a cmdlet expects.
    :type response: list of str
    """

    # msrest serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'response': {'key': 'response', 'type': '[str]'},
    }

    def __init__(self, response=None):
        # None means no responses were supplied.
        self.response = response
|
MBoustani/Geothon | Create Spatial File/Vector/create_shp_multipoint.py | Python | apache-2.0 | 1,696 | 0.008844 | #!/usr/bin/env python
'''
Project: Geothon (https://github.com/MBoustani/Geothon)
File: Vector/create_shp_multipoint.py
Description: This code creates points shapefile from some latitudes and longitues.
Author: Maziyar Bous | tani (github.com/MBoustani)
'''
import os
try:
import ogr
except ImportError:
from osgeo import ogr
try:
import osr
except ImportError:
from osgeo import osr
latitudes = [30, 30, 30]
longitudes = [10, 20, 30]

shapefile = 'multipoints.shp'
layer_name = 'multipoint_layer'

# Create the ESRI Shapefile driver.
driver = ogr.GetDriverByName('ESRI Shapefile')

# Create the shapefile data source, replacing any existing file.
if os.path.exists(shapefile):
    driver.DeleteDataSource(shapefile)
data_source = driver.CreateDataSource(shapefile)

# Create the spatial reference, in this case WGS84 (EPSG:4326).
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)

# Create a point layer with WGS84 as its spatial reference.
layer = data_source.CreateLayer(layer_name, srs, ogr.wkbPoint)

# Add a "Name" string column to the attribute table.
field_name = ogr.FieldDefn("Name", ogr.OFTString)
field_name.SetWidth(24)
layer.CreateField(field_name)

for i in range(len(latitudes)):
    # Build a point geometry at (lon, lat) -- OGR expects x=lon, y=lat.
    point = ogr.Geometry(ogr.wkbPoint)
    point.AddPoint(longitudes[i], latitudes[i])
    # Create a feature carrying the geometry and a name attribute.
    feature = ogr.Feature(layer.GetLayerDefn())
    feature.SetGeometry(point)
    feature.SetField("Name", 'point_{0}'.format(str(i)))
    layer.CreateFeature(feature)
    feature.Destroy()

# Close the data source so all features are flushed to disk; relying on
# interpreter shutdown can leave the shapefile incomplete.
data_source.Destroy()
|
XXMrHyde/android_external_chromium_org | build/android/pylib/instrumentation/test_jar.py | Python | bsd-3-clause | 8,461 | 0.008037 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper class for instrumenation test jar."""
import collections
import logging
import os
import pickle
import re
from pylib import cmd_helper
from pylib import constants
# If you change the cached output of proguard, increment this number
PICKLE_FORMAT_VERSION = 1
class TestJar(object):
_ANNOTATIONS = frozenset(
['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', 'EnormousTest',
'FlakyTest', 'DisabledTest', 'Manual', 'PerfTest'])
_DEFAULT_ANNOTATION = 'SmallTest'
_PROGUARD_CLASS_RE = re.compile(r'\s*?- Program class:\s*([\S]+)$')
_PROGUARD_METHOD_RE = re.compile(r'\s*?- Method:\s*(\S*)[(].*$')
_PROGUARD_ANNOTATION_RE = re.compile(r'\s*?- Annotation \[L(\S*);\]:$')
_PROGUARD_ANNOTATION_CONST_RE = (
re.compile(r'\s*?- Constant element value.*$'))
_PROGUARD_ANNOTATION_VALUE_RE = re.compile(r'\s*?- \S+? \[(.*)\]$')
  def __init__(self, jar_path):
    """Builds the annotation/test-method index for an instrumentation jar.

    Args:
      jar_path: Path to the instrumentation test jar; must already exist.
    """
    if not os.path.exists(jar_path):
      raise Exception('%s not found, please build it' % jar_path)

    # Locate proguard in the SDK, falling back to the Android source tree.
    sdk_root = os.getenv('ANDROID_SDK_ROOT', constants.ANDROID_SDK_ROOT)
    self._PROGUARD_PATH = os.path.join(sdk_root,
                                       'tools/proguard/bin/proguard.sh')
    if not os.path.exists(self._PROGUARD_PATH):
      self._PROGUARD_PATH = os.path.join(os.environ['ANDROID_BUILD_TOP'],
                                         'external/proguard/bin/proguard.sh')
    self._jar_path = jar_path
    # Maps 'Class#method' -> list of annotation strings.
    self._annotation_map = collections.defaultdict(list)
    self._pickled_proguard_name = self._jar_path + '-proguard.pickle'
    self._test_methods = []
    # Prefer the pickled cache; run proguard only when it is stale.
    if not self._GetCachedProguardData():
      self._GetProguardData()
def _GetCachedProguardData(self):
if (os.path.exists(self._pickled_proguard_name) and
(os.path.getmtime(self._pickled_proguard_name) >
os.path.getmtime(self._jar_path))):
logging.info('Loading cached proguard output from %s',
self._pickled_proguard_name)
try:
with open(self._pickled_proguard_name, 'r') as r:
d = pickle.loads(r.read())
if d['VERSION'] == PICKLE_FORMAT_VERSION:
self._annotation_map = d['ANNOTATION_MAP']
self._test_methods = d['TEST_METHODS']
return True
except:
logging.warning('PICKLE_FORMAT_VERSION has changed, ignoring cache')
return False
  def _GetProguardData(self):
    """Runs proguard's -dump over the jar and indexes its output.

    Populates _test_methods (all 'Class#testX' methods on *Test classes)
    and _annotation_map ('Class#method' -> annotations, with constant
    annotation values appended as 'Annotation:value'), then pickles the
    result next to the jar for _GetCachedProguardData.
    """
    proguard_output = cmd_helper.GetCmdOutput([self._PROGUARD_PATH,
                                               '-injars', self._jar_path,
                                               '-dontshrink',
                                               '-dontoptimize',
                                               '-dontobfuscate',
                                               '-dontpreverify',
                                               '-dump',
                                              ]).split('\n')
    clazz = None
    method = None
    annotation = None
    has_value = False
    qualified_method = None
    # Line-oriented state machine over the proguard dump: class lines set
    # the context, method lines set the current qualified method, and
    # annotation (+ optional constant value) lines attach to that method.
    for line in proguard_output:
      m = self._PROGUARD_CLASS_RE.match(line)
      if m:
        clazz = m.group(1).replace('/', '.')  # Change package delim.
        annotation = None
        continue

      m = self._PROGUARD_METHOD_RE.match(line)
      if m:
        method = m.group(1)
        annotation = None
        qualified_method = clazz + '#' + method
        if method.startswith('test') and clazz.endswith('Test'):
          self._test_methods += [qualified_method]
        continue

      if not qualified_method:
        # Ignore non-method annotations.
        continue

      m = self._PROGUARD_ANNOTATION_RE.match(line)
      if m:
        annotation = m.group(1).split('/')[-1]  # Ignore the annotation package.
        self._annotation_map[qualified_method].append(annotation)
        has_value = False
        continue
      if annotation:
        if not has_value:
          m = self._PROGUARD_ANNOTATION_CONST_RE.match(line)
          if m:
            has_value = True
        else:
          # The value follows the "Constant element value" marker line.
          m = self._PROGUARD_ANNOTATION_VALUE_RE.match(line)
          if m:
            value = m.group(1)
            self._annotation_map[qualified_method].append(
                annotation + ':' + value)
          has_value = False

    logging.info('Storing proguard output to %s', self._pickled_proguard_name)
    d = {'VERSION': PICKLE_FORMAT_VERSION,
         'ANNOTATION_MAP': self._annotation_map,
         'TEST_METHODS': self._test_methods}
    with open(self._pickled_proguard_name, 'w') as f:
      f.write(pickle.dumps(d))
  def _GetAnnotationMap(self):
    # Returns the defaultdict mapping 'Class#method' -> annotation list.
    return self._annotation_map
  def _IsTestMethod(self, test):
    # A test id looks like 'package.FooTest#testBar'; both the class
    # suffix and the method prefix must match.
    class_name, method = test.split('#')
    return class_name.endswith('Test') and method.startswith('test')
  def GetTestAnnotations(self, test):
    """Returns a list of all annotations for the given |test|. May be empty."""
    if not self._IsTestMethod(test):
      return []
    # _annotation_map is a defaultdict, so unknown tests yield [].
    return self._GetAnnotationMap()[test]
  def _AnnotationsMatchFilters(self, annotation_filter_list, annotations):
    """Checks if annotations match any of the filters.

    A filter is either a bare annotation name ('SmallTest') or a
    'Annotation=value1,value2' form that matches 'Annotation:value'
    entries.  An empty filter list matches everything.
    """
    if not annotation_filter_list:
      return True
    for annotation_filter in annotation_filter_list:
      filters = annotation_filter.split('=')
      if len(filters) == 2:
        key = filters[0]
        value_list = filters[1].split(',')
        for value in value_list:
          if key + ':' + value in annotations:
            return True
      elif annotation_filter in annotations:
        return True
    return False
def GetAnnotatedTests(self, annotation_filter_list):
"""Returns a list of all tests that match the given annotation filters."""
return [test for test, annotations in self._GetAnnotationMap().iteritems()
if self._IsTestMethod(test) and self._AnnotationsMatchFilters(
annotation_filter_list, annotations)]
  def GetTestMethods(self):
    """Returns a list of all test methods in this apk as Class#testMethod."""
    return self._test_methods
  def _GetTestsMissingAnnotation(self):
    """Get a list of test methods with no known annotations."""
    tests_missing_annotations = []
    for test_method in self.GetTestMethods():
      annotations_ = frozenset(self.GetTestAnnotations(test_method))
      # NOTE(review): IsHostDrivenTest is defined elsewhere in this class;
      # presumably it filters host-driven (python-side) tests -- confirm.
      if (annotations_.isdisjoint(self._ANNOTATIONS) and
          not self.IsHostDrivenTest(test_method)):
        tests_missing_annotations.append(test_method)
    return sorted(tests_missing_annotations)
def _GetAllMatchingTests(self, annotation_filter_list,
exclude_annotation_list, test_filter):
"""Get a list of tests matching any of the annotations and the filter.
Args:
annotation_filter_list: List of test annotations. A test must have at
least one of these annotations. A test without any annotations is
considered to be SmallTest.
exclude_annotation_list: List of test annotations. A test must not have
any of these annotations.
test_filter: Filter used for partial matching on the test method names.
Returns:
List of all matching tests.
"""
if annotation_filter_list:
available_tests = self.GetAnnotatedTests(annotation_filter_list)
# Include un-annotated tests in SmallTest.
if annotation_filter_list.count(self._DEFAULT_ANNOTATION) > 0:
for test in self._GetTestsMissingAnnotation():
logging.warning(
'%s has no annotations. Assuming "%s".', test,
self._DEFAULT_ANNOTATION)
available_tests.append(test)
if exclude_annotation_list:
excluded_tests = self.GetAnnotatedTests(exclude_annotation_list)
available_tests = list(set(available_tests) - set(excluded_tests))
else:
available_tests = [m for m in self.GetTestMethods()
if not self.IsHostDrivenTest(m)]
tests = []
if test_filter:
# |available_tests| are in adb instrument format: package.path.class#test.
filter_without_hash |
garyphone/com_nets | visualization/enabled/_10010_project_visualization_panel.py | Python | mit | 401 | 0.002494 | # The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'visualization'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'project'
| # The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'network'
# Python panel class of the PANEL to be added.
ADD_PANEL = 'openstack_dashboard | .dashboards.project.visualization.panel.Visualization'
|
Jc2k/libcloud | libcloud/storage/drivers/cloudfiles.py | Python | apache-2.0 | 35,768 | 0.00014 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hashlib import sha1
import hmac
import os
from time import time
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlencode
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import b
from libcloud.utils.py3 import urlquote
if PY3:
from io import FileIO as file
from libcloud.utils.files import read_in_chunks
from libcloud.common.types import MalformedResponseError, LibcloudError
from libcloud.common.base import Response, RawResponse
from libcloud.storage.providers import Provider
from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.common.openstack import OpenStackBaseConnection
from libcloud.common.openstack import OpenStackDriverMixin
from libcloud.common.rackspace import AUTH_URL_US, AUTH_URL_UK
CDN_HOST = 'cdn.clouddrive.com'
API_VERSION = 'v1.0'
class CloudFilesResponse(Response):
    """HTTP response wrapper for the CloudFiles API."""

    # 404 and 409 are "valid" so the driver can translate them into typed
    # errors (ObjectDoesNotExist, ContainerAlreadyExists, ...).
    valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT]

    def success(self):
        i = int(self.status)
        return i >= 200 and i <= 299 or i in self.valid_response_codes

    def parse_body(self):
        """Parse the body per its content type; JSON bodies are decoded."""
        if not self.body:
            return None

        if 'content-type' in self.headers:
            key = 'content-type'
        elif 'Content-Type' in self.headers:
            key = 'Content-Type'
        else:
            raise LibcloudError('Missing content-type header')

        # Strip any parameters, e.g. "application/json; charset=UTF-8".
        content_type = self.headers[key]
        if content_type.find(';') != -1:
            content_type = content_type.split(';')[0]

        if content_type == 'application/json':
            try:
                data = json.loads(self.body)
            except ValueError:
                # json and simplejson both raise ValueError subclasses on
                # malformed input; narrowed from a bare except.
                raise MalformedResponseError('Failed to parse JSON',
                                             body=self.body,
                                             driver=CloudFilesStorageDriver)
        elif content_type == 'text/plain':
            data = self.body
        else:
            data = self.body

        return data
class CloudFilesRawResponse(CloudFilesResponse, RawResponse):
    # Raw (streaming) variant; all behavior comes from the two bases.
    pass
class CloudFilesConnection(OpenStackBaseConnection):
    """
    Base connection class for the Cloudfiles driver.
    """

    responseCls = CloudFilesResponse
    rawResponseCls = CloudFilesRawResponse

    def __init__(self, user_id, key, secure=True, auth_url=AUTH_URL_US,
                 **kwargs):
        super(CloudFilesConnection, self).__init__(user_id, key, secure=secure,
                                                   **kwargs)
        self.auth_url = auth_url
        self.api_version = API_VERSION
        self.accept_format = 'application/json'
        # Toggled per-request; selects the CDN endpoint in get_endpoint().
        self.cdn_request = False

        if self._ex_force_service_region:
            self.service_region = self._ex_force_service_region

    def get_endpoint(self):
        """Return the storage (or CDN) endpoint URL from the service catalog."""
        # First, we parse out both files and cdn endpoints
        # for each auth version
        if '2.0' in self._auth_version:
            eps = self.service_catalog.get_endpoints(
                service_type='object-store',
                name='cloudFiles')
            cdn_eps = self.service_catalog.get_endpoints(
                service_type='object-store',
                name='cloudFilesCDN')
        elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):
            eps = self.service_catalog.get_endpoints(name='cloudFiles')
            cdn_eps = self.service_catalog.get_endpoints(name='cloudFilesCDN')

        # if this is a CDN request, return the cdn url instead
        if self.cdn_request:
            eps = cdn_eps

        if self._ex_force_service_region:
            eps = [ep for ep in eps if ep['region'].lower() == self._ex_force_service_region.lower()]

        if len(eps) == 0:
            # TODO: Better error message
            raise LibcloudError('Could not find specified endpoint')

        # Multiple regions may match; the first endpoint wins.
        ep = eps[0]

        if 'publicURL' in ep:
            return ep['publicURL']
        else:
            raise LibcloudError('Could not find specified endpoint')

    def request(self, action, params=None, data='', headers=None, method='GET',
                raw=False, cdn_request=False):
        """Issue a request, forcing JSON responses and a JSON content type
        on POST/PUT bodies unless the caller set one explicitly."""
        if not headers:
            headers = {}
        if not params:
            params = {}

        self.cdn_request = cdn_request
        params['format'] = 'json'

        if method in ['POST', 'PUT'] and 'Content-Type' not in headers:
            headers.update({'Content-Type': 'application/json; charset=UTF-8'})

        return super(CloudFilesConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers,
            raw=raw)
class CloudFilesSwiftConnection(CloudFilesConnection):
    """
    Connection class for the Cloudfiles Swift endpoint.
    """

    def __init__(self, *args, **kwargs):
        # Pop the region before delegating so the base class does not see
        # an unexpected keyword argument.
        self.region_name = kwargs.pop('ex_region_name', None)
        super(CloudFilesSwiftConnection, self).__init__(*args, **kwargs)

    def get_endpoint(self, *args, **kwargs):
        """Return the Swift object-store endpoint for the configured region."""
        if '2.0' in self._auth_version:
            endpoint = self.service_catalog.get_endpoint(
                service_type='object-store',
                name='swift',
                region=self.region_name)
        elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):
            endpoint = self.service_catalog.get_endpoint(
                name='swift', region=self.region_name)

        if 'publicURL' in endpoint:
            return endpoint['publicURL']
        else:
            raise LibcloudError('Could not find specified endpoint')
class CloudFilesStorageDriver(StorageDriver, OpenStackDriverMixin):
"""
CloudFiles driver.
"""
name = 'CloudFiles'
website = 'http://www.rackspace.com/'
connectionCls = CloudFilesConnection
hash_type = 'md5'
supports_chunked_encoding = True
    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='ord', **kwargs):
        """
        @inherits:  L{StorageDriver.__init__}

        @param region: ID of the region which should be used.
        @type region: C{str}
        """
        # A subclass may pin its region via a _region class attribute.
        if hasattr(self, '_region'):
            region = self._region

        # This is here for backard compatibility
        if 'ex_force_service_region' in kwargs:
            region = kwargs['ex_force_service_region']

        self.region = region

        OpenStackDriverMixin.__init__(self, (), **kwargs)
        super(CloudFilesStorageDriver, self).__init__(key=key, secret=secret,
                                                      secure=secure, host=host,
                                                      port=port, **kwargs)
def iterate_containers(self):
response = self.connection.request('')
if response.status == httplib.NO_CONTENT:
return []
elif response.status == httplib.OK:
return self._to_container_list(json.loads(response.body))
raise LibcloudError('Unexpected status code: %s' % |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/decomposition/tests/test_dict_learning.py | Python | agpl-3.0 | 5,110 | 0.000587 | import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal, \
assert_equal
from nose import SkipTest
from nose.tools import assert_true
from sklearn.utils.testing import assert_less
from .. import DictionaryLearning, MiniBatchDictionaryLearning, SparseCoder, \
dict_learning_online, sparse_encode
rng = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng.randn(n_samples, n_features)
def test_dict_learning_shapes():
    """Fitted dictionary has shape (n_atoms, n_features)."""
    n_atoms = 5
    dico = DictionaryLearning(n_atoms).fit(X)
    assert_true(dico.components_.shape == (n_atoms, n_features))
def test_dict_learning_overcomplete():
    """Shape also holds when n_atoms > n_features (overcomplete dict)."""
    n_atoms = 12
    X = rng.randn(n_samples, n_features)
    dico = DictionaryLearning(n_atoms).fit(X)
    assert_true(dico.components_.shape == (n_atoms, n_features))
def test_dict_learning_reconstruction():
    """code . components_ reconstructs X for the omp and lasso_lars coders."""
    n_atoms = 12
    dico = DictionaryLearning(n_atoms, transform_algorithm='omp',
                              transform_alpha=0.001, random_state=0)
    code = dico.fit(X).transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X)

    dico.set_params(transform_algorithm='lasso_lars')
    code = dico.transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)

    # used to test lars here too, but there's no guarantee the number of
    # nonzero atoms is right.
def test_dict_learning_nonzero_coefs():
    """lars and omp honor transform_n_nonzero_coefs exactly."""
    n_atoms = 4
    dico = DictionaryLearning(n_atoms, transform_algorithm='lars',
                              transform_n_nonzero_coefs=3, random_state=0)
    code = dico.fit(X).transform(X[1])
    assert_true(len(np.flatnonzero(code)) == 3)

    dico.set_params(transform_algorithm='omp')
    code = dico.transform(X[1])
    assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_split():
    """split_sign splits codes into positive/negative halves whose
    difference equals the unsplit code."""
    n_atoms = 5
    dico = DictionaryLearning(n_atoms, transform_algorithm='threshold')
    code = dico.fit(X).transform(X)
    dico.split_sign = True
    split_code = dico.transform(X)

    assert_array_equal(split_code[:, :n_atoms] - split_code[:, n_atoms:], code)
def test_dict_learning_online_shapes():
    """dict_learning_online returns code/dictionary with matching shapes."""
    # rng = np.random.RandomState(0)
    # X = rng.randn(12, 10)
    n_atoms = 8
    code, dictionary = dict_learning_online(X, n_atoms=n_atoms, alpha=1,
                                            random_state=rng)
    assert_equal(code.shape, (n_samples, n_atoms))
    assert_equal(dictionary.shape, (n_atoms, n_features))
    assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_estimator_shapes():
    """MiniBatchDictionaryLearning produces (n_atoms, n_features) components."""
    n_atoms = 5
    dico = MiniBatchDictionaryLearning(n_atoms, n_iter=20).fit(X)
    assert_true(dico.components_.shape == (n_atoms, n_features))
def test_dict_learning_online_overcomplete():
    """Online variant also supports overcomplete dictionaries."""
    n_atoms = 12
    dico = MiniBatchDictionaryLearning(n_atoms, n_iter=20).fit(X)
    assert_true(dico.components_.shape == (n_atoms, n_features))
def test_dict_learning_online_initialization():
    """With n_iter=0 the dictionary stays exactly at dict_init."""
    n_atoms = 12
    V = rng.randn(n_atoms, n_features)
    dico = MiniBatchDictionaryLearning(n_atoms, n_iter=0, dict_init=V).fit(X)
    assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
    """fit() and an equivalent sequence of partial_fit() calls should agree."""
    # this test was not actually passing before!
    raise SkipTest
    n_atoms = 12
    V = rng.randn(n_atoms, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    rng1 = np.random.RandomState(0)
    rng2 = np.random.RandomState(0)
    dico1 = MiniBatchDictionaryLearning(n_atoms, n_iter=10, chunk_size=1,
                                        shuffle=False, dict_init=V,
                                        random_state=rng1).fit(X)
    dico2 = MiniBatchDictionaryLearning(n_atoms, n_iter=1, dict_init=V,
                                        random_state=rng2)
    for ii, sample in enumerate(X):
        dico2.partial_fit(sample, iter_offset=ii * dico2.n_iter)
        # if ii == 1: break
    assert_true(not np.all(sparse_encode(X, dico1.components_, alpha=100) ==
                           0))
    assert_array_equal(dico1.components_, dico2.components_)
def test_sparse_encode_shapes():
    """Every coding algorithm returns a (n_samples, n_atoms) code matrix."""
    n_atoms = 12
    V = rng.randn(n_atoms, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        code = sparse_encode(X, V, algorithm=algo)
        assert_equal(code.shape, (n_samples, n_atoms))
def test_sparse_encode_error():
    """With a small alpha the code is non-trivial and reconstructs X well."""
    n_atoms = 12
    V = rng.randn(n_atoms, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    code = sparse_encode(X, V, alpha=0.001)
    assert_true(not np.all(code == 0))
    assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_coder_estimator():
    """The SparseCoder estimator wrapper matches direct sparse_encode use."""
    n_atoms = 12
    V = rng.randn(n_atoms, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
                       transform_alpha=0.001).transform(X)
    assert_true(not np.all(code == 0))
    assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
|
ASMlover/study | python/py-interpreter/byterun/main.py | Python | bsd-2-clause | 2,143 | 0.0014 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import logging
def parse_argument():
    """Parse command line arguments for the byterun interpreter.

    Returns:
        argparse.Namespace with `module` (bool), `verbose` (bool),
        `prog` (str) and `args` (remaining argv for the program).
    """
    parser = argparse.ArgumentParser(
        prog='byterun',
        description='Run Python with a bytecode interpreter.')
    # Fixed: the action was misspelled 'stroe_true', which makes argparse
    # raise ValueError at definition time.
    parser.add_argument(
        '-m', dest='module', action='store_true',
        help='prog is a module name, not a file name')
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true',
        help='trace the execution of the bytecode')
    parser.add_argument('prog', help='the program to run')
    parser.add_argument(
        'args', nargs=argparse.REMAINDER,
        help='arguments to pass to the program')
    return parser.parse_args()
def main():
    # Stub entry point: argument parsing (parse_argument) and interpreter
    # dispatch are not wired up yet.
    pass
if __name__ == '__main__':
main()
|
edx/edx-platform | common/lib/xmodule/xmodule/util/xmodule_django.py | Python | agpl-3.0 | 1,184 | 0.005068 | """
Exposes Django utilities for consumption in the xmodule library
NOTE: This file should only be im | ported into 'django-safe' code, i.e. known that this code runs int the Django
runtime environment with the djangoapps in common configured to load
"""
import webpack_loader
# NOTE: we are importing this method so that any module that imports us has access to get_current_request
from crum import get_current_request
def get_current_request_hostname():
    """Return the HTTP host used by the current Django request, or None
    when called outside of a request cycle (or when the header is absent).
    """
    request = get_current_request()
    if not request:
        return None
    return request.META.get('HTTP_HOST')
def add_webpack_to_fragment(fragment, bundle_name, extension=None, config='DEFAULT'):
    """
    Add all webpack chunks to the supplied fragment as the appropriate resource type.

    Arguments:
        fragment: Web fragment to which the chunk URLs are added.
        bundle_name: Name of the webpack bundle whose chunks to include.
        extension: Optional file-extension filter passed to webpack_loader.
        config: Name of the webpack-loader configuration to use.
    """
    for chunk in webpack_loader.utils.get_files(bundle_name, extension, config):
        # Pre-gzipped chunks keep their base extension, so match both forms.
        if chunk['name'].endswith(('.js', '.js.gz')):
            fragment.add_javascript_url(chunk['url'])
        elif chunk['name'].endswith(('.css', '.css.gz')):
            fragment.add_css_url(chunk['url'])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.