code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
'''
XBMCVFS override for Dharma.
Version: 1.0
'''
import os, sys, time, errno
def exists(target):
    """Return True when *target* names an existing file or directory."""
    return os.path.exists(target)
def rename(origin, target):
    """Move *origin* to *target* (os.rename semantics; returns None)."""
    result = os.rename(origin, target)
    return result
def delete(target):
    """Delete the regular file at *target*.

    Returns True when a file was removed, False when *target* does not
    exist or is a directory.  The previous version returned the result of
    os.unlink() -- always None, which is falsy even on success -- while the
    xbmcvfs.delete API this module stubs out reports a boolean.
    """
    # isfile() is already False for directories, so the old extra
    # `not os.path.isdir(target)` check was redundant.
    if os.path.isfile(target):
        os.unlink(target)
        return True
    return False
| SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/script.module.simple.downloader/lib/xbmcvfsdummy.py | Python | gpl-2.0 | 353 |
"""
stubo
~~~~~
Stub-O-Matic - Enable automated testing by mastering system dependencies.
Use when reality is simply not good enough.
:copyright: (c) 2015 by OpenCredo.
:license: GPLv3, see LICENSE for more details.
"""
import os
import sys
version = "0.8.18"
version_info = tuple(version.split('.'))
def stubo_path():
    """Return the absolute directory that contains this module."""
    this_module = sys.modules[__name__]
    module_file = os.path.abspath(this_module.__file__)
    return os.path.dirname(module_file)

def static_path(*args):
    """Join *args* onto the package's 'static' directory."""
    return os.path.join(stubo_path(), 'static', *args)
| Stub-O-Matic-BA/stubo-app | stubo/__init__.py | Python | gpl-3.0 | 586 |
from .QuoteAdapter import QuoteAdapter
from .GoogleFinanceQuoteAdapter import GoogleFinanceQuoteAdapter
| philipodonnell/paperbroker | paperbroker/adapters/quotes/__init__.py | Python | mit | 104 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 goavki contributors <https://github.com/goavki/streamparser>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Usage: streamparser.py [FILE]
Consumes input from a file (first argument) or stdin, parsing and pretty printing the readings of lexical units found.
"""
import re, pprint, sys, itertools, fileinput
from enum import Enum
from collections import namedtuple
# How much the pipeline "knows" about a lexical unit; the single-character
# stream markers ('*', '@', '#') documented below map onto these members.
Knownness = Enum('Knownness', 'known unknown biunknown genunknown')
try:
    Knownness.__doc__ = """Level of knowledge associated with a lexical unit.
    Values:
        known
        unknown: Denoted by '*', analysis not available.
        biunknown: Denoted by '@', translation not available.
        genunknown: Denoted by '#', generated form not available.
    """
except AttributeError:
    # Python 3.2 users have to read the source
    # (assigning Enum.__doc__ raises AttributeError there).
    pass

# One subreading = lemma plus its morphological tags.
SReading = namedtuple('SReading', ['baseform', 'tags'])
try:
    SReading.__doc__ = """A single subreading of an analysis of a token.
    Fields:
        baseform (str): The base form (lemma, lexical form, citation form) of the reading.
        tags (list of str): The morphological tags associated with the reading.
    """
except AttributeError:
    # Python 3.2 users have to read the source
    pass
def subreadingToString(sub):
    """Render one SReading in stream format: 'baseform<tag1><tag2>...'."""
    tags = "".join("<%s>" % tag for tag in sub.tags)
    return sub.baseform + tags

def readingToString(reading):
    """Render a whole reading, joining its subreadings with '+'."""
    return "+".join(map(subreadingToString, reading))
def mainpos(reading, ltr=False):
    """Return the first part-of-speech tag of a reading. If there are
    several subreadings, by default give the first tag of the last
    subreading. If ltr=True, give the first tag of the first
    subreading, see
    http://beta.visl.sdu.dk/cg3/single/#sub-stream-apertium for more
    information.
    """
    subreading = reading[0] if ltr else reading[-1]
    return subreading.tags[0]
class LexicalUnit:
    """A lexical unit consisting of a wordform and its readings.

    Attributes:
        lexicalUnit (str): The lexical unit in Apertium stream format.
        wordform (str): The word form (surface form) of the lexical unit.
        readings (list of list of SReading): The analyses of the lexical unit with sublists containing all subreadings.
        knownness (Knownness): The level of knowledge of the lexical unit.
    """
    knownness = Knownness.known

    def __init__(self, lexicalUnit):
        self.lexicalUnit = lexicalUnit
        # Split only on unescaped '/': wordform first, then one reading per slot.
        cohort = re.split(r'(?<!\\)/', lexicalUnit)
        self.wordform = cohort[0]
        readings = cohort[1:]
        self.readings = []
        for reading in readings:
            # Fix: inspect the marker of *this* reading.  The old code used
            # readings[0][0], i.e. always the first reading of the cohort,
            # so later readings were classified by the wrong marker.
            if reading[0] not in '*#@':
                subreadings = []
                # Each subreading is a baseform followed by one or more <tag>s.
                subreadingParts = re.findall(r'([^<]+)((?:<[^>]+>)+)', reading)
                for subreading in subreadingParts:
                    baseform = subreading[0].lstrip('+')
                    tags = re.findall(r'<([^>]+)>', subreading[1])
                    subreadings.append(SReading(baseform=baseform, tags=tags))
                self.readings.append(subreadings)
            else:
                # '*' = no analysis, '@' = no translation, '#' = no generation.
                self.knownness = {'*': Knownness.unknown, '@': Knownness.biunknown, '#': Knownness.genunknown}[reading[0]]

    def __repr__(self):
        return self.lexicalUnit
def parse(stream, withText=False):
    """Generates lexical units from a character stream.
    Args:
        stream (iterable): A character stream containing lexical units, superblanks and other text.
        withText (bool, optional): A boolean defining whether to output preceding text with each lexical unit.
    Yields:
        LexicalUnit: The next lexical unit found in the character stream. (if withText is False)
        (str, LexicalUnit): The next lexical unit found in the character stream and the text that separated it from the prior unit in a tuple. (if withText is True)
    """
    # Character-level state machine over the Apertium stream format:
    # '^...$' delimits a lexical unit, '[...]' a superblank, '\' escapes
    # the following character everywhere.
    buffer = ''       # accumulates the current lexical unit body
    textBuffer = ''   # accumulates text/superblanks since the last unit
    inLexicalUnit = False
    inSuperblank = False
    for char in stream:
        if inSuperblank:
            # Inside '[...]': copy verbatim until the unescaped ']'.
            if char == ']':
                inSuperblank = False
                textBuffer += char
            elif char == '\\':
                # Escape: consume the backslash and the escaped character.
                textBuffer += char
                textBuffer += next(stream)
            else:
                textBuffer += char
        elif inLexicalUnit:
            if char == '$':
                # End of unit: emit it (with the preceding text if requested).
                if withText:
                    yield (textBuffer, LexicalUnit(buffer))
                else:
                    yield LexicalUnit(buffer)
                buffer = ''
                textBuffer = ''
                inLexicalUnit = False
            elif char == '\\':
                buffer += char
                buffer += next(stream)
            else:
                buffer += char
        else:
            # Plain text between units.
            if char == '[':
                inSuperblank = True
                textBuffer += char
            elif char == '^':
                inLexicalUnit = True
            elif char == '\\':
                textBuffer += char
                textBuffer += next(stream)
            else:
                textBuffer += char
def parse_file(f, withText=False):
    """Generates lexical units from a file.
    Args:
        f (file): A file containing lexical units, superblanks and other text.
        withText (bool, optional): Whether to yield the preceding text with each lexical unit.
    Yields:
        LexicalUnit: The next lexical unit found in the file.
    """
    # Flatten the file's lines into a single character stream for parse().
    chars = itertools.chain.from_iterable(f)
    return parse(chars, withText)
if __name__ == '__main__':
    # Read from the file named on the command line, or stdin when none given,
    # and pretty-print the readings of every lexical unit found.
    lexicalUnits = parse_file(fileinput.input())
    for lexicalUnit in lexicalUnits:
        pprint.pprint(lexicalUnit.readings, width=120)
| eddieantonio/big-practice-repo | hug-api/apertium_streamparser.py | Python | agpl-3.0 | 6,231 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
import frappe
import json
from frappe.desk.doctype.bulk_update.bulk_update import show_progress
from frappe.model.document import Document
from frappe import _
class DeletedDocument(Document):
    # Controller for the 'Deleted Document' DocType; all behaviour lives in
    # the module-level whitelisted functions below.
    pass
@frappe.whitelist()
def restore(name, alert=True):
    """Re-insert the document stored in the 'Deleted Document' record *name*.

    Raises frappe.DocumentAlreadyRestored when it was restored before.
    When *alert* is true, shows a confirmation message to the user.
    """
    deleted = frappe.get_doc('Deleted Document', name)
    if deleted.restored:
        frappe.throw(_("Document {0} Already Restored").format(name), exc=frappe.DocumentAlreadyRestored)
    # Rebuild the original document from its serialized JSON snapshot.
    doc = frappe.get_doc(json.loads(deleted.data))
    try:
        doc.insert()
    except frappe.DocstatusTransitionError:
        # A cancelled/submitted snapshot cannot be inserted as-is: retry as draft.
        frappe.msgprint(_("Cancelled Document restored as Draft"))
        doc.docstatus = 0
        doc.insert()
    doc.add_comment('Edit', _('restored {0} as {1}').format(deleted.deleted_name, doc.name))
    # Record the outcome on the Deleted Document entry itself.
    deleted.new_name = doc.name
    deleted.restored = 1
    deleted.db_update()
    if alert:
        frappe.msgprint(_('Document Restored'))
@frappe.whitelist()
def bulk_restore(docnames):
    """Restore several 'Deleted Document' records, committing after each one.

    Returns a dict with lists of restored, invalid (already restored)
    and failed document names.
    """
    docnames = frappe.parse_json(docnames)
    message = _('Restoring Deleted Document')
    restored, invalid, failed = [], [], []
    for i, d in enumerate(docnames):
        try:
            show_progress(docnames, message, i + 1, d)
            restore(d, alert=False)
            # Commit per document so a later failure cannot undo earlier restores.
            frappe.db.commit()
            restored.append(d)
        except frappe.DocumentAlreadyRestored:
            # Expected condition, not an error: drop the queued error message.
            frappe.message_log.pop()
            invalid.append(d)
        except Exception:
            # NOTE(review): broad catch keeps the batch going, but the original
            # error is discarded from the message log before rolling back.
            frappe.message_log.pop()
            failed.append(d)
            frappe.db.rollback()
    return {
        "restored": restored,
        "invalid": invalid,
        "failed": failed
    }
| mhbu50/frappe | frappe/core/doctype/deleted_document/deleted_document.py | Python | mit | 1,584 |
'''
Draw a star
'''
from turtle import *

# Red pen, yellow fill.
color('red', 'yellow')
begin_fill()
while True:
    forward(200)
    # Turning 170 degrees per stroke closes the path back at the origin
    # after 36 strokes (36 * 170 is a multiple of 360), tracing a star.
    left(170)
    if abs(pos()) < 1:
        # Close enough to the starting point: the star is complete.
        break
end_fill()
done()
| samuelzq/Learn-Python-with-kids | part2/draw_star.py | Python | apache-2.0 | 176 |
"""Test the TcEx Threat Intel Module."""
# standard library
import os
from datetime import datetime, timedelta
from .ti_helpers import TestThreatIntelligence, TIHelper
class TestCampaignGroups(TestThreatIntelligence):
    """Test TcEx Campaign Groups."""

    # ThreatConnect group type exercised by this suite; shared test cases
    # live on TestThreatIntelligence and are delegated to via super().
    group_type = 'Campaign'
    owner = os.getenv('TC_OWNER')
    ti = None
    ti_helper = None
    tcex = None

    def setup_method(self):
        """Configure setup before all tests."""
        self.ti_helper = TIHelper(self.group_type)
        self.ti = self.ti_helper.ti
        self.tcex = self.ti_helper.tcex

    def tests_ti_campaign_create(self):
        """Create a group using specific interface."""
        group_data = {
            'first_seen': datetime.now().isoformat(),
            'name': self.ti_helper.rand_name(),
            'owner': self.owner,
        }
        ti = self.ti.campaign(**group_data)
        r = ti.create()
        # assert response
        assert r.status_code == 201
        # retrieve group for asserts
        group_data['unique_id'] = ti.unique_id
        ti = self.ti.campaign(**group_data)
        r = ti.single()
        response_data = r.json()
        ti_data = response_data.get('data', {}).get(ti.api_entity)
        # validate response data
        assert r.status_code == 200
        assert response_data.get('status') == 'Success'
        # validate ti data
        # NOTE(review): both sides look up the same key (ti.api_entity) and
        # group_data has no such key, so this compares None == None and can
        # never fail -- probably meant to compare the 'name' fields; confirm.
        assert ti_data.get(ti.api_entity) == group_data.get(ti.api_entity)
        # cleanup group
        r = ti.delete()
        assert r.status_code == 200

    def tests_ti_campaign_add_attribute(self, request):
        """Test group add attribute."""
        super().group_add_attribute(request)

    def tests_ti_campaign_add_label(self):
        """Test group add label."""
        super().group_add_label()

    def tests_ti_campaign_add_tag(self, request):
        """Test group add tag."""
        super().group_add_tag(request)

    def tests_ti_campaign_delete(self):
        """Test group delete."""
        super().group_delete()

    def tests_ti_campaign_get(self):
        """Test group get with generic group method."""
        super().group_get()

    def tests_ti_campaign_get_filter(self):
        """Test group get with filter."""
        super().group_get_filter()

    def tests_ti_campaign_get_includes(self, request):
        """Test group get with includes."""
        super().group_get_includes(request)

    def tests_ti_campaign_get_attribute(self, request):
        """Test group get attribute."""
        super().group_get_attribute(request)

    def tests_ti_campaign_get_label(self):
        """Test group get label."""
        super().group_get_label()

    def tests_ti_campaign_get_tag(self, request):
        """Test group get tag."""
        super().group_get_tag(request)

    def tests_ti_campaign_update(self, request):
        """Test updating group metadata."""
        super().group_update(request)

    #
    # Custom test cases
    #

    def tests_ti_campaign_first_seen(self):
        """Update first seen value."""
        helper_ti = self.ti_helper.create_group()
        # update first seen (coverage)
        first_seen = (datetime.now() - timedelta(days=2)).isoformat()
        r = helper_ti.first_seen(first_seen)
        assert r.status_code == 200

    def tests_ti_campaign_first_seen_no_update(self):
        """Test update on group with no id."""
        group_data = {
            'first_seen': datetime.now().isoformat(),
            'name': self.ti_helper.rand_name(),
            'owner': self.owner,
        }
        ti = self.ti.campaign(**group_data)
        # update first seen (coverage)
        try:
            first_seen = (datetime.now() - timedelta(days=2)).isoformat()
            ti.first_seen(first_seen)
            assert False, 'failed to catch group method call with no id.'
        except RuntimeError:
            assert True, 'caught group method call with no id'
| ThreatConnect-Inc/tcex | tests/api/tc/v2/threat_intelligence/test_campaign_interface.py | Python | apache-2.0 | 3,930 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to Cifar-10 dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import tensorflow as tf
from official.vision.image_classification.resnet import imagenet_preprocessing
# CIFAR-10 image geometry: 32x32 RGB.
HEIGHT = 32
WIDTH = 32
NUM_CHANNELS = 3
_DEFAULT_IMAGE_BYTES = HEIGHT * WIDTH * NUM_CHANNELS
# The record is the image plus a one-byte label
_RECORD_BYTES = _DEFAULT_IMAGE_BYTES + 1

# TODO(tobyboyd): Change to best practice 45K(train)/5K(val)/10K(test) splits.
NUM_IMAGES = {
    'train': 50000,
    'validation': 10000,
}
# The training set is sharded across this many binary files.
_NUM_DATA_FILES = 5
NUM_CLASSES = 10
def parse_record(raw_record, is_training, dtype):
    """Parse one serialized CIFAR-10 record into an image tensor and a label.

    Each raw record is a flat byte vector: one label byte followed by the
    image bytes in [depth, height, width] order.  The image is run through
    the preprocessing pipeline (crop/flip when training, standardization).

    Args:
        raw_record: scalar Tensor tf.string containing a serialized record.
        is_training: A boolean denoting whether the input is for training.
        dtype: Data type to use for input images.

    Returns:
        Tuple of (processed image tensor, int32 label tensor).
    """
    # Decode the raw bytes into a uint8 vector of length _RECORD_BYTES.
    record = tf.io.decode_raw(raw_record, tf.uint8)

    # Byte 0 is the label.
    label = tf.cast(record[0], tf.int32)

    # The remaining bytes are the image in channels-first layout.
    image_chw = tf.reshape(record[1:_RECORD_BYTES],
                           [NUM_CHANNELS, HEIGHT, WIDTH])

    # Transpose to channels-last [height, width, depth] and cast to float32
    # before augmentation/standardization.
    image = tf.cast(tf.transpose(a=image_chw, perm=[1, 2, 0]), tf.float32)
    image = preprocess_image(image, is_training)
    return tf.cast(image, dtype), label
def preprocess_image(image, is_training):
    """Preprocess a single image tensor of layout [height, width, depth]."""
    if is_training:
        # Pad by 4 pixels on each side, then take a random HEIGHT x WIDTH crop.
        padded = tf.image.resize_with_crop_or_pad(image, HEIGHT + 8, WIDTH + 8)
        cropped = tf.image.random_crop(padded, [HEIGHT, WIDTH, NUM_CHANNELS])
        # Random horizontal flip for augmentation.
        image = tf.image.random_flip_left_right(cropped)

    # Normalize the image to zero mean and unit variance.
    return tf.image.per_image_standardization(image)
def get_filenames(is_training, data_dir):
    """Return the list of CIFAR-10 data file paths for the requested split."""
    assert tf.io.gfile.exists(data_dir), (
        'Run cifar10_download_and_extract.py first to download and extract the '
        'CIFAR-10 data.')
    if not is_training:
        return [os.path.join(data_dir, 'test_batch.bin')]
    return [os.path.join(data_dir, 'data_batch_%d.bin' % i)
            for i in range(1, _NUM_DATA_FILES + 1)]
def input_fn(is_training,
             data_dir,
             batch_size,
             dtype=tf.float32,
             datasets_num_private_threads=None,
             parse_record_fn=parse_record,
             input_context=None,
             drop_remainder=False):
    """Input function which provides batches for train or eval.
    Args:
        is_training: A boolean denoting whether the input is for training.
        data_dir: The directory containing the input data.
        batch_size: The number of samples per batch.
        dtype: Data type to use for images/features
        datasets_num_private_threads: Number of private threads for tf.data.
        parse_record_fn: Function to use for parsing the records.
        input_context: A `tf.distribute.InputContext` object passed in by
            `tf.distribute.Strategy`.
        drop_remainder: A boolean indicates whether to drop the remainder of the
            batches. If True, the batch dimension will be static.
    Returns:
        A dataset that can be used for iteration.
    """
    filenames = get_filenames(is_training, data_dir)
    # Records are fixed-size binary blobs: label byte + raw image bytes.
    dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES)

    if input_context:
        # Under tf.distribute, let each input pipeline read a disjoint shard.
        logging.info(
            'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d',
            input_context.input_pipeline_id, input_context.num_input_pipelines)
        dataset = dataset.shard(input_context.num_input_pipelines,
                                input_context.input_pipeline_id)

    # Shared shuffle/parse/batch/prefetch pipeline from the ImageNet module.
    return imagenet_preprocessing.process_record_dataset(
        dataset=dataset,
        is_training=is_training,
        batch_size=batch_size,
        shuffle_buffer=NUM_IMAGES['train'],
        parse_record_fn=parse_record_fn,
        dtype=dtype,
        datasets_num_private_threads=datasets_num_private_threads,
        drop_remainder=drop_remainder
    )
| tombstone/models | official/benchmark/models/cifar_preprocessing.py | Python | apache-2.0 | 5,637 |
import unittest
import unittest.mock
import tornado.gen
import tornado.testing
import robot.tests
import robot.lib.shell
class BaseShellTest(robot.tests.TestCase):
    """Tests for robot.lib.shell.BaseShell with tornado's Subprocess mocked."""

    @unittest.mock.patch("tornado.process.Subprocess")
    @tornado.testing.gen_test
    def test_run(self, Subprocess):
        # run() should feed stdin to the child, collect stdout and return
        # the exit status together with the decoded output.
        shell = robot.lib.shell.BaseShell()
        proc = Subprocess.return_value = unittest.mock.MagicMock()
        proc.proc.wait.return_value = 0
        # Simulate the child emitting b"hello\n" via the streaming callback.
        proc.stdout.read_until_close.side_effect = tornado.gen.coroutine(
            lambda streaming_callback: streaming_callback(b"hello\n"))
        status, output = yield shell.run("cat", stdin="hello")
        self.assertEqual(status, 0)
        self.assertEqual(output, "hello\n")
        proc.stdin.write_to_fd.assert_called_once_with(b"hello")
        proc.stdin.close_fd.assert_called_once()
        Subprocess.assert_called_once_with(
            "cat", shell=True,
            stdin=Subprocess.STREAM, stdout=Subprocess.STREAM)

    @unittest.mock.patch("tornado.process.Subprocess")
    @tornado.testing.gen_test
    def test_run_streaming_callback(self, Subprocess):
        # A user-supplied streaming_callback should receive decoded chunks.
        @tornado.gen.coroutine
        def streaming_callback(chunk):
            nonlocal data
            data += chunk
        shell = robot.lib.shell.BaseShell()
        data = ""
        proc = Subprocess.return_value = unittest.mock.MagicMock()
        proc.stdout.read_until_close.side_effect = tornado.gen.coroutine(
            lambda streaming_callback: streaming_callback(b"hello\n"))
        status, output = yield shell.run(
            "echo hello", streaming_callback=streaming_callback)
        self.assertEqual(data, "hello\n")
class ShellTest(robot.tests.TestCase):
    """Tests that Shell builds the right command line for local, sudo,
    ssh and container execution, delegating to BaseShell.run."""

    @unittest.mock.patch("robot.lib.shell.BaseShell.run")
    @tornado.testing.gen_test
    def test_run(self, run):
        # Local shell: commands are piped into a clean-environment bash.
        shell = robot.lib.shell.Shell()
        run.side_effect = tornado.gen.coroutine(lambda *a, **kw: (0, ""))
        status, output = yield shell.run("exit")
        self.assertEqual((status, output), (0, ""))
        run.assert_called_once_with(
            "/usr/bin/env -i bash - 2>&1", stdin="exit")
        run.reset_mock()
        # sudo=True prefixes the invocation with sudo.
        shell = robot.lib.shell.Shell(sudo=True)
        status, output = yield shell.run("exit")
        self.assertEqual((status, output), (0, ""))
        run.assert_called_once_with(
            "sudo /usr/bin/env -i bash - 2>&1", stdin="exit")

    @unittest.mock.patch("robot.lib.shell.BaseShell.run")
    @tornado.testing.gen_test
    def test_run_ssh(self, run):
        # host/user wrap the command in an ssh invocation.
        shell = robot.lib.shell.Shell(host="example.com", user="root")
        run.side_effect = tornado.gen.coroutine(lambda *a, **kw: (0, ""))
        status, output = yield shell.run("exit")
        self.assertEqual((status, output), (0, ""))
        run.assert_called_once_with(
            "ssh root@example.com -- "
            "/usr/bin/env -i bash - 2>&1", stdin="exit")
        run.reset_mock()
        # Without a user, ssh gets just the host.
        shell = robot.lib.shell.Shell(host="example.com")
        status, output = yield shell.run("exit")
        self.assertEqual((status, output), (0, ""))
        run.assert_called_once_with(
            "ssh example.com -- "
            "/usr/bin/env -i bash - 2>&1", stdin="exit")

    @unittest.mock.patch("robot.lib.shell.BaseShell.run")
    @tornado.testing.gen_test
    def test_run_container(self, run):
        # container wraps the command in an ephemeral lxc container start.
        shell = robot.lib.shell.Shell(container="base")
        run.side_effect = tornado.gen.coroutine(lambda *a, **kw: (0, ""))
        status, output = yield shell.run("exit")
        self.assertEqual((status, output), (0, ""))
        run.assert_called_once_with(
            "lxc-start-ephemeral -o base -- "
            "/usr/bin/env -i bash - 2>&1", stdin="exit")
        run.reset_mock()
        # host + container compose: ssh first, then the container wrapper.
        shell = robot.lib.shell.Shell(host="example.com", container="base")
        status, output = yield shell.run("exit")
        self.assertEqual((status, output), (0, ""))
        run.assert_called_once_with(
            "ssh example.com -- "
            "lxc-start-ephemeral -o base -- "
            "/usr/bin/env -i bash - 2>&1", stdin="exit")
from .base import *
# Development settings: never deploy with DEBUG enabled.
DEBUG = True

# Email
# https://docs.djangoproject.com/en/1.8/topics/email/
# Console backend: outgoing mail is printed to stdout instead of sent.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SERVER_EMAIL = 'contact@fc-bolzplatz.eu'
DEFAULT_FROM_EMAIL = 'no-reply@fc-bolzplatz.eu'
EMAIL_SUBJECT_PREFIX = '[Bolzplatz] '

MANAGERS = (
    ('Us', 'ourselves@fc-bolzplatz.eu'),
)

# Postgres credentials come from the environment; get_env_variable is
# provided by the star import from .base.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": get_env_variable('BOLZPLATZ_DB_NAME'),
        "USER": get_env_variable('BOLZPLATZ_DB_USER'),
        "PASSWORD": get_env_variable('BOLZPLATZ_DB_PASSWORD'),
        "HOST": "localhost",
        "PORT": "",
    }
}

# Development-only apps.
INSTALLED_APPS += ("debug_toolbar",)
"basic 2D vector geometry"
from math import acos, sqrt, sin, cos, pi
class Vec2D(object):
    """A minimal 2D vector for euclidean plane geometry."""

    # Two vectors closer than this are considered equal by is_equal().
    EPSILON = 0.0001

    def __init__(self, x=0.0, y=0.0):
        self.pos_x = x
        self.pos_y = y

    def dot(self, other):
        """Scalar (dot) product of self and other."""
        return self.pos_x * other.pos_x + self.pos_y * other.pos_y

    def cross(self, other):
        """z-component of the 3D cross product (signed parallelogram area)."""
        return self.pos_x * other.pos_y - self.pos_y * other.pos_x

    def length(self):
        """Euclidean norm of the vector."""
        return sqrt(self.dot(self))

    def normalized(self):
        """Vector of length 1 pointing in the same direction as self."""
        return self * (1 / self.length())

    def rotate(self, angle, center=None):
        """Rotate self counter-clockwise by *angle* radians around *center*
        (the origin when center is None)."""
        pivot = Vec2D() if center is None else center
        rel = self - pivot
        c = cos(angle)
        s = sin(angle)
        rotated = Vec2D(c * rel.pos_x - s * rel.pos_y,
                        s * rel.pos_x + c * rel.pos_y)
        return rotated + pivot

    def oriented_angle(self, other):
        """Counter-clockwise angle in [0, 2*pi) from self to other."""
        u = self.normalized()
        v = other.normalized()
        sine = u.cross(v)      # sin of the angle between them
        cosine = u.dot(v)      # cos of the angle between them
        # Clamp against tiny floating point overshoot outside [-1, 1].
        cosine = max(-1.0, min(1.0, cosine))
        angle = acos(cosine) if sine > 0 else -acos(cosine)
        return angle + 2 * pi if angle < 0 else angle

    def __neg__(self):
        return Vec2D(-self.pos_x, -self.pos_y)

    def __add__(self, other):
        return Vec2D(self.pos_x + other.pos_x, self.pos_y + other.pos_y)

    def __sub__(self, other):
        return Vec2D(self.pos_x - other.pos_x, self.pos_y - other.pos_y)

    def __mul__(self, scalar):
        return Vec2D(self.pos_x * scalar, self.pos_y * scalar)

    def __str__(self):
        return f"({self.pos_x},{self.pos_y})"

    def is_equal(self, other):
        """True when self and other differ by less than EPSILON."""
        return (self - other).length() < Vec2D.EPSILON

    @staticmethod
    def orientation(vec1, vec2, vec3):
        """Return a positive number for mathematically positively oriented
        points vec1 -> vec2 -> vec3, a negative number for negative
        orientation and zero for colinear points."""
        return (vec2 - vec1).cross(vec3 - vec2)
| 31415us/linda-lidar-rangefinder-playground | linda/Vec2D.py | Python | mit | 2,599 |
#!/bin/python
#coding:utf-8
import roomai.games.common
class TexasHoldemStatePublic(roomai.games.common.AbstractStatePublic):
    '''
    The public state of TexasHoldem
    '''

    def __init__(self):
        super(TexasHoldemStatePublic, self).__init__()
        self.__stage__ = None
        self.__public_cards__ = None

        # state of players
        self.__is_fold__ = None
        self.__num_fold__ = None
        self.__is_allin__ = None
        self.__num_allin__ = None
        self.__is_needed_to_action__ = None
        self.__num_needed_to_action__ = None

        # NOTE(review): these instance attributes shadow the class-level
        # defaults defined further down (names with trailing '__' are exempt
        # from Python name mangling, so the shadowing is real).
        self.__param_dealer_id__ = -1
        self.__param_big_blind_bet__ = -1
        self.__param_init_chips__ = None

        # chips is array which contains the chips of all players
        self.__chips__ = None
        # bets is array which contains the bets from all players
        self.__bets__ = None
        # max_bet = max(self.bets)
        self.__max_bet_sofar__ = None
        # the raise account
        self.__raise_account__ = None

    def __get_max_bet_sofar__(self): return self.__max_bet_sofar__
    max_bet_sofar = property(__get_max_bet_sofar__, doc="The max bet used by one player so far")

    def __get_raise_account__(self): return self.__raise_account__
    raise_account = property(__get_raise_account__, doc="The raise account. If a player want to raise, the price must be max_bet_sofar + raise_account * N. The raise account will increases as the game goes forward")

    # Mutable lists are exposed as tuples so callers cannot mutate the state.
    def __get_chips__(self):
        if self.__chips__ is None:
            return None
        else:
            return tuple(self.__chips__)
    chips = property(__get_chips__, doc="chips is an array of the chips of all players. For example, chips=[50,50,50]")

    def __get_bets__(self):
        if self.__bets__ is None:
            return None
        else:
            return tuple(self.__bets__)
    bets = property(__get_bets__, doc="bets is an array which contains the bets from all players. For example, bets=[50,25,25]")

    def __get_is_fold__(self):
        if self.__is_fold__ is None: return None
        else: return tuple(self.__is_fold__)
    is_fold = property(__get_is_fold__, doc="is_fold is an array of which player has take the fold action. For example, is_fold = [true,true,false] denotes the player0 and player1 have taken the fold action")

    def __get_num_fold__(self):
        return self.__num_fold__
    num_fold = property(__get_num_fold__, doc="The number of players who has taken the fold action")

    def __get_is_allin__(self):
        if self.__is_allin__ is None: return None
        else: return tuple(self.__is_allin__)
    is_allin = property(__get_is_allin__, doc="is_allin is an array of which player has take the allin action. For example, is_allin = [true,true,false] denotes the player0 and player1 have taken the allin action")

    def __get_num_allin__(self):
        return self.__num_allin__
    num_allin = property(__get_num_allin__, doc="The number of players who has taken the allin action")

    def __get_is_needed_to_action__(self):
        if self.__is_needed_to_action__ is None: return None
        else: return tuple(self.__is_needed_to_action__)
    is_needed_to_action = property(__get_is_needed_to_action__, doc="is_needed_to_action is an array of which player has take the needed_to_action action. For example, is_needed_to_action = [true,true,false] denotes the player0 and player1 are need to take action")

    def __get_num_needed_to_action__(self):
        return self.__num_needed_to_action__
    num_needed_to_action = property(__get_num_needed_to_action__, doc="The number of players who has taken the needed_to_action action")

    def __get_public_cards__(self):
        if self.__public_cards__ is None:
            return None
        else:
            return tuple(self.__public_cards__)
    public_cards = property(__get_public_cards__, doc="The public cards of this game. For example, public_cards = [roomai.common.PokerCards.lookup(\"A_Spade\"), roomai.common.PokerCards.lookup(\"A_Heart\")]")

    def __get_stage__(self):
        return self.__stage__
    stage = property(__get_stage__, doc="The stage of the TexasHoldem game. The stage must be one of 1,2,3 or 4.")

    ######################### initialization param ##################
    # NOTE(review): class-level defaults, shadowed per-instance by __init__.
    __param_dealer_id__ = 0
    def __get_param_dealer_id__(self): return self.__param_dealer_id__
    param_dealer_id = property(__get_param_dealer_id__, doc="The player id of the dealer. The next player after the dealer is the small blind. The next player after the small blind is the big blind.For example, param_dealer_id = 2")

    __param_init_chips__ = None
    def __get_param_init_chips__(self): return self.__param_init_chips__
    param_init_chips = property(__get_param_init_chips__, doc="The initialization chips of this game. For example, param_initialization_chips = [10,5,6]")

    __param_big_blind_bet__ = 10
    def __get_param_big_blind_bet__(self): return self.__param_big_blind_bet__
    param_big_blind_bet = property(__get_param_big_blind_bet__, doc="The big blind bet")

    # NOTE(review): mutable default for memodict (standard __deepcopy__
    # signature quirk); the dict is never mutated here so it is harmless.
    def __deepcopy__(self, memodict={}, newinstance=None):
        if newinstance is None:
            newinstance = TexasHoldemStatePublic()
        newinstance = super(TexasHoldemStatePublic, self).__deepcopy__(newinstance=newinstance)
        newinstance.__param_dealer_id__ = self.param_dealer_id
        newinstance.__param_big_blind_bet__ = self.param_big_blind_bet
        newinstance.__param_init_chips__ = self.__param_init_chips__
        newinstance.__stage__ = self.stage
        if self.public_cards is None:
            newinstance.__public_cards__ = None
        else:
            # Cards are deep-copied; plain values below are copied element-wise.
            newinstance.__public_cards__ = [self.public_cards[i].__deepcopy__() for i in range(len(self.public_cards))]

        ######## quit, allin , needed_to_action
        newinstance.__num_fold__ = self.__num_fold__
        if self.is_fold is None:
            newinstance.__is_fold__ = None
        else:
            newinstance.__is_fold__ = [self.is_fold[i] for i in range(len(self.is_fold))]

        newinstance.__num_allin__ = self.__num_allin__
        if self.is_allin is None:
            newinstance.__is_allin__ = None
        else:
            newinstance.__is_allin__ = [self.is_allin[i] for i in range(len(self.is_allin))]

        newinstance.__num_needed_to_action__ = self.__num_needed_to_action__
        if self.is_needed_to_action is None:
            newinstance.__is_needed_to_action__ = None
        else:
            newinstance.__is_needed_to_action__ = [self.is_needed_to_action[i] for i in
                                                   range(len(self.is_needed_to_action))]

        # chips is array which contains the chips of all players
        if self.chips is None:
            newinstance.__chips__ = None
        else:
            newinstance.__chips__ = [self.chips[i] for i in range(len(self.chips))]

        # bets is array which contains the bets from all players
        if self.bets is None:
            newinstance.__bets__ = None
        else:
            newinstance.__bets__ = [self.bets[i] for i in range(len(self.bets))]

        newinstance.__max_bet_sofar__ = self.max_bet_sofar
        newinstance.__raise_account__ = self.raise_account
        newinstance.__turn__ = self.turn

        ### isterminal, scores
        newinstance.__is_terminal__ = self.is_terminal
        if self.scores is None:
            newinstance.__scores__ = None
        else:
            newinstance.__scores__ = [self.scores[i] for i in range(len(self.scores))]
        return newinstance
# --------------------------------------------------------------------------------------
# Copyright 2016, Benedikt J. Daurer, Filipe R.N.C. Maia, Max F. Hantke, Carl Nettelblad
# Hummingbird is distributed under the terms of the Simplified BSD License.
# -------------------------------------------------------------------------
"""Dialog to change the line plot settings"""
from interface.Qt import QtGui, QtCore
from interface.ui import Ui_linePlotSettings
import numpy
class LinePlotSettings(QtGui.QDialog, Ui_linePlotSettings):
"""Dialog to change the line plot settings"""
    def __init__(self, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        # Background-image overlay state: the image data, the chosen file,
        # and the filename/angle that were active when it was last loaded.
        self.bg = None
        self.bg_filename = None
        self.bg_filename_loaded = None
        self.bg_angle_loaded = None
        # Open the file picker when the background-file button is released.
        self.bg_file.released.connect(self._on_bg_file)
def get_state(self, _settings = None):
settings = _settings or {}
settings["xmin"] = self.xmin.text()
settings["xmax"] = self.xmax.text()
settings["ymin"] = self.ymin.text()
settings["ymax"] = self.ymax.text()
settings["xlimits_auto"] = self.xlimits_auto.isChecked()
settings["ylimits_auto"] = self.ylimits_auto.isChecked()
settings["bg_filename"] = self.bg_filename
settings["bg_xmin"] = self.bg_xmin.text()
settings["bg_xmax"] = self.bg_xmax.text()
settings["bg_ymin"] = self.bg_ymin.text()
settings["bg_ymax"] = self.bg_ymax.text()
settings["bg_angle"] = self.bg_angle.text()
settings["histogram"] = self.histogram.isChecked()
settings["histAutorange"] = self.histAutorange.isChecked()
settings["histBins"] = self.histBins.text()
settings["histMin"] = self.histMin.text()
settings["histMax"] = self.histMax.text()
settings["histMode"] = self.histMode.currentText()
#settings["x_label"] = self.x_label.text()
#settings["y_label"] = self.y_label.text()
#settings["x_auto"] = self.x_auto.isChecked()
#settings["y_auto"] = self.y_auto.isChecked()
settings["logx"] = self.logx.isChecked()
settings["logy"] = self.logy.isChecked()
settings["showTrendScalar"] = self.showTrendScalar.isChecked()
settings["windowLength"] = self.windowLength.text()
settings["showTrendVector"] = self.showTrendVector.isChecked()
settings["showMainLine"] = self.showMainLine.isChecked()
settings["trendVector_min"] = self.trendVector_min.isChecked()
settings["trendVector_max"] = self.trendVector_max.isChecked()
settings["trendVector_std"] = self.trendVector_std.isChecked()
settings["trendVector_mean"] = self.trendVector_mean.isChecked()
settings["trendVector_median"] = self.trendVector_median.isChecked()
settings["aspect_locked"] = self.aspect_locked.isChecked()
settings["flip_x"] = self.flip_x.isChecked()
settings["flip_y"] = self.flip_y.isChecked()
return settings
def restore_from_state(self, settings):
if "xmin" not in settings:
return
self.xmin.setText(settings["xmin"])
self.xmax.setText(settings["xmax"])
self.ymin.setText(settings["ymin"])
self.ymax.setText(settings["ymax"])
self.xlimits_auto.setChecked(settings["xlimits_auto"])
self.ylimits_auto.setChecked(settings["ylimits_auto"])
self.bg_filename = settings["bg_filename"]
self.bg_xmin.setText(settings["bg_xmin"])
self.bg_xmax.setText(settings["bg_xmax"])
self.bg_ymin.setText(settings["bg_ymin"])
self.bg_ymax.setText(settings["bg_ymax"])
self.bg_angle.setText(settings["bg_angle"])
if settings["bg_filename"] is not None:
self._read_bg_file()
self.histogram.setChecked(settings["histogram"])
self.histAutorange.setChecked(settings["histAutorange"])
self.histBins.setText(settings["histBins"])
self.histMin.setText(settings["histMin"])
self.histMax.setText(settings["histMax"])
self.histMode.setCurrentIndex(self.histMode.findText(settings["histMode"]))
#self.x_label.setText(settings["x_label"])
#self.y_label.setText(settings["y_label"])
#self.x_auto.setChecked(settings["x_auto"])
#self.y_auto.setChecked(settings["y_auto"])
self.logx.setChecked(settings["logx"])
self.logy.setChecked(settings["logy"])
self.showTrendScalar.setChecked(settings["showTrendScalar"])
self.windowLength.setText(settings["windowLength"])
self.showTrendVector.setChecked(settings["showTrendVector"])
self.showMainLine.setChecked(settings["showMainLine"])
self.trendVector_min.setChecked(settings["trendVector_min"])
self.trendVector_max.setChecked(settings["trendVector_max"])
self.trendVector_std.setChecked(settings["trendVector_std"])
self.trendVector_mean.setChecked(settings["trendVector_mean"])
self.trendVector_median.setChecked(settings["trendVector_median"])
self.aspect_locked.setChecked(settings["aspect_locked"])
self.flip_x.setChecked(settings["flip_x"])
self.flip_y.setChecked(settings["flip_y"])
def _configure_limits(self, xmin=0., xmax=1., ymin=0., ymax=1., xlimits_auto=True, ylimits_auto=True):
self.xmin.setText("%e" % xmin)
self.xmax.setText("%e" % xmax)
self.xlimits_auto.setChecked(xlimits_auto)
self.ymin.setText("%e" % ymin)
self.ymax.setText("%e" % ymax)
self.ylimits_auto.setChecked(ylimits_auto)
def _configure_bg(self, bg_xmin=0., bg_xmax=1., bg_ymin=0., bg_ymax=1., bg_angle=0., bg_filename=None):
self.bg_xmin.setText("%e" % bg_xmin)
self.bg_xmax.setText("%e" % bg_xmax)
self.bg_ymin.setText("%e" % bg_ymin)
self.bg_ymax.setText("%e" % bg_ymax)
self.bg_angle.setText("%f" % bg_angle)
self.bg_filename = bg_filename
self.bg = None
if bg_filename is not None:
self._read_bg_file()
def _on_bg_file(self):
fname = QtGui.QFileDialog.getOpenFileName(self, "Load Background Image", filter="NPY Files (*.npy)",
options=QtGui.QFileDialog.DontUseNativeDialog)
if(fname):
self.bg_filename = fname
self._read_bg_file()
def _read_bg_file(self):
if self.bg_filename is None:
return
if self.bg_filename_loaded == self.bg_filename and self.bg_angle_loaded == self.bg_angle.text():
return
print("Reading background image from file (%s) ..." % self.bg_filename)
self.bg = numpy.load(self.bg_filename)
self.bg = numpy.array(self.bg, dtype=numpy.float64)
print("... done")
a = float(self.bg_angle.text())
if a != 0.:
# Interpolate image with on roateted grid
print("Interpolating rotated background image ...")
from scipy.interpolate import griddata
X,Y = numpy.meshgrid(numpy.arange(self.bg.shape[1]), numpy.arange(self.bg.shape[0]))
X = X - (X.shape[1]-1)/2.
Y = Y - (Y.shape[0]-1)/2.
points = numpy.asarray([[xi,yi] for xi,yi in zip(X.flat,Y.flat)])
X2 = X*numpy.cos(a) - Y*numpy.sin(a)
Y2 = X*numpy.sin(a) + Y*numpy.cos(a)
self.bg = griddata(points, self.bg.flat, (X2, Y2), method='nearest')
print("... done")
self.bg_filename_loaded = self.bg_filename
self.bg_angle_loaded = self.bg_angle.text()
| SPIhub/hummingbird | src/interface/ui/line_plot_settings.py | Python | bsd-2-clause | 7,721 |
from IPython import get_ipython
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import HasFocus, ViInsertMode
from prompt_toolkit.key_binding.vi_state import InputMode

def switch_to_navigation_mode(event):
    """Drop the prompt's vi editing state back to NAVIGATION (normal) mode."""
    event.cli.vi_state.input_mode = InputMode.NAVIGATION

ip = get_ipython()
pt_app = getattr(ip, 'pt_app', None)
if pt_app:
    # Bind the two-key chord "jk", pressed while inserting text in the main
    # buffer, to the mode switch above (vim-style escape from insert mode).
    insert_filter = HasFocus(DEFAULT_BUFFER) & ViInsertMode()
    bind = pt_app.key_bindings.add_binding(u'j', u'k', filter=insert_filter)
    bind(switch_to_navigation_mode)
| mphe/dotfiles | ipython/startup/keybindings.py | Python | mit | 583 |
import json
import discord
def pp_json(json_thing, sort=True, indents=4):
    """Pretty-print *json_thing* into the file ``keys.json``.

    *json_thing* may be either a JSON document as a string or an
    already-decoded object (dict/list); strings are parsed first so the
    output is always re-serialized with the requested formatting.

    :param sort: sort object keys alphabetically in the output
    :param indents: number of spaces per indentation level
    """
    with open('keys.json', 'w') as outfile:
        # isinstance() instead of `type(...) is str`: also accepts str subclasses
        data = json.loads(json_thing) if isinstance(json_thing, str) else json_thing
        json.dump(data, outfile, sort_keys=sort, indent=indents)
async def addKey(client, message, name, lvl):
    """Store or refresh the author's keystone (name + level) in keys.json
    and confirm in the channel.

    :param client: the discord client used to send the confirmation
    :param message: the triggering message (provides server/author/channel)
    :param name: keystone (dungeon) name, already validated by the caller
    :param lvl: keystone level as a string
    """
    # JSON object keys are always strings, so use str() ids consistently for
    # both the membership test and the store.  The original mixed
    # `str(message.server.id)` checks with raw-id writes, which breaks
    # (KeyError / duplicate entries) whenever the ids are not already strings.
    server_id = str(message.server.id)
    author_id = str(message.author.id)
    with open('keys.json') as x:
        usrData = json.load(x)
    entry = usrData.setdefault(server_id, {}).setdefault(author_id, {})
    entry["name"] = str(message.author)
    entry["keystoneName"] = name
    entry["keystoneLvl"] = lvl
    pp_json(usrData)
    await client.send_message(message.channel, "Keystone added/updated")
async def completedKey(client, message, lvl):
    """Record the highest key the author reports completing in keys.json
    and confirm in the channel.

    :param client: the discord client used to send the confirmation
    :param message: the triggering message (provides server/author/channel)
    :param lvl: completed keystone level as a string
    """
    # Use str() ids for both lookup and store — JSON keys are strings, and the
    # original mixed str() membership checks with raw-id writes.
    server_id = str(message.server.id)
    author_id = str(message.author.id)
    with open('keys.json') as x:
        usrData = json.load(x)
    entry = usrData.setdefault(server_id, {}).setdefault(author_id, {})
    entry["keyCompleted"] = lvl
    entry["name"] = str(message.author)
    pp_json(usrData)
    await client.send_message(message.channel, "Completed added/updated")
await client.send_message(message.channel, "Completed added/updated")
async def keys(client, message):
    """Dispatch the !keyadd / !keycompleted chat commands to their handlers.

    Malformed input (missing arguments, non-numeric levels) falls through to
    the except clause and produces a usage hint instead of crashing the bot.
    """
    try:
        parts = message.content.split(' ')
        command = parts[0]
        if command == '!keyadd':
            name, lvl = parts[1], parts[2]
            # Reject absurd names/levels before touching the store
            if len(name) > 15 or int(lvl) > 50:
                await client.send_message(message.channel, "Invalid format. Try !key <name> <lvl>")
            else:
                await addKey(client, message, name, lvl)
        if command == '!keycompleted':
            if int(parts[1]) > 30:
                await client.send_message(message.channel, "Please don't lie to me :'c")
            else:
                await completedKey(client, message, parts[1])
    except Exception as e:
        print(e)
        await client.send_message(message.channel, "Invalid format. Try !keyadd/!keycompleted <name> <lvl>")
| curiouspiano/BotSep | commands/keys.py | Python | mit | 2,355 |
# the python stuff
import sys
import math
import signal
from threading import Lock
# numerics
import numpy as np
# the interface stuff
from PyQt4 import QtCore, QtGui
import pyqtgraph as pg
# the messaging stuff
import lcm
from mithl import vectorXf_t
from lcm_utils import *
class DataPlotWidget(QtGui.QWidget):
    ''' Helps display data in a nice-looking way.

    Keeps a rolling history of (time, value) samples for ``ncurves`` curves
    and redraws them on a ~30 Hz Qt timer.  addDataPoint() may be called
    from other threads; shared state is guarded by ``accessMutex``.
    '''
    def __init__(self, ncurves, title="Data plotter", histLength=100, dataRange=None, color=None, parent=None, alsoNumeric=False, avgWindow=0.0, levelLines=None, name=None, runInBackground=False):
        """
        :param ncurves: number of curves to plot
        :param title: plot title
        :param histLength: number of samples kept per curve
        :param dataRange: optional (ymin, ymax) pinning the y axis
        :param color: optional single pen color used for all curves
        :param alsoNumeric: also show the latest value of each curve as text
        :param avgWindow: RC constant of a low-pass filter applied to incoming
            samples; 0 disables filtering
        :param levelLines: optional y values drawn as horizontal reference lines
        :param name: optional Qt object name
        """
        super(DataPlotWidget, self).__init__(parent)
        if (name):
            self.setObjectName(name)
        self.runInBackground = runInBackground
        self.accessMutex = Lock()
        self.curves = []
        self.ncurves = ncurves
        self.dataRange = dataRange
        self.alsoNumeric = alsoNumeric
        self.avgWindow = avgWindow
        if (self.avgWindow > 0.0):
            fulltitle = title + " , Low-Pass RC=" + ("%.2f"%avgWindow)
            self.avgs = [0.0]*self.ncurves
            self.lastGotPoint = [0.0]*self.ncurves
        else:
            fulltitle = title
        self.title = title
        self.plot = pg.PlotWidget(title=fulltitle)
        self.plot.hideAxis("bottom")
        if levelLines:
            for line in levelLines:
                line = pg.InfiniteLine(pos=line, angle=0)
                self.plot.addItem(line)
        for i in range(self.ncurves):
            if not color:
                self.curves.append(self.plot.plot([0], [0], pen=(i, self.ncurves)))
            else:
                self.curves.append(self.plot.plot([0], [0], pen=pg.mkPen(color)))
        # ptr[i]: next write index for curve i; good_ptr[i]: valid sample count
        self.ptr = [0] * self.ncurves
        self.histLength = histLength
        self.good_ptr = [0] * self.ncurves
        self.dataHist = []
        self.timeDataHist = np.zeros((self.histLength, self.ncurves))
        for i in range(self.ncurves):
            self.dataHist.append(np.zeros(self.histLength))
        vBoxLayout = QtGui.QVBoxLayout()
        vBoxLayout.addWidget(self.plot)
        if (self.alsoNumeric):
            self.labels = []
            self.labelText = ["???"]*self.ncurves
            for i in range(self.ncurves):
                self.labels.append(QtGui.QLabel("Val: ???"))
                vBoxLayout.addWidget(self.labels[-1])
        self.setLayout(vBoxLayout)
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.update)
        self.timer.start(33)
    def update(self):
        """Redraw all curves and labels from the stored history (Qt timer)."""
        self.accessMutex.acquire()
        for i in range(self.ncurves):
            self.curves[i].setData(self.timeDataHist[0:self.good_ptr[i], i], self.dataHist[i][0:self.good_ptr[i]])
        if self.dataRange:
            self.plot.setYRange(self.dataRange[0], self.dataRange[1])
        if (self.alsoNumeric):
            for i in range(self.ncurves):
                self.labels[i].setText(self.labelText[i])
        self.accessMutex.release()
    def autoRangeY(self):
        """Fit the y axis to the min/max over all stored samples (and 0)."""
        minY = 0
        maxY = 0
        for i in range(self.ncurves):
            minY = min(minY, np.min(self.dataHist[i][0:self.good_ptr[i]]))
            maxY = max(maxY, np.max(self.dataHist[i][0:self.good_ptr[i]]))
        self.plot.setYRange(minY, maxY)
    def addDataPoint(self, i, t, x):
        """Append sample (t, x) to curve i, shifting history when full.

        Thread-safe: the whole update happens under ``accessMutex``.
        """
        self.accessMutex.acquire()
        # Optional low-pass filter (exponential moving average, RC = avgWindow)
        if (self.avgWindow > 0):
            dt = t - self.lastGotPoint[i]
            self.lastGotPoint[i] = t
            alpha = dt / (dt + self.avgWindow)
            self.avgs[i] = x * alpha + (1.0 - alpha) * self.avgs[i]
            x = self.avgs[i]
        if self.ptr[i] == self.histLength-1 and self.good_ptr[i] == self.histLength-1:
            # Buffer full: shift both histories left by one *before* writing the
            # newest sample into the last slot.  The original rolled
            # timeDataHist after the write, displacing the fresh timestamp.
            # todo: np.roll is horrendously inefficient
            self.dataHist[i] = np.roll(self.dataHist[i], -1)
            self.timeDataHist[:, i] = np.roll(self.timeDataHist[:, i], -1)
        # Bug fix: index with this curve's own pointer, self.ptr[i].  The
        # original used the whole list (self.ptr), which fancy-indexed numpy
        # rows for *every* curve's pointer and clobbered column i.
        self.timeDataHist[self.ptr[i], i] = t
        self.dataHist[i][self.ptr[i]] = x
        self.good_ptr[i] = max(self.ptr[i], self.good_ptr[i])
        self.ptr[i] = min(self.ptr[i]+1, self.histLength-1)
        if (self.alsoNumeric):
            # update label text only; the actual QLabel is touched in update()
            # (don't touch actual GUI state outside of main thread)
            if (self.avgWindow > 0):
                self.labelText[i] = self.title + " #" + str(i) + ": " + str(self.avgs[i])
            else:
                self.labelText[i] = self.title + " #" + str(i) + ": " + str(x)
        self.accessMutex.release()
| MITHyperloopTeam/software_core | software/UI/data_plot_widget.py | Python | lgpl-3.0 | 4,783 |
# Copyright (C) 2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from gi.repository import Gdk
from gi.repository import GdkPixbuf
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Pango
from itertools import izip
import logging
import sys
from xl.nls import gettext as _
from xl.playlist import (
Playlist,
is_valid_playlist,
import_playlist,
)
from xl import (
common,
event,
main,
player,
providers,
settings,
trax,
xdg
)
from xlgui.widgets.common import AutoScrollTreeView
from xlgui.widgets.notebook import NotebookPage
from xlgui.widgets import (
dialogs,
menu,
menuitems,
playlist_columns
)
from xlgui import (
guiutil,
icons
)
logger = logging.getLogger(__name__)
def default_get_playlist_func(parent, context):
    """Fallback playlist getter: the playlist currently driving playback.

    The *parent* and *context* arguments are accepted (and ignored) so this
    matches the get_playlist_func(parent, context) callable signature used
    by the menu items below.
    """
    return player.QUEUE.current_playlist
class ModesMenuItem(menu.MenuItem):
    """
    A menu item having a submenu containing entries for shuffle modes.
    Defaults to adjusting the currently-playing playlist.
    """
    # Set by subclasses: 'shuffle', 'repeat' or 'dynamic' (must match the
    # Playlist.<modetype>_modes / <modetype>_mode_names class attributes)
    modetype = ''
    # Set by subclasses: translated menu label
    display_name = ""
    def __init__(self, name, after, get_playlist_func=default_get_playlist_func):
        """
        :param name: internal name of the menu item
        :param after: menu item names this entry is placed after
        :param get_playlist_func: callable(parent, context) returning the
            playlist whose mode this item adjusts
        """
        menu.MenuItem.__init__(self, name, None, after)
        self.get_playlist_func = get_playlist_func
    def factory(self, menu, parent, context):
        """Build the Gtk menu item with its mode submenu attached."""
        item = Gtk.ImageMenuItem.new_with_mnemonic(self.display_name)
        image = Gtk.Image.new_from_icon_name('media-playlist-'+self.modetype,
                size=Gtk.IconSize.MENU)
        item.set_image(image)
        submenu = self.create_mode_submenu(item)
        item.set_submenu(submenu)
        pl = self.get_playlist_func(parent, context)
        # PEP 8: identity comparison with None, not `!=`
        item.set_sensitive(pl is not None)
        return item
    def create_mode_submenu(self, parent_item):
        """Create the submenu of radio items, one per available mode."""
        names = getattr(Playlist, "%s_modes"%self.modetype)
        displays = getattr(Playlist, "%s_mode_names"%self.modetype)
        items = []
        previous = None
        for name, display in zip(names, displays):
            after = [previous] if previous else []
            item = menu.radio_menu_item(name, after, display,
                    '%s_modes'%self.modetype, self.mode_is_selected,
                    self.on_mode_activated)
            items.append(item)
            if previous is None:
                # separate the first entry from the remaining modes
                items.append(menu.simple_separator("sep", [items[-1].name]))
            previous = items[-1].name
        m = menu.Menu(parent_item)
        for item in items:
            m.add_item(item)
        return m
    def mode_is_selected(self, name, parent, context):
        """Return True if *name* is the playlist's currently active mode."""
        pl = self.get_playlist_func(parent, context)
        if pl is None:
            return False
        return getattr(pl, "%s_mode"%self.modetype) == name
    def on_mode_activated(self, widget, name, parent, context):
        """Apply the activated mode entry to the playlist."""
        pl = self.get_playlist_func(parent, context)
        if pl is None:
            return False
        setattr(pl, "%s_mode"%self.modetype, name)
class ShuffleModesMenuItem(ModesMenuItem):
    """Menu item listing the available shuffle modes."""
    modetype = 'shuffle'
    display_name = _("S_huffle")
class RepeatModesMenuItem(ModesMenuItem):
    """Menu item listing the available repeat modes."""
    modetype = 'repeat'
    display_name = _("R_epeat")
class DynamicModesMenuItem(ModesMenuItem):
    """Menu item listing the available dynamic-playlist modes."""
    modetype = 'dynamic'
    display_name = _("_Dynamic")
class RemoveCurrentMenuItem(menu.MenuItem):
    """
    Allows for removing the currently playing
    track from the current playlist
    """
    def __init__(self, after, get_playlist_func=default_get_playlist_func):
        """
        :param after: menu item names this entry is placed after
        :param get_playlist_func: callable(parent, context) returning the
            playlist to remove the track from
        """
        menu.MenuItem.__init__(self, 'remove-current', None, after)
        self.get_playlist_func = get_playlist_func
    def factory(self, menu, parent, context):
        """
        Sets up the menu item
        """
        item = Gtk.ImageMenuItem.new_with_mnemonic(_('Remove _Current Track From Playlist'))
        item.set_image(Gtk.Image.new_from_icon_name('list-remove', Gtk.IconSize.MENU))
        item.connect('activate', self.on_activate, parent, context)
        # Nothing playing -> no "current" track to remove
        if player.PLAYER.is_stopped():
            item.set_sensitive(False)
        return item
    def on_activate(self, menuitem, parent, context):
        """
        Removes the currently playing track from the current playlist
        """
        playlist = self.get_playlist_func(parent, context)
        # Only remove when this playlist's current track is the one playing
        if playlist and playlist.current == player.PLAYER.current:
            del playlist[playlist.current_position]
class RandomizeMenuItem(menu.MenuItem):
    """
    A menu item which randomizes the full
    playlist or the current selection
    """
    def __init__(self, after):
        """
        :param after: menu item names this entry is placed after
        """
        menu.MenuItem.__init__(self, 'randomize', None, after)
    def factory(self, menu, parent, context):
        """
        Sets up the menu item
        """
        # Label reflects whether a multi-track selection exists
        label = _('R_andomize Playlist')
        if not context['selection-empty']:
            label = _('R_andomize Selection')
        item = Gtk.MenuItem.new_with_mnemonic(label)
        item.connect('activate', self.on_activate, parent, context)
        return item
    def on_activate(self, menuitem, parent, context):
        """
        Randomizes the playlist or the selection
        """
        positions = [path[0] for path in context['selected-paths']]
        # Randomize the full playlist if only one track was selected
        positions = positions if len(positions) > 1 else []
        context['playlist'].randomize(positions)
# do this in a function to avoid polluting the global namespace
def __create_playlist_tab_context_menu():
    """Register the entries of the playlist *tab* context menu with the
    'playlist-tab-context-menu' provider service.

    Each item's `after` list references the previous item's name, which
    fixes the display order of the menu.
    """
    smi = menu.simple_menu_item
    sep = menu.simple_separator
    items = []
    items.append(smi('new-tab', [], _("_New Playlist"), 'tab-new',
        lambda w, n, o, c: o.tab.notebook.create_new_playlist()))
    items.append(sep('new-tab-sep', ['new-tab']))
    # "Save" is only offered for pages that implement on_save() and whose
    # playlist already exists in the saved-playlists manager
    items.append(smi('save', ['new-tab-sep'], _("_Save"), 'document-save',
        callback=lambda w, n, p, c: p.on_save(),
        condition_fn=lambda n, p, c: p.can_save() and main.exaile().playlists.has_playlist_name(p.playlist.name)))
    items.append(smi('saveas', ['save'], _("Save _As"), 'document-save-as',
        callback=lambda w, n, p, c: p.on_saveas(),
        condition_fn=lambda n, p, c: p.can_saveas()))
    items.append(smi('rename', ['saveas'], _("_Rename"), None,
        callback=lambda w, n, p, c: p.tab.start_rename(),
        condition_fn=lambda n, p, c: p.tab.can_rename()))
    items.append(smi('clear', ['rename'], _("_Clear"), 'edit-clear-all',
        lambda w, n, o, c: o.playlist.clear()))
    items.append(sep('tab-close-sep', ['clear']))
    # The export items fetch the page's playlist lazily via this helper
    def _get_pl_func(o, c):
        return o.playlist
    items.append(menuitems.ExportPlaylistMenuItem('export', ['tab-close-sep'], _get_pl_func))
    items.append(menuitems.ExportPlaylistFilesMenuItem('export-files', ['export'], _get_pl_func))
    items.append(sep('tab-export-sep', ['export']))
    items.append(smi('tab-close', ['tab-export-sep'], _("Close _Tab"), 'window-close',
        lambda w, n, o, c: o.tab.close()))
    for item in items:
        providers.register('playlist-tab-context-menu', item)
__create_playlist_tab_context_menu()
class PlaylistContextMenu(menu.ProviderMenu):
    """Context menu shown for tracks in a playlist view; its entries come
    from the 'playlist-context-menu' provider service."""
    def __init__(self, page):
        """
        :param page: The :class:`PlaylistPage` this menu is
            associated with.
        """
        menu.ProviderMenu.__init__(self, 'playlist-context-menu', page)
    def get_context(self):
        """Build the lazily-evaluated context dict handed to the menu items.

        Each value is computed on first access only, so opening the menu
        stays cheap even for large selections.
        """
        context = common.LazyDict(self._parent)
        context['playlist'] = lambda name, parent: parent.playlist
        context['selection-empty'] = lambda name, parent: parent.get_selection_count() == 0
        context['selected-paths'] = lambda name, parent: parent.get_selected_paths()
        context['selected-items'] = lambda name, parent: parent.get_selected_items()
        context['selected-tracks'] = lambda name, parent: parent.get_selected_tracks()
        context['selection-count'] = lambda name, parent: parent.get_selection_count()
        return context
class SPATMenuItem(menu.MenuItem):
    """
    Menu item allowing for toggling playback
    stop after the selected track (SPAT)
    """
    def __init__(self, name, after):
        """
        :param name: internal name of the menu item
        :param after: menu item names this entry is placed after
        """
        menu.MenuItem.__init__(self, name, None, after)
    def factory(self, menu, parent, context):
        """
        Generates the menu item
        """
        display_name = _('_Stop Playback After This Track')
        icon_name = 'media-playback-stop'
        if context['selected-items']:
            # If the selected track is already the SPAT marker, offer to
            # clear it instead
            selection_position = context['selected-items'][0][0]
            if selection_position == parent.playlist.spat_position:
                display_name = _('_Continue Playback After This Track')
                icon_name = 'media-playback-play'
        menuitem = Gtk.ImageMenuItem.new_with_mnemonic(display_name)
        menuitem.set_image(Gtk.Image.new_from_icon_name(icon_name,
            Gtk.IconSize.MENU))
        menuitem.connect('activate', self.on_menuitem_activate,
            parent, context)
        return menuitem
    def on_menuitem_activate(self, menuitem, parent, context):
        """
        Toggles the SPAT state
        """
        selection_position = context['selected-items'][0][0]
        # spat_position of -1 clears the stop marker
        if selection_position == parent.playlist.spat_position:
            parent.playlist.spat_position = -1
        else:
            parent.playlist.spat_position = selection_position
def __create_playlist_context_menu():
    """Register the entries of the per-track playlist context menu with the
    'playlist-context-menu' provider service."""
    smi = menu.simple_menu_item
    sep = menu.simple_separator
    items = []
    items.append(menuitems.EnqueueMenuItem('enqueue', []))
    items.append(SPATMenuItem('toggle-spat', [items[-1].name]))
    # NOTE(review): defined but not passed to RatingMenuItem below; appears
    # unused — confirm before removing.
    def rating_get_tracks_func(menuobj, parent, context):
        return [row[1] for row in context['selected-items']]
    items.append(menuitems.RatingMenuItem('rating', [items[-1].name]))
    # TODO: custom playlist item here
    items.append(sep('sep1', [items[-1].name]))
    def remove_tracks_cb(widget, name, playlistpage, context):
        tracks = context['selected-items']
        playlist = playlistpage.playlist
        # If it's all one block, just delete it in one chunk for
        # maximum speed.
        # NOTE: the == comparison relies on Python 2 range() returning a list
        positions = [t[0] for t in tracks]
        if positions == range(positions[0], positions[0]+len(positions)):
            del playlist[positions[0]:positions[0]+len(positions)]
        else:
            # Delete back-to-front so earlier positions stay valid
            for position, track in tracks[::-1]:
                del playlist[position]
    items.append(smi('remove', [items[-1].name], _("_Remove from Playlist"),
        'list-remove', remove_tracks_cb))
    items.append(RandomizeMenuItem([items[-1].name]))
    def playlist_menu_condition(name, parent, context):
        """
        Returns True if the containing notebook's tab bar is hidden
        """
        scrolledwindow = parent.get_parent()
        page = scrolledwindow.get_parent()
        return not page.tab.notebook.get_show_tabs()
    # When tabs are hidden, expose the tab context menu as a submenu here
    items.append(smi('playlist-menu', [items[-1].name], _('Playlist'),
        submenu=menu.ProviderMenu('playlist-tab-context-menu', None),
        condition_fn=playlist_menu_condition))
    items.append(sep('sep2', [items[-1].name]))
    items.append(smi('properties', [items[-1].name], _("_Track Properties"),
        'document-properties', lambda w, n, o, c: o.show_properties_dialog()))
    for item in items:
        providers.register('playlist-context-menu', item)
__create_playlist_context_menu()
class PlaylistPageBase(NotebookPage):
    '''
    Base class for playlist pages. Subclasses can indicate that
    they support the following operations:

    save:
        - Define a function called 'on_save'

    save as:
        - Define a function called 'on_saveas'
    '''
    menu_provider_name = 'playlist-tab-context-menu'
    def can_save(self):
        """Return True if the subclass implements on_save()."""
        return hasattr(self, 'on_save')
    def can_saveas(self):
        """Return True if the subclass implements on_saveas()."""
        return hasattr(self, 'on_saveas')
class PlaylistPage(PlaylistPageBase):
    """
    Displays a playlist and associated controls.
    """
    def __init__(self, playlist, player):
        """
        :param playlist: The :class:`xl.playlist.Playlist` to display
            in this page.
        :param player: The :class:`xl.player._base.ExailePlayer` that
            this page is associated with
        :param queue:
        """
        NotebookPage.__init__(self)
        self.playlist = playlist
        self.icon = None
        # "Loading" placeholder widget and the timer that delays showing it
        self.loading = None
        self.loading_timer = None
        uifile = xdg.get_data_path("ui", "playlist.ui")
        self.builder = Gtk.Builder()
        self.builder.add_from_file(uifile)
        playlist_page = self.builder.get_object("playlist_page")
        # Move the children out of the GtkBuilder container into this page,
        # keeping their packing options
        for child in playlist_page.get_children():
            packing = playlist_page.query_child_packing(child)
            child.reparent(self)
            self.set_child_packing(child, *packing)
        self.shuffle_button = self.builder.get_object("shuffle_button")
        self.repeat_button = self.builder.get_object("repeat_button")
        self.dynamic_button = self.builder.get_object("dynamic_button")
        self.search_entry = guiutil.SearchEntry(
            self.builder.get_object("search_entry"))
        self.builder.connect_signals(self)
        self.playlist_window = self.builder.get_object("playlist_window")
        self.playlist_utilities_bar = self.builder.get_object(
            'playlist_utilities_bar')
        self.view = PlaylistView(playlist, player)
        self.playlist_window.add(self.view)
        # Keep the shuffle/repeat/dynamic buttons in sync with playlist state
        event.add_ui_callback(self.on_mode_changed,
            'playlist_shuffle_mode_changed', self.playlist,
            self.shuffle_button)
        event.add_ui_callback(self.on_mode_changed,
            'playlist_repeat_mode_changed', self.playlist,
            self.repeat_button)
        event.add_ui_callback(self.on_mode_changed,
            'playlist_dynamic_mode_changed', self.playlist,
            self.dynamic_button)
        event.add_ui_callback(self.on_dynamic_playlists_provider_changed,
            'dynamic_playlists_provider_added')
        event.add_ui_callback(self.on_dynamic_playlists_provider_changed,
            'dynamic_playlists_provider_removed')
        event.add_ui_callback(self.on_option_set,
            'gui_option_set')
        # Apply the current state immediately
        self.on_mode_changed(None, None, self.playlist.shuffle_mode, self.shuffle_button)
        self.on_mode_changed(None, None, self.playlist.repeat_mode, self.repeat_button)
        self.on_mode_changed(None, None, self.playlist.dynamic_mode, self.dynamic_button)
        self.on_dynamic_playlists_provider_changed(None, None, None)
        self.on_option_set('gui_option_set', settings, 'gui/playlist_utilities_bar_visible')
        self.view.connect('button-press-event', self.on_view_button_press_event)
        self.view.model.connect('row-changed', self.on_row_changed)
        self.view.model.connect('data-loading', self.on_data_loading)
        if self.view.model.data_loading:
            self.on_data_loading(None, True)
        self.show_all()
    ## NotebookPage API ##
    def focus(self):
        self.view.grab_focus()
    def get_page_name(self):
        return self.playlist.name
    def set_page_name(self, name):
        self.playlist.name = name
        self.name_changed()
    def get_search_entry(self):
        return self.search_entry
    ## End NotebookPage ##
    ## PlaylistPageBase API ##
    # TODO: These two probably shouldn't reach back to main..
    def on_save(self):
        """Overwrite the saved playlist with the current contents."""
        main.exaile().playlists.save_playlist(self.playlist, overwrite=True)
    def on_saveas(self):
        """Ask for a new name, rename the page and save under that name."""
        exaile = main.exaile()
        playlists = exaile.playlists
        name = dialogs.ask_for_playlist_name(
            exaile.gui.main.window, playlists, self.playlist.name)
        if name is not None:
            self.set_page_name(name)
            playlists.save_playlist(self.playlist)
    ## End PlaylistPageBase API ##
    def on_shuffle_button_press_event(self, widget, event):
        """Show the shuffle-mode menu on mouse press."""
        self.__show_toggle_menu(Playlist.shuffle_modes,
            Playlist.shuffle_mode_names, self.on_shuffle_mode_set,
            'shuffle_mode', widget, event)
    def on_shuffle_button_popup_menu(self, widget):
        """Show the shuffle-mode menu via keyboard (popup-menu signal)."""
        self.__show_toggle_menu(Playlist.shuffle_modes,
            Playlist.shuffle_mode_names, self.on_shuffle_mode_set,
            'shuffle_mode', widget, None)
        return True
    def on_repeat_button_press_event(self, widget, event):
        """Show the repeat-mode menu on mouse press."""
        self.__show_toggle_menu(Playlist.repeat_modes,
            Playlist.repeat_mode_names, self.on_repeat_mode_set,
            'repeat_mode', widget, event)
    def on_repeat_button_popup_menu(self, widget):
        """Show the repeat-mode menu via keyboard (popup-menu signal)."""
        self.__show_toggle_menu(Playlist.repeat_modes,
            Playlist.repeat_mode_names, self.on_repeat_mode_set,
            'repeat_mode', widget, None)
        return True
    def on_dynamic_button_toggled(self, widget):
        """Switch between the first (off) and second dynamic mode."""
        if widget.get_active():
            self.playlist.dynamic_mode = self.playlist.dynamic_modes[1]
        else:
            self.playlist.dynamic_mode = self.playlist.dynamic_modes[0]
    def on_search_entry_activate(self, entry):
        """Filter the view by the entered text; empty text clears the filter."""
        filter_string = entry.get_text().decode('utf-8')
        self.view.filter_tracks(filter_string or None)
    def __show_toggle_menu(self, names, display_names, callback, attr,
            widget, event):
        """
        Display the menu on the shuffle/repeat toggle buttons

        :param names: The list of names of the menu entries
        :param display_names: The list of names to display on
            each menu entry.
        :param callback: The function to call when a menu item is
            activated. It will be passed the name of the activated item.
        :param attr: The attribute of self.playlist to look at to
            determine the currently-selected item.
        :param widget: The ToggleButton to display the menu on
        :param event: The gtk event that triggered the menu display
        """
        widget.set_active(True)
        menu = Gtk.Menu()
        menu.connect('deactivate', self._mode_menu_set_toggle, widget, attr)
        prev = None
        mode = getattr(self.playlist, attr)
        for name, disp in zip(names, display_names):
            group = None if prev is None else prev.get_group()
            item = Gtk.RadioMenuItem.new_with_mnemonic(group, disp)
            if name == mode:
                item.set_active(True)
            item.connect('activate', callback, name)
            menu.append(item)
            # separator after the first entry
            if prev is None:
                menu.append(Gtk.SeparatorMenuItem())
            prev = item
        menu.attach_to_widget(widget)
        menu.show_all()
        if event is not None:
            menu.popup(None, None, guiutil.position_menu, widget,
                event.button, event.time)
        else:
            # keyboard activation: no event to take button/time from
            menu.popup(None, None, guiutil.position_menu, widget,
                0, 0)
            menu.reposition()
    def _mode_menu_set_toggle(self, menu, button, name):
        """On menu close, re-sync the toggle button with the actual mode."""
        mode = getattr(self.playlist, name)
        self.on_mode_changed(None, None, mode, button)
    def on_shuffle_mode_set(self, widget, mode):
        """
        Callback for the Shuffle mode menu
        """
        self.playlist.shuffle_mode = mode
    def on_repeat_mode_set(self, widget, mode):
        """
        Callback for the Repeat mode menu
        """
        self.playlist.repeat_mode = mode
    def on_mode_changed(self, evtype, playlist, mode, button):
        """Reflect a mode change in the toggle button ('disabled' == off)."""
        GLib.idle_add(button.set_active, mode != 'disabled')
    def on_dynamic_playlists_provider_changed(self, evtype, manager, provider):
        """
        Updates the dynamic button on provider changes
        """
        providers_available = len(providers.get('dynamic_playlists')) > 0
        sensitive = False
        tooltip_text = _('Requires plugins providing dynamic playlists')
        if providers_available:
            sensitive = True
            tooltip_text = _('Dynamically add similar tracks to the playlist')
        GLib.idle_add(self.dynamic_button.set_sensitive, sensitive)
        GLib.idle_add(self.dynamic_button.set_tooltip_text, tooltip_text)
    def on_option_set(self, evtype, settings, option):
        """
        Handles option changes
        """
        if option == 'gui/playlist_utilities_bar_visible':
            visible = settings.get_option(option, True)
            GLib.idle_add(self.playlist_utilities_bar.set_visible, visible)
            GLib.idle_add(self.playlist_utilities_bar.set_sensitive, visible)
            GLib.idle_add(self.playlist_utilities_bar.set_no_show_all, not visible)
    def on_row_changed(self, model, path, iter):
        """
        Sets the tab icon to reflect the playback status
        """
        if path[0] == self.playlist.current_position:
            pixbuf = model.get_value(iter, 1)
            if pixbuf == model.clear_pixbuf:
                pixbuf = None
            self.tab.set_icon(pixbuf)
        # there's a race condition on playback stop at the end of
        # a playlist (current_position gets set before this is called),
        # so this sets the icon correctly.
        elif self.playlist.current_position == -1:
            self.tab.set_icon(None)
    def on_data_loading(self, model, loading):
        '''Called when tracks are being loaded into the model'''
        if loading:
            # Delay showing the spinner so short loads don't flash it
            if self.loading is None and self.loading_timer is None:
                self.loading_timer = GLib.timeout_add(500, self.on_data_loading_timer)
        else:
            if self.loading_timer is not None:
                GLib.source_remove(self.loading_timer)
                self.loading_timer = None
            if self.loading is not None:
                guiutil.gtk_widget_replace(self.loading, self.playlist_window)
                self.loading.destroy()
                self.loading = None
    def on_data_loading_timer(self):
        """Timer fired: swap the playlist view for a spinner placeholder."""
        if self.loading_timer is None:
            return
        self.loading_timer = None
        grid = Gtk.Grid()
        sp = Gtk.Spinner()
        sp.start()
        lbl = Gtk.Label.new(_('Loading'))
        grid.attach(sp, 0, 0, 1, 1)
        grid.attach(lbl, 1, 0, 1, 1)
        grid.set_halign(Gtk.Align.CENTER)
        grid.set_valign(Gtk.Align.CENTER)
        self.loading = grid
        guiutil.gtk_widget_replace(self.playlist_window, self.loading)
        self.loading.show_all()
    def on_view_button_press_event(self, view, e):
        """
        Displays the tab context menu upon
        clicks in the contained view
        """
        selection = view.get_selection()
        path = view.get_path_at_pos(int(e.x), int(e.y))
        # We only need the tree path if present
        path = path[0] if path else None
        # Right-click on empty area below the tracks -> tab menu
        # NOTE(review): self.tab_menu is not assigned in this class;
        # presumably provided by NotebookPage — confirm.
        if not path and e.type == Gdk.EventType.BUTTON_PRESS and e.button == 3:
            self.tab_menu.popup(None, None, None, None, e.button, e.time)
class PlaylistView(AutoScrollTreeView, providers.ProviderHandler):
    """Tree view widget that displays and manipulates a single playlist.

    Columns are pluggable through the 'playlist-columns' provider point.
    The view supports text filtering, column sorting, context menus,
    keyboard handling, and drag-and-drop both as a source and as a
    destination.
    """
    __gsignals__ = {}
    def __init__(self, playlist, player):
        # playlist: the playlist object backing this view
        # player: player whose queue/playback state this view follows
        AutoScrollTreeView.__init__(self)
        providers.ProviderHandler.__init__(self, 'playlist-columns')
        self.playlist = playlist
        self.player = player
        self.menu = PlaylistContextMenu(self)
        self.tabmenu = menu.ProviderMenu('playlist-tab-context-menu', self)
        self.dragging = False
        self.pending_event = None
        self.button_pressed = False # used by columns to determine whether
                                    # a notify::width event was initiated
                                    # by the user.
        self._insert_focusing = False
        # Workaround state for a GTK+ DnD bug on OSX; see
        # do_button_press_event / on_drag_motion.
        self._hack_is_osx = sys.platform == 'darwin'
        self._hack_osx_control_mask = False
        # Set to true if you only want things to be copied here, not moved
        self.dragdrop_copyonly = False
        self.set_fixed_height_mode(True) # MASSIVE speedup - don't disable this!
        self.set_rules_hint(True)
        self.set_enable_search(True)
        self.selection = self.get_selection()
        self.selection.set_mode(Gtk.SelectionMode.MULTIPLE)
        self._filter_matcher = None
        self._setup_columns()
        self.columns_changed_id = self.connect("columns-changed",
            self.on_columns_changed)
        self.targets = [Gtk.TargetEntry.new("exaile-index-list", Gtk.TargetFlags.SAME_APP, 0),
                Gtk.TargetEntry.new("text/uri-list", 0, 0)]
        self.drag_source_set(Gdk.ModifierType.BUTTON1_MASK, self.targets,
                Gdk.DragAction.COPY|Gdk.DragAction.MOVE)
        self.drag_dest_set(Gtk.DestDefaults.ALL, self.targets,
                Gdk.DragAction.COPY|Gdk.DragAction.DEFAULT|
                Gdk.DragAction.MOVE)
        event.add_ui_callback(self.on_option_set, "gui_option_set")
        event.add_ui_callback(self.on_playback_start, "playback_track_start", self.player)
        self.connect("cursor-changed", self.on_cursor_changed )
        self.connect("row-activated", self.on_row_activated)
        self.connect("key-press-event", self.on_key_press_event)
        self.connect("drag-begin", self.on_drag_begin)
        self.connect("drag-drop", self.on_drag_drop)
        self.connect("drag-data-get", self.on_drag_data_get)
        self.connect("drag-data-received", self.on_drag_data_received)
        self.connect("drag-data-delete", self.on_drag_data_delete)
        self.connect("drag-end", self.on_drag_end)
        self.connect("drag-motion", self.on_drag_motion)
    def filter_tracks(self, filter_string):
        '''
        Only show tracks that match the filter. If filter is None, then
        clear any existing filters.
        The filter will search any currently enabled columns AND the
        default columns.
        '''
        if filter_string is None:
            self._filter_matcher = None
            self.modelfilter.refilter()
        else:
            # Merge default columns and currently enabled columns
            keyword_tags = set(playlist_columns.DEFAULT_COLUMNS + [c.name for c in self.get_columns()])
            self._filter_matcher = trax.TracksMatcher(filter_string,
                    case_sensitive=False,
                    keyword_tags=keyword_tags)
            logger.debug("Filtering playlist %r by %r.", self.playlist.name, filter_string)
            self.modelfilter.refilter()
            logger.debug("Filtering playlist %r by %r completed.", self.playlist.name, filter_string)
    def get_selection_count(self):
        '''
        Returns the number of items currently selected in the
        playlist. Prefer this to len(get_selected_tracks()) et al
        if you will discard the actual track list
        '''
        return self.get_selection().count_selected_rows()
    def get_selected_tracks(self):
        """
        Returns a list of :class:`xl.trax.Track`
        which are currently selected in the playlist.
        """
        return [x[1] for x in self.get_selected_items()]
    def get_selected_paths(self):
        """
        Returns a list of pairs of treepaths which are currently
        selected in the playlist.
        The treepaths are returned for the base model, so they are
        indices that can be used with the playlist currently
        associated with this view.
        """
        selection = self.get_selection()
        model, paths = selection.get_selected_rows()
        # When a filter is active, translate filter paths back to
        # positions in the underlying playlist model.
        if isinstance(model, Gtk.TreeModelFilter):
            paths = [model.convert_path_to_child_path(path) for path in paths]
        return paths
    def get_selected_items(self):
        """
        Returns a list of pairs of indices and :class:`xl.trax.Track`
        which are currently selected in the playlist.
        The indices can be used with the playlist currently associated
        with this view.
        """
        selection = self.get_selection()
        model, paths = selection.get_selected_rows()
        try:
            if isinstance(model, Gtk.TreeModelFilter):
                tracks = [(model.convert_path_to_child_path(path)[0], model.get_value(model.get_iter(path), 0)) for path in paths]
            else:
                tracks = [(path[0], model.get_value(model.get_iter(path), 0)) for path in paths]
        except TypeError: #one of the paths was invalid
            return []
        return tracks
    def get_sort_column(self):
        # Returns the column currently showing a sort indicator, or None.
        for col in self.get_columns():
            if col.get_sort_indicator():
                return col
        return None
    def get_sort_by(self):
        # Returns (sort_by, reverse): the tag list to sort on and the
        # direction, derived from the currently sorted column if any.
        sortcol = self.get_sort_column()
        if sortcol:
            reverse = sortcol.get_sort_order() == Gtk.SortType.DESCENDING
            sort_by = [sortcol.name] + list(common.BASE_SORT_TAGS)
        else:
            reverse = False
            sort_by = list(common.BASE_SORT_TAGS)
        return (sort_by, reverse)
    def play_track_at(self, position, track):
        '''
        When called, this will begin playback of a track at a given
        position in the internal playlist
        '''
        self._play_track_at(position, track)
    def _play_track_at(self, position, track, on_activated=False):
        '''Internal API'''
        # Either play immediately, or (when enqueue_by_default is set and
        # this view is not the queue itself) append to the play queue.
        if not settings.get_option('playlist/enqueue_by_default', False) or \
                (self.playlist is self.player.queue and on_activated):
            if self.player.queue.is_play_enabled():
                self.playlist.set_current_position(position)
                self.player.queue.set_current_playlist(self.playlist)
                self.player.queue.play(track=track)
        elif self.playlist is not self.player.queue:
            self.player.queue.append(track)
    def _setup_columns(self):
        # Builds the model and one tree view column per configured
        # 'playlist-columns' provider.
        columns = settings.get_option('gui/columns', playlist_columns.DEFAULT_COLUMNS)
        provider_names = [p.name for p in providers.get('playlist-columns')]
        columns = [name for name in columns if name in provider_names]
        if not columns:
            columns = playlist_columns.DEFAULT_COLUMNS
        # FIXME: this is kinda ick because of supporting both models
        #self.model.columns = columns
        # TODO: What is the fixme talking about?
        self.model = PlaylistModel(self.playlist, columns, self.player)
        self.model.connect('row-inserted', self.on_row_inserted)
        self.set_model(self.model)
        self._setup_filter()
        font = settings.get_option('gui/playlist_font', None)
        if font is not None:
            font = Pango.FontDescription(font)
        for position, column in enumerate(columns):
            position += 2 # offset for pixbuf column
            playlist_column = providers.get_provider(
                'playlist-columns', column)(self, position, self.player, font)
            playlist_column.connect('clicked', self.on_column_clicked)
            self.append_column(playlist_column)
            header = playlist_column.get_widget()
            header.show()
            header.get_ancestor(Gtk.Button).connect('button-press-event',
                self.on_header_button_press)
            header.get_ancestor(Gtk.Button).connect('key-press-event',
                self.on_header_key_press_event)
    def _setup_filter(self):
        '''Call this anytime after you call set_model()'''
        self.modelfilter = self.get_model().filter_new()
        self.modelfilter.set_visible_func(self.modelfilter_visible_func)
        self.set_model(self.modelfilter)
        if self._filter_matcher is not None:
            self.modelfilter.refilter()
    def _refresh_columns(self):
        # Rebuilds all columns (e.g. after the column set or font changed)
        # while preserving the scroll position and the selection.
        selection = self.get_selection()
        if not selection: # The widget has been destroyed
            return
        info = selection.get_selected_rows()
        # grab the first visible row of the treeview
        firstpath = self.get_path_at_pos(4,4)
        topindex = None
        if firstpath:
            topindex = firstpath[0][0]
        # Temporarily disconnect so removing columns doesn't persist an
        # empty column list to settings.
        self.disconnect(self.columns_changed_id)
        columns = self.get_columns()
        for col in columns:
            self.remove_column(col)
        self._setup_columns()
        self.columns_changed_id = self.connect("columns-changed",
            self.on_columns_changed)
        self.queue_draw()
        if firstpath:
            self.scroll_to_cell(topindex)
        if info:
            for path in info[1]:
                selection.select_path(path)
    def on_header_button_press(self, widget, event):
        # Right-click on a column header opens the column-chooser menu.
        if event.button == 3:
            m = menu.ProviderMenu('playlist-columns-menu', self)
            m.popup(None, None, None, None, event.button, event.time)
            return True
    def on_columns_changed(self, widget):
        # Persist the user's column layout to settings.
        columns = [c.name for c in self.get_columns()]
        if columns != settings.get_option('gui/columns', []):
            settings.set_option('gui/columns', columns)
    def on_column_clicked(self, column):
        # Toggle the sort order on the clicked column, clear the sort
        # indicator on all others, then sort the playlist itself.
        order = None
        for col in self.get_columns():
            if col.name == column.name:
                order = column.get_sort_order()
                if order == Gtk.SortType.ASCENDING:
                    order = Gtk.SortType.DESCENDING
                else:
                    order = Gtk.SortType.ASCENDING
                col.set_sort_indicator(True)
                col.set_sort_order(order)
            else:
                col.set_sort_indicator(False)
                col.set_sort_order(Gtk.SortType.DESCENDING)
        reverse = order == Gtk.SortType.DESCENDING
        self.playlist.sort([column.name] + list(common.BASE_SORT_TAGS), reverse=reverse)
    def on_option_set(self, typ, obj, data):
        # React to column/font setting changes on the UI thread.
        if data == "gui/columns" or data == 'gui/playlist_font':
            GLib.idle_add(self._refresh_columns, priority=GLib.PRIORITY_DEFAULT)
    def on_playback_start(self, type, player, track):
        # Scroll to the new current track when this playlist is playing
        # and the option is enabled.
        if player.queue.current_playlist == self.playlist and \
                player.current == self.playlist.current and \
                settings.get_option('gui/ensure_visible', True):
            GLib.idle_add(self.scroll_to_current)
    def scroll_to_current(self):
        # Scrolls to (and puts the cursor on) the playlist's current track.
        position = self.playlist.current_position
        if position >= 0:
            model = self.get_model()
            # If it's a filter, then the position isn't actually the path
            if hasattr(model, 'convert_child_path_to_path'):
                path = model.convert_child_path_to_path(Gtk.TreePath((position,)))
                if path:
                    self.scroll_to_cell(path)
                    self.set_cursor(path)
    def on_cursor_changed(self, widget):
        # Broadcast the new selection; LazyDict defers the (potentially
        # expensive) selection queries until a listener actually asks.
        context = common.LazyDict(self)
        context['selection-empty'] = lambda name, parent: parent.get_selection_count() == 0
        context['selected-items'] = lambda name, parent: parent.get_selected_items()
        context['selected-tracks'] = lambda name, parent: parent.get_selected_tracks()
        event.log_event( 'playlist_cursor_changed', self, context)
    def on_row_activated(self, *args):
        # Double-click / Enter on a row: play (or enqueue) that track.
        try:
            position, track = self.get_selected_items()[0]
        except IndexError:
            return
        self._play_track_at(position, track, True)
    def on_row_inserted(self, model, path, iter):
        '''
        When something is inserted into the playlist, focus on it. If
        there are multiple things inserted, focus only on the first.
        '''
        if not self._insert_focusing:
            self._insert_focusing = True
            # HACK: GI: We get a segfault if we don't do this, because the
            # GtkTreePath gets deleted before the idle function is run.
            path = path.copy()
            def _set_cursor():
                self.set_cursor(path)
                self._insert_focusing = False
            GLib.idle_add(_set_cursor)
    def do_button_press_event(self, e):
        """
        Adds some custom selection work to
        1) unselect all rows if clicking an empty area,
        2) updating the selection upon right click and
        3) popping up the context menu upon right click
        Also sets the internal state for button pressed
        Taken from the following sources:
        * thunar_details_view_button_press_event() of thunar-details-view.c
        * MultiDragTreeView.__button_press/__button.release of quodlibet/qltk/views.py
        """
        self.button_pressed = True
        self.grab_focus()
        # need this to workaround bug in GTK+ on OSX when dragging/dropping
        # -> https://bugzilla.gnome.org/show_bug.cgi?id=722815
        if self._hack_is_osx:
            self._hack_osx_control_mask = True if e.state & Gdk.ModifierType.CONTROL_MASK else False
        selection = self.get_selection()
        pathtuple = self.get_path_at_pos(int(e.x), int(e.y))
        # We only need the tree path if present
        if pathtuple:
            path = pathtuple[0]
            col = pathtuple[1]
        else:
            path = None
        # We unselect all selected items if the user clicks on an empty
        # area of the treeview and no modifier key is active
        if not e.state & Gtk.accelerator_get_default_mod_mask() and not path:
            selection.unselect_all()
        if path and e.type == Gdk.EventType.BUTTON_PRESS:
            # Prevent unselection of all except the clicked item on left
            # clicks, required to preserve the selection for DnD
            if e.button == 1 and not e.state & Gtk.accelerator_get_default_mod_mask() and \
                    selection.path_is_selected(path):
                selection.set_select_function(lambda *args: False, None)
                self.pending_event = (path, col)
            # Open the context menu on right clicks
            if e.button == 3:
                # Select the path on which the user clicked if not selected yet
                if not selection.path_is_selected(path):
                    # We don't unselect all other items if Control is active
                    if not e.state & Gdk.ModifierType.CONTROL_MASK:
                        selection.unselect_all()
                    selection.select_path(path)
                self.menu.popup(None, None, None, None, e.button, e.time)
                return True
        return Gtk.TreeView.do_button_press_event(self, e)
    def do_button_release_event(self, e):
        """
        Unsets the internal state for button press
        """
        self.button_pressed = False
        self._hack_osx_control_mask = False
        # Restore regular selection behavior in any case
        self.get_selection().set_select_function(lambda *args: True, None)
        if self.pending_event:
            path, col = self.pending_event
            # perform the normal selection that would have happened
            self.set_cursor(path, col, 0)
            self.pending_event = None
        return Gtk.TreeView.do_button_release_event(self, e)
    def on_key_press_event(self, widget, event):
        # Menu key opens the context menu; Delete removes the selection.
        if event.keyval == Gdk.KEY_Menu:
            self.menu.popup(None, None, None, None, 0, event.time)
            return True
        elif event.keyval == Gdk.KEY_Delete:
            indexes = [x[0] for x in self.get_selected_paths()]
            # NOTE(review): comparing a list against range() relies on
            # Python 2 range() returning a list; under Python 3 this
            # fast-path (deleting a contiguous run via one slice) would
            # never trigger -- verify when porting.
            if indexes and indexes == range(indexes[0], indexes[0]+len(indexes)):
                del self.playlist[indexes[0]:indexes[0]+len(indexes)]
            else:
                # Delete back-to-front so earlier removals don't shift
                # the remaining indices.
                for i in indexes[::-1]:
                    del self.playlist[i]
    def on_header_key_press_event(self, widget, event):
        if event.keyval == Gdk.KEY_Menu:
            # Open context menu for selecting visible columns
            m = menu.ProviderMenu('playlist-columns-menu', self)
            m.popup(None, None, None, None, 0, event.time)
            return True
    ### DND handlers ###
    ## Source
    def on_drag_begin(self, widget, context):
        """
        Activates the dragging state
        """
        # TODO: set drag icon
        self.dragging = True
        self.pending_event = None
    def on_drag_data_get(self, widget, context, selection, info, etime):
        """
        Stores indices and URIs of the selected items in the drag selection
        """
        target = selection.get_target()
        # NOTE(review): under GI, get_target() returns a Gdk.Atom, not a
        # str; this str comparison looks like a PyGTK leftover (the
        # receive handler below uses .name()) -- confirm it matches.
        if target == "exaile-index-list":
            positions = self.get_selected_paths()
            if positions:
                s = ",".join(str(i[0]) for i in positions)
                selection.set(target, 8, s)
        elif target == "text/uri-list":
            tracks = self.get_selected_tracks()
            uris = trax.util.get_uris_from_tracks(tracks)
            selection.set_uris(uris)
    def on_drag_data_delete(self, widget, context):
        """
        Stops the default handler from running, all
        processing occurs in the drag-data-received handler
        """
        self.stop_emission('drag-data-delete')
    def on_drag_end(self, widget, context):
        """
        Deactivates the dragging state
        """
        self.dragging = False
    ## Dest
    def on_drag_drop(self, widget, context, x, y, etime):
        """
        Always allows processing of drop operations
        """
        return True
    def on_drag_data_received(self, widget, context, x, y, selection, info, etime):
        """
        Builds a list of tracks either from internal indices or
        external URIs and inserts or appends them to the playlist
        """
        # Stop default handler from running
        self.stop_emission('drag-data-received')
        drop_info = self.get_dest_row_at_pos(x, y)
        if drop_info:
            path, position = drop_info
            model = self.get_model()
            if isinstance(model, Gtk.TreeModelFilter):
                path = model.convert_path_to_child_path(path)
            insert_position = path[0]
            if position in (Gtk.TreeViewDropPosition.AFTER, Gtk.TreeViewDropPosition.INTO_OR_AFTER):
                insert_position += 1
        else:
            # No drop row: append at the end.
            insert_position = -1
        tracks = []
        target = selection.get_target().name()
        if target == "exaile-index-list":
            # Internal drag: indices refer to the source view's playlist.
            # NOTE(review): selection.data is the PyGTK attribute; GI's
            # Gtk.SelectionData exposes get_data() instead -- verify this
            # path works under PyGObject.
            positions = [int(x) for x in selection.data.split(",")]
            tracks = common.MetadataList()
            source_playlist_view = Gtk.drag_get_source_widget(context)
            playlist = self.playlist
            # Get the playlist of the source widget if dragging between views
            if source_playlist_view is not self:
                playlist = source_playlist_view.playlist
            # TODO: this can probably be made more-efficient
            for i in positions:
                tracks.extend(playlist[i:i+1])
            # Insert at specific position if possible
            if insert_position >= 0:
                self.playlist[insert_position:insert_position] = tracks
                if source_playlist_view is self:
                    # Update position for tracks after the insert position
                    for i, position in enumerate(positions[:]):
                        if position >= insert_position:
                            position += len(tracks)
                            positions[i] = position
            else:
                # Otherwise just append the tracks
                self.playlist.extend(tracks)
            # Remove tracks from the source playlist if moved
            # NOTE(review): context.action is PyGTK-era API; GI uses
            # context.get_selected_action() (as done below) -- confirm.
            if context.action == Gdk.DragAction.MOVE:
                for i in positions[::-1]:
                    del playlist[i]
        elif target == "text/uri-list":
            # External drag: resolve URIs (playlists are expanded) and
            # sort the incoming tracks per the view's current sort order.
            uris = selection.get_uris()
            tracks = []
            for uri in uris:
                if is_valid_playlist(uri):
                    tracks.extend(import_playlist(uri))
                else:
                    tracks.extend(trax.get_tracks_from_uri(uri))
            sort_by, reverse = self.get_sort_by()
            tracks = trax.sort_tracks(sort_by, tracks, reverse=reverse,
                artist_compilations=True)
            if insert_position >= 0:
                self.playlist[insert_position:insert_position] = tracks
            else:
                self.playlist.extend(tracks)
        #delete = context.action == Gdk.DragAction.MOVE
        # TODO: Selected? Suggested?
        delete = context.get_selected_action() == Gdk.DragAction.MOVE
        context.finish(True, delete, etime)
        scroll_when_appending_tracks = settings.get_option(
            'gui/scroll_when_appending_tracks', False)
        if scroll_when_appending_tracks and tracks:
            self.scroll_to_cell(self.playlist.index(tracks[-1]))
    def on_drag_motion(self, widget, context, x, y, etime):
        """
        Makes sure tracks can only be inserted before or after tracks
        and sets the drop action to move or copy depending on target
        and user interaction (e.g. Ctrl key)
        """
        drop_info = self.get_dest_row_at_pos(x, y)
        if not drop_info:
            return False
        path, position = drop_info
        # Collapse "into" positions to before/after: rows have no children.
        if position == Gtk.TreeViewDropPosition.INTO_OR_BEFORE:
            position = Gtk.TreeViewDropPosition.BEFORE
        elif position == Gtk.TreeViewDropPosition.INTO_OR_AFTER:
            position = Gtk.TreeViewDropPosition.AFTER
        self.set_drag_dest_row(path, position)
        action = Gdk.DragAction.MOVE
        _, _, _, modifier = self.get_window().get_pointer()
        target = self.drag_dest_find_target(context, self.drag_dest_get_target_list()).name()
        # Copy instead of move for external URIs, or when Ctrl is held
        # (on OSX the Ctrl state captured at button-press is used).
        if target == 'text/uri-list' or \
                (self._hack_is_osx and self._hack_osx_control_mask) or \
                (not self._hack_is_osx and modifier & Gdk.ModifierType.CONTROL_MASK):
            action = Gdk.DragAction.COPY
        if self.dragdrop_copyonly and Gtk.drag_get_source_widget(context) != self:
            action = Gdk.DragAction.COPY
        Gdk.drag_status(context, action, etime)
        return True
    def show_properties_dialog(self):
        # Opens the track properties dialog for the current selection.
        from xlgui import properties
        items = self.get_selected_items()
        # If only one track is selected, we expand `tracks` to include all
        # tracks in the playlist... except for really large playlists, this
        # essentially hangs. Instead, only show all tracks *if* the playlist
        # size is less than 100.
        #
        # A better option would be to lazy load the files, but I'm too lazy
        # to implement that now.. :)
        if len(items) == 1 and len(self.playlist) < 100:
            tracks = self.playlist[:]
            current_position = items[0][0]
            with_extras = True
        else:
            tracks = [i[1] for i in items]
            current_position = 0
            with_extras = False
        properties.TrackPropertiesDialog(None, tracks,
            current_position, with_extras)
    def on_provider_removed(self, provider):
        """
        Called when a column provider is removed
        """
        columns = settings.get_option('gui/columns')
        if provider.name in columns:
            columns.remove(provider.name)
            settings.set_option('gui/columns', columns)
    def modelfilter_visible_func(self, model, iter, data):
        # Visibility callback for the filter model: a row is shown when no
        # filter is active or its track matches the current matcher.
        if self._filter_matcher is not None:
            track = model.get_value(iter, 0)
            return self._filter_matcher.match(trax.SearchResultTrack(track))
        return True
class PlaylistModel(Gtk.ListStore):
    """ListStore that mirrors a playlist's contents.

    Row layout: column 0 holds the Track object, column 1 the status
    pixbuf (play/pause/stop marker), and columns 2+ the formatted
    strings for each configured playlist column. The model keeps
    itself in sync with the playlist and playback state via events,
    and loads large batches of tracks on a background thread.
    """
    __gsignals__ = {
        # Called with true indicates starting operation, False ends op
        'data-loading': (
            GObject.SignalFlags.RUN_LAST,
            None,
            (GObject.TYPE_BOOLEAN,)
        )
    }
    def __init__(self, playlist, columns, player):
        # playlist: backing playlist; columns: list of column provider
        # names; player: player whose state drives the status icons.
        Gtk.ListStore.__init__(self, int) # real types are set later
        self.playlist = playlist
        self.columns = columns
        self.player = player
        # True while a batch of tracks is being loaded; further adds are
        # queued in data_load_queue until the current batch finishes.
        self.data_loading = False
        self.data_load_queue = []
        self.coltypes = [object, GdkPixbuf.Pixbuf] + [providers.get_provider('playlist-columns', c).datatype for c in columns]
        self.set_column_types(self.coltypes)
        # Debounce state for tag-change redraws (see on_track_tags_changed).
        self._redraw_timer = None
        self._redraw_queue = []
        event.add_ui_callback(self.on_tracks_added,
                "playlist_tracks_added", playlist)
        event.add_ui_callback(self.on_tracks_removed,
                "playlist_tracks_removed", playlist)
        event.add_ui_callback(self.on_current_position_changed,
                "playlist_current_position_changed", playlist)
        event.add_ui_callback(self.on_spat_position_changed,
                "playlist_spat_position_changed", playlist)
        event.add_ui_callback(self.on_playback_state_change,
                "playback_track_start", self.player)
        event.add_ui_callback(self.on_playback_state_change,
                "playback_track_end", self.player)
        event.add_ui_callback(self.on_playback_state_change,
                "playback_player_pause", self.player)
        event.add_ui_callback(self.on_playback_state_change,
                "playback_player_resume", self.player)
        event.add_ui_callback(self.on_track_tags_changed,
                "track_tags_changed")
        event.add_ui_callback(self.on_option_set, "gui_option_set")
        self._setup_icons()
        self.on_tracks_added(None, self.playlist, list(enumerate(self.playlist))) # populate the list
    def _setup_icons(self):
        # Builds the status pixbufs: play/pause, the same overlaid with a
        # small stop marker (stop-after-track), a plain stop marker, and a
        # fully transparent placeholder, all scaled to the playlist font.
        self.play_pixbuf = icons.ExtendedPixbuf(
            icons.MANAGER.pixbuf_from_stock(Gtk.STOCK_MEDIA_PLAY))
        self.pause_pixbuf = icons.ExtendedPixbuf(
            icons.MANAGER.pixbuf_from_stock(Gtk.STOCK_MEDIA_PAUSE))
        self.stop_pixbuf = icons.ExtendedPixbuf(
            icons.MANAGER.pixbuf_from_stock(Gtk.STOCK_MEDIA_STOP))
        # NOTE(review): the '/ 2' below is Python 2 integer division;
        # under Python 3 it yields floats -- verify when porting.
        stop_overlay_pixbuf = self.stop_pixbuf.scale_simple(
            dest_width=self.stop_pixbuf.pixbuf.get_width() / 2,
            dest_height=self.stop_pixbuf.pixbuf.get_height() / 2,
            interp_type=GdkPixbuf.InterpType.BILINEAR)
        stop_overlay_pixbuf = stop_overlay_pixbuf.move(
            offset_x=stop_overlay_pixbuf.pixbuf.get_width(),
            offset_y=stop_overlay_pixbuf.pixbuf.get_height(),
            resize=True)
        # '&' composites two ExtendedPixbufs (overlay in bottom-right).
        self.play_stop_pixbuf = self.play_pixbuf & stop_overlay_pixbuf
        self.pause_stop_pixbuf = self.pause_pixbuf & stop_overlay_pixbuf
        self.clear_pixbuf = self.play_pixbuf.copy()
        self.clear_pixbuf.pixbuf.fill(0x00000000)
        font = settings.get_option('gui/playlist_font', None)
        if font is not None:
            # get default font
            default = float(Gtk.Widget.get_default_style().font_desc.get_size())
            new_font = Pango.FontDescription(font).get_size()
            # scale pixbuf accordingly
            t = GdkPixbuf.InterpType.BILINEAR
            s = max(int(self.play_pixbuf.get_width() * (new_font/default)),1)
            self.play_pixbuf = self.play_pixbuf.scale_simple(s,s,t)
            self.pause_pixbuf = self.pause_pixbuf.scale_simple(s,s,t)
            self.stop_pixbuf = self.stop_pixbuf.scale_simple(s,s,t)
            self.play_stop_pixbuf = self.play_stop_pixbuf.scale_simple(s,s,t)
            self.pause_stop_pixbuf = self.pause_stop_pixbuf.scale_simple(s,s,t)
            self.clear_pixbuf = self.clear_pixbuf.scale_simple(s,s,t)
    def _refresh_icons(self):
        # Regenerates all pixbufs and rewrites column 1 of every row.
        self._setup_icons()
        for i,row in enumerate(self):
            row[1] = self.icon_for_row(i).pixbuf
    def on_option_set(self, typ, obj, data):
        # Font changes require rescaling the status icons.
        if data == "gui/playlist_font":
            GLib.idle_add(self._refresh_icons)
    def icon_for_row(self, row):
        # Returns the status ExtendedPixbuf appropriate for the given row
        # index, based on playback state and the stop-after-track marker.
        # TODO: we really need some sort of global way to say "is this playlist/pos the current one?
        if self.playlist.current_position == row and \
                self.playlist[row] == self.player.current and \
                self.playlist == self.player.queue.current_playlist:
            state = self.player.get_state()
            spat = self.playlist.spat_position == row
            if state == 'playing':
                if spat:
                    return self.play_stop_pixbuf
                else:
                    return self.play_pixbuf
            elif state == 'paused':
                if spat:
                    return self.pause_stop_pixbuf
                else:
                    return self.pause_pixbuf
        if self.playlist.spat_position == row:
            return self.stop_pixbuf
        return self.clear_pixbuf
    def update_icon(self, position):
        # Refreshes the status pixbuf of one row, if it still exists.
        iter = self.iter_nth_child(None, position)
        if iter is not None:
            self.set(iter, 1, self.icon_for_row(position).pixbuf)
    ### Event callbacks to keep the model in sync with the playlist ###
    def on_tracks_added(self, event_type, playlist, tracks):
        self._load_data(tracks)
    def on_tracks_removed(self, event_type, playlist, tracks):
        # Remove back-to-front so positions stay valid while deleting.
        tracks.reverse()
        for position, track in tracks:
            self.remove(self.iter_nth_child(None, position))
    def on_current_position_changed(self, event_type, playlist, positions):
        for position in positions:
            if position < 0:
                continue
            GLib.idle_add(self.update_icon, position)
    def on_spat_position_changed(self, event_type, playlist, positions):
        # NOTE(review): xrange is Python 2 only.
        spat_position = min(positions)
        for position in xrange(spat_position, len(self)):
            GLib.idle_add(self.update_icon, position)
    def on_playback_state_change(self, event_type, player_obj, track):
        position = self.playlist.current_position
        if position < 0 or position >= len(self):
            return
        GLib.idle_add(self.update_icon, position)
    @guiutil.idle_add() # sync this call to prevent race conditions
    def on_track_tags_changed(self, type, track, tag):
        # Debounce: collect changed tracks for 100ms, then redraw once.
        if not track or not \
            settings.get_option('gui/sync_on_tag_change', True) or not\
            tag in self.columns:
            return
        if self._redraw_timer:
            GLib.source_remove(self._redraw_timer)
        self._redraw_queue.append( track )
        self._redraw_timer = GLib.timeout_add(100, self._on_track_tags_changed)
    def _on_track_tags_changed(self):
        # Timer body: re-format all columns of every queued track's row(s).
        self._redraw_timer = None
        tracks = {}
        redraw_queue = self._redraw_queue
        self._redraw_queue = []
        # Key by location so duplicate events collapse to one redraw.
        for track in redraw_queue:
            tracks[track.get_loc_for_io()] = track
        for row in self:
            track = tracks.get( row[0].get_loc_for_io() )
            if track is not None:
                track_data = [providers.get_provider('playlist-columns', name).formatter.format(track) for name in self.columns]
                for i in range(len(track_data)):
                    row[2+i] = track_data[i]
    #
    # Loading data into the playlist:
    #
    # - Profiler reveals that most of the playlist loading time is spent in two
    #   places
    #   - Formatting values
    #   - Converting them to GValue objects for insert into the treeview
    #
    # Now, the program is annoyingly blocked when this happens, so we need to
    # process new tracks on a different thread, and show some kind of loading
    # indicator instead. That's what these four functions help us do.
    #
    def _load_data(self, tracks):
        # Don't allow race condition between adds.. there's probably a race
        # condition for removal
        if self.data_loading:
            self.data_load_queue.extend(tracks)
            return
        # get column types
        # NOTE(review): xrange is Python 2 only.
        coltypes = [self.get_column_type(i) for i in xrange(self.get_n_columns())]
        formatters = [providers.get_provider('playlist-columns', name).formatter.format for name in self.columns]
        self.data_loading = True
        self.emit('data-loading', True)
        # Small batches are formatted synchronously; large ones go to a
        # worker thread so the UI stays responsive.
        if len(tracks) > 50:
            self._load_data_thread(coltypes, formatters, tracks)
        else:
            render_data = self._load_data_fn(coltypes, formatters, tracks)
            self._load_data_done(render_data)
    @common.threaded
    def _load_data_thread(self, coltypes, formatters, tracks):
        # Background half: format off-thread, then hop back to the UI
        # thread to do the actual inserts.
        render_data = self._load_data_fn(coltypes, formatters, tracks)
        GLib.idle_add(self._load_data_done, render_data)
    def _load_data_fn(self, coltypes, formatters, tracks):
        # Formats each (position, track) pair into ready-to-insert GValues.
        # NOTE(review): izip is Python 2 itertools API.
        Value = GObject.Value
        render_data = []
        for position, track in tracks:
            track_data = [track, self.icon_for_row(position).pixbuf] + [formatter(track) for formatter in formatters]
            render_data.append((position, [Value(typ, val) for typ, val in izip(coltypes, track_data)]))
        return render_data
    def _load_data_done(self, render_data):
        # UI-thread half: insert the prepared rows, then drain anything
        # that was queued while this batch was loading.
        for args in render_data:
            self.insert(*args)
        self.data_loading = False
        self.emit('data-loading', False)
        if self.data_load_queue:
            tracks = self.data_load_queue
            self.data_load_queue = None
            self._load_data(tracks)
| strahlc/exaile | xlgui/widgets/playlist.py | Python | gpl-2.0 | 59,205 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Uploads files to Google Storage content addressed."""
import hashlib
import optparse
import os
import Queue
import re
import stat
import sys
import threading
import time
from download_from_google_storage import check_bucket_permissions
from download_from_google_storage import get_sha1
from download_from_google_storage import Gsutil
from download_from_google_storage import printer_worker
from download_from_google_storage import GSUTIL_DEFAULT_PATH
# Command-line usage text for optparse; "%prog" is replaced by the script name.
USAGE_STRING = """%prog [options] target [target2 ...].
Target is the file intended to be uploaded to Google Storage.
If target is "-", then a list of files will be taken from standard input
This script will generate a file (original filename).sha1 containing the
sha1 sum of the uploaded file.
It is recommended that the .sha1 file is checked into the repository,
the original file removed from the repository, and a hook added to the
DEPS file to call download_from_google_storage.py.
Example usages
--------------
Scan the current directory and upload all files larger than 1MB:
find . -name .svn -prune -o -size +1000k -type f -print0 | %prog -0 -b bkt -
(Replace "bkt" with the name of a writable bucket.)
"""
def get_md5(filename):
  """Returns the hex MD5 digest of the file at |filename|.

  Reads in 1 MiB chunks so arbitrarily large files don't need to fit
  in memory.
  """
  md5_calculator = hashlib.md5()
  with open(filename, 'rb') as f:
    while True:
      chunk = f.read(1024*1024)
      if not chunk:
        break
      md5_calculator.update(chunk)
  return md5_calculator.hexdigest()


def get_md5_cached(filename):
  """Don't calculate the MD5 if we can find a .md5 file.

  Looks for a '<filename>.md5' sidecar containing a 32-char hex digest.
  If the sidecar is missing OR its contents are not a valid digest, the
  MD5 is computed from the file and the sidecar is (re)written.

  Fix: the original returned None when the sidecar existed but did not
  contain a valid hash; now we fall through and recompute instead.
  """
  md5_filename = '%s.md5' % filename
  if os.path.exists(md5_filename):
    # See if we can find an existing MD5 sum stored in a file.
    with open(md5_filename) as f:
      md5_match = re.search('([a-z0-9]{32})', f.read())
    if md5_match:
      return md5_match.group(1)
  # Sidecar missing or invalid: compute and cache the digest. The digest
  # is plain ASCII hex, so text mode is safe on both Python 2 and 3.
  md5_hash = get_md5(filename)
  with open(md5_filename, 'w') as f:
    f.write(md5_hash)
  return md5_hash
def _upload_worker(
    thread_num, upload_queue, base_url, gsutil, md5_lock, force,
    use_md5, stdout_queue, ret_codes):
  """Uploader thread body: drains (filename, sha1) pairs from upload_queue.

  Skips files already present in the bucket with a matching MD5 (unless
  |force| is set), uploads the rest to '<base_url>/<sha1>', and marks
  executable files with custom metadata. A (None, None) queue entry is
  the shutdown sentinel. Progress goes to stdout_queue; (code, message)
  failures go to ret_codes.
  """
  while True:
    filename, sha1_sum = upload_queue.get()
    if not filename:
      # Sentinel: no more work for this thread.
      break
    file_url = '%s/%s' % (base_url, sha1_sum)
    if gsutil.check_call('ls', file_url)[0] == 0 and not force:
      # File exists, check MD5 hash.
      _, out, _ = gsutil.check_call('ls', '-L', file_url)
      etag_match = re.search('ETag:\s+([a-z0-9]{32})', out)
      if etag_match:
        remote_md5 = etag_match.group(1)
        # Calculate the MD5 checksum to match it to Google Storage's ETag.
        # The lock serializes local MD5 computation to avoid HD thrashing.
        with md5_lock:
          if use_md5:
            local_md5 = get_md5_cached(filename)
          else:
            local_md5 = get_md5(filename)
        if local_md5 == remote_md5:
          stdout_queue.put(
              '%d> File %s already exists and MD5 matches, upload skipped' %
              (thread_num, filename))
          continue
    stdout_queue.put('%d> Uploading %s...' % (
        thread_num, filename))
    code, _, err = gsutil.check_call('cp', filename, file_url)
    if code != 0:
      ret_codes.put(
          (code,
           'Encountered error on uploading %s to %s\n%s' %
           (filename, file_url, err)))
      continue
    # Mark executable files with the header "x-goog-meta-executable: 1" which
    # the download script will check for to preserve the executable bit.
    if not sys.platform.startswith('win'):
      if os.stat(filename).st_mode & stat.S_IEXEC:
        code, _, err = gsutil.check_call('setmeta', '-h',
                                         'x-goog-meta-executable:1', file_url)
        if code:
          ret_codes.put(
              (code,
               'Encountered error on setting metadata on %s\n%s' %
               (file_url, err)))
def get_targets(args, parser, use_null_terminator):
  """Resolves the list of files to upload from the positional arguments.

  An empty argument list is a usage error. A single "-" argument means
  the file list is read from stdin, split on NUL bytes when
  |use_null_terminator| is set and on newlines otherwise.
  """
  if not args:
    parser.error('Missing target.')
  if args != ['-']:
    return args
  # Take stdin as a newline or null separated list of files.
  raw = sys.stdin.read()
  return raw.split('\0') if use_null_terminator else raw.splitlines()
def upload_to_google_storage(
    input_filenames, base_url, gsutil, force,
    use_md5, num_threads, skip_hashing):
  """Hashes |input_filenames|, writes .sha1 sidecars, and uploads the files.

  SHA1 hashing runs on the main thread (disk-bound); uploads are fanned
  out to |num_threads| workers plus one printer thread. Returns the worst
  return code encountered (0 on success).
  """
  # We only want one MD5 calculation happening at a time to avoid HD thrashing.
  md5_lock = threading.Lock()
  # Start up all the worker threads plus the printer thread.
  all_threads = []
  ret_codes = Queue.Queue()
  ret_codes.put((0, None))
  upload_queue = Queue.Queue()
  upload_timer = time.time()
  stdout_queue = Queue.Queue()
  printer_thread = threading.Thread(target=printer_worker, args=[stdout_queue])
  printer_thread.daemon = True
  printer_thread.start()
  for thread_num in range(num_threads):
    t = threading.Thread(
        target=_upload_worker,
        args=[thread_num, upload_queue, base_url, gsutil, md5_lock,
              force, use_md5, stdout_queue, ret_codes])
    t.daemon = True
    t.start()
    all_threads.append(t)
  # We want to hash everything in a single thread since its faster.
  # The bottleneck is in disk IO, not CPU.
  hashing_start = time.time()
  for filename in input_filenames:
    if not os.path.exists(filename):
      stdout_queue.put('Main> Error: %s not found, skipping.' % filename)
      continue
    if os.path.exists('%s.sha1' % filename) and skip_hashing:
      # Reuse the existing .sha1 sidecar instead of rehashing.
      stdout_queue.put(
          'Main> Found hash for %s, sha1 calculation skipped.' % filename)
      with open(filename + '.sha1', 'rb') as f:
        sha1_file = f.read(1024)
      if not re.match('^([a-z0-9]{40})$', sha1_file):
        print >> sys.stderr, 'Invalid sha1 hash file %s.sha1' % filename
        return 1
      upload_queue.put((filename, sha1_file))
      continue
    stdout_queue.put('Main> Calculating hash for %s...' % filename)
    sha1_sum = get_sha1(filename)
    with open(filename + '.sha1', 'wb') as f:
      f.write(sha1_sum)
    stdout_queue.put('Main> Done calculating hash for %s.' % filename)
    upload_queue.put((filename, sha1_sum))
  hashing_duration = time.time() - hashing_start
  # Wait for everything to finish.
  for _ in all_threads:
    upload_queue.put((None, None))  # To mark the end of the work queue.
  for t in all_threads:
    t.join()
  stdout_queue.put(None)
  printer_thread.join()
  # Print timing information.
  # NOTE(review): '%1f' is a width-1 float format (same output as '%f');
  # '%.1f' was probably intended -- confirm before changing output format.
  print 'Hashing %s files took %1f seconds' % (
      len(input_filenames), hashing_duration)
  print 'Uploading took %1f seconds' % (time.time() - upload_timer)
  # See if we ran into any errors.
  max_ret_code = 0
  for ret_code, message in ret_codes.queue:
    max_ret_code = max(ret_code, max_ret_code)
    if message:
      print >> sys.stderr, message
  if not max_ret_code:
    print 'Success!'
  return max_ret_code
def main(args):
    """Parse command-line options and upload the requested files to
    Google Storage.

    Returns a process exit code: 0 on success, non-zero on failure
    (propagated from the permission check or the upload itself).
    """
    parser = optparse.OptionParser(USAGE_STRING)
    parser.add_option('-b', '--bucket',
                      help='Google Storage bucket to upload to.')
    parser.add_option('-e', '--boto', help='Specify a custom boto file.')
    parser.add_option('-f', '--force', action='store_true',
                      help='Force upload even if remote file exists.')
    parser.add_option('-g', '--gsutil_path', default=GSUTIL_DEFAULT_PATH,
                      help='Path to the gsutil script.')
    parser.add_option('-m', '--use_md5', action='store_true',
                      help='Generate MD5 files when scanning, and don\'t check '
                      'the MD5 checksum if a .md5 file is found.')
    parser.add_option('-t', '--num_threads', default=1, type='int',
                      help='Number of uploader threads to run.')
    parser.add_option('-s', '--skip_hashing', action='store_true',
                      help='Skip hashing if .sha1 file exists.')
    parser.add_option('-0', '--use_null_terminator', action='store_true',
                      help='Use \\0 instead of \\n when parsing '
                      'the file list from stdin. This is useful if the input '
                      'is coming from "find ... -print0".')
    (options, args) = parser.parse_args()

    # Enumerate our inputs.
    input_filenames = get_targets(args, parser, options.use_null_terminator)

    # Make sure we can find a working instance of gsutil.
    if os.path.exists(GSUTIL_DEFAULT_PATH):
        gsutil = Gsutil(GSUTIL_DEFAULT_PATH, boto_path=options.boto)
    else:
        gsutil = None
        for path in os.environ["PATH"].split(os.pathsep):
            # Skip PATH entries that do not exist or cannot be listed
            # (previously an unreadable entry raised OSError).
            try:
                entries = os.listdir(path)
            except OSError:
                continue
            if 'gsutil' in entries:
                gsutil = Gsutil(os.path.join(path, 'gsutil'),
                                boto_path=options.boto)
                # Honor PATH semantics: use the first match, not the last.
                break
    if not gsutil:
        parser.error('gsutil not found in %s, bad depot_tools checkout?' %
                     GSUTIL_DEFAULT_PATH)

    base_url = 'gs://%s' % options.bucket

    # Check we have a valid bucket with valid permissions.
    code = check_bucket_permissions(base_url, gsutil)
    if code:
        return code

    return upload_to_google_storage(
        input_filenames, base_url, gsutil, options.force, options.use_md5,
        options.num_threads, options.skip_hashing)
# Script entry point: exit with the status code returned by main().
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| HackFisher/depot_tools | upload_to_google_storage.py | Python | bsd-3-clause | 9,187 |
"""
edge Detection GPU implementation : SOBEL ALGORITHM
@philipchicco
"""
# cpu host imports
from tools.Picture import Picture
from tools.edgeDetector_Impl import EdgeDetector
import numpy as np
import math
import string
# gpu device imports : NVIDIA CUDA
import pycuda.autoinit # memory management
import pycuda.driver as drv # cuda driver
import pycuda.gpuarray as gpuarray # gpu array handle
from pycuda.compiler import SourceModule # Module wrapper for C
class edgeDetector_gpu(EdgeDetector):
    """GPU implementation of Sobel edge detection using PyCUDA.

    The CUDA kernel source is kept as a Python string template; the image
    dimensions are substituted via %-formatting ('WIDTH'/'HEIGHT' keys)
    before compilation with SourceModule.
    """

    def __init__(self, kernel_code=None):
        # Optional custom CUDA kernel source.  When None, device_kernel()
        # falls back to an (empty) default template.
        self.kernel_code = kernel_code

    # to be updated
    def device_kernel(self, width, height, kernel_code=None):
        """
        Define device kernel function and compile
        :param height: image height
        :param width: image width
        :param kernel_code: kernel function definition
        :return: kernel code handle

        NOTE(review): sobel_edges() below passes image.shape[0] (rows) as
        `width` and shape[1] (cols) as `height` -- the names look swapped
        for non-square images; confirm against the kernel's indexing.
        """
        if kernel_code is None:  # use default
            self.kernel_code = """
            // add code here
            """
        else:
            assert isinstance(kernel_code, str)
            self.kernel_code = kernel_code
        # get kernel_code
        # compile code: substitute image dimensions into the template
        template_code = self.kernel_code % {
            'WIDTH': width,
            'HEIGHT': height
        }
        #print(self.kernel_code)
        self.module = SourceModule(template_code)
        # The compiled module is expected to define a `sobel_edges` kernel.
        return self.module.get_function("sobel_edges")

    def sobel_edges(self, image, channel=1):
        """
        GPU IMplementation of sobel algorithm
        :param image: image ndarray (single-channel 2D array)
        :param channel: color channels (currently unused -- TODO confirm intent)
        :return: tuple (edged image array as float32, elapsed GPU time)
        """
        # allocate gpu device memory for array: convert to float32
        # auto memory allocation
        if isinstance(image, np.ndarray):  # its a must
            lw, lh = image.shape  # (rows, cols) of the 2D input
            array_gpu_In = gpuarray.to_gpu(image.flatten().astype(np.float32)).astype(np.float32)
            array_gpu_Out = gpuarray.empty((lw, lh), dtype=np.float32).astype(np.float32)
        else:
            print("Error: image array is not instance of numpy.ndarray")
            return

        # configurations
        # block
        bdim = (32, 32, 1)  # x, y, z
        dx, mx = divmod(lh, bdim[0])  # cols
        dy, my = divmod(lw, bdim[1])  # rows
        # grid
        # NOTE(review): the second component uses `+ bdim[1]` while the first
        # uses `* bdim[0]` -- this looks like a typo.  Also, PyCUDA grid
        # dimensions are counted in blocks, so `(dx + (mx > 0), dy + (my > 0))`
        # may be what was intended; the in-kernel bounds check masks the
        # discrepancy for correctness but not for launch size.
        gdim = ((dx + (mx > 0)) * bdim[0], (dy + (my > 0)) + bdim[1])

        # execute kernel
        kernel = self.device_kernel(lw, lh, kernel_code=self.sobel_edges_kernel())  # compile
        # calculate time
        start = drv.Event()
        end = drv.Event()
        start.record()
        # kernel execution
        kernel(array_gpu_In, array_gpu_Out, block=bdim, grid=gdim)
        end.record()
        end.synchronize()
        start.synchronize()
        # collect result : convert back to original format
        # time_till() reports milliseconds; 1e-3 is added as a small epsilon
        return (array_gpu_Out.get().reshape(lw, lh)).astype(np.float32), start.time_till(end) + 1e-3

    def brightness(self):
        """Return CUDA source for a brightness kernel (value = 2*pixel + 100).

        NOTE(review): the index computation in the kernel below uses
        `blockIdx.y * blockIdx.y` / `blockIdx.x * blockIdx.x`; the usual form
        is `blockIdx.{x,y} * blockDim.{x,y}` -- confirm before using this
        kernel (it is not referenced elsewhere in this class).
        """
        kernel_code = """
        #include <math.h>

        __global__ void brightness(float *matrix_in, float *matrix_out)
        {
            // 2D threads
            int pos_y = blockIdx.y * blockIdx.y + threadIdx.y;
            int pos_x = blockIdx.x * blockIdx.x + threadIdx.x;

            // brightness value = alpha * pixel + beta
            if (pos_y < %(HEIGHT)s && pos_x < %(WIDTH)s){
                float value = (2.0 * matrix_in[pos_y * %(WIDTH)s + pos_x]) + 100.0;
                matrix_out[pos_y * %(WIDTH)s + pos_x] = value;
            }
        }
        """
        return kernel_code

    def sobel_edges_kernel(self):
        """Return CUDA source for the Sobel kernel compiled by device_kernel().

        Computes horizontal/vertical gradients over a 3x3 neighbourhood,
        combines them as sqrt(gx^2 + gy^2) and clamps the result to 255.
        """
        kernel = """
        #include <math.h>

        __global__ void sobel_edges(float *matrix_in, float *matrix_out)
        {
            // pixel location
            int pos_y = blockIdx.y * blockDim.y + threadIdx.y;
            int pos_x = blockIdx.x * blockDim.x + threadIdx.x;

            int ret_x = 0;
            int ret_y = 0;
            int res = 0;

            //
            if ( (pos_y >= 0 && pos_y < %(HEIGHT)s) && (pos_x >= 0 && pos_x < %(WIDTH)s) )
            {
                ret_x += -matrix_in[%(WIDTH)s *(pos_y - 1) + (pos_x - 1)] + matrix_in[%(WIDTH)s * (pos_y -1)+(pos_x+1)]
                        -2*matrix_in[%(WIDTH)s *(pos_y) + (pos_x - 1)] + 2*matrix_in[%(WIDTH)s * (pos_y)+(pos_x+1)]
                        -matrix_in[%(WIDTH)s *(pos_y + 1) + (pos_x - 1)] + matrix_in[%(WIDTH)s * (pos_y +1)+(pos_x+1)];

                ret_y += matrix_in[%(WIDTH)s * (pos_y-1) + (pos_x-1)] + 2*matrix_in[%(WIDTH)s *(pos_y-1)+(pos_x+1)] +
                         matrix_in[%(WIDTH)s * (pos_y-1)+(pos_x+1)] - matrix_in[%(WIDTH)s *(pos_y+1)+(pos_x-1)]
                        -2*matrix_in[%(WIDTH)s * (pos_y+1)+(pos_x)] - matrix_in[%(WIDTH)s *(pos_y+1)+(pos_x+1)];

                ret_x = ret_x/5;
                ret_y = ret_y/5;

                res = (int)sqrtf(powf((float)ret_x, 2) + powf((float)ret_y, 2));
                if (res > 255)
                    res = 255;
                matrix_out[pos_y * %(WIDTH)s + pos_x] = res;
            }
        }
        // end of definitions
        """
        return kernel
| PhilipChicco/PyCudaImageProcessing | PyCUDAImageProcessing/gpu/edgeDetector_gpu.py | Python | mit | 5,327 |
from mtools.util.logevent import LogEvent
from mtools.util.pattern import json2pattern
from base_filter import BaseFilter
class LogLineFilter(BaseFilter):
    """Match log events against namespace / operation / thread / query
    pattern criteria.

    Any combination of the four criteria may be active at once; accept()
    only passes events that satisfy every active criterion.
    """

    filterArgs = [
        ('--namespace', {'action': 'store', 'metavar': 'NS', 'help': 'only output log lines matching operations on NS.'}),
        ('--operation', {'action': 'store', 'metavar': 'OP', 'help': 'only output log lines matching operations of type OP.'}),
        ('--thread', {'action': 'store', 'help': 'only output log lines of thread THREAD.'}),
        ('--pattern', {'action': 'store', 'help': 'only output log lines that query with the pattern PATTERN (queries, getmores, updates, removes)'})
    ]

    def __init__(self, mlogfilter):
        BaseFilter.__init__(self, mlogfilter)

        args = self.mlogfilter.args

        self.namespace = None
        self.operation = None
        self.thread = None
        self.pattern = None

        if 'namespace' in args and args['namespace']:
            self.namespace = args['namespace']
            self.active = True

        if 'operation' in args and args['operation']:
            self.operation = args['operation']
            self.active = True

        if 'thread' in args and args['thread']:
            self.thread = args['thread']
            self.active = True

        if 'pattern' in args and args['pattern']:
            # Normalize the JSON query string into a comparable pattern object.
            self.pattern = json2pattern(args['pattern'])
            self.active = True

    def accept(self, logevent):
        """Return True if logevent matches every active criterion.

        If several filters are active, all have to agree.  (A dead local
        variable `res` was removed; the guard clauses below implement the
        semantics directly.)
        """
        if self.namespace and logevent.namespace != self.namespace:
            return False
        if self.operation and logevent.operation != self.operation:
            return False
        if self.thread and logevent.thread != self.thread:
            return False
        if self.pattern and logevent.pattern != self.pattern:
            return False
        return True
| corymintz/mtools | mtools/mlogfilter/filters/logline_filter.py | Python | apache-2.0 | 2,092 |
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
    """Tool-info module for the AProVE termination prover."""

    def executable(self):
        return util.find_executable('AProVE.sh')

    def name(self):
        return 'AProVE'

    def determine_result(self, returncode, returnsignal, output, isTimeout):
        """Map AProVE's textual verdict to a benchexec result constant.

        :param output: list of output lines; joined and searched for the
            verdict.  YES/TRUE mean the property holds, FALSE/NO mean
            non-termination, anything else is unknown.
        """
        output = '\n'.join(output)
        if "YES" in output or "TRUE" in output:
            return result.RESULT_TRUE_PROP
        if "FALSE" in output or "NO" in output:
            return result.RESULT_FALSE_TERMINATION
        # (An unreachable trailing `return status`, which referenced an
        # undefined name, was removed here.)
        return result.RESULT_UNKNOWN
| bjowac/impara-benchexec | benchexec/tools/aprove.py | Python | apache-2.0 | 1,440 |
from django.contrib import admin
# Register your models here.
from .models import Posts
@admin.register(Posts)
class PostAdmin(admin.ModelAdmin):
    # Columns shown in the Django admin changelist for Posts.
    # str_edu_category / str_tags / post_category are presumably fields or
    # methods on the Posts model -- TODO confirm against posts/models.py.
    list_display = ('id', 'title', 'date', 'str_edu_category', 'str_tags', 'post_category')
| ran777/edu_intell | posts/admin.py | Python | gpl-3.0 | 241 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
BarPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import plotly as plt
import plotly.graph_objs as go
import numpy as np
from qgis.core import (QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterFileDestination,
QgsProcessingOutputHtml)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.tools import vector
class PolarPlot(QgisAlgorithm):
    """QGIS Processing algorithm: render the values of one layer field as a
    polar (area) plot, written to a standalone HTML file via plotly."""

    # Parameter/output identifiers used by the Processing framework.
    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'
    NAME_FIELD = 'NAME_FIELD'
    VALUE_FIELD = 'VALUE_FIELD'

    def group(self):
        return self.tr('Graphics')

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
                                                              self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterField(self.NAME_FIELD,
                                                      self.tr('Category name field'), parentLayerParameterName=self.INPUT))  # FIXME unused?
        self.addParameter(QgsProcessingParameterField(self.VALUE_FIELD,
                                                      self.tr('Value field'), parentLayerParameterName=self.INPUT))
        self.addParameter(QgsProcessingParameterFileDestination(self.OUTPUT, self.tr('Polar plot'), self.tr('HTML files (*.html)')))
        self.addOutput(QgsProcessingOutputHtml(self.OUTPUT, self.tr('Polar plot')))

    def name(self):
        return 'polarplot'

    def displayName(self):
        return self.tr('Polar plot')

    def processAlgorithm(self, parameters, context, feedback):
        source = self.parameterAsSource(parameters, self.INPUT, context)
        namefieldname = self.parameterAsString(parameters, self.NAME_FIELD, context)  # NOQA FIXME unused?
        valuefieldname = self.parameterAsString(parameters, self.VALUE_FIELD, context)

        output = self.parameterAsFileOutput(parameters, self.OUTPUT, context)

        values = vector.values(source, valuefieldname)
        # One wedge per feature, spaced evenly around the full circle;
        # plotly's polar Area trace takes radii (r) and angles in degrees (t).
        data = [go.Area(r=values[valuefieldname],
                        t=np.degrees(np.arange(0.0, 2 * np.pi, 2 * np.pi / len(values[valuefieldname]))))]
        plt.offline.plot(data, filename=output, auto_open=False)

        return {self.OUTPUT: output}
| nirvn/QGIS | python/plugins/processing/algs/qgis/PolarPlot.py | Python | gpl-2.0 | 3,399 |
# Copyright 2017 Tecnativa - Vicent Cubells <vicent.cubells@tecnativa.com>
# Copyright 2018 Camptocamp SA - Julien Coux
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.exceptions import UserError
from odoo.tests.common import SavepointCase
class TestStockSplitPicking(SavepointCase):
    """Tests for stock_split_picking: splitting a picking by done
    quantities, by move, and by manual move selection."""

    @classmethod
    def setUpClass(cls):
        # Shared fixture: one 10-unit outgoing picking (stock -> customers)
        # with a single stock move.
        super(TestStockSplitPicking, cls).setUpClass()
        cls.src_location = cls.env.ref("stock.stock_location_stock")
        cls.dest_location = cls.env.ref("stock.stock_location_customers")
        cls.product = cls.env["product.product"].create({"name": "Test product"})
        cls.partner = cls.env["res.partner"].create({"name": "Test partner"})
        cls.picking = cls.env["stock.picking"].create(
            {
                "partner_id": cls.partner.id,
                "picking_type_id": cls.env.ref("stock.picking_type_out").id,
                "location_id": cls.src_location.id,
                "location_dest_id": cls.dest_location.id,
            }
        )
        cls.move = cls.env["stock.move"].create(
            {
                "name": "/",
                "picking_id": cls.picking.id,
                "product_id": cls.product.id,
                "product_uom_qty": 10,
                "product_uom": cls.product.uom_id.id,
                "location_id": cls.src_location.id,
                "location_dest_id": cls.dest_location.id,
            }
        )

    def test_stock_split_picking(self):
        """Splitting by done quantity: 10 units become 4 done + 6 backorder."""
        # Picking state is draft
        self.assertEqual(self.picking.state, "draft")
        # We can't split a draft picking
        with self.assertRaises(UserError):
            self.picking.split_process()
        # Confirm picking
        self.picking.action_confirm()
        # We can't split an unassigned picking
        with self.assertRaises(UserError):
            self.picking.split_process()
        # We assign quantities in order to split
        self.picking.action_assign()
        move_line = self.env["stock.move.line"].search(
            [("picking_id", "=", self.picking.id)], limit=1
        )
        move_line.qty_done = 4.0
        # Split picking: 4 and 6
        self.picking.split_process()
        # We have a picking with 4 units in state assigned
        self.assertAlmostEqual(move_line.qty_done, 4.0)
        self.assertAlmostEqual(move_line.product_qty, 4.0)
        self.assertAlmostEqual(move_line.product_uom_qty, 4.0)
        self.assertAlmostEqual(self.move.quantity_done, 4.0)
        self.assertAlmostEqual(self.move.product_qty, 4.0)
        self.assertAlmostEqual(self.move.product_uom_qty, 4.0)
        self.assertEqual(self.picking.state, "assigned")
        # An another one with 6 units in state assigned
        new_picking = self.env["stock.picking"].search(
            [("backorder_id", "=", self.picking.id)], limit=1
        )
        move_line = self.env["stock.move.line"].search(
            [("picking_id", "=", new_picking.id)], limit=1
        )
        self.assertAlmostEqual(move_line.qty_done, 0.0)
        self.assertAlmostEqual(move_line.product_qty, 6.0)
        self.assertAlmostEqual(move_line.product_uom_qty, 6.0)
        self.assertAlmostEqual(new_picking.move_lines.quantity_done, 0.0)
        self.assertAlmostEqual(new_picking.move_lines.product_qty, 6.0)
        self.assertAlmostEqual(new_picking.move_lines.product_uom_qty, 6.0)
        self.assertEqual(new_picking.state, "assigned")

    def test_stock_split_picking_wizard_move(self):
        """Wizard in 'move' mode: one move per resulting picking."""
        self.move2 = self.move.copy()
        self.assertEqual(self.move2.picking_id, self.picking)
        wizard = (
            self.env["stock.split.picking"]
            .with_context(active_ids=self.picking.ids)
            .create({"mode": "move"})
        )
        wizard.action_apply()
        self.assertNotEqual(self.move2.picking_id, self.picking)
        self.assertEqual(self.move.picking_id, self.picking)

    def test_stock_split_picking_wizard_selection(self):
        """Wizard in 'selection' mode: only the chosen moves are split off."""
        self.move2 = self.move.copy()
        self.assertEqual(self.move2.picking_id, self.picking)
        wizard = (
            self.env["stock.split.picking"]
            .with_context(active_ids=self.picking.ids)
            .create({"mode": "selection", "move_ids": [(6, False, self.move2.ids)]})
        )
        wizard.action_apply()
        self.assertNotEqual(self.move2.picking_id, self.picking)
        self.assertEqual(self.move.picking_id, self.picking)

    def test_stock_picking_split_off_moves(self):
        """_split_off_moves error cases: all lines, or a cancelled picking."""
        with self.assertRaises(UserError):
            # fails because we can't split off all lines
            self.picking._split_off_moves(self.picking.move_lines)
        with self.assertRaises(UserError):
            # fails because we can't split cancelled pickings
            self.picking.action_cancel()
            self.picking._split_off_moves(self.picking.move_lines)
| OCA/stock-logistics-workflow | stock_split_picking/tests/test_stock_split_picking.py | Python | agpl-3.0 | 4,933 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.hooks.kubernetes_engine`."""
import warnings
from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEHook
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.hooks.kubernetes_engine`",
DeprecationWarning,
stacklevel=2,
)
class GKEClusterHook(GKEHook):
    """Deprecated shim kept for backwards compatibility.

    Please use
    `airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook`.
    (The previous message pointed at `hooks.container.GKEHook`, which does
    not match the module-level warning or the actual import above.)
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "This class is deprecated. Please use "
            "`airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook`.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
| airbnb/airflow | airflow/contrib/hooks/gcp_container_hook.py | Python | apache-2.0 | 1,567 |
# (c) 2015, Jonathan Davila <jdavila(at)ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# USAGE: {{ lookup('hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}}
#
# You can skip setting the url if you set the VAULT_ADDR environment variable
# or if you want it to default to localhost:8200
#
# NOTE: Due to a current limitation in the HVAC library there won't
# necessarily be an error if a bad endpoint is specified.
#
# Requires hvac library. Install with pip.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
# Address of the Vault server.  Defaults to a local dev instance and can be
# overridden via the same VAULT_ADDR environment variable the vault CLI uses.
ANSIBLE_HASHI_VAULT_ADDR = os.getenv('VAULT_ADDR', 'http://127.0.0.1:8200')
class HashiVault:
    """Thin wrapper around an authenticated hvac Vault client.

    Expects `url`, `secret` and `token` keyword arguments; raises
    AnsibleError when hvac is missing or the token is rejected.
    """

    def __init__(self, **kwargs):
        try:
            import hvac
        except ImportError:
            # Bug fix: the error was previously constructed but never
            # raised, so a missing hvac led to a confusing NameError below.
            raise AnsibleError("Please pip install hvac to use this module")

        self.url = kwargs.pop('url')
        self.secret = kwargs.pop('secret')
        self.token = kwargs.pop('token')

        self.client = hvac.Client(url=self.url, token=self.token)

        if not self.client.is_authenticated():
            raise AnsibleError("Invalid Hashicorp Vault Token Specified")

    def get(self):
        """Read the configured secret path and return its 'value' field."""
        data = self.client.read(self.secret)

        if data is None:
            raise AnsibleError("The secret %s doesn't seem to exist" % self.secret)
        return data['data']['value']
class LookupModule(LookupBase):
    """Ansible lookup plugin: read secrets from Hashicorp Vault.

    The first term is a space-separated list of key=value pairs, e.g.
    ``secret=secret/hello token=... url=http://myvault:8200``; connection
    parameters are taken from it.  One vault read is performed per term.
    """

    def run(self, terms, variables, **kwargs):
        # Parse connection parameters from the first term only, matching
        # the documented single-term usage.
        vault_dict = {}
        for param in terms[0].split(' '):
            key, value = param.split('=')
            vault_dict[key] = value

        vault_conn = HashiVault(**vault_dict)

        # One read per requested term (the reads are idempotent); the
        # previous unused `key = term.split()[0]` local was removed.
        return [vault_conn.get() for _ in terms]
| goozbach/ansible | lib/ansible/plugins/lookup/hashi_vault.py | Python | gpl-3.0 | 2,701 |
import chardet
from vint.ast.node_type import NodeType
from vint.ast.traversing import traverse, SKIP_CHILDREN
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy_registry import register_policy
@register_policy
class ProhibitMissingScriptEncoding(AbstractPolicy):
    """Warn when a Vim script contains multibyte characters but no
    :scriptencoding command."""

    def __init__(self):
        super(ProhibitMissingScriptEncoding, self).__init__()
        self.description = 'Use scriptencoding when multibyte char exists'
        self.reference = ':help :scriptencoding'
        self.level = Level.WARNING
        # NOTE(review): this flag is never reset between is_valid() calls;
        # verify the policy instance is not reused across multiple files.
        self.has_scriptencoding = False

    def listen_node_types(self):
        # Checked once per script at the top-level node.
        return [NodeType.TOPLEVEL]

    def is_valid(self, node, lint_context):
        """ Whether the specified node is valid.

        This policy prohibit scriptencoding missing when multibyte char exists.
        """
        traverse(node, on_enter=self._check_scriptencoding)

        if self.has_scriptencoding:
            return True

        # No :scriptencoding found; only flag the file if it actually
        # contains non-ASCII content.
        return not self._check_script_has_multibyte_char(lint_context)

    def _check_scriptencoding(self, node):
        # TODO: Use BREAK when implemented
        if self.has_scriptencoding:
            return SKIP_CHILDREN

        node_type = NodeType(node['type'])
        if node_type is not NodeType.EXCMD:
            return

        # ':scripte' is the shortest valid abbreviation of :scriptencoding.
        self.has_scriptencoding = node['str'].startswith('scripte')

    def _check_script_has_multibyte_char(self, lint_context):
        # TODO: Use cache to make performance efficiency
        with lint_context['path'].open(mode='br') as f:
            byte_seq = f.read()
            # Truthy only when the file is non-empty and chardet detects a
            # non-ASCII encoding (returns 0, not False, for empty files).
            return len(byte_seq) and chardet.detect(byte_seq)['encoding'] != 'ascii'
| RianFuro/vint | vint/linting/policy/prohibit_missing_scriptencoding.py | Python | mit | 1,702 |
from orm import model
from orm import fields
from .contact import Contact
from .user import User
class Invoice(model.Model):
    """An invoice owned by a User, optionally linked to a Contact, with
    Product line items attached via the `products` reverse relation."""

    title = fields.CharField(max_length=200)
    owner = fields.ForeignKeyField(User)
    description = fields.CharField(max_length=400, blank=True)
    contact = fields.ForeignKeyField(Contact, blank=True)
    created = fields.DateTimeField()
    invoice_id = fields.IntegerField()
    # Reference numbers: general, ours and the customer's.
    ref = fields.CharField(max_length=200)
    our_ref = fields.CharField(max_length=200, blank=True)
    your_ref = fields.CharField(max_length=200, blank=True)
    payment_type = fields.CharField(max_length=200)
    due_date = fields.DateField()
    reclamation_time = fields.IntegerField()  # presumably days -- TODO confirm unit
    penalty_interest = fields.DecimalField()  # presumably percent -- TODO confirm unit
    info1 = fields.CharField(max_length=200, blank=True)
    info2 = fields.CharField(max_length=200, blank=True)
    status = fields.CharField(max_length=40)
    # Reverse relation: Product rows whose `invoice` FK points here.
    products = fields.HasField('Product', 'invoice')

    def __repr__(self):
        return str(self.title)
class Product(model.Model):
    """A line item on an Invoice: unit price, quantity, discount and VAT."""

    owner = fields.ForeignKeyField(User)
    invoice = fields.ForeignKeyField(Invoice)
    name = fields.CharField(max_length=200)
    price = fields.IntegerField()  # unit price; currency unit not specified here
    count = fields.IntegerField()
    discount = fields.IntegerField()  # presumably percent -- TODO confirm
    vat = fields.IntegerField()  # presumably percent -- TODO confirm

    def __repr__(self):
        return str(self.name)
| theikkila/lopputili | app/models/invoice.py | Python | mit | 1,277 |
import sys
from pathlib import Path
# Directory holding this module (and the jar files shipped next to it).
JAR_PATH = Path(__file__).parent

# Expected location of the Java executable for the current platform.
JAVA = (Path('/Program Files/Java/jdk1.8.0_40/bin/java.exe')
        if sys.platform == 'win32'
        else Path('/usr/bin/java'))


def available():
    """Return True when the Java executable exists at the expected path."""
    return JAVA.is_file()
| koceg/gouda | gouda/java/java.py | Python | gpl-2.0 | 247 |
# ext/declarative/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .api import AbstractConcreteBase
from .api import as_declarative
from .api import ConcreteBase
from .api import declarative_base
from .api import DeclarativeMeta
from .api import declared_attr
from .api import DeferredReflection
from .api import has_inherited_table
from .api import instrument_declarative
from .api import synonym_for
__all__ = [
"declarative_base",
"synonym_for",
"has_inherited_table",
"instrument_declarative",
"declared_attr",
"as_declarative",
"ConcreteBase",
"AbstractConcreteBase",
"DeclarativeMeta",
"DeferredReflection",
]
| graingert/sqlalchemy | lib/sqlalchemy/ext/declarative/__init__.py | Python | mit | 844 |
####
#### Setup gross testing environment.
####
#### This currently includes the UI instance target and browser type
#### (FF vs PhantomJS).
####
import os
import time
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
###
### Simple (but somewhat excessive for the data parts) environment.
###
## Run this before anything else.
def before_all(context):
    """Behave hook: set the target URL and create the WebDriver instance
    (PhantomJS or Firefox, selected via the BROWSER env var) shared by
    the whole test run."""

    ## Determine the server target. Default: http://beta.monarchinitiative.org.
    context.target = 'http://beta.monarchinitiative.org'
    if 'TARGET' in os.environ:
        context.target = os.environ['TARGET']

    if 'BROWSER' in os.environ and os.environ['BROWSER'] == 'phantomjs':
        d = DesiredCapabilities.PHANTOMJS
        d['loggingPrefs'] = {'browser': 'ALL', 'client': 'ALL', 'driver': 'ALL', 'performance': 'ALL', 'server': 'ALL'}
        context.browser = webdriver.PhantomJS(desired_capabilities=d)
        context.browser.desired_capabilities['loggingPrefs'] = {'browser': 'ALL', 'client': 'ALL', 'driver': 'ALL', 'performance': 'ALL', 'server': 'ALL'}
        print("# Using PhantomJS webdriver")
    else:
        d = DesiredCapabilities.FIREFOX
        # Only SEVERE browser-console messages are collected (see dump_log).
        lp = {'browser': 'SEVERE', 'client': 'OFF', 'driver': 'OFF', 'performance': 'OFF', 'server': 'OFF'}
        d['loggingPrefs'] = lp
        fp = webdriver.FirefoxProfile()
        # Route page JS logging to the console/stdout for debugging.
        fp.set_preference('javascript.options.showInConsole', True)
        fp.set_preference('browser.dom.window.dump.enabled', True)
        fp.set_preference('devtools.chrome.enabled', True)
        fp.set_preference('devtools.theme', 'dark')
        fp.set_preference("devtools.webconsole.persistlog", True)
        fp.set_preference("devtools.browserconsole.filter.jslog", True)
        fp.set_preference("devtools.browserconsole.filter.jswarn", True)
        fp.set_preference("devtools.browserconsole.filter.error", True)
        fp.set_preference("devtools.browserconsole.filter.warn", True)
        fp.set_preference("devtools.browserconsole.filter.info", True)
        fp.set_preference("devtools.browserconsole.filter.log", True)
        fp.set_preference("devtools.webconsole.filter.jslog", True)
        fp.set_preference("devtools.webconsole.filter.jswarn", True)
        fp.set_preference("devtools.webconsole.filter.error", True)
        fp.set_preference("devtools.webconsole.filter.warn", True)
        fp.set_preference("devtools.webconsole.filter.info", True)
        fp.set_preference("devtools.webconsole.filter.log", True)
        fp.set_preference("devtools.hud.loglimit.console", 5000)
        fp.set_preference("devtools.hud.loglimit.exception", 5000)
        # fp.set_preference("webdriver.log.driver", "DEBUG")
        # fp.set_preference("webdriver.development", True)
        # fp.set_preference("webdriver.firefox.useExisting", True)
        fp.set_preference("webdriver.log.file", os.getcwd() + "/webdriver.log")
        fp.set_preference("webdriver.firefox.logfile", os.getcwd() + "/firefox.log")
        # fp.add_extension(extension='firebug-2.0.13-fx.xpi')
        # fp.add_extension('consoleExport-0.5b5.xpi')
        # fp.set_preference("extensions.firebug.currentVersion", "2.0.13") #Avoid startup screen
        # fp.set_preference("extensions.firebug.console.enableSites", True)
        # fp.set_preference("extensions.firebug.console.logLimit", 5000)
        # fp.set_preference("extensions.firebug.net.enableSites", True)
        # fp.set_preference("extensions.firebug.script.enableSites", True)
        # fp.set_preference("extensions.firebug.allPagesActivation", "on")
        # fp.set_preference("extensions.firebug.defaultPanelName", "console")
        # fp.set_preference("extensions.firebug.framePosition", "detached")
        # fp.set_preference('extensions.firebug.showFirstRunPage', False)
        # fp.set_preference('extensions.firebug.delayLoad', False)
        # fp.set_preference('extensions.firebug.showJSWarnings', True)
        # fp.set_preference('extensions.firebug.showJSErrors', True)
        # fp.set_preference('extensions.firebug.showCSSErrors', True)
        # fp.set_preference('extensions.firebug.showStackTrace', True)
        # fp.set_preference('extensions.firebug.consoleexport.active', True)
        # fp.set_preference("extensions.firebug.consoleexport.defaultLogDir", os.getcwd());
        # fp.set_preference("extensions.firebug.consoleexport.active", True);
        # fp.set_preference("extensions.firebug.consoleexport.alwaysEnableAutoExport", True);
        # fp.set_preference("extensions.firebug.consoleexport.autoExportToFile", True);
        # fp.set_preference("extensions.firebug.consoleexport.autoExportToServer", True);
        # # fp.set_preference("extensions.firebug.consoleexport.format", "xml");
        # fp.set_preference("extensions.firebug.consoleexport.logFilePath", os.getcwd() + "/log.xml");
        # # fp.set_preference("extensions.firebug.consoleexport.serverURL", "http://127.0.0.1:8000/log.php");
        fp.update_preferences()
        context.browser = webdriver.Firefox(capabilities=d, firefox_profile=fp)
        # print("# Using Firefox webdriver. Make any adjustments. You have 15 seconds...")
        # time.sleep(15)

    #
    # Set a 30 second implicit wait - http://selenium-python.readthedocs.org/en/latest/waits.html#implicit-waits
    # Once set, the implicit wait is set for the life of the WebDriver object instance.
    #
    context.browser.set_window_size(2000, 1500)
    context.browser.implicitly_wait(30)  # seconds
## Do this after completing everything.
def after_all(context):
    """Behave hook: shut down the shared browser once the whole run ends.

    (A dead trailing `pass` was removed.)
    """
    context.browser.quit()
# Run this before each scenario
# This works around a problem with the FireFox driver where the window size periodically
# gets smaller and hides the navbar search field.
#
def before_scenario(context, scenario):
    """Behave hook: re-assert the window size before every scenario
    (works around the Firefox driver shrinking the window and hiding
    the navbar search field)."""
    context.browser.set_window_size(2000, 1500)
    # Give the browser a moment to apply the resize before steps run.
    time.sleep(1)
def after_scenario(context, scenario):
    """Behave hook: dump the browser console log after each scenario.

    (A dead trailing `pass` and a stale commented-out sleep were removed.)
    """
    dump_log(context, scenario.name)
def dump_log(context, scenarioName):
    """Print the browser's console log for the named scenario to stdout."""
    entries = context.browser.get_log("browser")
    print('')
    print('--------- console.log for: "%s"' % scenarioName)
    for entry in entries:
        print('%s: %s' % (entry['level'], entry['message']))
    print('')
###
### Working on a more complex run environment for the future.
###
# ## Run this before anything else.
# def before_all(context):
# ## Determine the server target. Default: http://tartini.crbs.ucsd.edu.
# context.target = 'http://tartini.crbs.ucsd.edu'
# if 'TARGET' in os.environ:
# context.target = os.environ['TARGET']
# ## Run this before anything else.
# def before_feature(context, feature):
# ## Get the browser we're going to use. Default: firefox.
# if 'ui' in feature.tags: # only spin up browser when doing ui work
# if 'BROWSER' in os.environ and os.environ['BROWSER'] == 'phantomjs':
# context.browser = webdriver.PhantomJS()
# else:
# context.browser = webdriver.Firefox()
# ## Do this after completing every feature
# def after_feature(context, feature):
# if 'ui' in feature.tags: # only spin up browser when doing ui work
# context.browser.quit()
# ## Do this after completing everything.
# def after_all(context):
# pass
| jmcmurry/monarch-app | tests/behave/environment.py | Python | bsd-3-clause | 7,314 |
# Copyright (c) 2015 RIPE NCC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
import itertools
try:
import ujson as json
except ImportError:
import json
from ripe.atlas.sagan import Result
from ..aggregators import RangeKeyAggregator, ValueKeyAggregator, aggregate
from ..helpers.rendering import SaganSet, Rendering
from ..helpers.validators import ArgumentType
from ..renderers import Renderer
from .base import Command as BaseCommand
class Command(BaseCommand):
    """Render measurement results read from a file or standard input,
    optionally aggregated by probe properties or RTT ranges."""

    NAME = "render"

    DESCRIPTION = "Render the contents of an arbitrary file.\n\nExample:\n" \
                  "  cat /my/file | ripe-atlas render\n"

    # Maps each --aggregate-by choice to [sagan attribute, aggregator class],
    # plus an optional third element holding the bucket boundaries for
    # range-based aggregation.
    AGGREGATORS = {
        "country": ["probe.country_code", ValueKeyAggregator],
        "rtt-median": [
            "rtt_median",
            RangeKeyAggregator,
            [10, 20, 30, 40, 50, 100, 200, 300]
        ],
        "status": ["probe.status", ValueKeyAggregator],
        "asn_v4": ["probe.asn_v4", ValueKeyAggregator],
        "asn_v6": ["probe.asn_v6", ValueKeyAggregator],
        "prefix_v4": ["probe.prefix_v4", ValueKeyAggregator],
        "prefix_v6": ["probe.prefix_v6", ValueKeyAggregator],
    }

    def __init__(self, *args, **kwargs):
        BaseCommand.__init__(self, *args, **kwargs)
        self.file = None  # Opened by _get_sample_result_and_source()

    def add_arguments(self):
        """Attach this command's options to the argument parser."""
        self.parser.add_argument(
            "--renderer",
            choices=Renderer.get_available(),
            help="The renderer you want to use. If this isn't defined, an "
                 "appropriate renderer will be selected."
        )
        self.parser.add_argument(
            "--probes",
            type=ArgumentType.comma_separated_integers_or_file,
            help="Either a comma-separated list of probe ids you want to see "
                 "exclusively, a path to a file containing probe ids (one on "
                 "each line), or \"-\" for standard input in the same format."
        )
        self.parser.add_argument(
            "--from-file",
            type=ArgumentType.path,
            default="-",
            help='The source of the data to be rendered. If nothing is '
                 'specified, we assume "-" or, standard in (the default).'
        )
        self.parser.add_argument(
            "--aggregate-by",
            type=str,
            choices=self.AGGREGATORS.keys(),
            action="append",
            help="Tell the rendering engine to aggregate the results by the "
                 "selected option. Note that if you opt for aggregation, no "
                 "output will be generated until all results are received, and "
                 "if large data sets may explode your system."
        )

    def run(self):
        """Read the input, optionally aggregate it, and render it."""
        using_regular_file = self.arguments.from_file != "-"
        sample, source = self._get_sample_result_and_source(using_regular_file)
        results = SaganSet(iterable=source, probes=self.arguments.probes)
        if self.arguments.aggregate_by:
            results = aggregate(results, self.get_aggregators())
        # The sampled first result determines which renderer is appropriate.
        renderer = Renderer.get_renderer(
            self.arguments.renderer, Result.get(sample).type)()
        Rendering(renderer=renderer, payload=results).render()
        if using_regular_file:
            self.file.close()

    def get_aggregators(self):
        """
        Return aggregators list based on user input
        """
        aggregation_keys = []
        for aggr_key in self.arguments.aggregate_by:
            entry = self.AGGREGATORS[aggr_key]
            key, aggregation_class = entry[0], entry[1]
            if len(entry) == 3:
                # Range-based aggregation: the third element holds the bucket
                # boundaries.  BUG FIX: this branch used to test
                # `aggr_key == "rtt"`, which is not a valid AGGREGATORS key
                # ("rtt-median" is), so RangeKeyAggregator was instantiated
                # without its ranges.
                aggregation_keys.append(
                    aggregation_class(key=key, ranges=entry[2])
                )
            else:
                aggregation_keys.append(aggregation_class(key=key))
        return aggregation_keys

    def _get_sample_result_and_source(self, using_regular_file):
        """
        We need to get the first result from the source in order to detect the
        type. Additionally, if the source is actually one great big JSON list,
        then we need to parse it so we iterate over the results since there's no
        newline characters.
        """
        self.file = sys.stdin
        if using_regular_file:
            self.file = open(self.arguments.from_file)
        # Pop the first line off the source stack. This may very well be a Very
        # Large String and cause a memory explosion, but we like to let our
        # users shoot themselves in the foot.
        # BUG FIX: file.next() exists only on Python 2; the next() built-in
        # works with both Python 2 and 3 iterators.
        sample = next(self.file)
        # Re-attach the line back onto the iterable so we don't lose anything
        source = itertools.chain([sample], self.file)
        # In the case of the Very Large String, we parse out the JSON here
        if sample.startswith("["):
            source = json.loads("".join(source))
            sample = source[0]  # Reassign sample to an actual result
        return sample, source
| danielquinn/ripe-atlas-tools | ripe/atlas/tools/commands/render.py | Python | gpl-3.0 | 5,730 |
import json, sys
from lxml import etree
from models import geo_coords, landmarks
from database import get_or_create
def insert_data(db):
    """Load landmark records from monumentaltrees.xml into the database.

    Every <m> element becomes a Landmark with category "tree", linked to a
    (possibly shared) GeoCoords row obtained via get_or_create().
    """
    # etree.parse() accepts a filename directly; the original passed an open
    # file object (mode "r+", needlessly writable) that was never closed.
    root = etree.parse("monumentaltrees.xml").getroot()
    for x in root:
        if x.tag == "m":
            lat = float(x.attrib['lat'])
            lng = float(x.attrib['lng'])
            geo, _created = get_or_create(db.session, geo_coords.GeoCoords,
                                          latitude=lat, longitude=lng)
            lp = landmarks.Landmark()
            # Non-ASCII tree names are turned into XML character references.
            lp.name = x.attrib['t'].encode('ascii', 'xmlcharrefreplace')
            lp.url = "http://www.monumentaltrees.com/" + x.attrib['u']
            lp.geo = geo
            lp.landmark_category = "tree"
            db.session.add(lp)
            # Commit per record, as the original did, so one bad record does
            # not roll back earlier inserts.
            db.session.commit()
| sellerlink/sellerlink | fixtures/create_monumentaltrees.py | Python | gpl-3.0 | 725 |
from mongoengine import *
import datetime
class ModReq(Document):
    """A moderator request ("modreq") filed by a player on a server."""

    uid = SequenceField(unique=True)  # auto-incrementing ticket number
    server = StringField(required=True)
    username = StringField(required=True)
    request = StringField(required=True)
    location = StringField(required=True)
    status = StringField(required=True, choices=["open", "claimed", "closed"])
    time = DateTimeField(required=True, default=datetime.datetime.utcnow)
    elevate_group = StringField()
    handled_by = StringField()
    close_message = StringField()
    close_time = DateTimeField()

    def __repr__(self):
        # BUG FIX: __repr__ must return a string; returning the raw uid (an
        # int from SequenceField) raised "TypeError: __repr__ returned
        # non-string" whenever the document was repr()'d.
        return str(self.uid)

    meta = {
        'collection': 'modreq',
        # BUG FIX: MongoEngine's meta option is spelled "indexes"; the
        # original "indexed" key was silently ignored.
        'indexes': ['uid']
    }
| JunctionAt/JunctionWWW | models/modreq_model.py | Python | agpl-3.0 | 686 |
import math
import time
t1 = time.time()  # wall-clock start, reported at the bottom of the script

# read the base & exp into a list: each line of the input file holds one
# "base,exponent" pair.  The with-statement closes the file even on error.
with open('pb099_base_exp.txt', 'r') as f:
    bae = f.read().split('\n')
def tonumber(p):
    """Convert a decimal digit string to an int.

    Replaces a hand-rolled ord()-based accumulator with the built-in int()
    constructor, which produces the same value for well-formed input and
    raises ValueError (instead of silently computing garbage) otherwise.
    """
    return int(p)
# Running 1-based line counter: tobe() tags each record with its position in
# the input so the answer can be reported as a line number.
count = 0
def tobe(bae):
    """Parse one "base,exponent" line into [line_no, base, exp, exp*log10(base)].

    The final element is log10(base**exp); comparing those logs orders the
    (astronomically large) powers without ever computing them.
    """
    global count
    count += 1
    b_str, e_str = bae.split(',')
    b = int(b_str)
    e = int(e_str)
    return [count, b, e, math.log10(b) * e]
# Parse every "base,exponent" line into [line_no, base, exp, exp*log10(base)].
listbe = [tobe(line) for line in bae]
def quickSort(L, low, high):
    """In-place quicksort of L[low:high+1], ordering records by element [3].

    Uses the classic fill-from-both-ends partition: the pivot (a copy of the
    first record) is held aside while elements are shuttled between the two
    cursors, then dropped into the meeting point.  Returns L for convenience.
    """
    left = low
    right = high
    if left >= right:
        return L
    pivot = L[left][:]
    while left < right:
        # Walk the right cursor down past records >= pivot.
        while left < right and L[right][3] >= pivot[3]:
            right -= 1
        L[left] = L[right][:]
        # Walk the left cursor up past records <= pivot.
        while left < right and L[left][3] <= pivot[3]:
            left += 1
        L[right] = L[left][:]
    L[left] = pivot[:]
    quickSort(L, low, left - 1)
    quickSort(L, right + 1, high)
    return L
# Sort all records by exp*log10(base); the greatest power ends up last.
listbe = quickSort(listbe,0,len(listbe)-1)
# Project Euler 99 answer: the (1-based) line number of the greatest value.
print(listbe[-1][0])
print("time:",time.time()-t1)
| Adamssss/projectEuler | Problem 001-150 Python/pb099.py | Python | mit | 1,103 |
# Generated by Django 2.0.2 on 2018-04-11 13:32
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the offers feature from the content app.

    Auto-generated by Django 2.0.2: drops the OffersPage, SingleOfferPage and
    OfferCategory models.  The RemoveField operations run first so the
    DeleteModel operations have no remaining inter-model references.
    """
    dependencies = [
        ('wagtailcore', '0040_page_draft_title'),
        ('wagtailforms', '0003_capitalizeverbose'),
        ('wagtailredirects', '0005_capitalizeverbose'),
        ('content', '0025_auto_20180406_1541'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='offerspage',
            name='page_ptr',
        ),
        migrations.RemoveField(
            model_name='singleofferpage',
            name='category',
        ),
        migrations.RemoveField(
            model_name='singleofferpage',
            name='company_logo',
        ),
        migrations.RemoveField(
            model_name='singleofferpage',
            name='page_ptr',
        ),
        migrations.DeleteModel(
            name='OfferCategory',
        ),
        migrations.DeleteModel(
            name='OffersPage',
        ),
        migrations.DeleteModel(
            name='SingleOfferPage',
        ),
    ]
| sussexstudent/falmer | falmer/content/migrations/0026_auto_20180411_1432.py | Python | mit | 1,068 |
#!/usr/bin/env python
# $Id: FileUtils.py,v 1.12 2005/11/02 22:26:07 tavis_rudd Exp $
"""File utitilies for Python:
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.12 $
Start Date: 2001/09/26
Last Revision Date: $Date: 2005/11/02 22:26:07 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.12 $"[11:-2]
from glob import glob
import os
from os import listdir
import os.path
import re
from types import StringType
from tempfile import mktemp
def _escapeRegexChars(txt,
escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
return escapeRE.sub(r'\\\1' , txt)
def findFiles(*args, **kw):
    """Recursively collect all files under a root that match a glob pattern.

    Thin convenience wrapper around FileFinder; see that class for the
    accepted arguments."""
    finder = FileFinder(*args, **kw)
    return finder.files()

def replaceStrInFiles(files, theStr, repl):
    """Literal (non-regex) search/replace across every file in *files*.

    Behaves like string.replace() applied per file; returns a dictionary
    describing the matches found (see FindAndReplace)."""
    escaped = _escapeRegexChars(theStr)
    return FindAndReplace(files, escaped, repl).results()

def replaceRegexInFiles(files, pattern, repl):
    """Regex search/replace across every file in *files* (multi-file re.sub).

    Returns a dictionary describing the matches found (see FindAndReplace)."""
    worker = FindAndReplace(files, pattern, repl)
    return worker.results()
##################################################
## CLASSES
class FileFinder:
    """Traverses a directory tree and finds all files in it that match one of
    the specified glob patterns."""
    def __init__(self, rootPath,
                 globPatterns=('*',),
                 ignoreBasenames=('CVS','.svn'),
                 ignoreDirs=(),
                 ):
        # Collect matches eagerly: the whole tree is walked here, in the
        # constructor, and the result is cached in self._files.
        self._rootPath = rootPath
        self._globPatterns = globPatterns
        self._ignoreBasenames = ignoreBasenames
        self._ignoreDirs = ignoreDirs
        self._files = []
        self.walkDirTree(rootPath)
    def walkDirTree(self, dir='.',
                    listdir=os.listdir,
                    isdir=os.path.isdir,
                    join=os.path.join,
                    ):
        """Recursively walk through a directory tree and find matching files."""
        # Iterative traversal with an explicit stack (pendingDirs); the os
        # functions are bound as default args / locals as a lookup
        # micro-optimization.  Note pop() takes from the end, so the walk is
        # depth-first in reverse listing order.
        processDir = self.processDir
        filterDir = self.filterDir
        pendingDirs = [dir]
        addDir = pendingDirs.append
        getDir = pendingDirs.pop
        while pendingDirs:
            dir = getDir()
            ## process this dir
            processDir(dir)
            ## and add sub-dirs
            for baseName in listdir(dir):
                fullPath = join(dir, baseName)
                if isdir(fullPath):
                    if filterDir(baseName, fullPath):
                        addDir( fullPath )
    def filterDir(self, baseName, fullPath):
        """A hook for filtering out certain dirs. """
        # True means "descend into this directory".
        return not (baseName in self._ignoreBasenames or
                    fullPath in self._ignoreDirs)
    def processDir(self, dir, glob=glob):
        # Append every file in *dir* matching any of the glob patterns.
        extend = self._files.extend
        for pattern in self._globPatterns:
            extend( glob(os.path.join(dir, pattern)) )
    def files(self):
        # Return the list of matches gathered during construction.
        return self._files
class _GenSubberFunc:
    """Converts a 'sub' string in the form that one feeds to re.sub (backrefs,
    groups, etc.) into a function that can be used to do the substitutions in
    the FindAndReplace class."""
    # NOTE: Python 2 only -- subberFunc() uses the `exec` *statement*.
    # Recognizes numeric backrefs (\1, \42, ...) and named groups (\g<name>).
    backrefRE = re.compile(r'\\([1-9][0-9]*)')
    groupRE = re.compile(r'\\g<([a-zA-Z_][a-zA-Z_]*)>')
    def __init__(self, replaceStr):
        # Parse immediately; the chunks of generated code accumulate in
        # self._codeChunks as parse() consumes the source string.
        self._src = replaceStr
        self._pos = 0
        self._codeChunks = []
        self.parse()
    def src(self):
        # The replacement-template source string being parsed.
        return self._src
    def pos(self):
        # Current parse position (character offset into src()).
        return self._pos
    def setPos(self, pos):
        self._pos = pos
    def atEnd(self):
        # True once the parse position has consumed the whole source.
        return self._pos >= len(self._src)
    def advance(self, offset=1):
        self._pos += offset
    def readTo(self, to, start=None):
        # Return src[start:to] (or src[start:] at end of input) and move the
        # parse position to *to*.
        if start == None:
            start = self._pos
        self._pos = to
        if self.atEnd():
            return self._src[start:]
        else:
            return self._src[start:to]
    ## match and get methods
    def matchBackref(self):
        # Match a numeric backref (e.g. \1) at the current position, or None.
        return self.backrefRE.match(self.src(), self.pos())
    def getBackref(self):
        # Consume a matched backref and return its group number as a string.
        m = self.matchBackref()
        self.setPos(m.end())
        return m.group(1)
    def matchGroup(self):
        # Match a named group ref (e.g. \g<name>) at the current position.
        return self.groupRE.match(self.src(), self.pos())
    def getGroup(self):
        # Consume a matched named-group ref and return the group name.
        m = self.matchGroup()
        self.setPos(m.end())
        return m.group(1)
    ## main parse loop and the eat methods
    def parse(self):
        # Tokenize the template into backrefs, group refs and literal runs.
        while not self.atEnd():
            if self.matchBackref():
                self.eatBackref()
            elif self.matchGroup():
                self.eatGroup()
            else:
                self.eatStrConst()
    def eatStrConst(self):
        # Consume a literal run up to the next backref/group ref and emit it
        # as a repr()'d string constant.
        startPos = self.pos()
        while not self.atEnd():
            if self.matchBackref() or self.matchGroup():
                break
            else:
                self.advance()
        strConst = self.readTo(self.pos(), start=startPos)
        self.addChunk(repr(strConst))
    def eatBackref(self):
        # \N becomes m.group(N) in the generated function body.
        self.addChunk( 'm.group(' + self.getBackref() + ')' )
    def eatGroup(self):
        # \g<name> becomes m.group("name") in the generated function body.
        self.addChunk( 'm.group("' + self.getGroup() + '")' )
    def addChunk(self, chunk):
        self._codeChunks.append(chunk)
    ## code wrapping methods
    def codeBody(self):
        # Comma-joined expression list forming the ''.join([...]) argument.
        return ', '.join(self._codeChunks)
    def code(self):
        # Full source of the generated `subber(m)` function.
        return "def subber(m):\n\treturn ''.join([%s])\n" % (self.codeBody())
    def subberFunc(self):
        # Compile the generated source and return the resulting function.
        # (Python 2 exec statement; defines `subber` in the local scope.)
        exec self.code()
        return subber
class FindAndReplace:
    """Find and replace all instances of 'patternOrRE' with 'replacement' for
    each file in the 'files' list. This is a multi-file version of re.sub().
    'patternOrRE' can be a raw regex pattern or
    a regex object as generated by the re module. 'replacement' can be any
    string that would work with patternOrRE.sub(replacement, fileContents).
    """
    # NOTE: Python 2 only (has_key, os.popen3).  Files are rewritten IN PLACE.
    def __init__(self, files, patternOrRE, replacement,
                 recordResults=True):
        # Accept either a raw pattern string or a precompiled regex object.
        if type(patternOrRE) == StringType:
            self._regex = re.compile(patternOrRE)
        else:
            self._regex = patternOrRE
        # Accept either a re.sub-style template string or a callable.
        if type(replacement) == StringType:
            self._subber = _GenSubberFunc(replacement).subberFunc()
        else:
            self._subber = replacement
        self._pattern = pattern = self._regex.pattern
        self._files = files
        self._results = {}
        self._recordResults = recordResults
        ## see if we should use pgrep to do the file matching
        # NOTE(review): the pattern is interpolated straight into a shell
        # command here and in _run(); a pattern containing quotes or shell
        # metacharacters would break (or abuse) the command line.
        self._usePgrep = False
        if (os.popen3('pgrep')[2].read()).startswith('Usage:'):
            ## now check to make sure pgrep understands the pattern
            tmpFile = mktemp()
            open(tmpFile, 'w').write('#')
            if not (os.popen3('pgrep "' + pattern + '" ' + tmpFile)[2].read()):
                # it didn't print an error msg so we're ok
                self._usePgrep = True
            os.remove(tmpFile)
        self._run()
    def results(self):
        # Per-file match details recorded by _subDispatcher().
        return self._results
    def _run(self):
        # Scan each file; only files that contain a match are rewritten.
        regex = self._regex
        subber = self._subDispatcher
        usePgrep = self._usePgrep
        pattern = self._pattern
        for file in self._files:
            if not os.path.isfile(file):
                continue # skip dirs etc.
            self._currFile = file
            found = False
            # `orig` may linger from the previous loop iteration; drop it so
            # the has_key('orig') checks below see a fresh state.
            if locals().has_key('orig'):
                del orig
            if self._usePgrep:
                # Cheap pre-filter: let pgrep decide whether the file matches
                # without reading it into Python.
                if os.popen('pgrep "' + pattern + '" ' + file ).read():
                    found = True
            else:
                orig = open(file).read()
                if regex.search(orig):
                    found = True
            if found:
                # When pgrep did the matching the contents haven't been read
                # yet -- read them now before substituting.
                if not locals().has_key('orig'):
                    orig = open(file).read()
                new = regex.sub(subber, orig)
                open(file, 'w').write(new)
    def _subDispatcher(self, match):
        # Called by regex.sub() for every match: record the match details
        # (when enabled), then delegate to the real substitution function.
        if self._recordResults:
            if not self._results.has_key(self._currFile):
                res = self._results[self._currFile] = {}
                res['count'] = 0
                res['matches'] = []
            else:
                res = self._results[self._currFile]
            res['count'] += 1
            res['matches'].append({'contents':match.group(),
                                   'start':match.start(),
                                   'end':match.end(),
                                   }
                                  )
        return self._subber(match)
class SourceFileStats:
    """Per-file and aggregate line counts (code / comment / blank) for a set
    of source files."""
    _fileStats = None
    def __init__(self, files):
        # Compute stats for every file up front, keyed by filename.
        self._fileStats = stats = {}
        for file in files:
            stats[file] = self.getFileStats(file)
    def rawStats(self):
        """Return the per-file stats dictionary, keyed by filename."""
        return self._fileStats
    def summary(self):
        """Return the line counts summed over all files."""
        codeLines = 0
        blankLines = 0
        commentLines = 0
        totalLines = 0
        for fileStats in self.rawStats().values():
            codeLines += fileStats['codeLines']
            blankLines += fileStats['blankLines']
            commentLines += fileStats['commentLines']
            totalLines += fileStats['totalLines']
        stats = {'codeLines': codeLines,
                 'blankLines': blankLines,
                 'commentLines': commentLines,
                 'totalLines': totalLines,
                 }
        return stats
    def printStats(self):
        # Intentionally a stub (never implemented upstream).
        pass
    def getFileStats(self, fileName):
        """Count code, comment and blank lines in one file.

        A comment line is any line whose first non-whitespace character is
        '#'; a blank line is empty or whitespace-only; everything else is
        code.  BUG FIX: the original patterns required exactly one leading
        whitespace character (so column-0 comments and empty lines were
        counted as code); the corrected patterns accept any amount of
        leading whitespace, including none.  Also closes the file handle,
        which the original leaked.
        """
        codeLines = 0
        blankLines = 0
        commentLines = 0
        commentLineRe = re.compile(r'\s*#')
        blankLineRe = re.compile(r'\s*$')
        with open(fileName) as fh:
            lines = fh.read().splitlines()
        totalLines = len(lines)
        for line in lines:
            if commentLineRe.match(line):
                commentLines += 1
            elif blankLineRe.match(line):
                blankLines += 1
            else:
                codeLines += 1
        stats = {'codeLines': codeLines,
                 'blankLines': blankLines,
                 'commentLines': commentLines,
                 'totalLines': totalLines,
                 }
        return stats
| CymaticLabs/Unity3D.Amqp | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/Cheetah/FileUtils.py | Python | mit | 11,266 |
# basictimerapp - a really simple timer application.
# This should be run using the command line:
# pythonwin /app demos\basictimerapp.py
import win32ui
import win32api
import win32con
import sys
from pywin.framework import app, cmdline, dlgappcore, cmdline
import timer
import time
import string
class TimerAppDialog(dlgappcore.AppDialog):
    """Main dialog of the timer app: shows countdown status and runs the
    configured work on schedule.  Python 2 / Pythonwin (win32ui) only."""
    softspace=1
    def __init__(self, appName = ""):
        # When no explicit app name is given, the first non-"/" command-line
        # argument (if any) names the profile section to read settings from.
        dlgappcore.AppDialog.__init__(self, win32ui.IDD_GENERAL_STATUS)
        self.timerAppName = appName
        self.argOff = 0
        if len(self.timerAppName)==0:
            if len(sys.argv)>1 and sys.argv[1][0]!='/':
                self.timerAppName = sys.argv[1]
                self.argOff = 1
    def PreDoModal(self):
        # sys.stderr = sys.stdout
        pass
    def ProcessArgs(self, args):
        # "/now" triggers an immediate run (same as pressing the OK button).
        for arg in args:
            if arg=="/now":
                self.OnOK()
    def OnInitDialog(self):
        """Read settings from pytimer.ini, wire up controls, start the timer.

        "Timer" and "Work" are Python source strings evaluated/executed by
        TimerManager -- they come from the profile, not from user data.
        """
        win32ui.SetProfileFileName('pytimer.ini')
        self.title = win32ui.GetProfileVal(self.timerAppName, "Title", "Remote System Timer")
        self.buildTimer = win32ui.GetProfileVal(self.timerAppName, "Timer", "EachMinuteIntervaler()")
        self.doWork = win32ui.GetProfileVal(self.timerAppName, "Work", "DoDemoWork()")
        # replace "\n" with real \n.
        self.doWork = self.doWork.replace('\\n','\n')
        dlgappcore.AppDialog.OnInitDialog(self)
        self.SetWindowText(self.title)
        self.prompt1 = self.GetDlgItem(win32ui.IDC_PROMPT1)
        self.prompt2 = self.GetDlgItem(win32ui.IDC_PROMPT2)
        self.prompt3 = self.GetDlgItem(win32ui.IDC_PROMPT3)
        self.butOK = self.GetDlgItem(win32con.IDOK)
        self.butCancel = self.GetDlgItem(win32con.IDCANCEL)
        self.prompt1.SetWindowText("Python Timer App")
        self.prompt2.SetWindowText("")
        self.prompt3.SetWindowText("")
        self.butOK.SetWindowText("Do it now")
        self.butCancel.SetWindowText("Close")
        self.timerManager = TimerManager(self)
        self.ProcessArgs(sys.argv[self.argOff:])
        self.timerManager.go()
        return 1
    def OnDestroy(self,msg):
        # Cancel any pending win32 timer before the window goes away.
        dlgappcore.AppDialog.OnDestroy(self, msg)
        self.timerManager.stop()
    def OnOK(self):
        # stop the timer, then restart after setting special boolean
        self.timerManager.stop()
        self.timerManager.bConnectNow = 1
        self.timerManager.go()
        return
    # def OnCancel(self): default behaviour - cancel == close.
    # return
class TimerManager:
    """Drives the dialog's schedule: polls the intervaler via a win32 timer
    and, when due, runs the configured work string with stdout/stderr
    redirected into the dialog's prompt controls.  Python 2 only."""
    def __init__(self, dlg):
        self.dlg = dlg
        self.timerId = None
        # The "Timer" profile value is Python source, e.g.
        # "EachMinuteIntervaler()"; eval'd here to build the scheduler.
        # NOTE(review): eval/exec of profile-supplied strings -- acceptable
        # for a local demo config, but not for untrusted input.
        self.intervaler = eval(self.dlg.buildTimer)
        self.bConnectNow = 0
        self.bHaveSetPrompt1 = 0
    def CaptureOutput(self):
        # Redirect stdout/stderr to self (see write()) so the work's prints
        # appear in the dialog's prompt controls.
        self.oldOut = sys.stdout
        self.oldErr = sys.stderr
        sys.stdout = sys.stderr = self
        self.bHaveSetPrompt1 = 0
    def ReleaseOutput(self):
        sys.stdout = self.oldOut
        sys.stderr = self.oldErr
    def write(self, str):
        # File-like hook: first non-blank write goes to prompt1, the rest to
        # prompt3.  (Parameter shadows the built-in `str`; kept as-is.)
        s = str.strip()
        if len(s):
            if self.bHaveSetPrompt1:
                dest = self.dlg.prompt3
            else:
                dest = self.dlg.prompt1
                self.bHaveSetPrompt1 = 1
            dest.SetWindowText(s)
    def go(self):
        # Kick off the polling loop immediately.
        self.OnTimer(None,None)
    def stop(self):
        # Cancel the pending win32 timer, if any.
        if self.timerId: timer.kill_timer (self.timerId)
        self.timerId = None
    def OnTimer(self, id, timeVal):
        """Timer callback: run the work if due, else update the countdown
        display and re-arm the timer."""
        if id: timer.kill_timer (id)
        if self.intervaler.IsTime() or self.bConnectNow :
            # do the work.
            try:
                self.dlg.SetWindowText(self.dlg.title + " - Working...")
                self.dlg.butOK.EnableWindow(0)
                self.dlg.butCancel.EnableWindow(0)
                self.CaptureOutput()
                try:
                    # The "Work" profile value is Python source to execute.
                    exec(self.dlg.doWork)
                    print "The last operation completed successfully."
                except:
                    # Report the failure both to the dialog (via print, which
                    # is captured) and to the real stderr.
                    t, v, tb = sys.exc_info()
                    str = "Failed: %s: %s" % (t, repr(v))
                    print str
                    self.oldErr.write(str)
                    tb = None # Prevent cycle
            finally:
                self.ReleaseOutput()
                self.dlg.butOK.EnableWindow()
                self.dlg.butCancel.EnableWindow()
                self.dlg.SetWindowText(self.dlg.title)
        else:
            # Not due yet: show time remaining as HH:MM:SS.
            now = time.time()
            nextTime = self.intervaler.GetNextTime()
            if nextTime:
                timeDiffSeconds = nextTime - now
                timeDiffMinutes = int(timeDiffSeconds / 60)
                timeDiffSeconds = timeDiffSeconds % 60
                timeDiffHours = int(timeDiffMinutes / 60)
                timeDiffMinutes = timeDiffMinutes % 60
                self.dlg.prompt1.SetWindowText("Next connection due in %02d:%02d:%02d" % (timeDiffHours,timeDiffMinutes,timeDiffSeconds))
            self.timerId = timer.set_timer (self.intervaler.GetWakeupInterval(), self.OnTimer)
            self.bConnectNow = 0
class TimerIntervaler:
    """Base class for schedulers that decide when the timer's work is due.

    Subclasses must provide SetFirstTime(now) and SetNextTime(lastTime, now);
    this base class keeps the due-time bookkeeping.
    """
    def __init__(self):
        self.nextTime = None
        self.wakeUpInterval = 2000
    def GetWakeupInterval(self):
        """Polling period, in milliseconds."""
        return self.wakeUpInterval
    def GetNextTime(self):
        """Next due time (seconds since the epoch), or None before the first poll."""
        return self.nextTime
    def IsTime(self):
        """Return 1 when the due time has passed (and schedule the next run),
        else 0."""
        now = time.time()
        if self.nextTime is None:
            self.nextTime = self.SetFirstTime(now)
        due = now >= self.nextTime
        if due:
            # do the work.
            self.nextTime = self.SetNextTime(self.nextTime, now)
        return 1 if due else 0
class EachAnyIntervaler(TimerIntervaler):
    """Fires periodically, aligned to a calendar field.

    *timePos* indexes into the time.localtime() tuple (3=hour, 4=minute,
    5=second), *timeAt* is the value that field should have when the event
    fires, and *timeAdd* is the period in seconds.
    """
    def __init__(self, timeAt, timePos, timeAdd, wakeUpInterval = None):
        TimerIntervaler.__init__(self)
        self.timeAt = timeAt
        self.timePos = timePos
        self.timeAdd = timeAdd
        if wakeUpInterval:
            self.wakeUpInterval = wakeUpInterval
    def SetFirstTime(self, now):
        """First due time: the nearest future moment whose *timePos* field
        equals *timeAt* (all finer-grained fields zeroed)."""
        fields = list(time.localtime(now))
        # If the current field value is already past the target, the aligned
        # moment for this period is in the past -- push one period ahead.
        needsAdvance = fields[self.timePos] > self.timeAt
        fields[self.timePos] = self.timeAt
        for idx in range(self.timePos + 1, 6):
            fields[idx] = 0
        anchor = time.mktime(tuple(fields))
        if needsAdvance:
            anchor = anchor + self.timeAdd
        return anchor
    def SetNextTime(self, lastTime, now):
        """Subsequent due times: simply one period after the last."""
        return lastTime + self.timeAdd
class EachMinuteIntervaler(EachAnyIntervaler):
    """Fires once a minute at second *at* (localtime field 5); polls every 2s."""
    def __init__(self, at=0):
        EachAnyIntervaler.__init__(self, at, 5, 60, 2000)
class EachHourIntervaler(EachAnyIntervaler):
    """Fires once an hour at minute *at* (localtime field 4); polls every 10s."""
    def __init__(self, at=0):
        EachAnyIntervaler.__init__(self, at, 4, 3600, 10000)
class EachDayIntervaler(EachAnyIntervaler):
    """Fires once a day at hour *at* (localtime field 3); polls every 10s."""
    def __init__(self,at=0):
        EachAnyIntervaler.__init__(self, at, 3, 86400, 10000)
class TimerDialogApp(dlgappcore.DialogApp):
    """Pythonwin application object whose main window is the timer dialog."""
    def CreateDialog(self):
        return TimerAppDialog()
def DoDemoWork():
    """Default demo "Work" payload: prints progress and beeps a few times.

    Referenced by name from the profile default in TimerAppDialog
    (executed via exec in TimerManager).  Python 2 print statements."""
    print "Doing the work..."
    print "About to connect"
    win32api.MessageBeep(win32con.MB_ICONASTERISK)
    win32api.Sleep(2000)
    print "Doing something else..."
    win32api.MessageBeep(win32con.MB_ICONEXCLAMATION)
    win32api.Sleep(2000)
    print "More work."
    win32api.MessageBeep(win32con.MB_ICONHAND)
    win32api.Sleep(2000)
    print "The last bit."
    win32api.MessageBeep(win32con.MB_OK)
    win32api.Sleep(2000)
# Module-level application instance picked up by the Pythonwin app framework.
app = TimerDialogApp()
def t():
    """Interactive helper: show the dialog modally (handy for testing).

    NOTE: the local variable deliberately shadows the function name."""
    t = TimerAppDialog("Test Dialog")
    t.DoModal()
    return t
if __name__=='__main__':
    # Run standalone: explain that this demo needs the Pythonwin GUI host.
    import demoutils
    demoutils.NeedApp()
| zhanqxun/cv_fish | pythonwin/pywin/Demos/app/basictimerapp.py | Python | apache-2.0 | 6,596 |
#!/usr/bin/python
from math import ceil
class OverlapException(Exception):
    """Raised when a new element would occupy rack units already in use.

    BUG FIX: these were old-style classes with no Exception base; on
    Python 3 (and in modern Python 2 style) raised objects must derive
    from BaseException.
    """
    pass
class OutOfRackException(Exception):
    """Raised when an element would extend beyond the rack's unit range."""
    pass
class RackFullException(Exception):
    """Raised when no contiguous free span can accommodate an element."""
    pass
unitsize = 43.5
class Rack(object):
    """A rack with a fixed number of vertical units.

    Elements occupy a contiguous span of unit positions; self._elements maps
    each occupied position to the element placed there (the element object at
    its start position, None for the extra units it covers).
    NOTE: Python 2 only (dict.has_key).
    """
    def __init__(self, name, attr, units):
        self._name = name
        self.units = units
        # Placement preference for +=: "bottom" fills from unit 0 upward,
        # anything else fills from the top downward.
        self.affinity = "bottom"
        self._elements = {}
        self.__attributes = attr
        ## size things
        self.width = 445
        self.height = unitsize * self.units
    def addElement(self, position, element):
        """Place *element* at an explicit unit position, validating bounds
        and overlaps first; raises OutOfRackException / OverlapException."""
        if position > self.units or position < 0:
            raise OutOfRackException
        if self._elements.has_key(position):
            raise OverlapException
        if element.units > 1:
            for i in range(position+1, position + element.units):
                if i > self.units or i < 0:
                    raise OutOfRackException
                if self._elements.has_key(i):
                    raise OverlapException
        # all ok, place the element
        self._elements[position] = element
        for i in range(position + 1, position + element.units):
            self._elements[i] = None
    def visit(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visitRack(self)
    def _get_network(self):
        # Total network ports over all placed elements.
        ports = 0
        for e in self._elements.keys():
            if self._elements[e] is not None:
                ports += self._elements[e].network
        return ports
    network = property(_get_network)
    def _get_units(self):
        return self._units
    def _set_units(self, value):
        self._units = int(value)
    units = property(_get_units, _set_units)
    def _get_power(self):
        # Total power outlets needed over all placed elements.
        n = 0
        for e in self._elements.keys():
            if self._elements[e] is not None:
                n += self._elements[e].power
        return n
    power = property(_get_power)
    def __iadd__(self, o):
        """rack += element: place *o* at the first free contiguous span,
        searching bottom-up or top-down per self.affinity; raises
        RackFullException when no span fits."""
        if self.affinity == "bottom":
            r = range(0, self.units - o.units + 1)
        else:
            r = range(self.units - o.units, -1, -1)
        for pos in r:
            if not self._elements.has_key(pos):
                try:
                    for i in range(pos+1, pos + o.units):
                        if self._elements.has_key(i):
                            raise OverlapException
                except OverlapException:
                    continue
                # all ok, place element
                for i in range(pos+1, pos+o.units):
                    self._elements[i] = None
                self._elements[pos] = o
                break
        if o not in self._elements.values():
            raise RackFullException
        return self
    def update(self, d):
        # Bulk-apply attribute values (bypasses properties for new keys).
        self.__dict__.update(d)
    def _get_depth(self):
        # Optional physical depth from the attribute dict; 0 when absent.
        if self.__attributes.has_key('depth'):
            return self.__attributes['depth']
        else:
            return 0
    def _set_depth(self, value):
        self.__attributes['depth'] = value
    depth = property(_get_depth, _set_depth)
class RackElement(object):
    """Base class for anything mounted in a Rack.

    `units` is a property whose getter DERIVES the height from contained
    elements (shelves add their own plate height) while the setter records
    the nominal height in self._units.  NOTE: Python 2 only (print
    statements); `visit` is defined twice -- the second definition wins and
    dispatches dynamically on the class name.
    """
    def __init__(self, units=1, name="rack element", network=1, power=1, cliplock=4, image="", notes=""):
        # NB: `self.units = units` goes through the property setter below,
        # which also initializes self._baseline (recomputed again afterwards).
        self.units = units
        self.name = name
        self.network = network
        self.power = power
        self.cliplock = cliplock
        self.image = image
        self.notes = notes
        self._elements = []
        print "units on %s is %s" % (self.__class__.__name__, units)
        self._baseline = units * unitsize
        print "baseline on %s is %s" % (self.__class__.__name__, self._baseline)
        # evil hack
        self.gap = 0
    def addElement(self, element):
        self._elements.append(element)
    def __iadd__(self, o):
        # element += sub-element (e.g. a box placed on a shelf).
        self._elements.append(o)
        return self
    def visit(self, visitor):
        return visitor.visitRackElement(self)
    def _get_units(self):
        # Height in rack units, derived from the tallest contained element;
        # Shelf subclasses add their own plate height, others add their
        # nominal _units.  `gap` lets the builder shave units off.
        h = 0
        for e in self._elements:
            if e.height > h:
                h = e.height
        if self.__class__.__name__[:5] == "Shelf":
            h += self._baseline
        u = int(ceil(h / unitsize))
        if self.__class__.__name__[:5] != "Shelf":
            u += self._units
        # evil hack
        u -= self.gap
        return u
    def _set_units(self, units):
        self._units = int(units)
        self._baseline = self._units * unitsize
    units = property(_get_units, _set_units)
    def _get_network(self):
        return self.__network
    def _set_network(self, value):
        self.__network = int(value)
    network = property(_get_network, _set_network)
    def _get_power(self):
        return self.__power
    def _set_power(self, value):
        self.__power = int(value)
    power = property(_get_power, _set_power)
    # NOTE: this second definition REPLACES the visit() above; it dispatches
    # to visitor.visit<ClassName>() by name.
    def visit(self, visitor):
        return getattr(visitor, 'visit%s' % (self.__class__.__name__,))(self)
class Rackmount(RackElement):
    """Generic rack-mounted device (dispatches to visitRackmount)."""
    def __init__(self, units=1, name="rackmount", network=1, power=1, cliplock=4, image="", notes=""):
        RackElement.__init__(self, units, name, network, power, cliplock, image, notes)
    def visit(self, visitor):
        return visitor.visitRackmount(self)
class APC(RackElement):
    """APC power-distribution unit (no notes parameter)."""
    def __init__(self, units=1, name="APC", network=1, power=1, cliplock=4, image=""):
        RackElement.__init__(self, units, name, network, power, cliplock, image)
    def visit(self, visitor):
        return visitor.visitAPC(self)
class PatchPanel(RackElement):
    """Network patch panel: contributes no network/power ports by default."""
    def __init__(self, units=1, name = "patch panel", network=0, power=0, cliplock=4):
        RackElement.__init__(self, units, name, network, power, cliplock)
    def visit(self, visitor):
        return visitor.visitPatchPanel(self)
class CableManagement(RackElement):
    """Cable-management panel: contributes no network/power ports by default."""
    def __init__(self, units=1, name = "cable management", network=0, power=0, cliplock=4):
        RackElement.__init__(self, units, name, network, power, cliplock)
    def visit(self, visitor):
        return visitor.visitCableManagement(self)
class Gap(RackElement):
    """Empty spacer.  Overrides the `units` property so its height is the
    fixed value given, instead of being derived from contained elements."""
    def __init__(self, units=1, name="gap"):
        RackElement.__init__(self, units, name, 0, 0, 0)
        self.units = units
    def visit(self, visitor):
        return visitor.visitGap(self)
    def _get_units(self):
        return self._units
    def _set_units(self, units):
        self._units = int(units)
    units = property(_get_units, _set_units)
class Switch(RackElement):
    """Network switch (dispatches to visitSwitch)."""
    def __init__(self, units=1, name="switch", network=1, power=1, cliplock=4, image="", notes=""):
        RackElement.__init__(self,
                             units,
                             name,
                             network,
                             power,
                             cliplock,
                             image,
                             notes)
    def visit(self, visitor):
        return visitor.visitSwitch(self)
class Shelf(RackElement):
    """A shelf holding free-standing ShelfElement boxes.

    The network/power properties report the shelf's own ports plus those of
    everything placed on it.  The geometry fields (_baseline, _bottomline,
    _bracketunits) are overridden by the concrete shelf subclasses.
    """
    def __init__(self, units=1, name = "shelf", network=0, power=0, cliplock=4, gap=0, notes=""):
        # BUG FIX: *notes* used to be passed positionally into RackElement's
        # *image* parameter (6th positional arg), so shelves stored their
        # notes as the image path and had empty notes.  Pass it by keyword.
        RackElement.__init__(self, units, name, network, power, cliplock,
                             notes=notes)
        self._baseline = 43.5
        self._bottomline = 0
        self._bracketunits = 1
        # hack to allow us to tell the builder if the elem above it has
        # enough space to allow things to go into its space
        self.gap = gap
    def visit(self, visitor):
        return visitor.visitShelf(self)
    def _get_network(self):
        # Shelf's own ports plus every element sitting on the shelf.
        n = self.__network
        for e in self._elements:
            n += e.network
        return n
    def _set_network(self, value):
        self.__network = int(value)
    network = property(_get_network, _set_network)
    def _get_power(self):
        # Shelf's own outlets plus every element sitting on the shelf.
        n = self.__power
        for e in self._elements:
            n += e.power
        return n
    def _set_power(self, value):
        self.__power = int(value)
    power = property(_get_power, _set_power)
    def _get_gap(self):
        return self.__gap
    def _set_gap(self, value):
        self.__gap = int(value)
    gap = property(_get_gap, _set_gap)
# The subclasses below only retune the drawing geometry: _baseline (shelf
# plate height), _bottomline (offset below the plate) and _bracketunits
# (bracket height in rack units).
class ShelfThin(Shelf):
    """Thin black shelf: low plate (11) with 0.3U brackets."""
    def __init__(self, units=1, name="Thin Black Shelf", network=0, power=0, cliplock=4, gap=0, notes=""):
        Shelf.__init__(self, units, name, network, power, cliplock, gap, notes)
        self._baseline=11
        self._bottomline=0
        self._bracketunits=0.3
class Shelf1RU(Shelf):
    """1RU shelf (dispatches to visitShelf1RU)."""
    def __init__(self, units=1, name = "1RU shelf", network=0, power=0, cliplock=4, gap=0, notes=""):
        Shelf.__init__(self, units, name, network, power, cliplock, gap, notes)
        self._baseline = 35
        self._bottomline = 10
        self._bracketunits = 1
    def visit(self, visitor):
        return visitor.visitShelf1RU(self)
class Shelf2U(Shelf):
    """Thin 2U shelf rated for 30kg (dispatches to visitShelf2U)."""
    def __init__(self, units=2, name = "thin shelf w/ 30kg rating", network=0, power=0, cliplock=0, gap=0, notes=""):
        Shelf.__init__(self, units, name, network, power, cliplock, gap, notes)
        self._baseline = 0
        self._bottomline = -15
        self._bracketunits = 2
    def visit(self, visitor):
        return visitor.visitShelf2U(self)
class Shelf1a(Shelf):
    """Alternative 1U shelf geometry (dispatches to visitShelf1a)."""
    def __init__(self, units=1, name="1U shelf",network=0,power=0,cliplock=0,gap=0, notes=""):
        Shelf.__init__(self, units, name, network, power, cliplock, gap, notes)
        self._baseline = 28.5
        self._bottomline = 10
        self._bracketunits = 1
    def visit(self, visitor):
        return visitor.visitShelf1a(self)
class ShelfElement(object):
    """A free-standing box (measured in drawing units) placed on a Shelf.

    width/height coerce to float and network/power to int via properties.
    """
    def __init__(self, height=0, width=0, name="shelf element", network=1, power=1, cliplock=0, image="", notes=""):
        self.width = width
        self.height = height
        self._name = name
        self.network = network
        self.power = power
        self.cliplock = cliplock
        self.image = image
        self.notes = notes
    def visit(self, visitor):
        return visitor.visitShelfElement(self)
    def _get_height(self):
        return self.__height
    def _set_height(self, value):
        self.__height = float(value)
    height = property(_get_height, _set_height)
    def _get_width(self):
        return self.__width
    def _set_width(self, value):
        self.__width = float(value)
    width = property(_get_width, _set_width)
    def _get_network(self):
        return self.__network
    def _set_network(self, value):
        self.__network = int(value)
    network = property(_get_network, _set_network)
    def _get_power(self):
        return self.__power
    def _set_power(self, value):
        self.__power = int(value)
    power = property(_get_power, _set_power)
class Box(ShelfElement):
    """Concrete shelf element (dispatches to visitBox)."""
    def __init__(self, height=0, width=0, name = "box", network=1, power=1, cliplock=0, image="", notes=""):
        ShelfElement.__init__(self, height, width, name, network, power, cliplock, image, notes)
    def visit(self, visitor):
        return visitor.visitBox(self)
class RackArray:
    """Ordered collection of racks (dispatches to visitRackArray)."""
    def __init__(self):
        self._elements = []
    def addElement(self, rack):
        self._elements.append(rack)
    def visit(self, visitor):
        return visitor.visitRackArray(self)
class RackVisitor:
    """Base class for visitors to inherit from"""
    # NOTE(review): RackElement.visit dispatches by class name, but there are
    # no visitAPC/visitSwitch/visitGap/visitRackmount-style stubs here for
    # every element class (e.g. visitAPC, visitSwitch, visitGap, visitShelf1a
    # are missing) -- confirm concrete visitors define the ones they need.
    def __init__(self):
        pass
    def visit(self, ast):
        pass
    def visitRackArray(self, ast):
        pass
    def visitRack(self, ast):
        pass
    def visitRackElement(self, ast):
        pass
    def visitRackmount(self, ast):
        pass
    def visitPatchPanel(self, ast):
        pass
    def visitCableManagement(self, ast):
        pass
    def visitShelf(self, ast):
        pass
    def visitShelf1RU(self, ast):
        pass
    def visitShelf2U(self, ast):
        pass
    def visitShelfElement(self, ast):
        pass
    def visitBox(self, ast):
        pass
| jaqx0r/fengshui | rack.py | Python | gpl-2.0 | 10,254 |
"""
Set up: Works with python 2 or python 3 version miniconda(out of box)
Usage: take an input dictionary/json ( all valid inputs)
return an dictionary/json ( with one value replaced by string
from the xss/sql string file)
Note: 'yield' is heavily used, helps to separate test data creation logic from use of it
__maintainer__: debadityamohankudo+github at gmail dot com
"""
import pickle
import os
import time
import copy
import codecs
import json
import hashlib
class Utils(object):
    """Helpers for generating fuzzing (XSS/SQLi) variants of JSON payloads."""
    def __init__(self):
        # Stateless at construction; some methods create attributes lazily.
        pass
"""
def postdata_generator_with_insecure_values(self, filename, input_dict, specific_params=None):
parameters_list = specific_params if specific_params is not None else input_dict.keys()
for parameter in parameters_list:
print('-' * 60)
print('parameter targeted : {param}'.format(param=parameter))
print('-' * 60)
for value in generate_insecure_strings(filename):
print('-' * 20)
print('value is:{v}'.format(v=value))
print('-' * 20)
output_dict = input_dict.copy()
output_dict[parameter] = value
yield output_dict"""
def generate_insecure_strings(self, mal_data):
# yield is for lazy binding -> iterator pattern
# http://stackoverflow.com/questions/2223882/whats-different-between-utf-8-and-utf-8-without-bom
# encoding used to remove BOM char in mal input issue
if isinstance(mal_data, str):
if os.path.isfile(mal_data):
for line in codecs.open(mal_data, 'r').readlines():
data = line.rstrip('\n').rstrip('\r')
if data != '':
#print(data) # dotn delete this works
yield data
else:
for data in mal_data:
yield data
def write_details_to_file_ee(self, filename, *args):
for arg in args:
with open(filename, 'a') as f:
if isinstance(arg, (list, dict)):
json.dump(arg, f, ensure_ascii=False)
else:
f.write(str(arg))
def parse_json_get_items(self, a_object,
str_malicious=None,
key=None,
get_item=True,
set_item=False):
''' this function does two operations
1. When get_item is true -> gets all key value pairs in self.list_k,
2. When set_item is true -> sets the value for one key to malicious
3. When there are duplicate keys like zip, try giving unique values
4. What is the use of key parameter: key is to be supplied from the temp list_k
'''
if not hasattr(self, 'done_set_item'): # dont delete this: used while reading json
self.done_set_item = False
if not hasattr(self, 'list_k'):
self.list_k = [] # stores all key-val pairs - one time execution
if not self.done_set_item: # if set quit the function
if isinstance(a_object, list):
for item in a_object:
if isinstance(item, (list, dict)): # can list appear in list?
self.parse_json_get_items(a_object=item,
str_malicious=str_malicious,
key=key,
get_item=get_item,
set_item=set_item)
else:
self.parse_json_get_items(a_object=(item, a_object),
str_malicious=str_malicious,
key=key,
get_item=get_item,
set_item=set_item)
elif isinstance(a_object, dict):
for item in a_object:
if isinstance(a_object[item], (dict, list)):
self.parse_json_get_items(a_object=a_object[item],
str_malicious=str_malicious,
key=key,
get_item=get_item,
set_item=set_item)
else:
self.parse_json_get_items(a_object=(item, a_object),
str_malicious=str_malicious,
key=key,
get_item=get_item,
set_item=set_item)
elif isinstance(a_object, tuple): # each tuple is key, value, parent dict
if True:
hashobj = hashlib.md5()
hashobj.update(json.dumps(a_object).encode('utf-8'))
hashobj.update(str(a_object[0]).encode('utf-8'))
hash_key1 = hashobj.hexdigest()
if get_item is True:
if hash_key1 not in self.list_k:
self.list_k.append(hash_key1)
elif set_item is True:
if key == hash_key1 and isinstance(a_object[1], dict):
a_object[1][a_object[0]] = str_malicious
self.done_set_item = True
if key == hash_key1 and isinstance(a_object[1], list):
a_object[1][a_object[1].index(a_object[0])] = str_malicious
self.done_set_item = True
else:
pass
def parse_json_set_items(self, a_object,
str_malicious=None,
key=None,
get_item=False,
set_item=True):
self.parse_json_get_items(a_object=a_object,
str_malicious=str_malicious,
key=key,
get_item=False,
set_item=True)
def generate_testdata_with_malicious_str_ee(self, a_json_list, str_malicious):
self.parse_json_get_items(a_json_list) # fills the self.list_k with unique hash values
for key in self.list_k:
self.done_set_item = False # reset the value
input_json = copy.deepcopy(a_json_list) #http://stackoverflow.com/questions/184643/what-is-the-best-way-to-copy-a-list
self.parse_json_set_items(input_json, str_malicious, key, get_item=False, set_item=True)
yield input_json, key
def postdata_generator_with_insecure_values_ee(self, a_json_list, mal_data):
for value in self.generate_insecure_strings(mal_data):
for td, key in self.generate_testdata_with_malicious_str_ee(a_json_list, value):
yield td, value, key
def postdata_generator_with_insecure_values_GET_req_ee(self, api_url_get, mal_data, target_param):
for value in self.generate_insecure_strings(mal_data):
if target_param != []:
for param in target_param:
yield value, api_url_get.replace(param, value)
class detection(object):
    """Decide pass/fail for a fuzzing case by inspecting the HTTP response."""

    def __init__(self):
        pass

    def detect_in_response(self, response_obj,
                           http_status_codes=[],
                           resp_contains=None,
                           resp_time_more_than=None, #: in seconds
                           result_prefix='PASS'):
        '''result_prefix - allowed values 'PASS' OR 'FAIL'

        Every requested check must hold for *result_prefix* to be returned;
        if any check fails, the opposite verdict is returned.
        '''
        checks = set()
        if http_status_codes:  # bug fix: was the undefined name ``http_status_code``
            checks.add(response_obj.status_code in http_status_codes)
        if resp_contains:
            checks.add(resp_contains in response_obj.text)
        if resp_time_more_than:
            # NOTE(review): assumes the response object exposes
            # ``elapsed_time`` in seconds -- confirm against the HTTP client.
            checks.add(response_obj.elapsed_time > resp_time_more_than)
        if False in checks:
            # Return the verdict that is NOT result_prefix.  The original
            # ``['PASS', 'FAIL'].remove(...)[0]`` crashed because
            # list.remove() returns None.
            outcomes = ['PASS', 'FAIL']
            outcomes.remove(result_prefix.upper())
            return outcomes[0]
        else:
            return result_prefix
class logParsing(object):
    """Splits a combined fuzzing log into pass/fail/other files."""

    def __init__(self):
        pass

    def parse(self, log_file):
        """Partition *log_file* lines into three timestamped output files.

        Output file names are 'pass'/'fail'/'other' plus the current time
        (colons replaced so the name is filesystem-safe).
        """
        now = time.ctime().replace(':', '_')
        # Context managers guarantee all four files are closed even when
        # reading the input raises (the originals leaked on exception).
        with open('pass' + now, 'w') as pass_log, \
             open('fail' + now, 'w') as fail_log, \
             open('other' + now, 'w') as other, \
             open(log_file, 'r') as f:
            for line in f.readlines():
                if 'PASS-result:' in line:
                    pass_log.write(line)
                elif 'FAIL-result:' in line:
                    fail_log.write(line)
                else:
                    other.write(line)
if __name__ == '__main__':
    ######################################################################
    # Sample certificate-order payload used as the seed for fuzzing below.
    a = [{
        "partnerOrderId": ["xxxxxx", 1, False],
        "productType": "hujhuuu",
        "csr": "dgdghdfh",
        "serverType": "Apache",
        "validityPeriodDays": 365,
        "authType": "DNS",
        "domain": {
            "cn": "sfs.net",
            "sans": None
        },
        "org": {
            "orgName": "xxxxxx",
            "orgUnit": "Eng",
            "address": {
                "addressLine1": "4201 norwalk dr1",
                "addressLine2": "",
                "addressLine3": "",
                "phoneNumber": "",
                "city": "san jose",
                "state": "$california$",
                "country": "us",
                "zip": "95129"}
        }
        ,
        "certTransparency": {
            "ctLogging": False
        },
        "signatureAlgorithm": "sha256WithRSAEncryption",
        "certChainType": "MIXED",
        "locale": None
    }
    ]
    ######################################################################
#: UNIT TESTING
if __name__ == '__main__':
    u = Utils() # create instance
    mal_source = ['abcccccccccccc']
    # Print every single-leaf mutation of the sample payload above.
    for postdata, val, key in u.postdata_generator_with_insecure_values_ee(a, mal_source):
        print(postdata)
| dmohankudo/APIFuzzing | UtilsLibFuzzing.py | Python | apache-2.0 | 10,185 |
import numpy as np
def makeRankingForClass(data):
    """Return item indices ranked from highest to lowest value.

    Ties keep their original (ascending-index) order, because Python's
    sort is stable -- the same order the original decorate-sort produced.
    """
    return sorted(range(len(data)), key=lambda idx: data[idx], reverse=True)
def makeRankingsForModel(probabilitiesVector):
    """Build one ranking per class column of a (samples, classes) array."""
    n_classes = probabilitiesVector.shape[1]  # number of columns == classes
    return {class_index: makeRankingForClass(probabilitiesVector[:, class_index])
            for class_index in range(n_classes)}
def makeRankings(probabilitiesDict):
    """Map each model index to its per-class rankings."""
    return {model_index: makeRankingsForModel(probs)
            for model_index, probs in probabilitiesDict.items()}
return ret | rsboos/DistributedClassifier | src/rankings.py | Python | gpl-3.0 | 769 |
# -*- coding: utf-8 -*-
import sys
import types
from .env import ISIS_VERSION
from .isiscommand import Isis
class ModuleWrapper(Isis, types.ModuleType):
    """Module stand-in that adds dynamic ISIS command lookup to this module.

    An instance of this class replaces the real module object in
    ``sys.modules`` (see the assignment at the bottom of this file), so
    plain attribute access on the module can fall through to Isis command
    resolution.
    """
    def __init__(self, self_module, **kwargs):
        # this is super ugly to have to copy attributes like this,
        # but it seems to be the only way to make reload() behave
        # nicely. if i make these attributes dynamic lookups in
        # __getattr__, reload sometimes chokes in weird ways...
        for attr in ['__builtins__', '__doc__', '__name__', '__package__']:
            setattr(self, attr, getattr(self_module, attr, None))
        # python 3.2 (2.7 and 3.3 work fine) breaks on osx (not ubuntu)
        # if we set this to None. and 3.3 needs a value for __path__
        self.__path__ = []
        self.__all__ = []
        # Keep a reference to the original module so __getattr__ can
        # delegate to it first.
        self.__self_module = self_module
        super(ModuleWrapper, self).__init__(**kwargs)
    def __getattr__(self, name):
        # Prefer real attributes of the wrapped module; fall back to the
        # dynamic command lookup provided by the Isis base class.
        try:
            return getattr(self.__self_module, name)
        except AttributeError:
            return super(Isis, self).__getattr__(name)
    def _add_command(self, name, cmd):
        # Track every registered command in __all__ so that star-imports
        # of this module export it.
        super(ModuleWrapper, self)._add_command(name, cmd)
        self.__all__.append(name)
# Swap this module for a ModuleWrapper instance so module-level attribute
# access gains dynamic ISIS command resolution.  ``strict`` is only enabled
# when an ISIS installation was detected (ISIS_VERSION is set).
sys.modules[__name__] = ModuleWrapper(
    self_module=sys.modules[__name__],
    strict=(ISIS_VERSION is not None),
)
| wtolson/pysis | pysis/isis.py | Python | bsd-3-clause | 1,339 |
# Python Art - Twitter Text Art
#
# This code generates Python Turtle text from a Twitter feed
#
# The project has been inspired and helped through lots of examples and questions on forums or websites including:
# http://stackoverflow.com/questions/743806/split-string-into-a-list-in-python
# http://stackoverflow.com/questions/17371652/tweepy-twitter-api-not-returning-all-search-results?rq=1
# http://stackoverflow.com/questions/15141031/python-turtle-draw-text-with-on-screen-with-larger-font
# http://marcobonzanini.com/2015/03/02/mining-twitter-data-with-python-part-1/
#
# Thanks to these programmers!
#
# CC0 Ian Simpson, 2nd April 2016 @familysimpson
import tweepy
import turtle
import random
# Twitter OAuth credentials: consumer key/secret and access token/secret.
Ckey = '...' # Put your own credentials here
Csec = '...'
Akey = '...'
Asec = '...'
# Accumulates every word harvested from the fetched tweets.
lstTweet = []
def processTweets(tweet):
    """Split *tweet* into words and accumulate them in the global word list."""
    global lstTweet
    lstTweet.extend(tweet.split())
def sortTweets():
    """Sort the harvested word list alphabetically, in place."""
    global lstTweet
    lstTweet[:] = sorted(lstTweet)
def purgeTweets():
    """Remove words that do not start with an ASCII letter, and links.

    Bug fix: the old ``ord()`` range test (65..122) wrongly admitted the
    punctuation characters ``[ \\ ] ^ _ \``` (ords 91-96).  The rejected-word
    debug print is dropped.
    """
    global lstTweet
    kept = []
    for word in lstTweet:
        first = word[0]
        if ('A' <= first <= 'Z' or 'a' <= first <= 'z') and not word.startswith("http"):
            kept.append(word)
    lstTweet = kept
# Authenticate with Twitter via OAuth.
auth = tweepy.OAuthHandler(Ckey, Csec)
auth.set_access_token(Akey, Asec)
twapi = tweepy.API(auth)
# Harvest words from the ten most recent home-timeline statuses.
for status in tweepy.Cursor(twapi.home_timeline).items(10):
    # Process a single status
    processTweets(status.text)
sortTweets()
purgeTweets()
# Another alternative is:
#for tweet in tweepy.Cursor(twapi.search,
#                           q="#python",
#                           count=100,
#                           result_type="recent",
#                           include_entities=True,
#                           lang="en").items(10):
#    processTweets(tweet.text)
# Draw each word at a random position, colour and font size on the canvas.
wn = turtle.Screen()
w = wn.window_width()
h = wn.window_height()
t1 = turtle.Turtle()
for word in lstTweet:
    t1.color(192/255.,192/255.,192/255.) # silver lines
    t1.goto(random.randrange(int(-(w/2)),int(w/2)), random.randrange(int(-(h/2)),int(h/2)))
    t1.color(random.randrange(0,255)/255.,random.randrange(0,255)/255.,random.randrange(0,255)/255.)
    fontsize = random.randrange(8,32)
    t1.write(word, False, font=("Palatino", fontsize, "normal"))
# Keep the window open until the user clicks it.
wn.exitonclick()
| familysimpson/PythonArt | TwitterTextArt-github.py | Python | cc0-1.0 | 2,497 |
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function, unicode_literals
from oucfeed.crawler import util
from oucfeed.crawler.newsspider import NewsSpider
class Spider(NewsSpider):
    """School of Medicine and Pharmacy.

    The content-page links on this site carry some odd trailing junk.
    Party/league related content lives on a separate site:
    http://222.195.158.131/yiyaodtgz/
    """
    name = "院系/医药"
    # Listing pages, one per news category (announcements, school news,
    # research, teaching, conferences, cooperation, recruitment).
    list_urls = [
        "http://www2.ouc.edu.cn/yiyao/newsmore.asp?bigclassname=%CD%A8%D6%AA%B9%AB%B8%E6",
        "http://www2.ouc.edu.cn/yiyao/newsmore.asp?bigclassname=%D1%A7%D4%BA%D0%C2%CE%C5",
        "http://www2.ouc.edu.cn/yiyao/newsmore.asp?bigclassname=%BF%C6%D1%D0%B6%AF%CC%AC",
        "http://www2.ouc.edu.cn/yiyao/newsmore.asp?bigclassname=%BD%CC%D1%A7%B9%A4%D7%F7",
        "http://www2.ouc.edu.cn/yiyao/newsmore.asp?bigclassname=%D1%A7%CA%F5%BB%E1%D2%E9",
        "http://www2.ouc.edu.cn/yiyao/newsmore.asp?bigclassname=%BA%CF%D7%F7%BD%BB%C1%F7",
        "http://www2.ouc.edu.cn/yiyao/newsmore.asp?bigclassname=%D5%D0%C6%B8%D0%C5%CF%A2",
        #"http://www2.ouc.edu.cn/yiyao/newsmore.asp?bigclassname=%D1%D0%BE%BF%B3%C9%B9%FB",
    ]
    # XPath scope and fields used to pull entries out of a listing page.
    list_extract_scope = "//table[@width='650']"
    list_extract_field = {
        'link': ".//@href",
        'datetime': ".//td[@width='15%']/text()",
        'category': "//span[@class='STYLE2']/text()",
        'title': ".//a/text()",
    }
    # URL pattern identifying article pages, plus the XPath used to extract
    # the article title and body.
    item_url_pattern = r"http://www2.ouc.edu.cn/yiyao/news\.asp"
    item_extract_scope = "//table[@width='630']"
    item_extract_field = {
        'title': ".//b[1]/text()",
        'content': ".//td[2]",
    }
    # Format of the listing-page date strings.
    datetime_format = "%Y-%m-%d"
| D6C92FE5/oucfeed.crawler | oucfeed/crawler/spiders/yuanxi_yi_yao.py | Python | mit | 1,670 |
from urllib import request
import re
from bs4 import BeautifulSoup
# Search google, match links by regex, return the links, integration functions get names from links
"""
Function to return links from google search
https://github.com/aviaryan/pythons/blob/master/Others/GoogleSearchLinks.py
"""
def googleSearchLinks(search, re_match=None):
    """Scrape a Google results page and return the result links.

    When *re_match* is given, only links matching it case-insensitively
    are kept.
    """
    query = search.replace(' ', '+')
    req = request.Request('http://www.google.com/search?q=' + query,
                          headers={'User-Agent': "foobar"})
    html = request.urlopen(req).read()
    soup = BeautifulSoup(html.decode(errors='replace'), 'lxml')  # phast

    def _clean(raw):
        # Strip Google's redirect prefix (/url?q=), the tracking suffix
        # (&sa...), and everything from the first %-escape onward (lossy,
        # exactly as the original did -- "NOT SAFE").
        raw = re.sub(r'^.*?=', '', raw, count=1)
        raw = re.sub(r'\&sa.*$', '', raw, count=1)
        return re.sub(r'\%.*$', '', raw)

    links = []
    for heading in soup.find_all('h3'):
        link = _clean(heading.a['href'])
        if re_match is not None and re.match(re_match, link, flags=re.IGNORECASE) is None:
            continue
        links.append(link)
    return links
return links | iiitv/hackathon-fullstack-server | fullstackserver/api/integrations/googlesearch.py | Python | apache-2.0 | 1,060 |
# -*- coding: utf-8 -*-
"""
productporter.utils.helper
~~~~~~~~~~~~~~~~~~~~
A module that makes creating data more easily
:copyright: (c) 2014 by the ProductPorter Team.
:license: BSD, see LICENSE for more details.
"""
import datetime, time
from markdown2 import markdown as render_markdown
from flask import current_app
from flask.ext.themes2 import render_theme_template
from flask import render_template as flask_render_template
from productporter.extensions import db
from productporter.user.models import User, Group
from productporter.product.phapi import ProductHuntAPI
from productporter.product.models import Product, Tag
from productporter.configs.default import porter_config
def create_default_groups():
    """
    This will create the 5 default groups
    """
    from productporter.fixtures.groups import fixture
    created = []
    for group_name, attributes in fixture.items():
        group = Group(name=group_name)
        for attr_name, attr_value in attributes.items():
            setattr(group, attr_name, attr_value)
        group.save()
        created.append(group)
    return created
def create_default_tags():
    """
    This will create default tags
    """
    from productporter.fixtures.tags import fixture
    created = []
    for tag_name, attributes in fixture.items():
        tag = Tag(name=tag_name)
        for attr_name, attr_value in attributes.items():
            setattr(tag, attr_name, attr_value)
        tag.save()
        created.append(tag)
    return created
def create_admin_user(username, password, email):
    """Create the administrator account and attach it to the admin group."""
    admin_group = Group.query.filter_by(admin=True).first()
    admin = User()
    admin.username = username
    admin.password = password
    admin.email = email
    admin.primary_group_id = admin_group.id
    admin.save()
def pull_and_save_posts(day=None):
    """Fetch posts for *day* from the Product Hunt API and persist them.

    Returns the number of posts pulled.
    """
    posts = ProductHuntAPI().posts(day)
    for post_json in posts:
        Product.from_json(post_json).save()
    return len(posts)
def render_template(template, **context):
    """Render *template* through the app's configured theme.

    Wraps `render_theme_template` so views don't have to pass the theme.
    """
    return render_theme_template(current_app.config['DEFAULT_THEME'],
                                 template, **context)
def render_markup(text):
    """Renders the given text as markdown

    :param text: The text to be rendered (None is treated as empty)
    """
    source = text if text is not None else ""
    return render_markdown(source, extras=['tables'])
def query_products(spec_day=None):
    """ get all the products of the day

    Fallback behaviour:
    * no ``spec_day`` and today's list is empty -> show yesterday's data;
    * explicit past ``spec_day`` ('YYYY-MM-DD') with no cached rows -> pull
      that day's posts from the Product Hunt API, then query again.
    Returns (day, posts).
    """
    day = spec_day
    if not day:
        day = format_date(datetime.date.today())
    posts = Product.query.filter(Product.date==day).\
        order_by(Product.votes_count.desc()).all()
    # when not specific a day and the content is empty, we show yesterday's data
    if not spec_day and len(posts) == 0:
        delta = datetime.timedelta(days=-1)
        d = datetime.date.today() + delta
        day = format_date(d)
        posts = Product.query.filter(Product.date==day).\
            order_by(Product.votes_count.desc()).all()
    # when spec_day is some old date and data is empty, we pull from PH server
    if spec_day and len(posts) == 0:
        today = datetime.date.today()
        ymd = spec_day.split('-')
        spec_date = datetime.date(int(ymd[0]), int(ymd[1]), int(ymd[2]))
        if spec_date < today:
            day = spec_day
            pull_and_save_posts(day)
            posts = Product.query.filter(Product.date==day).\
                order_by(Product.votes_count.desc()).all()
    return day, posts
def query_top_voted_products(days_ago=2, limit=10):
    """Top *limit* products by vote count over the last *days_ago* days."""
    end = datetime.date.today()
    start = end - datetime.timedelta(days=days_ago)
    return Product.query.filter(Product.date.between(start, end)).\
        order_by(Product.votes_count.desc()).limit(limit).offset(0).all()
def query_search_products(keyword, limit=10):
    """Search *keyword* in product names and taglines, best-voted first."""
    pattern = '%{}%'.format(keyword)
    return Product.query.filter(db.or_(Product.name.like(pattern),
                                       Product.tagline.like(pattern))).\
        order_by(Product.votes_count.desc()).limit(limit).offset(0).all()
def format_date(d):
    """Format a date-like object (with year/month/day) as 'YYYY-MM-DD'."""
    return '{:04d}-{:02d}-{:02d}'.format(d.year, d.month, d.day)
def root_url_prefix(app, prefix_key):
    """Join the configured root URL prefix with the prefix at *prefix_key*."""
    config = app.config
    return config["ROOT_URL_PREFIX"] + config[prefix_key]
def send_reset_token(user, token):
    """Email a password-reset link (carrying *token*) to *user*."""
    send_mail(
        subject="Reset password ",
        recipient=user.email,
        # The HTML body is rendered outside the theme machinery on purpose:
        # mail templates are not themed.
        body=flask_render_template(
            "user/reset_password_mail.html",
            user=user,
            token=token
        ),
        subtype = "html",
    )
def send_mail(subject, recipient, body, subtype='plain', as_attachment=False):
    """Send *body* to *recipient* via the app-configured SMTP server.

    :param subject: Mail subject line.
    :param recipient: Destination address.
    :param body: Message text (already rendered).
    :param subtype: MIME subtype of the body ('plain' or 'html').
    :param as_attachment: When True, send the body as a dated .txt attachment.
    """
    import smtplib
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    sender = current_app.config["MAIL_SENDER"]
    smtpserver = current_app.config["MAIL_SERVER"]
    username = current_app.config["MAIL_USERNAME"]
    password = current_app.config["MAIL_PASSWORD"]
    # attachment
    if as_attachment:
        msgroot = MIMEMultipart('related')
        msgroot['Subject'] = subject
        att = MIMEText(body, 'base64', 'utf-8')
        att["Content-Type"] = 'text/plain'
        att["Content-Disposition"] = \
            'attachment; filename="%s.txt"' % (time.strftime("%Y%m%d"))
        msgroot.attach(att)
    else:
        msgroot = MIMEText(body, subtype, 'utf-8')
        msgroot['Subject'] = subject
    smtp = smtplib.SMTP()
    smtp.connect(smtpserver)
    try:
        smtp.login(username, password)
        smtp.sendmail(sender, recipient, msgroot.as_string())
    finally:
        # Bug fix: the connection was leaked when login/sendmail raised.
        smtp.quit()
def is_online(user):
    """A simple check to see if the user was online within a specified
    time range

    :param user: The user who needs to be checked
    """
    cutoff = time_diff()
    return user.lastseen >= cutoff
def time_diff():
    """UTC instant before which a user counts as offline.

    Uses the ONLINE_LAST_MINUTES value from the configuration.
    """
    window = datetime.timedelta(minutes=porter_config['ONLINE_LAST_MINUTES'])
    return datetime.datetime.utcnow() - window
## permission related
def is_moderator(user):
    """Returns ``True`` if the user is in a moderator group.

    :param user: The user who should be checked.
    """
    perms = user.permissions
    return perms['mod'] or perms['admin']
def is_admin(user):
    """Returns ``True`` if the user is a administrator.

    :param user: The user who should be checked.
    """
    perms = user.permissions
    return perms['admin']
def can_translate(user):
    """True when the user may translate a product."""
    permissions = user.permissions
    return permissions['perm_translate']
def can_comment(user):
    """True when the user may post comments to a product."""
    permissions = user.permissions
    return permissions['perm_comment']
def can_review(user):
    """True when the user may review a translation."""
    permissions = user.permissions
    return permissions['perm_review']
def can_report(user):
    """True when the user may generate a daily report."""
    permissions = user.permissions
    return permissions['perm_report']
def can_topic(user):
    """True when the user may create a topic."""
    permissions = user.permissions
    return permissions['perm_topic']
def can_setgroup(user):
    """True when the user may change another user's secondary group."""
    permissions = user.permissions
    return permissions['perm_setgroup']
| kamidox/weixin_producthunt | productporter/utils/helper.py | Python | bsd-2-clause | 7,440 |
"""Tests for http-proxy UI component.
:Requirement: HttpProxy
:CaseLevel: Acceptance
:CaseComponent: Repositories
:Assignee: jpathan
:TestType: Functional
:CaseImportance: High
:CaseAutomation: Automated
:Upstream: No
"""
import pytest
from fauxfactory import gen_integer
from fauxfactory import gen_string
from fauxfactory import gen_url
from nailgun import entities
from robottelo.config import settings
from robottelo.constants import REPO_TYPE
@pytest.fixture(scope='module')
def module_org():
    """Module-scoped Organization shared by the tests below."""
    organization = entities.Organization()
    return organization.create()
@pytest.fixture(scope='module')
def module_loc():
    """Module-scoped Location shared by the tests below."""
    location = entities.Location()
    return location.create()
@pytest.mark.tier2
@pytest.mark.upgrade
def test_positive_create_update_delete(session, module_org, module_loc):
    """Create new http-proxy with attributes, update and delete it.

    :id: 0c7cdf3d-778f-427a-9a2f-42ad7c23aa15

    :expectedresults: All expected CRUD actions finished successfully

    :CaseLevel: Integration

    :CaseImportance: High
    """
    http_proxy_name = gen_string('alpha', 15)
    updated_proxy_name = gen_string('alpha', 15)
    http_proxy_url = '{}:{}'.format(
        gen_url(scheme='https'), gen_integer(min_value=10, max_value=9999)
    )
    password = gen_string('alpha', 15)
    username = gen_string('alpha', 15)
    with session:
        # Create the proxy, then verify every submitted field round-trips
        # through the UI read.
        session.http_proxy.create(
            {
                'http_proxy.name': http_proxy_name,
                'http_proxy.url': http_proxy_url,
                'http_proxy.username': username,
                'http_proxy.password': password,
                'locations.resources.assigned': [module_loc.name],
                'organizations.resources.assigned': [module_org.name],
            }
        )
        assert session.http_proxy.search(http_proxy_name)[0]['Name'] == http_proxy_name
        http_proxy_values = session.http_proxy.read(http_proxy_name)
        assert http_proxy_values['http_proxy']['name'] == http_proxy_name
        assert http_proxy_values['http_proxy']['url'] == http_proxy_url
        assert http_proxy_values['http_proxy']['username'] == username
        assert http_proxy_values['locations']['resources']['assigned'][0] == module_loc.name
        assert http_proxy_values['organizations']['resources']['assigned'][0] == module_org.name
        # Update http_proxy with new name
        session.http_proxy.update(http_proxy_name, {'http_proxy.name': updated_proxy_name})
        assert session.http_proxy.search(updated_proxy_name)[0]['Name'] == updated_proxy_name
        # Delete http_proxy
        session.http_proxy.delete(updated_proxy_name)
        # Deletion is confirmed via the API rather than the UI search.
        assert not entities.HTTPProxy().search(query={'search': f'name={updated_proxy_name}'})
@pytest.mark.tier2
@pytest.mark.skipif((not settings.robottelo.REPOS_HOSTING_URL), reason='Missing repos_hosting_url')
def test_positive_assign_http_proxy_to_products_repositories(session, module_org, module_loc):
    """Assign HTTP Proxy to Products and Repositories.

    :id: 2b803f9c-8d5d-4467-8eba-18244ebc0201

    :expectedresults: HTTP Proxy is assigned to all repos present
        in Products.

    :CaseImportance: Critical
    """
    # create HTTP proxies
    http_proxy_a = entities.HTTPProxy(
        name=gen_string('alpha', 15),
        url=settings.http_proxy.un_auth_proxy_url,
        organization=[module_org.id],
        location=[module_loc.id],
    ).create()
    # Proxy ``b`` requires authentication, unlike ``a``.
    http_proxy_b = entities.HTTPProxy(
        name=gen_string('alpha', 15),
        url=settings.http_proxy.auth_proxy_url,
        username=settings.http_proxy.username,
        password=settings.http_proxy.password,
        organization=[module_org.id],
        location=[module_loc.id],
    ).create()
    # Create products
    product_a = entities.Product(
        organization=module_org.id,
    ).create()
    product_b = entities.Product(
        organization=module_org.id,
    ).create()
    # Create repositories from UI.
    with session:
        # product_a / repo 1: explicit "No HTTP Proxy".
        repo_a1_name = gen_string('alpha')
        session.repository.create(
            product_a.name,
            {
                'name': repo_a1_name,
                'repo_type': REPO_TYPE['yum'],
                'repo_content.upstream_url': settings.repos.yum_0.url,
                'repo_content.http_proxy_policy': 'No HTTP Proxy',
            },
        )
        repo_a1_values = session.repository.read(product_a.name, repo_a1_name)
        assert repo_a1_values['repo_content']['http_proxy_policy'] == 'No HTTP Proxy'
        # product_a / repo 2: pinned to proxy ``a``.
        repo_a2_name = gen_string('alpha')
        session.repository.create(
            product_a.name,
            {
                'name': repo_a2_name,
                'repo_type': REPO_TYPE['yum'],
                'repo_content.upstream_url': settings.repos.yum_1.url,
                'repo_content.http_proxy_policy': 'Use specific HTTP Proxy',
                'repo_content.proxy_policy.http_proxy': http_proxy_a.name,
            },
        )
        repo_a2_values = session.repository.read(product_a.name, repo_a2_name)
        expected_policy = f'Use specific HTTP Proxy ({http_proxy_a.name})'
        assert repo_a2_values['repo_content']['http_proxy_policy'] == expected_policy
        # product_b / repo 1: inherits the global default policy.
        repo_b1_name = gen_string('alpha')
        session.repository.create(
            product_b.name,
            {
                'name': repo_b1_name,
                'repo_type': REPO_TYPE['yum'],
                'repo_content.upstream_url': settings.repos.yum_0.url,
                'repo_content.http_proxy_policy': 'Global Default',
            },
        )
        repo_b1_values = session.repository.read(product_b.name, repo_b1_name)
        assert 'Global Default' in repo_b1_values['repo_content']['http_proxy_policy']
        # product_b / repo 2: explicit "No HTTP Proxy".
        repo_b2_name = gen_string('alpha')
        session.repository.create(
            product_b.name,
            {
                'name': repo_b2_name,
                'repo_type': REPO_TYPE['yum'],
                'repo_content.upstream_url': settings.repos.yum_1.url,
                'repo_content.http_proxy_policy': 'No HTTP Proxy',
            },
        )
        # Add http_proxy to products
        session.product.search('')
        session.product.manage_http_proxy(
            [product_a.name, product_b.name],
            {
                'http_proxy_policy': 'Use specific HTTP Proxy',
                'proxy_policy.http_proxy': http_proxy_b.name,
            },
        )
        # Verify that Http Proxy is updated for all repos of product_a and product_b.
        proxy_policy = 'Use specific HTTP Proxy ({})'
        repo_a1_values = session.repository.read(product_a.name, repo_a1_name)
        assert repo_a1_values['repo_content']['http_proxy_policy'] == proxy_policy.format(
            http_proxy_b.name
        )
        repo_a2_values = session.repository.read(product_a.name, repo_a2_name)
        assert repo_a2_values['repo_content']['http_proxy_policy'] == proxy_policy.format(
            http_proxy_b.name
        )
        repo_b1_values = session.repository.read(product_b.name, repo_b1_name)
        assert repo_b1_values['repo_content']['http_proxy_policy'] == proxy_policy.format(
            http_proxy_b.name
        )
        repo_b2_values = session.repository.read(product_b.name, repo_b2_name)
        assert repo_b2_values['repo_content']['http_proxy_policy'] == proxy_policy.format(
            http_proxy_b.name
        )
| lpramuk/robottelo | tests/foreman/ui/test_http_proxy.py | Python | gpl-3.0 | 7,372 |
__author__ = 'Madison'
from flask import Flask
app = Flask(__name__)
# Imported last (for its side effects) so that the views module can import
# the ``app`` object above without a circular-import failure -- the standard
# small-Flask-app layout.
from app import views
#
# print 'i am doing something'
# from flask import Flask
# # import app.config
# # import os.path
# # import app.db_controller
#
# print 'dir of flask: ', dir(Flask)
#
# #
# # if not os.path.isfile(config.DATABASE_LOC): # create DB if needed
# # print 'No DB present. Creating DB file '+ config.DATABASE_LOC
# # open(config.DATABASE_LOC, 'a').close()
# # db_controller.create_table()
#
# app = Flask(__name__, static_url_path='')
# # from app import views, db_controller, config
| jakemadison/v2 | app/__init__.py | Python | mit | 591 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Third in a series of four pipelines that tell a story in a 'gaming' domain.
Concepts include: processing unbounded data using fixed windows; use of custom
timestamps and event-time processing; generation of early/speculative results;
using AccumulationMode.ACCUMULATING to do cumulative processing of late-arriving
data.
This pipeline processes an unbounded stream of 'game events'. The calculation of
the team scores uses fixed windowing based on event time (the time of the game
play event), not processing time (the time that an event is processed by the
pipeline). The pipeline calculates the sum of scores per team, for each window.
By default, the team scores are calculated using one-hour windows.
In contrast-- to demo another windowing option-- the user scores are calculated
using a global window, which periodically (every ten minutes) emits cumulative
user score sums.
In contrast to the previous pipelines in the series, which used static, finite
input data, here we're using an unbounded data source, which lets us provide
speculative results, and allows handling of late data, at much lower latency.
We can use the early/speculative results to keep a 'leaderboard' updated in
near-realtime. Our handling of late data lets us generate correct results,
e.g. for 'team prizes'. We're now outputting window results as they're
calculated, giving us much lower latency than with the previous batch examples.
Run injector.Injector to generate pubsub data for this pipeline. The Injector
documentation provides more detail on how to do this. The injector is currently
implemented in Java only, it can be used from the Java SDK.
The PubSub topic you specify should be the same topic to which the Injector is
publishing.
To run the Java injector:
<beam_root>/examples/java$ mvn compile exec:java \
-Dexec.mainClass=org.apache.beam.examples.complete.game.injector.Injector \
-Dexec.args="$PROJECT_ID $PUBSUB_TOPIC none"
For a description of the usage and options, use -h or --help.
To specify a different runner:
--runner YOUR_RUNNER
NOTE: When specifying a different runner, additional runner-specific options
may have to be passed in as well
EXAMPLES
--------
# DirectRunner
python leader_board.py \
--project $PROJECT_ID \
--topic projects/$PROJECT_ID/topics/$PUBSUB_TOPIC \
--dataset $BIGQUERY_DATASET
# DataflowRunner
python leader_board.py \
--project $PROJECT_ID \
--topic projects/$PROJECT_ID/topics/$PUBSUB_TOPIC \
--dataset $BIGQUERY_DATASET \
--runner DataflowRunner \
--temp_location gs://$BUCKET/user_score/temp
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import logging
import sys
import time
from datetime import datetime
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.transforms import trigger
def timestamp2str(t, fmt='%Y-%m-%d %H:%M:%S.000'):
"""Converts a unix timestamp into a formatted string."""
return datetime.fromtimestamp(t).strftime(fmt)
class ParseGameEventFn(beam.DoFn):
    """Parses the raw game event info into a Python dictionary.

    Each event line has the following format:
      username,teamname,score,timestamp_in_ms,readable_time

    e.g.:
      user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224

    The human-readable time string is not used here.
    """
    def __init__(self):
        super(ParseGameEventFn, self).__init__()
        # Beam metric counting input lines that failed to parse.
        self.num_parse_errors = Metrics.counter(self.__class__, 'num_parse_errors')

    def process(self, elem):
        try:
            row = list(csv.reader([elem]))[0]
            yield {
                'user': row[0],
                'team': row[1],
                'score': int(row[2]),
                # Milliseconds since the epoch -> float seconds.
                'timestamp': int(row[3]) / 1000.0,
            }
        except:  # pylint: disable=bare-except
            # Log and count parse errors; malformed lines are dropped rather
            # than failing the pipeline.
            self.num_parse_errors.inc()
            logging.error('Parse error on "%s"', elem)
class ExtractAndSumScore(beam.PTransform):
    """A transform to extract key/score information and sum the scores.

    The constructor argument `field` determines whether 'team' or 'user' info is
    extracted.
    """
    def __init__(self, field):
        super(ExtractAndSumScore, self).__init__()
        # Either 'team' or 'user'; selects which event attribute becomes
        # the grouping key.
        self.field = field

    def expand(self, pcoll):
        # (key, score) pairs, then a per-key sum within the active windowing.
        return (pcoll
                | beam.Map(lambda elem: (elem[self.field], elem['score']))
                | beam.CombinePerKey(sum))
class TeamScoresDict(beam.DoFn):
  """Formats the data into a dictionary of BigQuery columns with their values
  Receives a (team, score) pair, extracts the window start timestamp, and
  formats everything together into a dictionary. The dictionary is in the format
  {'bigquery_column': value}
  """
  def process(self, team_score, window=beam.DoFn.WindowParam):
    team, score = team_score
    # Window start is an epoch timestamp; render it in the same string
    # format used for the BigQuery 'window_start' column.
    start = timestamp2str(int(window.start))
    yield {
        'team': team,
        'total_score': score,
        'window_start': start,
        # Wall-clock time this row was produced (not event time).
        'processing_time': timestamp2str(int(time.time()))
    }
class WriteToBigQuery(beam.PTransform):
  """Generate, format, and write BigQuery table row information."""
  def __init__(self, table_name, dataset, schema, project):
    """Initializes the transform.
    Args:
      table_name: Name of the BigQuery table to use.
      dataset: Name of the dataset to use.
      schema: Dictionary in the format {'column_name': 'bigquery_type'}
      project: Name of the Cloud project containing BigQuery table.
    """
    super(WriteToBigQuery, self).__init__()
    self.table_name = table_name
    self.dataset = dataset
    self.schema = schema
    self.project = project
  def get_schema(self):
    """Build the output table schema."""
    # Rendered as 'name:TYPE, name:TYPE, ...' as expected by
    # beam.io.WriteToBigQuery's schema string form.
    return ', '.join(
        '%s:%s' % (col, self.schema[col]) for col in self.schema)
  def expand(self, pcoll):
    return (
        pcoll
        # Keep only the columns declared in the schema.
        | 'ConvertToRow' >> beam.Map(
            lambda elem: {col: elem[col] for col in self.schema})
        | beam.io.WriteToBigQuery(
            self.table_name, self.dataset, self.project, self.get_schema()))
# [START window_and_trigger]
class CalculateTeamScores(beam.PTransform):
  """Calculates scores for each team within the configured window duration.
  Extract team/score pairs from the event stream, using hour-long windows by
  default.
  """
  def __init__(self, team_window_duration, allowed_lateness):
    super(CalculateTeamScores, self).__init__()
    # Both durations arrive in minutes; Beam windowing works in seconds.
    self.team_window_duration = team_window_duration * 60
    self.allowed_lateness_seconds = allowed_lateness * 60
  def expand(self, pcoll):
    # NOTE: the behavior does not exactly match the Java example
    # TODO: allowed_lateness not implemented yet in FixedWindows
    # TODO: AfterProcessingTime not implemented yet, replace AfterCount
    return (
        pcoll
        # We will get early (speculative) results as well as cumulative
        # processing of late data.
        | 'LeaderboardTeamFixedWindows' >> beam.WindowInto(
            beam.window.FixedWindows(self.team_window_duration),
            trigger=trigger.AfterWatermark(trigger.AfterCount(10),
                                           trigger.AfterCount(20)),
            accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
        # Extract and sum teamname/score pairs from the event data.
        | 'ExtractAndSumScore' >> ExtractAndSumScore('team'))
# [END window_and_trigger]
# [START processing_time_trigger]
class CalculateUserScores(beam.PTransform):
  """Extract user/score pairs from the event stream using processing time, via
  global windowing. Get periodic updates on all users' running scores.
  """
  def __init__(self, allowed_lateness):
    super(CalculateUserScores, self).__init__()
    # Minutes -> seconds; currently unused below (see TODOs in expand).
    self.allowed_lateness_seconds = allowed_lateness * 60
  def expand(self, pcoll):
    # NOTE: the behavior does not exactly match the Java example
    # TODO: allowed_lateness not implemented yet in FixedWindows
    # TODO: AfterProcessingTime not implemented yet, replace AfterCount
    return (
        pcoll
        # Get periodic results every ten events.
        | 'LeaderboardUserGlobalWindows' >> beam.WindowInto(
            beam.window.GlobalWindows(),
            trigger=trigger.Repeatedly(trigger.AfterCount(10)),
            accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
        # Extract and sum username/score pairs from the event data.
        | 'ExtractAndSumScore' >> ExtractAndSumScore('user'))
# [END processing_time_trigger]
def run(argv=None):
  """Main entry point; defines and runs the hourly_team_score pipeline."""
  parser = argparse.ArgumentParser()
  # Events come from either a topic or an existing subscription; exactly
  # one of the two must be supplied (validated after parsing).
  parser.add_argument('--topic',
                      type=str,
                      help='Pub/Sub topic to read from')
  parser.add_argument('--subscription',
                      type=str,
                      help='Pub/Sub subscription to read from')
  parser.add_argument('--dataset',
                      type=str,
                      required=True,
                      help='BigQuery Dataset to write tables to. '
                      'Must already exist.')
  parser.add_argument('--table_name',
                      default='leader_board',
                      help='The BigQuery table name. Should not already exist.')
  parser.add_argument('--team_window_duration',
                      type=int,
                      default=60,
                      help='Numeric value of fixed window duration for team '
                      'analysis, in minutes')
  parser.add_argument('--allowed_lateness',
                      type=int,
                      default=120,
                      help='Numeric value of allowed data lateness, in minutes')
  args, pipeline_args = parser.parse_known_args(argv)
  if args.topic is None and args.subscription is None:
    parser.print_usage()
    print(sys.argv[0] + ': error: one of --topic or --subscription is required')
    sys.exit(1)
  options = PipelineOptions(pipeline_args)
  # We also require the --project option to access --dataset
  if options.view_as(GoogleCloudOptions).project is None:
    parser.print_usage()
    print(sys.argv[0] + ': error: argument --project is required')
    sys.exit(1)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  options.view_as(SetupOptions).save_main_session = True
  # Enforce that this pipeline is always run in streaming mode
  options.view_as(StandardOptions).streaming = True
  with beam.Pipeline(options=options) as p:
    # Read game events from Pub/Sub using custom timestamps, which are extracted
    # from the pubsub data elements, and parse the data.
    # Read from PubSub into a PCollection.
    if args.subscription:
      scores = p | 'ReadPubSub' >> beam.io.ReadFromPubSub(
          subscription=args.subscription)
    else:
      scores = p | 'ReadPubSub' >> beam.io.ReadFromPubSub(
          topic=args.topic)
    events = (
        scores
        | 'ParseGameEventFn' >> beam.ParDo(ParseGameEventFn())
        # Stamp each element with its embedded event time so windowing
        # operates on event time rather than ingestion time.
        | 'AddEventTimestamps' >> beam.Map(
            lambda elem: beam.window.TimestampedValue(elem, elem['timestamp'])))
    # Get team scores and write the results to BigQuery
    (events  # pylint: disable=expression-not-assigned
     | 'CalculateTeamScores' >> CalculateTeamScores(
         args.team_window_duration, args.allowed_lateness)
     | 'TeamScoresDict' >> beam.ParDo(TeamScoresDict())
     | 'WriteTeamScoreSums' >> WriteToBigQuery(
         args.table_name + '_teams', args.dataset, {
             'team': 'STRING',
             'total_score': 'INTEGER',
             'window_start': 'STRING',
             'processing_time': 'STRING',
         }, options.view_as(GoogleCloudOptions).project))
    def format_user_score_sums(user_score):
      # (user, score) tuple -> BigQuery row dict.
      (user, score) = user_score
      return {'user': user, 'total_score': score}
    # Get user scores and write the results to BigQuery
    (events  # pylint: disable=expression-not-assigned
     | 'CalculateUserScores' >> CalculateUserScores(args.allowed_lateness)
     | 'FormatUserScoreSums' >> beam.Map(format_user_score_sums)
     | 'WriteUserScoreSums' >> WriteToBigQuery(
         args.table_name + '_users', args.dataset, {
             'user': 'STRING',
             'total_score': 'INTEGER',
         }, options.view_as(GoogleCloudOptions).project))
if __name__ == '__main__':
  # Surface INFO-level pipeline logs when run as a script.
  logging.getLogger().setLevel(logging.INFO)
  run()
| rangadi/beam | sdks/python/apache_beam/examples/complete/game/leader_board.py | Python | apache-2.0 | 13,517 |
import numpy as np
from numpy import linalg as la
from numpy import testing as np_testing
from pymanopt.manifolds import FixedRankEmbedded
from .._test import TestCase
class TestFixedRankEmbeddedManifold(TestCase):
    """Unit tests for the FixedRankEmbedded manifold of m x n rank-k matrices.

    A point is a triple (u, s, vt) with u of shape (m, k), s of shape (k,)
    and vt of shape (k, n) (see test_rand); a tangent vector is a triple of
    shapes (m, k), (k, k), (n, k) (see test_randvec).
    """
    def setUp(self):
        # Shared problem size for every test: 10 x 5 matrices of rank 3.
        self.m = m = 10
        self.n = n = 5
        self.k = k = 3
        self.man = FixedRankEmbedded(m, n, k)
    def test_dim(self):
        assert self.man.dim == (self.m + self.n - self.k) * self.k
    def test_typicaldist(self):
        assert self.man.dim == self.man.typicaldist
    def test_dist(self):
        e = self.man
        a = e.rand()
        x = e.randvec(a)
        y = e.randvec(a)
        # Geodesic distance is not implemented for this manifold.
        with self.assertRaises(NotImplementedError):
            e.dist(x, y)
    def test_inner(self):
        e = self.man
        x = e.rand()
        a = e.randvec(x)
        b = e.randvec(x)
        # First embed in the ambient space
        A = x[0] @ a[1] @ x[2] + a[0] @ x[2] + x[0] @ a[2].T
        B = x[0] @ b[1] @ x[2] + b[0] @ x[2] + x[0] @ b[2].T
        trueinner = np.sum(A * B)
        np_testing.assert_almost_equal(trueinner, e.inner(x, a, b))
    def test_proj_range(self):
        m = self.man
        x = m.rand()
        v = np.random.randn(self.m, self.n)
        g = m.proj(x, v)
        # Check that g is a true tangent vector
        np_testing.assert_allclose(
            g[0].T @ x[0], np.zeros((self.k, self.k)), atol=1e-6
        )
        np_testing.assert_allclose(
            g[2].T @ x[2].T, np.zeros((self.k, self.k)), atol=1e-6
        )
    def test_proj(self):
        # Verify that proj gives the closest point within the tangent space
        # by displacing the result slightly and checking that this increases
        # the distance.
        m = self.man
        x = self.man.rand()
        v = np.random.randn(self.m, self.n)
        g = m.proj(x, v)
        # Displace g a little
        g_disp = g + 0.01 * m.randvec(x)
        # Return to the ambient representation
        g = m.tangent2ambient(x, g)
        g_disp = m.tangent2ambient(x, g_disp)
        g = g[0] @ g[1] @ g[2].T
        g_disp = g_disp[0] @ g_disp[1] @ g_disp[2].T
        assert np.linalg.norm(g - v) < np.linalg.norm(g_disp - v)
    def test_proj_tangents(self):
        # Verify that proj leaves tangent vectors unchanged
        e = self.man
        x = e.rand()
        u = e.randvec(x)
        A = e.proj(x, e.tangent2ambient(x, u))
        B = u
        # diff = [A[k]-B[k] for k in range(len(A))]
        np_testing.assert_allclose(A[0], B[0])
        np_testing.assert_allclose(A[1], B[1])
        np_testing.assert_allclose(A[2], B[2])
    def test_norm(self):
        # norm must agree with the inner product it is induced by.
        e = self.man
        x = e.rand()
        u = e.randvec(x)
        np_testing.assert_almost_equal(np.sqrt(e.inner(x, u, u)), e.norm(x, u))
    def test_rand(self):
        # Random points have the documented (u, s, vt) shapes, orthonormal
        # factors, and two draws differ.
        e = self.man
        x = e.rand()
        y = e.rand()
        assert np.shape(x[0]) == (self.m, self.k)
        assert np.shape(x[1]) == (self.k,)
        assert np.shape(x[2]) == (self.k, self.n)
        np_testing.assert_allclose(x[0].T @ x[0], np.eye(self.k), atol=1e-6)
        np_testing.assert_allclose(x[2] @ x[2].T, np.eye(self.k), atol=1e-6)
        assert la.norm(x[0] - y[0]) > 1e-6
        assert la.norm(x[1] - y[1]) > 1e-6
        assert la.norm(x[2] - y[2]) > 1e-6
    def test_transp(self):
        # Transport is projection of the ambient representation onto the
        # destination tangent space.
        s = self.man
        x = s.rand()
        y = s.rand()
        u = s.randvec(x)
        A = s.transp(x, y, u)
        B = s.proj(y, s.tangent2ambient(x, u))
        diff = [A[k] - B[k] for k in range(len(A))]
        np_testing.assert_almost_equal(s.norm(y, diff), 0)
    def test_apply_ambient(self):
        # _apply_ambient accepts both a dense matrix and its (u, s, v) SVD
        # factors and must produce the same product.
        m = self.man
        z = np.random.randn(self.m, self.n)
        # Set u, s, v so that z = u @ s @ v.T
        u, s, v = np.linalg.svd(z, full_matrices=False)
        s = np.diag(s)
        v = v.T
        w = np.random.randn(self.n, self.n)
        np_testing.assert_allclose(z @ w, m._apply_ambient(z, w))
        np_testing.assert_allclose(z @ w, m._apply_ambient((u, s, v), w))
    def test_apply_ambient_transpose(self):
        m = self.man
        z = np.random.randn(self.n, self.m)
        # Set u, s, v so that z = u @ s @ v.T
        u, s, v = np.linalg.svd(z, full_matrices=False)
        s = np.diag(s)
        v = v.T
        w = np.random.randn(self.n, self.n)
        np_testing.assert_allclose(z.T @ w, m._apply_ambient_transpose(z, w))
        np_testing.assert_allclose(
            z.T @ w, m._apply_ambient_transpose((u, s, v), w)
        )
    def test_tangent2ambient(self):
        # The (u, s, v) factors returned must reproduce the dense ambient
        # embedding of the tangent vector.
        m = self.man
        x = m.rand()
        z = m.randvec(x)
        z_ambient = x[0] @ z[1] @ x[2] + z[0] @ x[2] + x[0] @ z[2].T
        u, s, v = m.tangent2ambient(x, z)
        np_testing.assert_allclose(z_ambient, u @ s @ v.T)
    def test_ehess2rhess(self):
        # Not covered yet.
        pass
    def test_retr(self):
        # Test that the result is on the manifold and that for small
        # tangent vectors it has little effect.
        x = self.man.rand()
        u = self.man.randvec(x)
        y = self.man.retr(x, u)
        np_testing.assert_allclose(y[0].T @ y[0], np.eye(self.k), atol=1e-6)
        np_testing.assert_allclose(y[2] @ y[2].T, np.eye(self.k), atol=1e-6)
        u = u * 1e-6
        y = self.man.retr(x, u)
        y = y[0] @ np.diag(y[1]) @ y[2]
        u = self.man.tangent2ambient(x, u)
        u = u[0] @ u[1] @ u[2].T
        x = x[0] @ np.diag(x[1]) @ x[2]
        np_testing.assert_allclose(y, x + u, atol=1e-5)
    def test_egrad2rgrad(self):
        # Verify that egrad2rgrad and proj are equivalent.
        m = self.man
        x = m.rand()
        u, s, vt = x
        i = np.eye(self.k)
        f = 1 / (s[..., np.newaxis, :] ** 2 - s[..., :, np.newaxis] ** 2 + i)
        du = np.random.randn(self.m, self.k)
        ds = np.random.randn(self.k)
        dvt = np.random.randn(self.k, self.n)
        Up = (np.eye(self.m) - u @ u.T) @ du @ np.linalg.inv(np.diag(s))
        M = (
            f * (u.T @ du - du.T @ u) @ np.diag(s)
            + np.diag(s) @ f * (vt @ dvt.T - dvt @ vt.T)
            + np.diag(ds)
        )
        Vp = (np.eye(self.n) - vt.T @ vt) @ dvt.T @ np.linalg.inv(np.diag(s))
        # NOTE(review): this rebinds `m` (the manifold) to the middle factor
        # of the result; harmless since the manifold is not used afterwards,
        # but a distinct name would be clearer.
        up, m, vp = m.egrad2rgrad(x, (du, ds, dvt))
        np_testing.assert_allclose(Up, up)
        np_testing.assert_allclose(M, m)
        np_testing.assert_allclose(Vp, vp)
    def test_randvec(self):
        e = self.man
        x = e.rand()
        u = e.randvec(x)
        # Check that u is a tangent vector
        assert np.shape(u[0]) == (self.m, self.k)
        assert np.shape(u[1]) == (self.k, self.k)
        assert np.shape(u[2]) == (self.n, self.k)
        np_testing.assert_allclose(
            u[0].T @ x[0], np.zeros((self.k, self.k)), atol=1e-6
        )
        np_testing.assert_allclose(
            u[2].T @ x[2].T, np.zeros((self.k, self.k)), atol=1e-6
        )
        v = e.randvec(x)
        # Random tangent vectors are unit norm and two draws differ.
        np_testing.assert_almost_equal(e.norm(x, u), 1)
        assert e.norm(x, u - v) > 1e-6
| pymanopt/pymanopt | tests/test_manifolds/test_fixed_rank.py | Python | bsd-3-clause | 7,028 |
# -*- coding: utf-8 -*-
"""
celery.local
~~~~~~~~~~~~
This module contains critical utilities that
needs to be loaded as soon as possible, and that
shall not load any third party modules.
Parts of this module is Copyright by Werkzeug Team.
"""
from __future__ import absolute_import
import importlib
import sys
from .five import string
__all__ = ['Proxy', 'PromiseProxy', 'try_import', 'maybe_evaluate']
__module__ = __name__ # used by Proxy class body
PY3 = sys.version_info[0] == 3
def _default_cls_attr(name, type_, cls_value):
    # Proxy uses properties to forward the standard
    # class attributes __module__, __name__ and __doc__ to the real
    # object, but these needs to be a string when accessed from
    # the Proxy class directly. This is a hack to make that work.
    # -- See Issue #1087.
    def __new__(cls, getter):
        # Instances subclass ``type_`` carrying ``cls_value`` as their own
        # value, so class-level access behaves like a plain string.
        instance = type_.__new__(cls, cls_value)
        instance.__getter = getter
        return instance
    def __get__(self, obj, cls=None):
        # Descriptor protocol: on an instance, delegate to the stored
        # getter; on the class itself, return the ``cls_value`` wrapper.
        return self.__getter(obj) if obj is not None else self
    return type(name, (type_, ), {
        '__new__': __new__, '__get__': __get__,
    })
def try_import(module, default=None):
    """Try to import and return module, or return
    None if the module does not exist."""
    try:
        imported = importlib.import_module(module)
    except ImportError:
        return default
    return imported
class Proxy(object):
    """Proxy to another object."""
    # Code stolen from werkzeug.local.Proxy.
    __slots__ = ('__local', '__args', '__kwargs', '__dict__')
    def __init__(self, local,
                 args=None, kwargs=None, name=None, __doc__=None):
        # ``local`` is either a factory callable (called with args/kwargs)
        # or an object exposing ``__release_local__``; see
        # _get_current_object below.  object.__setattr__ is used because
        # __setattr__ on this class forwards to the proxied object.
        object.__setattr__(self, '_Proxy__local', local)
        object.__setattr__(self, '_Proxy__args', args or ())
        object.__setattr__(self, '_Proxy__kwargs', kwargs or {})
        if name is not None:
            object.__setattr__(self, '__custom_name__', name)
        if __doc__ is not None:
            object.__setattr__(self, '__doc__', __doc__)
    @_default_cls_attr('name', str, __name__)
    def __name__(self):
        try:
            return self.__custom_name__
        except AttributeError:
            return self._get_current_object().__name__
    @_default_cls_attr('module', str, __module__)
    def __module__(self):
        return self._get_current_object().__module__
    @_default_cls_attr('doc', str, __doc__)
    def __doc__(self):
        return self._get_current_object().__doc__
    def _get_class(self):
        return self._get_current_object().__class__
    @property
    def __class__(self):
        return self._get_class()
    def _get_current_object(self):
        """Return the current object. This is useful if you want the real
        object behind the proxy at a time for performance reasons or because
        you want to pass the object into a different context.
        """
        loc = object.__getattribute__(self, '_Proxy__local')
        if not hasattr(loc, '__release_local__'):
            # Not a werkzeug-style local: treat it as a factory callable.
            return loc(*self.__args, **self.__kwargs)
        try:
            return getattr(loc, self.__name__)
        except AttributeError:
            raise RuntimeError('no object bound to {0.__name__}'.format(self))
    @property
    def __dict__(self):
        try:
            return self._get_current_object().__dict__
        except RuntimeError: # pragma: no cover
            raise AttributeError('__dict__')
    def __repr__(self):
        try:
            obj = self._get_current_object()
        except RuntimeError: # pragma: no cover
            return '<{0} unbound>'.format(self.__class__.__name__)
        return repr(obj)
    def __bool__(self):
        try:
            return bool(self._get_current_object())
        except RuntimeError: # pragma: no cover
            return False
    __nonzero__ = __bool__ # Py2
    def __unicode__(self):
        try:
            return string(self._get_current_object())
        except RuntimeError: # pragma: no cover
            return repr(self)
    def __dir__(self):
        try:
            return dir(self._get_current_object())
        except RuntimeError: # pragma: no cover
            return []
    def __getattr__(self, name):
        if name == '__members__':
            return dir(self._get_current_object())
        return getattr(self._get_current_object(), name)
    def __setitem__(self, key, value):
        self._get_current_object()[key] = value
    def __delitem__(self, key):
        del self._get_current_object()[key]
    def __setslice__(self, i, j, seq):
        self._get_current_object()[i:j] = seq
    def __delslice__(self, i, j):
        del self._get_current_object()[i:j]
    # Special (dunder) methods are looked up on the type, not the instance,
    # so every operator must be forwarded explicitly; __getattr__ alone is
    # not enough.
    __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
    __delattr__ = lambda x, n: delattr(x._get_current_object(), n)
    __str__ = lambda x: str(x._get_current_object())
    __lt__ = lambda x, o: x._get_current_object() < o
    __le__ = lambda x, o: x._get_current_object() <= o
    __eq__ = lambda x, o: x._get_current_object() == o
    __ne__ = lambda x, o: x._get_current_object() != o
    __gt__ = lambda x, o: x._get_current_object() > o
    __ge__ = lambda x, o: x._get_current_object() >= o
    __hash__ = lambda x: hash(x._get_current_object())
    __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
    __len__ = lambda x: len(x._get_current_object())
    __getitem__ = lambda x, i: x._get_current_object()[i]
    __iter__ = lambda x: iter(x._get_current_object())
    __contains__ = lambda x, i: i in x._get_current_object()
    __getslice__ = lambda x, i, j: x._get_current_object()[i:j]
    __add__ = lambda x, o: x._get_current_object() + o
    __sub__ = lambda x, o: x._get_current_object() - o
    __mul__ = lambda x, o: x._get_current_object() * o
    __floordiv__ = lambda x, o: x._get_current_object() // o
    __mod__ = lambda x, o: x._get_current_object() % o
    __divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
    __pow__ = lambda x, o: x._get_current_object() ** o
    __lshift__ = lambda x, o: x._get_current_object() << o
    __rshift__ = lambda x, o: x._get_current_object() >> o
    __and__ = lambda x, o: x._get_current_object() & o
    __xor__ = lambda x, o: x._get_current_object() ^ o
    __or__ = lambda x, o: x._get_current_object() | o
    __div__ = lambda x, o: x._get_current_object().__div__(o)
    __truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
    __neg__ = lambda x: -(x._get_current_object())
    __pos__ = lambda x: +(x._get_current_object())
    __abs__ = lambda x: abs(x._get_current_object())
    __invert__ = lambda x: ~(x._get_current_object())
    __complex__ = lambda x: complex(x._get_current_object())
    __int__ = lambda x: int(x._get_current_object())
    __float__ = lambda x: float(x._get_current_object())
    __oct__ = lambda x: oct(x._get_current_object())
    __hex__ = lambda x: hex(x._get_current_object())
    __index__ = lambda x: x._get_current_object().__index__()
    __coerce__ = lambda x, o: x._get_current_object().__coerce__(o)
    __enter__ = lambda x: x._get_current_object().__enter__()
    __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
    __reduce__ = lambda x: x._get_current_object().__reduce__()
    # Python 2-only protocols.
    if not PY3:
        __cmp__ = lambda x, o: cmp(x._get_current_object(), o) # noqa
        __long__ = lambda x: long(x._get_current_object()) # noqa
class PromiseProxy(Proxy):
    """This is a proxy to an object that has not yet been evaluated.
    :class:`Proxy` will evaluate the object each time, while the
    promise will only evaluate it once.
    """
    def _get_current_object(self):
        # Return the cached object if the promise has already been
        # evaluated; otherwise evaluate (and cache) it now.
        try:
            return object.__getattribute__(self, '__thing')
        except AttributeError:
            return self.__evaluate__()
    def __evaluated__(self):
        # True once the underlying object has been constructed and cached.
        try:
            object.__getattribute__(self, '__thing')
        except AttributeError:
            return False
        return True
    def __maybe_evaluate__(self):
        return self._get_current_object()
    def __evaluate__(self,
                     _clean=('_Proxy__local',
                             '_Proxy__args',
                             '_Proxy__kwargs')):
        try:
            thing = Proxy._get_current_object(self)
            object.__setattr__(self, '__thing', thing)
            return thing
        finally:
            # Drop the factory references so they can be collected; the
            # evaluated object is cached in '__thing' from now on.
            for attr in _clean:
                try:
                    object.__delattr__(self, attr)
                except AttributeError: # pragma: no cover
                    # May mask errors so ignore
                    pass
def maybe_evaluate(obj):
    """Evaluate *obj* when it is a promise; plain objects pass through."""
    try:
        result = obj.__maybe_evaluate__()
    except AttributeError:
        return obj
    return result
| hubert667/AIR | build/celery/celery/local.py | Python | gpl-3.0 | 8,770 |
from mock import patch
from bravado_core.param import cast_request_param
@patch('bravado_core.param.log')
def test_logs_cast_failure(mock_logger):
    """A failed cast is reported exactly once via the module logger."""
    cast_request_param('integer', 'gimme_int', 'not_int')
    warn_calls = mock_logger.warn.call_count
    assert warn_calls == 1
@patch('bravado_core.param.log')
def test_cast_failures_return_untouched_value(mock_logger):
    """When the cast fails, the original value is passed through as-is."""
    original = 'not_int'
    assert cast_request_param('integer', 'gimme_int', original) == original
def test_unknown_type_returns_untouched_value():
    """An unrecognised parameter type leaves the value unmodified."""
    value = 'abc123'
    assert cast_request_param('unknown_type', 'blah', value) == value
def test_none_returns_none():
    """A None value short-circuits: no cast is attempted."""
    result = cast_request_param('integer', 'biz_id', None)
    assert result is None
def test_integer_cast():
    """Numeric strings are converted to int for 'integer' params."""
    assert cast_request_param('integer', 'biz_id', '34') == 34
def test_number_cast():
    """Numeric strings are converted to float for 'number' params."""
    assert cast_request_param('number', 'score', '2.34') == 2.34
def test_empty_string_becomes_none_for_type_integer():
    """An empty string is treated as an absent integer value."""
    result = cast_request_param('integer', 'biz_id', '')
    assert result is None
def test_empty_string_becomes_none_for_type_number():
    """An empty string is treated as an absent number value."""
    result = cast_request_param('number', 'score', '')
    assert result is None
def test_empty_string_becomes_none_for_type_boolean():
    """An empty string is treated as an absent boolean value."""
    result = cast_request_param('boolean', 'is_open', '')
    assert result is None
def test_empty_string_stays_empty_string_for_type_string():
    """Strings are not coerced: an empty string stays an empty string."""
    result = cast_request_param('string', 'address3', '')
    assert result == ''
| analogue/bravado-core | tests/param/cast_request_param_test.py | Python | bsd-3-clause | 1,372 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a dependency on two gyp files with the same name do not create a
uid collision in the resulting generated xcode file.
"""
import TestGyp
# NOTE(review): `sys` appears unused in this script — confirm before removing.
import sys
test = TestGyp.TestGyp()
# Generate from the library's gyp file; the same-named dependency gyp files
# must not produce colliding uids in the generated project.
test.run_gyp('test.gyp', chdir='library')
test.pass_test()
| ibc/MediaSoup | worker/deps/gyp/test/same-gyp-name/gyptest-library.py | Python | isc | 435 |
# stdlib
from collections import defaultdict
from Queue import Empty, Queue
import threading
import time
# project
from checks import AgentCheck
from checks.libs.thread_pool import Pool
from config import _is_affirmative
# Seconds a single instance may run before the pool is considered stuck
# (see NetworkCheck._clean).
TIMEOUT = 180
# Upper bound on the thread-pool size when not overridden via init_config.
DEFAULT_SIZE_POOL = 6
# Safety cap on results drained per collection pass in _process_results.
MAX_LOOP_ITERATIONS = 1000
# Sentinel pushed onto the results queue when a worker raised unexpectedly.
FAILURE = "FAILURE"
class Status:
    """Service states that subclasses' _check() may report."""
    DOWN = "DOWN"
    WARNING = "WARNING"
    CRITICAL = "CRITICAL"
    UP = "UP"
class EventType:
    """Event names emitted on service state transitions."""
    DOWN = "servicecheck.state_change.down"
    UP = "servicecheck.state_change.up"
class NetworkCheck(AgentCheck):
    SOURCE_TYPE_NAME = 'servicecheck'
    SERVICE_CHECK_PREFIX = 'network_check'
    # Map internal Status values onto the agent's service-check constants.
    STATUS_TO_SERVICE_CHECK = {
        Status.UP : AgentCheck.OK,
        Status.WARNING : AgentCheck.WARNING,
        Status.CRITICAL : AgentCheck.CRITICAL,
        Status.DOWN : AgentCheck.CRITICAL,
    }
    """
    Services checks inherits from this class.
    This class should never be directly instanciated.
    Work flow:
    The main agent loop will call the check function for each instance for
    each iteration of the loop.
    The check method will make an asynchronous call to the _process method in
    one of the thread initiated in the thread pool created in this class constructor.
    The _process method will call the _check method of the inherited class
    which will perform the actual check.
    The _check method must return a tuple which first element is either
    Status.UP or Status.DOWN.
    The second element is a short error message that will be displayed
    when the service turns down.
    """
    def __init__(self, name, init_config, agentConfig, instances):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # A dictionary to keep track of service statuses
        self.statuses = {}
        self.notified = {}
        self.nb_failures = 0
        self.pool_started = False
        # Make sure every instance has a name that we use as a unique key
        # to keep track of statuses
        names = []
        for inst in instances:
            if 'name' not in inst:
                raise Exception("All instances should have a 'name' parameter,"
                                " error on instance: {0}".format(inst))
            if inst['name'] in names:
                raise Exception("Duplicate names for instances with name {0}"
                                .format(inst['name']))
            # BUGFIX: previously `names` was never appended to, so the
            # duplicate-name check above could never fire.
            names.append(inst['name'])
    def stop(self):
        self.stop_pool()
        self.pool_started = False
    def start_pool(self):
        # The pool size should be the minimum between the number of instances
        # and the DEFAULT_SIZE_POOL. It can also be overridden by the 'threads_count'
        # parameter in the init_config of the check
        self.log.info("Starting Thread Pool")
        default_size = min(self.instance_count(), DEFAULT_SIZE_POOL)
        self.pool_size = int(self.init_config.get('threads_count', default_size))
        self.pool = Pool(self.pool_size)
        # Workers push (status, msg, sc_name, instance) tuples here.
        self.resultsq = Queue()
        # instance name -> start timestamp of its in-flight job.
        self.jobs_status = {}
        self.pool_started = True
    def stop_pool(self):
        self.log.info("Stopping Thread Pool")
        if self.pool_started:
            self.pool.terminate()
            self.pool.join()
            self.jobs_status.clear()
            assert self.pool.get_nworkers() == 0
    def restart_pool(self):
        self.stop_pool()
        self.start_pool()
    def check(self, instance):
        """Agent entry point: drain previous results, then schedule this
        instance's _check on the pool if it is not already running."""
        if not self.pool_started:
            self.start_pool()
        if threading.activeCount() > 5 * self.pool_size + 5: # On Windows the agent runs on multiple threads so we need to have an offset of 5 in case the pool_size is 1
            raise Exception("Thread number (%s) is exploding. Skipping this check" % threading.activeCount())
        self._process_results()
        self._clean()
        name = instance.get('name', None)
        if name is None:
            self.log.error('Each service check must have a name')
            return
        if name not in self.jobs_status:
            # A given instance should be processed one at a time
            self.jobs_status[name] = time.time()
            self.pool.apply_async(self._process, args=(instance,))
        else:
            self.log.error("Instance: %s skipped because it's already running." % name)
    def _process(self, instance):
        """Worker-side wrapper: run _check and push results on the queue."""
        try:
            statuses = self._check(instance)
            if isinstance(statuses, tuple):
                # Assume the check only returns one service check
                status, msg = statuses
                self.resultsq.put((status, msg, None, instance))
            elif isinstance(statuses, list):
                for status in statuses:
                    sc_name, status, msg = status
                    self.resultsq.put((status, msg, sc_name, instance))
        except Exception:
            # Signal the failure to the main thread; repeated failures
            # trigger a pool restart in _process_results.
            result = (FAILURE, FAILURE, FAILURE, FAILURE)
            self.resultsq.put(result)
    def _process_results(self):
        """Drain the results queue, emit service checks and (deprecated)
        up/down events with window/threshold smoothing."""
        for i in range(MAX_LOOP_ITERATIONS):
            try:
                # We want to fetch the result in a non blocking way
                status, msg, sc_name, instance = self.resultsq.get_nowait()
            except Empty:
                break
            if status == FAILURE:
                self.nb_failures += 1
                if self.nb_failures >= self.pool_size - 1:
                    self.nb_failures = 0
                    self.restart_pool()
                continue
            self.report_as_service_check(sc_name, status, instance, msg)
            # FIXME: 5.3, this has been deprecated before, get rid of events
            # Don't create any event to avoid duplicates with server side
            # service_checks
            skip_event = _is_affirmative(instance.get('skip_event', False))
            instance_name = instance['name']
            if not skip_event:
                self.warning("Using events for service checks is deprecated in favor of monitors and will be removed in future versions of the Datadog Agent.")
                event = None
                if instance_name not in self.statuses:
                    self.statuses[instance_name] = defaultdict(list)
                self.statuses[instance_name][sc_name].append(status)
                window = int(instance.get('window', 1))
                if window > 256:
                    self.log.warning("Maximum window size (256) exceeded, defaulting it to 256")
                    window = 256
                threshold = instance.get('threshold', 1)
                if len(self.statuses[instance_name][sc_name]) > window:
                    self.statuses[instance_name][sc_name].pop(0)
                nb_failures = self.statuses[instance_name][sc_name].count(Status.DOWN)
                if nb_failures >= threshold:
                    if self.notified.get((instance_name, sc_name), Status.UP) != Status.DOWN:
                        event = self._create_status_event(sc_name, status, msg, instance)
                        self.notified[(instance_name, sc_name)] = Status.DOWN
                else:
                    if self.notified.get((instance_name, sc_name), Status.UP) != Status.UP:
                        event = self._create_status_event(sc_name, status, msg, instance)
                        self.notified[(instance_name, sc_name)] = Status.UP
                if event is not None:
                    self.events.append(event)
            # The job is finished here, this instance can be re processed
            if instance_name in self.jobs_status:
                del self.jobs_status[instance_name]
    def _check(self, instance):
        """This function should be implemented by inherited classes"""
        raise NotImplementedError
    def _clean(self):
        # Restart the pool if any job has been running longer than TIMEOUT.
        now = time.time()
        for name in self.jobs_status.keys():
            start_time = self.jobs_status[name]
            if now - start_time > TIMEOUT:
                self.log.critical("Restarting Pool. One check is stuck: %s" % name)
                self.restart_pool()
                break
| amalakar/dd-agent | checks/network_checks.py | Python | bsd-3-clause | 8,122 |
"""Matrix equation solver routines"""
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
# February 24, 2012
import numpy as np
from numpy.linalg import inv, LinAlgError
from basic import solve
from lapack import get_lapack_funcs
from decomp_schur import schur
from special_matrices import kron
__all__ = ['solve_sylvester', 'solve_lyapunov', 'solve_discrete_lyapunov',
'solve_continuous_are', 'solve_discrete_are']
def solve_sylvester(a, b, q):
    """Computes a solution (X) to the Sylvester equation (AX + XB = Q).

    Parameters
    ----------
    a : array, shape (M, M)
        Leading matrix of the Sylvester equation
    b : array, shape (N, N)
        Trailing matrix of the Sylvester equation
    q : array, shape (M, N)
        Right-hand side

    Returns
    -------
    x : array, shape (M, N)
        The solution to the Sylvester equation.

    Raises
    ------
    LinAlgError
        If solution was not found

    Notes
    -----
    Computes a solution to the Sylvester matrix equation via the Bartels-
    Stewart algorithm.  The A and B matrices first undergo Schur
    decompositions.  The resulting matrices are used to construct an
    alternative Sylvester equation (``RY + YS^T = F``) where the R and S
    matrices are in quasi-triangular form (or, when R, S or F are complex,
    triangular form).  The simplified equation is then solved using
    ``*TRSYL`` from LAPACK directly.
    """
    # Compute the Schur decomp form of a
    r, u = schur(a, output='real')
    # Compute the Schur decomp of b
    s, v = schur(b.conj().transpose(), output='real')
    # Construct f = u'*q*v
    f = np.dot(np.dot(u.conj().transpose(), q), v)
    # Call the Sylvester equation solver
    trsyl, = get_lapack_funcs(('trsyl',), (r, s, f))
    # BUGFIX: was `trsyl == None`; identity (`is None`) is the correct test
    # for a missing LAPACK routine handle.
    if trsyl is None:
        raise RuntimeError('LAPACK implementation does not contain a proper Sylvester equation solver (TRSYL)')
    y, scale, info = trsyl(r, s, f, tranb='C')
    # TRSYL returns a scaled solution; undo the scaling.
    y = scale*y
    if info < 0:
        raise LinAlgError("Illegal value encountered in the %d term" % (-info,))
    return np.dot(np.dot(u, y), v.conj().transpose())
def solve_lyapunov(a, q):
    """Solves the continuous Lyapunov equation (AX + XA^H = Q) given the values
    of A and Q using the Bartels-Stewart algorithm.

    Parameters
    ----------
    a : array_like
        A square matrix
    q : array_like
        Right-hand side square matrix

    Returns
    -------
    x : array_like
        Solution to the continuous Lyapunov equation

    Notes
    -----
    Because the continuous Lyapunov equation is just a special form of the
    Sylvester equation, this solver relies entirely on solve_sylvester for a
    solution.

    See Also
    --------
    solve_sylvester : computes the solution to the Sylvester equation
    """
    # AX + XA^H = Q is the Sylvester equation with B = A^H.
    return solve_sylvester(a, a.conj().transpose(), q)
def solve_discrete_lyapunov(a, q):
    """Solves the Discrete Lyapunov Equation (A'XA-X=-Q) directly.

    Parameters
    ----------
    a : array_like
        A square matrix
    q : array_like
        Right-hand side square matrix

    Returns
    -------
    x : array_like
        Solution to the discrete Lyapunov equation

    Notes
    -----
    Vectorizes the equation into a dense linear system and solves it, per
    the direct analytical solution from:
    Hamilton, James D. Time Series Analysis, Princeton: Princeton University
    Press, 1994. 265. Print.
    http://www.scribd.com/doc/20577138/Hamilton-1994-Time-Series-Analysis
    """
    # (I - A (x) conj(A)) vec(X) = vec(Q), where (x) is the Kronecker product.
    coeff = kron(a, a.conj())
    coeff = np.eye(coeff.shape[0]) - coeff
    vec_x = solve(coeff, q.flatten())
    return np.reshape(vec_x, q.shape)
def solve_continuous_are(a, b, q, r):
    """Solves the continuous algebraic Riccati equation, or CARE, defined
    as (A'X + XA - XBR^-1B'X+Q=0) directly using a Schur decomposition
    method.
    Parameters
    ----------
    a : array_like
        m x m square matrix
    b : array_like
        m x n matrix
    q : array_like
        m x m square matrix
    r : array_like
        Non-singular n x n square matrix
    Returns
    -------
    x : array_like
        Solution (m x m) to the continuous algebraic Riccati equation
    Raises
    ------
    ValueError
        If `r` cannot be inverted.
    Notes
    -----
    Method taken from:
    Laub, "A Schur Method for Solving Algebraic Riccati Equations."
    U.S. Energy Research and Development Agency under contract
    ERDA-E(49-18)-2087.
    http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf
    See Also
    --------
    solve_discrete_are : Solves the discrete algebraic Riccati equation
    """
    try:
        g = inv(r)
    except LinAlgError:
        raise ValueError('Matrix R in the algebraic Riccati equation solver is ill-conditioned')
    # G = B R^-1 B'
    g = np.dot(np.dot(b, g), b.conj().transpose())
    # Assemble the 2m x 2m Hamiltonian matrix Z = [[A, -G], [-Q, -A']].
    z11 = a
    z12 = -1.0*g
    z21 = -1.0*q
    z22 = -1.0*a.conj().transpose()
    z = np.vstack((np.hstack((z11, z12)), np.hstack((z21, z22))))
    # Note: we need to sort the upper left of s to have negative real parts,
    # while the lower right is positive real components (Laub, p. 7).
    # The third schur() output (number of sorted eigenvalues) is unused.
    s, u, _ = schur(z, sort='lhp')
    (m, n) = u.shape
    # Floor division keeps the slice bounds integral; the previous ``m/2``
    # yields floats under true division and breaks the indexing.
    u11 = u[0:m//2, 0:n//2]
    u21 = u[m//2:m, 0:n//2]
    # The stabilizing solution is X = U21 * U11^-1.
    u11i = inv(u11)
    return np.dot(u21, u11i)
def solve_discrete_are(a, b, q, r):
    """Solves the discrete algebraic Riccati equation, or DARE, defined as
    (X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q), directly using a Schur decomposition
    method.
    Parameters
    ----------
    a : array_like
        Non-singular m x m square matrix
    b : array_like
        m x n matrix
    q : array_like
        m x m square matrix
    r : array_like
        Non-singular n x n square matrix
    Returns
    -------
    x : array_like
        Solution (m x m) to the discrete algebraic Riccati equation
    Raises
    ------
    ValueError
        If `r` or `a` cannot be inverted.
    Notes
    -----
    Method taken from:
    Laub, "A Schur Method for Solving Algebraic Riccati Equations."
    U.S. Energy Research and Development Agency under contract
    ERDA-E(49-18)-2087.
    http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf
    See Also
    --------
    solve_continuous_are : Solves the continuous algebraic Riccati equation
    """
    try:
        g = inv(r)
    except LinAlgError:
        raise ValueError('Matrix R in the algebraic Riccati equation solver is ill-conditioned')
    # G = B R^-1 B'
    g = np.dot(np.dot(b, g), b.conj().transpose())
    try:
        ait = inv(a).conj().transpose()  # ait is "A inverse transpose"
    except LinAlgError:
        raise ValueError('Matrix A in the algebraic Riccati equation solver is ill-conditioned')
    # Assemble the 2m x 2m symplectic pencil matrix (Laub's Z matrix).
    z11 = a+np.dot(np.dot(g, ait), q)
    z12 = -1.0*np.dot(g, ait)
    z21 = -1.0*np.dot(ait, q)
    z22 = ait
    z = np.vstack((np.hstack((z11, z12)), np.hstack((z21, z22))))
    # Note: we need to sort the upper left of s to lie within the unit circle,
    # while the lower right is outside (Laub, p. 7).
    # The third schur() output (number of sorted eigenvalues) is unused.
    s, u, _ = schur(z, sort='iuc')
    (m, n) = u.shape
    # Floor division keeps the slice bounds integral; the previous ``m/2``
    # yields floats under true division and breaks the indexing.
    u11 = u[0:m//2, 0:n//2]
    u21 = u[m//2:m, 0:n//2]
    # The stabilizing solution is X = U21 * U11^-1.
    u11i = inv(u11)
    return np.dot(u21, u11i)
| teoliphant/scipy | scipy/linalg/_solvers.py | Python | bsd-3-clause | 7,110 |
#!/usr/bin/python2
from info import __version__, __desc__
| leosartaj/tvstats | tvstats/__init__.py | Python | mit | 59 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Pipeline Example.
"""
# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF, Tokenizer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Create (or reuse) a SparkSession -- the entry point for DataFrame
    # and ML pipeline APIs.
    spark = SparkSession\
        .builder\
        .appName("PipelineExample")\
        .getOrCreate()
    # $example on$
    # Prepare training documents from a list of (id, text, label) tuples.
    training = spark.createDataFrame([
        (0, "a b c d e spark", 1.0),
        (1, "b d", 0.0),
        (2, "spark f g h", 1.0),
        (3, "hadoop mapreduce", 0.0)
    ], ["id", "text", "label"])
    # Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.
    # Tokenizer splits "text" into words, HashingTF turns words into
    # term-frequency feature vectors, and LogisticRegression fits on them.
    tokenizer = Tokenizer(inputCol="text", outputCol="words")
    hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
    lr = LogisticRegression(maxIter=10, regParam=0.001)
    pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
    # Fit the pipeline to training documents.
    model = pipeline.fit(training)
    # Prepare test documents, which are unlabeled (id, text) tuples.
    test = spark.createDataFrame([
        (4, "spark i j k"),
        (5, "l m n"),
        (6, "spark hadoop spark"),
        (7, "apache hadoop")
    ], ["id", "text"])
    # Make predictions on test documents and print columns of interest.
    prediction = model.transform(test)
    selected = prediction.select("id", "text", "probability", "prediction")
    for row in selected.collect():
        rid, text, prob, prediction = row # type: ignore
        print(
            "(%d, %s) --> prob=%s, prediction=%f" % (
                rid, text, str(prob), prediction # type: ignore
            )
        )
    # $example off$
    # Release cluster resources before the script exits.
    spark.stop()
| mahak/spark | examples/src/main/python/ml/pipeline_example.py | Python | apache-2.0 | 2,607 |
# -*- coding: utf-8 -*-
# Built-in defaults for the gold-digger service; deployments are expected
# to override these values with environment-specific settings.
DEFAULT_CONFIG_PARAMS = {
    "development_mode": True,
    # PostgreSQL connection parameters.
    "database": {
        "user": "postgres",
        "pass": "matusjeuzasny",
        "name": "gold-digger",
        "host": "127.0.0.1",
        "port": "5432",
        "dialect": "postgres"
    },
    # Graylog endpoint used for centralized logging.
    "graylog": {
        "address": "logs.roihunter.com",
        "port": 12211,
    },
    # Currency codes (ISO 4217 plus some historic and crypto codes) that
    # the service will accept.
    "supported_currencies": {
        "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "ATS", "AUD", "AWG", "AZN", "BAM", "BBD",
        "BDT", "BEF", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BRL", "BSD", "BTC", "BTN", "BWP",
        "BYR", "BZD", "CAD", "CDF", "CHF", "CLF", "CLP", "CNH", "CNY", "COP", "CRC", "CUC", "CUP",
        "CVE", "CYP", "CZK", "DEM", "DJF", "DKK", "DOP", "DZD", "EEK", "EGP", "ERN", "ESP", "ETB",
        "EUR", "FIM", "FJD", "FKP", "FRF", "GBP", "GEL", "GGP", "GHS", "GIP", "GMD", "GNF", "GRD",
        "GTQ", "GYD", "HKD", "HNL", "HRK", "HTG", "HUF", "IDR", "IEP", "ILS", "IMP", "INR", "IQD",
        "IRR", "ISK", "ITL", "JEP", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF", "KPW", "KRW",
        "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL", "LTL", "LUF", "LVL", "LYD", "MAD",
        "MCF", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MTL", "MUR", "MVR", "MWK", "MXN",
        "MYR", "MZN", "NAD", "NGN", "NIO", "NLG", "NOK", "NPR", "NZD", "OMR", "PAB", "PEN", "PGK",
        "PHP", "PKR", "PLN", "PTE", "PYG", "QAR", "RON", "RSD", "RUB", "RWF", "SAR", "SBD", "SCR",
        "SDG", "SEK", "SGD", "SHP", "SIT", "SKK", "SLL", "SML", "SOS", "SRD", "STD", "SYP",
        "SZL", "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", "UAH", "UGX", "USD",
        "UYU", "UZS", "VAL", "VEB", "VEF", "VND", "VUV", "WST", "XAF", "XAG", "XAU", "XCD", "XCP",
        "XDR", "XOF", "XPD", "XPF", "XPT", "YER", "ZAR", "ZMK", "ZMW", "ZWL"
    },
    # Per-provider API credentials.
    # NOTE(review): list("supported_currencies") builds a list of the
    # characters of the literal string, not the currency set defined above --
    # this looks unintended; confirm the expected secret format.
    "secrets": {
        "currency_layer": list("supported_currencies")
    },
}
| dorotapalicova/GoldDigger | gold_digger/config/params.py | Python | apache-2.0 | 1,943 |
from django.apps import AppConfig
class FluentAppConfig(AppConfig):
    """App configuration that wires up fluent's request signal handlers."""
    name = "fluent"
    def ready(self):
        # Imports are deferred until the app registry is fully populated,
        # as Django recommends for AppConfig.ready().
        from django.core.signals import request_finished, request_started
        from fluent.trans import ensure_threads_join, invalidate_caches_if_necessary
        hooks = (
            (request_finished, ensure_threads_join, "fluent.ensure_threads_join"),
            (request_started, invalidate_caches_if_necessary, "fluent.invalidate_caches_if_necessary"),
        )
        # dispatch_uid keeps each receiver registered at most once even if
        # ready() runs multiple times.
        for signal, receiver, uid in hooks:
            signal.connect(receiver, dispatch_uid=uid)
#!/usr/bin/python
import pygame as pg
from pygame.locals import *
from constantes_PunchinBall import *
# from constantesDataStream import *
import os
import sys
import signal
from subprocess import Popen, PIPE
from subprocess import call
from threading import Thread
from sys import platform
from tempfile import TemporaryFile
from requests import *
import datetime
from functions import *
'''background'''
# Main window surface plus the scaled boxing-ring background ("fond").
screen = pg.display.set_mode((w_display, h_display), RESIZABLE)
fond = pg.image.load(image_ring).convert()
fond = pg.transform.scale( fond, (w_display, h_display))
'''Punching ball'''
# All sprite sizes below are fractions of the window size.
punchBall = pg.image.load(punchBallImage)
punchBall = pg.transform.scale(punchBall, (int(math.floor(0.244 * w_display)), int(math.floor(0.78*h_display))))
'''Score Bar'''
scoreBar = pg.image.load(levels_images[level]).convert_alpha()
scoreBar = pg.transform.scale(scoreBar, (int(math.floor(0.088*w_display)), int(math.floor(0.69*h_display))))
scoreBar = pg.transform.rotate(scoreBar, -90)
# test =pg.transform.scale(scoreBar, (90, 400))
'''Winner image'''
winImg = pg.image.load(winImg).convert_alpha()
winImg = pg.transform.scale(winImg, (int(math.floor(0.68*w_display)), int(math.floor(0.76*h_display))))
# punchBall = punch.set_colorkey((255,255,255))
'''Score digit '''
scoreTxt = pg.image.load(image_score)
scoreTxt = pg.transform.scale(scoreTxt, (int(math.floor(0.15*w_display)), int(math.floor(h_display*0.087))))
scoreDigit = pg.image.load(scoreDigitImages[0])
scoreDigit = pg.transform.scale(scoreDigit, (int(math.floor(0.068*w_display)), int(math.floor(0.156*h_display))))
'''Fly game'''
sky = pg.image.load(skyImage).convert()
sky = pg.transform.scale(sky, (w_display, h_display))
# cloud = pg.image.load(cloudImage).convert()
# cloud = pg.image.transform(cloud, ())
plane = pg.image.load(planeImage).convert_alpha()
plane = pg.transform.scale(plane, (50, 50))
# plane = plane.set_colorkey((255, 255, 255))
'''Resting state'''
timerImage = pg.image.load(timer[0])
timerImage = pg.transform.scale(timerImage, (int(math.floor(0.068*w_display)), int(math.floor(0.156*h_display))))
restingImage = pg.image.load('images/restingState.png').convert()
restingStateImage = pg.transform.scale(restingImage, (w_display, h_display))
# '''Tinnitus questionnaire '''
# questionsSerie1Image = pg.image.load(questionsSerie1)
# questionsSerie1Image = pg.transform.scale(questionsSerie1Image, (w_display, h_display))
'''MAIN LOOP'''
gameOn = 1
# Build a per-session directory tree named after the current timestamp;
# each game mode saves its EEG buffers under its own sub-folder.
now = datetime.datetime.now()
sessionName = str(str(now.month)+'_'+str(now.day)+'_'+str(now.hour)+'_'+str(now.minute)+'_'+str(now.second))
if not os.path.isdir('data'):
    os.mkdir('data')
os.mkdir('data/session_'+sessionName)
os.mkdir('data/session_'+sessionName+'/Fly-data')
os.mkdir('data/session_'+sessionName+'/PB-data')
os.mkdir('data/session_'+sessionName+'/RS-data')
pathF = str('data/session_'+sessionName+'/Fly-data')
pathPB = str('data/session_'+sessionName+'/PB-data')
pathRS = str('data/session_'+sessionName+'/RS-data')
print '\n \n \n You are running Zeta Game on ', platform
print ' \n \n -----------------------------\n ------ Z E T A A C S -----\n -----------------------------'
print '\n\n -------------------------------------------------------------\n ----- ________ ________ _________ . -----\n ----- / | | / \ -----\n ----- / | | / \ -----\n ----- / | | / \ -----\n ----- / |____ | / \ -----\n ----- / | | /_________\ -----\n ----- / | | / \ -----\n ----- / | | / \ -----\n ----- /_______ |_______ | / \ -----\n --------------------------------------------------------------'
print ' \n Data will be saved here : ', pathRS, pathPB, pathF
while gameOn:
    # One pass of this loop: draw the home menu, wait for the user to pick a
    # mode, run that mode's one-shot setup and its own event loop, then
    # come back here.
    #LOAD screen Image
    home = pg.image.load(image_home).convert() #TODO add image_home
    home = pg.transform.scale(home, (w_display, h_display))
    screen.blit(home, (0,0))
    # load home menu buttons
    settings = 'Etalonnage'
    settingsSurf, settingsRect = text_objects(settings, buttonText)
    settingsRect.center = (1.*w_display/4, 3.3*h_display/4)
    gameA = 'Jeu A'
    gameASurf, gameARect = text_objects(gameA, buttonText)
    gameARect.center = (1.*w_display/2, 3.3*h_display/4)
    gameB = 'Jeu B'
    gameBSurf, gameBRect = text_objects(gameB, buttonText)
    gameBRect.center = (3.*w_display/4, 3.3*h_display/4)
    screen.blit(gameASurf, gameARect)
    screen.blit(gameBSurf, gameBRect)
    screen.blit(settingsSurf, settingsRect)
    pg.display.flip()
    # Home window loop
    while homeOn:
        pg.time.Clock().tick(60)
        for event in pg.event.get():
            if event.type == QUIT:
                pg.quit()
                sys.exit()
            elif event.type == MOUSEBUTTONUP:
                mouseHome = pg.mouse.get_pos()
                choice = whichButtonHome(mouseHome, w_display, h_display)
                if choice == 1: # 1 is for resting state
                    homeOn = 0
                    punchinBall = 0
                    fly = 0
                    restingState = 1
                    questionnaire = 0
                elif choice == 2: # is for flying game
                    homeOn = 0
                    punchinBall = 0
                    fly = 1
                    questionnaire = 0
                    restingState = 0
                elif choice == 3: # 3 is for punchinBall
                    homeOn = 0
                    punchinBall = 1
                    fly = 0
                    restingState = 0
                    questionnaire = 0
    # --- One-shot setup for the punching-ball game (node process, sprites) ---
    if punchinBall :
        sessionPB += 1
        '''launch node process'''
        # NOTE(review): sessionPB was just incremented, so the "== 0" tests
        # below only hold if sessionPB starts at -1 in the constants module;
        # also, "or" binds looser than "and", so the linux branch fires for
        # platform == 'linux' regardless of sessionPB -- confirm intended.
        if platform == 'darwin' and sessionPB == 0: # mac
            processPB = Popen(['/usr/local/bin/node', 'openBCIDataStream.js'], stdout=PIPE) # for MAC
        elif platform == 'linux' or platform == 'linux2' and sessionPB == 0: #linux
            processPB = Popen(['sudo', '/usr/bin/node', 'openBCIDataStream.js'], stdout=PIPE) # for LINUX
        # Drain the node process stdout on a background thread into a queue.
        queuePB = Queue()
        threadPB = Thread(target=enqueue_output, args=(processPB.stdout, queuePB))
        threadPB.daemon = True
        threadPB.start()
        bufferPB = []
        '''Position everything on the screen'''
        screen.blit(scoreTxt, (670, 30))
        screen.blit(fond, (0, 0))
        screen.blit(punchBall, (350*w_display/1024, -5*h_display/576))
        screen.blit(scoreBar, (317*w_display/1024, 460*h_display/576))
        screen.blit(scoreDigit, (800*w_display/1024, 30*h_display/576))
        pg.display.flip()
        queuePB.queue.clear()
        # punch_noise = pg.mixer.Sound("songs/punch.ogg") # TODO resolve the MemoryError due to pg.mixer.Sound
    # --- One-shot setup for the flying game ---
    if fly:
        '''launch node process'''
        if platform == 'darwin' and sessionF == 0: # mac
            processF = Popen(['/usr/local/bin/node', 'openBCIDataStream.js'], stdout=PIPE) # for MAC
        elif platform == 'linux' or platform == 'linux2' and sessionF == 0: #linux
            processF = Popen(['sudo', '/usr/bin/node', 'openBCIDataStream.js'], stdout=PIPE) # for LINUX
        sessionF += 1
        queueF = Queue()
        threadF = Thread(target=enqueue_output, args=(processF.stdout, queueF))
        threadF.daemon = True # kill all on exit
        threadF.start()
        # Loading the background
        bufferF = []
        # NOTE(review): sessionF is incremented twice in this branch (here
        # and a few lines above) -- confirm intended.
        sessionF += 1
        '''Position everything on the screen'''
        screen.blit(sky, (0, 0))
        # screen.blit(cloud, (800*w_display/1024, 100*h_display/576))
        # screen.blit(plane, (300*w_display/1024, 200*h_display/576))
        screen.blit(plane, ( 5.* w_display / 12, maxDisplayY))
        # screen.blit(scoreBar, (317, 460))
        # screen.blit(scoreDigit, (800, 30))
        # screen.blit(test, (317, 460))
        pg.display.flip()
        queueF.queue.clear()
    # --- One-shot setup for the resting-state calibration ---
    if restingState:
        if platform == 'darwin' and sessionRS == 0: # mac
            processRS = Popen(['/usr/local/bin/node', 'openBCIDataStream.js'], stdout=PIPE) # for MAC
            '''launch node process'''
            queueRS = Queue()
            threadRS = Thread(target=enqueue_output, args=(processRS.stdout, queueRS))
            threadRS.daemon = True
            threadRS.start()
        elif platform == 'linux' or platform == 'linux2' and sessionRS == 0: #linux
            processRS = Popen(['sudo', '/usr/bin/node', 'openBCIDataStream.js'], stdout=PIPE, preexec_fn=os.setsid) # for LINUX
            '''launch node process'''
            queueRS = Queue()
            threadRS = Thread(target=enqueue_output, args=(processRS.stdout, queueRS))
            threadRS.daemon = True
            threadRS.start()
        sessionRS += 1
        bufferRS = []
        # Per-channel accumulators for the alpha and delta band powers.
        band_alphaRS_ch1 = []
        band_alphaRS_ch2 = []
        band_alphaRS_ch3 = []
        band_alphaRS_ch4 = []
        band_deltaRS_ch1 = []
        band_deltaRS_ch2 = []
        band_deltaRS_ch3 = []
        band_deltaRS_ch4 = []
        screen.blit(restingStateImage, (0,0))
        displayNumber(0, screen, 'down')
        pg.display.flip()
    # --- One-shot setup for the tinnitus questionnaire screen ---
    if questionnaire:
        screen.blit(questionsSerie1Image, (0,0))
        pg.display.flip()
        # questionText = pg.font.Font('freesansbold.ttf',15)
        smallText = pg.font.Font("freesansbold.ttf",15)
        question = questions[0]
        textQSurf, textQRect = text_objects(question, smallText)
        textQRect.center = (1.*w_display/2, 29)
        screen.blit(textQSurf, textQRect)
        # Draw a row of 11 percentage cells (0% .. 10%) the user can click.
        for nb in range(11):
            # TextSurf, TextRect = text_objects("question serie 1", questionText)
            # TextRect.center = ((display_width/2),(display_height/2))
            # gameDisplay.blit(TextSurf, TextRect)
            # pg.draw.rect(screen, (218, 227, 243), (1.*w_display/12*(nb+1),58,1.*w_display/12*(nb+2),75))
            pg.draw.rect(screen, (255, 255, 255), (1.*w_display/13*(nb+1),58,1.*w_display/13,20))
            text = "{}%".format(nb)
            textSurf, textRect = text_objects(text, smallText)
            textRect.center = ( 1. * w_display/13*(nb+1) + 1.*w_display/13/2, 58 + 10 )
            screen.blit(textSurf, textRect)
            pg.display.update()
        # sec = sec + 1
        # print sec
    # --- Punching-ball game loop: fill an EEG buffer, score alpha power ---
    while punchinBall:
        pg.time.Clock().tick(60)
        for event in pg.event.get():
            if event.type == QUIT:
                saveAllChannelsData(pathPB, sessionPB, 'PB', saved_bufferPB_ch1, saved_bufferPB_ch2, saved_bufferPB_ch3, saved_bufferPB_ch4)
                pg.quit()
                sys.exit()
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    saveAllChannelsData(pathPB, sessionPB, 'PB', saved_bufferPB_ch1, saved_bufferPB_ch2, saved_bufferPB_ch3, saved_bufferPB_ch4)
                    punchinBall = 0
                    saved_bufferPB_ch1 = []
                    saved_bufferPB_ch2 = []
                    saved_bufferPB_ch3 = []
                    saved_bufferPB_ch4 = []
            elif event.type == MOUSEBUTTONUP:
                mouseReturn = pg.mouse.get_pos()
                if whichButtonReturn(mouseReturn, w_display, h_display):
                    # Back to the home menu: save data and reset state.
                    homeOn = 1
                    punchinBall = 0
                    fly = 0
                    restingState = 0
                    questionnaire = 0
                    processPB.terminate() # terminates the node process to close connection with openBCI
                    # call(['sudo service bluetooth restart'])
                    # os.system('sudo service bluetooth restart')
                    bufferPB = []
                    cpt = 0
                    queuePB.queue.clear()
                    saveAllChannelsData(pathPB, sessionPB, 'PB', saved_bufferPB_ch1, saved_bufferPB_ch2, saved_bufferPB_ch3, saved_bufferPB_ch4)
                    saved_bufferPB_ch1 = []
                    saved_bufferPB_ch2 = []
                    saved_bufferPB_ch3 = []
                    saved_bufferPB_ch4 = []
        try:
            while len(bufferPB) < buffersize * nb_channels :
                bufferPB.append(queuePB.get_nowait())
                # NOTE(review): get_nowait() is called twice per iteration, so
                # saved_bufferPB receives every other sample -- confirm intended.
                saved_bufferPB.append(queuePB.get_nowait())
                if len(bufferPB) == 800:
                    # Split the interleaved buffer into the 4 EEG channels.
                    bufferPB_array = np.asarray(bufferPB)
                    dataPB[0, :] = bufferPB_array[ind_channel_1]
                    dataPB[1, :] = bufferPB_array[ind_channel_2]
                    dataPB[2, :] = bufferPB_array[ind_channel_3]
                    dataPB[3, :] = bufferPB_array[ind_channel_4]
                    saved_bufferPB_ch1.append(dataPB[0, :])
                    saved_bufferPB_ch2.append(dataPB[1, :])
                    saved_bufferPB_ch3.append(dataPB[2, :])
                    saved_bufferPB_ch4.append(dataPB[3, :])
                    fdataPB[0, :] = filter_data(dataPB[0, :], fs_hz)
                    fdataPB[1, :] = filter_data(dataPB[1, :], fs_hz)
                    fdataPB[2, :] = filter_data(dataPB[2, :], fs_hz)
                    fdataPB[3, :] = filter_data(dataPB[3, :], fs_hz)
                    bandmean_alphaPB = np.zeros(nb_channels)
                    bandmax_alphaPB = np.zeros(nb_channels)
                    bandmin_alphaPB = np.zeros(nb_channels)
                    bandmean_deltaPB = np.zeros(nb_channels)
                    bandmax_deltaPB = np.zeros(nb_channels)
                    bandmin_deltaPB = np.zeros(nb_channels)
                    for channel in range(4):
                        bandmean_alphaPB[channel] = extract_freqbandmean(200, fs_hz, fdataPB[channel,:], freqMaxAlpha-2, freqMaxAlpha+2)
                        bandmean_deltaPB[channel] = extract_freqbandmean(200, fs_hz, fdataPB[channel,:], 3, 4)
                    ''' Get the mean, min and max of the last result of all channels'''
                    newMean_alphaPB = np.average(bandmean_alphaPB) #mean of the 4 channels, not the best metric I guess
                    newMean_deltaPB = np.average(bandmean_deltaPB)
                    ''' increment the mean, min and max arrays of the freqRange studied'''
                    mean_array_uvPB.append(newMean_alphaPB)
                    if len(mean_array_uvPB) != 0:
                        deltaPB = np.amax(mean_array_uvPB) - np.min(mean_array_uvPB)
                    if len(mean_array_uvPB) == 0:
                        deltaPB = 0
                    # print "new Mean of 4 channels", newMean_alphaPB
                    # print "Max - Min ", deltaPB
                    # Map the current alpha mean onto one of 8 levels (0..7)
                    # relative to the observed min/max range.
                    if deltaPB == 0:
                        level = 0
                    if deltaPB !=0:
                        level = int(math.floor(7*(newMean_alphaPB-np.min(mean_array_uvPB))/deltaPB))
                    if level == 7:
                        # Maximum level reached: score a punch and show the win image.
                        scorePB = scorePB + 1
                        # punch_noise.play()
                        scoreDigit = pg.image.load(scoreDigitImages[scorePB]).convert()
                        scoreDigit = pg.transform.scale(scoreDigit, (70*w_display/1024, 90*h_display/576))
                        screen.blit(fond, (0, 0))
                        screen.blit(scoreDigit, (800*w_display/1024, 30*h_display/576))
                        screen.blit(winImg, (100*w_display/1024, 100*h_display/576))
                    if level != 7:
                        scoreBar = pg.image.load(levels_images[level]).convert_alpha()
                        scoreBar = pg.transform.scale(scoreBar, (90*w_display/1024, 400*h_display/576))
                        scoreBar = pg.transform.rotate(scoreBar, -90)
                        screen.blit(fond, (0, 0))
                        screen.blit(punchBall, (350*w_display/1024,-5*h_display/576))
                        screen.blit(scoreBar, (317*w_display/1024, 460*h_display/576))
                        screen.blit(scoreDigit, (800*w_display/1024, 30*h_display/576))
                    print "level", level
                    pg.display.update()
                    cpt = 0
                    bufferPB = []
        except Empty:
            continue # do stuff
        else:
            str(bufferPB)
            #sys.stdout.write(char)
    # --- Flying game loop: plane altitude tracks the alpha/delta ratio ---
    while fly:
        pg.time.Clock().tick(60)
        for event in pg.event.get():
            if event.type == QUIT:
                saveAllChannelsData(pathF, sessionF, 'F', saved_bufferF_ch1, saved_bufferF_ch2, saved_bufferF_ch3, saved_bufferF_ch4)
                bufferF = []
                saved_bufferF_ch1 = []
                saved_bufferF_ch2 = []
                saved_bufferF_ch3 = []
                saved_bufferF_ch4 = []
                pg.quit()
                sys.exit()
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    saveAllChannelsData(pathF, sessionF, 'F', saved_bufferF_ch1, saved_bufferF_ch2, saved_bufferF_ch3, saved_bufferF_ch4)
                    bufferF = []
                    saved_bufferF_ch1 = []
                    saved_bufferF_ch2 = []
                    saved_bufferF_ch3 = []
                    saved_bufferF_ch4 = []
                    fly = 0
            elif event.type == MOUSEBUTTONUP:
                mouseReturn = pg.mouse.get_pos()
                if whichButtonReturn(mouseReturn, w_display, h_display):
                    # Back to the home menu: save data and reset state.
                    homeOn = 1
                    punchinBall = 0
                    fly = 0
                    restingState = 0
                    questionnaire = 0
                    processF.terminate()
                    # call(['sudo service bluetooth restart'])
                    # os.system('sudo service bluetooth restart')
                    queueF.queue.clear()
                    saveAllChannelsData(pathF, sessionF, 'F', saved_bufferF_ch1, saved_bufferF_ch2, saved_bufferF_ch3, saved_bufferF_ch4)
                    bufferF = []
                    saved_bufferF_ch1 = []
                    saved_bufferF_ch2 = []
                    saved_bufferF_ch3 = []
                    saved_bufferF_ch4 = []
                    cpt = 0
        if durationSession > 0:
            try:
                while len(bufferF) < buffersize * nb_channels:
                    # Animate the plane towards its target height while the
                    # buffer fills (5 redraws per buffer).
                    if len(bufferF) % int(math.floor(1.*buffersize/5)) == 0:
                        screen.blit(sky, (0,0))
                        screen.blit(plane, (5. * w_display / 12, veryoldPosy + 1.*(oldPosy - veryoldPosy)/steps ))
                        displayNumber(math.floor(scoreF), screen, 'down')
                        displayNumber(durationSession, screen, 'down_left')
                        veryoldPosy += 1.*(oldPosy - veryoldPosy)/steps
                        pg.display.flip()
                    bufferF.append(queueF.get_nowait())
                    cpt += 1
                    if len(bufferF) == 800 :
                        # Split the interleaved buffer into the 4 EEG channels.
                        bufferF_array = np.asarray(bufferF)
                        dataF[0, :] = bufferF_array[ind_channel_1]
                        dataF[1, :] = bufferF_array[ind_channel_2]
                        dataF[2, :] = bufferF_array[ind_channel_3]
                        dataF[3, :] = bufferF_array[ind_channel_4]
                        saved_bufferF_ch1.append(dataF[0, :])
                        saved_bufferF_ch2.append(dataF[1, :])
                        saved_bufferF_ch3.append(dataF[2, :])
                        saved_bufferF_ch4.append(dataF[3, :])
                        fdataF[0, :] = filter_data(dataF[0, :], fs_hz)
                        fdataF[1, :] = filter_data(dataF[1, :], fs_hz)
                        fdataF[2, :] = filter_data(dataF[2, :], fs_hz)
                        fdataF[3, :] = filter_data(dataF[3, :], fs_hz)
                        bandmean_alphaF = np.zeros(nb_channels)
                        bandmax_alphaF = np.zeros(nb_channels)
                        bandmin_alphaF = np.zeros(nb_channels)
                        bandmean_deltaF = np.zeros(nb_channels)
                        bandmax_deltaF = np.zeros(nb_channels)
                        bandmin_deltaF = np.zeros(nb_channels)
                        ratioF = np.zeros(nb_channels)
                        for channel in range(nb_channels):
                            bandmean_alphaF[channel] = extract_freqbandmean(200, fs_hz, fdataF[channel,:], freqMaxAlpha-2, freqMaxAlpha+2)
                            bandmean_deltaF[channel] = extract_freqbandmean(200, fs_hz, fdataF[channel,:], 3, 4)
                            ratioF[channel] = 1.* bandmean_alphaF[channel] / bandmean_deltaF[channel]
                            # maximize alpha/delta
                        ''' Get the mean, min and max of the last reslt of all channels'''
                        newMean_alphaF = np.average(bandmean_alphaF)
                        # maxAlphaF = np.amax(mean_array_uvF)
                        # minAlphaF = np.min(mean_array_uvF)
                        medRatioF = np.median(ratioF)
                        mean_array_uvF.append(medRatioF)
                        # Linearly map the median ratio onto the plane's
                        # vertical display range (higher ratio -> higher plane).
                        if medRatioF == maxRatioAlphaOverDelta:
                            newPosy = minDisplayY
                        elif medRatioF == minRatioAlphaOverDelta:
                            newPosy = maxDisplayY
                        else:
                            a = (maxDisplayY - minDisplayY) * 1. / (minRatioAlphaOverDelta - maxRatioAlphaOverDelta)
                            b = maxDisplayY - minRatioAlphaOverDelta * a
                            newPosy = a * medRatioF + b
                        scoreF = scoreF + flyScore(newPosy)
                        # deltaPosy_1 = 1. * (newPosy - oldPosy) / steps
                        # deltaPosy_2 = 1. * (oldPosy - veryoldPosy) / steps
                        # screen.blit(sky, (0, 0))
                        # for step in range(steps):
                        #     # print newPosy
                        #     # screen.blit(sky, (0,0))
                        #
                        #     print step
                        #     screen.blit(plane, (5. * w_display / 12, oldPosy + deltaPosy))
                        #     pg.time.delay(100)
                        #     pg.display.update()
                        #     oldPosy += deltaPosy
                        # displayNumber(math.floor(scoreF), screen, 'down')
                        # screen.blit(plane, (5. * w_display / 12, newPosy))
                        # displayNumber(math.floor(scoreF), screen, 'down')
                        # screen.blit(scoreImg, ())
                        # print oldPosy, newPosy
                        # pg.time.delay(400)
                        # pg.display.flip()
                        # print "new Mean of 4 channels", newMean_alpha, maxAlpha, minAlpha
                        # scoreBar = pg.image.load(levels_images[level]).convert_alpha()
                        # scoreBar = pg.transform.scale(scoreBar, (90, 400))
                        # scoreBar = pg.transform.rotate(scoreBar, -90)
                        durationSession = durationSession - 1
            except Empty:
                continue # do stuff
            else:
                str(bufferF)
                # sys.stdout.write(char)
                veryoldPosy = oldPosy
                oldPosy = newPosy
                cpt = 0
                saved_bufferF.append(bufferF)
                bufferF = []
        else :
            # Session time elapsed: save everything and return to the menu.
            homeOn = 1
            punchinBall = 0
            fly = 0
            restingState = 0
            questionnaire = 0
            processF.terminate()
            # call(['sudo service bluetooth restart'])
            # os.system('sudo service bluetooth restart')
            queueF.queue.clear()
            saveAllChannelsData(pathF, sessionF, 'F', saved_bufferF_ch1, saved_bufferF_ch2, saved_bufferF_ch3, saved_bufferF_ch4)
            bufferF = []
            saved_bufferF_ch1 = []
            saved_bufferF_ch2 = []
            saved_bufferF_ch3 = []
            saved_bufferF_ch4 = []
            cpt = 0
            durationSession = durationSessionInit
    # --- Resting-state loop: record EEG chunks, then calibrate thresholds ---
    while restingState:
        pg.time.Clock().tick(30)
        for event in pg.event.get():
            if event.type == QUIT:
                saveAllChannelsData(pathRS, sessionRS, 'RS', saved_bufferRS_ch1, saved_bufferRS_ch2, saved_bufferRS_ch3, saved_bufferRS_ch4)
                saved_bufferRS_ch1 = []
                saved_bufferRS_ch2 = []
                saved_bufferRS_ch3 = []
                saved_bufferRS_ch4 = []
                pg.quit()
                sys.exit()
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    restingState = 0
            elif event.type == MOUSEBUTTONUP:
                mouseReturn = pg.mouse.get_pos()
                if whichButtonReturn(mouseReturn, w_display, h_display):
                    # Back to the home menu: save data and reset state.
                    homeOn = 1
                    punchinBall = 0
                    fly = 0
                    restingState = 0
                    questionnaire = 0
                    # processRS.terminate()
                    # call(['sudo service bluetooth restart'])
                    # os.system('sudo service bluetooth restart')
                    # os.killpg(os.getpgid(processRS.pid), signal.SIGTERM) # Send the signal to all the process groups
                    bufferRS = []
                    queueRS.queue.clear()
                    saveAllChannelsData(pathRS, sessionRS, 'RS', saved_bufferRS_ch1, saved_bufferRS_ch2, saved_bufferRS_ch3, saved_bufferRS_ch4)
                    saved_bufferRS_ch1 = []
                    saved_bufferRS_ch2 = []
                    saved_bufferRS_ch3 = []
                    saved_bufferRS_ch4 = []
                    cpt = 0
                    print bufferRS
        # print band_alphaRS_ch1
        if sec == restingStateDuration :
            # Calibration done: derive each channel's peak alpha frequency and
            # the min/max alpha-over-delta thresholds used by the fly game.
            # np.zeros(nb_freq_alpha)
            band_alphaRS_ch1 = np.asarray(band_alphaRS_ch1)
            band_alphaRS_ch2 = np.asarray(band_alphaRS_ch2)
            band_alphaRS_ch3 = np.asarray(band_alphaRS_ch3)
            band_alphaRS_ch4 = np.asarray(band_alphaRS_ch4)
            # print 'band_alphaRS_ch1', band_alphaRS_ch1
            # print 'band_alphaRS_ch1[:, 0]', np.average(band_alphaRS_ch1[:,0])
            freqMaxAlphaCh1 = getfreqmaxband(band_alphaRS_ch1, 'alpha', nb_freq_alpha)
            freqMaxAlphaCh2 = getfreqmaxband(band_alphaRS_ch2, 'alpha', nb_freq_alpha)
            freqMaxAlphaCh3 = getfreqmaxband(band_alphaRS_ch3, 'alpha', nb_freq_alpha)
            freqMaxAlphaCh4 = getfreqmaxband(band_alphaRS_ch4, 'alpha', nb_freq_alpha)
            freqMaxAlpha = int(np.average([freqMaxAlphaCh1, freqMaxAlphaCh2, freqMaxAlphaCh3, freqMaxAlphaCh4]))
            for chunk in range(restingStateDuration):
                ratios_ch1.append(1.*np.median((extract_freqband(200, fs_hz, fdataRS[0,:, chunk], freqMaxAlphaCh1-2, freqMaxAlphaCh1+2)[0]))/np.median((extract_freqband(200, fs_hz, fdataRS[0,:, chunk], 3, 4)[0])))
                ratios_ch2.append(1.*np.median((extract_freqband(200, fs_hz, fdataRS[1,:, chunk], freqMaxAlphaCh2-2, freqMaxAlphaCh2+2)[0]))/np.median((extract_freqband(200, fs_hz, fdataRS[1,:, chunk], 3, 4)[0])))
                ratios_ch3.append(1.*np.median((extract_freqband(200, fs_hz, fdataRS[2,:, chunk], freqMaxAlphaCh3-2, freqMaxAlphaCh3+2)[0]))/np.median((extract_freqband(200, fs_hz, fdataRS[2,:, chunk], 3, 4)[0])))
                ratios_ch4.append(1.*np.median((extract_freqband(200, fs_hz, fdataRS[3,:, chunk], freqMaxAlphaCh4-2, freqMaxAlphaCh4+2)[0]))/np.median((extract_freqband(200, fs_hz, fdataRS[3,:, chunk], 3, 4)[0])))
            # print ratios_ch1
            median_ratio_ch1 = np.median(ratios_ch1)
            median_ratio_ch2 = np.median(ratios_ch2)
            median_ratio_ch3 = np.median(ratios_ch3)
            median_ratio_ch4 = np.median(ratios_ch4)
            # print median_ratio_ch1
            # Robust spread via the median absolute deviation (MAD).
            mad_ch1 = mad(ratios_ch1)
            mad_ch2 = mad(ratios_ch2)
            mad_ch3 = mad(ratios_ch3)
            mad_ch4 = mad(ratios_ch4)
            madRatioAlphaOverDelta = np.average([mad_ch4, mad_ch3, mad_ch2, mad_ch1])
            # print madRatioAlphaOverDelta
            medianratioAlphaoverDelta = np.average([median_ratio_ch1, median_ratio_ch2, median_ratio_ch3, median_ratio_ch4])
            # print medianratioAlphaoverDelta
            # Thresholds at median +/- 3 MAD bound the fly-game mapping.
            minRatioAlphaOverDelta = medianratioAlphaoverDelta - 3 * madRatioAlphaOverDelta
            maxRatioAlphaOverDelta = medianratioAlphaoverDelta + 3 * madRatioAlphaOverDelta
            # print minRatioAlphaOverDelta, maxRatioAlphaOverDelta
            print 'fin de la seance de reglage', freqMaxAlpha
            homeOn = 1
            punchinBall = 0
            fly = 0
            restingState = 0
            questionnaire = 0
            # processRS.terminate()
            # call(['sudo service bluetooth restart'])
            # os.system('sudo service bluetooth restart')
            bufferRS = []
            queueRS.queue.clear()
            saveAllChannelsData(pathRS, sessionRS, 'RS', saved_bufferRS_ch1, saved_bufferRS_ch2, saved_bufferRS_ch3, saved_bufferRS_ch4)
            saved_bufferRS_ch1 = []
            saved_bufferRS_ch2 = []
            saved_bufferRS_ch3 = []
            saved_bufferRS_ch4 = []
        elif sec < restingStateDuration:
            try:
                # queueRS.queue.clear()
                while len(bufferRS) < buffersize * nb_channels:
                    bufferRS.append(queueRS.get_nowait())
                    if len(bufferRS) == 800:
                        # One second of data: de-interleave, filter and store
                        # alpha/delta band powers for this chunk.
                        # print sec
                        bufferRS_array = np.asarray(bufferRS)
                        dataRS[0, :, sec] = bufferRS_array[ind_channel_1]
                        dataRS[1, :, sec] = bufferRS_array[ind_channel_2]
                        dataRS[2, :, sec] = bufferRS_array[ind_channel_3]
                        dataRS[3, :, sec] = bufferRS_array[ind_channel_4]
                        saved_bufferRS_ch1.append(dataRS[0, :, sec])
                        saved_bufferRS_ch2.append(dataRS[1, :, sec])
                        saved_bufferRS_ch3.append(dataRS[2, :, sec])
                        saved_bufferRS_ch4.append(dataRS[3, :, sec])
                        fdataRS[0, :, sec] = filter_data(dataRS[0, :, sec], fs_hz)
                        fdataRS[1, :, sec] = filter_data(dataRS[1, :, sec], fs_hz)
                        fdataRS[2, :, sec] = filter_data(dataRS[2, :, sec], fs_hz)
                        fdataRS[3, :, sec] = filter_data(dataRS[3, :, sec], fs_hz)
                        band_alphaRS_ch1.append(extract_freqband(200, fs_hz, fdataRS[0,:, sec], 6, 13)[0])
                        band_alphaRS_ch2.append(extract_freqband(200, fs_hz, fdataRS[1,:, sec], 6, 13)[0])
                        band_alphaRS_ch3.append(extract_freqband(200, fs_hz, fdataRS[2,:, sec], 6, 13)[0])
                        band_alphaRS_ch4.append(extract_freqband(200, fs_hz, fdataRS[3,:, sec], 6, 13)[0])
                        nb_freq_alpha = extract_freqband(200, fs_hz, fdataRS[0,:], 6, 13)[1]
                        band_deltaRS_ch1.append(extract_freqband(200, fs_hz, fdataRS[0,:, sec], 3, 4)[0])
                        band_deltaRS_ch2.append(extract_freqband(200, fs_hz, fdataRS[1,:, sec], 3, 4)[0])
                        band_deltaRS_ch3.append(extract_freqband(200, fs_hz, fdataRS[2,:, sec], 3, 4)[0])
                        band_deltaRS_ch4.append(extract_freqband(200, fs_hz, fdataRS[3,:, sec], 3, 4)[0])
                        nb_freq_delta = extract_freqband(200, fs_hz, fdataRS[3,:], 3, 4)[1]
                        # for channel in range(4):
                        #     band_alphaRS[channel] = extract_freqband(200, fs_hz, fdataRS[channel,:], 6, 11)
                        #     bandmean_deltaRS[channel] = extract_freqband(200, fs_hz, fdataRS[channel,:], 3, 4)
                        # globalAlpha.append(bandmean_alphaRS)
                        bufferRS = []
                        displayNumber(sec, screen, 'down')
                        # checkImp() # TODO check impedances function
                        pg.display.update()
                        queueRS.queue.clear()
                        sec = sec + 1
            except Empty:
                continue # do stuff
            else:
                str(bufferRS)
                # sys.stdout.write(char)
                # time.sleep(1)
            # pg.time.delay(993) # wait to display the next second on screen
        # print sec
        # queueRS.queue.clear()
    # --- Questionnaire loop: record clicked percentage cells ---
    while questionnaire:
        # pg.time.Clock().tick(30)
        mouse = pg.mouse.get_pos()
        for event in pg.event.get():
            if event.type == QUIT:
                pg.quit()
                sys.exit()
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    restingState = 0
            if event.type == MOUSEBUTTONUP:
                # print int(mouse[1])
                # NOTE(review): the three conditions below are identical, so a
                # single click in the 58-80px band records answers for
                # questions 1, 2 and 3 at once -- presumably each question
                # should test its own row; confirm.
                if (int(mouse[1]) >= 58) & (int(mouse[1]) <= 80 ) :
                    answers.append('question 1 in %')
                    answers.append(math.floor(1.*mouse[0]/(w_display/13)))
                    print answers
                if (int(mouse[1]) >= 58) & (int(mouse[1]) <= 80 ) :
                    answers.append('question 2 in %')
                    answers.append(math.floor(1.*mouse[0]/(w_display/13)))
                    print answers
                if (int(mouse[1]) >= 58) & (int(mouse[1]) <= 80 ) :
                    answers.append('question 3 in %')
                    answers.append(math.floor(1.*mouse[0]/(w_display/13)))
                    print answers
        pg.display.update()
# Copyright 2020-2021 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe Pose."""
import enum
from typing import NamedTuple
import numpy as np
# The following imports are needed because python pb2 silently discards
# unknown protobuf fields.
# pylint: disable=unused-import
from mediapipe.calculators.core import constant_side_packet_calculator_pb2
from mediapipe.calculators.core import gate_calculator_pb2
from mediapipe.calculators.core import split_vector_calculator_pb2
from mediapipe.calculators.image import warp_affine_calculator_pb2
from mediapipe.calculators.tensor import image_to_tensor_calculator_pb2
from mediapipe.calculators.tensor import inference_calculator_pb2
from mediapipe.calculators.tensor import tensors_to_classification_calculator_pb2
from mediapipe.calculators.tensor import tensors_to_detections_calculator_pb2
from mediapipe.calculators.tensor import tensors_to_landmarks_calculator_pb2
from mediapipe.calculators.tensor import tensors_to_segmentation_calculator_pb2
from mediapipe.calculators.tflite import ssd_anchors_calculator_pb2
from mediapipe.calculators.util import detections_to_rects_calculator_pb2
from mediapipe.calculators.util import landmarks_smoothing_calculator_pb2
from mediapipe.calculators.util import local_file_contents_calculator_pb2
from mediapipe.calculators.util import logic_calculator_pb2
from mediapipe.calculators.util import non_max_suppression_calculator_pb2
from mediapipe.calculators.util import rect_transformation_calculator_pb2
from mediapipe.calculators.util import thresholding_calculator_pb2
from mediapipe.calculators.util import visibility_smoothing_calculator_pb2
from mediapipe.framework.tool import switch_container_pb2
# pylint: enable=unused-import
from mediapipe.python.solution_base import SolutionBase
from mediapipe.python.solutions import download_utils
# pylint: disable=unused-import
from mediapipe.python.solutions.pose_connections import POSE_CONNECTIONS
# pylint: enable=unused-import
# Landmark names in model-output order.  Position i in this tuple becomes the
# integer value of the matching enum member (NOSE == 0 ... RIGHT_FOOT_INDEX
# == 32), so the ordering here must not change.
_POSE_LANDMARK_NAMES = (
    'NOSE',
    'LEFT_EYE_INNER',
    'LEFT_EYE',
    'LEFT_EYE_OUTER',
    'RIGHT_EYE_INNER',
    'RIGHT_EYE',
    'RIGHT_EYE_OUTER',
    'LEFT_EAR',
    'RIGHT_EAR',
    'MOUTH_LEFT',
    'MOUTH_RIGHT',
    'LEFT_SHOULDER',
    'RIGHT_SHOULDER',
    'LEFT_ELBOW',
    'RIGHT_ELBOW',
    'LEFT_WRIST',
    'RIGHT_WRIST',
    'LEFT_PINKY',
    'RIGHT_PINKY',
    'LEFT_INDEX',
    'RIGHT_INDEX',
    'LEFT_THUMB',
    'RIGHT_THUMB',
    'LEFT_HIP',
    'RIGHT_HIP',
    'LEFT_KNEE',
    'RIGHT_KNEE',
    'LEFT_ANKLE',
    'RIGHT_ANKLE',
    'LEFT_HEEL',
    'RIGHT_HEEL',
    'LEFT_FOOT_INDEX',
    'RIGHT_FOOT_INDEX',
)
# Functional IntEnum creation; members and their integer values are identical
# to the equivalent class-statement definition.
PoseLandmark = enum.IntEnum('PoseLandmark', _POSE_LANDMARK_NAMES, start=0)
PoseLandmark.__doc__ = """The 33 pose landmarks."""
_BINARYPB_FILE_PATH = 'mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb'
def _download_oss_pose_landmark_model(model_complexity):
  """Downloads the pose landmark lite/heavy model from the MediaPipe Github repo if it doesn't exist in the package."""
  # Only the lite (0) and heavy (2) variants are fetched here; any other
  # complexity value is a no-op, exactly mirroring the original if/elif chain.
  extra_models = {
      0: 'mediapipe/modules/pose_landmark/pose_landmark_lite.tflite',
      2: 'mediapipe/modules/pose_landmark/pose_landmark_heavy.tflite',
  }
  model_path = extra_models.get(model_complexity)
  if model_path is not None:
    download_utils.download_oss_model(model_path)
class Pose(SolutionBase):
  """MediaPipe Pose.
  MediaPipe Pose processes an RGB image and returns pose landmarks on the most
  prominent person detected.
  Please refer to https://solutions.mediapipe.dev/pose#python-solution-api for
  usage examples.
  """
  def __init__(self,
               static_image_mode=False,
               model_complexity=1,
               smooth_landmarks=True,
               enable_segmentation=False,
               smooth_segmentation=True,
               min_detection_confidence=0.5,
               min_tracking_confidence=0.5):
    """Initializes a MediaPipe Pose object.
    Args:
      static_image_mode: Whether to treat the input images as a batch of static
        and possibly unrelated images, or a video stream. See details in
        https://solutions.mediapipe.dev/pose#static_image_mode.
      model_complexity: Complexity of the pose landmark model: 0, 1 or 2. See
        details in https://solutions.mediapipe.dev/pose#model_complexity.
      smooth_landmarks: Whether to filter landmarks across different input
        images to reduce jitter. See details in
        https://solutions.mediapipe.dev/pose#smooth_landmarks.
      enable_segmentation: Whether to predict segmentation mask. See details in
        https://solutions.mediapipe.dev/pose#enable_segmentation.
      smooth_segmentation: Whether to filter segmentation across different input
        images to reduce jitter. See details in
        https://solutions.mediapipe.dev/pose#smooth_segmentation.
      min_detection_confidence: Minimum confidence value ([0.0, 1.0]) for person
        detection to be considered successful. See details in
        https://solutions.mediapipe.dev/pose#min_detection_confidence.
      min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) for the
        pose landmarks to be considered tracked successfully. See details in
        https://solutions.mediapipe.dev/pose#min_tracking_confidence.
    """
    # The lite (0) and heavy (2) model variants are fetched on demand before
    # the graph is initialized; complexity 1 requires no download (see
    # _download_oss_pose_landmark_model).
    _download_oss_pose_landmark_model(model_complexity)
    super().__init__(
        binary_graph_path=_BINARYPB_FILE_PATH,
        side_inputs={
            'model_complexity': model_complexity,
            # Smoothing is only meaningful for a video stream, so it is forced
            # off when inputs are unrelated static images.
            'smooth_landmarks': smooth_landmarks and not static_image_mode,
            'enable_segmentation': enable_segmentation,
            'smooth_segmentation':
                smooth_segmentation and not static_image_mode,
            # Re-using the previous frame's landmarks (tracking) is likewise
            # disabled in static-image mode.
            'use_prev_landmarks': not static_image_mode,
        },
        # Keys are "<node name>__<calculator>.<field>" paths into the graph's
        # calculator options; they must match the node names in the binarypb
        # graph exactly.
        calculator_params={
            'posedetectioncpu__TensorsToDetectionsCalculator.min_score_thresh':
                min_detection_confidence,
            'poselandmarkbyroicpu__tensorstoposelandmarksandsegmentation__ThresholdingCalculator.threshold':
                min_tracking_confidence,
        },
        outputs=['pose_landmarks', 'pose_world_landmarks', 'segmentation_mask'])
  def process(self, image: np.ndarray) -> NamedTuple:
    """Processes an RGB image and returns the pose landmarks on the most prominent person detected.
    Args:
      image: An RGB image represented as a numpy ndarray.
    Raises:
      RuntimeError: If the underlying graph throws any error.
      ValueError: If the input image is not three channel RGB.
    Returns:
      A NamedTuple with fields describing the landmarks on the most prominent
      person detected:
      1) "pose_landmarks" field that contains the pose landmarks.
      2) "pose_world_landmarks" field that contains the pose landmarks in
      real-world 3D coordinates that are in meters with the origin at the
      center between hips.
      3) "segmentation_mask" field that contains the segmentation mask if
      "enable_segmentation" is set to true.
    """
    results = super().process(input_data={'image': image})
    # Strip the graph-internal 'presence' field from every landmark so it is
    # not exposed in the public results.
    if results.pose_landmarks:
      for landmark in results.pose_landmarks.landmark:
        landmark.ClearField('presence')
    if results.pose_world_landmarks:
      for landmark in results.pose_world_landmarks.landmark:
        landmark.ClearField('presence')
    return results
| google/mediapipe | mediapipe/python/solutions/pose.py | Python | apache-2.0 | 7,781 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this docs directory.
# NOTE(review): this assumes sphinx-build is invoked with the docs directory
# as the current working directory -- confirm before building from elsewhere.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
# Imported only so that airframe.__version__ can be read below for the
# `version` and `release` settings.
import airframe
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'airframe'
copyright = u'2013, Virantha Ekanayake'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = airframe.__version__
# The full version, including alpha/beta/rc tags.
release = airframe.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'airframedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'airframe.tex', u'airframe Documentation',
u'Virantha Ekanayake', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'airframe', u'airframe Documentation',
[u'Virantha Ekanayake'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'airframe', u'airframe Documentation',
u'Virantha Ekanayake', 'airframe', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| virantha/airframe | docs/conf.py | Python | apache-2.0 | 8,385 |
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta, time
from itertools import chain
from nose_parameterized import parameterized
import numpy as np
from numpy import nan
from numpy.testing import assert_almost_equal
import pandas as pd
from toolz import concat
from zipline._protocol import handle_non_market_minutes
from zipline.finance.asset_restrictions import (
Restriction,
HistoricalRestrictions,
RESTRICTION_STATES,
)
from zipline.testing import (
MockDailyBarReader,
create_daily_df_for_asset,
create_minute_df_for_asset,
str_to_seconds,
)
from zipline.testing.fixtures import (
WithCreateBarData,
WithDataPortal,
ZiplineTestCase,
)
from zipline.utils.calendars import get_calendar
from zipline.utils.calendars.trading_calendar import days_at_time
# Field-name groups used throughout these tests: plain bar fields, bar fields
# plus the forward-filled "price", and the full set of queryable fields.
OHLC = "open high low close".split()
OHLCP = OHLC + ["price"]
ALL_FIELDS = OHLCP + ["volume", "last_traded"]
# offsets used in test data
field_info = dict(open=1, high=2, low=-1, close=0)
def str_to_ts(dt_str):
    """Parse *dt_str* into a UTC-localized pandas Timestamp."""
    return pd.Timestamp(dt_str).tz_localize('UTC')
class WithBarDataChecks(object):
    """Mixin of BarData consistency assertions for unittest-style test cases.
    Expects the concrete test class to provide ``self.ASSET1`` and
    ``self.ASSET2`` plus the standard unittest assertion methods.
    """
    def assert_same(self, val1, val2):
        """Assert equality, additionally treating NaT==NaT and NaN==NaN
        as equal (plain ``assertEqual`` rejects both of those pairs).
        """
        try:
            self.assertEqual(val1, val2)
        except AssertionError:
            # Equality failed; accept the pair anyway if both sides are the
            # same "missing" sentinel, otherwise re-raise the original error.
            if val1 is pd.NaT:
                self.assertTrue(val2 is pd.NaT)
            elif np.isnan(val1):
                self.assertTrue(np.isnan(val2))
            else:
                raise
    def check_internal_consistency(self, bar_data):
        """Verify every ``bar_data.current`` query form returns the same
        values, and that no private internals leak as attributes.
        """
        # One multi-asset/multi-field frame and one multi-field series per
        # asset, to compare against the scalar single-asset/single-field form.
        df = bar_data.current([self.ASSET1, self.ASSET2], ALL_FIELDS)
        asset1_multi_field = bar_data.current(self.ASSET1, ALL_FIELDS)
        asset2_multi_field = bar_data.current(self.ASSET2, ALL_FIELDS)
        for field in ALL_FIELDS:
            asset1_value = bar_data.current(self.ASSET1, field)
            asset2_value = bar_data.current(self.ASSET2, field)
            multi_asset_series = bar_data.current(
                [self.ASSET1, self.ASSET2], field
            )
            # make sure all the different query forms are internally
            # consistent
            self.assert_same(multi_asset_series.loc[self.ASSET1], asset1_value)
            self.assert_same(multi_asset_series.loc[self.ASSET2], asset2_value)
            self.assert_same(df.loc[self.ASSET1][field], asset1_value)
            self.assert_same(df.loc[self.ASSET2][field], asset2_value)
            self.assert_same(asset1_multi_field[field], asset1_value)
            self.assert_same(asset2_multi_field[field], asset2_value)
        # also verify that bar_data doesn't expose anything bad
        # (implementation details must raise AttributeError, not leak)
        for field in ["data_portal", "simulation_dt_func", "data_frequency",
                      "_views", "_universe_func", "_last_calculated_universe",
                      "_universe_last_updatedat"]:
            with self.assertRaises(AttributeError):
                getattr(bar_data, field)
class TestMinuteBarData(WithCreateBarData,
WithBarDataChecks,
WithDataPortal,
ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-05', tz='UTC')
END_DATE = ASSET_FINDER_EQUITY_END_DATE = pd.Timestamp(
'2016-01-07',
tz='UTC',
)
ASSET_FINDER_EQUITY_SIDS = 1, 2, 3, 4, 5
SPLIT_ASSET_SID = 3
ILLIQUID_SPLIT_ASSET_SID = 4
HILARIOUSLY_ILLIQUID_ASSET_SID = 5
@classmethod
def make_equity_minute_bar_data(cls):
# asset1 has trades every minute
# asset2 has trades every 10 minutes
# split_asset trades every minute
# illiquid_split_asset trades every 10 minutes
for sid in (1, cls.SPLIT_ASSET_SID):
yield sid, create_minute_df_for_asset(
cls.trading_calendar,
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
)
for sid in (2, cls.ILLIQUID_SPLIT_ASSET_SID):
yield sid, create_minute_df_for_asset(
cls.trading_calendar,
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
10,
)
yield cls.HILARIOUSLY_ILLIQUID_ASSET_SID, create_minute_df_for_asset(
cls.trading_calendar,
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
50,
)
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
6: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'exchange': 'ICEUS',
},
7: {
'symbol': 'CLK06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC'),
'exchange': 'ICEUS',
},
},
orient='index',
)
@classmethod
def make_splits_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds("2016-01-06"),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
},
{
'effective_date': str_to_seconds("2016-01-06"),
'ratio': 0.5,
'sid': cls.ILLIQUID_SPLIT_ASSET_SID,
},
])
@classmethod
def init_class_fixtures(cls):
super(TestMinuteBarData, cls).init_class_fixtures()
cls.ASSET1 = cls.asset_finder.retrieve_asset(1)
cls.ASSET2 = cls.asset_finder.retrieve_asset(2)
cls.SPLIT_ASSET = cls.asset_finder.retrieve_asset(
cls.SPLIT_ASSET_SID,
)
cls.ILLIQUID_SPLIT_ASSET = cls.asset_finder.retrieve_asset(
cls.ILLIQUID_SPLIT_ASSET_SID,
)
cls.HILARIOUSLY_ILLIQUID_ASSET = cls.asset_finder.retrieve_asset(
cls.HILARIOUSLY_ILLIQUID_ASSET_SID,
)
cls.ASSETS = [cls.ASSET1, cls.ASSET2]
def test_current_session(self):
regular_minutes = self.trading_calendar.minutes_for_sessions_in_range(
self.equity_minute_bar_days[0],
self.equity_minute_bar_days[-1]
)
bts_minutes = days_at_time(
self.equity_minute_bar_days,
time(8, 45),
"US/Eastern"
)
# some other non-market-minute
three_oh_six_am_minutes = days_at_time(
self.equity_minute_bar_days,
time(3, 6),
"US/Eastern"
)
all_minutes = [regular_minutes, bts_minutes, three_oh_six_am_minutes]
for minute in list(concat(all_minutes)):
bar_data = self.create_bardata(lambda: minute)
self.assertEqual(
self.trading_calendar.minute_to_session_label(minute),
bar_data.current_session
)
def test_current_session_minutes(self):
first_day_minutes = self.trading_calendar.minutes_for_session(
self.equity_minute_bar_days[0]
)
for minute in first_day_minutes:
bar_data = self.create_bardata(lambda: minute)
np.testing.assert_array_equal(
first_day_minutes,
bar_data.current_session_minutes
)
def test_minute_before_assets_trading(self):
# grab minutes that include the day before the asset start
minutes = self.trading_calendar.minutes_for_session(
self.trading_calendar.previous_session_label(
self.equity_minute_bar_days[0]
)
)
# this entire day is before either asset has started trading
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute,
)
self.check_internal_consistency(bar_data)
self.assertFalse(bar_data.can_trade(self.ASSET1))
self.assertFalse(bar_data.can_trade(self.ASSET2))
self.assertFalse(bar_data.is_stale(self.ASSET1))
self.assertFalse(bar_data.is_stale(self.ASSET2))
for field in ALL_FIELDS:
for asset in self.ASSETS:
asset_value = bar_data.current(asset, field)
if field in OHLCP:
self.assertTrue(np.isnan(asset_value))
elif field == "volume":
self.assertEqual(0, asset_value)
elif field == "last_traded":
self.assertTrue(asset_value is pd.NaT)
def test_regular_minute(self):
minutes = self.trading_calendar.minutes_for_session(
self.equity_minute_bar_days[0]
)
for idx, minute in enumerate(minutes):
# day2 has prices
# (every minute for asset1, every 10 minutes for asset2)
# asset1:
# opens: 2-391
# high: 3-392
# low: 0-389
# close: 1-390
# volume: 100-3900 (by 100)
# asset2 is the same thing, but with only every 10th minute
# populated.
# this test covers the "IPO morning" case, because asset2 only
# has data starting on the 10th minute.
bar_data = self.create_bardata(
lambda: minute,
)
self.check_internal_consistency(bar_data)
asset2_has_data = (((idx + 1) % 10) == 0)
self.assertTrue(bar_data.can_trade(self.ASSET1))
self.assertFalse(bar_data.is_stale(self.ASSET1))
if idx < 9:
self.assertFalse(bar_data.can_trade(self.ASSET2))
self.assertFalse(bar_data.is_stale(self.ASSET2))
else:
self.assertTrue(bar_data.can_trade(self.ASSET2))
if asset2_has_data:
self.assertFalse(bar_data.is_stale(self.ASSET2))
else:
self.assertTrue(bar_data.is_stale(self.ASSET2))
for field in ALL_FIELDS:
asset1_value = bar_data.current(self.ASSET1, field)
asset2_value = bar_data.current(self.ASSET2, field)
# now check the actual values
if idx == 0 and field == "low":
# first low value is 0, which is interpreted as NaN
self.assertTrue(np.isnan(asset1_value))
else:
if field in OHLC:
self.assertEqual(
idx + 1 + field_info[field],
asset1_value
)
if asset2_has_data:
self.assertEqual(
idx + 1 + field_info[field],
asset2_value
)
else:
self.assertTrue(np.isnan(asset2_value))
elif field == "volume":
self.assertEqual((idx + 1) * 100, asset1_value)
if asset2_has_data:
self.assertEqual((idx + 1) * 100, asset2_value)
else:
self.assertEqual(0, asset2_value)
elif field == "price":
self.assertEqual(idx + 1, asset1_value)
if asset2_has_data:
self.assertEqual(idx + 1, asset2_value)
elif idx < 9:
# no price to forward fill from
self.assertTrue(np.isnan(asset2_value))
else:
# forward-filled price
self.assertEqual((idx // 10) * 10, asset2_value)
elif field == "last_traded":
self.assertEqual(minute, asset1_value)
if idx < 9:
self.assertTrue(asset2_value is pd.NaT)
elif asset2_has_data:
self.assertEqual(minute, asset2_value)
else:
last_traded_minute = minutes[(idx // 10) * 10]
self.assertEqual(
last_traded_minute - timedelta(minutes=1),
asset2_value
)
def test_minute_of_last_day(self):
minutes = self.trading_calendar.minutes_for_session(
self.equity_daily_bar_days[-1],
)
# this is the last day the assets exist
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute,
)
self.assertTrue(bar_data.can_trade(self.ASSET1))
self.assertTrue(bar_data.can_trade(self.ASSET2))
def test_minute_after_assets_stopped(self):
minutes = self.trading_calendar.minutes_for_session(
self.trading_calendar.next_session_label(
self.equity_minute_bar_days[-1]
)
)
last_trading_minute = self.trading_calendar.minutes_for_session(
self.equity_minute_bar_days[-1]
)[-1]
# this entire day is after both assets have stopped trading
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute,
)
self.assertFalse(bar_data.can_trade(self.ASSET1))
self.assertFalse(bar_data.can_trade(self.ASSET2))
self.assertFalse(bar_data.is_stale(self.ASSET1))
self.assertFalse(bar_data.is_stale(self.ASSET2))
self.check_internal_consistency(bar_data)
for field in ALL_FIELDS:
for asset in self.ASSETS:
asset_value = bar_data.current(asset, field)
if field in OHLCP:
self.assertTrue(np.isnan(asset_value))
elif field == "volume":
self.assertEqual(0, asset_value)
elif field == "last_traded":
self.assertEqual(last_trading_minute, asset_value)
def test_get_value_is_unadjusted(self):
# verify there is a split for SPLIT_ASSET
splits = self.adjustment_reader.get_adjustments_for_sid(
"splits",
self.SPLIT_ASSET.sid
)
self.assertEqual(1, len(splits))
split = splits[0]
self.assertEqual(
split[0],
pd.Timestamp("2016-01-06", tz='UTC')
)
# ... but that's it's not applied when using spot value
minutes = self.trading_calendar.minutes_for_sessions_in_range(
self.equity_minute_bar_days[0],
self.equity_minute_bar_days[1]
)
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute,
)
self.assertEqual(
idx + 1,
bar_data.current(self.SPLIT_ASSET, "price")
)
def test_get_value_is_adjusted_if_needed(self):
# on cls.days[1], the first 9 minutes of ILLIQUID_SPLIT_ASSET are
# missing. let's get them.
day0_minutes = self.trading_calendar.minutes_for_session(
self.equity_minute_bar_days[0]
)
day1_minutes = self.trading_calendar.minutes_for_session(
self.equity_minute_bar_days[1]
)
for idx, minute in enumerate(day0_minutes[-10:-1]):
bar_data = self.create_bardata(
lambda: minute,
)
self.assertEqual(
380,
bar_data.current(self.ILLIQUID_SPLIT_ASSET, "price")
)
bar_data = self.create_bardata(
lambda: day0_minutes[-1],
)
self.assertEqual(
390,
bar_data.current(self.ILLIQUID_SPLIT_ASSET, "price")
)
for idx, minute in enumerate(day1_minutes[0:9]):
bar_data = self.create_bardata(
lambda: minute,
)
# should be half of 390, due to the split
self.assertEqual(
195,
bar_data.current(self.ILLIQUID_SPLIT_ASSET, "price")
)
def test_get_value_at_midnight(self):
# make sure that if we try to get a minute price at a non-market
# minute, we use the previous market close's timestamp
day = self.equity_minute_bar_days[1]
eight_fortyfive_am_eastern = \
pd.Timestamp("{0}-{1}-{2} 8:45".format(
day.year, day.month, day.day),
tz='US/Eastern'
)
bar_data = self.create_bardata(
lambda: day,
)
bar_data2 = self.create_bardata(
lambda: eight_fortyfive_am_eastern,
)
with handle_non_market_minutes(bar_data), \
handle_non_market_minutes(bar_data2):
for bd in [bar_data, bar_data2]:
for field in ["close", "price"]:
self.assertEqual(
390,
bd.current(self.ASSET1, field)
)
# make sure that if the asset didn't trade at the previous
# close, we properly ffill (or not ffill)
self.assertEqual(
350,
bd.current(self.HILARIOUSLY_ILLIQUID_ASSET, "price")
)
self.assertTrue(
np.isnan(bd.current(self.HILARIOUSLY_ILLIQUID_ASSET,
"high"))
)
self.assertEqual(
0,
bd.current(self.HILARIOUSLY_ILLIQUID_ASSET, "volume")
)
    def test_get_value_during_non_market_hours(self):
        # make sure that if we try to get the OHLCV values of ASSET1 during
        # non-market hours, we don't get the previous market minute's values
        # (4:15 AM Eastern is before the market open on 2016-01-06)
        bar_data = self.create_bardata(
            simulation_dt_func=lambda:
            pd.Timestamp("2016-01-06 4:15", tz="US/Eastern"),
        )
        # OHLC are NaN and volume is 0 when there is no bar for this minute.
        self.assertTrue(np.isnan(bar_data.current(self.ASSET1, "open")))
        self.assertTrue(np.isnan(bar_data.current(self.ASSET1, "high")))
        self.assertTrue(np.isnan(bar_data.current(self.ASSET1, "low")))
        self.assertTrue(np.isnan(bar_data.current(self.ASSET1, "close")))
        self.assertEqual(0, bar_data.current(self.ASSET1, "volume"))
        # price should still forward fill
        # (390 is ASSET1's close on the last minute of the prior session)
        self.assertEqual(390, bar_data.current(self.ASSET1, "price"))
def test_can_trade_equity_same_cal_outside_lifetime(self):
# verify that can_trade returns False for the session before the
# asset's first session
session_before_asset1_start = \
self.trading_calendar.previous_session_label(
self.ASSET1.start_date
)
minutes_for_session = self.trading_calendar.minutes_for_session(
session_before_asset1_start
)
# for good measure, check the minute before the session too
minutes_to_check = chain(
[minutes_for_session[0] - pd.Timedelta(minutes=1)],
minutes_for_session
)
for minute in minutes_to_check:
bar_data = self.create_bardata(
simulation_dt_func=lambda: minute,
)
self.assertFalse(bar_data.can_trade(self.ASSET1))
# after asset lifetime
session_after_asset1_end = self.trading_calendar.next_session_label(
self.ASSET1.end_date
)
bts_after_asset1_end = session_after_asset1_end.replace(
hour=8, minute=45
).tz_convert(None).tz_localize("US/Eastern")
minutes_to_check = chain(
self.trading_calendar.minutes_for_session(
session_after_asset1_end
),
[bts_after_asset1_end]
)
for minute in minutes_to_check:
bar_data = self.create_bardata(
simulation_dt_func=lambda: minute,
)
self.assertFalse(bar_data.can_trade(self.ASSET1))
def test_can_trade_equity_same_cal_exchange_closed(self):
# verify that can_trade returns true for minutes that are
# outside the asset's calendar (assuming the asset is alive and
# there is a last price), because the asset is alive on the
# next market minute.
minutes = self.trading_calendar.minutes_for_sessions_in_range(
self.ASSET1.start_date,
self.ASSET1.end_date
)
for minute in minutes:
bar_data = self.create_bardata(
simulation_dt_func=lambda: minute,
)
self.assertTrue(bar_data.can_trade(self.ASSET1))
def test_can_trade_equity_same_cal_no_last_price(self):
# self.HILARIOUSLY_ILLIQUID_ASSET's first trade is at
# 2016-01-05 15:20:00+00:00. Make sure that can_trade returns false
# for all minutes in that session before the first trade, and true
# for all minutes afterwards.
minutes_in_session = \
self.trading_calendar.minutes_for_session(self.ASSET1.start_date)
for minute in minutes_in_session[0:49]:
bar_data = self.create_bardata(
simulation_dt_func=lambda: minute,
)
self.assertFalse(bar_data.can_trade(
self.HILARIOUSLY_ILLIQUID_ASSET)
)
for minute in minutes_in_session[50:]:
bar_data = self.create_bardata(
simulation_dt_func=lambda: minute,
)
self.assertTrue(bar_data.can_trade(
self.HILARIOUSLY_ILLIQUID_ASSET)
)
def test_is_stale_during_non_market_hours(self):
bar_data = self.create_bardata(
lambda: self.equity_minute_bar_days[1],
)
with handle_non_market_minutes(bar_data):
self.assertTrue(bar_data.is_stale(self.HILARIOUSLY_ILLIQUID_ASSET))
def test_overnight_adjustments(self):
# verify there is a split for SPLIT_ASSET
splits = self.adjustment_reader.get_adjustments_for_sid(
"splits",
self.SPLIT_ASSET.sid
)
self.assertEqual(1, len(splits))
split = splits[0]
self.assertEqual(
split[0],
pd.Timestamp("2016-01-06", tz='UTC')
)
# Current day is 1/06/16
day = self.equity_daily_bar_days[1]
eight_fortyfive_am_eastern = \
pd.Timestamp("{0}-{1}-{2} 8:45".format(
day.year, day.month, day.day),
tz='US/Eastern'
)
bar_data = self.create_bardata(
lambda: eight_fortyfive_am_eastern,
)
expected = {
'open': 391 / 2.0,
'high': 392 / 2.0,
'low': 389 / 2.0,
'close': 390 / 2.0,
'volume': 39000 * 2.0,
'price': 390 / 2.0,
}
with handle_non_market_minutes(bar_data):
for field in OHLCP + ['volume']:
value = bar_data.current(self.SPLIT_ASSET, field)
# Assert the price is adjusted for the overnight split
self.assertEqual(value, expected[field])
def test_can_trade_restricted(self):
"""
Test that can_trade will return False for a sid if it is restricted
on that dt
"""
minutes_to_check = [
(str_to_ts("2016-01-05 14:31"), False),
(str_to_ts("2016-01-06 14:31"), False),
(str_to_ts("2016-01-07 14:31"), True),
(str_to_ts("2016-01-07 15:00"), False),
(str_to_ts("2016-01-07 15:30"), True),
]
rlm = HistoricalRestrictions([
Restriction(1, str_to_ts('2016-01-05'),
RESTRICTION_STATES.FROZEN),
Restriction(1, str_to_ts('2016-01-07'),
RESTRICTION_STATES.ALLOWED),
Restriction(1, str_to_ts('2016-01-07 15:00'),
RESTRICTION_STATES.FROZEN),
Restriction(1, str_to_ts('2016-01-07 15:30'),
RESTRICTION_STATES.ALLOWED),
])
for info in minutes_to_check:
bar_data = self.create_bardata(
simulation_dt_func=lambda: info[0],
restrictions=rlm,
)
self.assertEqual(bar_data.can_trade(self.ASSET1), info[1])
class TestMinuteBarDataFuturesCalendar(WithCreateBarData,
                                       WithBarDataChecks,
                                       ZiplineTestCase):
    """Minute-level BarData tests run against the CME (futures) calendar.

    The CME calendar covers nearly the full day, which lets a single BarData
    instance be queried for assets trading on different exchange calendars
    (NYSE equity, ICE and CME futures).
    """
    START_DATE = pd.Timestamp('2016-01-05', tz='UTC')
    END_DATE = ASSET_FINDER_EQUITY_END_DATE = pd.Timestamp(
        '2016-01-07',
        tz='UTC',
    )
    ASSET_FINDER_EQUITY_SIDS = [1]
    @classmethod
    def make_equity_minute_bar_data(cls):
        # asset1 has trades every minute
        yield 1, create_minute_df_for_asset(
            cls.trading_calendar,
            cls.equity_minute_bar_days[0],
            cls.equity_minute_bar_days[-1],
        )
    @classmethod
    def make_futures_info(cls):
        # Two futures contracts: sid 6 trades on ICE, sid 7 on CME.  Sid 7
        # also has an explicit auto_close_date, exercised by
        # test_can_trade_delisted below.
        return pd.DataFrame.from_dict(
            {
                6: {
                    'symbol': 'CLH16',
                    'root_symbol': 'CL',
                    'start_date': pd.Timestamp('2016-01-04', tz='UTC'),
                    'notice_date': pd.Timestamp('2016-01-19', tz='UTC'),
                    'expiration_date': pd.Timestamp('2016-02-19', tz='UTC'),
                    'exchange': 'ICEUS',
                },
                7: {
                    'symbol': 'FVH16',
                    'root_symbol': 'FV',
                    'start_date': pd.Timestamp('2016-01-04', tz='UTC'),
                    'notice_date': pd.Timestamp('2016-01-22', tz='UTC'),
                    'expiration_date': pd.Timestamp('2016-02-22', tz='UTC'),
                    'auto_close_date': pd.Timestamp('2016-01-20', tz='UTC'),
                    'exchange': 'CME',
                },
            },
            orient='index',
        )
    @classmethod
    def init_class_fixtures(cls):
        super(TestMinuteBarDataFuturesCalendar, cls).init_class_fixtures()
        # Override the default calendar so simulation minutes span the
        # (almost 24-hour) CME trading day.
        cls.trading_calendar = get_calendar('CME')
    def test_can_trade_multiple_exchange_closed(self):
        nyse_asset = self.asset_finder.retrieve_asset(1)
        ice_asset = self.asset_finder.retrieve_asset(6)
        # minutes we're going to check (to verify that that the same bardata
        # can check multiple exchange calendars, all times Eastern):
        # 2016-01-05:
        # 20:00 (minute before ICE opens)
        # 20:01 (first minute of ICE session)
        # 20:02 (second minute of ICE session)
        # 00:00 (Cinderella's ride becomes a pumpkin)
        # 2016-01-06:
        # 9:30 (minute before NYSE opens)
        # 9:31 (first minute of NYSE session)
        # 9:32 (second minute of NYSE session)
        # 15:59 (second-to-last minute of NYSE session)
        # 16:00 (last minute of NYSE session)
        # 16:01 (minute after NYSE closed)
        # 17:59 (second-to-last minute of ICE session)
        # 18:00 (last minute of ICE session)
        # 18:01 (minute after ICE closed)
        # each row is dt, whether-nyse-is-open, whether-ice-is-open
        minutes_to_check = [
            (pd.Timestamp("2016-01-05 20:00", tz="US/Eastern"), False, False),
            (pd.Timestamp("2016-01-05 20:01", tz="US/Eastern"), False, True),
            (pd.Timestamp("2016-01-05 20:02", tz="US/Eastern"), False, True),
            (pd.Timestamp("2016-01-06 00:00", tz="US/Eastern"), False, True),
            (pd.Timestamp("2016-01-06 9:30", tz="US/Eastern"), False, True),
            (pd.Timestamp("2016-01-06 9:31", tz="US/Eastern"), True, True),
            (pd.Timestamp("2016-01-06 9:32", tz="US/Eastern"), True, True),
            (pd.Timestamp("2016-01-06 15:59", tz="US/Eastern"), True, True),
            (pd.Timestamp("2016-01-06 16:00", tz="US/Eastern"), True, True),
            (pd.Timestamp("2016-01-06 16:01", tz="US/Eastern"), False, True),
            (pd.Timestamp("2016-01-06 17:59", tz="US/Eastern"), False, True),
            (pd.Timestamp("2016-01-06 18:00", tz="US/Eastern"), False, True),
            (pd.Timestamp("2016-01-06 18:01", tz="US/Eastern"), False, False),
        ]
        for info in minutes_to_check:
            # use the CME calendar, which covers 24 hours
            bar_data = self.create_bardata(
                simulation_dt_func=lambda: info[0],
            )
            # can_trade with a list of assets returns a Series keyed by asset.
            series = bar_data.can_trade([nyse_asset, ice_asset])
            self.assertEqual(info[1], series.loc[nyse_asset])
            self.assertEqual(info[2], series.loc[ice_asset])
    def test_can_trade_delisted(self):
        """
        Test that can_trade returns False for an asset on or after its auto
        close date.
        """
        auto_closing_asset = self.asset_finder.retrieve_asset(7)
        # Our asset's auto close date is 2016-01-20, which means that as of the
        # market open for the 2016-01-20 session, `can_trade` should return
        # False.
        minutes_to_check = [
            (pd.Timestamp('2016-01-19 00:00:00', tz='UTC'), True),
            (pd.Timestamp('2016-01-19 23:00:00', tz='UTC'), True),
            (pd.Timestamp('2016-01-19 23:01:00', tz='UTC'), False),
            (pd.Timestamp('2016-01-19 23:59:00', tz='UTC'), False),
            (pd.Timestamp('2016-01-20 00:00:00', tz='UTC'), False),
            (pd.Timestamp('2016-01-20 00:01:00', tz='UTC'), False),
            (pd.Timestamp('2016-01-21 00:00:00', tz='UTC'), False),
        ]
        for info in minutes_to_check:
            bar_data = self.create_bardata(simulation_dt_func=lambda: info[0])
            self.assertEqual(bar_data.can_trade(auto_closing_asset), info[1])
class TestDailyBarData(WithCreateBarData,
                       WithBarDataChecks,
                       WithDataPortal,
                       ZiplineTestCase):
    """BarData tests in daily data frequency.

    Eight equities are created; odd sids trade every session and even sids
    trade every other session (see make_equity_daily_bar_data).  Dedicated
    sids carry a split, a merger, and a dividend -- each with a liquid and an
    illiquid variant -- to exercise adjustment handling.
    """
    START_DATE = pd.Timestamp('2016-01-05', tz='UTC')
    END_DATE = ASSET_FINDER_EQUITY_END_DATE = pd.Timestamp(
        '2016-01-11',
        tz='UTC',
    )
    CREATE_BARDATA_DATA_FREQUENCY = 'daily'
    sids = ASSET_FINDER_EQUITY_SIDS = set(range(1, 9))
    SPLIT_ASSET_SID = 3
    ILLIQUID_SPLIT_ASSET_SID = 4
    MERGER_ASSET_SID = 5
    ILLIQUID_MERGER_ASSET_SID = 6
    DIVIDEND_ASSET_SID = 7
    ILLIQUID_DIVIDEND_ASSET_SID = 8
    @classmethod
    def make_equity_info(cls):
        # Sids 1 and 2 are delisted early (2016-01-08) so the end-of-life
        # behavior in test_last_active_day/test_after_assets_dead can be
        # checked.
        frame = super(TestDailyBarData, cls).make_equity_info()
        frame.loc[[1, 2], 'end_date'] = pd.Timestamp('2016-01-08', tz='UTC')
        return frame
    @classmethod
    def make_splits_data(cls):
        return pd.DataFrame.from_records([
            {
                'effective_date': str_to_seconds("2016-01-06"),
                'ratio': 0.5,
                'sid': cls.SPLIT_ASSET_SID,
            },
            {
                'effective_date': str_to_seconds("2016-01-07"),
                'ratio': 0.5,
                'sid': cls.ILLIQUID_SPLIT_ASSET_SID,
            },
        ])
    @classmethod
    def make_mergers_data(cls):
        return pd.DataFrame.from_records([
            {
                'effective_date': str_to_seconds('2016-01-06'),
                'ratio': 0.5,
                'sid': cls.MERGER_ASSET_SID,
            },
            {
                'effective_date': str_to_seconds('2016-01-07'),
                'ratio': 0.6,
                'sid': cls.ILLIQUID_MERGER_ASSET_SID,
            }
        ])
    @classmethod
    def make_dividends_data(cls):
        return pd.DataFrame.from_records([
            {
                # only care about ex date, the other dates don't matter here
                'ex_date':
                    pd.Timestamp('2016-01-06', tz='UTC').to_datetime64(),
                'record_date':
                    pd.Timestamp('2016-01-06', tz='UTC').to_datetime64(),
                'declared_date':
                    pd.Timestamp('2016-01-06', tz='UTC').to_datetime64(),
                'pay_date':
                    pd.Timestamp('2016-01-06', tz='UTC').to_datetime64(),
                'amount': 2.0,
                'sid': cls.DIVIDEND_ASSET_SID,
            },
            {
                'ex_date':
                    pd.Timestamp('2016-01-07', tz='UTC').to_datetime64(),
                'record_date':
                    pd.Timestamp('2016-01-07', tz='UTC').to_datetime64(),
                'declared_date':
                    pd.Timestamp('2016-01-07', tz='UTC').to_datetime64(),
                'pay_date':
                    pd.Timestamp('2016-01-07', tz='UTC').to_datetime64(),
                'amount': 4.0,
                'sid': cls.ILLIQUID_DIVIDEND_ASSET_SID,
            }],
            columns=[
                'ex_date',
                'record_date',
                'declared_date',
                'pay_date',
                'amount',
                'sid',
            ]
        )
    @classmethod
    def make_adjustment_writer_equity_daily_bar_reader(cls):
        return MockDailyBarReader()
    @classmethod
    def make_equity_daily_bar_data(cls):
        # interval is 1 for odd sids (trade every session) and 2 for even
        # sids (trade every other session, i.e. "illiquid").
        for sid in cls.sids:
            asset = cls.asset_finder.retrieve_asset(sid)
            yield sid, create_daily_df_for_asset(
                cls.trading_calendar,
                asset.start_date,
                asset.end_date,
                interval=2 - sid % 2
            )
    @classmethod
    def init_class_fixtures(cls):
        super(TestDailyBarData, cls).init_class_fixtures()
        cls.ASSET1 = cls.asset_finder.retrieve_asset(1)
        cls.ASSET2 = cls.asset_finder.retrieve_asset(2)
        cls.SPLIT_ASSET = cls.asset_finder.retrieve_asset(
            cls.SPLIT_ASSET_SID,
        )
        cls.ILLIQUID_SPLIT_ASSET = cls.asset_finder.retrieve_asset(
            cls.ILLIQUID_SPLIT_ASSET_SID,
        )
        cls.MERGER_ASSET = cls.asset_finder.retrieve_asset(
            cls.MERGER_ASSET_SID,
        )
        cls.ILLIQUID_MERGER_ASSET = cls.asset_finder.retrieve_asset(
            cls.ILLIQUID_MERGER_ASSET_SID,
        )
        cls.DIVIDEND_ASSET = cls.asset_finder.retrieve_asset(
            cls.DIVIDEND_ASSET_SID,
        )
        cls.ILLIQUID_DIVIDEND_ASSET = cls.asset_finder.retrieve_asset(
            cls.ILLIQUID_DIVIDEND_ASSET_SID,
        )
        cls.ASSETS = [cls.ASSET1, cls.ASSET2]
    def get_last_minute_of_session(self, session_label):
        # Returns the close minute of the given session.
        return self.trading_calendar.open_and_close_for_session(
            session_label
        )[1]
    def test_current_session(self):
        for session in self.trading_calendar.sessions_in_range(
                self.equity_daily_bar_days[0],
                self.equity_daily_bar_days[-1]
        ):
            bar_data = self.create_bardata(
                simulation_dt_func=lambda: self.get_last_minute_of_session(
                    session
                )
            )
            self.assertEqual(session, bar_data.current_session)
    def test_day_before_assets_trading(self):
        # use the day before self.bcolz_daily_bar_days[0]
        minute = self.get_last_minute_of_session(
            self.trading_calendar.previous_session_label(
                self.equity_daily_bar_days[0]
            )
        )
        bar_data = self.create_bardata(
            simulation_dt_func=lambda: minute,
        )
        self.check_internal_consistency(bar_data)
        self.assertFalse(bar_data.can_trade(self.ASSET1))
        self.assertFalse(bar_data.can_trade(self.ASSET2))
        self.assertFalse(bar_data.is_stale(self.ASSET1))
        self.assertFalse(bar_data.is_stale(self.ASSET2))
        # Before any data exists: NaN prices, zero volume, NaT last_traded.
        for field in ALL_FIELDS:
            for asset in self.ASSETS:
                asset_value = bar_data.current(asset, field)
                if field in OHLCP:
                    self.assertTrue(np.isnan(asset_value))
                elif field == "volume":
                    self.assertEqual(0, asset_value)
                elif field == "last_traded":
                    self.assertTrue(asset_value is pd.NaT)
    def test_semi_active_day(self):
        # on self.equity_daily_bar_days[0], only asset1 has data
        bar_data = self.create_bardata(
            simulation_dt_func=lambda: self.get_last_minute_of_session(
                self.equity_daily_bar_days[0]
            ),
        )
        self.check_internal_consistency(bar_data)
        self.assertTrue(bar_data.can_trade(self.ASSET1))
        self.assertFalse(bar_data.can_trade(self.ASSET2))
        # because there is real data
        self.assertFalse(bar_data.is_stale(self.ASSET1))
        # because there has never been a trade bar yet
        self.assertFalse(bar_data.is_stale(self.ASSET2))
        self.assertEqual(3, bar_data.current(self.ASSET1, "open"))
        self.assertEqual(4, bar_data.current(self.ASSET1, "high"))
        self.assertEqual(1, bar_data.current(self.ASSET1, "low"))
        self.assertEqual(2, bar_data.current(self.ASSET1, "close"))
        self.assertEqual(200, bar_data.current(self.ASSET1, "volume"))
        self.assertEqual(2, bar_data.current(self.ASSET1, "price"))
        self.assertEqual(self.equity_daily_bar_days[0],
                         bar_data.current(self.ASSET1, "last_traded"))
        for field in OHLCP:
            self.assertTrue(np.isnan(bar_data.current(self.ASSET2, field)),
                            field)
        self.assertEqual(0, bar_data.current(self.ASSET2, "volume"))
        self.assertTrue(
            bar_data.current(self.ASSET2, "last_traded") is pd.NaT
        )
    def test_fully_active_day(self):
        bar_data = self.create_bardata(
            simulation_dt_func=lambda: self.get_last_minute_of_session(
                self.equity_daily_bar_days[1]
            ),
        )
        self.check_internal_consistency(bar_data)
        # on self.equity_daily_bar_days[1], both assets have data
        for asset in self.ASSETS:
            self.assertTrue(bar_data.can_trade(asset))
            self.assertFalse(bar_data.is_stale(asset))
            self.assertEqual(4, bar_data.current(asset, "open"))
            self.assertEqual(5, bar_data.current(asset, "high"))
            self.assertEqual(2, bar_data.current(asset, "low"))
            self.assertEqual(3, bar_data.current(asset, "close"))
            self.assertEqual(300, bar_data.current(asset, "volume"))
            self.assertEqual(3, bar_data.current(asset, "price"))
            self.assertEqual(
                self.equity_daily_bar_days[1],
                bar_data.current(asset, "last_traded")
            )
    def test_last_active_day(self):
        bar_data = self.create_bardata(
            simulation_dt_func=lambda: self.get_last_minute_of_session(
                self.equity_daily_bar_days[-1]
            ),
        )
        self.check_internal_consistency(bar_data)
        # NOTE(review): the `asset in (1, 2)` checks rely on zipline Assets
        # comparing equal to their integer sids -- confirm Asset.__eq__(int)
        # semantics before touching this.
        for asset in self.ASSETS:
            if asset in (1, 2):
                self.assertFalse(bar_data.can_trade(asset))
            else:
                self.assertTrue(bar_data.can_trade(asset))
            self.assertFalse(bar_data.is_stale(asset))
            if asset in (1, 2):
                # Sids 1 and 2 were delisted on 2016-01-08 (make_equity_info),
                # so by the last session they have no data.
                assert_almost_equal(nan, bar_data.current(asset, "open"))
                assert_almost_equal(nan, bar_data.current(asset, "high"))
                assert_almost_equal(nan, bar_data.current(asset, "low"))
                assert_almost_equal(nan, bar_data.current(asset, "close"))
                assert_almost_equal(0, bar_data.current(asset, "volume"))
                assert_almost_equal(nan, bar_data.current(asset, "price"))
            else:
                self.assertEqual(6, bar_data.current(asset, "open"))
                self.assertEqual(7, bar_data.current(asset, "high"))
                self.assertEqual(4, bar_data.current(asset, "low"))
                self.assertEqual(5, bar_data.current(asset, "close"))
                self.assertEqual(500, bar_data.current(asset, "volume"))
                self.assertEqual(5, bar_data.current(asset, "price"))
    def test_after_assets_dead(self):
        session = self.END_DATE
        bar_data = self.create_bardata(
            simulation_dt_func=lambda: session,
        )
        self.check_internal_consistency(bar_data)
        for asset in self.ASSETS:
            self.assertFalse(bar_data.can_trade(asset))
            self.assertFalse(bar_data.is_stale(asset))
            for field in OHLCP:
                self.assertTrue(np.isnan(bar_data.current(asset, field)))
            self.assertEqual(0, bar_data.current(asset, "volume"))
            last_traded_dt = bar_data.current(asset, "last_traded")
            # ASSETS only holds ASSET1 and ASSET2, so this branch is always
            # taken; the last trade was their final active session.
            if asset in (self.ASSET1, self.ASSET2):
                self.assertEqual(self.equity_daily_bar_days[3],
                                 last_traded_dt)
    @parameterized.expand([
        ("split", 2, 3, 3, 1.5),
        ("merger", 2, 3, 3, 1.8),
        ("dividend", 2, 3, 3, 2.88)
    ])
    def test_get_value_adjustments(self,
                                   adjustment_type,
                                   liquid_day_0_price,
                                   liquid_day_1_price,
                                   illiquid_day_0_price,
                                   illiquid_day_1_price_adjusted):
        """Test the behaviour of spot prices during adjustments."""
        table_name = adjustment_type + 's'
        liquid_asset = getattr(self, (adjustment_type.upper() + "_ASSET"))
        illiquid_asset = getattr(
            self,
            ("ILLIQUID_" + adjustment_type.upper() + "_ASSET")
        )
        # verify there is an adjustment for liquid_asset
        adjustments = self.adjustment_reader.get_adjustments_for_sid(
            table_name,
            liquid_asset.sid
        )
        self.assertEqual(1, len(adjustments))
        adjustment = adjustments[0]
        self.assertEqual(
            adjustment[0],
            pd.Timestamp("2016-01-06", tz='UTC')
        )
        # ... but that's it's not applied when using spot value
        bar_data = self.create_bardata(
            simulation_dt_func=lambda: self.equity_daily_bar_days[0],
        )
        self.assertEqual(
            liquid_day_0_price,
            bar_data.current(liquid_asset, "price")
        )
        bar_data = self.create_bardata(
            simulation_dt_func=lambda: self.equity_daily_bar_days[1],
        )
        self.assertEqual(
            liquid_day_1_price,
            bar_data.current(liquid_asset, "price")
        )
        # ... except when we have to forward fill across a day boundary
        # ILLIQUID_ASSET has no data on days 0 and 2, and a split on day 2
        bar_data = self.create_bardata(
            simulation_dt_func=lambda: self.equity_daily_bar_days[1],
        )
        self.assertEqual(
            illiquid_day_0_price, bar_data.current(illiquid_asset, "price")
        )
        bar_data = self.create_bardata(
            simulation_dt_func=lambda: self.equity_daily_bar_days[2],
        )
        # 3 (price from previous day) * 0.5 (split ratio)
        self.assertAlmostEqual(
            illiquid_day_1_price_adjusted,
            bar_data.current(illiquid_asset, "price")
        )
    def test_can_trade_restricted(self):
        """
        Test that can_trade will return False for a sid if it is restricted
        on that dt
        """
        minutes_to_check = [
            (pd.Timestamp("2016-01-05", tz="UTC"), False),
            (pd.Timestamp("2016-01-06", tz="UTC"), False),
            (pd.Timestamp("2016-01-07", tz="UTC"), True),
        ]
        rlm = HistoricalRestrictions([
            Restriction(1, str_to_ts('2016-01-05'),
                        RESTRICTION_STATES.FROZEN),
            Restriction(1, str_to_ts('2016-01-07'),
                        RESTRICTION_STATES.ALLOWED),
        ])
        for info in minutes_to_check:
            bar_data = self.create_bardata(
                simulation_dt_func=lambda: info[0],
                restrictions=rlm
            )
            self.assertEqual(bar_data.can_trade(self.ASSET1), info[1])
| bartosh/zipline | tests/test_bar_data.py | Python | apache-2.0 | 45,350 |
from setuptools import setup

version = '1.0.0'

# Read the long description up front with a context manager so the file
# handle is closed deterministically (a bare open().read() leaks the handle
# until garbage collection).
with open('README.md') as readme:
    long_description = readme.read()

setup(
    name='chanGenerator',
    version=version,
    description='generate changelog for your github repository based on closed issues',
    long_description=long_description,
    author='Pratyush Verma',
    keywords="github changelog git command-line cli",
    install_requires=[
        "requests",
    ],
    scripts=['chanGenerator'],
)
| p-v/chanGenerator | setup.py | Python | mit | 404 |
import unittest
import json
import flask
import friendsNet.resources as resources
import friendsNet.database as database
# Path of the database file used only by this test module.
DB_PATH = 'db/friendsNet_test.db'
ENGINE = database.Engine(DB_PATH)
# Media type and profile checked against response Content-Type headers.
COLLECTION_JSON = "application/vnd.collection+json"
COMMENT_PROFILE = "/profiles/comment-profile"
#Tell Flask that I am running it in testing mode.
resources.app.config['TESTING'] = True
#Necessary for correct translation in url_for
resources.app.config['SERVER_NAME'] = 'localhost:5000'
#Database Engine utilized in our testing
resources.app.config.update({'Engine': ENGINE})
class ResourcesAPITestCase(unittest.TestCase):
    """Base fixture for the API tests.

    Creates the database schema once per class, and populates/clears it
    around every test.  NOTE: this module uses Python 2 print statements.
    """
    #INITIATION AND TEARDOWN METHODS
    @classmethod
    def setUpClass(cls):
        ''' Creates the database structure. Removes first any preexisting database file.'''
        print "Testing ", cls.__name__
        ENGINE.remove_database()
        ENGINE.create_tables()
    @classmethod
    def tearDownClass(cls):
        '''Remove the testing database.'''
        print "Testing ENDED for ", cls.__name__
        ENGINE.remove_database()
    def setUp(self):
        '''Populates the database.'''
        #This method loads the initial values from friendsNet_data_db.sql
        ENGINE.populate_tables()
        #Activate app_context for using url_for
        self.app_context = resources.app.app_context()
        self.app_context.push()
        #Create a test client
        self.client = resources.app.test_client()
    def tearDown(self):
        '''
        Remove all records from database.
        '''
        ENGINE.clear()
        self.app_context.pop()
class UserCommentsTestCase (ResourcesAPITestCase):
    """Tests for the User_comments resource (GET /users/<id>/comments/)."""
    # Expected Collection+JSON body for user 4, who has two comments.
    resp_get = {
        "collection" : {
            "version" : "1.0",
            "href" : "/friendsNet/api/users/4/comments/",
            "links" : [
                {"href" : "/friendsNet/api/users/4/profile/", "rel" : "author", "prompt" : "User profile"}
            ],
            "items" : [
                {
                    "href" : "/friendsNet/api/comments/10/",
                    "data" : [
                        {"name" : "id", "value" : 10, "prompt" : "Comment id"},
                        {"name" : "status_id", "value" : 5, "prompt" : "Status id"},
                        {"name" : "user_id", "value" : 4, "prompt" : "User id"},
                        {"name" : "content", "value" : "Gooooood.", "prompt" : "Content"},
                        {"name" : "creation_time", "value" : 189, "prompt" : "Creation time"}
                    ],
                    "links" : [
                        {"href" : "/friendsNet/api/statuses/5/", "rel" : "status", "prompt" : "Status commented"}
                    ]
                },
                {
                    "href" : "/friendsNet/api/comments/2/",
                    "data" : [
                        {"name" : "id", "value" : 2, "prompt" : "Comment id"},
                        {"name" : "status_id", "value" : 1, "prompt" : "Status id"},
                        {"name" : "user_id", "value" : 4, "prompt" : "User id"},
                        {"name" : "content", "value" : "What!!?", "prompt" : "Content"},
                        {"name" : "creation_time", "value" : 140, "prompt" : "Creation time"}
                    ],
                    "links" : [
                        {"href" : "/friendsNet/api/statuses/1/", "rel" : "status", "prompt" : "Status commented"}
                    ]
                }
            ]
        }
    }
    # Expected body for user 6, who has no comments (empty items list).
    resp_get_empty = {
        "collection" : {
            "version" : "1.0",
            "href" : "/friendsNet/api/users/6/comments/",
            "links" : [
                {"href" : "/friendsNet/api/users/6/profile/", "rel" : "author", "prompt" : "User profile"}
            ],
            "items" : []
        }
    }
    def setUp(self):
        # URLs for: a user with comments, a user without, a missing user.
        super(UserCommentsTestCase, self).setUp()
        self.url = resources.api.url_for(resources.User_comments, user_id = 4, _external = False)
        self.url_empty = resources.api.url_for(resources.User_comments, user_id = 6, _external = False)
        self.url_wrong = resources.api.url_for(resources.User_comments, user_id = 999, _external = False)
    def test_url(self):
        #Checks that the URL points to the right resource
        _url = '/friendsNet/api/users/4/comments/'
        print '('+self.test_url.__name__+')', self.test_url.__doc__
        with resources.app.test_request_context(_url):
            rule = flask.request.url_rule
            view_point = resources.app.view_functions[rule.endpoint].view_class
            self.assertEquals(view_point, resources.User_comments)
    def test_wrong_url(self):
        # A missing user returns a 404 Collection+JSON error document.
        resp = self.client.get(self.url_wrong, headers = {"Accept" : COLLECTION_JSON})
        self.assertEquals(resp.status_code, 404)
        data = json.loads(resp.data)["collection"]
        version = data["version"] #test VERSION
        self.assertEquals(version, self.resp_get["collection"]["version"])
        href = data["href"] #test HREF
        self.assertEquals(href, self.url_wrong)
        error = data["error"]
        self.assertEquals(error["code"], 404)
    #TEST GET
    #200 + MIMETYPE & PROFILE
    def test_get_comments(self):
        print '('+self.test_get_comments.__name__+')', self.test_get_comments.__doc__
        with resources.app.test_client() as client:
            resp = client.get(self.url, headers = {"Accept" : COLLECTION_JSON})
            self.assertEquals(resp.status_code, 200)
            data = json.loads(resp.data)
            self.assertEquals(self.resp_get, data)
            self.assertEqual(resp.headers.get("Content-Type", None), COLLECTION_JSON + ";profile=" + COMMENT_PROFILE)
    #EMPTY ITEMS
    def test_get_empty_comments(self):
        print '('+self.test_get_empty_comments.__name__+')', self.test_get_empty_comments.__doc__
        with resources.app.test_client() as client:
            resp = client.get(self.url_empty, headers = {"Accept" : COLLECTION_JSON})
            self.assertEquals(resp.status_code, 200)
            data = json.loads(resp.data)
            self.assertEquals(self.resp_get_empty, data)
            self.assertEqual(resp.headers.get("Content-Type", None), COLLECTION_JSON + ";profile=" + COMMENT_PROFILE)
    #404
    def test_get_not_existing_user(self):
        print '('+self.test_get_not_existing_user.__name__+')', self.test_get_not_existing_user.__doc__
        with resources.app.test_client() as client:
            resp = client.get(self.url_wrong, headers = {"Accept" : COLLECTION_JSON})
            self.assertEquals(resp.status_code, 404)
if __name__ == '__main__':
print 'Start running tests'
    unittest.main()
| Diiaablo95/friendsNet | test/services_api_test_user_comments.py | Python | gpl-3.0 | 6,705 |
from django.conf import settings
from django.db import models
class DeploymentManager(models.Manager):
    """Query helpers for the sensor deployment registry."""

    def get_last_id(self):
        """Return the number of Deployment rows currently stored."""
        return super(DeploymentManager, self).get_queryset().count()

    # def get_sensor_config_not_assigned(self):
    #     return Deployment.objects.filter(platform_id='').first()

    def get_all_sensor_detail(self):
        """Return a list of dicts describing every registered sensor."""
        return [
            {'sensor_id': item.sensor_id,
             'sensor_ip': item.sensor_ip,
             'config_path': item.sensor_config_path}
            for item in Deployment.objects.get_queryset()
        ]

    def get_sensor_config_by_sensor_id(self, sensor_id):
        """Return the config path for `sensor_id`, or None if unknown."""
        result = Deployment.objects.filter(sensor_id=sensor_id).first()
        return result.sensor_config_path if result else None

    # def update_assign_sensor(self, sensor_id, platform_id, platform_ip):
    #     return super(DeploymentManager, self).get_queryset().filter(sensor_id=sensor_id)\
    #         .update(platform_id=platform_id, platform_ip=platform_ip)

    def get_sensor_detail_by_sensor_id(self, sensor_id):
        """Return the detail dict for `sensor_id`, or None if unknown."""
        item = Deployment.objects.filter(sensor_id=sensor_id).first()
        if item:
            return {'sensor_id': item.sensor_id,
                    'sensor_ip': item.sensor_ip,
                    'config_path': item.sensor_config_path}
        return None
class Deployment(models.Model):
    """One row per deployed sensor: its id, IP address and config location."""
    id = models.IntegerField(primary_key=True)
    # Unique external identifier of the sensor.
    sensor_id = models.TextField(unique=True)
    sensor_ip = models.TextField()
    # Filesystem path of the sensor's configuration file.
    sensor_config_path = models.TextField()
    # platform_id = models.TextField(default='')
    # platform_ip = models.TextField(default='')
    objects = DeploymentManager()
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for tf_agents.replay_buffers.reverb_replay_buffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import reverb
import tensorflow as tf
from tf_agents.drivers import py_driver
from tf_agents.environments import test_envs
from tf_agents.policies import random_py_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.replay_buffers import reverb_utils
from tf_agents.specs import array_spec
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
from tf_agents.utils import test_utils
class ReverbReplayBufferTest(parameterized.TestCase, test_utils.TestCase):
  def setUp(self):
    """Builds the test env, the trajectory spec, and a live Reverb server."""
    super(ReverbReplayBufferTest, self).setUp()
    # Prepare the environment (and the corresponding specs).
    self._env = test_envs.EpisodeCountingEnv(steps_per_episode=3)
    tensor_time_step_spec = tf.nest.map_structure(tensor_spec.from_spec,
                                                  self._env.time_step_spec())
    tensor_action_spec = tensor_spec.from_spec(self._env.action_spec())
    self._data_spec = trajectory.Trajectory(
        step_type=tensor_time_step_spec.step_type,
        observation=tensor_time_step_spec.observation,
        action=tensor_action_spec,
        policy_info=(),
        next_step_type=tensor_time_step_spec.step_type,
        reward=tensor_time_step_spec.reward,
        discount=tensor_time_step_spec.discount,
    )
    # TODO(b/188427258) Add time dimension when using Reverb.TrajectoryWriters.
    # table_signature = tensor_spec.add_outer_dim(self._data_spec)
    self._array_data_spec = tensor_spec.to_nest_array_spec(self._data_spec)
    # Initialize and start a Reverb server (and set up a client to it).
    self._table_name = 'test_table'
    uniform_table = reverb.Table(
        self._table_name,
        max_size=100,
        sampler=reverb.selectors.Uniform(),
        remover=reverb.selectors.Fifo(),
        rate_limiter=reverb.rate_limiters.MinSize(1),
        # signature=table_signature,
    )
    self._server = reverb.Server([uniform_table])
    self._py_client = reverb.Client('localhost:{}'.format(self._server.port))
  def tearDown(self):
    """Stops the Reverb server started in setUp (if still running)."""
    if self._server:
      # Stop the Reverb server if it is running.
      self._server.stop()
      self._server = None
    super(ReverbReplayBufferTest, self).tearDown()
def _insert_random_data(self,
env,
num_steps,
sequence_length=2,
additional_observers=None):
"""Insert `num_step` random observations into Reverb server."""
observers = [] if additional_observers is None else additional_observers
traj_obs = reverb_utils.ReverbAddTrajectoryObserver(
self._py_client, self._table_name, sequence_length=sequence_length)
observers.append(traj_obs)
policy = random_py_policy.RandomPyPolicy(env.time_step_spec(),
env.action_spec())
driver = py_driver.PyDriver(env,
policy,
observers=observers,
max_steps=num_steps)
time_step = env.reset()
driver.run(time_step)
traj_obs.close()
  @parameterized.named_parameters(
      ('_sequence_length_none', None),
      ('_sequence_length_eq_num_steps', 2),
      ('_sequence_length_gt_num_steps', 4))
  def test_dataset_samples_sequential(self, sequence_length):
    """Sampled pairs are consecutive steps from a single episode."""
    def validate_data_observer(traj):
      if not array_spec.check_arrays_nest(traj, self._array_data_spec):
        raise ValueError('Trajectory incompatible with array_data_spec')
    # Observe 20 steps from the env. This isn't the num_steps we're testing.
    self._insert_random_data(
        self._env, num_steps=20,
        additional_observers=[validate_data_observer],
        sequence_length=sequence_length or 4)
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec, self._table_name, local_server=self._server,
        sequence_length=sequence_length)
    # Make sure observations belong to the same episode and their step are off
    # by 1.  (EpisodeCountingEnv observations are (episode, step) pairs.)
    for sample, _ in replay.as_dataset(num_steps=2).take(100):
      episode, step = sample.observation
      self.assertEqual(episode[0], episode[1])
      self.assertEqual(step[0] + 1, step[1])
  def test_dataset_with_variable_sequence_length_truncates(self):
    """num_steps sampling truncates the odd trailing element of each episode.

    Two episodes of lengths 3 and 5 are inserted; with num_steps=2 the
    deterministic pass yields [1,2], [10,20], [30,40] -- the trailing 3 and
    50 are dropped.
    """
    spec = tf.TensorSpec((), tf.int64)
    table_spec = tf.TensorSpec((None,), tf.int64)
    table = reverb.Table(
        name=self._table_name,
        sampler=reverb.selectors.Fifo(),
        remover=reverb.selectors.Fifo(),
        max_times_sampled=1,
        max_size=100,
        rate_limiter=reverb.rate_limiters.MinSize(1),
        signature=table_spec,
    )
    server = reverb.Server([table])
    py_client = reverb.Client('localhost:{}'.format(server.port))
    # Insert two episodes: one of length 3 and one of length 5
    with py_client.trajectory_writer(10) as writer:
      writer.append(1)
      writer.append(2)
      writer.append(3)
      writer.create_item(
          self._table_name, trajectory=writer.history[-3:], priority=5)
    with py_client.trajectory_writer(10) as writer:
      writer.append(10)
      writer.append(20)
      writer.append(30)
      writer.append(40)
      writer.append(50)
      writer.create_item(
          self._table_name, trajectory=writer.history[-5:], priority=5)
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        spec, self._table_name, local_server=server, sequence_length=None,
        rate_limiter_timeout_ms=100)
    ds = replay.as_dataset(single_deterministic_pass=True, num_steps=2)
    it = iter(ds)
    # Expect [1, 2]
    data, _ = next(it)
    self.assertAllEqual(data, [1, 2])
    # Expect [10, 20]
    data, _ = next(it)
    self.assertAllEqual(data, [10, 20])
    # Expect [30, 40]
    data, _ = next(it)
    self.assertAllEqual(data, [30, 40])
    with self.assertRaises(StopIteration):
      next(it)
  def test_dataset_with_preprocess(self):
    """sequence_preprocess_fn is applied to sampled sequences.

    The preprocess function shifts the step component of each observation by
    +1, so preprocessed samples show odd-to-even step pairs instead of the
    raw even-to-odd pairs.
    """
    def validate_data_observer(traj):
      if not array_spec.check_arrays_nest(traj, self._array_data_spec):
        raise ValueError('Trajectory incompatible with array_data_spec')
    def preprocess(traj):
      episode, step = traj.observation
      return traj.replace(observation=(episode, step + 1))
    # Observe 10 steps from the env. This isn't the num_steps we're testing.
    self._insert_random_data(
        self._env,
        num_steps=10,
        additional_observers=[validate_data_observer],
        sequence_length=4)
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec,
        self._table_name,
        local_server=self._server,
        sequence_length=4)
    dataset = replay.as_dataset(num_steps=2)
    for sample, _ in dataset.take(5):
      episode, step = sample.observation
      self.assertEqual(episode[0], episode[1])
      self.assertEqual(step[0] + 1, step[1])
      # From even to odd steps
      self.assertEqual(0, step[0].numpy() % 2)
      self.assertEqual(1, step[1].numpy() % 2)
    dataset = replay.as_dataset(
        num_steps=2, sample_batch_size=1, sequence_preprocess_fn=preprocess)
    for sample, _ in dataset.take(5):
      episode, step = sample.observation
      self.assertEqual(episode[0, 0], episode[0, 1])
      self.assertEqual(step[0, 0] + 1, step[0, 1])
      # Makes sure the preprocess has happened.
      # From odd to even steps
      self.assertEqual(1, step[0, 0].numpy() % 2)
      self.assertEqual(0, step[0, 1].numpy() % 2)
  def test_single_episode_dataset(self):
    """Full-episode samples have the expected shape and consecutive steps."""
    sequence_length = 3
    self._insert_random_data(
        self._env,
        num_steps=sequence_length,
        sequence_length=sequence_length)
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec,
        self._table_name,
        sequence_length=None,
        local_server=self._server)
    # Make sure observations are off by 1 given we are counting transitions in
    # the env observations.
    dataset = replay.as_dataset()
    for sample, _ in dataset.take(5):
      episode, step = sample.observation
      self.assertEqual((sequence_length,), episode.shape)
      self.assertEqual((sequence_length,), step.shape)
      # Same episode id throughout; steps increase by 1 from the first.
      self.assertAllEqual([0] * sequence_length, episode - episode[:1])
      self.assertAllEqual(list(range(sequence_length)), step - step[:1])
  def test_variable_length_episodes_dataset(self):
    """Episodes of different lengths are each returned whole."""
    # Add one episode of each length.
    for sequence_length in range(1, 10):
      env = test_envs.EpisodeCountingEnv(steps_per_episode=sequence_length)
      self._insert_random_data(
          env,
          num_steps=sequence_length,
          sequence_length=sequence_length)
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec,
        self._table_name,
        sequence_length=None,
        local_server=self._server)
    # Make sure observations are off by 1 given we are counting transitions in
    # the env observations.
    dataset = replay.as_dataset(sample_batch_size=1)
    for sample, _ in dataset.take(5):
      episode, step = sample.observation
      self.assertIn(episode.shape[1], range(1, 10))
      self.assertIn(step.shape[1], range(1, 10))
      length = episode.shape[1]
      # All Episode id are 0.
      self.assertAllEqual([[0] * length], episode)
      # Steps id is sequential up its length.
      self.assertAllEqual([list(range(length))], step)
  @parameterized.named_parameters(
      ('_sequence_length_1', 1),
      ('_sequence_length_2', 2),
      ('_sequence_length_5', 5))
  def test_batched_episodes_dataset(self, sequence_length):
    """Batched full-episode samples keep per-row episode/step consistency."""
    # Observe batch_size * sequence_length steps to have at least 3 episodes
    batch_size = 3
    env = test_envs.EpisodeCountingEnv(steps_per_episode=sequence_length)
    self._insert_random_data(
        env,
        num_steps=batch_size * sequence_length,
        sequence_length=sequence_length)
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec,
        self._table_name,
        sequence_length=None,
        local_server=self._server)
    dataset = replay.as_dataset(batch_size)
    for sample, _ in dataset.take(5):
      episode, step = sample.observation
      self.assertEqual((batch_size, sequence_length), episode.shape)
      self.assertEqual((batch_size, sequence_length), step.shape)
      for n in range(sequence_length):
        # All elements in the same batch should belong to the same episode.
        self.assertAllEqual(episode[:, 0], episode[:, n])
        # All elements in the same batch should have consecutive steps.
        self.assertAllEqual(step[:, 0] + n, step[:, n])
  @parameterized.named_parameters(
      ('_num_steps_1', 1),
      ('_num_steps_2', 2),
      ('_num_steps_5', 5),
      ('_num_steps_10', 10),
      ('_num_steps_None', None))
  def test_sequential_ordering(self, num_steps):
    """Sub-sequences of num_steps preserve episode identity and step order."""
    sequence_length = 10
    batch_size = 5
    env = test_envs.EpisodeCountingEnv(steps_per_episode=sequence_length)
    self._insert_random_data(
        env,
        num_steps=batch_size * sequence_length,
        sequence_length=sequence_length)
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec,
        self._table_name,
        sequence_length=sequence_length,
        local_server=self._server)
    dataset = replay.as_dataset(batch_size, num_steps=num_steps)
    # num_steps=None means samples span the full stored sequence_length.
    num_steps = num_steps or sequence_length
    for sample, _ in dataset.take(10):
      episode, step = sample.observation
      self.assertEqual((batch_size, num_steps), episode.shape)
      self.assertEqual((batch_size, num_steps), step.shape)
      for n in range(num_steps):
        # Within each sampled row the episode id is constant across time.
        self.assertAllEqual(episode[:, 0], episode[:, n])
        # Within each sampled row the step ids are consecutive.
        self.assertAllEqual(step[:, 0] + n, step[:, n])
  def test_sample_single_episode(self):
    """A single stored episode can be sampled repeatedly without blocking."""
    num_episodes = 1
    sequence_length = 100
    batch_size = 10
    num_steps = 5
    env = test_envs.EpisodeCountingEnv(steps_per_episode=sequence_length)
    # Insert only one episode in the RB.
    self._insert_random_data(
        env,
        num_steps=num_episodes * sequence_length,
        sequence_length=sequence_length)
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec,
        self._table_name,
        sequence_length=sequence_length,
        local_server=self._server)
    dataset = replay.as_dataset(batch_size, num_steps=num_steps)
    n_samples = 0
    for sample, _ in dataset.take(10):
      n_samples += 1
      episode, step = sample.observation
      # The episode should always be 0 (only one episode was stored).
      episode_id = tf.constant(0, dtype=episode.dtype, shape=episode.shape)
      self.assertAllEqual(episode_id, episode)
      for n in range(num_steps):
        # Within each sampled row the step ids are consecutive.
        self.assertAllEqual(step[:, 0] + n, step[:, n])
    # Ensure we actually sampled 10 times.
    self.assertEqual(10, n_samples)
def test_capacity_set(self):
table_name = 'test_table'
capacity = 100
uniform_table = reverb.Table(
table_name,
max_size=capacity,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(3))
server = reverb.Server([uniform_table])
data_spec = tensor_spec.TensorSpec((), tf.float32)
replay = reverb_replay_buffer.ReverbReplayBuffer(
data_spec, table_name, local_server=server, sequence_length=None)
self.assertEqual(capacity, replay.capacity)
server.stop()
  def test_size_empty(self):
    """A freshly created replay buffer reports zero frames."""
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec, self._table_name, local_server=self._server,
        sequence_length=None)
    self.assertEqual(replay.num_frames(), 0)
  @parameterized.named_parameters(
      ('_sequence_length_none', None),
      ('_sequence_length_eq_num_steps', 20))
  def test_size_with_data_inserted(self, sequence_length):
    """num_frames() reflects the number of transitions written."""
    num_steps = 20
    self._insert_random_data(self._env, num_steps=num_steps)
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec, self._table_name, local_server=self._server,
        sequence_length=sequence_length)
    # The number of observations are off by 1 given we are counting transitions
    # in the env observations.
    self.assertEqual(replay.num_frames(), 19)
  def test_raises_if_ask_for_num_steps_gt_sequence_length(self):
    """as_dataset rejects num_steps larger than the stored sequence_length."""
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec, self._table_name, local_server=self._server,
        sequence_length=2)
    with self.assertRaisesRegex(ValueError, r'num_steps > sequence_length'):
      replay.as_dataset(num_steps=4)
  def test_raises_if_ask_for_num_steps_not_multiple_sequence_length(self):
    """as_dataset rejects num_steps that don't evenly divide sequence_length."""
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec, self._table_name, local_server=self._server,
        sequence_length=4)
    with self.assertRaisesRegex(ValueError, r'not a multiple of num_steps'):
      replay.as_dataset(num_steps=3)
  def test_raises_deterministic_dataset_from_random_table(self):
    """single_deterministic_pass requires a deterministic sampler and remover."""
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec, self._table_name, local_server=self._server,
        sequence_length=None)
    with self.assertRaisesRegex(
        ValueError, r'either the sampler or the remover is not deterministic'):
      replay.as_dataset(single_deterministic_pass=True)
def test_deterministic_dataset_from_heap_sampler_remover(self):
uniform_sampler_min_heap_remover_table = reverb.Table(
name=self._table_name,
sampler=reverb.selectors.MaxHeap(),
remover=reverb.selectors.MinHeap(),
max_size=100,
max_times_sampled=0,
rate_limiter=reverb.rate_limiters.MinSize(1))
server = reverb.Server([uniform_sampler_min_heap_remover_table])
replay = reverb_replay_buffer.ReverbReplayBuffer(
self._data_spec,
self._table_name,
local_server=server,
sequence_length=None)
replay.as_dataset(single_deterministic_pass=True)
server.stop()
  @parameterized.named_parameters(
      ('_default', tf.distribute.get_strategy()),
      ('_one_device', tf.distribute.OneDeviceStrategy('/cpu:0')),
      ('_mirrored', tf.distribute.MirroredStrategy(devices=('/cpu:0',
                                                            '/cpu:1'))))
  def test_experimental_distribute_dataset(self, strategy):
    """The replay dataset can be consumed through tf.distribute strategies."""
    sequence_length = 3
    batch_size = 10
    self._insert_random_data(
        self._env,
        num_steps=sequence_length,
        sequence_length=sequence_length)
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec,
        self._table_name,
        sequence_length=sequence_length,
        local_server=self._server)
    dataset = replay.as_dataset(batch_size)
    with strategy.scope():
      dataset = strategy.experimental_distribute_dataset(dataset)
      iterator = iter(dataset)
    def train_step():
      # Mean over the time axis per replica, then summed across the batch.
      with strategy.scope():
        sample, _ = next(iterator)
        _, step = sample.observation
        loss = strategy.run(lambda x: tf.reduce_mean(x, axis=-1), args=(step,))
      return strategy.reduce(tf.distribute.ReduceOp.SUM, loss, axis=0)
    # Test running eagerly
    for _ in range(5):
      with strategy.scope():
        loss = train_step()
      self.assertEqual(batch_size, loss)
    # Test with wrapping into a tf.function
    train_step_fn = common.function(train_step)
    for _ in range(5):
      with strategy.scope():
        loss = train_step_fn()
      self.assertEqual(batch_size, loss)
  @parameterized.named_parameters(
      ('_default', tf.distribute.get_strategy()),
      ('_one_device', tf.distribute.OneDeviceStrategy('/cpu:0')),
      ('_mirrored', tf.distribute.MirroredStrategy(devices=('/cpu:0',
                                                            '/cpu:1'))))
  def test_experimental_distribute_datasets_from_function(self, strategy):
    """Per-replica dataset creation works via distribute_datasets_from_function."""
    sequence_length = 3
    batch_size = 10
    self._insert_random_data(
        self._env,
        num_steps=sequence_length,
        sequence_length=sequence_length)
    replay = reverb_replay_buffer.ReverbReplayBuffer(
        self._data_spec,
        self._table_name,
        sequence_length=sequence_length,
        local_server=self._server)
    num_replicas = strategy.num_replicas_in_sync
    with strategy.scope():
      # Each replica gets its share of the global batch.
      dataset = strategy.experimental_distribute_datasets_from_function(
          lambda _: replay.as_dataset(batch_size // num_replicas))
      iterator = iter(dataset)
    @common.function()
    def train_step():
      with strategy.scope():
        sample, _ = next(iterator)
        _, step = sample.observation
        loss = strategy.run(lambda x: tf.reduce_mean(x, axis=-1), args=(step,))
        return strategy.reduce(tf.distribute.ReduceOp.SUM, loss, axis=0)
    # Test running eagerly
    for _ in range(5):
      with strategy.scope():
        loss = train_step()
      self.assertEqual(batch_size, loss)
    # Test with wrapping into a tf.function
    train_step_fn = common.function(train_step)
    for _ in range(5):
      with strategy.scope():
        loss = train_step_fn()
      self.assertEqual(batch_size, loss)
# Script entry point: defer to the TF-Agents test runner.
if __name__ == '__main__':
  test_utils.main()
| tensorflow/agents | tf_agents/replay_buffers/reverb_replay_buffer_test.py | Python | apache-2.0 | 20,280 |
"""Utility for testing certificate display.
This command will create a fake certificate for a user
in a course. The certificate will display on the student's
dashboard, but no PDF will be generated.
Example usage:
$ ./manage.py lms create_fake_cert test_user edX/DemoX/Demo_Course --mode honor --grade 0.89
"""
from __future__ import absolute_import
import logging
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from opaque_keys.edx.keys import CourseKey
from six import text_type
from lms.djangoapps.certificates.models import CertificateStatuses, GeneratedCertificate
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
    """Create a fake certificate for a user in a course.

    The certificate shows up on the learner's dashboard, but no PDF is
    generated, which makes this useful for testing certificate display.
    """

    def add_arguments(self, parser):
        """Declare the command's positional and optional arguments."""
        parser.add_argument('username', metavar='USERNAME',
                            help='Username of the user to create the fake cert for')
        parser.add_argument('course_key', metavar='COURSE_KEY',
                            help='Course key of the course to grant the cert for')
        parser.add_argument('-m', '--mode', metavar='CERT_MODE', dest='cert_mode',
                            default='honor',
                            help='The course mode of the certificate (e.g. "honor", "verified", or "professional")')
        parser.add_argument('-s', '--status', metavar='CERT_STATUS', dest='status',
                            default=CertificateStatuses.downloadable,
                            help='The status of the certificate')
        parser.add_argument('-g', '--grade', metavar='CERT_GRADE', dest='grade',
                            default='',
                            help='The grade for the course, as a decimal (e.g. "0.89" for 89 percent)')

    def handle(self, *args, **options):
        """Create (or update) a fake certificate for a user.

        Arguments:
            username (unicode): Identifier for the certificate's user.
            course_key (unicode): Identifier for the certificate's course.

        Keyword Arguments:
            cert_mode (str): The mode of the certificate (e.g "honor")
            status (str): The status of the certificate (e.g. "downloadable")
            grade (str): The grade of the certificate (e.g "0.89" for 89%)

        Raises:
            CommandError
        """
        user = User.objects.get(username=options['username'])
        course_key = CourseKey.from_string(options['course_key'])
        cert_mode = options.get('cert_mode', 'honor')
        status = options.get('status', CertificateStatuses.downloadable)
        grade = options.get('grade', '')

        cert, created = GeneratedCertificate.eligible_certificates.get_or_create(
            user=user,
            course_id=course_key
        )
        cert.mode = cert_mode
        cert.status = status
        cert.grade = grade
        if status == CertificateStatuses.downloadable:
            # Fake download/verification identifiers so dashboard links render.
            cert.download_uuid = 'test'
            cert.verify_uuid = 'test'
            cert.download_url = 'http://www.example.com'
        cert.save()

        message = (
            u"Created certificate for user %s in course %s "
            u"with mode %s, status %s, "
            u"and grade %s"
        ) if created else (
            u"Updated certificate for user %s in course %s "
            u"with mode %s, status %s, "
            u"and grade %s"
        )
        LOGGER.info(
            message,
            user.id, text_type(course_key),
            cert_mode, status, grade
        )
| jolyonb/edx-platform | lms/djangoapps/certificates/management/commands/create_fake_cert.py | Python | agpl-3.0 | 3,745 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.apps import AppConfig
class PympaAffariGeneraliConfig(AppConfig):
    """Django application configuration for the "Affari Generali" app."""

    verbose_name = 'Affari Generali'
    name = 'pympa_affarigenerali'
| simodalla/pympa-affarigenerali | pympa_affarigenerali/apps.py | Python | bsd-3-clause | 233 |
from __future__ import unicode_literals
from django.apps import AppConfig
class TwitterhutConfig(AppConfig):
    """Django application configuration for the ``twitterhut`` app."""
    name = 'twitterhut'
| kingsdigitallab/kdl-django | twitterhut/apps.py | Python | mit | 136 |
#!/usr/bin/env python
"""
@package mi.dataset.parser.issmcnsm_dostad
@file marine-integrations/mi/dataset/parser/issmcnsm_dostad.py
@author Emily Hahn
@brief Parser for the issmcnsm_dosta dataset driver
Release notes:
Initial release
"""
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
import copy
import re
import ntplib
import time
from dateutil import parser
from functools import partial
from mi.core.log import get_logger ; log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import DataParticle, DataParticleKey
from mi.core.exceptions import SampleException, DatasetParserException
from mi.dataset.dataset_parser import BufferLoadingParser
from mi.core.instrument.chunker import StringChunker
TIME_REGEX = r'(\d{4})/(\d\d)/(\d\d) (\d\d):(\d\d):(\d\d.\d{3}) '
TIME_MATCHER = re.compile(TIME_REGEX)
DATA_REGEX = r'\d{4}/\d\d/\d\d \d\d:\d\d:\d\d.\d{3} (\d+)\t(\d+)\t(\d+.\d+)\t(\d+.\d+)\t' \
'(\d+.\d+)\t(\d+.\d+)\t(\d+.\d+)\t(\d+.\d+)\t(\d+.\d+)\t(\d+.\d)\t(\d+.\d)\t(\d+.\d)\r'
DATA_MATCHER = re.compile(DATA_REGEX)
class DataParticleType(BaseEnum):
    """Stream names for particles produced by this parser."""
    SAMPLE = 'issmcnsm_dostad_parsed'
class Issmcnsm_dostadParserDataParticleKey(BaseEnum):
    """Parameter names for values decoded from a DOSTA sample record."""
    PRODUCT_NUMBER = 'product_number'
    SERIAL_NUMBER = 'serial_number'
    ESTIMATED_OXYGEN = 'estimated_oxygen'
    AIR_SATURATION = 'air_saturation'
    OPTODE_TEMPERATURE = 'optode_temperature'
    CALIBRATED_PHASE = 'calibrated_phase'
    TEMP_COMPENSATED_PHASE = 'temp_compensated_phase'
    BLUE_PHASE = 'blue_phase'
    RED_PHASE = 'red_phase'
    BLUE_AMPLITUDE = 'blue_amplitude'
    RED_AMPLITUDE = 'red_amplitude'
    RAW_TEMP = 'raw_temp'
class StateKey(BaseEnum):
    """Keys of the parser state dictionary."""
    TIMESTAMP='timestamp' # most recent data sample timestamp (NTP time)
    POSITION='position' # current byte position in the input file
class Issmcnsm_dostadParserDataParticle(DataParticle):
    """
    Data particle for a single issmcnsm_dosta sample record.

    Decodes the twelve tab-separated values that follow the timestamp in a
    DATA_MATCHER match into (value_id, value) dictionaries.
    """
    _data_particle_type = DataParticleType.SAMPLE

    # Particle keys in the order their values appear as regex groups 1-12 of
    # DATA_MATCHER, paired with the converter applied to each raw string.
    _FIELD_CONVERTERS = (
        (Issmcnsm_dostadParserDataParticleKey.PRODUCT_NUMBER, int),
        (Issmcnsm_dostadParserDataParticleKey.SERIAL_NUMBER, int),
        (Issmcnsm_dostadParserDataParticleKey.ESTIMATED_OXYGEN, float),
        (Issmcnsm_dostadParserDataParticleKey.AIR_SATURATION, float),
        (Issmcnsm_dostadParserDataParticleKey.OPTODE_TEMPERATURE, float),
        (Issmcnsm_dostadParserDataParticleKey.CALIBRATED_PHASE, float),
        (Issmcnsm_dostadParserDataParticleKey.TEMP_COMPENSATED_PHASE, float),
        (Issmcnsm_dostadParserDataParticleKey.BLUE_PHASE, float),
        (Issmcnsm_dostadParserDataParticleKey.RED_PHASE, float),
        (Issmcnsm_dostadParserDataParticleKey.BLUE_AMPLITUDE, float),
        (Issmcnsm_dostadParserDataParticleKey.RED_AMPLITUDE, float),
        (Issmcnsm_dostadParserDataParticleKey.RAW_TEMP, float),
    )

    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        a particle with the appropriate tag.
        @throws SampleException If there is a problem with sample creation
        """
        # match the data inside the wrapper
        match = DATA_MATCHER.match(self.raw_data)
        if not match:
            # Implicit string concatenation replaces the original backslash
            # continuation, which leaked indentation whitespace into the
            # error message.
            raise SampleException(
                "Issmcnsm_dostadParserDataParticle: No regex match of "
                "parsed sample data [%s]", self.raw_data)
        try:
            # Table-driven decode; group indices start at 1.
            result = [{DataParticleKey.VALUE_ID: key,
                       DataParticleKey.VALUE: convert(match.group(group))}
                      for group, (key, convert)
                      in enumerate(self._FIELD_CONVERTERS, start=1)]
        except (ValueError, TypeError, IndexError) as ex:
            raise SampleException("Error (%s) while decoding parameters in data: [%s]"
                                  % (ex, match.group(0)))
        log.debug('Issmcnsm_dostadParserDataParticle: particle=%s', result)
        return result

    def __eq__(self, arg):
        """
        Quick equality check for testing purposes.  Particles are equal when
        their raw data matches and their internal timestamps agree to within
        1e-7 seconds.
        """
        # Compare with the absolute difference: the original signed
        # subtraction wrongly treated a much *earlier* self timestamp
        # (large negative difference) as equal.
        time_delta = abs(self.contents[DataParticleKey.INTERNAL_TIMESTAMP] -
                         arg.contents[DataParticleKey.INTERNAL_TIMESTAMP])
        if self.raw_data == arg.raw_data and time_delta < .0000001:
            return True
        if self.raw_data != arg.raw_data:
            log.debug('Raw data does not match')
        else:
            log.debug('Timestamp %f and %f do not match',
                      self.contents[DataParticleKey.INTERNAL_TIMESTAMP],
                      arg.contents[DataParticleKey.INTERNAL_TIMESTAMP])
        return False
class Issmcnsm_dostadParser(BufferLoadingParser):
    """Buffer-loading parser for the issmcnsm_dosta data stream.

    Chunks the input with DATA_MATCHER and emits one data particle per
    matched sample record, tracking file position and the most recent
    sample timestamp in the parser state.
    """
    def __init__(self,
                 config,
                 state,
                 stream_handle,
                 state_callback,
                 publish_callback,
                 *args, **kwargs):
        # Sieve on the sample regex so the chunker only yields full records.
        super(Issmcnsm_dostadParser, self).__init__(config,
                                                    stream_handle,
                                                    state,
                                                    partial(StringChunker.regex_sieve_function,
                                                            regex_list=[DATA_MATCHER]),
                                                    state_callback,
                                                    publish_callback,
                                                    *args,
                                                    **kwargs)
        self._timestamp = 0.0
        self._record_buffer = [] # holds tuples of (record, state)
        self._read_state = {StateKey.POSITION:0, StateKey.TIMESTAMP:0.0}
        if state:
            # NOTE(review): relies on the base class having stored `state`
            # as self._state -- confirm against BufferLoadingParser.
            self.set_state(self._state)
    def set_state(self, state_obj):
        """
        Set the value of the state object for this parser
        @param state_obj The object to set the state to.
        @throws DatasetParserException if there is a bad state structure
        """
        log.debug("Attempting to set state to: %s", state_obj)
        if not isinstance(state_obj, dict):
            raise DatasetParserException("Invalid state structure")
        if not ((StateKey.POSITION in state_obj) and (StateKey.TIMESTAMP in state_obj)):
            raise DatasetParserException("Invalid state keys")
        self._timestamp = state_obj[StateKey.TIMESTAMP]
        self._record_buffer = []
        self._state = state_obj
        self._read_state = state_obj
        # make sure the chunker is clean of old data
        self._clean_all_chunker()
        # seek to the position
        self._stream_handle.seek(state_obj[StateKey.POSITION])
    def _increment_state(self, read_increment, timestamp):
        """
        Increment the parser position by a certain amount in bytes. This
        indicates what has been READ from the file, not what has been published.
        The increment takes into account a timestamp of WHEN in the data the
        position corresponds to. This allows a reload of both timestamp and the
        position.
        @param read_increment Number of bytes to increment the parser position.
        @param timestamp The timestamp completed up to that position
        """
        log.trace("Incrementing current state: %s with inc: %s, timestamp: %s",
                  self._read_state, read_increment, timestamp)
        self._read_state[StateKey.POSITION] += read_increment
        self._read_state[StateKey.TIMESTAMP] = timestamp
    @staticmethod
    def _convert_string_to_timestamp(ts_str):
        """
        Converts the given string from this data stream's format into an NTP
        timestamp.
        @param ts_str The timestamp string in the format "yyyy/mm/dd hh:mm:ss.sss"
        @retval The NTP4 timestamp
        """
        match = TIME_MATCHER.match(ts_str)
        if not match:
            raise ValueError("Invalid time format: %s" % ts_str)
        zulu_ts = "%04d-%02d-%02dT%02d:%02d:%fZ" % (
            int(match.group(1)), int(match.group(2)), int(match.group(3)),
            int(match.group(4)), int(match.group(5)), float(match.group(6))
        )
        log.trace("converted ts '%s' to '%s'", ts_str[match.start(0):(match.start(0) + 24)], zulu_ts)
        # NOTE(review): strftime("%s") is platform-specific and applies the
        # local timezone, which the next line subtracts back out -- confirm
        # behavior on non-Linux hosts.
        converted_time = float(parser.parse(zulu_ts).strftime("%s.%f"))
        adjusted_time = converted_time - time.timezone
        ntptime = ntplib.system_to_ntp_time(adjusted_time)
        log.trace("Converted time \"%s\" (unix: %s) into %s", ts_str, adjusted_time, ntptime)
        return ntptime
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state. An empty list if nothing was parsed.
        """
        result_particles = []
        (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
        if end == None:
            data_increment = 0
        else:
            # NOTE(review): the first increment uses `end` (not end - start),
            # which also counts any bytes before the match -- confirm intended.
            data_increment = end
        while (chunk != None):
            data_match = DATA_MATCHER.match(chunk)
            if data_match:
                # time is inside the data regex
                self._timestamp = self._convert_string_to_timestamp(chunk)
                # particle-ize the data block received, return the record
                sample = self._extract_sample(self._particle_class, DATA_MATCHER, chunk, self._timestamp)
                if sample:
                    # create particle
                    log.trace("Extracting sample chunk %s with read_state: %s", chunk, self._read_state)
                    self._increment_state(data_increment, self._timestamp)
                    result_particles.append((sample, copy.copy(self._read_state)))
            (nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=True)
            (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
            if end == None:
                data_increment = 0
            else:
                data_increment = end - start
                # need to add length of non-data to data to get the final position
                if non_end != None and non_end != 0:
                    data_increment += (non_end - non_start)
        return result_particles
    def _clean_all_chunker(self):
        """
        Clean out the chunker of all possible data types
        """
        # clear out any non matching data.
        (nd_timestamp, non_data) = self._chunker.get_next_non_data(clean=True)
        while non_data is not None:
            (nd_timestamp, non_data) = self._chunker.get_next_non_data(clean=True)
        # clean out raw data
        (nd_timestamp, raw_data) = self._chunker.get_next_raw(clean=True)
        while raw_data is not None:
            (nd_timestamp, raw_data) = self._chunker.get_next_raw(clean=True)
        # clean out data
        (nd_timestamp, data) = self._chunker.get_next_data(clean=True)
        while data is not None:
            (nd_timestamp, data) = self._chunker.get_next_data(clean=True)
| ooici/marine-integrations | mi/dataset/parser/issmcnsm_dostad.py | Python | bsd-2-clause | 12,802 |
'''
Created by auto_sdk on 2015.04.24
'''
from top.api.base import RestApi
class TradeAmountGetRequest(RestApi):
  """Request object for the ``taobao.trade.amount.get`` TOP API call."""
  def __init__(self,domain='gw.api.taobao.com',port=80):
    RestApi.__init__(self,domain, port)
    # fields: result fields to return (set by the caller before the call)
    self.fields = None
    # tid: identifier of the trade (order) to query
    self.tid = None
  def getapiname(self):
    """Return the TOP API method name for this request."""
    return 'taobao.trade.amount.get'
| colaftc/webtool | top/api/rest/TradeAmountGetRequest.py | Python | mit | 317 |
# Create Windows executable for cmongo2sql using
# Py2Exe module - http://www.py2exe.org
# Usage: python create_exe.py py2exe
from distutils.core import setup
import py2exe
# Bundle everything (including the runtime) into a single console
# executable with no separate library.zip alongside it.
setup(
    options = {'py2exe': {'bundle_files': 1}},
    console = ['cmongo2sql.py'],
    zipfile = None,
)
| stpettersens/cmongo2sql | create_exe.py | Python | mit | 283 |
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.core import (array, arange, atleast_1d, atleast_2d, atleast_3d,
block, vstack, hstack, newaxis, concatenate, stack)
from numpy.testing import (TestCase, assert_, assert_raises,
assert_array_equal, assert_equal, run_module_suite,
assert_raises_regex, assert_almost_equal)
from numpy.compat import long
class TestAtleast1d(TestCase):
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1]), array([2])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1, 2]), array([2, 3])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r1array(self):
""" Test to make sure equivalent Travis O's r1array function
"""
assert_(atleast_1d(3).shape == (1,))
assert_(atleast_1d(3j).shape == (1,))
assert_(atleast_1d(long(3)).shape == (1,))
assert_(atleast_1d(3.0).shape == (1,))
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
class TestAtleast2d(TestCase):
    """Tests for atleast_2d over inputs of increasing dimensionality."""

    def test_0D_array(self):
        scalars = [array(1), array(2)]
        promoted = [atleast_2d(s) for s in scalars]
        assert_array_equal(promoted, [array([[1]]), array([[2]])])

    def test_1D_array(self):
        vectors = [array([1, 2]), array([2, 3])]
        promoted = [atleast_2d(v) for v in vectors]
        # 1-D inputs become single-row matrices.
        assert_array_equal(promoted, [array([[1, 2]]), array([[2, 3]])])

    def test_2D_array(self):
        matrices = [array([[1, 2], [1, 2]]), array([[2, 3], [2, 3]])]
        promoted = [atleast_2d(m) for m in matrices]
        # 2-D inputs pass through unchanged.
        assert_array_equal(promoted, matrices)

    def test_3D_array(self):
        base_a = array([[1, 2], [1, 2]])
        base_b = array([[2, 3], [2, 3]])
        cubes = [array([base_a, base_a]), array([base_b, base_b])]
        promoted = [atleast_2d(c) for c in cubes]
        # 3-D inputs pass through unchanged.
        assert_array_equal(promoted, cubes)

    def test_r2array(self):
        """ Test to make sure equivalent Travis O's r2array function
        """
        assert_(atleast_2d(3).shape == (1, 1))
        assert_(atleast_2d([3j, 1]).shape == (1, 2))
        assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
class TestAtleast3d(TestCase):
    """Tests for atleast_3d over inputs of increasing dimensionality."""

    def test_0D_array(self):
        scalars = [array(1), array(2)]
        promoted = [atleast_3d(s) for s in scalars]
        assert_array_equal(promoted, [array([[[1]]]), array([[[2]]])])

    def test_1D_array(self):
        vectors = [array([1, 2]), array([2, 3])]
        promoted = [atleast_3d(v) for v in vectors]
        # 1-D inputs become (1, N, 1) column stacks.
        assert_array_equal(promoted, [array([[[1], [2]]]), array([[[2], [3]]])])

    def test_2D_array(self):
        matrices = [array([[1, 2], [1, 2]]), array([[2, 3], [2, 3]])]
        promoted = [atleast_3d(m) for m in matrices]
        # 2-D inputs gain a trailing axis of length one.
        assert_array_equal(promoted, [m[:, :, newaxis] for m in matrices])

    def test_3D_array(self):
        base_a = array([[1, 2], [1, 2]])
        base_b = array([[2, 3], [2, 3]])
        cubes = [array([base_a, base_a]), array([base_b, base_b])]
        promoted = [atleast_3d(c) for c in cubes]
        # 3-D inputs pass through unchanged.
        assert_array_equal(promoted, cubes)
class TestHstack(TestCase):
    """Tests for hstack: error handling and stacking by dimensionality."""

    def test_non_iterable(self):
        assert_raises(TypeError, hstack, 1)

    def test_empty_input(self):
        assert_raises(ValueError, hstack, ())

    def test_0D_array(self):
        stacked = hstack([array(1), array(2)])
        assert_array_equal(stacked, array([1, 2]))

    def test_1D_array(self):
        stacked = hstack([array([1]), array([2])])
        assert_array_equal(stacked, array([1, 2]))

    def test_2D_array(self):
        column = array([[1], [2]])
        stacked = hstack([column, column.copy()])
        assert_array_equal(stacked, array([[1, 1], [2, 2]]))
class TestVstack(TestCase):
    """Tests for vstack: error handling and stacking by dimensionality."""

    def test_non_iterable(self):
        assert_raises(TypeError, vstack, 1)

    def test_empty_input(self):
        assert_raises(ValueError, vstack, ())

    def test_0D_array(self):
        stacked = vstack([array(1), array(2)])
        assert_array_equal(stacked, array([[1], [2]]))

    def test_1D_array(self):
        stacked = vstack([array([1]), array([2])])
        assert_array_equal(stacked, array([[1], [2]]))

    def test_2D_array(self):
        column = array([[1], [2]])
        stacked = vstack([column, column.copy()])
        assert_array_equal(stacked, array([[1], [2], [1], [2]]))

    def test_2D_array2(self):
        row = array([1, 2])
        stacked = vstack([row, row.copy()])
        assert_array_equal(stacked, array([[1, 2], [1, 2]]))
class TestConcatenate(TestCase):
    """Tests for np.concatenate: axis bounds, shape checking, axis=None
    flattening, and mixed sequence inputs."""
    def test_exceptions(self):
        """Out-of-bounds axes, scalars, shape mismatches and empty input raise."""
        # test axis must be in bounds
        for ndim in [1, 2, 3]:
            a = np.ones((1,)*ndim)
            np.concatenate((a, a), axis=0)  # OK
            assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
            assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))
        # Scalars cannot be concatenated
        assert_raises(ValueError, concatenate, (0,))
        assert_raises(ValueError, concatenate, (np.array(0),))
        # test shapes must match except for concatenation axis
        a = np.ones((1, 2, 3))
        b = np.ones((2, 2, 3))
        axis = list(range(3))
        for i in range(3):
            # Rotate the mismatching axis through every position.
            np.concatenate((a, b), axis=axis[0])  # OK
            assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1])
            assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
            a = np.rollaxis(a, -1)
            b = np.rollaxis(b, -1)
            axis.append(axis.pop(0))
        # No arrays to concatenate raises ValueError
        assert_raises(ValueError, concatenate, ())
    def test_concatenate_axis_None(self):
        """axis=None flattens inputs and promotes to a common dtype."""
        a = np.arange(4, dtype=np.float64).reshape((2, 2))
        b = list(range(3))
        c = ['x']
        r = np.concatenate((a, a), axis=None)
        assert_equal(r.dtype, a.dtype)
        assert_equal(r.ndim, 1)
        r = np.concatenate((a, b), axis=None)
        assert_equal(r.size, a.size + len(b))
        assert_equal(r.dtype, a.dtype)
        # Mixing in strings promotes everything to a string array.
        r = np.concatenate((a, b, c), axis=None)
        d = array(['0.0', '1.0', '2.0', '3.0',
                   '0', '1', '2', 'x'])
        assert_array_equal(r, d)
    def test_large_concatenate_axis_None(self):
        # When no axis is given, concatenate uses flattened versions.
        # This also had a bug with many arrays (see gh-5979).
        x = np.arange(1, 100)
        r = np.concatenate(x, None)
        assert_array_equal(x, r)
        # This should probably be deprecated:
        r = np.concatenate(x, 100)  # axis is >= MAXDIMS
        assert_array_equal(x, r)
    def test_concatenate(self):
        # Test concatenate function
        # One sequence returns unmodified (but as array)
        r4 = list(range(4))
        assert_array_equal(concatenate((r4,)), r4)
        # Any sequence
        assert_array_equal(concatenate((tuple(r4),)), r4)
        assert_array_equal(concatenate((array(r4),)), r4)
        # 1D default concatenation
        r3 = list(range(3))
        assert_array_equal(concatenate((r4, r3)), r4 + r3)
        # Mixed sequence types
        assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
        assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
        # Explicit axis specification
        assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
        # Including negative
        assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
        # 2D
        a23 = array([[10, 11, 12], [13, 14, 15]])
        a13 = array([[0, 1, 2]])
        res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
        assert_array_equal(concatenate((a23, a13)), res)
        assert_array_equal(concatenate((a23, a13), 0), res)
        assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
        assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
        # Arrays must match in shape except along the concatenation axis
        assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
        # 3D
        res = arange(2 * 3 * 7).reshape((2, 3, 7))
        a0 = res[..., :4]
        a1 = res[..., 4:6]
        a2 = res[..., 6:]
        assert_array_equal(concatenate((a0, a1, a2), 2), res)
        assert_array_equal(concatenate((a0, a1, a2), -1), res)
        assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
def test_stack():
    """Exercise np.stack: input validation, axis handling, output shapes,
    empty arrays and error messages."""
    # non-iterable input
    assert_raises(TypeError, stack, 1)
    # 0d input
    for input_ in [(1, 2, 3),
                   [np.int32(1), np.int32(2), np.int32(3)],
                   [np.array(1), np.array(2), np.array(3)]]:
        assert_array_equal(stack(input_), [1, 2, 3])
    # 1d input examples
    a = np.array([1, 2, 3])
    b = np.array([4, 5, 6])
    r1 = array([[1, 2, 3], [4, 5, 6]])
    assert_array_equal(np.stack((a, b)), r1)
    assert_array_equal(np.stack((a, b), axis=1), r1.T)
    # all input types
    assert_array_equal(np.stack(list([a, b])), r1)
    assert_array_equal(np.stack(array([a, b])), r1)
    # all shapes for 1d input
    arrays = [np.random.randn(3) for _ in range(10)]
    axes = [0, 1, -1, -2]
    expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
    for axis, expected_shape in zip(axes, expected_shapes):
        assert_equal(np.stack(arrays, axis).shape, expected_shape)
    assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)
    assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)
    # all shapes for 2d input
    arrays = [np.random.randn(3, 4) for _ in range(10)]
    axes = [0, 1, 2, -1, -2, -3]
    expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
                       (3, 4, 10), (3, 10, 4), (10, 3, 4)]
    for axis, expected_shape in zip(axes, expected_shapes):
        assert_equal(np.stack(arrays, axis).shape, expected_shape)
    # empty arrays
    assert_(stack([[], [], []]).shape == (3, 0))
    assert_(stack([[], [], []], axis=1).shape == (0, 3))
    # edge cases
    assert_raises_regex(ValueError, 'need at least one array', stack, [])
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [1, np.arange(3)])
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.arange(3), 1])
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.arange(3), 1], axis=1)
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.arange(2), np.arange(3)])
    # np.matrix
    m = np.matrix([[1, 2], [3, 4]])
    assert_raises_regex(ValueError, 'shape too large to be a matrix',
                        stack, [m, m])
class TestBlock(TestCase):
    """Tests for ``np.block``: assembling an array from nested lists of blocks.

    Nesting depth selects the axis: the innermost list concatenates along the
    last axis, the next level along the second-to-last, and so on.
    """
    def test_block_simple_row_wise(self):
        """Horizontal (row-wise) concatenation of two 2-D blocks."""
        a_2d = np.ones((2, 2))
        b_2d = 2 * a_2d
        desired = np.array([[1, 1, 2, 2],
                            [1, 1, 2, 2]])
        result = block([a_2d, b_2d])
        assert_equal(desired, result)
    def test_block_simple_column_wise(self):
        """Vertical stacking via nested single-element rows."""
        a_2d = np.ones((2, 2))
        b_2d = 2 * a_2d
        expected = np.array([[1, 1],
                             [1, 1],
                             [2, 2],
                             [2, 2]])
        result = block([[a_2d], [b_2d]])
        assert_equal(expected, result)
    def test_block_with_1d_arrays_row_wise(self):
        """1-D inputs act as row vectors and concatenate end to end."""
        # # # 1-D vectors are treated as row arrays
        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        expected = np.array([1, 2, 3, 2, 3, 4])
        result = block([a, b])
        assert_equal(expected, result)
    def test_block_with_1d_arrays_multiple_rows(self):
        """Each inner list of 1-D arrays becomes one row of the result."""
        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        expected = np.array([[1, 2, 3, 2, 3, 4],
                             [1, 2, 3, 2, 3, 4]])
        result = block([[a, b], [a, b]])
        assert_equal(expected, result)
    def test_block_with_1d_arrays_column_wise(self):
        """Nested 1-D rows stack vertically into a 2-D result."""
        # # # 1-D vectors are treated as row arrays
        a_1d = np.array([1, 2, 3])
        b_1d = np.array([2, 3, 4])
        expected = np.array([[1, 2, 3],
                             [2, 3, 4]])
        result = block([[a_1d], [b_1d]])
        assert_equal(expected, result)
    def test_block_mixed_1d_and_2d(self):
        """A 1-D block broadcasts to a row when mixed with 2-D blocks."""
        a_2d = np.ones((2, 2))
        b_1d = np.array([2, 2])
        result = block([[a_2d], [b_1d]])
        expected = np.array([[1, 1],
                             [1, 1],
                             [2, 2]])
        assert_equal(expected, result)
    def test_block_complicated(self):
        """Rows of differing block counts, including 0-d and 1-d pieces."""
        # a bit more complicated
        one_2d = np.array([[1, 1, 1]])
        two_2d = np.array([[2, 2, 2]])
        three_2d = np.array([[3, 3, 3, 3, 3, 3]])
        four_1d = np.array([4, 4, 4, 4, 4, 4])
        five_0d = np.array(5)
        six_1d = np.array([6, 6, 6, 6, 6])
        zero_2d = np.zeros((2, 6))
        expected = np.array([[1, 1, 1, 2, 2, 2],
                             [3, 3, 3, 3, 3, 3],
                             [4, 4, 4, 4, 4, 4],
                             [5, 6, 6, 6, 6, 6],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0]])
        result = block([[one_2d, two_2d],
                        [three_2d],
                        [four_1d],
                        [five_0d, six_1d],
                        [zero_2d]])
        assert_equal(result, expected)
    def test_nested(self):
        """np.block results can themselves be blocks of an outer np.block."""
        one = np.array([1, 1, 1])
        two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
        three = np.array([3, 3, 3])
        four = np.array([4, 4, 4])
        five = np.array(5)
        six = np.array([6, 6, 6, 6, 6])
        zero = np.zeros((2, 6))
        result = np.block([
            [
                np.block([
                    [one],
                    [three],
                    [four]
                ]),
                two
            ],
            [five, six],
            [zero]
        ])
        expected = np.array([[1, 1, 1, 2, 2, 2],
                             [3, 3, 3, 2, 2, 2],
                             [4, 4, 4, 2, 2, 2],
                             [5, 6, 6, 6, 6, 6],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0]])
        assert_equal(result, expected)
    def test_3d(self):
        """Three levels of nesting assemble blocks along all three axes."""
        a000 = np.ones((2, 2, 2), int) * 1
        a100 = np.ones((3, 2, 2), int) * 2
        a010 = np.ones((2, 3, 2), int) * 3
        a001 = np.ones((2, 2, 3), int) * 4
        a011 = np.ones((2, 3, 3), int) * 5
        a101 = np.ones((3, 2, 3), int) * 6
        a110 = np.ones((3, 3, 2), int) * 7
        a111 = np.ones((3, 3, 3), int) * 8
        result = np.block([
            [
                [a000, a001],
                [a010, a011],
            ],
            [
                [a100, a101],
                [a110, a111],
            ]
        ])
        expected = array([[[1, 1, 4, 4, 4],
                           [1, 1, 4, 4, 4],
                           [3, 3, 5, 5, 5],
                           [3, 3, 5, 5, 5],
                           [3, 3, 5, 5, 5]],
                          [[1, 1, 4, 4, 4],
                           [1, 1, 4, 4, 4],
                           [3, 3, 5, 5, 5],
                           [3, 3, 5, 5, 5],
                           [3, 3, 5, 5, 5]],
                          [[2, 2, 6, 6, 6],
                           [2, 2, 6, 6, 6],
                           [7, 7, 8, 8, 8],
                           [7, 7, 8, 8, 8],
                           [7, 7, 8, 8, 8]],
                          [[2, 2, 6, 6, 6],
                           [2, 2, 6, 6, 6],
                           [7, 7, 8, 8, 8],
                           [7, 7, 8, 8, 8],
                           [7, 7, 8, 8, 8]],
                          [[2, 2, 6, 6, 6],
                           [2, 2, 6, 6, 6],
                           [7, 7, 8, 8, 8],
                           [7, 7, 8, 8, 8],
                           [7, 7, 8, 8, 8]]])
        assert_array_equal(result, expected)
    def test_block_with_mismatched_shape(self):
        """Incompatible block shapes raise ValueError."""
        a = np.array([0, 0])
        b = np.eye(2)
        assert_raises(ValueError, np.block, [a, b])
        assert_raises(ValueError, np.block, [b, a])
    def test_no_lists(self):
        """A bare scalar or array input is returned as an array unchanged."""
        assert_equal(np.block(1), np.array(1))
        assert_equal(np.block(np.eye(3)), np.eye(3))
    def test_invalid_nesting(self):
        """Lists nested to inconsistent depths are rejected."""
        msg = 'depths are mismatched'
        assert_raises_regex(ValueError, msg, np.block, [1, [2]])
        assert_raises_regex(ValueError, msg, np.block, [1, []])
        assert_raises_regex(ValueError, msg, np.block, [[1], 2])
        assert_raises_regex(ValueError, msg, np.block, [[], 2])
        assert_raises_regex(ValueError, msg, np.block, [
            [[1], [2]],
            [[3, 4]],
            [5]  # missing brackets
        ])
    def test_empty_lists(self):
        """Empty (sub)lists are rejected with a helpful message."""
        assert_raises_regex(ValueError, 'empty', np.block, [])
        assert_raises_regex(ValueError, 'empty', np.block, [[]])
        assert_raises_regex(ValueError, 'empty', np.block, [[1], []])
    def test_tuple(self):
        """Tuples are rejected: nesting must use lists only."""
        assert_raises_regex(TypeError, 'tuple', np.block, ([1, 2], [3, 4]))
        assert_raises_regex(TypeError, 'tuple', np.block, [(1, 2), (3, 4)])
# Allow running this test module directly (outside the full test runner).
if __name__ == "__main__":
    run_module_suite()
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/numpy/core/tests/test_shape_base.py | Python | mit | 18,544 |
from setuptools import setup
import sys
# Version components -- the assembled string must comply with PEP 440.
_MAJOR_VERSION = 0
_MINOR_VERSION = 5
_MICRO_VERSION = None
_PRE_RELEASE_TYPE = 'a'  # a | b | rc
_PRE_RELEASE_VERSION = 5
_DEV_RELEASE_VERSION = None

version = '{}.{}'.format(_MAJOR_VERSION, _MINOR_VERSION)
if _MICRO_VERSION is not None:
    version += '.{}'.format(_MICRO_VERSION)
if _PRE_RELEASE_TYPE is not None and _PRE_RELEASE_VERSION is not None:
    version += '{}{}'.format(_PRE_RELEASE_TYPE, _PRE_RELEASE_VERSION)
if _DEV_RELEASE_VERSION is not None:
    version += '.dev{}'.format(_DEV_RELEASE_VERSION)
    # Dev releases are built from the tip of the master branch.
    revision = 'master'
else:
    # Releases are tagged in git with the version string itself.
    revision = version

# GitHub archive URL for the chosen revision (tag or branch).
download_url = 'https://github.com/datamachine/twx/archive/{}.tar.gz'.format(revision)

# Echo what will be published, useful when cutting a release by hand.
print(version)
print(download_url)

setup(
    name='twx',
    packages=['twx'],
    version=version,
    # Fixed typo in published metadata: "Potocols" -> "Protocols".
    description="Abstraction Layer Over Telegram's Bot API and MTProto Chat Protocols",
    long_description=open("README.rst").read(),
    author='Vince Castellano, Phillip Lopo',
    author_email='surye80@gmail.com, philliplopo@gmail.com',
    keywords=['datamachine', 'telex', 'telegram', 'bot', 'api', 'rpc'],
    url='https://github.com/datamachine/twx',
    download_url=download_url,
    install_requires=['requests', 'twx.botapi'],
    platforms=['Linux', 'Unix', 'MacOsX', 'Windows'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.4',
        'Topic :: Communications :: Chat',
        'Topic :: Communications :: File Sharing'
    ]
)
| datamachine/twx | setup.py | Python | mit | 1,804 |
#!/usr/bin/env python2
# coding: utf8
from __future__ import print_function
import json
import itertools
import optparse
import os
import random
import re
import sys
from .utilities import unicode_dammit, persistently_apply
# Default stream/ad/genre configuration file shipped alongside this module.
default_config = os.path.join(os.path.dirname(__file__), 'config_data.json')
def load_config(fname):
    """Read the JSON config at *fname*, skipping ``//`` comment lines.

    Returns the parsed mapping with all strings normalised to unicode,
    or an empty dict when the file cannot be parsed or fails validation.
    """
    comment_markers = (b'//',)
    with open(fname, 'rb') as fin:
        lines = [raw.decode('utf8') for raw in fin
                 if not any(raw.strip().startswith(marker)
                            for marker in comment_markers)]
    try:
        parsed = persistently_apply(json.loads, args=(u''.join(lines),))
    except ValueError as e:
        print("Could not load config: {}".format(e), file=sys.stderr)
        return {}
    if not valid_config(parsed):
        return {}
    return unicode_dammit(parsed)
def valid_config(config):
    """Return True when every genre's streams exist and every stream's
    ad networks are known; print diagnostics and return False otherwise."""
    unknown_streams = [name
                       for members in config['genre'].values()
                       for name in members
                       if name not in config['stream']]
    if unknown_streams:
        print("These stations don't exist!")
        for name in unknown_streams:
            print("\t" + name)
        return False
    ok = True
    for name, info in config["stream"].items():
        for network in info["network"]:
            if network not in config["ad"]:
                print("Unknown net: {} ({})".format(network, name))
                ok = False
    return ok
def parseargs(argv=sys.argv):
    """Parse command-line options; returns the (options, args) pair.

    Note: the caller passes the full sys.argv (including the program
    name), matching the original behaviour.
    """
    parser = optparse.OptionParser()
    add = parser.add_option
    add('-s', '--shuffle', action='store_true', dest='shuffle',
        help="Play streams in random order")
    add('-p', '--port', type=int, default=1986,
        help="Port on which to listen for incoming connections")
    add('-H', '--host', default="localhost",
        help="Host on which to listen for incoming connections")
    add('-o', '--output', default=None, metavar='DIR',
        help="Directory in which to save incoming audio")
    add('-g', '--genre', default='lounge',
        help="Musical genre (as defined in config file)")
    add('-c', '--config', default=default_config,
        help="Config file containing streams, ads, and genres")
    add('--list-streams', action='store_true', dest='list_streams',
        help="Show all streams and exit")
    add('--list-genres', action='store_true', dest='list_genres',
        help="Show all genres and exit")
    return parser.parse_args(argv)
class Stream(object):
    """A single internet radio stream.

    ``data`` starts as None and must be set externally to a file-like
    object (the open HTTP response) before read() is used.
    """
    def __init__(self, name, url, networks):
        self.name = name
        self.url = url
        self.networks = networks  # ad networks this stream belongs to
        self.data = None

    def read(self, bytes):
        """Read up to *bytes* from the connected stream; requires data set."""
        if self.data is None:
            raise RuntimeError("Stream data not set")
        return self.data.read(bytes)

    def __str__(self):
        msg = "<Stream {name!s}: {url!r}>"
        return msg.format(**vars(self))

    def __repr__(self):
        # Bug fix: the template previously used a backslash continuation
        # *inside* the string literal, which embedded the next line's
        # indentation into the repr output. Single-line template now.
        msg = "Stream(name={name}, url={url}, networks={networks}, data={data})"
        return msg.format(**vars(self))
def make_stream(name, rioconfig):
    """Build a Stream object from the named entry in the config's
    'stream' section."""
    entry = rioconfig._config['stream'][name]
    return Stream(name, entry['url'], entry['network'])
class RioConfig(object):
    """Runtime configuration: CLI options plus the JSON stream/ad/genre file.

    The JSON file is re-read lazily whenever its on-disk mtime is newer
    than the cached copy (see update()); derived caches (_bacteria,
    _streams) are invalidated on reload.
    """
    # Shared across instances until __init__ populates them.
    _opts = None
    _args = None
    _config = None
    # Bytes of audio between ICY metadata blocks (shoutcast convention).
    ICY_METAINT = 8192
    # The number of seconds required before something isn't an ad
    min_ad_length, max_ad_length = 3, 120
    forward_metadata = False
    def __init__(self, argv=sys.argv, config_file=None):
        if not self._opts:
            self._opts, self._args = parseargs(argv)
        self.port = self._opts.port
        self.host = self._opts.host
        self.config_file = config_file or self._opts.config
        # FIXME: can this postprocessing be done in optparse?
        if self._opts.output:
            target_dir = os.path.expanduser(self._opts.output)
            self.output_directory = os.path.join(target_dir, self._opts.genre)
        else:
            self.output_directory = None
        # age tracks the mtime of the last successfully loaded config.
        self.age = 0.0
        self.update()
    def render_config(self):
        """Serialize the current config as pretty JSON (trailing spaces stripped)."""
        raw = json.dumps(self._config, indent=4)
        return '\n'.join(l.rstrip() for l in raw.splitlines()) + '\n'
    def write_config(self, fname=None):
        """Write the current config back to disk (default: the loaded file)."""
        if fname is None:
            fname = self.config_file
        with open(fname, 'w') as fout:
            fout.write(self.render_config())
        # Force an update
        self.age = 0
    @property
    def list_streams(self):
        # True when the user asked to list streams and exit.
        return self._opts.list_streams
    @property
    def list_genres(self):
        # True when the user asked to list genres and exit.
        return self._opts.list_genres
    @property
    def config_age(self):
        """Current mtime of the config file on disk."""
        return os.stat(self.config_file).st_mtime
    def add_bacterium(self, networks, bacterium):
        """Record a new ad-marker pattern for each named network and persist it."""
        msg = "New bacterium for networks {}: {!r}"
        print(msg.format(networks, bacterium))
        for net in networks:
            self._config['ad'].setdefault(net, []).append(bacterium)
        self.write_config()
    @property
    def bacteria(self):
        """Mapping of network name -> list of compiled (bytes) ad-marker regexes."""
        self.update(safe=True)
        if self._bacteria is None:
            self._bacteria = {
                net: [re.compile(ad.encode('utf8')) for ad in ads]
                for net, ads in self._config['ad'].items()}
        return self._bacteria
    def bacteria_for_stream(self, stream):
        """Set of compiled ad patterns relevant to the stream's networks."""
        bacteria = {bacterium
                    for net in stream.networks
                    for bacterium in self.bacteria.get(net, [])}
        return bacteria
    @property
    def all_streams(self):
        """Stream objects for every stream in every genre (not cached)."""
        self.update(safe=True)
        streams = [make_stream(name, self)
                   for genre, names in self._config['genre'].items()
                   for name in names]
        return streams
    @property
    def streams(self):
        """Stream objects for the selected genre (cached until config reload)."""
        self.update(safe=True)
        if self._streams is None:
            self._streams = [make_stream(name, self) for name in
                             self._config['genre'][self._opts.genre]]
        return self._streams
    def cycle_streams(self):
        """Yield streams forever; the cycle restarts when the config changes,
        resuming after the last stream that was played."""
        lasturl = ''
        streams = list(self.streams)
        while True:
            age = self.age
            # Rotate the list to the last one
            try:
                previous = [s.url for s in self.streams].index(lasturl)
                nextidx = (previous + 1) % len(streams)
            except ValueError:
                nextidx = 0
            streams = streams[nextidx:] + streams[:nextidx]
            if self._opts.shuffle:
                random.shuffle(streams)
            for stream in itertools.cycle(streams):
                lasturl = stream.url
                yield stream
                # Config changed while we were streaming: rebuild the cycle.
                if self.age != age:
                    break
    def update(self, safe=False):
        """Reload the config if its mtime advanced.

        Returns the new age on success, False when nothing changed or the
        reload failed. With safe=False a failed reload raises ValueError.
        """
        if self.config_age <= self.age:
            return False
        new_config = load_config(self.config_file)
        if new_config:
            self._config = new_config
            self.age = self.config_age
            # Invalidate caches derived from the old config.
            self._bacteria = None
            self._streams = None
            return self.age
        else:
            msg = "Invalid JSON in config?"
            if safe:
                print("ValueError: {}".format(msg), file=sys.stderr)
            else:
                raise ValueError(msg)
            return False
if __name__ == '__main__':
    # Running the module directly just constructs a RioConfig, which
    # loads and validates the config file (acts as a config validator).
    RioConfig()
| johntyree/rio | rio/config.py | Python | gpl-3.0 | 7,576 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
# Frederic Mohier, frederic.mohier@gmail.com
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
### Will be populated by the UI with its own value
app = None
# Contact page
def show_contact(name):
    """Render the page for a single contact; 404 when it is unknown."""
    user = app.request.environ['USER']
    found = app.datamgr.get_contact(name=name, user=user)
    if not found:
        found = app.redirect404()
    return {'contact': found}
# All contacts
def show_contacts():
    """Render the page listing all contacts (administrators only)."""
    user = app.request.environ['USER']
    if not user.is_administrator():
        app.redirect403()
    ordered = sorted(app.datamgr.get_contacts(user=user),
                     key=lambda c: c.contact_name)
    return {'contacts': ordered}
# Routes exposed by this plugin: view callable -> route metadata mapping,
# consumed by the WebUI page loader.
pages = {
    show_contact: {
        'name': 'Contact', 'route': '/contact/:name', 'view': 'contact', 'static': True
    },
    show_contacts: {
        'name': 'Contacts', 'route': '/contacts', 'view': 'contacts', 'static': True
    }
}
| rednach/mod-webui | module/plugins/contacts/contacts.py | Python | agpl-3.0 | 1,687 |
# -*- encoding: utf-8 -*-
from fabric.api import (
local,
run,
)
from fabric.context_managers import shell_env
from lib.error import TaskError
def _db_host(site_info):
result = ''
if site_info.db_host:
result = ' --host={} '.format(site_info.db_host)
return result
def _pg_data_database(site_info):
result = {}
if site_info.db_pass:
result.update(dict(PGPASSWORD=site_info.db_pass))
return result
def _pg_data_postgres(site_info):
result = {}
if site_info.postgres_pass:
result.update(dict(PGPASSWORD=site_info.postgres_pass))
return result
def _result_true_or_false(out):
result = False
if int(out) == 1:
result = True
elif int(out) == 0:
result = False
else:
message = "Cannot work out if 'local_database_exists': {}".format(out)
raise TaskError(message)
return result
def _sql_user_create(user_name, password):
return (
"CREATE ROLE {} WITH PASSWORD '{}' "
"NOSUPERUSER CREATEDB NOCREATEROLE LOGIN".format(user_name, password)
)
def _sql_database_create(database_name, table_space):
parameter = ''
if table_space:
print(yellow("using block storage - table space: {}".format(table_space)))
parameter = 'TABLESPACE={}'.format(table_space)
return (
"CREATE DATABASE {} "
"TEMPLATE=template0 ENCODING='utf-8' {};".format(database_name, parameter)
)
def _sql_database_exists(database_name):
return "SELECT COUNT(*) FROM pg_database WHERE datname='{}'".format(
database_name
)
def _sql_database_owner(database_name, user_name):
return "ALTER DATABASE {} OWNER TO {}".format(database_name, user_name)
def _sql_drop_database(database_name):
return "DROP DATABASE {}".format(database_name)
def _sql_drop_user(user_name):
return "DROP ROLE {}".format(user_name)
def _sql_reassign_owner(from_user_name, to_user_name):
return "REASSIGN OWNED BY {} TO {}".format(from_user_name, to_user_name)
def _sql_user_exists(user_name):
return "SELECT COUNT(*) FROM pg_user WHERE usename = '{}'".format(
user_name
)
def _run_local(sql, database_name=None):
    """Execute *sql* locally via psql as the 'postgres' user.

    When *database_name* is given the statement runs against that
    database; otherwise psql connects to the default database.
    """
    target = '-d {}'.format(database_name) if database_name else ''
    local('psql -X -U postgres {} -c "{}"'.format(target, sql))
def _run_local_psycopg2(sql):
    """Run *sql* on the local 'postgres' database; return the first row.

    Bug fix: the connection was never closed, leaking one server session
    per call; it is now closed in a ``finally`` block.
    """
    import psycopg2
    conn = psycopg2.connect('dbname={0} user={0}'.format('postgres'))
    try:
        cursor = conn.cursor()
        cursor.execute(sql)
        return cursor.fetchone()
    finally:
        conn.close()
def _run_remote(site_info, sql):
    """Run a remote command as the 'postgres' user.

    We need the 'postgres' user to create a role or database.
    """
    # PGPASSWORD goes via the environment so it never appears on the
    # command line / in the process list.
    pg_data = _pg_data_postgres(site_info)
    with shell_env(**pg_data):
        # -t -A: tuples-only, unaligned output -- trivial to parse.
        result = run('psql -X {} -U postgres -t -A -c "{}"'.format(
            _db_host(site_info),
            sql,
        ))
    return result
def _run_remote_as_user(site_info, sql):
    """Run a remote command as the user of the database.

    We need to log into the database as the owner to delete the database.
    """
    # The database user's password is passed via the environment.
    pg_data = _pg_data_database(site_info)
    with shell_env(**pg_data):
        # Connect to the 'postgres' maintenance DB so the target database
        # itself is not in use while we operate on it.
        result = run('psql -X {} -U {} -d postgres -t -A -c "{}"'.format(
            _db_host(site_info),
            site_info.db_name,
            sql,
        ))
    return result
def database_name(site_info, workflow=None):
    """Return the workflow database name when *workflow* is truthy,
    otherwise the site's main database name."""
    return site_info.db_name_workflow if workflow else site_info.db_name
def drop_local_database(database_name):
    """Drop the named database on the local PostgreSQL cluster."""
    _run_local(_sql_drop_database(database_name))
def drop_remote_database(site_info, workflow):
    """Drop the remote (main or workflow) database, connecting as its owner."""
    name = database_name(site_info, workflow)
    _run_remote_as_user(site_info, _sql_drop_database(name))
def drop_remote_user(site_info):
    """Drop the site's database role on the remote server."""
    _run_remote(site_info, _sql_drop_user(site_info.db_name))
def local_database_create(database_name):
    """Create a local UTF-8 database (no table space)."""
    _run_local(_sql_database_create(database_name, None))
def local_database_exists(database_name):
    """True when *database_name* exists on the local cluster."""
    row = _run_local_psycopg2(_sql_database_exists(database_name))
    return _result_true_or_false(row[0])
def local_load_file(database_name, file_name):
    """Load a SQL dump into a local database, aborting on the first error."""
    command = (
        "psql -X --set ON_ERROR_STOP=on -U postgres -d {0} --file {1}"
    ).format(database_name, file_name)
    local(command, capture=True)
def local_reassign_owner(database_name, from_user_name, to_user_name):
    """Locally reassign everything owned by one role to another, inside
    the given database."""
    _run_local(_sql_reassign_owner(from_user_name, to_user_name), database_name)
def local_user_create(site_info):
    """Create a local role named after the database (password = role name)."""
    _run_local(_sql_user_create(site_info.db_name, site_info.db_name))
def local_user_exists(site_info):
    """True when the site's role exists on the local cluster."""
    row = _run_local_psycopg2(_sql_user_exists(site_info.db_name))
    return _result_true_or_false(row[0])
def remote_database_create(site_info, table_space, workflow=None):
    """Create the remote database and hand ownership to the site role.

    On Amazon RDS the 'postgres' user must set the owner with a separate
    ALTER DATABASE after creation.
    """
    db_name = database_name(site_info, workflow)
    _run_remote(site_info, _sql_database_create(db_name, table_space))
    _run_remote(site_info, _sql_database_owner(db_name, site_info.db_name))
def remote_database_exists(site_info, workflow=None):
    """True when the (main or workflow) database exists on the remote server."""
    name = database_name(site_info, workflow)
    out = _run_remote(site_info, _sql_database_exists(name))
    return _result_true_or_false(out)
def remote_user_create(site_info):
    """Create the user role for the database on the remote server."""
    _run_remote(site_info, _sql_user_create(site_info.db_name, site_info.db_pass))
def remote_user_exists(site_info):
    """True when the site's role exists on the remote server."""
    out = _run_remote(site_info, _sql_user_exists(site_info.db_name))
    return _result_true_or_false(out)
| pkimber/fabric | lib/postgres.py | Python | apache-2.0 | 5,864 |
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# Built-in Imports
import os
import shutil
from subprocess import Popen, PIPE
from cloudify import ctx
from cloudify import exceptions
# Third-party Imports
# Cloudify imports
# Name of the environment variable holding the manager's private key path.
CLOUDIFY_MANAGER_PRIVATE_KEY_PATH = 'CLOUDIFY_MANAGER_PRIVATE_KEY_PATH'
def get_executible_path(executable_name):
    """Resolve the path of an Ansible executable.

    Earlier revisions resolved the binary inside the deployment's
    virtualenv; the name is now returned unchanged so the system PATH
    decides which executable runs.
    """
    return executable_name
def get_roles(roles, target_path):
    """Download the roles file from the blueprint into *target_path*;
    raise NonRecoverableError when the resource cannot be fetched."""
    destination = os.path.join(target_path, roles)
    try:
        return ctx.download_resource(roles, destination)
    except exceptions.HttpException as e:
        raise exceptions.NonRecoverableError(
            'Could not get roles file: {}.'.format(str(e)))
def get_playbook_path(playbook, target_path):
    """Download the playbook from the blueprint into *target_path*;
    raise NonRecoverableError when the resource cannot be fetched."""
    destination = os.path.join(target_path, playbook)
    try:
        return ctx.download_resource(playbook, destination)
    except exceptions.HttpException as e:
        raise exceptions.NonRecoverableError(
            'Could not get playbook file: {}.'.format(str(e)))
def get_inventory_path(inventory, target_path):
    """Write the hosts to <deployment-id>.inventory and return the path.

    NOTE: when *inventory* is empty, the caller's list is mutated in
    place to contain this instance's host IP.
    """
    path_to_file = os.path.join(target_path, '{}.inventory'.format(ctx.deployment.id))
    if not inventory:
        # Side effect: appends to the caller's list.
        inventory.append(ctx.instance.host_ip)
    with open(path_to_file, 'w') as f:
        for host in inventory:
            f.write('{0}\n'.format(host))
    return path_to_file
def get_agent_user(user=None):
    """Resolve the agent user, caching it in the instance runtime properties.

    When no *user* is given, it is read from the cached runtime property,
    falling back to the bootstrap context's agent user (which is then
    cached). An explicitly passed user is cached only if none was stored.
    """
    if not user:
        if 'user' not in ctx.instance.runtime_properties:
            user = ctx.bootstrap_context.cloudify_agent.user
            ctx.instance.runtime_properties['user'] = user
        else:
            user = ctx.instance.runtime_properties['user']
    elif 'user' not in ctx.instance.runtime_properties:
        ctx.instance.runtime_properties['user'] = user
    return user
def get_keypair_path(keypair):
    """Copy *keypair* from ~/.ssh into the ansible home; return the copy's path.

    Raises RecoverableError when the source key file does not exist yet
    (it may appear later, so the operation can be retried).
    """
    home = os.path.expanduser("~")
    path_to_file = \
        os.path.join(home, '.ssh', keypair)
    if not os.path.exists(path_to_file):
        raise exceptions.RecoverableError(
            'Keypair file does not exist.')
    ansible_home = get_ansible_home()
    target_path = os.path.join(ansible_home, keypair)
    if not os.path.exists(ansible_home):
        os.makedirs(ansible_home)
        ctx.logger.info('Created folder for ansible scripts: {}'.format(ansible_home))
    # Copy only once; keep file metadata (permissions matter for SSH keys).
    if not os.path.isfile(target_path):
        shutil.copy2(path_to_file, target_path)
    return target_path
def write_configuration_file(path, config):
    """Write *config* to ``ansible.cfg`` inside *path*; return the file path."""
    target = os.path.join(path, 'ansible.cfg')
    with open(target, 'w') as handle:
        handle.write(config)
    return target
def run_command(command):
    """Execute *command*; return the (stdout, stderr) tuple from communicate().

    Raises NonRecoverableError when the process cannot be spawned or it
    exits with a non-zero return code.
    """
    def _fail(message):
        raise exceptions.NonRecoverableError(message)

    try:
        process = Popen(command, stdout=PIPE)
    except Exception as e:
        _fail('Unable to run command. Error {}'.format(str(e)))
    try:
        output = process.communicate()
    except Exception as e:
        _fail('Unable to run command. Error {}'.format(str(e)))
    if process.returncode != 0:
        _fail('Non-zero returncode. Output {}.'.format(output))
    return output
def get_ansible_home():
    """Return ``~/cloudify.<deployment-id>/<instance-id>``, the per-instance
    working directory for ansible files."""
    home = os.path.expanduser("~")
    deployment_dir = 'cloudify.' + ctx.deployment.id
    return os.path.join(home, deployment_dir, ctx.instance.id)
def create_playbook_from_roles(hosts, roles, filename='playbook.yaml', sudo='no', path=None):
    """Render a minimal Ansible playbook applying *roles* to *hosts*.

    Writes the playbook to *path* (defaulting to the ansible home) under
    *filename* and returns the full file path.

    Bug fix: the original string building used a backslash continuation
    straight into a ``for`` statement (``'- hosts:\\n'\\`` followed by the
    loop), which is a SyntaxError; the playbook is now assembled from a
    list of lines. Also replaced ``path == None`` with ``path is None``.
    """
    if path is None:
        path = get_ansible_home()
    lines = ['- hosts:']
    lines.extend('  - ' + host for host in hosts)
    lines.append('  sudo: ' + sudo)
    lines.append('  roles:')
    lines.extend('  - ' + role for role in roles)
    pb_string = '\n'.join(lines)
    file_path = os.path.join(path, filename)
    with open(file_path, 'w') as f:
        f.write('{0}\n'.format(pb_string))
    return file_path
| cdntn/cloudify-ansible-plugin | ansible_plugin/utils.py | Python | apache-2.0 | 4,759 |
# Copyright 2015-2021 D.G. MacCarthy <https://dmaccarthy.github.io/sc8pr>
#
# This file is part of "sc8pr".
#
# "sc8pr" is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "sc8pr" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "sc8pr". If not, see <http://www.gnu.org/licenses/>.
"GUI methods for robot-user interaction"
from sc8pr.util import nothing
from sc8pr.gui.dialog import MessageBox, NumInputBox
import sc8pr.robot
class Robot(sc8pr.robot.Robot):
    """Robot subclass adding blocking GUI dialogs (message / text / number
    input) so a robot 'brain' can ask the user for input synchronously."""

    def _title(self, title):
        # Default dialog title derived from the robot's name.
        if not title:
            name = self.name
            if name is None: name = "Robot"
            title = "{} says...".format(name)
        return title
    def confirm(self, prompt, title=None, response=True):
        """Show a blocking Yes/No (or Okay) dialog; return the user's choice.

        Synchronous input to the robot brain: sensors keep updating while
        the dialog is open.
        """
        sk = self.sketch
        btns = ["Yes", "No"] if response else ["Okay"]
        mb = MessageBox(prompt, None, btns, self._title(title))
        mb.config(pos=sk.center).setCanvas(sk)
        # Busy-wait on the dialog while pumping the robot's sensors.
        while mb.result is None: self.updateSensors()
        return mb.result
    def textinput(self, prompt, title=None, allowCancel=False, num=False):
        """Show a blocking text (or numeric) input dialog; return the value.

        Synchronous input to the robot brain: sensors keep updating while
        the dialog is open. The dialog is removed before returning.
        """
        sk = self.sketch
        btns = None if allowCancel else ["Okay"]
        cls = NumInputBox if num else MessageBox
        mb = cls(prompt, "", btns, self._title(title))
        mb.config(pos=sk.center).setCanvas(sk).bind(onaction=nothing)
        while mb.result is None: self.updateSensors()
        return mb.remove().result
    def numinput(self, prompt, title=None, allowCancel=False):
        # Convenience wrapper: textinput() with numeric validation enabled.
        return self.textinput(prompt, title, allowCancel, True)
| dmaccarthy/sc8pr | sc8pr/robot/gui.py | Python | gpl-3.0 | 2,064 |
#!/usr/bin/python
import os, sys
import gzip
import json
import gsutil
from path import Path as path
from collections import defaultdict
from check_schema_tracking_log import schema2dict, check_schema
from load_course_sql import find_course_sql_dir
def mongo_dump_user_info_files(course_id, basedir=None, datedir=None, dbname=None, use_dataset_latest=False):
    '''
    In early SQL tables from edX, enrollment records were deleted when a learner un-enrolled.
    This would then cause that learner's records to no longer appear in the SQL data
    dump to researchers.  This further caused problems because there would be learners
    who received certificates (and appeared in the certificates tables) but weren't
    in the auth_user or auth_userprofile or enrollment tables.

    One way around this was to incrementally load all the user, userprofile, etc. tables
    from every weekly dump, so that if a learner ever appeared as being registered
    in a course, the learner would stay that way.

    This workaround didn't completely solve the problem, however, for early courses,
    and thus additional data had to be manualy requested from edX.

    These issues were all resolved for courses after ~Spring 2014, when un-enrollment
    was changed such that it did not cause deletion of the enrollment record, but
    rather, just a change of the "active" flag within the enrollment record.

    Here, to workaround the problem, for a given course_id, we generate users.csv, profiles.csv,
    enrollment.csv, certificates.csv, from collections
    stored in mongodb, for a specified course.  These mongodb collections were curated
    and produced for the Hx and MITx Year 1 reports.

    Uses mongoexport.
    '''
    basedir = path(basedir or '')
    lfp = find_course_sql_dir(course_id, basedir, datedir, use_dataset_latest=use_dataset_latest)
    # Output goes into a 'from_mongodb' sibling directory of the SQL dump dir.
    mongodir = lfp.dirname() / 'from_mongodb'
    print "[mongo_dump_user_info_files] processing %s, output directory = %s" % (course_id, mongodir)
    if not mongodir.exists():
        os.mkdir(mongodir)
    def do_mongo_export(collection, ofn, ckey='course_id'):
        # Export one collection filtered by course, gzip-compressed.
        query = '{"%s": "%s"}' % (ckey, course_id)
        cmd = "mongoexport -d %s -c %s -q '%s' | gzip -9 > %s" % (dbname, collection, query, ofn)
        print "--> %s" % cmd
        sys.stdout.flush()
        os.system(cmd)
    # make users with javascript join
    # The auth_user collection has no course_id field, so join via the
    # enrollment records in the mongo shell; certificate holders missing
    # an enrollment record are added as a second pass (the \$ escape keeps
    # the shell from expanding $addToSet).
    js = """conn = new Mongo();
    db = conn.getDB('%s');
    var cursor = db.student_courseenrollment.find({'course_id': '%s'});
    while (cursor.hasNext()) {
        var doc = cursor.next();
        udoc = db.auth_user.find({_id: doc.user_id})[0];
        print(JSON.stringify(udoc));
    }
    var course = '%s';
    var cursor = db.certificates_generatedcertificate.find({'course_id': course});
    while (cursor.hasNext()) {
        var doc = cursor.next();
        usc = db.student_courseenrollment.find({'course_id': course, 'user_id': doc.user_id });
        if (usc.length()==0){
            udoc = db.auth_user.find({_id: doc.user_id})[0];
            db.auth_userprofile.update({'user_id' : doc.user_id}, {\$addToSet : {courses: course }});
            print(JSON.stringify(udoc));
        }
    }
    """ % (dbname, course_id, course_id)
    ofn = mongodir / "users.json.gz"
    # tail -n +3 strips the mongo shell's banner lines from the output.
    cmd = 'echo "%s" | mongo --quiet | tail -n +3 | gzip -9 > %s' % (js.replace('\n',''), ofn)
    print "--> %s" % cmd
    sys.stdout.flush()
    os.system(cmd)
    # profiles and enrollment and certificates
    do_mongo_export("auth_userprofile", mongodir / "profiles.json.gz", 'courses')
    do_mongo_export("student_courseenrollment", mongodir / "enrollment.json.gz")
    do_mongo_export("certificates_generatedcertificate", mongodir / "certificates.json.gz")
| mitodl/edx2bigquery | edx2bigquery/fix_missing_user_info.py | Python | gpl-2.0 | 3,875 |
# -*- coding: utf-8 -*-
from importlib import reload
import sys
sys.path.append("src/models")
import os
import shutil
import glob
import fileinput
import keras as ke
import Model
reload(Model)
import Model as ModModule
import matplotlib.pyplot as plt
import pandas as pd
import os
#%%
class Run(ModModule.Model):
    """Helpers for managing training-run artifacts: saving/loading Keras
    estimators and loss histories under <project>/models/, plotting them,
    and cloning run scripts with incremented run ids."""
    def __init__(self):
        # Project root = three directories above the Model module.
        self.project_path = (os.path.dirname(
            os.path.dirname(
                os.path.dirname(ModModule.__file__)))
        )
        if self.project_path =='':
            self.model_path = "models/"
        else:
            self.model_path = self.project_path+"/models/"
    def to_file(self,estimator,loss_history):
        # Persist the loss history (CSV) and the trained estimator (HDF5).
        # NOTE(review): assumes self.id has been set by the run script --
        # it is not initialised in __init__; confirm against callers.
        pd.DataFrame(loss_history.history).to_csv(self.model_path+self.id+"history.csv")
        estimator.save(self.model_path+self.id+"_estimator.h5")
    def load_estimator(self,run_vs,run_id):
        """Load the saved Keras model for run <vs>_<id> (zero-padded)."""
        run_vs = str(run_vs).zfill(2)
        run_id = str(run_id).zfill(3)
        estimator = ke.models.load_model(self.model_path+"run"+run_vs+"_"+run_id+"_estimator.h5")
        return estimator
    def read_mod_hist(self,run_vs,run_id):
        """Read the saved loss-history CSV for run <vs>_<id> as a DataFrame."""
        run_vs = str(run_vs).zfill(2)
        run_id = str(run_id).zfill(3)
        #loss_history = numpy_loss_history.history["loss"]
        ModHist = pd.read_csv(self.model_path+"run"+run_vs+"_"+run_id+"history.csv")
        return ModHist
    def plot_loss(self,run_vs,run_id):
        """Plot training vs validation loss over epochs for one run."""
        ModHist = self.read_mod_hist(run_vs,run_id)
        plt.plot(ModHist["loss"],'-',ModHist["val_loss"],'-')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        #plt.title('A title')
        # add a legend with legend entries (because we didn't have labels when we plotted the data series)
        plt.legend(['train', 'val'])
    def plot_loss_comp(self,run_vs,run_id s):
        """Compare several runs: train loss on top, validation loss below."""
        ModHist = None
        f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
        for r in run_ids:
            ModHist = self.read_mod_hist(run_vs,r)
            ax1.plot(ModHist["loss"])
            ax2.plot(ModHist["val_loss"])
            print(ModHist)
        ax1.set_title('loss')
        ax2.set_title('val_loss')
        plt.xlabel('Epoch')
        plt.legend(run_ids, loc='upper right')
        plt.show()
    def new(self):
        """Clone the latest run script as run<vs>_<id+1>.py, rewriting the
        embedded run name inside the copy."""
        # Find existing run files and create new run id and file name
        # NOTE(review): glob.glob() order is not guaranteed sorted, so
        # [-1] presumably relies on filesystem ordering -- confirm.
        os.chdir(self.project_path+"/src/models/")
        last_file = glob.glob("run*.py")[-1]
        # Reset working directory
        os.chdir(self.project_path)
        last_file_name = last_file[:-3]
        last_id = int(last_file[6:-3])
        last_version = last_file[3:5]
        new_id = str(last_id+1).zfill(3)
        new_file_name = "run"+last_version+'_'+new_id
        # define source and destination to copy files
        srcfile = self.project_path+"/src/models/"+last_file_name+".py"
        dstdir = self.project_path+"/src/models/"+new_file_name+".py"
        # Copy old file to new file
        shutil.copy(srcfile, dstdir)
        # Replace all instances of old run in the script with new run ids
        with fileinput.FileInput(dstdir, inplace=True) as file:
            for line in file:
                print(line.replace(last_file_name, new_file_name), end='')
| hstorm/nn_spatial | src/models/ModRun.py | Python | mit | 3,255 |
import numpy as np
#Evaluate the linear regression
def compute_cost(X, y, theta):
    """Return the least-squares cost J(theta) for linear regression.

    X     : (m, n) design matrix
    y     : (m,) target vector
    theta : (n, 1) parameter column vector
    """
    m = y.size  # number of training samples
    residuals = X.dot(theta).flatten() - y
    return (1.0 / (2 * m)) * (residuals ** 2).sum()
def gradient_descent(X, y, theta, alpha, num_iters):
    '''
    Performs gradient descent to learn theta
    by taking num_iters gradient steps with learning
    rate alpha.

    Generalized: the original hard-coded exactly two features
    (errors_x1/errors_x2, theta[0][0]/theta[1][0]); the update is now
    vectorized, so X may have any number of columns. For two features
    the results are identical (the update was already simultaneous,
    since predictions were computed before either component changed).

    theta is updated in place (as before) and also returned, together
    with the per-iteration cost history of shape (num_iters, 1).
    '''
    m = y.size
    J_history = np.zeros(shape=(num_iters, 1))
    for i in range(num_iters):
        predictions = X.dot(theta).flatten()
        errors = predictions - y
        # Simultaneous update of every component: theta -= alpha * dJ/dtheta
        theta -= (alpha / m) * X.T.dot(errors).reshape(theta.shape)
        J_history[i, 0] = compute_cost(X, y, theta)
    return theta, J_history
import threading
import time
from unittest import mock
from multiple_database.routers import TestRouter
from django.core.exceptions import FieldError
from django.db import (
DatabaseError, NotSupportedError, connection, connections, router,
transaction,
)
from django.test import (
TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
from .models import City, Country, Person, PersonProfile
class SelectForUpdateTests(TransactionTestCase):
    """Tests for QuerySet.select_for_update() row-locking behaviour.

    Uses TransactionTestCase (not TestCase) because real transactions and a
    second, independent database connection are needed to observe blocking
    between concurrent SELECT ... FOR UPDATE statements.
    """

    available_apps = ['select_for_update']

    def setUp(self):
        # This is executed in autocommit mode so that code in
        # run_select_for_update can see this data.
        self.country1 = Country.objects.create(name='Belgium')
        self.country2 = Country.objects.create(name='France')
        self.city1 = City.objects.create(name='Liberchies', country=self.country1)
        self.city2 = City.objects.create(name='Samois-sur-Seine', country=self.country2)
        self.person = Person.objects.create(name='Reinhardt', born=self.city1, died=self.city2)
        self.person_profile = PersonProfile.objects.create(person=self.person)

        # We need another database connection in transaction to test that one
        # connection issuing a SELECT ... FOR UPDATE will block.
        self.new_connection = connection.copy()

    def tearDown(self):
        try:
            self.end_blocking_transaction()
        except (DatabaseError, AttributeError):
            # The blocking transaction may already be finished (or was never
            # started by the test) -- nothing to clean up in that case.
            pass
        self.new_connection.close()

    def start_blocking_transaction(self):
        """Lock the Person rows on the second connection via raw SQL."""
        self.new_connection.set_autocommit(False)
        # Start a blocking transaction. At some point,
        # end_blocking_transaction() should be called.
        self.cursor = self.new_connection.cursor()
        sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
            'db_table': Person._meta.db_table,
            'for_update': self.new_connection.ops.for_update_sql(),
        }
        self.cursor.execute(sql, ())
        self.cursor.fetchone()

    def end_blocking_transaction(self):
        # Roll back the blocking transaction.
        self.cursor.close()
        self.new_connection.rollback()
        self.new_connection.set_autocommit(True)

    def has_for_update_sql(self, queries, **kwargs):
        # Examine the SQL that was executed to determine whether it
        # contains the 'SELECT..FOR UPDATE' stanza.
        for_update_sql = connection.ops.for_update_sql(**kwargs)
        return any(for_update_sql in query['sql'] for query in queries)

    @skipUnlessDBFeature('has_select_for_update')
    def test_for_update_sql_generated(self):
        """
        The backend's FOR UPDATE variant appears in
        generated SQL when select_for_update is invoked.
        """
        with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
            list(Person.objects.all().select_for_update())
        self.assertTrue(self.has_for_update_sql(ctx.captured_queries))

    @skipUnlessDBFeature('has_select_for_update_nowait')
    def test_for_update_sql_generated_nowait(self):
        """
        The backend's FOR UPDATE NOWAIT variant appears in
        generated SQL when select_for_update is invoked.
        """
        with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
            list(Person.objects.all().select_for_update(nowait=True))
        self.assertTrue(self.has_for_update_sql(ctx.captured_queries, nowait=True))

    @skipUnlessDBFeature('has_select_for_update_skip_locked')
    def test_for_update_sql_generated_skip_locked(self):
        """
        The backend's FOR UPDATE SKIP LOCKED variant appears in
        generated SQL when select_for_update is invoked.
        """
        with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
            list(Person.objects.all().select_for_update(skip_locked=True))
        self.assertTrue(self.has_for_update_sql(ctx.captured_queries, skip_locked=True))

    @skipUnlessDBFeature('has_select_for_update_of')
    def test_for_update_sql_generated_of(self):
        """
        The backend's FOR UPDATE OF variant appears in the generated SQL when
        select_for_update() is invoked.
        """
        with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
            # The second select_for_update() call replaces the first one's
            # of=(...) -- only the last of=('self', 'born__country') applies.
            list(Person.objects.select_related(
                'born__country',
            ).select_for_update(
                of=('born__country',),
            ).select_for_update(
                of=('self', 'born__country')
            ))
        features = connections['default'].features
        if features.select_for_update_of_column:
            expected = ['select_for_update_person"."id', 'select_for_update_country"."id']
        else:
            expected = ['select_for_update_person', 'select_for_update_country']
        expected = [connection.ops.quote_name(value) for value in expected]
        self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))

    @skipUnlessDBFeature('has_select_for_update_of')
    def test_for_update_of_followed_by_values(self):
        with transaction.atomic():
            values = list(Person.objects.select_for_update(of=('self',)).values('pk'))
        self.assertEqual(values, [{'pk': self.person.pk}])

    @skipUnlessDBFeature('has_select_for_update_of')
    def test_for_update_of_followed_by_values_list(self):
        with transaction.atomic():
            values = list(Person.objects.select_for_update(of=('self',)).values_list('pk'))
        self.assertEqual(values, [(self.person.pk,)])

    @skipUnlessDBFeature('has_select_for_update_of')
    def test_for_update_of_self_when_self_is_not_selected(self):
        """
        select_for_update(of=['self']) when the only columns selected are from
        related tables.
        """
        with transaction.atomic():
            values = list(Person.objects.select_related('born').select_for_update(of=('self',)).values('born__name'))
        self.assertEqual(values, [{'born__name': self.city1.name}])

    @skipUnlessDBFeature('has_select_for_update_nowait')
    def test_nowait_raises_error_on_block(self):
        """
        If nowait is specified, we expect an error to be raised rather
        than blocking.
        """
        self.start_blocking_transaction()
        status = []

        thread = threading.Thread(
            target=self.run_select_for_update,
            args=(status,),
            kwargs={'nowait': True},
        )

        thread.start()
        time.sleep(1)
        thread.join()
        self.end_blocking_transaction()
        self.assertIsInstance(status[-1], DatabaseError)

    @skipUnlessDBFeature('has_select_for_update_skip_locked')
    def test_skip_locked_skips_locked_rows(self):
        """
        If skip_locked is specified, the locked row is skipped resulting in
        Person.DoesNotExist.
        """
        self.start_blocking_transaction()
        status = []
        thread = threading.Thread(
            target=self.run_select_for_update,
            args=(status,),
            kwargs={'skip_locked': True},
        )
        thread.start()
        time.sleep(1)
        thread.join()
        self.end_blocking_transaction()
        self.assertIsInstance(status[-1], Person.DoesNotExist)

    @skipIfDBFeature('has_select_for_update_nowait')
    @skipUnlessDBFeature('has_select_for_update')
    def test_unsupported_nowait_raises_error(self):
        """
        NotSupportedError is raised if a SELECT...FOR UPDATE NOWAIT is run on
        a database backend that supports FOR UPDATE but not NOWAIT.
        """
        with self.assertRaisesMessage(NotSupportedError, 'NOWAIT is not supported on this database backend.'):
            with transaction.atomic():
                Person.objects.select_for_update(nowait=True).get()

    @skipIfDBFeature('has_select_for_update_skip_locked')
    @skipUnlessDBFeature('has_select_for_update')
    def test_unsupported_skip_locked_raises_error(self):
        """
        NotSupportedError is raised if a SELECT...FOR UPDATE SKIP LOCKED is run
        on a database backend that supports FOR UPDATE but not SKIP LOCKED.
        """
        with self.assertRaisesMessage(NotSupportedError, 'SKIP LOCKED is not supported on this database backend.'):
            with transaction.atomic():
                Person.objects.select_for_update(skip_locked=True).get()

    @skipIfDBFeature('has_select_for_update_of')
    @skipUnlessDBFeature('has_select_for_update')
    def test_unsupported_of_raises_error(self):
        """
        NotSupportedError is raised if a SELECT...FOR UPDATE OF... is run on
        a database backend that supports FOR UPDATE but not OF.
        """
        msg = 'FOR UPDATE OF is not supported on this database backend.'
        with self.assertRaisesMessage(NotSupportedError, msg):
            with transaction.atomic():
                Person.objects.select_for_update(of=('self',)).get()

    @skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
    def test_unrelated_of_argument_raises_error(self):
        """
        FieldError is raised if a non-relation field is specified in of=(...).
        """
        msg = (
            'Invalid field name(s) given in select_for_update(of=(...)): %s. '
            'Only relational fields followed in the query are allowed. '
            'Choices are: self, born, born__country.'
        )
        invalid_of = [
            ('nonexistent',),
            ('name',),
            ('born__nonexistent',),
            ('born__name',),
            ('born__nonexistent', 'born__name'),
        ]
        for of in invalid_of:
            with self.subTest(of=of):
                with self.assertRaisesMessage(FieldError, msg % ', '.join(of)):
                    with transaction.atomic():
                        Person.objects.select_related('born__country').select_for_update(of=of).get()

    @skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
    def test_related_but_unselected_of_argument_raises_error(self):
        """
        FieldError is raised if a relation field that is not followed in the
        query is specified in of=(...).
        """
        msg = (
            'Invalid field name(s) given in select_for_update(of=(...)): %s. '
            'Only relational fields followed in the query are allowed. '
            'Choices are: self, born, profile.'
        )
        for name in ['born__country', 'died', 'died__country']:
            with self.subTest(name=name):
                with self.assertRaisesMessage(FieldError, msg % name):
                    with transaction.atomic():
                        Person.objects.select_related(
                            'born', 'profile',
                        ).exclude(profile=None).select_for_update(of=(name,)).get()

    @skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
    def test_reverse_one_to_one_of_arguments(self):
        """
        Reverse OneToOneFields may be included in of=(...) as long as NULLs
        are excluded because LEFT JOIN isn't allowed in SELECT FOR UPDATE.
        """
        with transaction.atomic():
            person = Person.objects.select_related(
                'profile',
            ).exclude(profile=None).select_for_update(of=('profile',)).get()
            self.assertEqual(person.profile, self.person_profile)

    @skipUnlessDBFeature('has_select_for_update')
    def test_for_update_after_from(self):
        features_class = connections['default'].features.__class__
        attribute_to_patch = "%s.%s.for_update_after_from" % (features_class.__module__, features_class.__name__)
        with mock.patch(attribute_to_patch, return_value=True):
            with transaction.atomic():
                self.assertIn('FOR UPDATE WHERE', str(Person.objects.filter(name='foo').select_for_update().query))

    @skipUnlessDBFeature('has_select_for_update')
    def test_for_update_requires_transaction(self):
        """
        A TransactionManagementError is raised
        when a select_for_update query is executed outside of a transaction.
        """
        msg = 'select_for_update cannot be used outside of a transaction.'
        with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
            list(Person.objects.all().select_for_update())

    @skipUnlessDBFeature('has_select_for_update')
    def test_for_update_requires_transaction_only_in_execution(self):
        """
        No TransactionManagementError is raised
        when select_for_update is invoked outside of a transaction -
        only when the query is executed.
        """
        people = Person.objects.all().select_for_update()
        msg = 'select_for_update cannot be used outside of a transaction.'
        with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
            list(people)

    @skipUnlessDBFeature('supports_select_for_update_with_limit')
    def test_select_for_update_with_limit(self):
        other = Person.objects.create(name='Grappeli', born=self.city1, died=self.city2)
        with transaction.atomic():
            qs = list(Person.objects.all().order_by('pk').select_for_update()[1:2])
            self.assertEqual(qs[0], other)

    @skipIfDBFeature('supports_select_for_update_with_limit')
    def test_unsupported_select_for_update_with_limit(self):
        msg = 'LIMIT/OFFSET is not supported with select_for_update on this database backend.'
        with self.assertRaisesMessage(NotSupportedError, msg):
            with transaction.atomic():
                list(Person.objects.all().order_by('pk').select_for_update()[1:2])

    def run_select_for_update(self, status, **kwargs):
        """
        Utility method that runs a SELECT FOR UPDATE against all
        Person instances. After the select_for_update, it attempts
        to update the name of the only record, save, and commit.

        This function expects to run in a separate thread.
        """
        status.append('started')
        try:
            # We need to enter transaction management again, as this is done on
            # per-thread basis
            with transaction.atomic():
                person = Person.objects.select_for_update(**kwargs).get()
                person.name = 'Fred'
                person.save()
        except (DatabaseError, Person.DoesNotExist) as e:
            status.append(e)
        finally:
            # This method is run in a separate thread. It uses its own
            # database connection. Close it without waiting for the GC.
            connection.close()

    @skipUnlessDBFeature('has_select_for_update')
    @skipUnlessDBFeature('supports_transactions')
    def test_block(self):
        """
        A thread running a select_for_update that accesses rows being touched
        by a similar operation on another connection blocks correctly.
        """
        # First, let's start the transaction in our thread.
        self.start_blocking_transaction()

        # Now, try it again using the ORM's select_for_update
        # facility. Do this in a separate thread.
        status = []
        thread = threading.Thread(
            target=self.run_select_for_update, args=(status,)
        )

        # The thread should immediately block, but we'll sleep
        # for a bit to make sure.
        thread.start()
        sanity_count = 0
        while len(status) != 1 and sanity_count < 10:
            sanity_count += 1
            time.sleep(1)
        if sanity_count >= 10:
            raise ValueError('Thread did not run and block')

        # Check the person hasn't been updated. Since this isn't
        # using FOR UPDATE, it won't block.
        p = Person.objects.get(pk=self.person.pk)
        self.assertEqual('Reinhardt', p.name)

        # When we end our blocking transaction, our thread should
        # be able to continue.
        self.end_blocking_transaction()
        thread.join(5.0)

        # Check the thread has finished. Assuming it has, we should
        # find that it has updated the person's name.
        self.assertFalse(thread.is_alive())

        # We must commit the transaction to ensure that MySQL gets a fresh read,
        # since by default it runs in REPEATABLE READ mode
        transaction.commit()

        p = Person.objects.get(pk=self.person.pk)
        self.assertEqual('Fred', p.name)

    @skipUnlessDBFeature('has_select_for_update')
    def test_raw_lock_not_available(self):
        """
        Running a raw query which can't obtain a FOR UPDATE lock raises
        the correct exception
        """
        self.start_blocking_transaction()

        def raw(status):
            try:
                list(
                    Person.objects.raw(
                        'SELECT * FROM %s %s' % (
                            Person._meta.db_table,
                            connection.ops.for_update_sql(nowait=True)
                        )
                    )
                )
            except DatabaseError as e:
                status.append(e)
            finally:
                # This method is run in a separate thread. It uses its own
                # database connection. Close it without waiting for the GC.
                # Connection cannot be closed on Oracle because cursor is still
                # open.
                if connection.vendor != 'oracle':
                    connection.close()

        status = []
        thread = threading.Thread(target=raw, kwargs={'status': status})
        thread.start()
        time.sleep(1)
        thread.join()
        self.end_blocking_transaction()
        self.assertIsInstance(status[-1], DatabaseError)

    @skipUnlessDBFeature('has_select_for_update')
    @override_settings(DATABASE_ROUTERS=[TestRouter()])
    def test_select_for_update_on_multidb(self):
        query = Person.objects.select_for_update()
        self.assertEqual(router.db_for_write(Person), query.db)

    @skipUnlessDBFeature('has_select_for_update')
    def test_select_for_update_with_get(self):
        with transaction.atomic():
            person = Person.objects.select_for_update().get(name='Reinhardt')
        self.assertEqual(person.name, 'Reinhardt')

    def test_nowait_and_skip_locked(self):
        with self.assertRaisesMessage(ValueError, 'The nowait option cannot be used with skip_locked.'):
            Person.objects.select_for_update(nowait=True, skip_locked=True)

    def test_ordered_select_for_update(self):
        """
        Subqueries should respect ordering as an ORDER BY clause may be useful
        to specify a row locking order to prevent deadlocks (#27193).
        """
        with transaction.atomic():
            qs = Person.objects.filter(id__in=Person.objects.order_by('-id').select_for_update())
            self.assertIn('ORDER BY', str(qs.query))
| georgemarshall/django | tests/select_for_update/tests.py | Python | bsd-3-clause | 18,931 |
"""
hybrid.py: IRCD-Hybrid protocol module for PyLink.
"""
import time
from pylinkirc import conf
from pylinkirc.classes import *
from pylinkirc.log import log
from pylinkirc.protocols.ts6 import TS6Protocol
__all__ = ['HybridProtocol']
# This protocol module inherits from the TS6 protocol.
class HybridProtocol(TS6Protocol):
    """IRCd-Hybrid server-to-server protocol handler.

    Inherits the generic TS6 implementation and overrides the pieces where
    Hybrid's wire format differs (UID introduction, TBURST topics, SVSMODE
    services metadata, EOB end-of-burst, etc.).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.casemapping = 'ascii'
        # Map Hybrid-specific commands onto PyLink's generic hook names.
        self.hook_map = {'EOB': 'ENDBURST', 'TBURST': 'TOPIC', 'SJOIN': 'JOIN'}
        self.protocol_caps -= {'slash-in-hosts'}

    def post_connect(self):
        """Initializes a connection to a server."""
        ts = self.start_ts
        f = self.send

        # https://github.com/grawity/irc-docs/blob/master/server/ts6.txt#L80
        # Note: according to hybrid source code, +p is paranoia, noknock,
        # AND rfc1459-style private, though the last isn't documented.
        # NOTE(review): 'banexception' and the letter 'p' appear more than
        # once below; duplicate dict keys silently overwrite (same values,
        # so harmless, but worth knowing).
        cmodes = {
            # TS6 generic modes:
            'op': 'o', 'halfop': 'h', 'voice': 'v', 'ban': 'b', 'key': 'k',
            'limit': 'l', 'moderated': 'm', 'noextmsg': 'n',
            'secret': 's', 'topiclock': 't', 'private': 'p',
            # hybrid-specific modes:
            'blockcolor': 'c', 'inviteonly': 'i', 'noctcp': 'C',
            'regmoderated': 'M', 'operonly': 'O', 'regonly': 'R',
            'sslonly': 'S', 'banexception': 'e', 'noknock': 'p',
            'registered': 'r', 'invex': 'I', 'paranoia': 'p',
            'banexception': 'e',
            # Now, map all the ABCD type modes:
            '*A': 'beI', '*B': 'k', '*C': 'l', '*D': 'cimnprstCMORS'
        }

        self.cmodes = cmodes

        umodes = {
            'oper': 'o', 'invisible': 'i', 'wallops': 'w', 'locops': 'l',
            'cloak': 'x', 'hidechans': 'p', 'regdeaf': 'R', 'deaf': 'D',
            'callerid': 'g', 'admin': 'a', 'deaf_commonchan': 'G', 'hideoper': 'H',
            'webirc': 'W', 'sno_clientconnections': 'c', 'sno_badclientconnections': 'u',
            'sno_rejectedclients': 'j', 'sno_skill': 'k', 'sno_fullauthblock': 'f',
            'sno_remoteclientconnections': 'F', 'sno_stats': 'y', 'sno_debug': 'd',
            'sno_nickchange': 'n', 'hideidle': 'q', 'registered': 'r',
            'snomask': 's', 'ssl': 'S', 'sno_serverconnects': 'e', 'sno_botfloods': 'b',
            # Now, map all the ABCD type modes:
            '*A': '', '*B': '', '*C': '', '*D': 'DFGHRSWabcdefgijklnopqrsuwxy'
        }

        self.umodes = umodes
        self.extbans_matching.clear()

        # halfops is mandatory on Hybrid
        self.prefixmodes = {'o': '@', 'h': '%', 'v': '+'}

        # https://github.com/grawity/irc-docs/blob/master/server/ts6.txt#L55
        f('PASS %s TS 6 %s' % (self.serverdata["sendpass"], self.sid))

        # We request the following capabilities (for hybrid):
        # ENCAP: message encapsulation for certain commands
        # EX: Support for ban exemptions (+e)
        # IE: Support for invite exemptions (+e)
        # CHW: Allow sending messages to @#channel and the like.
        # KNOCK: Support for /knock
        # SVS: Deal with extended NICK/UID messages that contain service IDs/stamps
        # TBURST: Topic Burst command; we send this in topic_burst
        # DLN: DLINE command
        # UNDLN: UNDLINE command
        # KLN: KLINE command
        # UNKLN: UNKLINE command
        # HOPS: Supports HALFOPS
        # CHW: Can do channel wall (@#)
        # CLUSTER: Supports server clustering
        # EOB: Supports EOB (end of burst) command
        f('CAPAB :TBURST DLN KNOCK UNDLN UNKLN KLN ENCAP IE EX HOPS CHW SVS CLUSTER EOB QS')

        f('SERVER %s 0 :%s' % (self.serverdata["hostname"],
                               self.serverdata.get('serverdesc') or conf.conf['pylink']['serverdesc']))

        # send endburst now
        self.send(':%s EOB' % (self.sid,))

    def spawn_client(self, nick, ident='null', host='null', realhost=None, modes=set(),
                     server=None, ip='0.0.0.0', realname=None, ts=None, opertype=None,
                     manipulatable=False):
        """
        Spawns a new client with the given options.

        Note: No nick collision / valid nickname checks are done here; it is
        up to plugins to make sure they don't introduce anything invalid.
        """
        server = server or self.sid
        if not self.is_internal_server(server):
            raise ValueError('Server %r is not a PyLink server!' % server)

        uid = self.uidgen[server].next_uid()

        ts = ts or int(time.time())
        realname = realname or conf.conf['pylink']['realname']
        realhost = realhost or host
        raw_modes = self.join_modes(modes)
        u = self.users[uid] = User(self, nick, ts, uid, server, ident=ident, host=host, realname=realname,
                                   realhost=realhost, ip=ip, manipulatable=manipulatable)

        self.apply_modes(uid, modes)
        self.servers[server].users.add(uid)

        self._send_with_prefix(server, "UID {nick} {hopcount} {ts} {modes} {ident} {host} {ip} {uid} "
                               "* :{realname}".format(ts=ts, host=host,
                                                      nick=nick, ident=ident, uid=uid,
                                                      modes=raw_modes, ip=ip, realname=realname,
                                                      hopcount=self.servers[server].hopcount))
        return u

    def update_client(self, target, field, text):
        """Updates the ident, host, or realname of a PyLink client."""
        # https://github.com/ircd-hybrid/ircd-hybrid/blob/58323b8/modules/m_svsmode.c#L40-L103
        # parv[0] = command
        # parv[1] = nickname <-- UID works too -jlu5
        # parv[2] = TS <-- Of the user, not the current time. -jlu5
        # parv[3] = mode
        # parv[4] = optional argument (services account, vhost)
        field = field.upper()

        ts = self.users[target].ts

        if field == 'HOST':
            self.users[target].host = text
            # On Hybrid, it appears that host changing is actually just forcing umode
            # "+x <hostname>" on the target. -jlu5
            self._send_with_prefix(self.sid, 'SVSMODE %s %s +x %s' % (target, ts, text))
        else:
            raise NotImplementedError("Changing field %r of a client is unsupported by this protocol." % field)

    def oper_notice(self, source, text):
        """
        Send a message to all opers.
        """
        self._send_with_prefix(source, 'GLOBOPS :%s' % text)

    def set_server_ban(self, source, duration, user='*', host='*', reason='User banned'):
        """
        Sets a server ban.
        """
        # source: user
        # parameters: target server mask, duration, user mask, host mask, reason
        assert not (user == host == '*'), "Refusing to set ridiculous ban on *@*"

        if not source in self.users:
            log.debug('(%s) Forcing KLINE sender to %s as TS6 does not allow KLINEs from servers', self.name, self.pseudoclient.uid)
            source = self.pseudoclient.uid

        self._send_with_prefix(source, 'KLINE * %s %s %s :%s' % (duration, user, host, reason))

    def topic_burst(self, numeric, target, text):
        """Sends a topic change from a PyLink server. This is usually used on burst."""
        # <- :0UY TBURST 1459308205 #testchan 1459309379 dan!~d@localhost :sdf
        if not self.is_internal_server(numeric):
            raise LookupError('No such PyLink server exists.')

        ts = self._channels[target].ts
        servername = self.servers[numeric].name

        self._send_with_prefix(numeric, 'TBURST %s %s %s %s :%s' % (ts, target, int(time.time()), servername, text))
        self._channels[target].topic = text
        self._channels[target].topicset = True

    # command handlers

    def handle_capab(self, numeric, command, args):
        # We only get a list of keywords here. Hybrid obviously assumes that
        # we know what modes it supports (indeed, this is a standard list).
        # <- CAPAB :UNDLN UNKLN KLN TBURST KNOCK ENCAP DLN IE EX HOPS CHW SVS CLUSTER EOB QS
        self._caps = caps = args[0].split()

        for required_cap in ('SVS', 'EOB', 'HOPS', 'QS', 'TBURST'):
            if required_cap not in caps:
                raise ProtocolError('%s not found in TS6 capabilities list; this is required! (got %r)' % (required_cap, caps))

    def handle_uid(self, numeric, command, args):
        """
        Handles Hybrid-style UID commands (user introduction). This is INCOMPATIBLE
        with standard TS6 implementations, as the arguments are slightly different.
        """
        # <- :0UY UID dan 1 1451041551 +Facdeiklosuw ~ident localhost 127.0.0.1 0UYAAAAAB * :realname
        nick = args[0]
        self._check_nick_collision(nick)
        ts, modes, ident, host, ip, uid, account, realname = args[2:10]
        ts = int(ts)
        if account == '*':
            account = None
        log.debug('(%s) handle_uid: got args nick=%s ts=%s uid=%s ident=%s '
                  'host=%s realname=%s ip=%s', self.name, nick, ts, uid,
                  ident, host, realname, ip)

        self.users[uid] = User(self, nick, ts, uid, numeric, ident, host, realname, host, ip)

        parsedmodes = self.parse_modes(uid, [modes])
        log.debug('(%s) handle_uid: Applying modes %s for %s', self.name, parsedmodes, uid)
        self.apply_modes(uid, parsedmodes)
        self.servers[numeric].users.add(uid)

        # Call the OPERED UP hook if +o is being added to the mode list.
        self._check_oper_status_change(uid, parsedmodes)

        # Track SSL/TLS status
        has_ssl = self.users[uid].ssl = ('+S', None) in parsedmodes

        # Set the account name if present
        if account:
            self.call_hooks([uid, 'CLIENT_SERVICES_LOGIN', {'text': account}])

        return {'uid': uid, 'ts': ts, 'nick': nick, 'realname': realname, 'host': host, 'ident': ident, 'ip': ip, 'secure': has_ssl}

    def handle_tburst(self, numeric, command, args):
        """Handles incoming topic burst (TBURST) commands."""
        # <- :0UY TBURST 1459308205 #testchan 1459309379 dan!~d@localhost :sdf
        channel = args[1]
        ts = args[2]
        setter = args[3]
        topic = args[-1]
        self._channels[channel].topic = topic
        self._channels[channel].topicset = True
        return {'channel': channel, 'setter': setter, 'ts': ts, 'text': topic}

    def handle_eob(self, numeric, command, args):
        """EOB (end-of-burst) handler."""
        log.debug('(%s) end of burst received from %s', self.name, numeric)
        if not self.servers[numeric].has_eob:
            # Don't fight with TS6's generic PING-as-EOB
            self.servers[numeric].has_eob = True

            if numeric == self.uplink:
                self.connected.set()

        return {}

    def handle_svsmode(self, numeric, command, args):
        """
        Handles SVSMODE, which is used for sending services metadata
        (vhosts, account logins), and other forced usermode changes.
        """

        target = args[0]
        ts = args[1]
        modes = args[2:]
        parsedmodes = self.parse_modes(target, modes)

        for modepair in parsedmodes:
            if modepair[0] == '+d':
                # Login sequence (tested with Anope 2.0.4-git):
                # A mode change +d accountname is used to propagate logins,
                # before setting umode +r on the target.
                # <- :5ANAAAAAG SVSMODE 5HYAAAAAA 1460175209 +d jlu5
                # <- :5ANAAAAAG SVSMODE 5HYAAAAAA 1460175209 +r

                # Logout sequence:
                # <- :5ANAAAAAG SVSMODE 5HYAAAAAA 1460175209 +d *
                # <- :5ANAAAAAG SVSMODE 5HYAAAAAA 1460175209 -r

                account = args[-1]
                if account == '*':
                    account = ''  # Logout

                # Send the login hook, and remove this mode from the mode
                # list, as it shouldn't be parsed literally.
                self.call_hooks([target, 'CLIENT_SERVICES_LOGIN', {'text': account}])
                parsedmodes.remove(modepair)

            elif modepair[0] == '+x':
                # SVSMODE is also used to set cloaks on Hybrid.
                # "SVSMODE 001TARGET +x some.host" would change 001TARGET's host
                # to some.host, for example.
                host = args[-1]
                self.users[target].host = host

                # Propagate the hostmask change as a hook.
                self.call_hooks([numeric, 'CHGHOST',
                                 {'target': target, 'newhost': host}])
                parsedmodes.remove(modepair)

        if parsedmodes:
            self.apply_modes(target, parsedmodes)

        return {'target': target, 'modes': parsedmodes}
Class = HybridProtocol
| GLolol/PyLink | protocols/hybrid.py | Python | mpl-2.0 | 12,742 |
"""
This algorithm receives an array and returns most_frequent_value
Also, sometimes it is possible to have multiple 'most_frequent_value's,
so this function returns a list. This result can be used to find a
representative value in an array.
This algorithm gets an array, makes a dictionary of it,
finds the most frequent count, and makes the result list.
For example: top_1([1, 1, 2, 2, 3, 4]) will return [1, 2]
(TL:DR) Get mathematical Mode
Complexity: O(n)
"""
def top_1(arr):
    """Return a list of the most frequent value(s) in *arr* (the mode).

    Several values can share the highest count, so a list is returned,
    e.g. top_1([1, 1, 2, 2, 3, 4]) -> [1, 2].  An empty input yields an
    empty list instead of raising (the original crashed on max() of an
    empty sequence).

    Complexity: O(n)
    """
    from collections import Counter

    if not arr:
        return []
    # Count how many times each value appears.
    counts = Counter(arr)
    # Highest observed frequency.
    top = max(counts.values())
    # Every value that reaches the top frequency, in first-seen order.
    return [value for value, count in counts.items() if count == top]
| keon/algorithms | algorithms/arrays/top_1.py | Python | mit | 959 |
# -*- coding: utf-8 -*-
import pytest
import sys
import time
from .test_base_class import TestBaseClass
from aerospike import exception as e
aerospike = pytest.importorskip("aerospike")
try:
import aerospike
except:
print("Please install aerospike python client.")
sys.exit(1)
@pytest.mark.usefixtures("connection_config")
class TestDropUser(object):
    """Integration tests for aerospike admin_drop_user() against a secured cluster.

    The whole class is skipped when no user/password is configured, since
    admin commands require a security-enabled server.
    """

    pytestmark = pytest.mark.skipif(
        not TestBaseClass.auth_in_use(),
        reason="No user specified, may be not secured cluster.")

    def setup_method(self, method):
        """
        Setup method.
        """
        config = TestBaseClass.get_connection_config()
        TestDropUser.Me = self
        self.client = aerospike.client(config).connect(config['user'], config['password'])
        try:
            # Best-effort cleanup of the fixture user from a previous run.
            self.client.admin_drop_user("foo-test")
            time.sleep(2)
        except:
            pass

    def teardown_method(self, method):
        """
        Teardown method.
        """
        self.client.close()

    def test_drop_user_with_no_parameters(self):
        """
        Invoke drop_user() without any mandatory parameters.
        """
        with pytest.raises(TypeError) as typeError:
            self.client.admin_drop_user()

        assert "argument 'user' (pos 1)" in str(
            typeError.value)

    def test_drop_user_with_policy_none(self):
        """
        Invoke drop_user() with policy none
        """
        policy = None
        user = "foo-test"
        password = "foo1"
        roles = ["read", "read-write", "sys-admin"]

        status = self.client.admin_create_user(user, password, roles, policy)
        time.sleep(2)

        assert status == 0
        user_details = self.client.admin_query_user(user, policy)

        assert user_details == ['read', 'read-write', 'sys-admin']

        status = self.client.admin_drop_user(user, policy)

        assert status == 0

        try:
            user_details = self.client.admin_query_user(user)

        except e.InvalidUser as exception:
            assert exception.code == 60
            assert exception.msg == 'AEROSPIKE_INVALID_USER'

    def test_drop_user_with_user_none(self):
        """
        Invoke drop_user() with user None
        """
        policy = {'timeout': 1000}
        try:
            self.client.admin_drop_user(None, policy)

        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == 'Username should be a string'

    def test_drop_user_positive(self):
        """
        Invoke drop_user() with correct arguments.
        """
        policy = {'timeout': 1000}
        user = "foo-test"
        password = "foo1"
        roles = ["read", "read-write", "sys-admin"]

        status = self.client.admin_create_user(user, password, roles, policy)
        time.sleep(1)

        assert status == 0
        user_details = self.client.admin_query_user(user, policy)

        assert user_details == ['read', 'read-write', 'sys-admin']

        status = self.client.admin_drop_user(user, policy)

        assert status == 0
        time.sleep(2)

        try:
            user_details = self.client.admin_query_user(user, policy)

        except e.InvalidUser as exception:
            assert exception.code == 60
            assert exception.msg == 'AEROSPIKE_INVALID_USER'

    def test_drop_user_positive_without_policy(self):
        """
        Invoke drop_user() with correct arguments.
        """
        policy = {
            'timeout': 1000
        }
        user = "foo-test"
        password = "foo1"
        roles = ["read", "read-write", "sys-admin"]

        status = self.client.admin_create_user(user, password, roles, policy)
        time.sleep(1)

        assert status == 0
        user_details = self.client.admin_query_user(user, policy)

        assert user_details == ['read', 'read-write', 'sys-admin']

        status = self.client.admin_drop_user(user)

        assert status == 0
        time.sleep(1)

        try:
            user_details = self.client.admin_query_user(user, policy)

        except e.InvalidUser as exception:
            assert exception.code == 60
            assert exception.msg == 'AEROSPIKE_INVALID_USER'

    def test_drop_user_negative(self):
        """
        Invoke drop_user() with non-existent user.
        """
        policy = {}
        user = "foo-test"
        try:
            self.client.admin_query_user(user, policy)

        except e.InvalidUser as exception:
            assert exception.code == 60
            assert exception.msg == 'AEROSPIKE_INVALID_USER'

        try:
            self.client.admin_drop_user(user)

        except e.InvalidUser as exception:
            assert exception.code == 60
            assert exception.msg == 'AEROSPIKE_INVALID_USER'

    def test_drop_user_policy_incorrect(self):
        """
        Invoke drop_user() with policy incorrect
        """
        policy = {'timeout': 1000}
        user = "incorrect-policy"
        password = "foo1"
        roles = ["read", "read-write", "sys-admin"]

        status = self.client.admin_create_user(user, password, roles, policy)
        time.sleep(1)

        assert status == 0
        user_details = self.client.admin_query_user(user, policy)

        assert user_details == ['read', 'read-write', 'sys-admin']

        policy = {
            'timeout': 0.2
        }

        try:
            status = self.client.admin_drop_user(user, policy)

        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == 'timeout is invalid'

        status = self.client.admin_drop_user(user)

    def test_drop_user_with_extra_argument(self):
        """
        Invoke drop_user() with extra argument.
        """
        policy = {'timeout': 1000}
        with pytest.raises(TypeError) as typeError:
            self.client.admin_drop_user("foo-test", policy, "")

        assert "admin_drop_user() takes at most 2 arguments (3 given)" in str(
            typeError.value)

    @pytest.mark.xfail(reason="It is no longer possible to create a user with"
                       "a name too long")
    def test_drop_user_with_too_long_username(self):

        policy = {}
        user = "user$" * 1000
        password = "user10"
        roles = ["sys-admin"]

        try:
            self.client.admin_create_user(user, password, roles, policy)

        except e.InvalidUser as exception:
            assert exception.code == 60
            assert exception.msg == "AEROSPIKE_INVALID_USER"

        try:
            self.client.admin_drop_user(user, policy)

        except e.InvalidUser as exception:
            assert exception.code == 60
            assert exception.msg == "AEROSPIKE_INVALID_USER"

    def test_drop_user_with_special_characters_in_username(self):

        policy = {}
        user = "!#Q#AEQ@#$%&^*((^&*~~~````"
        password = "user4"
        roles = ["read-write"]

        try:
            status = self.client.admin_create_user(
                user, password, roles, policy)
            assert status == 0
            time.sleep(1)
        except:
            pass

        status = self.client.admin_drop_user(user)
        assert status == 0
| aerospike/aerospike-client-python | test/new_tests/test_admin_drop_user.py | Python | apache-2.0 | 7,293 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ShareAccessRight(Model):
    """Specifies the mapping between this particular user and the type of access
    he has on shares on this device.

    All required parameters must be populated in order to send to Azure.

    :param share_id: Required. The share ID.
    :type share_id: str
    :param access_type: Required. Type of access to be allowed on the share
     for this user. Possible values include: 'Change', 'Read', 'Custom'
    :type access_type: str or ~azure.mgmt.edgegateway.models.ShareAccessType
    """

    # NOTE: this module is generated by AutoRest (see file header); manual
    # edits will be lost when the client is regenerated.

    # Required-field metadata consumed by the msrest serializer.
    _validation = {
        'share_id': {'required': True},
        'access_type': {'required': True},
    }

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        'share_id': {'key': 'shareId', 'type': 'str'},
        'access_type': {'key': 'accessType', 'type': 'str'},
    }

    def __init__(self, *, share_id: str, access_type, **kwargs) -> None:
        super(ShareAccessRight, self).__init__(**kwargs)
        self.share_id = share_id
        self.access_type = access_type
| Azure/azure-sdk-for-python | sdk/edgegateway/azure-mgmt-edgegateway/azure/mgmt/edgegateway/models/share_access_right_py3.py | Python | mit | 1,500 |
import unittest
from assertutil import get_assert_tuple_args
from binder import *
from bindertest.testdbconfig import connect
from bindertest.tabledefs import Foo, Bar
foo1 = Foo.new(foo_id=1, i1=101, s1="alpha")
foo2 = Foo.new(foo_id=2, i1=101, s1="beta")
class ConnSelectByIdTest(unittest.TestCase):
    """Tests for conn.select_by_id() and its get() alias on the Foo table.

    NOTE: this module is Python 2 code (``except X, e`` syntax).
    """

    def setUp(self):
        # Recreate the foo table and insert the two module-level fixture
        # rows (foo1/foo2) before every test.
        conn = connect()
        conn.drop_table_if_exists(Foo)
        conn.create_table(Foo)
        conn.commit()
        conn = connect()
        conn.insert(Foo, foo1)
        conn.insert(Foo, foo2)
        conn.commit()

    def test_select_by_id(self):
        # Returns the matching row, or None when no row has the given id.
        conn = connect()
        self.assertEquals(foo1, conn.select_by_id(Foo, 1))
        self.assertEquals(foo2, conn.get(Foo, 2))
        self.assertEquals(None, conn.select_by_id(Foo, 3))

    def test_select_by_id_more_than_one_row(self):
        # A table whose AutoIdCol is not actually unique ("i1" holds 101
        # for both fixture rows) must trigger the select_one assertion.
        conn = connect()
        Foo2 = Table("foo", AutoIdCol("i1"), IntCol("foo_id"))
        try:
            conn.select_by_id(Foo2, 101)
        except AssertionError, e:
            msg, info = get_assert_tuple_args(e)
            self.assertEquals("select_one(): more than 1 row", msg)
            table_name, sqlcond = info
            #table_name, sqlcond, rc = info
            self.assertEquals("foo", table_name)
            self.assertEquals('"i1 = 101"', repr(sqlcond))
            #self.assertEquals(2, rc)
        else:
            self.fail()

    def test_no_auto_id_col(self):
        # Bar has no AutoIdCol, so select_by_id() must refuse it.
        conn = connect()
        try:
            conn.get(Bar, 12)
        except AssertionError, e:
            self.assertEquals(
                "select_by_id(): table 'bar' does not have AutoIdCol", str(e)
                )
        else:
            self.fail()

    def test_auto_id(self):
        # None is never a valid lookup value for an AutoIdCol.
        conn = connect()
        try:
            conn.get(Foo, None)
        except AssertionError, e:
            self.assertEquals(
                "select_by_id(): cannot use None for AutoIdCol", str(e)
                )
        else:
            self.fail()

    def test_bad_values(self):
        # Ids must be ints; a string id is rejected with TypeError.
        conn = connect()
        try:
            conn.get(Foo, '4')
        except TypeError, e:
            self.assertEquals(
                "AutoIdCol 'foo_id': int expected, got str", str(e)
                )
        else:
            self.fail()
if __name__ == '__main__':
unittest.main()
| divtxt/binder | bindertest/test_select_by_id.py | Python | mit | 2,327 |
from __future__ import absolute_import, unicode_literals
import sys
from django.utils.importlib import import_module
from appconf import AppConf
class EventsAppConf(AppConf):
    """App settings for djangocms_events (django-appconf).

    NOTE: Python 2 module (three-expression ``raise`` form below).
    """

    # Dotted path "package.module.ClassName" of the event model; must be
    # provided by the project settings (declared required in Meta).
    MODEL = None

    class Meta:
        required = ['MODEL']

    def configure_model(self, value):
        # Resolve the dotted path to the actual class object.
        module_name, dot, class_name = value.rpartition('.')
        try:
            module = import_module(module_name)
        except ImportError as e:
            # Re-raise with context while preserving the original
            # traceback (Python 2 three-expression raise).
            raise ImportError, "Failed to import %s: %s" % (module_name, e), sys.exc_info()[2]
        return getattr(module, class_name)
| aptivate/djangocms_events | djangocms_events/conf.py | Python | gpl-3.0 | 589 |
from ert.test import TestRun
from ert.test import path_exists
from ert.test import SourceEnumerator
from ert.test import TestArea , TestAreaContext
from ert.test import ErtTestRunner
from ert.test import PathContext
from ert.test import LintTestCase
from ert.test import ImportTestCase
from tests import EclTest
class ErtLegacyTestTest(EclTest):
    # Intentionally empty: this class appears to exist only so that the
    # legacy ``ert.test`` imports above are exercised when the module
    # loads -- TODO confirm against the test runner's expectations.
    pass
| Statoil/libecl | python/tests/legacy_tests/test_test.py | Python | gpl-3.0 | 358 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2012 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from osv import osv
from osv import fields
import netsvc
import time
from tools.translate import _
class sale_order(osv.osv):
    """sale.order extension: adds a 'waiting_install' workflow state,
    meeting-count and agreement-state helper fields, and extra customer
    data validation on order confirmation (OpenERP 6.1 osv model)."""
    _inherit='sale.order'

    def _count_meetings(self, cr, uid, ids, field_name, arg, context=None):
        """Functional field: number of crm.meeting records linked to each order."""
        res = {}
        meeting_obj = self.pool.get('crm.meeting')
        for id in ids:
            res[id] = len(meeting_obj.search(cr, uid, [('sale_order_id', '=', id)]))
        return res

    def _calculate_agreement_state(self, cr, uid, ids, field_name, arg, context=None):
        """Functional field: state of the order's inv.agreement, or '' if none."""
        res = {}
        agreement_obj= self.pool.get('inv.agreement')
        sale_obj = self.pool.get('sale.order')
        for id in ids:
            sale = sale_obj.browse(cr,uid,id)
            res[id]=''
            if sale.agreement:
                state = agreement_obj.browse(cr, uid, sale.agreement.id).state
                if state:
                    res[id]=state
        return res

    _columns = {
        # partner_id/order_line/project_id are redefined only to stay
        # editable in the new 'waiting_install' state.
        'partner_id': fields.many2one('res.partner', 'Customer', readonly=True, states={'draft': [('readonly', False)], 'waiting_install': [('readonly', False)]}, required=True, change_default=True, select=True),
        'order_line': fields.one2many('sale.order.line', 'order_id', 'Order Lines', readonly=True, states={'draft': [('readonly', False)], 'waiting_install': [('readonly', False)]}),
        'project_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True, states={'draft': [('readonly', False)], 'waiting_install': [('readonly', False)]}, help="The analytic account related to a sales order."),
        'meeting_num': fields.function(_count_meetings, method=True, type='integer', string='Nº Meetings'),
        'agreement_state':fields.function(_calculate_agreement_state, method=True, type='char', string='Agreement state'),
        'state': fields.selection([
            ('draft', 'Quotation'),
            ('waiting_install', 'Waiting Installation'),
            ('waiting_date', 'Waiting Schedule'),
            ('manual', 'Manual In Progress'),
            ('progress', 'In Progress'),
            ('shipping_except', 'Shipping Exception'),
            ('invoice_except', 'Invoice Exception'),
            ('done', 'Done'),
            ('cancel', 'Cancelled')
            ], 'Order State', readonly=True, help="Gives the state of the quotation or sales order. \nThe exception state is automatically set when a cancel operation occurs in the invoice validation (Invoice Exception) or in the picking list process (Shipping Exception). \nThe 'Waiting Schedule' state is set when the invoice is confirmed but waiting for the scheduler to run on the date 'Ordered Date'.", select=True),
    }

    def action_wait_install(self, cr, uid, ids, context=None):
        """Move the order(s) into the 'waiting_install' state."""
        self.write(cr, uid, ids, {'state': 'waiting_install'})
        return True

    def is_analytic(self, cr, uid, ids, context=None):
        """Return False if any order uses the 'analytic' policy without an
        analytic account set; True otherwise."""
        for sale in self.browse(cr, uid, ids):
            if sale.order_policy == 'analytic' and not sale.project_id:
                return False
        return True

    def action_wait(self, cr, uid, ids, *args):
        """Confirm the order(s), but only if the customer has a VAT number
        and the invoice address has a zone and the analytic flag set;
        otherwise raise listing the missing fields."""
        partner_obj = self.pool.get('res.partner')
        address_obj = self.pool.get('res.partner.address')
        for o in self.browse(cr, uid, ids):
            partner = o.partner_id
            address = o.partner_invoice_id
            cif = partner.vat
            project = address.zone_id
            analytic = address.analytic
            banks = partner.bank_ids
            # Bank/payment-type validation intentionally disabled below.
            #payment = partner.payment_type_customer.id
            #bank_payment = True
            #payment_list = self.pool.get('payment.type').search(cr,uid,[('name', '=', 'Recibo domiciliado')])
            #if payment in payment_list:
            #    if not banks:
            #        bank_payment = False
            #if cif and project and analytic and bank_payment:
            if cif and project and analytic:
                # NOTE(review): super().action_wait is invoked once per
                # browsed order but always with the full ids list -- it
                # likely should run once after validating all orders;
                # confirm before changing.
                res=super(sale_order, self).action_wait(cr, uid, ids, *args)
            else:
                # Build a human-readable list of the missing fields.
                message = ''
                if not cif:
                    message = message + 'VAT, '
                if not project:
                    message = message + 'Project, '
                if not analytic:
                    message = message + 'Analytic, '
                #if not bank_payment:
                #    message = message + 'Banks, '
                raise osv.except_osv(_('Error!'),_('The field(s) %sare not specified in the client form.' %(message)))
        return res
# Instantiate to register the model with the OpenERP ORM.
sale_order()
#class sale_order_line(osv.osv):
# _inherit = 'sale.order.line'
#
# def create(self, cr, uid, vals, context=None):
# if 'pack_parent_line_id' in vals:
# date = self.pool.get('sale.order').browse(cr, uid, vals['order_id']).agreement_date or False
# if date:
# vals.update({'invoice_date': date})
# return super(sale_order_line,self).create(cr, uid, vals, context)
#
#sale_order_line()
| avanzosc/avanzosc6.1 | avanzosc_sale_mrp_wk/sale_mrp.py | Python | agpl-3.0 | 5,948 |
import logging
from decimal import Decimal
from typing import Any, Dict, Optional
from urllib.parse import urlencode, urljoin, urlunsplit
from django import forms
from django.conf import settings
from django.core import signing
from django.db import transaction
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from corporate.lib.stripe import (
DEFAULT_INVOICE_DAYS_UNTIL_DUE,
MIN_INVOICED_LICENSES,
STRIPE_PUBLISHABLE_KEY,
BillingError,
get_latest_seat_count,
is_sponsored_realm,
process_initial_upgrade,
sign_string,
unsign_string,
update_sponsorship_status,
validate_licenses,
)
from corporate.models import (
CustomerPlan,
ZulipSponsorshipRequest,
get_current_plan_by_customer,
get_customer_by_realm,
)
from corporate.views.billing_page import billing_home
from zerver.decorator import require_organization_member, zulip_login_required
from zerver.lib.actions import do_make_user_billing_admin
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.send_email import FromAddress, send_email
from zerver.lib.validator import check_int, check_string_in
from zerver.models import Realm, UserProfile, get_org_type_display_name, get_realm
billing_logger = logging.getLogger("corporate.stripe")

# Allowed values for the corresponding upgrade-form parameters; the REQ
# validators in upgrade() below reject anything outside these lists.
VALID_BILLING_MODALITY_VALUES = ["send_invoice", "charge_automatically"]
VALID_BILLING_SCHEDULE_VALUES = ["annual", "monthly"]
VALID_LICENSE_MANAGEMENT_VALUES = ["automatic", "manual"]
def unsign_seat_count(signed_seat_count: str, salt: str) -> int:
    """Recover the integer seat count from its signed form.

    Raises BillingError if the signature does not verify (i.e. the
    client tampered with the value).
    """
    try:
        seat_count = int(unsign_string(signed_seat_count, salt))
    except signing.BadSignature:
        raise BillingError("tampered seat count")
    return seat_count
def check_upgrade_parameters(
    billing_modality: str,
    schedule: str,
    license_management: Optional[str],
    licenses: Optional[int],
    has_stripe_token: bool,
    seat_count: int,
) -> None:
    """Validate the normalized upgrade-form parameters.

    Raises BillingError on any invalid combination; in particular,
    automatic card payment requires a Stripe token.
    """
    charge_automatically = billing_modality == "charge_automatically"
    if billing_modality not in VALID_BILLING_MODALITY_VALUES:  # nocoverage
        raise BillingError("unknown billing_modality")
    if schedule not in VALID_BILLING_SCHEDULE_VALUES:  # nocoverage
        raise BillingError("unknown schedule")
    if license_management not in VALID_LICENSE_MANAGEMENT_VALUES:  # nocoverage
        raise BillingError("unknown license_management")
    if charge_automatically and not has_stripe_token:
        raise BillingError("autopay with no card")
    validate_licenses(charge_automatically, licenses, seat_count)
@require_organization_member
@has_request_variables
def upgrade(
    request: HttpRequest,
    user: UserProfile,
    billing_modality: str = REQ(str_validator=check_string_in(VALID_BILLING_MODALITY_VALUES)),
    schedule: str = REQ(str_validator=check_string_in(VALID_BILLING_SCHEDULE_VALUES)),
    signed_seat_count: str = REQ(),
    salt: str = REQ(),
    license_management: Optional[str] = REQ(
        default=None, str_validator=check_string_in(VALID_LICENSE_MANAGEMENT_VALUES)
    ),
    licenses: Optional[int] = REQ(json_validator=check_int, default=None),
    stripe_token: Optional[str] = REQ(default=None),
) -> HttpResponse:
    """Endpoint backing the /upgrade billing form submission.

    Verifies the signed seat count, normalizes the requested billing
    parameters, and starts the initial upgrade.  BillingError is logged
    and re-raised; any other exception is wrapped in a generic
    BillingError so internals are not leaked to the client.
    """
    try:
        seat_count = unsign_seat_count(signed_seat_count, salt)
        # With automatic license management and card payment, the license
        # count is pinned to the current seat count rather than user input.
        if billing_modality == "charge_automatically" and license_management == "automatic":
            licenses = seat_count
        # Invoiced purchases are always annual with manual licenses.
        if billing_modality == "send_invoice":
            schedule = "annual"
            license_management = "manual"
        check_upgrade_parameters(
            billing_modality,
            schedule,
            license_management,
            licenses,
            stripe_token is not None,
            seat_count,
        )
        assert licenses is not None
        automanage_licenses = license_management == "automatic"

        billing_schedule = {"annual": CustomerPlan.ANNUAL, "monthly": CustomerPlan.MONTHLY}[
            schedule
        ]
        process_initial_upgrade(user, licenses, automanage_licenses, billing_schedule, stripe_token)
    except BillingError as e:
        if not settings.TEST_SUITE:  # nocoverage
            billing_logger.warning(
                "BillingError during upgrade: %s. user=%s, realm=%s (%s), billing_modality=%s, "
                "schedule=%s, license_management=%s, licenses=%s, has stripe_token: %s",
                e.error_description,
                user.id,
                user.realm.id,
                user.realm.string_id,
                billing_modality,
                schedule,
                license_management,
                licenses,
                stripe_token is not None,
            )
        raise
    except Exception:
        billing_logger.exception("Uncaught exception in billing:", stack_info=True)
        error_message = BillingError.CONTACT_SUPPORT.format(email=settings.ZULIP_ADMINISTRATOR)
        error_description = "uncaught exception during upgrade"
        raise BillingError(error_description, error_message)
    else:
        return json_success()
@zulip_login_required
def initial_upgrade(request: HttpRequest) -> HttpResponse:
    """Render the /upgrade page, or redirect to the billing page when the
    realm already has a plan, a pending sponsorship, or is sponsored."""
    user = request.user
    assert user.is_authenticated

    if not settings.BILLING_ENABLED or user.is_guest:
        return render(request, "404.html", status=404)

    billing_page_url = reverse(billing_home)

    customer = get_customer_by_realm(user.realm)
    if customer is not None and (
        get_current_plan_by_customer(customer) is not None or customer.sponsorship_pending
    ):
        if request.GET.get("onboarding") is not None:
            billing_page_url = f"{billing_page_url}?onboarding=true"
        return HttpResponseRedirect(billing_page_url)

    if is_sponsored_realm(user.realm):
        return HttpResponseRedirect(billing_page_url)

    percent_off = Decimal(0)
    if customer is not None and customer.default_discount is not None:
        percent_off = customer.default_discount

    seat_count = get_latest_seat_count(user.realm)
    # The seat count is signed so the client cannot tamper with it; the
    # upgrade() endpoint verifies it via unsign_seat_count().
    signed_seat_count, salt = sign_string(str(seat_count))
    context: Dict[str, Any] = {
        "realm": user.realm,
        "publishable_key": STRIPE_PUBLISHABLE_KEY,
        "email": user.delivery_email,
        "seat_count": seat_count,
        "signed_seat_count": signed_seat_count,
        "salt": salt,
        "min_invoiced_licenses": max(seat_count, MIN_INVOICED_LICENSES),
        "default_invoice_days_until_due": DEFAULT_INVOICE_DAYS_UNTIL_DUE,
        "plan": "Zulip Standard",
        "free_trial_days": settings.FREE_TRIAL_DAYS,
        "onboarding": request.GET.get("onboarding") is not None,
        # Prices are in cents.
        "page_params": {
            "seat_count": seat_count,
            "annual_price": 8000,
            "monthly_price": 800,
            "percent_off": float(percent_off),
        },
        "realm_org_type": user.realm.org_type,
        "sorted_org_types": sorted(
            (
                [org_type_name, org_type]
                for (org_type_name, org_type) in Realm.ORG_TYPES.items()
                if not org_type.get("hidden")
            ),
            key=lambda d: d[1]["display_order"],
        ),
    }
    response = render(request, "corporate/upgrade.html", context=context)
    return response
class SponsorshipRequestForm(forms.Form):
    """Validates the sponsorship-request form posted to sponsorship()."""

    website = forms.URLField(max_length=ZulipSponsorshipRequest.MAX_ORG_URL_LENGTH)
    organization_type = forms.IntegerField()
    description = forms.CharField(widget=forms.Textarea)
@require_organization_member
@has_request_variables
def sponsorship(
    request: HttpRequest,
    user: UserProfile,
    organization_type: str = REQ("organization-type"),
    website: str = REQ(),
    description: str = REQ(),
) -> HttpResponse:
    """Record a sponsorship request for the user's realm.

    Saves a ZulipSponsorshipRequest, updates the realm's org type and
    sponsorship-pending flag, promotes the requester to billing admin,
    and emails the support team with the details.
    """
    realm = user.realm

    requested_by = user.full_name
    user_role = user.get_role_name()
    support_realm_uri = get_realm(settings.STAFF_SUBDOMAIN).uri
    # Deep link into the staff support tool, pre-filtered to this realm.
    support_url = urljoin(
        support_realm_uri,
        urlunsplit(("", "", reverse("support"), urlencode({"q": realm.string_id}), "")),
    )

    post_data = request.POST.copy()
    # We need to do this because the field name in the template
    # for organization type contains a hyphen and the form expects
    # an underscore.
    post_data.update(organization_type=organization_type)
    form = SponsorshipRequestForm(post_data)

    with transaction.atomic():
        if form.is_valid():
            sponsorship_request = ZulipSponsorshipRequest(
                realm=realm,
                requested_by=user,
                org_website=form.cleaned_data["website"],
                org_description=form.cleaned_data["description"],
                org_type=form.cleaned_data["organization_type"],
            )
            sponsorship_request.save()

            org_type = form.cleaned_data["organization_type"]
            if realm.org_type != org_type:
                realm.org_type = org_type
                realm.save(update_fields=["org_type"])

            update_sponsorship_status(realm, True, acting_user=user)
            do_make_user_billing_admin(user)

            org_type_display_name = get_org_type_display_name(org_type)

    context = {
        "requested_by": requested_by,
        "user_role": user_role,
        "string_id": realm.string_id,
        "support_url": support_url,
        "organization_type": org_type_display_name,
        "website": website,
        "description": description,
    }
    send_email(
        "zerver/emails/sponsorship_request",
        to_emails=[FromAddress.SUPPORT],
        from_name="Zulip sponsorship",
        from_address=FromAddress.tokenized_no_reply_address(),
        reply_to_email=user.delivery_email,
        context=context,
    )

    return json_success()
| hackerkid/zulip | corporate/views/upgrade.py | Python | apache-2.0 | 9,828 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018:
# Matthieu Estrada, ttamalfor@gmail.com
#
# This file is part of (AlignakApp).
#
# (AlignakApp) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (AlignakApp) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (AlignakApp). If not, see <http://www.gnu.org/licenses/>.
import unittest2
from alignak_app.utils.config import settings
from alignak_app.locales.locales import init_localization
from alignak_app.backend.backend import app_backend
from alignak_app.items.daemon import Daemon
from alignak_app.items.event import Event
from alignak_app.items.history import History
from alignak_app.items.host import Host
from alignak_app.items.item import *
from alignak_app.items.livesynthesis import LiveSynthesis
from alignak_app.items.service import Service
from alignak_app.items.user import User
from alignak_app.items.realm import Realm
class TestAllItems(unittest2.TestCase):
    """
    This file test methods of ItemModel class objects
    """

    # NOTE(review): the three calls below run at class-definition (import)
    # time, including a live backend login -- confirm this is intended.
    settings.init_config()
    init_localization()
    app_backend.login()

    # Host data test: ten Host fixtures shared by the tests below.
    host_list = []
    for i in range(0, 10):
        host = Host()
        host.create(
            '_id%d' % i,
            {
                'name': 'host%d' % i,
                'ls_downtimed': True,
                'ls_acknowledged': True,
                'ls_state': 'UNKNOWN',
                '_overall_state_id': 4,
                'passive_checks_enabled': False,
                'active_checks_enabled': True
            },
            'host%d' % i
        )
        host_list.append(host)

    # Service data test: ten Service fixtures, one per host above.
    service_list = []
    for i in range(0, 10):
        service = Service()
        service.create(
            '_id%d' % i,
            {
                'name': 'service%d' % i,
                'alias': 'Service %d' % i,
                'host': '_id%d' % i,
                'ls_acknowledged': False,
                'ls_downtimed': False,
                'ls_state': 'CRITICAL',
                'aggregation': 'disk',
                '_overall_state_id': 4,
                'passive_checks_enabled': False,
                'active_checks_enabled': True
            },
            'service%d' % i
        )
        service_list.append(service)

    def test_item_model(self):
        """Create ItemModel"""
        under_test = Item()
        under_test.create('_id', {'ls_state': 'DOWN'}, 'name')

        self.assertTrue('_id' == under_test.item_id)
        self.assertTrue('ls_state' in under_test.data)
        self.assertTrue('DOWN' == under_test.data['ls_state'])
        self.assertTrue('name' == under_test.name)

    def test_item_model_get_data(self):
        """Get Data ItemModel"""
        under_test = Item()
        under_test.create('_id', {'ls_state': 'DOWN', 'ls_acknowledged': True}, 'name')

        data_test = under_test.data['ls_state']

        self.assertTrue('DOWN' == data_test)

    def test_item_model_update_data(self):
        """Update Data ItemModel"""
        under_test = Item()
        under_test.create('_id', {'ls_state': 'DOWN', 'ls_acknowledged': True}, 'name')

        under_test.update_data('ls_acknowledged', False)

        data_test = under_test.data['ls_acknowledged']

        self.assertTrue(data_test is False)

    def test_get_icon_name(self):
        """Get Icon Name"""
        under_test = get_icon_name(
            'host', 'UP', acknowledge=False, downtime=False, monitored=1)
        self.assertEqual('hosts_up', under_test)

        under_test = get_icon_name(
            'service', 'WARNING', acknowledge=False, downtime=False, monitored=1)
        self.assertEqual('services_warning', under_test)

        # Acknowledge/downtime take precedence over the raw state.
        under_test = get_icon_name(
            'host', 'DOWN', acknowledge=True, downtime=False, monitored=1)
        self.assertEqual('acknowledge', under_test)

        under_test = get_icon_name(
            'service', 'UNREACHABLE', acknowledge=True, downtime=True, monitored=2)
        self.assertEqual('downtime', under_test)

        under_test = get_icon_name(
            'host', 'WRONG_STATUS', acknowledge=False, downtime=False, monitored=1)
        self.assertEqual('error', under_test)

        under_test = get_icon_name(
            'host', 'UP', acknowledge=False, downtime=False, monitored=False + False)
        self.assertEqual('hosts_not_monitored', under_test)

    def test_get_icon_name_from_state(self):
        """Get Icon Name from State"""
        under_test = get_icon_name_from_state('host', 'UP')
        self.assertEqual('hosts_up', under_test)

        under_test = get_icon_name_from_state('service', 'CRITICAL')
        self.assertEqual('services_critical', under_test)

        under_test = get_icon_name_from_state('host', 'ACKNOWLEDGE')
        self.assertEqual('acknowledge', under_test)

        under_test = get_icon_name_from_state('service', 'DOWNTIME')
        self.assertEqual('downtime', under_test)

    def test_get_real_host_state_icon(self):
        """Get Real Host State Icon"""
        # Service data test
        services_test = []
        for i in range(0, 5):
            service = Service()
            service.create(
                '_id%d' % i,
                {'name': 'service%d' % i, '_overall_state_id': i},
                'service%d' % i
            )
            services_test.append(service)
            service = Service()
            service.create(
                'other_id2%d' % i,
                {'name': 'other_service2%d' % i, '_overall_state_id': i},
                'other_service%d' % i
            )
            services_test.append(service)

        under_test = get_overall_state_icon(services_test, 0)

        self.assertEqual('all_services_critical', under_test)

        # Overall state id of 10 does not exist
        under_test = get_overall_state_icon([], 10)

        self.assertEqual('all_services_none', under_test)

    def test_get_request_history_model(self):
        """Get History Request Model"""
        under_test = History.get_request_model()

        self.assertTrue('endpoint' in under_test)
        self.assertEqual('history', under_test['endpoint'])
        self.assertTrue('params' in under_test)
        self.assertTrue('projection' in under_test)

    def test_get_history_icon_name_from_message(self):
        """Get History Icon from State"""
        under_test = History.get_history_icon_name('UNKNOWN', 'downtime')
        self.assertEqual('downtime', under_test)

        under_test = History.get_history_icon_name('UP', 'ack')
        self.assertEqual('acknowledge', under_test)

        under_test = History.get_history_icon_name('UP', 'event_type')
        self.assertEqual('hosts_up', under_test)

        under_test = History.get_history_icon_name('DOWN', 'event_type')
        self.assertEqual('hosts_down', under_test)

        under_test = History.get_history_icon_name('UNREACHABLE', 'event_type')
        self.assertEqual('services_unreachable', under_test)

        under_test = History.get_history_icon_name('OK', 'event_type')
        self.assertEqual('services_ok', under_test)

        under_test = History.get_history_icon_name('WARNING', 'event_type')
        self.assertEqual('services_warning', under_test)

        under_test = History.get_history_icon_name('CRITICAL', 'event_type')
        self.assertEqual('services_critical', under_test)

        under_test = History.get_history_icon_name('UNKNOWN', 'event_type')
        self.assertEqual('services_unknown', under_test)

        under_test = History.get_history_icon_name('error', 'event_type')
        self.assertEqual('error', under_test)

    def test_get_request_user_model(self):
        """Get User Request Model"""
        under_test = User.get_request_model('')

        self.assertTrue('endpoint' in under_test)
        self.assertEqual('user', under_test['endpoint'])
        self.assertTrue('params' in under_test)
        self.assertTrue('projection' in under_test)

    def test_get_user_role(self):
        """Get User Role"""
        # User case
        user_test = User()
        user_test.create(
            '_id',
            {'is_admin': False, 'can_submit_commands': False, 'back_role_super_admin': False},
            'name'
        )

        under_test = user_test.get_role()

        self.assertEqual('user', under_test)

        # Administrator case
        user_test = User()
        user_test.create(
            '_id',
            {'is_admin': True, 'can_submit_commands': False, 'back_role_super_admin': False},
            'name'
        )

        under_test = user_test.get_role()

        self.assertEqual('administrator', under_test)

        # Power case
        user_test = User()
        user_test.create(
            '_id',
            {'is_admin': False, 'can_submit_commands': True, 'back_role_super_admin': False},
            'name'
        )

        under_test = user_test.get_role()

        self.assertEqual('power', under_test)

    def test_get_request_host_model(self):
        """Get Host Request Model"""
        under_test = Host.get_request_model()

        self.assertTrue('endpoint' in under_test)
        self.assertEqual('host', under_test['endpoint'])
        self.assertTrue('params' in under_test)
        self.assertTrue('projection' in under_test)

    def test_get_overall_tooltip(self):
        """Get Overall Tooltip of Host"""
        under_test = Host()

        under_test.create(
            '_id1',
            {
                '_overall_state_id': 1,
                'ls_state': 'DOWN',
                'ls_downtimed': True,
                'ls_acknowledged': False,
                'active_checks_enabled': True,
                'passive_checks_enabled': False,
            },
            'hostname'
        )

        tooltip_test = under_test.get_overall_tooltip([])

        self.assertEqual('Hostname is DOWN but downtimed, no services...', tooltip_test)

    def test_get_request_service_model(self):
        """Get Service Request Model"""
        under_test = Service.get_request_model()

        self.assertTrue('endpoint' in under_test)
        self.assertEqual('service', under_test['endpoint'])
        self.assertTrue('params' in under_test)
        self.assertTrue('projection' in under_test)

    def test_get_request_daemon_model(self):
        """Get Daemon Request Model"""
        under_test = Daemon.get_request_model()

        self.assertTrue('endpoint' in under_test)
        self.assertEqual('alignakdaemon', under_test['endpoint'])
        self.assertTrue('params' in under_test)
        self.assertTrue('projection' in under_test)

    def test_get_daemons_names(self):
        """Get All Daemon Names"""
        daemon_names = [
            'poller',
            'receiver',
            'reactionner',
            'arbiter',
            'scheduler',
            'broker'
        ]

        self.assertEqual(daemon_names, Daemon.get_daemons_names())

    def test_get_request_event_model(self):
        """Get Event Request Model"""
        under_test = Event.get_request_model()

        self.assertTrue('endpoint' in under_test)
        self.assertEqual('history', under_test['endpoint'])
        self.assertTrue('params' in under_test)
        self.assertTrue('projection' in under_test)

    def test_get_request_livesynthesis_model(self):
        """Get LiveSynthesis Request Model"""
        under_test = LiveSynthesis.get_request_model()

        self.assertTrue('endpoint' in under_test)
        self.assertEqual('livesynthesis', under_test['endpoint'])
        self.assertTrue('params' in under_test)
        self.assertTrue('projection' in under_test)

    # NOTE(review): missing the 'test_' prefix, so this method is never
    # collected by the test runner -- confirm and rename if unintended.
    def get_request_realm_model(self):
        """get Realm Request Model"""
        under_test = Realm.get_request_model()

        self.assertTrue('endpoint' in under_test)
        self.assertEqual('realm', under_test['endpoint'])
        self.assertTrue('params' in under_test)
        self.assertTrue('projection' in under_test)
| Alignak-monitoring-contrib/alignak-app | test/test_all_items.py | Python | agpl-3.0 | 12,482 |
"""
Fun facts about the St. Jude Memphis Marathons.
Data retrieved from:
https://www.stjude.org/get-involved/at-play/fitness-for-st-jude/memphis-marathon/participants/results.html
"""
import locale
import sys
from statistics import mean, median, mode, StatisticsError
from collections import Counter
import tablib
import parsers
def is_numeric(value):
    """Return True if *value* can safely be converted to an int.

    Accepts strings of decimal digits, integral floats (3.0), and ints.
    Uses str.isdecimal() rather than str.isnumeric(): isnumeric() also
    accepts characters such as superscripts and vulgar fractions
    ('²', '½') that make ``int()`` raise ValueError.
    """
    return any([
        type(value) is str and value.isdecimal(),
        hasattr(value, 'is_integer') and value.is_integer(),
        type(value) is int,
    ])
def _fmt_mmss(total_seconds):
    """Format a duration in seconds as 'M:S' (no zero-padding, matching
    the historical output of this script)."""
    minutes, seconds = divmod(int(total_seconds), 60)
    return "{}:{}".format(minutes, seconds)


def _fmt_hhmmss(total_seconds):
    """Format a duration in seconds as 'H:M:S' (no zero-padding)."""
    minutes, seconds = divmod(int(total_seconds), 60)
    hours, minutes = divmod(minutes, 60)
    return "{}:{}:{}".format(hours, minutes, seconds)


def _pace_to_seconds(pace):
    """Convert an 'MM:SS' pace string to an int number of seconds."""
    minutes, seconds = pace.split(":")
    return int(seconds) + int(minutes) * 60


def _time_to_seconds(time_string):
    """Convert an 'H:MM:SS' finish-time string to an int number of seconds."""
    hours, minutes, seconds = time_string.split(":")
    return int(seconds) + int(minutes) * 60 + int(hours) * 3600


def print_data(year):
    """
    Print some fun facts & stats for the given year's marathon.

    Output is unchanged from the original implementation; the repeated
    divmod/format logic is factored into the private helpers above.
    """
    locale.setlocale(locale.LC_ALL, '')  # Use locale to pretty-print the combined distance run.

    runners = parsers.parse(year)
    if not runners:
        print("Sorry, either no data or parser for {}.".format(year))
        return

    print("\n\n{} Marathon Runners".format(len(runners)))
    print("From {} different states".format(len(set(runner.state for runner in runners))))
    print("Total distance combined ==> {:n}+ miles".format(int(len(runners) * 26.2)))
    print("Mostly from (top-10 cities):")
    cities = Counter(runner.city for runner in runners)
    for city, count in cities.most_common(10):
        print("- {} ({})".format(city, count))

    # Average age (non-numeric age fields are skipped).
    ages = [int(runner.age) for runner in runners if is_numeric(runner.age)]
    try:
        mode_age = mode(ages)
    except StatisticsError:
        mode_age = 'No unique'
    print("Average age: {} mean / {} median / {} mode".format(
        int(mean(ages)), int(median(ages)), mode_age))

    # Count Female / Male participants.
    females = len([runner.sex for runner in runners if runner.sex == "F"])
    males = len([runner.sex for runner in runners if runner.sex == "M"])
    print("Females: {}!\nMales: {}!".format(females, males))

    # Average paces (stored as seconds for the math, printed as M:S).
    paces = [_pace_to_seconds(runner.pace) for runner in runners]
    try:
        mode_pace = _fmt_mmss(mode(paces))
    except StatisticsError:
        mode_pace = 'No unique'
    print("Average Pace: {} mean / {} median / {} mode".format(
        _fmt_mmss(mean(paces)), _fmt_mmss(median(paces)), mode_pace))

    # Average finish times (seconds internally, printed as H:M:S).
    times = [_time_to_seconds(runner.time) for runner in runners]
    try:
        mode_time = _fmt_hhmmss(mode(times))
    except StatisticsError:
        mode_time = 'No unique'
    print("Average Finish Time: {} mean / {} median / {} mode.".format(
        _fmt_hhmmss(mean(times)), _fmt_hhmmss(median(times)), mode_time))
def export_data():
    """
    Generate 'output.csv' summarizing every year for which we have a parser.

    Columns: Year, Total Runners, Males, Females, States, Combined Distance,
    Mean Age, Mean Pace (m:ss), Mean Finish Time (h:mm:ss).
    """
    headers = [
        'Year', 'Total Runners', 'Males', 'Females', 'States',
        'Combined Distance', 'Mean Age', 'Mean Pace', 'Mean Finish Time',
    ]
    data = tablib.Dataset(headers=headers)
    # Only years that actually have a parser implemented.
    years = sorted(
        year for year, parser in parsers.PARSERS.items()
        if parser is not None
    )
    for year in years:
        # Parse the data for the year.
        runners = parsers.parse(year)
        if not runners:
            continue
        row = []
        row.append(year)  # Year
        row.append(len(runners))  # Total Runners
        row.append(len([runner.sex for runner in runners if runner.sex == "F"]))
        row.append(len([runner.sex for runner in runners if runner.sex == "M"]))
        row.append(len(set(runner.state for runner in runners)))  # States
        row.append(int(len(runners) * 26.2))  # Combined Distance (marathon miles)
        # Average age (skip non-numeric entries).
        ages = [int(runner.age) for runner in runners if is_numeric(runner.age)]
        row.append(int(mean(ages)))
        # Average pace. Zero-pad seconds so "7:05" doesn't render as "7:5".
        paces = []
        for runner in runners:
            minutes, seconds = runner.pace.split(":")
            paces.append(int(seconds) + (int(minutes) * 60))
        mean_pace_minutes, mean_pace_seconds = divmod(int(mean(paces)), 60)
        row.append("{}:{:02d}".format(mean_pace_minutes, mean_pace_seconds))
        # Average finish times. Zero-pad minutes/seconds for a valid h:mm:ss.
        times = []
        for runner in runners:
            hours, minutes, seconds = runner.time.split(":")
            times.append(int(seconds) + (int(minutes) * 60) + (int(hours) * 3600))
        minutes, seconds = divmod(int(mean(times)), 60)
        hours, minutes = divmod(minutes, 60)
        row.append("{}:{:02d}:{:02d}".format(hours, minutes, seconds))
        # Append to our dataset.
        data.append(row)
        print('Calculated data for {}...'.format(year))
    with open("output.csv", "w") as csvfile:
        csvfile.write(data.csv)
    print("Output written to 'output.csv'")
if __name__ == "__main__":
    # CLI entry point: 'export' writes a CSV of all years; a numeric
    # argument prints stats for that single year.
    if len(sys.argv) == 2 and sys.argv[1].lower() == 'export':
        export_data()
    elif len(sys.argv) == 2:
        print_data(year=int(sys.argv[1]))
    else:
        print("USAGE:\n\tpython marathon_details.py <year>")
        # Fixed typo in the usage string ("pyton" -> "python").
        print("OR:\n\tpython marathon_details.py export")
| bradmontgomery/st-jude-marathon | marathon_details.py | Python | mit | 6,815 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyItem(Model):
    """A key bundle item carrying key metadata, as returned by Key Vault
    list operations.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param kid: Key identifier.
    :type kid: str
    :param attributes: The key management attributes.
    :type attributes: :class:`KeyAttributes
     <azure.keyvault.models.KeyAttributes>`
    :param tags: Application specific metadata in the form of key-value pairs.
    :type tags: dict
    :ivar managed: True if the key's lifetime is managed by key vault. If this
     is a key backing a certificate, then managed will be true.
    :vartype managed: bool
    """

    # 'managed' is populated by the service and must never be sent by clients.
    _validation = {
        'managed': {'readonly': True},
    }

    # msrest wire-format mapping: python attribute -> JSON key and type.
    _attribute_map = {
        'kid': {'key': 'kid', 'type': 'str'},
        'attributes': {'key': 'attributes', 'type': 'KeyAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'managed': {'key': 'managed', 'type': 'bool'},
    }

    def __init__(self, kid=None, attributes=None, tags=None):
        # Client-settable fields.
        self.kid = kid
        self.attributes = attributes
        self.tags = tags
        # Server-populated; always starts unset on locally built objects.
        self.managed = None
| SUSE/azure-sdk-for-python | azure-keyvault/azure/keyvault/models/key_item.py | Python | mit | 1,644 |
import tempfile
from config import *
from caffe.proto import caffe_pb2 as PB
def create_solver(solver_param, file_name=""):
    """Instantiate a caffe solver from a SolverParameter message.

    The message is serialized to a prototxt file because caffe.get_solver
    only accepts a file path.

    :param solver_param: a caffe_pb2.SolverParameter instance.
    :param file_name: optional path for the prototxt; when empty, a
        NamedTemporaryFile is used. delete=False is required because caffe
        re-opens the file by name, so the temp file is left on disk.
    :return: the solver object created by caffe.get_solver.
    """
    # Context managers guarantee the handle is closed (and the content
    # flushed) even if the write raises.
    if file_name:
        path = file_name
        with open(file_name, 'w') as f:
            f.write(str(solver_param))
    else:
        with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
            f.write(str(solver_param))
            path = f.name
    return caffe.get_solver(path)
def create_solver_proto(train_net,
                        test_net,
                        lr,
                        prefix,
                        solver_type='SGD',
                        test_iter=100,
                        test_interval=1000,
                        max_iter=1e5,
                        iter_size=1,
                        snapshot=1000,
                        display=1,
                        debug_info=False):
    """Build a caffe SolverParameter message.

    :param train_net: path to the training net prototxt.
    :param test_net: path to the test net prototxt.
    :param lr: base learning rate (fixed policy).
    :param prefix: snapshot filename prefix.
    :param solver_type: 'SGD' or 'ADAM'.
    :param test_iter: test iterations per evaluation.
    :param test_interval: iterations between evaluations.
    :param max_iter: total training iterations (may be a float literal
        such as 1e5; it is coerced to int below).
    :param iter_size: gradient accumulation steps per iteration.
    :param snapshot: iterations between snapshots.
    :param display: iterations between log lines.
    :param debug_info: enable verbose solver debugging output.
    :return: a populated PB.SolverParameter.
    """
    solver = PB.SolverParameter()
    solver.train_net = train_net
    solver.test_net.extend([test_net])
    solver.test_iter.extend([int(test_iter)])
    solver.test_interval = int(test_interval)
    solver.display = int(display)
    # Protobuf int fields reject floats; the default 1e5 is a float.
    solver.max_iter = int(max_iter)
    solver.iter_size = int(iter_size)
    solver.snapshot = int(snapshot)
    solver.snapshot_prefix = prefix
    solver.solver_mode = PB.SolverParameter.GPU
    # Compare strings with '==', not 'is': identity of equal string
    # literals is a CPython interning accident, not a guarantee.
    if solver_type == 'SGD':
        solver.solver_type = PB.SolverParameter.SGD
    elif solver_type == 'ADAM':
        solver.solver_type = PB.SolverParameter.ADAM
    solver.base_lr = lr
    solver.lr_policy = "fixed"
    solver.momentum = 0.9
    solver.momentum2 = 0.999
    solver.debug_info = debug_info
    return solver
| MPI-IS/bilateralNN | bilateralnn_code/examples/tile_segmentation/create_solver.py | Python | bsd-3-clause | 1,529 |
"""
Core Linear Algebra Tools
=========================
=============== ==========================================================
Linear algebra basics
==========================================================================
norm Vector or matrix norm
inv Inverse of a square matrix
solve Solve a linear system of equations
det Determinant of a square matrix
slogdet Logarithm of the determinant of a square matrix
lstsq Solve linear least-squares problem
pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
value decomposition
matrix_power Integer power of a square matrix
=============== ==========================================================
=============== ==========================================================
Eigenvalues and decompositions
==========================================================================
eig Eigenvalues and vectors of a square matrix
eigh Eigenvalues and eigenvectors of a Hermitian matrix
eigvals Eigenvalues of a square matrix
eigvalsh Eigenvalues of a Hermitian matrix
qr QR decomposition of a matrix
svd Singular value decomposition of a matrix
cholesky Cholesky decomposition of a matrix
=============== ==========================================================
=============== ==========================================================
Tensor operations
==========================================================================
tensorsolve Solve a linear tensor equation
tensorinv Calculate an inverse of a tensor
=============== ==========================================================
=============== ==========================================================
Exceptions
==========================================================================
LinAlgError Indicates a failed linear algebra operation
=============== ==========================================================
"""
# To get sub-modules
from info import __doc__
from linalg import *

from numpy.testing import Tester
test = Tester().test
# Expose the benchmark runner; this was mistakenly aliased to .test,
# which made `numpy.linalg.bench()` run the test suite instead.
bench = Tester().bench
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/numpy/linalg/__init__.py | Python | agpl-3.0 | 2,178 |
# -*- coding: utf-8 -*-
'''
Video Uav Tracker v 2.0
Replay a video in sync with a gps track displayed on the map.
-------------------
copyright : (C) 2017 by Salvatore Agosta
email : sagost@katamail.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
INSTRUCTION:
Synching:
- Create new project
- Select video and .gpx track (1 trkpt per second)
- Identify first couple Frame/GpsTime and select it.
- Push Synchronize
- Push Start
Replay:
- Move on map
- Create associated DB shapefile
- Add POI with associated video frame saved
- Extract frames with associated coordinates for rapid photogrammetry use
'''
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtMultimediaWidgets import QVideoWidget
import resources
class Ui_Form(object):
    """pyuic5-style UI definition for the Video UAV Tracker player window.

    Generated-style Qt boilerplate: setupUi() builds the widget tree and
    layouts, retranslateUi() installs all user-visible strings. Widget
    creation order mirrors the .ui layout and should not be reshuffled.
    """

    def setupUi(self, Form):
        """Create and lay out every widget on the top-level ``Form``."""
        Form.setObjectName("Form")
        Form.resize(706, 493)
        # Window icon loaded from the compiled Qt resource file (resources).
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/plugins/Video_UAV_Tracker/icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Form.setWindowIcon(icon)
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(Form)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        # --- Top toolbar row: MapTool, add-POI, extract-frames, close ---
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.pushButton_3 = QtWidgets.QPushButton(Form)
        font = QtGui.QFont()
        font.setKerning(True)
        self.pushButton_3.setFont(font)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/VgisIcon/Hand-icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.pushButton_3.setIcon(icon1)
        self.pushButton_3.setAutoExclusive(False)
        self.pushButton_3.setAutoDefault(False)
        self.pushButton_3.setDefault(False)
        self.pushButton_3.setFlat(False)
        self.pushButton_3.setObjectName("pushButton_3")
        self.horizontalLayout.addWidget(self.pushButton_3)
        # "Add point" tool button.
        self.toolButton_6 = QtWidgets.QToolButton(Form)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(":/plugins/Video_UAV_Tracker/iconNewTabEditorConsole.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.toolButton_6.setIcon(icon2)
        self.toolButton_6.setObjectName("toolButton_6")
        self.horizontalLayout.addWidget(self.toolButton_6)
        spacerItem = QtWidgets.QSpacerItem(23, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.toolButton_4 = QtWidgets.QToolButton(Form)
        # NOTE(review): absolute developer-machine path baked into the
        # stylesheet; this background image will be missing elsewhere.
        self.toolButton_4.setStyleSheet("background: url(/mnt/574916AB2EEEC400/LAVORO/Sviluppo_VUT_StandAlone/Progetto_VUT/115757-magic-marker-icon-people-things-hand22-sc48.png)")
        self.toolButton_4.setObjectName("toolButton_4")
        self.horizontalLayout.addWidget(self.toolButton_4)
        self.toolButton_5 = QtWidgets.QToolButton(Form)
        self.toolButton_5.setObjectName("toolButton_5")
        self.horizontalLayout.addWidget(self.toolButton_5)
        self.verticalLayout_3.addLayout(self.horizontalLayout)
        # --- Central dock: video surface, seek slider, transport bar ---
        self.dockWidget_2 = QtWidgets.QDockWidget(Form)
        self.dockWidget_2.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
        self.dockWidget_2.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea|QtCore.Qt.RightDockWidgetArea)
        self.dockWidget_2.setObjectName("dockWidget_2")
        self.dockWidgetContents_7 = QtWidgets.QWidget()
        self.dockWidgetContents_7.setObjectName("dockWidgetContents_7")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.dockWidgetContents_7)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        # Video widget painted black and flagged opaque for cheap repaints.
        self.video_frame = QVideoWidget(Form)
        p = self.video_frame.palette()
        p.setColor(QtGui.QPalette.Window, QtCore.Qt.black)
        self.video_frame.setPalette(p)
        self.video_frame.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.video_frame.sizePolicy().hasHeightForWidth())
        self.video_frame.setSizePolicy(sizePolicy)
        self.video_frame.setMinimumSize(QtCore.QSize(200, 200))
        self.video_frame.setStyleSheet("background-color: rgb(0, 0, 0);")
        self.video_frame.setObjectName("video_frame")
        self.verticalLayout.addWidget(self.video_frame)
        # Seek slider under the video.
        self.horizontalSlider = QtWidgets.QSlider(self.dockWidgetContents_7)
        self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
        self.horizontalSlider.setObjectName("horizontalSlider")
        self.verticalLayout.addWidget(self.horizontalSlider)
        # Transport row: << < play/pause mute position > >>, centered
        # by an expanding spacer on each side.
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        spacerItem1 = QtWidgets.QSpacerItem(98, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem1)
        self.toolButton_11 = QtWidgets.QToolButton(self.dockWidgetContents_7)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(":/VgisIcon/mActionArrowLeft.svg"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.toolButton_11.setIcon(icon3)
        self.toolButton_11.setObjectName("toolButton_11")
        self.horizontalLayout_3.addWidget(self.toolButton_11)
        self.SkipBacktoolButton_8 = QtWidgets.QToolButton(self.dockWidgetContents_7)
        self.SkipBacktoolButton_8.setStyleSheet("")
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(":/VgisIcon/mActionAtlasPrev.svg"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.SkipBacktoolButton_8.setIcon(icon4)
        self.SkipBacktoolButton_8.setObjectName("SkipBacktoolButton_8")
        self.horizontalLayout_3.addWidget(self.SkipBacktoolButton_8)
        self.playButton = QtWidgets.QToolButton(self.dockWidgetContents_7)
        self.playButton.setObjectName("playButton")
        self.horizontalLayout_3.addWidget(self.playButton)
        self.muteButton = QtWidgets.QToolButton(self.dockWidgetContents_7)
        self.muteButton.setText("")
        self.muteButton.setObjectName("muteButton")
        self.horizontalLayout_3.addWidget(self.muteButton)
        # Shows "elapsed / total"; text set in retranslateUi and updated
        # at runtime by the player code.
        self.replayPosition_label = QtWidgets.QLabel(self.dockWidgetContents_7)
        self.replayPosition_label.setObjectName("replayPosition_label")
        self.horizontalLayout_3.addWidget(self.replayPosition_label)
        self.SkipFortoolButton_9 = QtWidgets.QToolButton(self.dockWidgetContents_7)
        self.SkipFortoolButton_9.setStyleSheet("")
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap(":/VgisIcon/mActionAtlasNext.svg"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.SkipFortoolButton_9.setIcon(icon5)
        self.SkipFortoolButton_9.setObjectName("SkipFortoolButton_9")
        self.horizontalLayout_3.addWidget(self.SkipFortoolButton_9)
        self.toolButton_12 = QtWidgets.QToolButton(self.dockWidgetContents_7)
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap(":/VgisIcon/mActionArrowRight.svg"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.toolButton_12.setIcon(icon6)
        self.toolButton_12.setObjectName("toolButton_12")
        self.horizontalLayout_3.addWidget(self.toolButton_12)
        spacerItem2 = QtWidgets.QSpacerItem(98, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem2)
        self.verticalLayout.addLayout(self.horizontalLayout_3)
        self.gridLayout_2.addLayout(self.verticalLayout, 0, 0, 1, 1)
        self.dockWidget_2.setWidget(self.dockWidgetContents_7)
        self.verticalLayout_3.addWidget(self.dockWidget_2)
        # --- Bottom dock: "Export Frames" toolbox with progress bar ---
        self.dockWidget_4 = QtWidgets.QDockWidget(Form)
        self.dockWidget_4.setMaximumSize(QtCore.QSize(524287, 121))
        self.dockWidget_4.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
        self.dockWidget_4.setAllowedAreas(QtCore.Qt.BottomDockWidgetArea)
        self.dockWidget_4.setObjectName("dockWidget_4")
        self.dockWidgetContents_6 = QtWidgets.QWidget()
        self.dockWidgetContents_6.setObjectName("dockWidgetContents_6")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.dockWidgetContents_6)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.label = QtWidgets.QLabel(self.dockWidgetContents_6)
        self.label.setObjectName("label")
        self.verticalLayout_2.addWidget(self.label)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        # Range selection: "From A" / "To B" mark the export interval.
        self.pushButtonCutA_6 = QtWidgets.QPushButton(self.dockWidgetContents_6)
        self.pushButtonCutA_6.setEnabled(True)
        self.pushButtonCutA_6.setObjectName("pushButtonCutA_6")
        self.horizontalLayout_2.addWidget(self.pushButtonCutA_6)
        self.pushButtonCutB_6 = QtWidgets.QPushButton(self.dockWidgetContents_6)
        self.pushButtonCutB_6.setObjectName("pushButtonCutB_6")
        self.horizontalLayout_2.addWidget(self.pushButtonCutB_6)
        self.label_7 = QtWidgets.QLabel(self.dockWidgetContents_6)
        self.label_7.setObjectName("label_7")
        self.horizontalLayout_2.addWidget(self.label_7)
        # Sampling interval value; unit (meters/seconds) chosen in comboBox_6.
        self.doubleSpinBox_2 = QtWidgets.QDoubleSpinBox(self.dockWidgetContents_6)
        self.doubleSpinBox_2.setObjectName("doubleSpinBox_2")
        self.horizontalLayout_2.addWidget(self.doubleSpinBox_2)
        self.comboBox_6 = QtWidgets.QComboBox(self.dockWidgetContents_6)
        self.comboBox_6.setObjectName("comboBox_6")
        self.comboBox_6.addItem("")
        self.comboBox_6.addItem("")
        self.horizontalLayout_2.addWidget(self.comboBox_6)
        self.pushButton_5 = QtWidgets.QPushButton(self.dockWidgetContents_6)
        self.pushButton_5.setObjectName("pushButton_5")
        self.horizontalLayout_2.addWidget(self.pushButton_5)
        self.pushButtonCut_2 = QtWidgets.QPushButton(self.dockWidgetContents_6)
        self.pushButtonCut_2.setObjectName("pushButtonCut_2")
        self.horizontalLayout_2.addWidget(self.pushButtonCut_2)
        self.verticalLayout_2.addLayout(self.horizontalLayout_2)
        self.progressBar = QtWidgets.QProgressBar(self.dockWidgetContents_6)
        self.progressBar.setProperty("value", 24)
        self.progressBar.setObjectName("progressBar")
        self.verticalLayout_2.addWidget(self.progressBar)
        self.dockWidget_4.setWidget(self.dockWidgetContents_6)
        self.verticalLayout_3.addWidget(self.dockWidget_4)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Install all user-visible (translatable) strings and tooltips."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Video UAV Tracker - Player"))
        self.pushButton_3.setToolTip(_translate("Form", "<html><head/><body><p>Move along Video directly clicking on gps track</p></body></html>"))
        self.pushButton_3.setText(_translate("Form", "MapTool "))
        self.toolButton_6.setToolTip(_translate("Form", "<html><head/><body><p>Add point</p></body></html>"))
        self.toolButton_6.setText(_translate("Form", "o"))
        self.toolButton_4.setToolTip(_translate("Form", "<html><head/><body><p>Enable extract frames toolbox</p><p><br/></p></body></html>"))
        self.toolButton_4.setText(_translate("Form", "Extract frames"))
        self.toolButton_5.setText(_translate("Form", "Close"))
        self.toolButton_11.setText(_translate("Form", "<<"))
        self.SkipBacktoolButton_8.setText(_translate("Form", "<"))
        self.playButton.setText(_translate("Form", "> / ||"))
        self.replayPosition_label.setText(_translate("Form", "-:- / -:-"))
        self.SkipFortoolButton_9.setText(_translate("Form", ">"))
        self.toolButton_12.setText(_translate("Form", ">>"))
        self.label.setText(_translate("Form", "Export Frames Tool"))
        self.pushButtonCutA_6.setToolTip(_translate("Form", "<html><head/><body><p>Export from actual Video Frame</p></body></html>"))
        self.pushButtonCutA_6.setText(_translate("Form", "From A"))
        self.pushButtonCutB_6.setToolTip(_translate("Form", "<html><head/><body><p>Export to actual Video Frame</p></body></html>"))
        self.pushButtonCutB_6.setText(_translate("Form", "To B"))
        self.label_7.setText(_translate("Form", "Pick one frame every"))
        self.comboBox_6.setItemText(0, _translate("Form", "meters"))
        self.comboBox_6.setItemText(1, _translate("Form", "seconds"))
        self.pushButton_5.setText(_translate("Form", "Cancel"))
        self.pushButtonCut_2.setText(_translate("Form", "Extract!"))
| sagost/VideoUavTracker | vut_qgismap.py | Python | gpl-2.0 | 13,143 |
# Copyright 2016 Sean Dague
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import re
# Pattern for a signed decimal number followed by a unit letter.
# Raw string avoids invalid-escape warnings for \d and \. in newer Pythons.
regex = r'(-?\d+(\.\d+)?)(F|C|K)'

# The scale factor between C and F
CScale = 1.8
# The offset between C and F
FOffset = 32
# The offset between K and C
KOffset = 273.15
# The offset between R and F
ROffset = 459.67

# Dewpoint constants (Magnus formula)
a = 17.271
b = 237.7  # degC


class Temperature(object):
    """A temperature with units, parsed from strings like "72.5F" or "-3C".

    Supported units are Fahrenheit (F), Celsius (C) and Kelvin (K).
    """
    units = "F"
    temp = 0.0

    def __init__(self, data="0F"):
        m = re.match(regex, data)
        self.temp = float(m.group(1))
        self.units = m.group(3)

    def __str__(self):
        return "%f%s" % (self.temp, self.units)

    def is_F(self):
        return self.units == "F"

    def is_C(self):
        return self.units == "C"

    def is_K(self):
        return self.units == "K"

    def _convert_to(self, unit):
        """Return self.temp expressed in `unit` ("F", "C" or "K").

        Bug fix: the Kelvin conversions previously applied KOffset with
        the wrong sign (e.g. K->C added 273.15 instead of subtracting).
        """
        if unit == self.units:
            return self.temp
        if unit == "C":
            if self.is_F():
                return (self.temp - FOffset) / CScale
            elif self.is_K():
                # 0C == 273.15K, so K -> C subtracts the offset.
                return self.temp - KOffset
        elif unit == "F":
            if self.is_C():
                return (self.temp * CScale) + FOffset
            elif self.is_K():
                # Go K -> C first, then C -> F.
                return ((self.temp - KOffset) * CScale) + FOffset
        elif unit == "K":
            if self.is_F():
                return ((self.temp - FOffset) / CScale) + KOffset
            elif self.is_C():
                return self.temp + KOffset
        # Unknown target unit: return the raw value unchanged.
        return self.temp

    def to_C(self):
        return self._convert_to("C")

    def to_F(self):
        return self._convert_to("F")

    def to_K(self):
        return self._convert_to("K")

    def as_C(self):
        temp = self._convert_to("C")
        return Temperature("%fC" % temp)

    def as_F(self):
        temp = self._convert_to("F")
        return Temperature("%fF" % temp)

    def as_K(self):
        temp = self._convert_to("K")
        return Temperature("%fK" % temp)

    def dewpoint(self, humid):
        """Return the dewpoint for relative humidity `humid` (percent),
        expressed in this temperature's units (Magnus approximation)."""
        def _gamma(t, humid):
            return (a * t / (b + t)) + math.log(humid / 100.0)

        t = self.as_C()
        T = t.temp
        t.temp = (b * _gamma(T, humid)) / (a - _gamma(T, humid))
        return t._convert_to(self.units)
| sdague/arwn | arwn/temperature.py | Python | apache-2.0 | 2,776 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.