repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
glenjarvis/Lemonade | src/lemonade/models/chart_of_accounts.py | Python | bsd-3-clause | 3,271 | 0 | #!/usr/bin/env python
# pylint: disable=W0201
"""Chart of Accounts Module for the Lemonade accounting project"""
import csv
from collections import OrderedDict
from lemonade import helpers
class ChartOfAccountsFormatException(Exception):
"""Exception when parsing Chart of Accounts"""
pass
class MissingAccountException(Exception):
"""Exception when the account in question is missing"""
pass
class ChartOfAccounts(object):
"""ChartOfAccounts abstraction for Chart of Accounts"""
    def __init__(self, filename=None, raw_data=None):
        """Initialize the chart, optionally loading accounts.

        `filename` takes precedence: when given, accounts are read from
        that CSV file via `from_csv`.  Otherwise `raw_data` (an iterable
        of (account_number, account_title) records) is loaded via
        `populate_accounts`.  With neither, an empty chart is created.
        """
        # OrderedDict preserves insertion order for CSV round-trips.
        self._accounts = OrderedDict()
        if filename is None:
            if raw_data is None:
                # It was called without any initializing data. We're done.
                return
            else:
                self.populate_accounts(raw_data)
        else:
            self.from_csv(filename)
    def populate_accounts(self, raw_data):
        """Given iterable raw_data, populate the account data.

        Each record must be a sequence of at least two items:
        (account_number, account_title).  Integer account numbers are
        normalized to strings so later lookups match CSV-sourced keys.

        Raises ChartOfAccountsFormatException when `raw_data` is not
        iterable or a record is shorter than two items.
        """
        if hasattr(raw_data, '__iter__'):
            for record in raw_data:
                if len(record) < 2:
                    # We need both account numbers and account titles
                    raise ChartOfAccountsFormatException(
                        "Each records needs to be at least of len 2")
                else:
                    # Data is validated. Let's save in self._accounts
                    account_number = record[0]
                    if isinstance(account_number, int):
                        # Keys are stored as strings (CSV reads yield strings).
                        account_number = str(account_number)
                    account_title = record[1]
                    self._accounts[account_number] = account_title
        else:
            # Whatever was sent as initializing data just can't be used.
            raise ChartOfAccountsFormatException(
                "'{0}' not iterable".format(raw_data))
    def get_title(self, account_number):
        """Return Account Title for given Account Number.

        `account_number` may be an int or a str; ints are normalized to
        strings to match how keys are stored.

        Raises MissingAccountException when the account is unknown.
        """
        if isinstance(account_number, int):
            account_number = str(account_number)
        if account_number not in self._accounts:
            raise MissingAccountException(
                "Account number '{0}' not in Chart Of Accounts ".format(
                    account_number))
        return self._accounts[account_number]
def to_csv(self, output_file):
"""Write Chart of Accounts to file given in Excel dialect CSV"""
handler, do_i_close = helpers.check_or_open_file(output_file, 'w')
spreadsheet = csv.writer(handler, dialect="excel")
for account_number in self._accounts:
spreadsheet.writer | ow([account_number,
self._accounts[account_number]])
helpers.close_file_if_necessary(handler, do_i_close)
def from_csv(self, | input_file):
"""Read Chart of Accounts from file given in Excel dialect CSV"""
handler, do_i_close = helpers.check_or_open_file(input_file, 'r')
spreadsheet = csv.reader(handler, dialect="excel")
self.populate_accounts(spreadsheet)
helpers.close_file_if_necessary(handler, do_i_close)
def __iter__(self):
return self._accounts.iteritems()
|
prolifik/Furrycoin | contrib/seeds/makeseeds.py | Python | mit | 709 | 0.015515 | #!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#
NSEEDS=600
import re
import sys
from subprocess import check_output
def main():
lines = sys.stdin.readlines()
ips = []
pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):11000")
for line in lines:
m | = pattern.match(line)
if m is None:
continue
ip = 0
for i in range(0,4):
ip = ip + (int(m.group(i+1)) << (8*(i)))
if ip == 0:
continue
ips.append(ip)
for row in range(0, min(NSEEDS,len(ips)), 8):
print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+8] ]) + ","
if __name__ == '__mai | n__':
main()
|
south-coast-science/scs_core | tests/sys/timeout_test.py | Python | mit | 494 | 0 | #!/usr/bin/env python3
"""
Created on 17 Sep 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
import time
from scs_core.sys.time | out import Timeout
# --------------------------------------------------------------------------------------------------------------------
# run...
timeout = Timeout(5)
print(timeout)
print("-")
try:
with timeout:
time.sleep(10)
print("slept")
except TimeoutError:
print("TimeoutEr | ror")
finally:
print("done")
|
Southpaw-TACTIC/TACTIC | src/install/service/win32_service.py | Python | epl-1.0 | 2,526 | 0.015439 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced | , transmitted,
# or disclosed in any way without written permission.
#
#
#
# Repurposed from http://docs.cherrypy.org/cherrypy22-as-windows-service
'''
The most basic (working) CherryPy 2.2 Windows service possible.
Requires Mark Hammond's pywin32 package.
'''
__all__ = ['TacticService']
import os, sys
import win32serviceutil
import win32service
import win32event
import tacticenv
from pyasm.common import Environment
from pyasm.web import TacticMonitor
class WinService(object):
| def __init__(self):
self.monitor = TacticMonitor()
def init(self):
self.monitor.mode = "init"
self.monitor.execute()
def run(self):
self.monitor.mode = "monitor"
self.monitor.execute()
def write_stop_monitor():
    '''write a stop.monitor file to notify TacticMonitor to exit'''
    log_dir = "%s/log" % Environment.get_tmp_dir()
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    # The monitor watches for this file; its content is this process's pid.
    file = open("%s/stop.monitor" % log_dir, "w")
    pid = os.getpid()
    file.write(str(pid))
    file.close()
def stop():
    """Request a monitor shutdown and give it a moment to clean up."""
    write_stop_monitor()
    import time
    # Grace period so the monitor can notice the stop file before we return.
    time.sleep(3)
    # let monitor.py handle killing of start_up and watch_folder
class TacticService(win32serviceutil.ServiceFramework):
    '''NT Service wrapper that runs the TACTIC monitor.'''

    # Names registered with / shown by the Windows service manager.
    _svc_name_ = "TacticService"
    _svc_display_name_ = "Tactic Application Server"

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        # create an event that SvcDoRun can wait on and SvcStop
        # can set.
        self.stop_event = win32event.CreateEvent(None, 0, 0, None)

    def SvcDoRun(self):
        """Service entry point: start the TACTIC monitor, then block until stopped."""
        self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
        service = WinService()
        service.init()
        self.ReportServiceStatus(win32service.SERVICE_RUNNING)
        service.run()
        # run() needs to run after SERVICE_RUNNING...
        win32event.WaitForSingleObject(self.stop_event, win32event.INFINITE)

    def SvcStop(self):
        """Called by the SCM on stop: ask the monitor to exit, then release SvcDoRun."""
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        stop()
        win32event.SetEvent(self.stop_event)
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(TacticService)
|
clemkoa/scikit-learn | sklearn/ensemble/forest.py | Python | bsd-3-clause | 78,779 | 0.000114 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
    """Draw a bootstrap sample: ``n_samples`` indices with replacement.

    Private function used by the _parallel_build_trees function.
    """
    rng = check_random_state(random_state)
    return rng.randint(0, n_samples, n_samples)
def _generate_unsampled_indices(random_state, n_samples):
    """Return the out-of-bag indices for one bootstrap draw.

    Private function used by the forest._set_oob_score function.
    """
    drawn = _generate_sample_indices(random_state, n_samples)
    counts = np.bincount(drawn, minlength=n_samples)
    # Samples never drawn by the bootstrap form the out-of-bag set.
    return np.flatnonzero(counts == 0)
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
| verb | ose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
indicates that the samples goes through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of s |
3DGenomes/tadbit | _pytadbit/utils/three_dim_stats.py | Python | gpl-3.0 | 28,455 | 0.00376 | """
30 Oct 2013
"""
import sys
from warnings import catch_warnings, simplefilter
from itertools import combinations
from math import pi, sqrt, cos, sin, acos
from copy import deepcopy
import numpy as np
from numpy.random import shuffle as np_shuffle
from scipy.stats import skew, kurtosis, norm as sc_norm
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import rcParams
from pytadbit.eqv_rms_drms import rmsdRMSD_wrapper
from pytadbit.consistency import consistency_wrapper
from pytadbit.utils.extraviews import tadbit_savefig
def generate_sphere_points(n=100):
    """
    Return ``n`` 3D points spread over the unit sphere with the
    Golden Section Spiral algorithm.

    :param n: number of points in the sphere
    :returns: list of (x, y, z) tuples on a sphere of radius 1,
       centered in the origin
    """
    golden_angle = pi * (3 - sqrt(5))
    step = 2 / float(n)
    points = []
    for idx in range(int(n)):
        yi = idx * step - 1 + (step / 2)
        radius = sqrt(1 - yi * yi)
        theta = idx * golden_angle
        points.append((cos(theta) * radius, yi, sin(theta) * radius))
    return points
def get_center_of_mass(x, y, z, zeros):
    """
    Center of mass of the particles whose entry in ``zeros`` is truthy.

    :param x: list of x coordinates
    :param y: list of y coordinates
    :param z: list of z coordinates
    :param zeros: per-particle flags; falsy entries are excluded
    :returns: (xm, ym, zm) tuple
    """
    total_x = total_y = total_z = 0.
    n_kept = 0
    for i in range(len(x)):
        if not zeros[i]:
            continue
        n_kept += 1
        total_x += x[i]
        total_y += y[i]
        total_z += z[i]
    # NOTE: raises ZeroDivisionError when no particle is kept (as before).
    return total_x / n_kept, total_y / n_kept, total_z / n_kept
def mass_center(x, y, z, zeros):
    """
    Translate the coordinate lists in place so that their center of mass
    (computed over particles flagged in ``zeros``) sits at the origin.

    :param x: list of x coordinates
    :param y: list of y coordinates
    :param z: list of z coordinates
    """
    cx, cy, cz = get_center_of_mass(x, y, z, zeros)
    for i in range(len(x)):
        x[i] -= cx
        y[i] -= cy
        z[i] -= cz
# def generate_circle_points(x, y, z, a, b, c, u, v, w, n):
# """
# Returns list of 3d coordinates of points on a circle using the
# Rodrigues rotation formula.
#
# see *Murray, G. (2013). Rotation About an Arbitrary Axis in 3 Dimensions*
# for details
#
# :param x: x coordinate of a point somewhere on the circle
# :param y: y coordinate of a point somewhere on the circle
# :param z: z coordinate of a point somewhere on the circle
# :param a: x coordinate of the center
# :param b: y coordinate of the center
# :param c: z coordinate of the center
# :param u: 1st element of a vector in the same plane as the circle
# :param v: 2nd element of a vector in the same plane as the circle
# :param w: 3rd element of a vector in the same plane as the circle
# :param n: number of points in the circle
#
# TODO: try simplification for a=b=c=0 (and do the translation in the main
# function)
# """
# points = []
# offset = 2 * pi / float(n)
# u_2 = u**2
# v_2 = v**2
# w_2 = w**2
# dst = u_2 + v_2 + w_2
# sqrtdst = sqrt(dst)
# uxvywz = - u*x - v*y - w*z
# b_v = b*v
# c_w = c*w
# a_u = a*u
# one = (a * (v_2 + w_2) - u*(b_v + c_w + uxvywz))
# two = (b * (u_2 + w_2) - v*(a_u + c_w + uxvywz))
# tre = (c * (u_2 + v_2) - w*(a_u + b_v + uxvywz))
# onep = sqrtdst * (-c*v + b*w - w*y + v*z)
# twop = sqrtdst * ( c*u - a*w + w*x - u*z)
# trep = sqrtdst * (-b*u + a*v - v*x + u*y)
# for k in range(int(n)):
# ang = k * offset
# cosang = cos(ang)
# dcosang = cosang * dst
# sinang = sin(ang)
# points.append([(one * (1 - cosang) + x * dcosang + onep * sinang) / dst,
# (two * (1 - cosang) + y * dcosang + twop * sinang) / dst,
# (tre * (1 - cosang) + z * dcosang + trep * sinang) / dst]
# )
# return points
def rotate_among_y_axis(x, y, z, angle):
    """
    Rotate an object given as three coordinate lists about the Y axis.

    :param angle: rotation angle in radians
    :returns: three new lists (x, y, z); inputs are left untouched
    """
    cos_a = cos(angle)
    sin_a = sin(angle)
    out_x, out_y, out_z = [], [], []
    for xi, yi, zi in zip(x, y, z):
        out_x.append(xi * cos_a + zi * sin_a)
        out_y.append(yi)
        out_z.append(zi * cos_a - xi * sin_a)
    return out_x, out_y, out_z
def find_angle_rotation_improve_x(x, y, z, center_of_mass):
    """
    Finds the rotation angle needed to face the longest edge of the molecule

    :param x: list of x coordinates
    :param y: list of y coordinates
    :param z: list of z coordinates
    :param center_of_mass: (x, y, z) tuple of the center of mass
    :returns: rotation angle in radians
    """
    # find most distant point from center of mass:
    coords = list(zip(*(x, y, z)))
    xdst, ydst, zdst = max(coords, key=lambda i: square_distance(i, center_of_mass))
    dist = distance((xdst, ydst, zdst), center_of_mass)
    # NOTE(review): ydst/zdst are unused below; the formula appears meant to
    # align the farthest particle with the X axis -- confirm the derivation.
    angle = acos((-xdst**2 - (dist + sqrt(dist**2 - xdst**2))) /
                 (2 * dist**2) + 1)
    return angle
def generate_circle_points(x, y, z, u, v, w, n):
"""
Returns list of 3d coordinates of points on a circle using the
Rodrigues rotation formula.
see *Murray, G. (2013). Rotation About an Arbitrary Axis in 3 Dimensions*
for details
:param x: x coordinate of a point somewhere on the circle
:param y: y coordinate of a point somewhere on the circle
:param z: z coordinate of a point somewhere on the circle
:param a: x coordinate of the center
:param b: y coordinate of the center
:param c: z coordinate of the center
:param u: 1st element of a vector in the same plane as the circle
:param v: 2nd element of a vector in the same plane as the circle
:param w: 3rd element of a vector in the same plane as the circle
:param n: number of points in the circle
TODO: try simplification for a=b=c=0 (and do the translation in the main
function)
"""
points = []
offset = 2 * pi / float(n)
u_2 = u**2
v_2 = v**2
w_2 = w**2
dst = u_2 + v_2 + w_2
sqrtd | st = sqrt(dst)
uxvywz = - u*x - v*y - w*z
one = (-u * (uxvywz))
two = (-v * (uxvywz))
tre = (-w * (uxvywz))
onep = sqrtdst * (- w*y + v*z)
twop = sqrtdst * (+ w*x - u*z)
trep = sqrtdst * (- v*x + u*y)
for k in range(int(n)):
ang = k * offset
cosang = cos(ang)
dcosang = cosang * dst
sinang = sin(ang)
points.append([(one * (1 - cosang) + x * dcosang + onep * si | nang) / dst,
(two * (1 - cosang) + y * dcosang + twop * sinang) / dst,
(tre * (1 - cosang) + z * dcosang + trep * sinang) / dst]
)
return points
def square_distance(part1, part2):
    """
    Squared Euclidean distance between two particles.

    :param part1: (x, y, z) coordinate sequence
    :param part2: (x, y, z) coordinate sequence
    :returns: square distance between the two points in space
    """
    dx = part1[0] - part2[0]
    dy = part1[1] - part2[1]
    dz = part1[2] - part2[2]
    return dx * dx + dy * dy + dz * dz
def fast_square_distance(x1, y1, z1, x2, y2, z2):
    """
    Squared Euclidean distance between two coordinates given as scalars.

    :returns: square distance between the two points in space
    """
    dx, dy, dz = x1 - x2, y1 - y2, z1 - z2
    return dx ** 2 + dy ** 2 + dz ** 2
def distance(part1, part2):
    """
    Euclidean distance between two particles.

    :param part1: coordinate in list format (x, y, z)
    :param part2: coordinate in list format (x, y, z)
    :returns: distance between the two points in space
    """
    x1, y1, z1 = part1[0], part1[1], part1[2]
    x2, y2, z2 = part2[0], part2[1], part2[2]
    return sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2)
def angle_between_3_points(point1, point2, point3):
"""
Calculates the angle between 3 particles
Given three particles A, B and C, the angle g (angle ACB, shown below):
::
A
/|
/i|
c/ |
/ |
/ |
B )g |b
\ |
|
donkirkby/live-py-plugin | setup.py | Python | mit | 1,514 | 0 | import setuptools
with open("space_tracer.md") as f:
long_description = f.read()
about = {}
with open("plugin/PySrc/space_tracer/about.py") as f:
exec(f.read(), about)
# noinspection PyUnresolvedReferences
setuptools.setup(
name=about['__title__'],
version=about['__version__'],
author=about['__author__'],
author_email=about['__author_email__'],
description=about['__description__'],
long_description=long_description,
long_description_content_type="text/markdown",
url=about['__url__'],
packages=setuptools.find_packages('plugin/PySrc/'),
package_dir={'': 'plugin/PySrc/'},
entry_points=dict(console_scripts=[
'space_tracer = space_tracer:main']),
classifiers=[ # from https://pypi.org/classifiers/
| "Intended Audience :: Developers",
"Topic :: Software Development :: Debuggers",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Deve | lopment Status :: 5 - Production/Stable",
"Environment :: Console"
],
project_urls={
'Bug Reports': 'https://github.com/donkirkby/live-py-plugin/issues',
'Source': 'https://github.com/donkirkby/live-py-plugin'}
)
|
schleichdi2/OpenNfr_E2_Gui-6.0 | lib/python/Components/SearchCovers.py | Python | gpl-2.0 | 32,601 | 0.033279 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from Plugins.Plugin import PluginDescriptor
from Components.ActionMap import *
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmap, MultiContentEntryPixmapAlphaTest
from Components.Pixmap import Pixmap
from Components.AVSwitch import AVSwitch
from Components.PluginComponent import plugins
from Components.config import *
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.GUIComponent import GUIComponent
from Components.Sources.List import List
from Components.MenuList import MenuList
from Components.FileList import FileList, FileEntryComponent
from Tools.Directories import SCOPE_CURRENT_SKIN, resolveFilename, fileExists
from Tools.LoadPixmap import LoadPixmap
from Tools.BoundFunction import boundFunction
from Tools.Directories import pathExists, fileExists, SCOPE_SKIN_IMAGE, resolveFilename
from enigma import RT_HALIGN_LEFT, eListboxPythonMultiContent, eServiceReference, eServiceCenter, gFont
from enigma import eListboxPythonMultiContent, eListbox, gFont, RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_HALIGN_CENTER, loadPNG, RT_WRAP, eConsoleAppContainer, eServiceCenter, eServiceReference, getDesktop, loadPic, loadJPG, RT_VALIGN_CENTER, gPixmapPtr, ePicLoad, eTimer
from Screens.Screen import Screen
from Screens.MessageBox import Me | ssageBox
from Screens.VirtualKeyBoard import VirtualKeyBoard
from twisted.web.client import getPage
from twisted.web.client import downloadPage
from twisted.web import client, error as weberror
from twisted.internet import reactor
from twisted.internet import defer
from urllib import urlencode
import sys | , os, re, shutil, time
from threading import Thread
from os import listdir as os_listdir, path as os_path
from re import compile
import re
try:
from enigma import eMediaDatabase
isDreamOS = True
except:
try:
file = open("/proc/stb/info/model", "r")
dev = file.readline().strip()
file.close()
if dev == "dm7080":
isDreamOS = True
elif dev == "dm820":
isDreamOS = True
else:
isDreamOS = False
except:
isDreamOS = False
def getCoverPath():
    """Collect candidate cover directories: a fixed default list plus a
    'cover' folder on every locally mounted volume under /media.

    Network mounts (nfs/nfs4/smbfs/cifs) and the standard media names are
    added to a block list so they are not auto-detected a second time.
    """
    blockList = ['hdd','cf','usb','sdcard']
    dirList = os_listdir("/media")
    coverPaths = ['/usr/share/enigma2/cover/', '/data/cover/', '/media/cf/cover/', '/media/usb/cover/', '/media/sdcard/cover/', '/media/hdd/cover/']
    if fileExists("/proc/mounts"):
        mountsFile = open("/proc/mounts" ,"r")
        for line in mountsFile:
            entry = line.split()
            # entry[2] is the filesystem type, entry[1] the mount point.
            if entry[2] in ["nfs", "nfs4", "smbfs", "cifs"]:
                if entry[1].startswith("/media/"):
                    blockList.append(entry[1][7:])
        mountsFile.close()
    for dir in dirList:
        if dir in blockList:
            print dir, blockList
            continue
        # Accept real mount points and symlinks that resolve to one.
        if os_path.ismount("/media/%s" %(dir)) or (os_path.islink("/media/%s" %(dir)) and os_path.ismount(os_path.realpath("/media/%s" %(dir)))):
            path = "/media/%s/cover/" % (dir)
            coverPaths.append(path)
    return coverPaths
pname = "Find MovieList Covers"
pversion = "0.5 OpenNfr-mod"
config.movielist.cover = ConfigSubsection()
config.movielist.cover.themoviedb_coversize = ConfigSelection(default="w185", choices = ["w92", "w185", "w500", "original"])
config.movielist.cover.followsymlink = ConfigYesNo(default = False)
config.movielist.cover.getdescription = ConfigYesNo(default = False)
config.movielist.cover.bgtimer = ConfigYesNo(default = False)
config.movielist.cover.bgtime = ConfigInteger(3, (1,24))
config.movielist.cover.savestyle = ConfigSelection(default="movielist", choices = ["movielist", "opennfr"])
config.movielist.cover.coverpath = ConfigSelection(default = "/media/hdd/cover/", choices = getCoverPath())
config.movielist.cover.scanpath = ConfigText(default = "/media/hdd/movie/", fixed_size = False)
fileExtensionsRemove = "(.avi|.mkv|.divx|.f4v|.flv|.img|.iso|.m2ts|.m4v|.mov|.mp4|.mpeg|.mpg|.mts|.vob|.wmv)"
def cleanFile(text):
    """Strip release-tag noise from a movie file name for title search.

    Drops the trailing video file extension, removes known scene tags
    (resolution, codec, language markers, ...) when framed by separator
    characters, then normalizes the remaining separators to spaces.
    """
    cutlist = ['x264','720p','1080p','1080i','PAL','GERMAN','ENGLiSH','WS','DVDRiP','UNRATED','RETAIL','Web-DL','DL','LD','MiC','MD','DVDR','BDRiP','BLURAY','DTS','UNCUT','ANiME',
               'AC3MD','AC3','AC3D','TS','DVDSCR','COMPLETE','INTERNAL','DTSD','XViD','DIVX','DUBBED','LINE.DUBBED','DD51','DVDR9','DVDR5','h264','AVC',
               'WEBHDTVRiP','WEBHDRiP','WEBRiP','WEBHDTV','WebHD','HDTVRiP','HDRiP','HDTV','ITUNESHD','REPACK','SYNC']
    #text = text.replace('.wmv','').replace('.flv','').replace('.ts','').replace('.m2ts','').replace('.mkv','').replace('.avi','').replace('.mpeg','').replace('.mpg','').replace('.iso','').replace('.mp4','').replace('.jpg','').replace('.txt','')
    text = re.sub(fileExtensionsRemove + "$", '', text)
    for word in cutlist:
        # A tag only counts when framed by separators; the '+' placeholder
        # inserted here is stripped again below.
        text = re.sub('(\_|\-|\.|\+|\s)'+word+'(\_|\-|\.|\+|\s)','+', text, flags=re.I)
    text = text.replace('.',' ').replace('-',' ').replace('_',' ').replace('+','')
    return text
class BackgroundCoverScanner(Thread):
def __init__(self, session):
assert not BackgroundCoverScanner.instance, "only one MovieDataUpdater instance is allowed!"
BackgroundCoverScanner.instance = self # set instance
self.session = session
self.scanning = False
self.bgTimerRunning = False
self.fileExtensions = [".avi",".mkv",".divx",".f4v",".flv",".img",".iso",".m2ts",".m4v",".mov",".mp4",".mpeg",".mpg",".mts",".vob",".wmv"]
Thread.__init__(self)
def startTimer(self):
if config.movielist.cover.bgtimer.value:
self.bgTimer = eTimer()
if isDreamOS:
self.bgTimer_conn = self.bgTimer.timeout.connect(self.getFileList)
else:
self.bgTimer.callback.append(self.getFileList)
self.bgTimer.start(3600000 * int(config.movielist.cover.bgtime.value))
self.bgTimerRunning = True
print "----------------------- S t a r t - T i m e r -------------------------"
def stopTimer(self):
if self.bgTimerRunning:
if not config.movielist.cover.bgtimer.value:
self.bgTimer.stop()
self.bgTimerRunning = False
print "----------------------- S t o p - T i m e r -------------------------"
def setCallbacks(self, callback_infos, callback_found, callback_notfound, callback_error, callback_menulist, callback_finished):
# self.msgCallback, self.foundCallback, self.notFoundCallback, self.errorCallback, self.listCallback, self.msgDone
self.callback_infos = callback_infos
self.callback_found = callback_found
self.callback_notfound = callback_notfound
self.callback_error = callback_error
self.callback_menulist = callback_menulist
self.callback_finished = callback_finished
def getFileList(self, background=True):
self.background = background
if not self.scanning:
print "----------------------- Cover Background Scanner -------------------------"
print "Scan Path: %s" % config.movielist.cover.scanpath.value
self.scanning = True
if config.movielist.cover.savestyle.value == "opennfr":
if not pathExists(config.movielist.cover.coverpath.value):
shutil.os.mkdir(config.movielist.cover.coverpath.value)
if not self.background:
self.callback_infos("Scanning: '%s'" % str(config.movielist.cover.scanpath.value))
data = []
symlinks_dupe = []
for root, dirs, files in os.walk(config.movielist.cover.scanpath.value, topdown=False, onerror=None, followlinks=config.movielist.cover.followsymlink.value):
if not root.endswith('/'):
root += "/"
slink = os.path.realpath(root)
if not slink in symlinks_dupe:
symlinks_dupe.append(slink)
else:
break
for file in files:
filename_org = os.path.join(root, file)
if any([file.endswith(x) for x in self.fileExtensions]):
if config.movielist.cover.savestyle.value == "opennfr":
filename = self.getMovieSaveFile(file)
if not filename is None:
filename = "%s%s.jpg" % (config.movielist.cover.coverpath.value, filename)
else:
continue
else:
filename = re.sub(fileExtensionsRemove + "$", '.jpg', filename_org)
if not fileExists(filename):
if os.path.isdir(filename_org):
url = 'http://api.themo |
weilneb/twitter-utils | twutils/__init__.py | Python | mit | 48 | 0.020833 | #!/usr/bin/ | env python
from TweetGrabber imp | ort * |
cloudteampro/juma-editor | JumaEditor.app/Contents/Resources/juma_bin.py | Python | mit | 1,472 | 0.036005 | #!/usr/bin/env python
import os
import os.path
import platform
import sys
##----------------------------------------------------------------##
def isPythonFrozen():
    """Return True when running from a frozen (bundled) executable."""
    try:
        sys.frozen
    except AttributeError:
        return False
    return True
def getMainModulePath():
if isPythonFrozen():
p = os.path.dirname(unicode(sys.executable, sys.getfilesystemencoding()))
if platform.system() == u'Darwin':
return os.environ.get('JUMA_IDE_PATH') or os.path.realpath( p + '/../../..' )
elif platform.system() == u'Windows':
return p
else:
return p
if __name__ == 'main':
mainfile = os.path.realpath( __file__ )
return os.pa | th.dirname( mainfile )
else:
import __main__
if hasattr( __main__, "__gii_path__" ):
return __main__.__gii_path__
else:
mainfile = os.path.realpath( __main__.__file__ )
return os.path.dirname( mainfile )
##----------------------------------------------------------------##
jumapath = getMainModulePath() + '/editor/lib'
thirdPartyPathBase | = getMainModulePath() + '/editor/lib/3rdparty'
thirdPartyPathCommon = thirdPartyPathBase + '/common'
if platform.system() == u'Darwin':
thirdPartyPathNative = thirdPartyPathBase + '/osx'
else:
thirdPartyPathNative = thirdPartyPathBase + '/windows'
sys.path.insert( 0, jumapath )
sys.path.insert( 2, thirdPartyPathNative )
sys.path.insert( 1, thirdPartyPathCommon )
##----------------------------------------------------------------##
import juma
def main():
    """Entry point: boot the juma editor runtime."""
    juma.startup()

if __name__ == '__main__':
    main()
matt-graham/hmc | mici/autodiff.py | Python | mit | 2,376 | 0.000842 | """Automatic differentation fallback for constructing derivative functions."""
import mici.autograd_wrapper as autograd_wrapper
"""List of names of valid differential operators.
Any automatic differentiation framework wrapper module will need to provide
all of these operators as callables (with a single function as argument) to
fully support all of the required derivative functions.
"""
DIFF_OPS = [
# vector Jacobian product and value
"vjp_and_value",
# gradient and value for scalar valued functions
"grad_and_value",
# Hessian matrix, gradient and value for scalar valued functions
"hessian_grad_and_value",
# matrix Tressian product, gradient and value for scalar valued
# functions
"mtp_hessian_grad_and_value",
# Jacobian matrix and value for vector valued functions
"jacobian_and_value",
# matrix Hessian product, Jacobian matrix and value for vector valued
# functions
"mhp_jacobian_and_value",
]
def autodiff_fallback(diff_func, func, diff_op_name, name):
"""Generate derivative function automatically if not provided.
Uses automatic differentiation to generate a function corresponding to a
differential operator applied to a functio | n if an alternative
implementation of the derivative function has not been provided.
Args:
diff_func (None or Callable): Either a callable implementing the
required derivative function or `None` if none was provided.
func (Callable): Function to differentiate.
diff_op_name (str): String specifying name of differential operator
from automatic differentiation framework wrapper to use to generate
| required derivative function.
name (str): Name of derivative function to use in error message.
Returns:
Callable: `diff_func` value if not `None` otherwise generated
derivative of `func` by applying named differential operator.
"""
if diff_func is not None:
return diff_func
elif diff_op_name not in DIFF_OPS:
raise ValueError(f"Differential operator {diff_op_name} is not defined.")
elif autograd_wrapper.AUTOGRAD_AVAILABLE:
return getattr(autograd_wrapper, diff_op_name)(func)
elif not autograd_wrapper.AUTOGRAD_AVAILABLE:
raise ValueError(f"Autograd not available therefore {name} must be provided.")
|
stormsson/procedural_city_generation_wrapper | vendor/josauder/procedural_city_generation/roadmap/iteration.py | Python | mpl-2.0 | 1,126 | 0.009769 | # -*- coding: utf-8 -*-
from __future__ import division
fro | m procedural_city_generation.roadmap.getSuggestion import getSuggestion
from procedural_city_generation.roadmap.check import check
from procedural_city_generation.additional_stuff.Singleton import Singleton
singleton=Singleton("roadmap")
def iteration(front):
"""
Gets Called in the mainloop.
Manages the front and newfront and the queue
Parameters
----------
front : list<Vertex>
Returns
-------
newfront : list<Vertex>
"""
newfront=[]
| for vertex in front:
for suggested_vertex in getSuggestion(vertex):
newfront=check(suggested_vertex, vertex, newfront)
#Increments index of each element in queue
singleton.global_lists.vertex_queue=[[x[0], x[1]+1] for x in singleton.global_lists.vertex_queue]
#Finds elements in queue which are to be added into the newfront
while singleton.global_lists.vertex_queue!=[] and singleton.global_lists.vertex_queue[0][1]>=singleton.minor_road_delay:
newfront.append(singleton.global_lists.vertex_queue.pop(0)[0])
return newfront
|
rgayon/plaso | tests/test_lib.py | Python | apache-2.0 | 6,202 | 0.004676 | # -*- coding: utf-8 -*-
"""Shared functions and classes for testing."""
from __future__ import unicode_literals
import io
import os
import shutil
import re
import tempfile
import unittest
from dfdatetime import time_elements
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver as path_spec_resolver
# The path to top of the Plaso source tree.
PROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# The paths below are all derived from the project path directory.
# They are enumerated explicitly here so that they can be overwritten for
# compatibility with different build systems.
ANALYSIS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'analysis')
ANALYZERS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'analyzers')
CLI_HELPERS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'cli', 'helpers')
CONTAINERS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'containers')
DATA_PATH = os.path.join(PROJECT_PATH, 'data')
OUTPUT_PATH = os.path.join(PROJECT_PATH, 'plaso', 'output')
PARSERS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'parsers')
PREPROCESSORS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'preprocessors')
TEST_DATA_PATH = os.path.join(PROJECT_PATH, 'test_data')
def GetTestFilePath(path_segments):
"""Retrieves the path of a test file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Ret | urns:
str: path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(TEST_DATA_PATH, *path_segments)
def CopyTimestampFromSring(time_string):
"""Copies a date and time string to a Plas | o timestamp.
Args:
time_string (str): a date and time string formatted as:
"YYYY-MM-DD hh:mm:ss.######[+-]##:##", where # are numeric digits
ranging from 0 to 9 and the seconds fraction can be either 3 or 6
digits. The time of day, seconds fraction and timezone offset are
optional. The default timezone is UTC.
Returns:
int: timestamp which contains the number of microseconds since January 1,
1970, 00:00:00 UTC.
Raises:
ValueError: if the time string is invalid or not supported.
"""
date_time = time_elements.TimeElementsInMicroseconds()
date_time.CopyFromDateTimeString(time_string)
return date_time.GetPlasoTimestamp()
class BaseTestCase(unittest.TestCase):
"""The base test case."""
# Show full diff results, part of TestCase so does not follow our naming
# conventions.
maxDiff = None
def _GetTestFileEntry(self, path_segments):
"""Creates a file entry that references a file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
dfvfs.FileEntry: file entry.
Raises:
SkipTest: if the path inside the test data directory does not exist and
the test should be skipped.
"""
test_file_path = self._GetTestFilePath(path_segments)
self._SkipIfPathNotExists(test_file_path)
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
return path_spec_resolver.Resolver.OpenFileEntry(path_spec)
def _GetDataFilePath(self, path_segments):
"""Retrieves the path of a file in the data directory.
Args:
path_segments (list[str]): path segments inside the data directory.
Returns:
str: path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(DATA_PATH, *path_segments)
def _GetTestFilePath(self, path_segments):
"""Retrieves the path of a test file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
str: path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(TEST_DATA_PATH, *path_segments)
def _SkipIfPathNotExists(self, path):
"""Skips the test if the path does not exist.
Args:
path (str): path of a test file.
Raises:
SkipTest: if the path does not exist and the test should be skipped.
"""
if not os.path.exists(path):
filename = os.path.basename(path)
raise unittest.SkipTest('missing test file: {0:s}'.format(filename))
class ImportCheckTestCase(BaseTestCase):
"""Base class for tests that check modules are imported correctly."""
_FILENAME_REGEXP = re.compile(r'^[^_].*\.py$')
def _AssertFilesImportedInInit(self, path, ignorable_files):
"""Checks that files in path are imported in __init__.py
Args:
path (str): path to directory containing an __init__.py file and other
Python files which should be imported.
ignorable_files (list[str]): names of Python files that don't need to
appear in __init__.py. For example, 'manager.py'.
"""
init_path = '{0:s}/__init__.py'.format(path)
with io.open(init_path, mode='r', encoding='utf-8') as init_file:
init_content = init_file.read()
for file_path in os.listdir(path):
filename = os.path.basename(file_path)
if filename in ignorable_files:
continue
if self._FILENAME_REGEXP.search(filename):
module_name, _, _ = filename.partition('.')
import_expression = re.compile(r' import {0:s}\b'.format(module_name))
self.assertRegex(
init_content, import_expression,
'{0:s} not imported in {1:s}'.format(module_name, init_path))
class TempDirectory(object):
"""Class that implements a temporary directory."""
def __init__(self):
"""Initializes a temporary directory."""
super(TempDirectory, self).__init__()
self.name = ''
def __enter__(self):
"""Make this work with the 'with' statement."""
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, exception_type, value, traceback):
"""Make this work with the 'with' statement."""
shutil.rmtree(self.name, True)
|
HiSPARC/station-software | user/python/Lib/bsddb/test/test_replication.py | Python | gpl-3.0 | 21,476 | 0.01071 | """TestCases for distributed transactions.
"""
import os
import time
import unittest
from test_all import db, test_support, have_threads, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
class DBReplication(unittest.TestCase) :
def setUp(self) :
self.homeDirMaster = get_new_environment_path()
self.homeDirClient = get_new_environment_path()
self.dbenvMaster = db.DBEnv()
self.dbenvClient = db.DBEnv()
# Must use "DB_THREAD" because the Replication Manager will
# be executed in other threads but will use the same environment.
# http://forums.oracle.com/forums/thread.jspa?threadID=645788&tstart=0
self.dbenvMaster.open(self.homeDirMaster, db.DB_CREATE | db.DB_INIT_TXN
| db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0666)
self.dbenvClient.open(self.homeDirClient, db.DB_CREATE | db.DB_INIT_TXN
| db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0666)
self.confirmed_master=self.client_startupdone=False
def confirmed_master(a,b,c) :
if b==db.DB_EVENT_REP_MASTER :
self.confirmed_master=True
def client_startupdone(a,b,c) :
if b==db.DB_EVENT_REP_STARTUPDONE :
self.client_startupdone=True
self.dbenvMaster.set_event_notify(confirmed_master)
self.dbenvClient.set_event_notify(client_startupdone)
#self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
#self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
self.dbMaster = self.dbClient = None
def tearDown(self):
if self.dbClient :
self.dbClient.close()
if self.dbMaster :
self.dbMaster.close()
# Here we assign dummy event handlers to allow GC of the test object.
# Since the dummy handler doesn't use any outer scope variable, it
# doesn't keep any reference to the test object.
def dummy(*args) :
pass
self.dbenvMaster.set_event_notify(dummy)
self.dbenvClient.set_event_notify(dummy)
self.dbenvClient.close()
self.dbenvMaster.close()
test_support.rmtree(self.homeDirClient)
test_support.rmtree(self.homeDirMaster)
class DBReplicationManager(DBReplication) :
def test01_basic_replication(self) :
master_port = test_support.find_unused_port()
client_port = test_support.find_unused_port()
if db.version() >= (5, 2) :
self.site = self.dbenvMaster.repmgr_site("127.0.0.1", master_port)
self.site.set_config(db.DB_GROUP_CREATOR, True)
self.site.set_config(db.DB_LOCAL_SITE, True)
self.site2 = self.dbenvMaster.repmgr_site("127.0.0.1", client_port)
self.site3 = self.dbenvClient.repmgr_site("127.0.0.1", master_port)
self.site3.set_config(db.DB_BOOTSTRAP_HELPER, True)
self.site4 = self.dbenvClient.repmgr_site("127.0.0.1", client_port)
self.site4.set_config(db.DB_LOCAL_SITE, True)
d = {
db.DB_BOOTSTRAP_HELPER: [False, False, True, False],
db.DB_GROUP_CREATOR: [True, False, False, False],
db.DB_LEGACY: [False, False, False, False],
db.DB_LOCAL_SITE: [True, False, False, True],
db.DB_REPMGR_PEER: [False, False, False, False ],
}
for i, j in d.items() :
for k, v in \
zip([self.site, self.site2, self.site3, self.site4], j) :
if v :
self.assertTrue(k.get_config(i))
else :
self.assertFalse(k.get_config(i))
self.assertNotEqual(self.site.get_eid(), self.site2.get_eid())
self.assertNotEqual(self.site3.get_eid(), self.site4.get_eid())
for i, j in zip([self.site, self.site2, self.site3, self.site4], \
[master_port, client_port, master_port, client_port]) :
addr = i.get_address()
self.assertEqual(addr, ("127.0.0.1", j))
for i in [self.site, self.site2] :
self.assertEqual(i.get_address(),
self.dbenvMaster.repmgr_site_by_eid(i.get_eid()).get_address())
for i in [self.site3, self.site4] :
self.assertEqual(i.get_address(),
self.dbenvClient.repmgr_site_by_eid(i.get_eid()).get_address())
else :
self.dbenvMaster.repmgr_set_local_site("127.0.0.1", master_port)
self.dbenvClient.repmgr_set_local_site("127.0.0.1", client_port)
self.dbenvMaster.repmgr_add_remote_site("127.0.0.1", client_port)
self.dbenvClient.repmgr_add_remote_site("127.0.0.1", master_port)
self.dbenvMaster.rep_set_nsites(2)
self.dbenvClient.rep_set_nsites(2)
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_priority(0)
self.dbenvMaster.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100123)
self.dbenvClient.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100321)
self.assertEqual(self.dbenvMaster.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100123)
self.assertEqual(self.dbenvClient.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100321)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100234)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100432)
self.assertEqual(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100234)
self.assertEqual(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100432)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100345)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100543)
self.assertEqual(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100345)
self.assertEqual(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100543)
self.dbenvMaster.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
self.dbenvClient.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
self.dbenvMaster.repmgr_start(1, db.DB_REP_MASTER);
self.dbenvClient.repmgr_start(1, db.DB_REP_CLIENT);
self.assertEqual(self.dbenvMaster.rep_get_nsites(),2)
self.assertEqual(self.dbenvClient.rep_get_nsites(),2)
self.assertEqual(self.dbenvMaster.rep_get_priority(),10)
self.assertEqual(self.dbenvClient.rep_get_priority(),0)
self.assertEqual(self.dbenvMaster.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
self.assertEqual(self.dbenvClient.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
# The timeout | is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
timeout = time.time()+60
while (time.time()<timeout) and not (self.confirmed_master and self.client_startupdone) :
time.sleep(0.02)
# self.client_startupdone does not always get set to True within
# the timeo | ut. On windows this may be a deep issue, on other
# platforms it is likely just a timing issue, especially on slow
# virthost buildbots (see issue 3892 for more). Even though
# the timeout triggers, the rest of this test method usually passes
# (but not all of it always, see below). So we just note the
# timeout on stderr and keep soldering on.
if time.time()>timeout:
import sys
print >> sys.stderr, ("XXX: tim |
Purg/SMQTK | python/smqtk/algorithms/__init__.py | Python | bsd-3-clause | 1,219 | 0.005742 | from smqtk.utils import SmqtkObject
from smqtk.utils import Configurable, plugin |
__all__ = [
'SmqtkAlgorithm',
'Classifier', 'SupervisedClassifier', 'get_classifier_impls',
'DescriptorGenerator', 'get_descriptor_generator_impls',
'NearestNeighborsIndex', 'g | et_nn_index_impls',
'HashIndex', 'get_hash_index_impls',
'LshFunctor', 'get_lsh_functor_impls',
'RelevancyIndex', 'get_relevancy_index_impls',
]
class SmqtkAlgorithm (SmqtkObject, Configurable, plugin.Pluggable):
"""
Parent class for all algorithm interfaces.
"""
@property
def name(self):
"""
:return: The name of this class type.
:rtype: str
"""
return self.__class__.__name__
# Import module abstracts and plugin getter functions
from .classifier import Classifier, SupervisedClassifier, get_classifier_impls
from .descriptor_generator import DescriptorGenerator, get_descriptor_generator_impls
from .nn_index import NearestNeighborsIndex, get_nn_index_impls
from .nn_index.hash_index import HashIndex, get_hash_index_impls
from .nn_index.lsh.functors import LshFunctor, get_lsh_functor_impls
from .relevancy_index import RelevancyIndex, get_relevancy_index_impls
|
juliotrigo/django-accounts | accounts/context_processors.py | Python | bsd-3-clause | 544 | 0 | # -*- coding: utf-8 -*-
"""accounts | context processors.
A set of request processors that return dictionaries to be merg | ed into a
template context. Each function takes the request object as its only parameter
and returns a dictionary to add to the context.
These are referenced from the setting TEMPLATE_CONTEXT_PROCESSORS and used by
RequestContext.
"""
from __future__ import unicode_literals
def url(request):
"""
Adds url-related context variables to the context.
"""
return {'get_full_path': request.get_full_path()}
|
cuoretech/syncform | database_config.py | Python | gpl-3.0 | 2,408 | 0.033223 | #**************************************
# Current Database Info
#**************************************
# To Access...
# from database_config import db_config
# from py2neo import neo4j
# graph_db = neo4j.GraphDatabaseService(db_config['uri'])
db_config = {}
#db_config['address'] = "http://162.212.130.189"
db_config['address'] = "http://127.0.0.1"
db_config['port'] = "7474"
db_config['ending'] = "db/data"
db_config['uri'] = db_config['address'] + ":" \
+ db_config['port'] + "/" \
+ db_config['ending'] + "/"
db_config['username'] = "your_username_here"
db_config['password'] = "your_password_here"
#Moar constants
#Relationship Constants
REL_HASEVENT = "has_event"
REL_HASTASK = "has_task"
REL_HASSUBTASK = "has_subtask"
REL_HASDEADLINE = "has_deadline"
REL_HASCALENDAR = "has_calendar"
REL_HASSUBCALENDAR = "has_subcalendar"
REL_HASOWNER = "has_owner"
REL_HASGROUP = "has_group"
REL_HASGENTASK = "has_gentask"
REL_HASTASK = "has_task"
REL_HASFILE = "has_file"
REL_HASPROJECT = "has_project"
REL_HASPOST = "has_post"
REL_ASSIGNEDTO = "assigned_to"
REL_CREATEDBY = "created_by"
REL_INVITED = "invited"
REL_HASCOMMENT = "has_comment"
REL_HASWORKSPACE = "has_workspace"
REL_HASDEP = "has_dept"
REL_HASTITLE = "has_title"
REL_HASUSER = "has_user"
REL_ISMEMBER = "is_member"
REL_HASBLOG = "has_blog"
REL_UNASSIGNED = "is_unassigned"
REL_UNCONFIRMED = "is_unconfirmed"
#Label Constants
LBL_COMPANY = "Company"
LBL_DEPARTMENT = "Department"
LBL_TITLES = "Titles"
LBL_USER = "User"
LBL_CAL = "Calendar"
LBL_EVENT = "Event"
LBL_TASK = "Task"
LBL_SUBTASK = "SubTask"
LBL_DEADLINE = "Deadline"
LBL_GROUP = "Group"
LBL_WORKSPACE = "Workspace"
LBL_PROJECT = "Project"
LBL_FILE = "File"
LBL_BLOG = "Blog"
LBL_POST = "Post"
LBL_COMMENT = "Comment"
#Index Constants (to be r | eplaced by Label Constants at a later time)
IN | D_COMP = "Company"
IND_DEP = "Departments"
IND_TITLE = "Titles"
IND_USER = "Users"
IND_UNASSIGNED = "Unassigned"
IND_UNCONFIRMED = "Unconfirmed"
IND_CAL = "Calendars"
IND_EVENT = "Events"
IND_TASK = "Tasks"
IND_SUBTASK = "Subtasks"
IND_WORKSPACE = "Workspaces"
IND_PROJECT = "Projects"
IND_FILE = "Files"
IND_BLOG = "Blogs"
IND_POST = "Posts"
IND_COMMENT = "Comments"
|
alinko32a/pimouse_ros | scripts/buzzer3.py | Python | gpl-3.0 | 445 | 0.006742 | #!/usr/bin/env python
import rospy
from std_msgs.msg import UInt16
def write_freq(hz=0):
bfile = "/dev/rtbuzzer0"
try:
with open(bfile, "w") as f:
f.write | (str(hz) + "\n")
| except IOError:
rospy.logerr("can't write to " + bfile)
def recv_buzzer(data):
write_freq(data.data)
if __name__ == '__main__':
rospy.init_node('buzzer')
rospy.Subscriber("buzzer", UInt16, recv_buzzer)
rospy.spin()
|
rainest/dance-partner-matching | networkx/readwrite/tests/test_yaml.py | Python | bsd-2-clause | 1,300 | 0.024615 | """
Unit tests for yaml.
"""
import os,tempfile
from nose import SkipTest
from nose.tools import assert_true,assert_equal
import networkx as nx
class TestYaml(object):
@classmethod
def setupClass(cls):
global yaml
try:
import yaml
except I | mportError:
raise SkipTest('y | aml not available.')
def setUp(self):
self.build_graphs()
def build_graphs(self):
self.G = nx.Graph(name="test")
e = [('a','b'),('b','c'),('c','d'),('d','e'),('e','f'),('a','f')]
self.G.add_edges_from(e)
self.G.add_node('g')
self.DG = nx.DiGraph(self.G)
self.MG = nx.MultiGraph()
self.MG.add_weighted_edges_from([(1,2,5),(1,2,5),(1,2,1),(3,3,42)])
def assert_equal(self, G, data=False):
(fd, fname) = tempfile.mkstemp()
nx.write_yaml(G, fname)
Gin = nx.read_yaml(fname);
assert_equal(sorted(G.nodes()),sorted(Gin.nodes()))
assert_equal(G.edges(data=data),Gin.edges(data=data))
os.close(fd)
os.unlink(fname)
def testUndirected(self):
self.assert_equal(self.G, False)
def testDirected(self):
self.assert_equal(self.DG, False)
def testMultiGraph(self):
self.assert_equal(self.MG, True)
|
mediatum/mediatum | core/test/test_legacy_update_create.py | Python | gpl-3.0 | 1,051 | 0.002854 | # -*- coding: utf-8 -*-
"""
:copyright: (c) 2016 by the mediaTUM authors
:license: GPL3, see COPYING for details
"""
from __future__ import absolute_import
from utils.date import parse_date
from core.test.factories import DocumentFactory
# adding more test functions may fail, see the comments for the following imports
from core.test.test_version import teardown_module, session
def test_create_update(session, req, guest_user, some_user, enable_athana_continuum_plugin):
session.commit()
req.app_cache = {}
req.session["user_id"] = some_user.id
| node = DocumentFactory()
session.add(node)
node["testattr"] = "new"
session.commit()
req.app_cache = {}
# well, guest users shouldn't update nodes, but it's ok for a test ;)
req.session["user_id"] = guest_user.id
node["test | attr"] = "changed"
session.commit()
assert node.creator == some_user.getName()
assert node.updateuser == guest_user.getName()
assert node.creationtime <= node.updatetime
assert parse_date(node.updatetime)
|
bgrig/Rice-Bioe-421-521-Final-Project | master.py | Python | gpl-2.0 | 7,297 | 0.009456 | #! /usr/bin/python
import serial
import os
import sys
import time
import re
import glob
#Output stdout to log file
#sys.stdout = open("log.txt", "a")
#Port Definitions
port = "/dev/ttyACM0"
baud = 115200
#counts
zCount = 0
#Regular Expression Compilation
xRE = re.compile(" X(-?[0-9]+[.]?[0-9]*)")
yRE = re.compile(" Y(-?[0-9]+[.]?[0-9]*)")
zRE = re.compile(" Z(-?[0-9]+[.]?[0-9]*)")
M114 = re.compile("Count X: ([0-9]+[.]?[0-9]*)Y:([0-9]+[.]?[0-9]*)Z:([0-9]+[.]?[0-9]*)")
Gmove = re.compile("^G[0-1]{1}|28")
#Timeout length
serTimeout = 1 #timeout for serial communication
wait = 1 #seconds to wait before releasing hold on rambo communications
# Z home coordinate
ZHOME = 105
#Initialize Log information
print("##################")
print(time.strftime("Log: %Y/%m/%d %I:%M:%S"))
print(port)
print("baudrate: " + str(baud) + "\n")
#Open serial connection
rambo = serial.Serial(port, baud, timeout=serTimeout)
rambo.open()
#Cycle through Marlin initial communications
timer = 0
while True:
response = rambo.read(10000)
if response != '':
timer = 0
print(response.strip())
else:
timer += 1
if timer == 3:
break
def isMove(line):
result = Gmove.search(line)
if result:
return True
else:
return False
def clearBuffer(ser):
timer = 0
while True:
response = ser.read(1000)
if response:
continue
elif timer == 1:
break
else:
timer += 1
def homeWait(zFinal, ser=rambo):
print("Z coordinate: " + str(zFinal))
time.sleep(1)
while True:
currPosition = ''
ser.write("M114\n")
timer = 0
while True:
response = ser.read(1000)
response.strip()
print("timer: " + str(timer))
if response != '':
currPosition = currPosition + response
print("Current position: " + currPosition)
timer = 0
elif timer == wait:
break
else:
timer += 1
if len(currPosition) < 2:
print("Current Position not found")
continue
currXYZ = M114.search(currPosition)
print(currPosition)
zCurr = currXYZ.group(3)
print("Current Z: " + str(zCurr))
if ((float(zCurr) - float(zFinal))**2 < 0.01):
global zCount
zCount = ZHOME
time.sleep(1)
break
def waitOnMove(line, se | r=rambo):
global zCount
print("waitOnMove Line: " + line.strip())
MoveCheck = isMove(lin | e)
if not MoveCheck:
print("Not a move")
return
print("Is a move")
if re.search("G28", line):
homeWait(ZHOME)
return
else:
#x_destination = xRE.search(line)
#y_destination = yRE.search(line)
z_search = zRE.search(line)
z_destination = z_search.group(1)
print(line)
print("Z coordinate: " + str(float(z_destination)))
z_destination = float(z_destination) + float(zCount)
time.sleep(1)
while True:
currPosition = ''
ser.write("M114\n")
timer = 0
while True:
response = ser.read(1000)
response.strip()
print("timer: " + str(timer))
if response != '':
currPosition = currPosition + response
print("Current position: " + currPosition)
timer = 0
elif timer == wait:
break
else:
timer += 1
if len(currPosition) < 2:
print("Current Position not found")
continues
currXYZ = M114.search(currPosition)
print(currPosition)
zCurr = currXYZ.group(3)
print("Current Z: " + str(zCurr))
print("Destination Z: " + str(z_destination))
if ((float(zCurr) - float(z_destination))**2 < 0.01):
zCount = z_destination
time.sleep(1)
break
def gcodeLine(line, ser=rambo):
ser.write(line)
print(line.strip())
waitOnMove(line, ser)
#clearBuffer(ser)
def initBackground():
os.system("sudo fbi -T 2 --noverbose ./Background/solid_black.jpg")
time.sleep(5)
return
def displayImage(imageName, seconds):
bashCommand = "sudo fbi -T 2 --noverbose --once -t {0} {1}".format(seconds, imageName)
os.system(bashCommand)
time.sleep(seconds+3)
def exitFBI():
os.system("sudo kill $(pgrep fbi)")
def loadSlideshow(dir,ext="png"):
pathname = "./{0}/*.{1}".format(dir, ext)
images = glob.glob(pathname)
images.sort()
return images
def parseHeader(f):
header = []
for line in f:
if re.search("Header End", line):
break
header.append(line)
return header
def parseGcode(f):
gcodeDict = {}
slice = -1
sliceGcode = []
delay = -1
for line in f:
if re.search(";<Slice> [0-9]+", line):
match = re.search("([0-9]+)", line)
slice = match.group(1)
#print("Slice: " + slice)
elif delay != -1 and re.search(";<Delay>", line):
continue
elif slice != -1 and re.search(";<Delay> [0-9]+", line):
match = re.search("([0-9]+)", line)
delay = match.group(1)
#print("Delay: " + delay)
sliceGcode.append(delay)
elif delay != -1 and re.search("[GM][0-9]+", line):
#print("Gcode: " + line)
match = re.search("([GM][0-9]+.+\n)", line)
if re.search("G[0-1]{1} ", line):
gcode = match.group(1)
sliceGcode.append(gcode)
elif delay != -1 and re.search("Pre.{1}Slice End", line):
#print("\n")
gcodeDict[slice] = sliceGcode
slice = -1
delay = -1
sliceGcode = []
for key in gcodeDict:
gcode = gcodeDict[key]
for i in range(0,len(gcode)):
line = gcode[i]
line = re.sub("^;", "", line, count=1)
gcode[i] = line
gcodeDict[key] = gcode
return gcodeDict
def loadGcode(filename, dir="Gcode"):
pathname = "./{0}/{1}".format(dir, filename)
f = open(pathname, "r")
#parse GCODE
header = parseHeader(f)
gcode = parseGcode(f)
return gcode
def main():
initBackground()
gcodeLine("G28 Z\n")
#gcodeLine("G21\n")
gcodeLine("G91\n")
gcodeLine("G0 Z-10\n")
images = loadSlideshow("Testing")
gcodeDict = loadGcode("0.3-0.5mm_holes_horizontal_short.gcode", ".")
for slice in range(0,len(gcodeDict)):
gcode = gcodeDict[str(slice)]
delay = gcode[0]
for i in range(1, len(gcode)):
line = gcode[i]
gcodeLine(line)
imageName = images[slice]
seconds = int(delay) / 1000
displayImage(imageName, seconds)
#Close rambo serial communications
rambo.close()
def main2():
images = loadSlideshow("Testing")
for image in images:
initBackground()
displayImage(imageName, 5)
main()
if '__name__' == '__main__':
main()
|
PabloCastellano/libreborme | borme/management/commands/importborme.py | Python | agpl-3.0 | 2,559 | 0 | from django.core.management.base import BaseCommand
from django.utils import timezone
import logging
import time
from borme.models import Config
from borme.parser.importer import import_borme_download
# from borme.parser.postgres import psql_update_documents
import borme.parser.importer
from libreborme.utils import get_git_revision_short_hash
class Command(BaseCommand):
# args = '<ISO formatted date (ex. 2015-01-01 or --init)> [--local]'
help = 'Import BORMEs from date'
def add_arguments(self, parser):
parser.add_argument(
'-f', '--from',
nargs=1, required=True,
help='ISO formatted date (ex. 2015-01-01) or "init"')
parser.add_argument(
'-t', '--to',
nargs=1, required=True,
help='ISO formatted date (ex. 2016-01-01) or "today"')
parser.add_argument(
'--local-only',
| action='store_true',
default=False,
help='Do not download any file')
parser.add_argument(
'--no-missing',
action='store_true',
default=False,
help='Abort if local file is not found')
# json only, pdf only...
def handle(self, *args, **op | tions):
self.set_verbosity(int(options['verbosity']))
start_time = time.time()
import_borme_download(options['from'][0],
options['to'][0],
local_only=options['local_only'],
no_missing=options['no_missing'])
config = Config.objects.first()
if config:
config.last_modified = timezone.now()
else:
config = Config(last_modified=timezone.now())
config.version = get_git_revision_short_hash()
config.save()
# Update Full Text Search
# psql_update_documents()
# Elapsed time
elapsed_time = time.time() - start_time
print('\nElapsed time: %.2f seconds' % elapsed_time)
def set_verbosity(self, verbosity):
if verbosity == 0:
borme.parser.importer.logger.setLevel(logging.ERROR)
elif verbosity == 1: # default
borme.parser.importer.logger.setLevel(logging.INFO)
elif verbosity == 2:
borme.parser.importer.logger.setLevel(logging.INFO)
elif verbosity > 2:
borme.parser.importer.logger.setLevel(logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
|
STIXProject/python-stix | stix/version.py | Python | bsd-3-clause | 130 | 0 | # Copyright (c) 2017, The | MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
__version__ = "1.2. | 0.11"
|
pythonpopayan/bermoto | backend/simulation/app_clients.py | Python | mit | 394 | 0 | """
"""
import websocket
import json
from time import sleep
cla | ss app_client(object):
def __init__(self, url):
self.ws = websocket.create_connection(url)
def send(self, message):
self.ws.send(json.dumps(message))
sleep(0.5)
result = self.ws.recv()
print(result)
return json.loads(result)
def close(self):
self.ws. | close()
|
Trigition/MTG-DataScraper | mtg_dataminer/paths.py | Python | mit | 1,594 | 0.005646 | #!/usr/bin/env python
CARD_IMAGE_TOKEN_CLASS = 'card-small-icon'
CARD_TEXT_BOX_CLASS = 'card-text'
### PATHS
### If Gatherer changes how it stores card details, the paths
### can be edited here
# Path for card links on a search result page
CARD_LINKS = '//tr/td/div/span[@class="cardTitle"]/a'
# Path to the table which holds information on a card
CARD_HTML_TABLE = '//table[@class="cardComponentTable"]'
# Path to card variants
# Note that this path is for the 'results' page
CARD_VARIANT_PATH = '//div[contains(@class, "otherSetSection")]/div[@class="rightCol"]'
# Describe the difference in HTML between a page which describes
# a single card vs. a double card
MULTIPLE_CARD_PATH = './/table[@class="cardDetails cardComponent"]'
MULTIPLE_CARD_CONTAINER = './/td[@class="cardComponentContainer"]'
INDIVIDUAL_MULTIPLE_CARD = './div/table[@class="cardDetails cardComponent"]'
SINGLE_CARD_PATH = '//ta | ble[@clas | s="cardDetails"]'
# Describe where the card image and card details are located
CARD_IMAGE_CONTAINER = './/td[contains(@class,"leftCol")]'
CARD_DETAILS_CONTAINER = './/td[contains(@class,"rightCol")]'
### ---END PATHS--- ###
### CARD DETAIL PATHS
# Card details are in rows
ROW_PATH = './/div[contains(@class, "row")]'
# Fields are separated by a "label" div and a "value" div
KEY_PATH = './div[@class="label"]'
VALUE_PATH = './div[@class="value"]'
# Card Text Box
TEXT_BOX_PATH = '//div[@class="cardtextbox"]'
FLAVOR_TEXT_BOX = './div[@class="flavortextbox"]'
### --END CARD DETAIL PATHS
### NEXT PAGE PATH
NEXT_PAGE = '//div[@class="paging"]/a'
### --END NEXT PAGE PATH
|
samuraisam/lexical_uuid | lexical_uuid/alchemy.py | Python | mit | 999 | 0.012012 | import lexical_uuid
from sqlalchemy.dialects.postgresql import BYTEA
from sqlalchemy.typ | es import TypeDecorator
class LexicalUUID(TypeDecorator):
    """SQLAlchemy column type storing a ``lexical_uuid.LexicalUUID`` as a
    16-byte PostgreSQL ``BYTEA`` value."""

    impl = BYTEA

    def __init__(self):
        # LexicalUUIDs are always exactly 16 bytes on the wire.
        self.impl.length = 16
        super(LexicalUUID, self).__init__(length=self.impl.length)

    def process_bind_param(self, value, dialect=None):
        """Convert *value* to raw bytes for storage.

        ``None``/falsy values pass through as ``None``; any non-LexicalUUID
        truthy value is rejected with ``TypeError``.
        """
        if value and isinstance(value, lexical_uuid.LexicalUUID):
            return value.bytes
        elif value and not isinstance(value, lexical_uuid.LexicalUUID):
            raise TypeError("Value {} is not a valid LexicalUUID (must be an "
                            "instance of lexical_uuid.LexicalUUID).".format(value))
        else:
            return None

    def process_result_value(self, value, dialect=None):
        """Rebuild a LexicalUUID from stored bytes; ``None`` passes through."""
        if value:
            return lexical_uuid.LexicalUUID(value=value)
        else:
            return None

    def is_mutable(self):
        # Values are immutable, so SQLAlchemy need not copy them defensively.
        return False
__doc__ = """
Usage::
class Entity(db.Model):
id = db.Column('id', LexicalUUID(), primary_key=True, default=lexical_uuid.LexicalUUID)
"""
|
delvelabs/htcap | core/crawl/lib/shared.py | Python | gpl-2.0 | 778 | 0.005141 | # -*- coding: utf-8 -*-
"""
HTCAP - beta 1
Author: filippo.cavallarin@wearesegment.com
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.
"""
# TODO: make sure that only shared data are stored in this object
class Shared:
    """Data shared between crawler threads.

    All attributes are class-level so every thread sees the same state;
    access is coordinated through the two condition variables below.
    """

    def __init__(self):
        pass

    # Condition variables guarding access to the shared state.
    main_condition = None
    th_condition = None

    # Work queue of requests and the index of the next one to process.
    requests = []
    requests_index = 0
    crawl_results = []

    # Crawl entry point and session cookies (initial and final).
    start_url = ""
    start_cookies = []
    end_cookies = []

    # Scope restrictions for the crawl.
    allowed_domains = set()
    excluded_urls = set()

    # Probe command line and parsed CLI options.
    probe_cmd = []
    options = {}
|
WaveBlocks/WaveBlocksND | WaveBlocksND/Interface/NormWavepacket.py | Python | bsd-3-clause | 4,912 | 0.001425 | """The WaveBlocks Project
Compute the norms of the homogeneous wavepackets as well as the sum of all norms.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011, 2012, 2013, 2016 R. Bourquin
@license: Modified BSD License
"""
from WaveBlocksND import BlockFactory
from WaveBlocksND import BasisTransformationHAWP
def compute_norm_hawp(iom, blockid=0, eigentrafo=True):
    """Compute the norm of a wavepacket timeseries.

    :param iom: An :py:class:`IOManager` instance providing the simulation data.
    :param blockid: The data block from which the values are read.
    :type blockid: Integer, Default is ``0``
    :param eigentrafo: Whether to make a transformation into the eigenbasis.
    :type eigentrafo: Boolean, default is ``True``.
    """
    parameters = iom.load_parameters()

    # Time grid of the saved wavepacket data.
    timegrid = iom.load_wavepacket_timegrid(blockid=blockid)
    number_timesteps = timegrid.shape[0]

    # Set up the transformation into the eigenbasis if requested.
    if eigentrafo is True:
        potential = BlockFactory().create_potential(parameters)
        transform = BasisTransformationHAWP(potential)

    # Reserve storage in the data file for the norms we are about to compute.
    iom.add_norm(parameters, timeslots=number_timesteps, blockid=blockid)

    # Reconstruct the Hagedorn wavepacket from its stored description.
    descr = iom.load_wavepacket_description(blockid=blockid)
    packet = BlockFactory().create_wavepacket(descr)

    if eigentrafo is True:
        transform.set_matrix_builder(packet.get_innerproduct())

    # Rebuild all basis shapes, keyed by their hash.
    shapes = {ahash: BlockFactory().create_basis_shape(d)
              for ahash, d in iom.load_wavepacket_basisshapes(blockid=blockid).items()}

    KEY = ("q", "p", "Q", "P", "S", "adQ")

    # Walk the time grid and measure the norm at each saved step.
    for step in timegrid:
        print(" Computing norms of timestep %d" % step)

        params = iom.load_wavepacket_parameters(timestep=step, blockid=blockid, key=KEY)
        hashes, coeffs = iom.load_wavepacket_coefficients(timestep=step, get_hashes=True, blockid=blockid)

        # Configure the wavepacket for this timestep.
        packet.set_parameters(params, key=KEY)
        packet.set_basis_shapes([shapes[int(ha)] for ha in hashes])
        packet.set_coefficients(coeffs)

        # Norms are measured in the eigenbasis.
        if eigentrafo is True:
            transform.transform_to_eigen(packet)

        iom.save_norm(packet.norm(), timestep=step, blockid=blockid)
def compute_norm_inhawp(iom, blockid=0, eigentrafo=True):
    """Compute the norm of a wavepacket timeseries.

    This function is for inhomogeneous wavepackets.

    :param iom: An :py:class:`IOManager` instance providing the simulation data.
    :param blockid: The data block from which the values are read.
    :type blockid: Integer, Default is ``0``
    :param eigentrafo: Whether to make a transformation into the eigenbasis.
    :type eigentrafo: Boolean, default is ``True``.
    """
    parameters = iom.load_parameters()

    # Number of time steps we saved
    timesteps = iom.load_inhomogwavepacket_timegrid(blockid=blockid)
    nrtimesteps = timesteps.shape[0]

    # Basis transformator
    if eigentrafo is True:
        # The potential used
        Potential = BlockFactory().create_potential(parameters)
        BT = BasisTransformationHAWP(Potential)

    # We want to save norms, thus add a data slot to the data file
    iom.add_norm(parameters, timeslots=nrtimesteps, blockid=blockid)

    # Initialize a Hagedorn wavepacket with the data
    descr = iom.load_inhomogwavepacket_description(blockid=blockid)
    HAWP = BlockFactory().create_wavepacket(descr)

    if eigentrafo is True:
        BT.set_matrix_builder(HAWP.get_innerproduct())

    # Basis shapes, keyed by hash
    BS_descr = iom.load_inhomogwavepacket_basisshapes(blockid=blockid)
    BS = {}
    for ahash, descr in BS_descr.items():
        BS[ahash] = BlockFactory().create_basis_shape(descr)

    KEY = ("q", "p", "Q", "P", "S", "adQ")

    # Iterate over all timesteps
    for i, step in enumerate(timesteps):
        print(" Computing norms of timestep %d" % step)

        # Retrieve simulation data
        params = iom.load_inhomogwavepacket_parameters(timestep=step, blockid=blockid, key=KEY)
        hashes, coeffs = iom.load_inhomogwavepacket_coefficients(timestep=step, get_hashes=True, blockid=blockid)

        # Configure the wavepacket
        HAWP.set_parameters(params, key=KEY)
        HAWP.set_basis_shapes([BS[int(ha)] for ha in hashes])
        HAWP.set_coefficients(coeffs)

        # Transform to the eigenbasis.
        if eigentrafo is True:
            BT.transform_to_eigen(HAWP)

        # Measure norms in the eigenbasis
        norm = HAWP.norm()

        # Save the norms
        iom.save_norm(norm, timestep=step, blockid=blockid)
|
deepmind/deepmind-research | density_functional_approximation_dm21/density_functional_approximation_dm21/export_saved_model.py | Python | apache-2.0 | 2,047 | 0.002931 | #!/usr/bin/env python3
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper for exporting a functional and its derivatives to a saved_model."""
from typing import Sequence
from absl import app
from absl import flags
from density_functional_approximation_dm21 import neural_numint
_OUT_DIR = flags.DEFINE_string(
'out_dir', None, 'Output directory.', required=True)
_BATCH_SIZE = flags.DEFINE_integer(
'batch_size',
1000,
'Number of grid points exported functional will process in a single call.',
lower_bound=0)
_FUNCT | IONAL = flags.DEFINE_enum_class('functional',
neural_numint.Functional.DM21,
neural_numint.Functional,
'Functional to export.')
def export(
functional: neural_numint.Functional,
export_path: str,
batch_dim: int,
) -> None:
"""Export a functional and its derivatives | to a single saved_model.
Args:
functional: functional to export.
export_path: path to saved the model to.
batch_dim: number of grid points to process in a single call.
"""
ni = neural_numint.NeuralNumInt(functional)
ni.export_functional_and_derivatives(
export_path=export_path, batch_dim=batch_dim)
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
export(_FUNCTIONAL.value, _OUT_DIR.value, _BATCH_SIZE.value)
if __name__ == '__main__':
app.run(main)
|
jwodder/javaproperties | test/test_loads_xml.py | Python | mit | 4,001 | 0.0005 | from collections import OrderedDict
import pytest
from javaproperties import loads_xml
@pytest.mark.parametrize(
"s,d",
[
("<properties></properties>", {}),
(
'<properties><entry key="key">value</entry></properties>',
{"key": "value"},
),
(
'<properties><entry key="key"> </entry></properties>',
{"key": " "},
),
(
'<properties><entry key="key">\n</entry></properties>',
{"key": "\n"},
),
(
'<properties><entry key="key"></entry></properties>',
{"key": ""},
),
(
'<properties><entry key="key"/></properties>',
{"key": ""},
),
(
"<properties>"
'<entry key="key">\n</entry>'
'<not-an-entry><entry key="foo">bar</entry></not-an-entry>'
"</properties>",
{"key": "\n"},
),
(
'<properties>\n <entry key="key">value</entry>\n</properties>\n',
{"key": "value"},
),
(
"<properties>"
'<entry key="key">value</entry>'
'<entry key="foo">bar</entry>'
"</properties>",
{"key": "value", "foo": "bar"},
),
(
"<properties>\n"
' <entry key="key">value1</entry>\n'
' <entry key="key">value2</entry>\n'
"</properties>\n",
{"key": "value2"},
),
(
"<properties>\n"
' <entry key="ampersand">&</entry>\n'
' <entry key="less than"><</entry>\n'
' <entry key="greater than">></entry>\n'
' <entry key=""">"</entry>\n'
' <entry key="snowman">☃</entry>\n'
"</properties>\n",
{
"ampersand": "&",
"less than": "<",
"greater than": ">",
'"': '"',
"snowman": "\u2603",
},
),
(
"<properties>\n"
' <entry key="escapes">\\n\\r\\t\\u2603\\f\\\\</entry>\n'
"</properties>\n",
{"escapes": "\\n\\r\\t\\u2603\\f\\\\"},
),
(
"<properties>\n"
" <comment>This is a comment.</comment>\n" |
' <entry key="key">value</entry>\n'
"</properties>\n",
{"key": "value"},
),
(
"<properties>\n"
' <entry key="key">value</entry>\n'
' <something | -else key="foo">bar</something-else>\n'
"</properties>\n",
{"key": "value"},
),
(
'<properties><entry key="goat">🐐</entry></properties>',
{"goat": "\U0001F410"},
),
],
)
def test_loads_xml(s, d):
assert loads_xml(s) == d
def test_loads_xml_bad_root():
    """Parsing fails when the document root is not <properties>."""
    document = '<not-properties><entry key="key">value</entry></not-properties>'
    with pytest.raises(ValueError) as excinfo:
        loads_xml(document)
    assert "not rooted at <properties>" in str(excinfo.value)
def test_loads_xml_no_key():
    """Parsing fails when an <entry> element lacks its "key" attribute."""
    document = "<properties><entry>value</entry></properties>"
    with pytest.raises(ValueError) as excinfo:
        loads_xml(document)
    assert '<entry> is missing "key" attribute' in str(excinfo.value)
def test_loads_xml_multiple_ordereddict():
assert (
loads_xml(
"""
<properties>
<entry key="key">value</entry>
<entry key="foo">bar</entry>
</properties>
""",
object_pairs_hook=OrderedDict,
)
== OrderedDict([("key", "value"), ("foo", "bar")])
)
def test_loads_xml_multiple_ordereddict_rev():
assert (
loads_xml(
"""
<properties>
<entry key="foo">bar</entry>
<entry key="key">value</entry>
</properties>
""",
object_pairs_hook=OrderedDict,
)
== OrderedDict([("foo", "bar"), ("key", "value")])
)
|
saelo/sekretaer | sekretaer/core.py | Python | mit | 5,590 | 0.003221 | #!/usr/bin/env python
#coding: UTF-8
#
# (c) 2014 Samuel Groß <dev@samuel-gross.de>
#
import os
import sys
import importlib
from time import sleep
try:
import concurrent.futures
except ImportError:
pass
#
# Functions
#
def warn(msg):
    """Emit a warning line on stderr."""
    sys.stderr.write("WARNING: {}\n".format(msg))
def err(msg):
    """Emit an error line on stderr."""
    sys.stderr.write("ERROR: {}\n".format(msg))
def log(msg):
    """Emit an informational line on stdout."""
    sys.stdout.write("{}\n".format(msg))
#
# Decorators
#
def add_attr(f, attributes):
    """Attach every key/value pair in *attributes* to *f* as an attribute."""
    for attr_name, attr_value in attributes.items():
        setattr(f, attr_name, attr_value)
def source(name):
def decorate(f):
def add_source_to_events_wrapper(*args, **kwargs):
res = f(*args, **kwargs)
events = set()
for e in res:
e.source = name
events.add(e)
return events
add_attr(add_source_to_events_wrapper, {'mod_type': 'source', 'name': name})
return add_source_to_events_wrapper
return decorate
def sink(name):
    """Decorator tagging a handler function as the event sink *name*.

    Unlike :func:`source`, the function is returned unmodified apart from
    the ``mod_type``/``name`` tags used by the module loader.
    """
    def decorate(f):
        # Same effect as add_attr(f, {...}): tag the function in place.
        f.mod_type = "sink"
        f.name = name
        return f
    return decorate
#
# Classes
#
class Event:
    """A calendar event spanning *start* to *end*, truncated to minute
    precision so repeated fetches of the same event compare equal.

    Equality ignores the case of ``name``; ``source`` names the module
    the event was fetched from.
    """

    def __init__(self, start, end, name, source='', description=''):
        # Drop seconds/microseconds: feeds report them inconsistently.
        self.start = start.replace(second=0, microsecond=0)
        self.end = end.replace(second=0, microsecond=0)
        self.name = name
        self.source = source
        self.description = description

    def __str__(self):
        return '[{}] {}: {} - {}'.format(
            self.source,
            self.name,
            self.start.strftime('%d.%m.%Y %H:%M'),
            self.end.strftime('%d.%m.%Y %H:%M'))

    def __eq__(self, other):
        # Robustness fix: comparing against a non-Event used to raise
        # AttributeError; defer to the other operand instead.
        if not isinstance(other, Event):
            return NotImplemented
        return (self.start == other.start and
                self.end == other.end and
                self.name.lower() == other.name.lower() and
                self.source == other.source)

    def __hash__(self):
        # BUG FIX: the original hashed the bound method ``self.name.lower``
        # (not its result), so events equal under the case-insensitive
        # __eq__ could hash differently — breaking set/dict deduplication.
        return (hash(self.start) ^
                hash(self.end) ^
                hash(self.name.lower()) ^
                hash(self.source))
class Sekretaer:
    """Main application object: periodically fetches events from all
    enabled source modules and hands new ones to all enabled sink modules.
    """

    # Seconds between two fetch cycles.
    DELAY = 60 * 60

    def __init__(self, config):
        log("[sekretaer] Initializing...")
        self.config = config
        # TODO check for and load pickled event set
        self.events = set()
        # concurrent.futures is only available from Python 3.2 on;
        # fall back to sequential fetching on older interpreters.
        if sys.version_info[:2] >= (3, 2):
            self.fetch = self.fetch_par
        else:
            self.fetch = self.fetch_seq
        self.sources = []
        self.sinks = []
        self.load_modules()
        log("[sekretaer] Ready for work")

    def work(self):
        """Main loop: fetch, dispatch anything new, clean up, sleep."""
        while True:
            events = self.fetch()
            events -= self.events
            if events:
                self.process(events)
                self.events |= events
            self.cleanup()
            sleep(self.DELAY)

    def fetch_seq(self):
        """Fetch new events from all enabled sources sequentially
        (fallback for older Python versions)."""
        events = set()
        for f in self.sources:
            try:
                events |= f(self.config.get(f.name))
            except Exception as e:
                warn("Caught Exception: {}".format(e))
        return events

    def fetch_par(self):
        """Fetch new events from all enabled sources in parallel."""
        events = set()
        with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
            futures = [executor.submit(f, self.config.get(f.name)) for f in self.sources]
            for future in concurrent.futures.as_completed(futures):
                try:
                    events |= future.result()
                except Exception as e:
                    warn("Caught Exception: {}".format(e))
        return events

    def process(self, events):
        """Hand the new events to every enabled sink."""
        for f in self.sinks:
            f(self.config.get(f.name), events)

    def cleanup(self):
        """Remove old events from the set."""
        pass

    def load_modules(self):
        """Load enabled source/sink modules from the modules folder."""
        log("[sekretaer] Loading modules...")
        modules = []
        working_dir = os.path.dirname(os.path.abspath(__file__))
        modules_dir = os.path.join(working_dir, 'modules')
        # Every non-private .py file in modules/ is a candidate module.
        for filename in os.listdir(modules_dir):
            if filename.endswith('.py') and not filename.startswith('_'):
                modules.append(filename[:-3])
        for module_name in modules:
            try:
                module = importlib.import_module('sekretaer.modules.' + module_name)
            except Exception as e:
                warn("[sekretaer] Failed to load module '{}': {}".format(module_name, e))
                continue
            # Collect the public callables; the @source/@sink decorators
            # tag them with mod_type/name.
            functions = [getattr(module, a) for a in dir(module)
                         if not a.startswith('_') and callable(getattr(module, a))]
            for f in functions:
                if hasattr(f, 'mod_type'):
                    if f.mod_type == 'source' and f.name in self.config['sources_enabled']:
                        self.sources.append(f)
                    elif f.mod_type == 'sink' and f.name in self.config['sinks_enabled']:
                        self.sinks.append(f)
        # Warn about enabled modules that could not be loaded.
        loaded_modules = set(map(lambda x: x.name, self.sources + self.sinks))
        for missing_module in set(self.config['sources_enabled'] + self.config['sinks_enabled']) - loaded_modules:
            warn("[sekretaer] Module '{}' could not be loaded".format(missing_module))
        log("[sekretaer] {} modules loaded".format(len(self.sources) + len(self.sinks)))

    def shutdown(self):
        """Store the current state to disk and quit."""
        log("[sekretaer] Shutting down")
        pass
|
m-kal/DirtyBoots | src/datamgr.py | Python | gpl-3.0 | 10,361 | 0.024901 | import uuid
import sqlite3
from datetime import date, timedelta
import logging
import dblog
class datamgr( object ):
"""
Handles database creation/setup, queries, and importation
"""
_dbConn = { }
_dbCurs = { }
_db = None
# default name/location of the merged history database
DEF_DB_LOC = 'history.sqlite'
# create SQL for imported/original history records
SQL_CREATE_IMPORTED = '''
DROP TABLE IF EXISTS "imported_history";
CREATE TABLE imported_history (
browser TEXT,
guid TEXT,
seq INTEGER DEFAULT 0,
type TEXT,
url LONGVARCHAR,
from_url LONGVARCHAR,
root_url LONGVARCHAR,
visit_count INTEGER DEFAULT 0,
timestamp DATETIME
);
'''
# create SQL for massaged/active history records
SQL_CREATE_HISTORY = '''
DROP TABLE IF EXISTS "history";
CREATE TABLE history (
browser TEXT,
guid TEXT,
seq INTEGER DEFAULT 0,
type TEXT,
url LONGVARCHAR,
from_url LONGVARCHAR,
root_url LONGVARCHAR,
visit_count INTEGER DEFAULT 0,
timestamp DATETIME
);
'''
# clear working history table
SQL_CLEAR_URL_HISTORY = 'DELETE FROM history'
# clear imported history table
SQL_CLEAR_URL_IMPORT_HISTORY = 'DELETE FROM imported_history'
# import url data into history database for a specified table
SQL_INSERT_URL_HISTORY = 'INSERT INTO %s (browser,guid,seq,type,url,from_url,root_url,visit_count,timestamp) VALUES (?,?,?,?,?,?,?,?,?)'
# select url data from chrome history database to be used for import
SQL_SELECT_CHROME_HISTORY_FOR_IMPORT = '''
SELECT V.*,
datetime((visit_time/10000000),'unixepoch') AS timestamp,
U.visit_count,
U.url AS url_str,
datetime((U.last_visit_time/10000000),'unixepoch') AS last_visit_date
FROM visits AS V
JOIN urls U ON ( U.id = V.url )
'''
# select url data from firefox history database to be used for import
SQL_SELECT_FIREFOX_HISTORY_FOR_IMPORT = '''
SELECT MHI.*,
datetime((visit_date/1000000),'unixepoch') AS timestamp,
MP.url,
MP.rev_host,
MP.visit_count,
MP.frecency,
datetime((MP.last_visit_date/1000000),'unixepoch') AS last_visit_date,
MP.guid
FROM moz_historyvisits AS MHI
JOIN moz_places MP ON ( MP.id = MHI.place_id )
'''
# select visit counts per url, returns all url-instances
SQL_SELECT_URL_VISIT_COUNT = 'SELECT *, count(guid) AS visit_count FROM imported_history GROUP BY url, timestamp'
def __init__(self,db):
self._db | = db
def db(self):
return self._db
def closeConn( self ):
"""
Close the database connection
| """
self.curs( ).close( )
self.conn( ).close( )
def conn( self ):
return self._dbConn
def curs( self ):
return self._dbCurs
def log( self, msg, level = logging.INFO ):
"""
For consistent logging format within all handler functions
"""
dblog.log( 'SQL', msg, level = level )
# DB Setup Functions
def openDb( self, location = DEF_DB_LOC ):
"""
Open and connect to a database
"""
self._dbConn = sqlite3.connect( location, detect_types = sqlite3.PARSE_COLNAMES )
self.conn( ).row_factory = sqlite3.Row
self._dbCurs = self.conn( ).cursor( )
def clearDb( self, location = DEF_DB_LOC ):
"""
Clears all tables in a history database
"""
self.openDb( location )
self.log( 'Clearing database' )
self.executeSql( self.SQL_CLEAR_URL_HISTORY )
self.executeSql( self.SQL_CLEAR_URL_IMPORT_HISTORY )
self.conn( ).commit( )
def initDb( self, location = DEF_DB_LOC ):
"""
Create a database at a given location/filename
"""
self.openDb( location )
self.createDbTables( )
def createDbTables( self ):
"""
Execute creation SQL for DB
"""
self.executeSqlScript( self.SQL_CREATE_IMPORTED )
self.executeSqlScript( self.SQL_CREATE_HISTORY )
self.log( 'Executed creation commands' )
self.conn( ).commit( )
def lcdVisitType( self, browser, typeFromDb ):
"""
Match a visit type int for a browser to a generic/shared visit type
"""
firefox = [ '', 'link', 'typed', 'bookmark', 'embed', 'redirect_perm', 'redirect_temp', 'download',
'framed_link' ]
chrome = [ 'link', 'typed', 'bookmark', 'frame_auto', 'frame_man', 'generated', 'start_page', 'form', 'reload',
'keyword_url', 'keyword_gen' ]
if browser == 'firefox':
return firefox[ max( max( 0, typeFromDb ), min( -typeFromDb, 0 ) ) ]
else:
return chrome[ max( 0, typeFromDb ) ]
# Import / Insert Functions
def importDatabase( self, browser, filename ):
"""
Import a browser database into the generic history database
"""
self.log( '[%s] Loading Database' % browser.title( ) )
if browser == 'firefox':
self.importFirefoxDatabase( filename )
else:
self.importChromeDatabase( filename )
self.log( 'Rebuilding the working history table' )
self.rebuildUrlHistoryTable( )
self.conn( ).commit( )
def importChromeDatabase( self, filename ):
"""
Imports a Chrome database into the generic history database
"""
self.log( '[ChromeDB] Loading database from %s' % filename )
conn = sqlite3.connect( filename )
conn.row_factory = sqlite3.Row
curs = conn.cursor( )
self.executeSql( self.SQL_SELECT_CHROME_HISTORY_FOR_IMPORT, curs = curs )
urlsToImport = curs.fetchall( )
self.log( '[ChromeDB] Importing %d urls into database' % len( urlsToImport ) )
for urlRecord in urlsToImport:
transCode = 0xFF & urlRecord[ 'transition' ]
visitType = self.lcdVisitType( 'chrome', transCode )
formattedRow = {
'browser': 'chrome',
'guid': str( uuid.uuid1( ) ),
'seq': urlRecord[ 'id' ],
'type': visitType,
'url': urlRecord[ 'url_str' ],
'visit_count': urlRecord[ 'visit_count' ],
'from_url': '',
'root_url': '',
'timestamp': urlRecord[ 'timestamp' ]
}
self.insertFormattedHistoryRow( formattedRow )
def importFirefoxDatabase( self, filename ):
"""
Imports a Firefox database into the generic history database
"""
self.log( '[FirefoxDB] Loading database from %s' % filename )
conn = sqlite3.connect( filename )
conn.row_factory = sqlite3.Row
curs = conn.cursor( )
self.executeSql( self.SQL_SELECT_FIREFOX_HISTORY_FOR_IMPORT, curs = curs )
urlsToImport = curs.fetchall( )
self.log( '[FirefoxDB] Importing %d urls into database' % len( urlsToImport ) )
for urlRecord in urlsToImport:
visitType = self.lcdVisitType( 'firefox', urlRecord[ 'visit_type' ] )
formattedRow = {
'browser': 'firefox',
'guid': str( uuid.uuid1( ) ),
'seq': urlRecord[ 'id' ],
'type': visitType,
'url': urlRecord[ 'url' ],
'visit_count': urlRecord[ 'visit_count' ],
'from_url': '',
'root_url': '',
'timestamp': urlRecord[ 'timestamp' ]
}
self.insertFormattedHistoryRow( formattedRow )
def insertFormattedHistoryRowHelper( self, table, formattedRow ):
"""
Inserts a row for the history database into one of the *history tables
"""
self.executeSql( self.SQL_INSERT_URL_HISTORY % table, [
|
auag92/n2dm | Asap-3.8.4/OpenKIMexport/subprocess_py27.py | Python | mit | 1,883 | 0.001593 | # This file is part of the Python 2.7 module subprocess.py, included here
# for compatibility with Python 2.6.
#
# It is still under the original, very open PSF license, see the original
# copyright message included below.
#
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
from subprocess | import Popen, CalledProcessError, PIPE
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    If the exit code was non-zero it raises a CalledProcessError.  The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = Popen(stdout=PIPE, *popenargs, **kwargs)
    output, unused_err = process.communicate()
    retcode = process.poll()
    if retcode:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise CalledProcessError(retcode, cmd)
    return output
|
Actifio/actifio-python-package | samples/mdl_analyser/app.py | Python | mit | 2,375 | 0.028211 | #!/usr/bin/env python3
from flask import Flask
from Actifio import Actifio
from chartjs import ChartJS, ChartDatalet, ChartDataset
#from mdl_analyser import MDL
import getpass
import base64
import os
import json
import argparse
from datetime import datetime
cli_args = argparse.ArgumentParser(description="CLI Arguments")
cli_args.add_argument('-a', '--appliance', type=str, nargs=1, help="Appliance IP", required=False)
cli_args.add_argument('-u', '--user', type=str, nargs=1, help="Actifio Username", required=False)
cli_args.add_argument('-p', '--password', type=str, nargs=1, help="Actifio password", required=False)
arguments = cli_args.parse_args()
print (arguments)
while 1:
try:
if arguments.appliance == None:
APPLIANCE = input('Appliance: ')
else:
APPLIANCE = arguments.appliance[0]
except:
pass
try:
if arguments.user == None:
USERNAME = input("Username: ")
else:
USERNAME = arguments.user[0]
except:
pass
try:
if arguments.password == None:
PASSWORD = getpass.getpass(prompt="Password: ")
else:
PASSWORD = arguments.password[0]
except:
pass
act = Actifio(APPLIANCE, USERNAME, PASSWORD, verbose=True)
try:
mdldata = act.run_uds_command("info","lsmdlstat",{ 'filtervalue': {"stattime": "> 2020-03-15"}})
except Exception as e:
print ("Something didn't go as planned: " + str(e))
else:
break
mdl_analyser = Flask("mdl_analyser")
mdl_analyser.port = 5100
@mdl_analyser.route("/")
def home ():
try:
with open("chart.html") as f:
return f.read()
except:
pass
@mdl_analyser.route("/start")
def analyser_start ():
return "This is from JQ/AJAX"
@mdl_analyser.route("/applist")
def list_app | lications ():
appnamelist = {}
for m in mdldata['result']:
if not m['appname'] in | appnamelist.keys():
appnamelist[m['appname']] = m['appid']
return json.dumps(appnamelist)
@mdl_analyser.route("/chartdata")
def gen_chart_for_all ():
chart = ChartJS("MDL Analyser", stacked=True)
chart.set_legend()
for m in mdldata['result']:
if not m['appid'] == '0':
stat_day = datetime.strptime(m['stattime'][0:10], "%Y-%m-%d")
chart.add_data(m['appname'], stat_day, int(int(m['manageddata'])/1024/1024/1024))
return chart.render_json_config()
if __name__ == "__main__":
mdl_analyser.run(port=5100)
|
stackforge/monasca-notification | monasca_notification/retry_engine.py | Python | apache-2.0 | 3,769 | 0.001061 | # (C) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
# Copyright 2017 Fujitsu LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from monasca_common.kafka import consumer
from monasca_common.kafka import producer
from oslo_config import cfg
from oslo_log import log as logging
from six import PY3
from monasca_notification.common.utils import construct_notification_object
from monasca_notification.common.utils import get_db_repo
from monasca_notification.common.utils import get_statsd_client
from monasca_notification.processors import notification_processor
log = logging.getLogger(__name__)
CONF = cfg.CONF
class RetryEngine(object):
    """Consumes failed notifications from the retry topic and re-sends them.

    Successful sends are forwarded to the main notification topic; failures
    are re-queued with an incremented retry count until
    ``CONF.retry_engine.max_attempts`` is reached, then dropped.
    """

    def __init__(self):
        self._statsd = get_statsd_client()
        self._consumer = consumer.KafkaConsumer(
            CONF.kafka.url,
            ','.join(CONF.zookeeper.url),
            CONF.zookeeper.notification_retry_path,
            CONF.kafka.group,
            CONF.kafka.notification_retry_topic
        )
        self._producer = producer.KafkaProducer(CONF.kafka.url)
        self._notifier = notification_processor.NotificationProcessor()
        self._db_repo = get_db_repo()

    def run(self):
        """Consume the retry topic forever, re-sending each notification."""
        for raw_notification in self._consumer:
            message = raw_notification[1].message.value
            # Kafka delivers bytes under Python 3.
            message = message.decode('UTF-8') if PY3 else message
            notification_data = json.loads(message)

            notification = construct_notification_object(self._db_repo, notification_data)

            if notification is None:
                self._consumer.commit()
                continue

            # Wait out the remainder of the retry interval before re-sending.
            wait_duration = CONF.retry_engine.interval - (
                time.time() - notification_data['notification_timestamp'])
            if wait_duration > 0:
                time.sleep(wait_duration)

            sent, failed = self._notifier.send([notification])

            if sent:
                self._producer.publish(CONF.kafka.notification_topic,
                                       [notification.to_json()])

            if failed:
                notification.retry_count += 1
                notification.notification_timestamp = time.time()
                if notification.retry_count < CONF.retry_engine.max_attempts:
                    log.error(u"retry failed for {} with name {} "
                              u"at {}. "
                              u"Saving for later retry.".format(notification.type,
                                                                notification.name,
                                                                notification.address))
                    self._producer.publish(CONF.kafka.notification_retry_topic,
                                           [notification.to_json()])
                else:
                    log.error(u"retry failed for {} with name {} "
                              u"at {} after {} retries. "
                              u"Giving up on retry."
                              .format(notification.type,
                                      notification.name,
                                      notification.address,
                                      CONF.retry_engine.max_attempts))
            self._consumer.commit()
|
kperun/nestml | PyNestML.py | Python | gpl-2.0 | 881 | 0 | #
# PyNestML.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import sys
from pynestml.frontend.pynestml_frontend import main
"""
This file represents the entry point to the | PyNestML.
"""
if __name__ == '__main__':
main(sys.argv[1:])
|
MrWhoami/WhoamiBangumi | Iqiyi.py | Python | mit | 1,032 | 0.001953 | # -*- coding:utf_8 -*-
import urllib2
from bs4 import BeautifulSoup
from Bangumi import Bangumi
class Iqiyi(Bangumi):
    """Scraper for the iqiyi.com weekly bangumi (anime) schedule."""

    link = "http://www.iqiyi.com/dongman/bangumi.html"
    name = u"爱奇艺"

    def getBangumi(self):
        """Iqiyi processing function"""
        # Get Iqiyi bangumi HTML
        req = urllib2.Request(self.link)
        res = urllib2.urlopen(req)
        html = res.read()
        # Give the HTML to BeautifulSoup
        # TODO: Change the parser to lxml for better performance
        soup = BeautifulSoup(html, "html.parser")
        bweek = soup.find(id='widget-qycpweekly')
        # Get the list of the week: one child element per weekday.
        for child in bweek.children:
            if child.name is None:
                # Skip bare NavigableStrings between the day columns.
                continue
            wd = int(child['data-day']) + 1
            binfos = child.find_all('h4')
            for binfo in binfos:
                # Titles look like "<name>:<update info>" (fullwidth colon).
                bname, bsep, bupdate = binfo.string.partition(u':')
                blink = binfo.parent.parent['href']
                self.add(wd, bname, bupdate, blink)
|
plotly/plotly.py | packages/python/plotly/plotly/validators/histogram2d/_uid.py | Python | mit | 388 | 0 | import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``histogram2d.uid`` plotly property."""

    def __init__(self, plotly_name="uid", parent_name="histogram2d", **kwargs):
        super(UidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            **kwargs
        )
|
Branlala/docker-sickbeardfr | sickbeard/lib/hachoir_parser/game/laf.py | Python | mit | 2,900 | 0.008966 | # -*- coding: utf-8 -*-
"""
LucasArts Font parser.
Author: Cyril Zorin
Creation date: 1 January 2007
"""
from lib.hachoir_parser import Parser
from | lib.hachoir_core.field import (FieldSet,
UInt8, UInt16, UInt32, Gen | ericVector)
from lib.hachoir_core.endian import LITTLE_ENDIAN
class CharData(FieldSet):
    """Container holding the bitmap data of every character in the font."""

    def __init__(self, chars, *args):
        FieldSet.__init__(self, *args)
        self.chars = chars

    def createFields(self):
        # One bitmap field per character record.
        for char_info in self.chars:
            yield CharBitmap(char_info, self, "char_bitmap[]")
class CharBitmap(FieldSet):
    """Bitmap of a single character: one vector of pixel bytes per scan line."""

    def __init__(self, char, *args):
        FieldSet.__init__(self, *args)
        self.char = char

    def createFields(self):
        height = self.char["height_pixels"].value
        width = self.char["width_pixels"].value
        for _ in xrange(height):
            yield GenericVector(self, "line[]", width, UInt8, "pixel")
class CharInfo(FieldSet):
    """Fixed-size (16 byte) per-character record: data offset and metrics."""

    static_size = 16 * 8  # bits

    def createFields(self):
        yield UInt32(self, "data_offset")
        yield UInt8(self, "logical_width")
        # Three bytes of unknown purpose.
        for _ in range(3):
            yield UInt8(self, "unknown[]")
        yield UInt32(self, "width_pixels")
        yield UInt32(self, "height_pixels")
class LafFile(Parser):
    """Parser for LucasArts ``.laf`` bitmap-font files.

    Layout (little-endian): a fixed header, a 16-bit code table and a
    CharInfo table (one entry per glyph), followed by the raw glyph bitmaps.
    """
    PARSER_TAGS = {
        "id": "lucasarts_font",
        "category": "game",
        "file_ext" : ("laf",),
        "min_size" : 32*8,  # minimum size in bits (header only)
        "description" : "LucasArts Font"
    }

    endian = LITTLE_ENDIAN

    def validate(self):
        # A valid LAF always describes the full 8-bit range: 256 glyphs
        # for codes 0..255, with glyph #0 stored at offset 0.
        if self["num_chars"].value != 256:
            return "Invalid number of characters (%u)" % self["num_chars"].value
        if self["first_char_code"].value != 0:
            return "Invalid of code of first character code (%u)" % self["first_char_code"].value
        if self["last_char_code"].value != 255:
            return "Invalid of code of last character code (%u)" % self["last_char_code"].value
        if self["char_codes/char[0]"].value != 0:
            return "Invalid character code #0 (%u)" % self["char_codes/char[0]"].value
        if self["chars/char[0]/data_offset"].value != 0:
            return "Invalid character #0 offset"
        return True

    def createFields(self):
        # --- fixed header ---
        yield UInt32(self, "num_chars")
        yield UInt32(self, "raw_font_data_size")
        yield UInt32(self, "max_char_width")
        yield UInt32(self, "min_char_width")
        yield UInt32(self, "unknown[]", 4)
        yield UInt32(self, "unknown[]", 4)
        yield UInt32(self, "first_char_code")
        yield UInt32(self, "last_char_code")
        # --- per-glyph tables ---
        yield GenericVector(self, "char_codes", self["num_chars"].value,
                            UInt16, "char")
        yield GenericVector(self, "chars", self["num_chars"].value,
                            CharInfo, "char")
        # character data. we make an effort to provide
        # something more meaningful than "RawBytes:
        # character bitmap data"
        yield CharData(self["chars"], self, "char_data")
        # read to the end
        if self.current_size < self._size:
            yield self.seekBit(self._size, "unknown[]")
|
chadmv/cmt | scripts/cmt/rig/spaceswitch.py | Python | mit | 4,654 | 0.003223 | """Space switching without constraints or extra DAG nodes.
Contains functions to create a space switching network as well as seamlessly switching
between spaces.
Example Usage
=============
::
import cmt.rig.spaceswitch as spaceswitch
# Create the space switch
spaceswitch.create_space_switch(
pole_vector_control,
[(ik_control, "foot"), (root_control, "root"), (world_control, "world")],
switch_attribute="space",
use_rotate=False,
)
# Seamless switch
spaceswitch.switch_space(pole_vector_control, "space", 1, create_keys=False)
"""
import maya.cmds as cmds
import maya.api.OpenMaya as OpenMaya
from cmt.dge import dge
import cmt.rig.common as common
import cmt.shortcuts as shortcuts
def create_space_switch(
    node, drivers, switch_attribute=None, use_translate=True, use_rotate=True
):
    """Creates a space switch network.

    The network uses the offsetParentMatrix attribute and does not create any
    constraints or new dag nodes.

    :param node: Transform to drive
    :param drivers: List of tuples: [(driver1, "spaceName1"), (driver2, "spaceName2")]
    :param switch_attribute: Name of the switch attribute to create on the target node.
    :param use_translate: Default value of the created "use translate" toggle.
    :param use_rotate: Default value of the created "use rotate" toggle.
    """
    if switch_attribute is None:
        switch_attribute = "space"

    # Recreate the enum attribute from scratch so a stale space list can't linger.
    if cmds.objExists("{}.{}".format(node, switch_attribute)):
        cmds.deleteAttr(node, at=switch_attribute)
    names = [d[1] for d in drivers]
    cmds.addAttr(node, ln=switch_attribute, at="enum", en=":".join(names), keyable=True)

    # Create attribute to toggle translation in the matrices
    enable_translate_attr = _create_bool_attribute(
        node, "{}UseTranslate".format(switch_attribute), use_translate
    )
    # Create attribute to toggle rotation in the matrices
    enable_rotate_attr = _create_bool_attribute(
        node, "{}UseRotate".format(switch_attribute), use_rotate
    )

    blend = cmds.createNode("blendMatrix", name="{}_spaceswitch".format(node))

    # Get the current offset parent matrix. This is used as the starting blend point
    m = OpenMaya.MMatrix(cmds.getAttr("{}.offsetParentMatrix".format(node)))
    cmds.setAttr("{}.inputMatrix".format(blend), list(m), type="matrix")

    parent = cmds.listRelatives(node, parent=True, path=True)
    to_parent_local = "{}.worldInverseMatrix[0]".format(parent[0]) if parent else None

    for i, driver in enumerate(drivers):
        driver = driver[0]

        _connect_driver_matrix_network(blend, node, driver, i, to_parent_local)

        target_attr = "{}.target[{}]".format(blend, i)

        # Hook up the weight toggle when switching spaces
        dge(
            "x = switch == {} ? 1 : 0".format(i),
            x="{}.weight".format(target_attr),
            switch="{}.{}".format(node, switch_attribute),
        )

        # Connect the translation, rotation toggles
        cmds.connectAttr(enable_translate_attr, "{}.useTranslate".format(target_attr))
        # Fixed: a stray extra argument was previously passed to format() here.
        cmds.connectAttr(enable_rotate_attr, "{}.useRotate".format(target_attr))

    cmds.connectAttr(
        "{}.outputMatrix".format(blend), "{}.offsetParentMatrix".format(node)
    )
def _create_bool_attribute(node, attribute, default_value):
    """Add a keyable boolean attribute to *node* and return its plug path."""
    cmds.addAttr(
        node,
        ln=attribute,
        at="bool",
        defaultValue=default_value,
        keyable=True,
    )
    return "{}.{}".format(node, attribute)
def _connect_driver_matrix_network(blend, node, driver, index, to_parent_local):
    """Wire one driver transform into slot *index* of the blendMatrix node."""
    # The multMatrix node will calculate the transformation to blend to when
    # driven by this driver transform.
    mult_node = cmds.createNode(
        "multMatrix", name="spaceswitch_{}_to_{}".format(node, driver)
    )
    driver_world_inverse = OpenMaya.MMatrix(
        cmds.getAttr("{}.worldInverseMatrix[0]".format(driver))
    )
    # Static offset between the node's exclusive matrix and the driver.
    offset = shortcuts.get_dag_path2(node).exclusiveMatrix() * driver_world_inverse
    cmds.setAttr("{}.matrixIn[0]".format(mult_node), list(offset), type="matrix")
    cmds.connectAttr(
        "{}.worldMatrix[0]".format(driver), "{}.matrixIn[1]".format(mult_node)
    )
    if to_parent_local:
        cmds.connectAttr(to_parent_local, "{}.matrixIn[2]".format(mult_node))
    cmds.connectAttr(
        "{}.matrixSum".format(mult_node),
        "{}.target[{}].targetMatrix".format(blend, index),
    )
def switch_space(node, attribute, space, create_keys=False):
    """Seamlessly switch between spaces

    :param node: Node to switch
    :param attribute: Space switching attribute on node
    :param space: Space index in the space attribute
    :param create_keys: True to create switching keys
    """
    # NOTE(review): create_keys is currently unused in this implementation —
    # confirm whether keyframing was ever wired up.
    world_matrix = cmds.xform(node, q=True, ws=True, m=True)
    cmds.setAttr("{}.{}".format(node, attribute), space)
    # Restore the captured world matrix so the node does not visually pop.
    cmds.xform(node, ws=True, m=world_matrix)
|
marcusmoller/pyorpg-server | src/datahandler.py | Python | mit | 40,001 | 0.00335 | import random
from database import *
from packettypes import *
from gamelogic import *
from objects import *
from constants import *
from utils import *
import globalvars as g
#debug
import time
class DataHandler():
def handleData(self, index, data):
    """Decode an inbound packet and dispatch it to the matching handler.

    Unknown packet types are treated as a hacking attempt.
    """
    jsonData = decodeJSON(data)
    packetType = jsonData[0]["packet"]
    # Handlers that consume the decoded JSON payload.
    payloadHandlers = {
        ClientPackets.CNewAccount: self.handleNewAccount,
        ClientPackets.CLogin: self.handleLogin,
        ClientPackets.CAddChar: self.handleAddChar,
        ClientPackets.CUseChar: self.handleUseChar,
        ClientPackets.CSayMsg: self.handleSayMsg,
        ClientPackets.CEmoteMsg: self.handleEmoteMsg,
        ClientPackets.CBroadcastMsg: self.handleBroadcastMsg,
        ClientPackets.CGlobalMsg: self.handleGlobalMsg,
        ClientPackets.CAdminMsg: self.handleAdminMsg,
        ClientPackets.CPlayerMsg: self.handlePlayerMsg,
        ClientPackets.CPlayerMove: self.handlePlayerMove,
        ClientPackets.CPlayerDir: self.handlePlayerDir,
        ClientPackets.CUseItem: self.handleUseItem,
        ClientPackets.CCast: self.handleCastSpell,
        ClientPackets.CTarget: self.handleTarget,
        ClientPackets.CPlayerInfoRequest: self.handlePlayerInfoRequest,
        ClientPackets.CWarpMeTo: self.handleWarpMeTo,
        ClientPackets.CWarpToMe: self.handleWarpToMe,
        ClientPackets.CWarpTo: self.handleWarpTo,
        ClientPackets.CSetSprite: self.handleSetSprite,
        ClientPackets.CRequestNewMap: self.handleRequestNewMap,
        ClientPackets.CMapData: self.handleMapData,
        ClientPackets.CNeedMap: self.handleNeedMap,
        ClientPackets.CSaveItem: self.handleSaveItem,
        ClientPackets.CEditSpell: self.handleEditSpell,
        ClientPackets.CSaveSpell: self.handleSaveSpell,
        ClientPackets.CEditNpc: self.handleEditNpc,
        ClientPackets.CSaveNpc: self.handleSaveNpc,
        ClientPackets.CSetAccess: self.handleSetAccess,
        ClientPackets.CGiveItem: self.handleGiveItem,
    }
    # Handlers that only need the connection index.
    indexHandlers = {
        ClientPackets.CGetClasses: self.handleGetClasses,
        ClientPackets.CAttack: self.handleAttack,
        ClientPackets.CSpells: self.handleSpells,
        ClientPackets.CMapGetItem: self.handleMapGetItem,
        ClientPackets.CMapReport: self.handleMapReport,
        ClientPackets.CMapRespawn: self.handleMapRespawn,
        ClientPackets.CWhosOnline: self.handleWhosOnline,
        ClientPackets.CRequestEditMap: self.handleRequestEditMap,
        ClientPackets.CRequestEditItem: self.handleRequestEditItem,
        ClientPackets.CRequestEditSpell: self.handleRequestEditSpell,
        ClientPackets.CRequestEditNpc: self.handleRequestEditNpc,
        ClientPackets.CQuit: self.handleQuit,
    }
    if packetType in payloadHandlers:
        payloadHandlers[packetType](index, jsonData)
    elif packetType in indexHandlers:
        indexHandlers[packetType](index)
    else:
        # Packet is unknown - hacking attempt
        hackingAttempt(index, 'Packet Modification')
def handleGetClasses(self, index):
    """Send the list of character classes, but only before the player enters the game."""
    if isPlaying(index):
        return
    sendNewCharClasses(index)
def handleNewAccount(self, index, jsonData):
    """Create a new account from a CNewAccount packet.

    Rejects requests from connections that are already playing or logged in,
    enforces a minimum credential length, and refuses duplicate account names.
    """
    name = jsonData[0]['name']
    password = jsonData[0]['password']
    if not isPlaying(index):
        if not isLoggedIn(index):
            # prevent hacking: too-short credentials are never legitimate
            if len(name) < 3 or len(password) < 3:
                # Fixed: was a bare Python 2 `print` to stdout; use the
                # server logger for consistency with the rest of the module.
                g.serverLogger.info('Rejected account creation: name/password too short')
                alertMsg(index, "Your name and password must be at least three characters in length.")
                return
            # check if account already exists
            if not accountExists(name):
                addAccount(index, name, password)
                g.serverLogger.info('Account ' + name + ' has been created')
                alertMsg(index, "Your account has been created!")
            else:
                g.serverLogger.info('Account name has already been taken!')
                alertMsg(index, "Sorry, that account name is already taken!")
''' Player login '''
def handleLogin(self, index, jsonData):
    """Authenticate an account (CLogin) and send its character list."""
    # Ignore login attempts from connections already playing or logged in.
    if isPlaying(index) or isLoggedIn(index):
        return
    plrName = jsonData[0]["name"]
    plrPassword = jsonData[0]["password"]
    # todo: check version
    # todo: is shutting down?
    if len(plrName) < 3 or len(plrPassword) < 3:
        alertMsg(index, "The acount name or password is too short!")
        return
    # (an explicit accountExists() check is unnecessary here — passwordOK
    # already fails for unknown accounts)
    if not passwordOK(plrName, plrPassword):
        alertMsg(index, "Wrong account name or password!")
        return
    if isMultiAccounts(plrName):
        alertMsg(index, "That account is already logged in!")
        g.conn.closeConnection(index)
        return
    # load the player
    loadPlayer(index, plrName)
    sendChars(index)
    g.connectionLogger.info(getPlayerLogin(index) + ' has logged in')
''' player creates a new character '''
def handleAddChar(self, index, jsonData):
if not isPlaying(index):
name = jsonData[0]["name"]
sex = jsonData[0]["sex"]
Class = jsonData[0]["class"]
charNum = jsonData[0]["slot"]
# prevent hacking
if len(name) < 3:
alertMsg(index, 'Character name must be at least three characters in length.')
return
#todo: check for certain letters
if charNum < 0 or charNum > MAX_CHARS:
alertMsg(index, 'Invalid CharNum')
return
#todo: check sex
if Class < 0 or Class > g.maxClasses:
alertMsg(index, 'Invalid Class')
return
# check if a character already exists in slot
if charExist(index, charNum):
alertMsg(index, 'Character already exists')
return
# check if name is in use
if findChar(name):
alertMsg(index, 'Sorry, but that name is in use!')
return
# everything went ok, add the character
addChar(index, name, |
WikipediaLibrary/TWLight | TWLight/users/oauth.py | Python | mit | 22,204 | 0.001711 | import logging
from mwoauth import ConsumerToken, Handshaker, AccessToken
from mwoauth.errors import OAuthException
import urllib.parse
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login, authenticate
from django.contrib.auth.models import User
from django.core.exceptions import DisallowedHost, PermissionDenied
from django.urls import reverse_lazy
from django.http import HttpResponseRedirect
from django.http.request import QueryDict
from django.views.generic.base import View
from django.utils.translation import get_language, gettext as _
from urllib.parse import urlencode
from .models import Editor
logger = logging.getLogger(__name__)
def _localize_oauth_redirect(redirect):
    """
    Given an appropriate mediawiki oauth handshake url, return one that will
    present the user with a login page of their preferred language.
    """
    logger.info("Localizing oauth handshake URL.")
    parsed = urllib.parse.urlparse(redirect)
    query = urllib.parse.parse_qs(parsed.query)
    # Rebuild the URL around Special:UserLogin, carrying the OAuth parameters
    # through returntoquery (hence the pre-encoded %26 / %3D separators).
    pieces = (
        parsed.scheme,
        "://",
        parsed.netloc,
        parsed.path,
        "?title=",
        "Special:UserLogin",
        "&uselang=",
        get_language(),
        "&returnto=",
        str(query["title"][0]),
        "&returntoquery=",
        "%26oauth_consumer_key%3D",
        str(query["oauth_consumer_key"][0]),
        "%26oauth_token%3D",
        str(query["oauth_token"][0]),
    )
    return "".join(pieces)
def _get_handshaker():
    """Build an mwoauth Handshaker from the configured consumer credentials."""
    token = ConsumerToken(
        settings.TWLIGHT_OAUTH_CONSUMER_KEY, settings.TWLIGHT_OAUTH_CONSUMER_SECRET
    )
    return Handshaker(settings.TWLIGHT_OAUTH_PROVIDER_URL, token)
def _dehydrate_token(token):
"""
Convert the request token into a dict suitable for storing in the session.
"""
session_token = {}
session_token["key"] = token.key
session_token["secret"] = token.secret
return session_token
def _rehydrate_token(token):
    """
    Convert the stored dict back into a request token that we can use for
    getting an access grant.
    """
    return ConsumerToken(token["key"], token["secret"])
class OAuthBackend(object):
def _get_username(self, identity):
# The Username is globally unique, but Wikipedia allows it to
# have characters that the Django username system rejects. However,
# wiki userID should be unique, and limited to ASCII.
return "{sub}".format(sub=identity["sub"])
def _create_user(self, identity):
    """Create a Django User from an OAuth identity dict and return it.

    The user gets an unusable password so it can only log in via OAuth.
    """
    # This can't be super informative because we don't want to log identities.
    logger.info("Creating user.")

    # A _meets_minimum_requirement(identity) gate used to be applied here but
    # is intentionally disabled: it needs reworking to check against
    # global_userinfo, and exception messages were not usable as template
    # context on the Django version this was written for.

    # -------------------------- Create the user ---------------------------
    # identity may legitimately lack an email address.
    email = identity.get("email")
    username = self._get_username(identity)

    # Since we are not providing a password argument, this will call
    # set_unusable_password, which is exactly what we want; users created
    # via OAuth should only be allowed to log in via OAuth.
    user = User.objects.create_user(username=username, email=email)

    logger.info("User user successfully created.")
    return user
def _create_editor(self, user, identity):
    """Create and persist an Editor profile linked to *user*; return it."""
    # ------------------------- Create the editor --------------------------
    logger.info("Creating editor.")
    editor = Editor()
    editor.user = user
    editor.wp_sub = identity["sub"]
    # update_from_wikipedia() also saves the editor.
    editor.update_from_wikipedia(identity, get_language())
    logger.info("Editor successfully created.")
    return editor
def _create_user_and_editor(self, identity):
    """Create a User plus its Editor profile; return (user, editor)."""
    user = self._create_user(identity)
    return user, self._create_editor(user, identity)
def _get_and_update_user_from_identity(self, identity):
    """
    If we have an Editor and User matching the identity returned by
    Wikipedia, update the editor with the identity parameters and return its
    associated user. If we don't, create an Editor and User, and return that
    user.

    If the wikipedia account does not meet our eligibility criteria, create
    a TWLight account if needed, but set it as inactive. Also deactivate
    any existing accounts that have become ineligible.

    Also return a boolean that is True if we created a user during this
    call and False if we did not.
    """
    logger.info("Attempting to update editor after OAuth login.")
    try:
        username = self._get_username(identity)
        user = User.objects.get(username=username)

        # This login path should only be used for accounts created via
        # Wikipedia login, which all have editor objects.
        if hasattr(user, "editor"):
            editor = user.editor
            lang = get_language()
            editor.update_from_wikipedia(
                identity, lang
            )  # This call also saves the editor
            logger.info("Editor updated.")
            created = False
        else:
            logger.warning(
                "A user tried using the Wikipedia OAuth "
                "login path but does not have an attached editor."
            )
            try:
                editor = self._create_editor(user, identity)
                created = True
            # Fixed: was a bare `except:` which also swallowed SystemExit
            # and KeyboardInterrupt; catch Exception and keep the cause.
            except Exception as exc:
                logger.exception("Editor creation failed during OAuth login.")
                raise PermissionDenied from exc

    except User.DoesNotExist:
        logger.info("Can't find user; creating one.")
        user, editor = self._create_user_and_editor(identity)
        created = True
    return user, created
def authenticate(self, request=None, access_token=None, handshaker=None):
logger.info("Authenticating user...")
if not request or not access_token or not handshaker:
logger.info(
"Missing OAuth authentication elements; falling back"
"to another authentication method."
)
# You must have meant to use a different authentication backend.
# Returning None will make Django keep going down its list of
# options.
return None
try:
assert isinstance(access_token, AccessToken)
except AssertionError as e:
logger.exception(e)
return None
# Get identifying information about the user. This doubles as a way
# to authenticate the access token, which only Wikimedia can do,
# and thereby to authenticate the user (which is hard for us to do as
# we have no password.)
logger.info("Identifying user...")
try:
identity = handshaker.identify(access_token, 15)
except OAuthException as e:
logger.warning(e)
messages.add_message(
request,
|
pythonprobr/notmagic | pt-br/baralho.py | Python | mit | 4,006 | 0.003744 | #!/usr/bin/env python3
"""
>>> baralho = Baralho()
>>> len(baralho)
52
>>> baralho[0]
Carta(valor='2', naipe='paus')
>>> baralho[-1]
Carta(valor='A', naipe='espadas')
>>> from random import choice
>>> choice(baralho) #doctest:+SKIP
Carta(valor='4', naipe='paus')
>>> choice(baralho) #doctest:+SKIP
Carta(valor='A', naipe='espadas')
>>> choice(baralho) #doctest:+SKIP
Carta(valor='8', naipe='espadas')
>>> baralho[:5] #doctest:+NORMALIZE_WHITESPACE
[Carta(valor='2', naipe='paus'), Carta(valor='3', naipe='paus'),
Carta(valor='4', naipe='paus'), Carta(valor='5', naipe='paus'),
Carta(valor='6', naipe='paus')]
>>> baralho[-3:] #doctest:+NORMALIZE_WHITESPACE
[Carta(valor='Q', naipe='espadas'),
Carta(valor='K', naipe='espadas'),
Carta(valor='A', naipe='espadas')]
>>> for carta in baralho: #doctest:+ELLIPSIS
... print(carta)
...
Carta(valor='2', naipe='paus')
Carta(valor='3', naipe='paus')
Carta(valor='4', naipe='paus')
...
Carta(valor='Q', naipe='espadas')
Carta(valor='K', naipe='espadas')
Carta(valor='A', naipe='espadas')
To generate a reversed listing:
::
>>> for carta in reversed(baralho): #doctest:+ELLIPSIS
... print(carta)
...
Carta(valor='A', naipe='espadas')
Carta(valor='K', naipe='espadas')
Carta(valor='Q', naipe='espadas')
...
Carta(valor='4', naipe='paus')
Carta(valor='3', naipe='paus')
Carta(valor='2', naipe='paus')
For a numbered listing, we use `enumerate`:
::
>>> for n, carta in enumerate(baralho, 1): #doctest:+ELLIPSIS
... print(format(n, '2'), carta)
...
1 Carta(valor='2', naipe='paus')
2 Carta(valor='3', naipe='paus')
3 Carta(valor='4', naipe='paus')
...
50 Carta(valor='Q', naipe='espadas')
51 Carta(valor='K', naipe='espadas')
52 Carta(valor='A', naipe='espadas')
Get all the Jacks in a baralho.
::
>>> [carta for carta in baralho if carta.valor=='J']
[Carta(valor='J', naipe='paus'), Carta(valor='J', naipe='ouros'), Carta(valor='J', naipe='copas'), Carta(valor='J', naipe='espadas')]
Ranking by alternate color naipes: ouros (lowest), followed by paus, copas, and espadas (highest).
>>> hand = [Carta(valor='2', naipe='ouros'), Carta(valor='2', naipe='paus'),
... Carta(valor='3', naipe='ouros'), Carta(valor='3', naipe='paus'),
... Carta(valor='A', naipe='espadas')]
>>> [cores_alternadas(carta) for carta in hand]
[0, 1, 4, 5, 51]
>>> hand = [Carta(valor='A', naipe='espadas'),
... Carta(valor='K', naipe='ouros'),
... Carta(valor='A', naipe='ouros')]
>>> for carta in sorted(hand,key=cores_alternadas):
... print(carta)
Carta(valor='K', naipe='ouros')
Carta(valor='A', naipe='ouros')
Carta(valor='A', naipe='espadas')
>>> for carta in sorted(baralho, key=cores_alternadas): #doctest:+ELLIPSIS
... print(carta)
Carta(valor='2', naipe='ouros')
Carta(valor='2', naipe='paus')
Carta(valor='2', naipe='copas')
Carta(valor='2', naipe='espadas')
Carta(valor='3', naipe='ouros')
...
Carta(valor='A', naipe='copas')
Carta(valor='A', naipe='espadas')
"""
import collections
# A playing card: a value ("2".."10", "J", "Q", "K", "A") plus a suit name.
Carta = collections.namedtuple('Carta', ['valor', 'naipe'])


class Baralho:
    """A 52-card deck, ordered suit by suit from '2 de paus' to 'A de espadas'."""

    valores = [str(n) for n in range(2, 11)] + list('JQKA')
    naipes = 'paus ouros copas espadas'.split()

    def __init__(self):
        # Build the deck suit-major: all values of each suit in turn.
        self.cartas = []
        for naipe in self.naipes:
            for valor in self.valores:
                self.cartas.append(Carta(valor, naipe))

    def __len__(self):
        return len(self.cartas)

    def __getitem__(self, position):
        # Delegating to the list gives indexing, slicing and iteration.
        return self.cartas[position]


def cores_alternadas(carta):
    """Rank a card with alternating-color suit order: ouros < paus < copas < espadas."""
    ordem_naipes = ['ouros', 'paus', 'copas', 'espadas']
    indice_valor = Baralho.valores.index(carta.valor)
    return indice_valor * len(ordem_naipes) + ordem_naipes.index(carta.naipe)
|
openprocurement/openprocurement.tender.belowthreshold | openprocurement/tender/belowthreshold/tests/chronograph.py | Python | apache-2.0 | 5,547 | 0.001803 | # -*- coding: utf-8 -*-
import unittest
from openprocurement.api.tests.base import snitch
from openprocurement.tender.belowthreshold.tests.base import (
TenderContentWebTest,
test_lots,
test_bids,
test_organization
)
from openprocurement.tender.belowthreshold.tests.chronograph_blanks import (
# TenderSwitchTenderingResourceTest
switch_to_tendering_by_tenderPeriod_startDate,
# TenderSwitchQualificationResourceTest
switch_to_qualification,
# TenderSwitchAuctionResourceTest
switch_to_auction,
# TenderSwitchUnsuccessfulResourceTest
| switch_to_unsuccessful,
# TenderAucti | onPeriodResourceTest
set_auction_period,
reset_auction_period,
# TenderComplaintSwitchResourceTest
switch_to_ignored_on_complete,
switch_from_pending_to_ignored,
switch_from_pending,
switch_to_complaint,
# TenderAwardComplaintSwitchResourceTest
award_switch_to_ignored_on_complete,
award_switch_from_pending_to_ignored,
award_switch_from_pending,
award_switch_to_complaint,
)
class TenderSwitchTenderingResourceTest(TenderContentWebTest):
    """Chronograph: tender switches to tendering once tenderPeriod.startDate arrives."""

    test_switch_to_tendering_by_tenderPeriod_startDate = snitch(switch_to_tendering_by_tenderPeriod_startDate)
class TenderSwitchQualificationResourceTest(TenderContentWebTest):
    """Chronograph: tendering tender with a single bid moves to qualification."""

    initial_status = 'active.tendering'
    initial_bids = test_bids[:1]  # exactly one bid

    test_switch_to_qualification = snitch(switch_to_qualification)
class TenderSwitchAuctionResourceTest(TenderContentWebTest):
    """Chronograph: tendering tender with multiple bids moves to auction."""

    initial_status = 'active.tendering'
    initial_bids = test_bids  # all fixture bids

    test_switch_to_auction = snitch(switch_to_auction)
class TenderSwitchUnsuccessfulResourceTest(TenderContentWebTest):
    """Chronograph: tendering tender with no bids becomes unsuccessful."""

    initial_status = 'active.tendering'

    test_switch_to_unsuccessful = snitch(switch_to_unsuccessful)
class TenderLotSwitchQualificationResourceTest(TenderSwitchQualificationResourceTest):
    """Same qualification-switch checks, run on a tender that has lots."""

    initial_lots = test_lots
class TenderLotSwitchAuctionResourceTest(TenderSwitchAuctionResourceTest):
    """Same auction-switch checks, run on a tender that has lots."""

    initial_lots = test_lots
class TenderLotSwitchUnsuccessfulResourceTest(TenderSwitchUnsuccessfulResourceTest):
    """Same unsuccessful-switch checks, run on a tender that has lots."""

    initial_lots = test_lots
class TenderAuctionPeriodResourceTest(TenderContentWebTest):
    """Chronograph: setting and resetting the tender's auctionPeriod."""

    initial_bids = test_bids

    test_set_auction_period = snitch(set_auction_period)
    test_reset_auction_period = snitch(reset_auction_period)
class TenderLotAuctionPeriodResourceTest(TenderAuctionPeriodResourceTest):
    """Same auctionPeriod checks, run on a tender that has lots."""

    initial_lots = test_lots
class TenderComplaintSwitchResourceTest(TenderContentWebTest):
    """Chronograph: tender-level complaint status transitions."""

    test_switch_to_ignored_on_complete = snitch(switch_to_ignored_on_complete)
    test_switch_from_pending_to_ignored = snitch(switch_from_pending_to_ignored)
    test_switch_from_pending = snitch(switch_from_pending)
    test_switch_to_complaint = snitch(switch_to_complaint)
class TenderLotComplaintSwitchResourceTest(TenderComplaintSwitchResourceTest):
    """Same complaint-switch checks, run on a tender that has lots."""

    initial_lots = test_lots
class TenderAwardComplaintSwitchResourceTest(TenderContentWebTest):
    """Chronograph: award-level complaint status transitions.

    setUp creates one pending award (no lot) so the complaint blanks have a
    target; self.award_id holds its id.
    """

    initial_status = 'active.qualification'
    initial_bids = test_bids

    def setUp(self):
        super(TenderAwardComplaintSwitchResourceTest, self).setUp()
        # Create award using the chronograph/service token, then restore the
        # previous authorization so the tests run as the original user.
        auth = self.app.authorization
        self.app.authorization = ('Basic', ('token', ''))
        response = self.app.post_json('/tenders/{}/awards'.format(
            self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': self.initial_bids[0]['id']}})
        award = response.json['data']
        self.award_id = award['id']
        self.app.authorization = auth

    test_award_switch_to_ignored_on_complete = snitch(award_switch_to_ignored_on_complete)
    test_award_switch_from_pending_to_ignored = snitch(award_switch_from_pending_to_ignored)
    test_award_switch_from_pending = snitch(award_switch_from_pending)
    test_award_switch_to_complaint = snitch(award_switch_to_complaint)
class TenderLotAwardComplaintSwitchResourceTest(TenderAwardComplaintSwitchResourceTest):
    """Award complaint switches on a tender with lots.

    Note: setUp deliberately calls super() with the *parent* class name, which
    skips the parent's setUp (the non-lot award creation) and runs the
    grandparent's setUp instead, so the award here carries a lotID.
    """

    initial_lots = test_lots

    def setUp(self):
        # Intentionally bypasses TenderAwardComplaintSwitchResourceTest.setUp.
        super(TenderAwardComplaintSwitchResourceTest, self).setUp()
        # Create award bound to the first bid's related lot.
        auth = self.app.authorization
        self.app.authorization = ('Basic', ('token', ''))
        response = self.app.post_json('/tenders/{}/awards'.format(self.tender_id), {'data': {
            'suppliers': [test_organization],
            'status': 'pending',
            'bid_id': self.initial_bids[0]['id'],
            'lotID': self.initial_bids[0]['lotValues'][0]['relatedLot']
        }})
        award = response.json['data']
        self.award_id = award['id']
        self.app.authorization = auth
def suite():
    """Assemble the chronograph test cases into a single unittest suite.

    (TenderSwitchTendering* and the AuctionPeriod classes are intentionally
    not included, matching the original listing.)
    """
    cases = (
        TenderAwardComplaintSwitchResourceTest,
        TenderComplaintSwitchResourceTest,
        TenderLotAwardComplaintSwitchResourceTest,
        TenderLotComplaintSwitchResourceTest,
        TenderLotSwitchAuctionResourceTest,
        TenderLotSwitchQualificationResourceTest,
        TenderLotSwitchUnsuccessfulResourceTest,
        TenderSwitchAuctionResourceTest,
        TenderSwitchQualificationResourceTest,
        TenderSwitchUnsuccessfulResourceTest,
    )
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
if __name__ == '__main__':
    # Allow running this module directly; defaultTest points at suite() above.
    unittest.main(defaultTest='suite')
|
romejoe/ExampleCluster | Cluster.py | Python | gpl-2.0 | 1,560 | 0.042949 | from abc import ABCMeta, abstractmethod
class Cluster:
    """Abstract helper for agglomerative (bottom-up) clustering.

    Subclasses are expected to provide the per-cluster summary
    (_getClusterInfo) and the inter-cluster distance (_getClusterDistance);
    Cluster() then repeatedly merges the closest pair until one cluster
    remains.
    """
    __metaclass__ = ABCMeta  # Python 2 style ABC declaration

    @abstractmethod
    def _getClusterInfo(cluster):
        # NOTE(review): declared without `self`; presumably intended as an
        # instance method — confirm the intended signature.
        pass

    def _getClusterDistance(clusterA, clusterAInfo, clusterB, clusterBInfo):
        # NOTE(review): looks like part of the abstract interface but lacks
        # @abstractmethod, and also lacks `self`.
        pass

    def Cluster(clusters, verbose=False):
        # NOTE(review): body uses `self` but the signature does not take it,
        # and the method shadows the class name — presumably
        # `def Cluster(self, clusters, verbose=False)` was intended.
        iterCount = 0
        # Merge until a single cluster remains.
        while( len(clusters) > 1):
            if verbose:
                print "===Iter Start==="  # Python 2 print statements
                print "Iter:", iterCount
            rangeInfo = range(0, len(clusters))
            clusterInfo = []
            for cluster in clusters:
                clusterInfo.append(self._getClusterInfo(cluster))
            MergeCandidate = None
            # NOTE(review): itertools.combinations expects an int `r` as its
            # second argument; passing `rangeInfo` (a list) raises TypeError.
            # Presumably combinations(rangeInfo, 2) was intended. `itertools`
            # is also never imported in this file.
            for combo in itertools.combinations(rangeInfo, rangeInfo):
                a = combo[0]
                b = combo[1]
                if a == b:
                    continue
                # (distance, indexA, indexB); keep the smallest distance seen.
                tmp = (self._getClusterDistance(clusters[a], clusterInfo[a], clusters[b], clusterInfo[b]), a, b)
                if MergeCandidate == None or MergeCandidate[0] > tmp[0]:
                    MergeCandidate = tmp
            if verbose:
                print MergeCandidate
            a = MergeCandidate[1]
            b = MergeCandidate[2]
            # Ensure a < b so deleting index b doesn't shift index a.
            if a > b:
                tmp = a
                a = b
                b = tmp
            # Merge cluster b into cluster a, then drop b.
            clusters[a] = clusters[a] + clusters[b]
            del clusters[b]
            if verbose:
                print "clusters:"
                for x in clusters:
                    print x
            iterCount = iterCount + 1
            if verbose:
                print "===Iter Done==="
        return clusters

    def ClusterCSVFile(filepath, verbose=False):
        # Load one point per CSV row (all columns parsed as floats) and
        # cluster the resulting singletons.
        # NOTE(review): same missing-`self` problem as above, and `csv` is
        # never imported in this file.
        points = []
        with open(filepath, 'r') as csvfile:
            pointReader = csv.reader(csvfile)
            for row in pointReader:
                points.append([list(float(row[i]) for i in range(0, len(row)))])
        clusters = [ list(point) for point in points]
        return self.Cluster(clusters, verbose)
|
superdesk/superdesk-core | apps/common/components/base_component.py | Python | agpl-3.0 | 653 | 0 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Cop | yright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefab | ric.org/superdesk/license
class BaseComponent:
    """This is a basic interface for defining components.

    The only requirement is to implement the name method that
    uniquely identifies a component. It should also define other
    methods that implement the component functionality.
    """

    @classmethod
    def name(cls):
        # Subclasses must override this unique-identifier hook.
        raise NotImplementedError()
|
# ok-client test specification for Question 2: two sqlite session cases run
# against hw1.db. (Garbled expected-output lines from a bad extraction have
# been restored to the plain "value|column" sqlite output format.)
test = {
  'name': 'Question 2',
  'points': 2,
  'suites': [
    {
      'type': 'sqlite',
      'setup': r"""
      sqlite> .open hw1.db
      """,
      'cases': [
        {
          'code': r"""
          sqlite> select * from colors;
          red|primary
          blue|primary
          green|secondary
          yellow|primary
          """,
        },
        {
          'code': r"""
          sqlite> select color from colors;
          red
          blue
          green
          yellow
          """,
        },
      ],
    }
  ]
}
|
VitalPet/addons-onestein | auth_oauth_disable_login_with_odoo/__manifest__.py | Python | agpl-3.0 | 394 | 0 | # -*- coding: utf-8 -*-
# Copyright 2016 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).

# Odoo module manifest. (A garbled URL and license line from a bad
# extraction have been restored.)
{
    'name': 'OAuth2 Disable Login with Odoo.com',
    'version': '10.0.1.0.0',
    'category': 'Tools',
    'author': 'Onestein',
    'license': 'AGPL-3',
    'depends': ['auth_oauth'],
    'data': [
        'data/auth_oauth_data.xml',
    ],
}
|
scylladb/scylla-cluster-tests | sdcm/sct_events/prometheus.py | Python | agpl-3.0 | 1,895 | 0.003694 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2020 ScyllaDB
"""
This is an example of how we'll send info into Prometheus.
Currently it's not in use, since the data we want to show, doesn't fit Prometheus model,
we are using the GrafanaAnnotator
"""
import logging
import threading
from typing import Tuple, Any
from sdcm.prometheus import nemesis_metrics_obj
from sdcm.sct_events.events_processes import BaseEventsProcess, verbose_suppress
LOGGER = logging.getLogger(__name__)
class PrometheusDumper(BaseEventsProcess[Tuple[str, Any], None], threading.Thread):
    """Events process that mirrors every SCT event into a Prometheus gauge.

    One gauge is labelled by event identity (class, type, subtype, severity,
    node); its value is set to the event's timestamp.
    """

    def run(self) -> None:
        events_gauge = \
            nemesis_metrics_obj().create_gauge("sct_events_gauge",
                                               "Gauge for SCT events",
                                               ["event_type", "type", "subtype", "severity", "node", ])
        # Consume events until the inbound stream ends; a failure on one
        # event is logged and suppressed so the loop keeps running.
        for event_tuple in self.inbound_events():
            with verbose_suppress("PrometheusDumper failed to process %s", event_tuple):
                event_class, event = event_tuple  # try to unpack event from EventsDevice
                events_gauge.labels(event_class,  # pylint: disable=no-member
                                    getattr(event, "type", ""),
                                    getattr(event, "subtype", ""),
                                    event.severity,
                                    getattr(event, "node", "")).set(event.event_timestamp)
|
uclouvain/osis_louvain | assessments/business/score_encoding_export.py | Python | agpl-3.0 | 7,914 | 0.00278 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.http import HttpResponse
from openpyxl import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from openpyxl.styles import Color, Style, PatternFill, Font, colors
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from base import models as mdl
from base.models.enums import exam_enrollment_justification_type
HEADER = ['academic_year', 'session_title', 'learning_unit', 'program', 'registration_number', 'lastname', 'firstname',
'email', 'numbered_score', 'justification', 'end_date']
JUSTIFICATION_ALIASES = {
exam_enrollment_justification_type.ABSENCE_JUSTIFIED : "M",
exam_enrollment_justification_type.ABSENCE_UNJUSTIFIED : "S",
exam_enrollment_justification_type.CHEATING : "T",
}
def export_xls(exam_enrollments):
workbook = Workbook()
worksheet = workbook.active
worksheet.append([str(exam_enrollments[0].learning_unit_enrollment.learning_unit_year)])
worksheet.append([str('Session: %s' % exam_enrollments[0].session_exam.number_session)])
worksheet.append([str('')])
__display_creation_date_with_message_about_state(worksheet, row_number=4)
__display_warning_about_students_deliberated(worksheet, row_number=5)
worksheet.append([str('')])
__display_legends(worksheet)
worksheet.append([str('')])
__columns_resizing(worksheet)
header_translate_list = [str(_(elem)) for elem in HEADER]
worksheet.append(header_translate_list)
row_number = 11
for exam_enroll in exam_enrollments:
student = exam_enroll.learning_unit_enrollment.student
offer = exam_enroll.learning_unit_enrollment.offer
person = mdl.person.find_by_id(student.person.id)
end_date = __get_session_exam_deadline(exam_enroll)
score = None
if exam_enroll.score_final is not None:
if exam_enroll.session_exam.learning_unit_year.decimal_scores:
score = "{0:.2f}".format(exam_enroll.score_final)
else:
score = "{0:.0f}".format(exam_enroll.score_final)
justification = JUSTIFICATION_ALIASES.get(exam_enroll.justification_final, "")
worksheet.append([str(exam_enroll.learning_unit_enrollment.learning_unit_year.academic_year),
str(exam_enroll.session_exam.number_session),
exam_enroll.session_exam.learning_unit_year.acronym,
offer.acronym,
student.registration_id,
person.last_name,
| person.first_name,
person.email,
score,
str(justification),
end_date])
row_number += 1
__coloring_non_edita | ble(worksheet, row_number, score, exam_enroll.justification_final)
lst_exam_enrollments = list(exam_enrollments)
number_session = lst_exam_enrollments[0].session_exam.number_session
learn_unit_acronym = lst_exam_enrollments[0].session_exam.learning_unit_year.acronym
academic_year = lst_exam_enrollments[0].learning_unit_enrollment.learning_unit_year.academic_year
filename = "session_%s_%s_%s.xlsx" % (str(academic_year.year), str(number_session), learn_unit_acronym)
response = HttpResponse(save_virtual_workbook(workbook), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
def __columns_resizing(ws):
"""
Definition of the columns sizes
"""
col_academic_year = ws.column_dimensions['A']
col_academic_year.width = 18
col_academic_year = ws.column_dimensions['C']
col_academic_year.width = 18
col_academic_year = ws.column_dimensions['E']
col_academic_year.width = 18
col_last_name = ws.column_dimensions['F']
col_last_name.width = 25
col_first_name = ws.column_dimensions['G']
col_first_name.width = 25
col_email = ws.column_dimensions['H']
col_email.width = 30
col_note = ws.column_dimensions['I']
col_note.width = 15
col_note = ws.column_dimensions['J']
col_note.width = 15
col_note = ws.column_dimensions['K']
col_note.width = 15
def __coloring_non_editable(ws, row_number, score, justification):
"""
Coloring of the non-editable columns
"""
pattern_fill_grey = PatternFill(patternType='solid', fgColor=Color('C1C1C1'))
style_no_modification = Style(fill=pattern_fill_grey)
column_number = 1
while column_number < 12:
if column_number < 9 or column_number > 10:
ws.cell(row=row_number, column=column_number).style = style_no_modification
else:
if not(score is None and justification is None):
ws.cell(row=row_number, column=9).style = style_no_modification
ws.cell(row=row_number, column=10).style = style_no_modification
column_number += 1
def __display_creation_date_with_message_about_state(ws, row_number):
date_format = str(_('date_format'))
printing_date = timezone.now()
printing_date = printing_date.strftime(date_format)
ws.cell(row=row_number, column=1).value = str('%s' % (_('warn_user_data_can_change') % printing_date))
ws.cell(row=row_number, column=1).font = Font(color=colors.RED)
def __display_warning_about_students_deliberated(ws, row_number):
ws.cell(row=row_number, column=1).value = str(_('students_deliberated_are_not_shown'))
ws.cell(row=row_number, column=1).font = Font(color=colors.RED)
def __display_legends(ws):
ws.append([
str(_('justification')),
str(_('justification_values_accepted') % mdl.exam_enrollment.justification_label_authorized())
])
ws.append([
str(''),
str(_('justification_other_values') % justification_other_values())
])
ws.append([
str(_('numbered_score')),
str(_('score_legend') % "0 - 20")
])
def justification_other_values():
return "%s, %s" % (_('unjustified_absence_export_legend'),
_('justified_absence_export_legend'))
def __get_session_exam_deadline(exam_enroll):
date_format = str(_('date_format'))
deadline = None
session_exam_deadline = mdl.exam_enrollment.get_session_exam_deadline(exam_enroll)
if session_exam_deadline:
deadline = session_exam_deadline.deadline_tutor_computed if session_exam_deadline.deadline_tutor_computed else\
session_exam_deadline.deadline
return deadline.strftime(date_format) if deadline else "-"
|
Sciprios/SatisfiabilitySimulator | PartyProblemSimulator/BooleanEquation/unit_tests/test_nde_And.py | Python | mit | 1,504 | 0.003324 | from unittest.mock import MagicMock, Mock
from unittest import TestCase
from PartyProblemSimulator.BooleanEquation.BooleanNode import BooleanNode
from PartyProblemSimulator.BooleanEquation.AndNode import AndNode
class aBooleanNode(BooleanNode): # pragma: no cover
""" This class is a boolean node. """
def say(self):
return type(self)
def evaluate(self, input_vector):
return True
class TestAndNode(TestCase):
""" Tests the AndNode class. """
def test_evaluation(self):
""" Test the evaluation of the and function. """
fake_true_child = Mock() # A child to return true.
| fake_false_child = Mock() # A child to return false.
nde = AndNode(aBooleanNode(), aBooleanNode()) # Instantiate a node with replaceable children.
fake_true_child.evaluate = MagicMock(return_value=True)
fake_false_child.evaluate = MagicMock(return_value=False)
# 1 AND 1
nde._lhs_child = fake_true_child
nde._rhs_child = fake_true_child
self.assertTrue(nde.evaluate({}))
# 0 AND 0
nde._lhs_child = fake_false_chil | d
nde._rhs_child = fake_false_child
self.assertFalse(nde.evaluate({}))
# 1 AND 0
nde._lhs_child = fake_true_child
nde._rhs_child = fake_false_child
self.assertFalse(nde.evaluate({}))
# 0 AND 1
nde._lhs_child = fake_false_child
nde._rhs_child = fake_true_child
self.assertFalse(nde.evaluate({})) |
maysara/pandora_image | pandora/document/managers.py | Python | gpl-3.0 | 3,174 | 0.002205 | # -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from django.db.models import Q, Manager
def parseCondition(condition, user):
'''
'''
k = condition.get('key', 'name')
k = {
'user': 'user__username',
}.get(k, k)
if not k:
k = 'name'
v = condition['value']
op = condition.get('operator')
if not op:
op = '='
if op.startswith('!'):
op = op[1:]
exclude = True
else:
exclude = False
if k == 'id':
try:
public_id = v.split(':')
username = public_id[0]
name = ":".join(public_id[1:])
extension = name.split('.')
name = '.'.join(extension[:-1])
extension = extension[-1].lower()
q = Q(user__username=username, name=name, extension=extension)
except:
q = Q(id__in=[])
return q
if isinstance(v, bool): #featured and public flag
key = k
else:
key = "%s%s" % (k, {
'==': '__iexact',
'^': '__istartsw | ith',
'$': '__iendswith',
}.get(op, '__icontains'))
key = str(key)
if exclude:
q = ~Q(**{key: v})
else:
q = Q(**{key: v})
retu | rn q
def parseConditions(conditions, operator, user):
'''
conditions: [
{
value: "war"
}
{
key: "year",
value: "1970-1980,
operator: "!="
},
{
key: "country",
value: "f",
operator: "^"
}
],
operator: "&"
'''
conn = []
for condition in conditions:
if 'conditions' in condition:
q = parseConditions(condition['conditions'],
condition.get('operator', '&'), user)
if q:
conn.append(q)
pass
else:
conn.append(parseCondition(condition, user))
if conn:
q = conn[0]
for c in conn[1:]:
if operator == '|':
q = q | c
else:
q = q & c
return q
return None
class DocumentManager(Manager):
def get_query_set(self):
return super(DocumentManager, self).get_query_set()
def find(self, data, user):
'''
query: {
conditions: [
{
value: "war"
}
{
key: "year",
value: "1970-1980,
operator: "!="
},
{
key: "country",
value: "f",
operator: "^"
}
],
operator: "&"
}
'''
#join query with operator
qs = self.get_query_set()
conditions = parseConditions(data['query'].get('conditions', []),
data['query'].get('operator', '&'),
user)
if conditions:
qs = qs.filter(conditions)
return qs
|
kklmn/xrt | examples/withShadow/04_06/04_dE_VCM_bending.py | Python | mit | 4,894 | 0.002452 | # -*- coding: utf-8 -*-
r"""
Bending of collimating mirror
-----------------------------
Uses :mod:`shadow` backend.
File: `\\examples\\withShadow\\03\\03_DCM_energy.py`
Influence onto energy resolution
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Pictures after monochromator,
:ref:`type 2 of global normalization<globalNorm>`. The nominal radius is 7.4
km. Watch the energy distribution when the bending radius is smaller or greater
than the nominal one.
+---------+---------+---------+---------+
| |VCMR1| | |VCMR2| | |VCMR3| | |
+---------+---------+---------+ |VCMR4| |
| |VCMR7| | |VCMR6| | |VCMR5| | |
+---------+---------+---------+---------+
.. |VCMR1| image:: _images/03VCM_R0496453_norm2.*
:scale: 35 %
.. |VCMR2| image:: _images/03VCM_R0568297_norm2.*
:scale: 35 %
.. |VCMR3| image:: _images/03VCM_R0650537_norm2.*
:scale: 35 %
.. |VCMR4| image:: _images/03VCM_R0744680_norm2.*
:scale: 35 %
:align: middle
.. |VCMR5| image:: _images/03VCM_R0852445_norm2.*
:scale: 35 %
.. |VCMR6| image:: _images/03VCM_R0975806_norm2.*
:scale: 35 %
.. |VCMR7| image:: _images/03VCM_R1117020_norm2.*
:scale: 35 %
Influence onto focusing
~~~~~~~~~~~~~~~~~~~~~~~
Pictures at the sample position,
:ref:`type 1 of global normal | ization<globalNorm>`
+----------+----------+----------+----------+
| |VCMRF1| | |VCMRF2| | |VCMRF3| | |
+----------+---------- | +----------+ |VCMRF4| |
| |VCMRF7| | |VCMRF6| | |VCMRF5| | |
+----------+----------+----------+----------+
.. |VCMRF1| image:: _images/04VCM_R0496453_norm1.*
:scale: 35 %
.. |VCMRF2| image:: _images/04VCM_R0568297_norm1.*
:scale: 35 %
.. |VCMRF3| image:: _images/04VCM_R0650537_norm1.*
:scale: 35 %
.. |VCMRF4| image:: _images/04VCM_R0744680_norm1.*
:scale: 35 %
:align: middle
.. |VCMRF5| image:: _images/04VCM_R0852445_norm1.*
:scale: 35 %
.. |VCMRF6| image:: _images/04VCM_R0975806_norm1.*
:scale: 35 %
.. |VCMRF7| image:: _images/04VCM_R1117020_norm1.*
:scale: 35 %
"""
__author__ = "Konstantin Klementiev"
__date__ = "1 Mar 2012"
import sys
sys.path.append(r"c:\Alba\Ray-tracing\with Python")
import numpy as np
import xrt.plotter as xrtp
import xrt.runner as xrtr
import xrt.backends.shadow as shadow
def main():
plot1 = xrtp.XYCPlot('star.03')
plot1.caxis.offset = 6000
plot2 = xrtp.XYCPlot('star.04')
plot2.caxis.offset = 6000
plot1.xaxis.limits = [-15, 15]
plot1.yaxis.limits = [-15, 15]
plot1.yaxis.factor *= -1
plot2.xaxis.limits = [-1, 1]
plot2.yaxis.limits = [-1, 1]
plot2.yaxis.factor *= -1
textPanel1 = plot1.fig.text(
0.89, 0.82, '', transform=plot1.fig.transFigure,
size=14, color='r', ha='center')
textPanel2 = plot2.fig.text(
0.89, 0.82, '', transform=plot2.fig.transFigure,
size=14, color='r', ha='center')
#==========================================================================
threads = 4
#==========================================================================
start01 = shadow.files_in_tmp_subdirs('start.01', threads)
start04 = shadow.files_in_tmp_subdirs('start.04', threads)
rmaj0 = 476597.0
shadow.modify_input(start04, ('R_MAJ', str(rmaj0)))
angle = 4.7e-3
tIncidence = 90 - angle * 180 / np.pi
shadow.modify_input(
start01, ('T_INCIDENCE', str(tIncidence)),
('T_REFLECTION', str(tIncidence)))
shadow.modify_input(
start04, ('T_INCIDENCE', str(tIncidence)),
('T_REFLECTION', str(tIncidence)))
rmirr0 = 744680.
def plot_generator():
for rmirr in np.logspace(-1., 1., 7, base=1.5) * rmirr0:
shadow.modify_input(start01, ('RMIRR', str(rmirr)))
filename = 'VCM_R%07i' % rmirr
filename03 = '03' + filename
filename04 = '04' + filename
plot1.title = filename03
plot2.title = filename04
plot1.saveName = [filename03 + '.pdf', filename03 + '.png']
plot2.saveName = [filename04 + '.pdf', filename04 + '.png']
# plot1.persistentName = filename03 + '.pickle'
# plot2.persistentName = filename04 + '.pickle'
textToSet = 'collimating\nmirror\n$R =$ %.1f km' % (rmirr * 1e-5)
textPanel1.set_text(textToSet)
textPanel2.set_text(textToSet)
yield
def after():
# import subprocess
# subprocess.call(["python", "05-VFM-bending.py"],
# cwd='/home/kklementiev/Alba/Ray-tracing/with Python/05-VFM-bending')
pass
xrtr.run_ray_tracing(
[plot1, plot2], repeats=640, updateEvery=2,
energyRange=[5998, 6002], generator=plot_generator, threads=threads,
globalNorm=True, afterScript=after, backend='shadow')
#this is necessary to use multiprocessing in Windows, otherwise the new Python
#contexts cannot be initialized:
if __name__ == '__main__':
main()
|
SuperTux/flexlay | flexlay/wip/sprite_stroke_drawer.py | Python | gpl-3.0 | 5,541 | 0.00397 | # Flexlay - A Generic 2D Game Editor
# Copyright (C) 2014 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flexlay.wip.bitmap_layer import BitmapLayer
from flexlay.blend_func import BlendFunc
from flexlay import DrawerProperties
from flexlay.math import Rect, Point, Size, Origin
class Surface:
pass
class SpriteStrokeDrawer:
def __init__(self, drawer):
self.mode = SpriteStrokeDrawer.DM_NORMAL
self.drawer = drawer
def draw(self, stroke, gc):
if DrawerProperties.current.get_brush().is_null() or stroke.get_dab_count() == 0:
| return
dabs = stroke.get_interpolated_dabs(DrawerProperties.current.get_spacing() *
DrawerProperties.current.get_size(),
DrawerProperties.current.get_spacing() *
Dr | awerProperties.current.get_size())
for i, dab in enumerate(self.dabs):
sprite = DrawerProperties.current.get_brush().get_sprite()
color = DrawerProperties.current.get_color()
sprite.set_color(color)
sprite.set_alpha((color.get_alpha() / 255.0) * dab.pressure)
sprite.set_scale(DrawerProperties.current.get_size() * dab.pressure,
DrawerProperties.current.get_size() * dab.pressure)
if gc is not None:
# DO Multipass:
# 1: GL_ZERO, GL_DST_ALPHA
# 2: GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA
# brush.set_blend_func_separate(BlendFunc.zero, BlendFunc.dst_alpha,
# BlendFunc.zero, BlendFunc.one)
# brush.draw(dab.pos.x, dab.pos.y, gc)
if self.mode == SpriteStrokeDrawer.DM_NORMAL:
sprite.set_blend_func_separate(BlendFunc.src_alpha, BlendFunc.one_minus_src_alpha,
BlendFunc.one, BlendFunc.one_minus_src_alpha)
sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
elif self.mode == SpriteStrokeDrawer.DM_ADDITION:
sprite.set_blend_func_separate(BlendFunc.src_alpha, BlendFunc.one,
BlendFunc.zero, BlendFunc.one)
# BlendFunc.one, BlendFunc.one_minus_src_alpha)
sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
elif self.mode == SpriteStrokeDrawer.DM_ERASE:
sprite.set_blend_func(BlendFunc.zero, BlendFunc.one_minus_src_alpha)
sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
elif self.mode == SpriteStrokeDrawer.DM_SMUDGE:
if dab != dabs[0]:
canvas = BitmapLayer.current.get_canvas()
buffer = canvas.get_pixeldata(
Rect(Point(int(self.dabs[i - 1].pos.x) - sprite.width / 2,
int(self.dabs[i - 1].pos.y) - sprite.height / 2),
Size(sprite.width, sprite.height)))
surface = Surface(buffer)
# surface.set_blend_func_separate(BlendFunc.src_alpha, BlendFunc.one_minus_src_alpha,
# BlendFunc.one, BlendFunc.zero)
surface.set_alignment(Origin.center)
surface.set_alpha(0.5)
# surface.set_scale(DrawerProperties.current.get_size(),
# DrawerProperties.current.get_size())
surface.draw(dab.pos.x, dab.pos.y, gc.gc)
else:
print("Error: SpriteStrokeDrawer: Unknown draw mode: ", self.mode)
else:
if self.mode == SpriteStrokeDrawer.DM_NORMAL:
sprite.set_blend_func(BlendFunc.src_alpha, BlendFunc.one_minus_src_alpha)
sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
elif self.mode == SpriteStrokeDrawer.DM_ADDITION:
sprite.set_blend_func(BlendFunc.src_alpha, BlendFunc.one)
sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
elif self.mode == SpriteStrokeDrawer.DM_ERASE:
sprite.set_blend_func(BlendFunc.zero, BlendFunc.one_minus_src_alpha)
sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
elif self.mode == SpriteStrokeDrawer.DM_SMUDGE:
sprite.set_blend_func(BlendFunc.src_alpha, BlendFunc.one_minus_src_alpha)
sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
else:
print("Error: SpriteStrokeDrawer: Unknown draw mode:", self.mode)
def set_mode(self, mode):
self.mode = mode
def get_mode(self):
return self.mode
# EOF #
|
flavono123/LKFES | feature-evals/dio_aio/test.py | Python | gpl-3.0 | 3,826 | 0.006012 | #!/usr/bin/env python2
#-*-coding:utf-8-*-
import sys
import os
import subprocess
import numpy
import json
def exec_cmd(cmd):
return subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def fio(test):
os.system("echo 3 > /proc/sys/vm/drop_caches")
os.system("echo 3 > /proc/sys/vm/drop_caches")
os.system("echo 3 > /proc/sys/vm/drop_caches")
cmd = "sudo fio --directory=/mnt/loop --direct=1 --bs=4k --size=1G --numjobs=4 --time_based --runtime=10 --norandommap --minimal --name dio_aio --rw=" + test
return exec_cmd(cmd)
#proc = exec_cmd(cmd)
#return get_iops(proc.stdout.read(), test)
def get_iops(output, rw):
field = str(output).split(';')
if (rw == "read" or rw == "randread"):
return int(field[7])
elif (rw == "write" or rw == "randwrite"):
return int(field[48])
def caltobyte(hnum):
num = float(hnum[0:-1])
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if hnum[-1] == unit: return num
num *= 1024.0
def sizeof_fmt(knum):
for unit in ['K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(knum) < 1024.0:
return "%3.1f%s" % (knum, unit)
knum /= 1024.0
def buffercache():
proc = exec_cmd("cat /proc/meminfo")
output = proc.stdout.read()
field = output.split()
return float(field[10]), float(field[13])
# Make loop block device as file
os.system("dd if=/dev/zero of=lbd.img bs=1k count=1536000 2>/dev/null")
os.system("losetup /dev/loop1 lbd.img 2>/dev/null")
os.system("mkfs -t ext4 /dev/loop1 > /dev/null 2>&1")
os.mkdir("/mnt/loop")
os.system("mount -t ext4 /dev/loop1 /mnt/loop 1>/dev/null")
# System set; drop page, cache and , and set the ratio of write back max as possible
os.system("echo 3 > /proc/sys/vm/drop_caches")
os.system("echo 90 > /proc/sys/vm/dirty_ratio")
rand_read_iops = 0
rand_read_buffer = 0.0
rand_read_cache = 0.0
read_iops = 0
read_buffer = 0.0
read_cache = 0.0
rand_write_iops = 0
rand_write_buffer = 0.0
rand_write_cache = 0.0
write_iops = 0
write_buffer = 0.0
write_cache =0.0
test = 4
# Test start
for i in range (test) :
tmp_buffer = 0.0
tmp_cache = 0.0
proc = fio("randread")
if i == 0: continue
rand_read_iops += get_iops(proc.stdout.read(), "randread")
tmp_buffer, tmp_cache = buffercache()
rand_read_buffer += tmp_buffer
rand_read_cache += tmp_cache
proc = fio("read")
read_iops += get_iops(proc.stdout.read(), "read")
tmp_buffer, tmp_cache = buffercache()
read_buffer += tmp_buffer
read_cache += tmp_cache
proc = fio("randwrite")
rand_write_iops += ge | t_iops(proc.stdout.read(), "randwrite")
tmp_buffer, tmp_cache = buf | fercache()
rand_write_buffer += tmp_buffer
rand_write_cache += tmp_cache
proc = fio("write")
write_iops += get_iops(proc.stdout.read(), "write")
tmp_buffer, tmp_cache = buffercache()
write_buffer += tmp_buffer
write_cache += tmp_cache
col_list = ["rand_read", "read", "rand_write", "write"]
row_list = ["IOPS", "Buffer", "Cache"]
table_data = numpy.array([[int(rand_read_iops), int(read_iops), int(rand_write_iops), int(write_iops)],
[sizeof_fmt(rand_read_buffer), sizeof_fmt(read_buffer), sizeof_fmt(rand_write_buffer), sizeof_fmt(write_buffer)],
[sizeof_fmt(rand_read_cache), sizeof_fmt(read_cache), sizeof_fmt(rand_write_cache), sizeof_fmt(write_cache)]])
row_format = "{:>12}" * (len(col_list) + 1)
print row_format.format("", *col_list)
for row, data in zip(row_list, table_data):
print row_format.format(row, *data)
os.system("umount /mnt/loop")
os.system("rm -rf /mnt/loop")
os.system("losetup -d /dev/loop1")
os.system("rm lbd.img")
|
espressif/esp-idf | tools/find_build_apps/__init__.py | Python | apache-2.0 | 493 | 0.002028 | from .cmake import BUILD_SYSTEM_CMAKE, CMakeB | uildSystem
from .common import (DEFAULT_TARGET, BuildError, BuildItem, BuildSystem, ConfigRule, config_rules_from_str,
setup_logging)
BUILD_SYSTEMS = {
BUILD_SYSTEM_CMAKE: CMakeBuildSystem,
}
__all__ = [
'BuildItem',
'BuildSystem',
'BuildError',
'ConfigRule',
'config_rules_from_str',
'setup_logging',
'DEFAULT_TARGET',
'CMakeBuildSystem',
'BUILD_SYSTEM_CMAKE',
'BUILD_SYSTEMS' | ,
]
|
skuda/client-python | kubernetes/client/models/v1_namespace_spec.py | Python | apache-2.0 | 3,316 | 0.001206 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1NamespaceSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, finalizers=None):
"""
V1NamespaceSpec - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'finalizers': 'list[str]'
}
self.attribute_map = {
'finalizers': 'finalizers'
}
self._finalizers = finalizers
@property
def finalizers(self):
"""
Gets the finalizers of this V1NamespaceSpec.
Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers
:return: The finalizers of this V1NamespaceSpec.
:rtype: list[str]
"""
return self._finalizers
@finalizers.setter
def finalizers(self, finalizers):
"""
Sets the finalizers of this V1NamespaceSpec.
Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers
:param finalizers: The finalizers of this V1NamespaceSpec.
:type: list[str]
"""
self._finalizers = finalizers
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
| elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
| ))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
Azure/azure-sdk-for-python | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2020_01_01_preview/operations/__init__.py | Python | mit | 640 | 0.004688 | # coding=utf-8
# ---------------------------- | ----------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for l | icense information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._management_group_diagnostic_settings_operations import ManagementGroupDiagnosticSettingsOperations
__all__ = [
'ManagementGroupDiagnosticSettingsOperations',
]
|
tmgstevens/flume | flume-ng-doc/sphinx/conf.py | Python | apache-2.0 | 3,274 | 0.002749 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import sys
import os
from datetime import date
# eventlet/gevent should not monkey patch anything.
os.environ["GEVENT_NOPATCH"] = "yes"
os.environ["EVENTLET_NOPATCH"] = "yes"
#os.environ["CELERY_LOADER"] = "default"
this = os.path.dirname(os.path.abspath(__file__))
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.join(os.pardir, "tests"))
sys.path.append(os.path.join(this, "_ext"))
#import celery
# General configuration
# ---------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.intersphinx',
]
html_show_sphinx = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Apache Flume'
copyright = '2009 | -%s The Apache Software Foundation' % date.today().year
keep_warnings = True
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = ".".join(map(str, celery.VERSION[0:2]))
# The full version, including alpha/beta/rc tags.
#release = celery.__version__
exclude_trees = ['.build']
# If true, '()' will be appended to :func: etc. cross-r | eference text.
add_function_parentheses = True
#intersphinx_mapping = {
# "http://docs.python.org/dev": None,
# "http://kombu.readthedocs.org/en/latest/": None,
# "http://django-celery.readthedocs.org/en/latest": None,
#}
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
highlight_language = 'none'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['../resources/images']
html_logo = 'images/flume-logo.png'
html_use_smartypants = True
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
html_use_index = True
#html_theme = 'default'
html_sidebars = {
'**': ['localtoc.html', 'relations.html', 'sourcelink.html'],
}
|
Sythelux/Picarto.bundle | Contents/Libraries/Shared/PicartoClientAPI/models/video_search_result.py | Python | bsd-3-clause | 3,920 | 0.001276 | # coding: utf-8
"""
Picarto.TV API Documentation
The Picarto.TV API documentation Note, for fixed access tokens, the header that needs to be sent is of the format: `Authorization: Bearer yourTokenHere` This can be generated at https://oauth.picarto.tv/ For chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details
OpenAPI spec version: 1.2.5
Contact: api@picarto.tv
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VideoSearchResult(object):
    """
    Swagger model pairing a channel with one of its videos.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
      swagger_types (dict): attribute name -> attribute type.
      attribute_map (dict): attribute name -> json key in the definition.
    """

    swagger_types = {
        'channel': 'BasicChannelInfo',
        'video': 'ChannelVideo'
    }

    attribute_map = {
        'channel': 'channel',
        'video': 'video'
    }

    def __init__(self, channel=None, video=None):
        """
        VideoSearchResult - a model defined in Swagger
        """
        self._channel = None
        self._video = None
        if channel is not None:
            self.channel = channel
        if video is not None:
            self.video = video

    @property
    def channel(self):
        """
        Gets the channel of this VideoSearchResult.

        :return: The channel of this VideoSearchResult.
        :rtype: BasicChannelInfo
        """
        return self._channel

    @channel.setter
    def channel(self, channel):
        """
        Sets the channel of this VideoSearchResult.

        :param channel: The channel of this VideoSearchResult.
        :type: BasicChannelInfo
        """
        self._channel = channel

    @property
    def video(self):
        """
        Gets the video of this VideoSearchResult.

        :return: The video of this VideoSearchResult.
        :rtype: ChannelVideo
        """
        return self._video

    @video.setter
    def video(self, video):
        """
        Sets the video of this VideoSearchResult.

        :param video: The video of this VideoSearchResult.
        :type: ChannelVideo
        """
        self._video = video

    def to_dict(self):
        """
        Return the model's properties as a plain dict.
        """
        def _plain(value):
            # Recursively unwrap nested swagger models into builtin types.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return dict(
                    (key, val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items())
            return value

        return dict((name, _plain(getattr(self, name)))
                    for name in self.swagger_types)

    def to_str(self):
        """
        Return the string representation of the model.
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`.
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Two results are equal when they are the same model type with
        identical attribute values.
        """
        if isinstance(other, VideoSearchResult):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """
        Inverse of __eq__.
        """
        return not self == other
|
Compjeff/PokemonGo-Bot | pokemongo_bot/cell_workers/incubate_eggs.py | Python | mit | 8,649 | 0.002428 | from pokemongo_bot.human_behaviour import | sleep
from pokemongo_bot.base_task import BaseTask
class IncubateEggs(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
last_km_walked = 0
    def initialize(self):
        """Set up per-task state before the first work() call."""
        # Incubator slots, refreshed from the inventory on every tick.
        self.ready_incubators = []
        self.used_incubators = []
        # Eggs in the inventory that are not currently incubating.
        self.eggs = []
        self.km_walked = 0
        # Seconds to wait for the in-game hatching animation before
        # reading the hatch results.
        self.hatching_animation_delay = 4.20
        # Maximum IV sum (3 stats x 15 = 45); not referenced in this
        # chunk — presumably used when reporting hatched pokemon. TODO confirm.
        self.max_iv = 45.0
        self._process_config()
    def _process_config(self):
        """Read task options; incubate longer-distance eggs first by default."""
        self.longer_eggs_first = self.config.get("longer_eggs_first", True)
    def work(self):
        """Task entry point: refresh inventory state, hatch any egg that has
        reached its target distance, and fill free incubators."""
        try:
            self._check_inventory()
        except:
            # Inventory responses are flaky; bail out and retry next tick.
            return
        # Only re-evaluate hatching when the walked distance has changed.
        if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked:
            # Closest-to-hatching egg first.
            self.used_incubators.sort(key=lambda x: x.get("km"))
            km_left = self.used_incubators[0]['km']-self.km_walked
            if km_left <= 0:
                self._hatch_eggs()
            else:
                self.emit_event(
                    'next_egg_incubates',
                    formatted='Next egg ({km_needed} km) incubates in {distance_in_km:.2f} km',
                    data={
                        'km_needed': self.used_incubators[0]['km_needed'],
                        'distance_in_km': km_left
                    }
                )
            IncubateEggs.last_km_walked = self.km_walked
        # Order eggs according to configuration before assigning incubators.
        sorting = self.longer_eggs_first
        self.eggs.sort(key=lambda x: x.get("km"), reverse=sorting)
        if self.ready_incubators:
            self._apply_incubators()
    def _apply_incubators(self):
        """Pair each unused ready incubator with the first eligible egg.

        Result codes from USE_ITEM_EGG_INCUBATOR:
        1 = success, 5/7 = incubator already in use, 6 = egg already
        incubating. Other codes leave both egg and incubator untouched.
        """
        for incubator in self.ready_incubators:
            if incubator.get('used', False):
                continue
            for egg in self.eggs:
                # km == -1 marks an egg with no known target distance.
                if egg["used"] or egg["km"] == -1:
                    continue
                self.emit_event(
                    'incubate_try',
                    level='debug',
                    formatted="Attempting to apply incubator {incubator_id} to egg {egg_id}",
                    data={
                        'incubator_id': incubator['id'],
                        'egg_id': egg['id']
                    }
                )
                ret = self.bot.api.use_item_egg_incubator(
                    item_id=incubator["id"],
                    pokemon_id=egg["id"]
                )
                if ret:
                    code = ret.get("responses", {}).get("USE_ITEM_EGG_INCUBATOR", {}).get("result", 0)
                    if code == 1:
                        self.emit_event(
                            'incubate',
                            formatted='Incubating a {distance_in_km} egg.',
                            data={
                                'distance_in_km': str(egg['km'])
                            }
                        )
                        egg["used"] = True
                        incubator["used"] = True
                        # Incubator consumed; move on to the next one.
                        break
                    elif code == 5 or code == 7:
                        self.emit_event(
                            'incubator_already_used',
                            level='debug',
                            formatted='Incubator in use.',
                        )
                        incubator["used"] = True
                        break
                    elif code == 6:
                        self.emit_event(
                            'egg_already_incubating',
                            level='debug',
                            formatted='Egg already incubating',
                        )
                        # Try the next egg with this same incubator.
                        egg["used"] = True
    def _check_inventory(self, lookup_ids=[]):
        """Refresh eggs/incubators/km_walked from the server inventory.

        :param lookup_ids: pokemon ids to collect full data for (used after
            hatching). NOTE(review): mutable default argument — never
            mutated here, but fragile; consider ``lookup_ids=None``.
        :return: list of pokemon dicts matching ``lookup_ids``, each
            augmented with an ``iv`` stat triple.
        """
        inv = {}
        response_dict = self.bot.get_inventory()
        matched_pokemon = []
        temp_eggs = []
        temp_used_incubators = []
        temp_ready_incubators = []
        # Walk down the nested response to the inventory item list
        # (raises KeyError on malformed responses; caller handles it).
        inv = reduce(
            dict.__getitem__,
            ["responses", "GET_INVENTORY", "inventory_delta", "inventory_items"],
            response_dict
        )
        for inv_data in inv:
            inv_data = inv_data.get("inventory_item_data", {})
            if "egg_incubators" in inv_data:
                temp_used_incubators = []
                temp_ready_incubators = []
                incubators = inv_data.get("egg_incubators", {}).get("egg_incubator",[])
                if isinstance(incubators, basestring): # checking for old response
                    incubators = [incubators]
                for incubator in incubators:
                    # An incubator holding a pokemon_id is in use.
                    if 'pokemon_id' in incubator:
                        # 9001 is a sentinel for "distance unknown".
                        start_km = incubator.get('start_km_walked', 9001)
                        km_walked = incubator.get('target_km_walked', 9001)
                        temp_used_incubators.append({
                            "id": incubator.get('id', -1),
                            "km": km_walked,
                            "km_needed": (km_walked - start_km)
                        })
                    else:
                        temp_ready_incubators.append({
                            "id": incubator.get('id', -1)
                        })
                continue
            if "pokemon_data" in inv_data:
                pokemon = inv_data.get("pokemon_data", {})
                # Eggs not yet assigned to an incubator.
                if pokemon.get("is_egg", False) and "egg_incubator_id" not in pokemon:
                    temp_eggs.append({
                        "id": pokemon.get("id", -1),
                        "km": pokemon.get("egg_km_walked_target", -1),
                        "used": False
                    })
                elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids:
                    pokemon.update({
                        "iv": [
                            pokemon.get('individual_attack', 0),
                            pokemon.get('individual_defense', 0),
                            pokemon.get('individual_stamina', 0)
                        ]})
                    matched_pokemon.append(pokemon)
                continue
            if "player_stats" in inv_data:
                self.km_walked = inv_data.get("player_stats", {}).get("km_walked", 0)
        # Only overwrite cached state when fresh data actually arrived.
        if temp_used_incubators:
            self.used_incubators = temp_used_incubators
        if temp_ready_incubators:
            self.ready_incubators = temp_ready_incubators
        if temp_eggs:
            self.eggs = temp_eggs
        return matched_pokemon
def _hatch_eggs(self):
response_dict = self.bot.api.get_hatched_eggs()
log_color = 'green'
try:
result = reduce(dict.__getitem__, ["responses", "GET_HATCHED_EGGS"], response_dict)
except KeyError:
return
pokemon_ids = []
if 'pokemon_id' in result:
pokemon_ids = [id for id in result['pokemon_id']]
stardust = result.get('stardust_awarded', "error")
candy = result.get('candy_awarded', "error")
xp = result.get('experience_awarded', "error")
sleep(self.hatching_animation_delay)
self.bot.latest_inventory = None
try:
pokemon_data = self._check_inventory(pokemon_ids)
for pokemon in pokemon_data:
# pokemon ids seem to be offset by one
if pokemon['pokemon_id']!=-1:
pokemon['name'] = self.bot.pokemon_list[(pokemon.get('pokemon_id')-1)]['Name']
else:
pokemon['name'] = "error"
except:
pokemon_data = [{"name":"error","cp":"error","iv":"error"}]
if not pokemon_ids or pokemon_data[0]['name'] == "error":
self.emit_event(
'egg_hatched',
data={
'pokemon': 'error',
'cp': 'error',
'iv': 'error',
'exp': 'error',
'stardust': 'error',
'candy': 'error',
}
)
return
for i in range(len(pokemon_data)):
msg = "Egg hatched with a {pokemon} (CP {cp} - IV {iv}), {exp} exp, {stardust} stardust and {candy} candies."
self.emit_event(
'egg_hatched',
formatted=msg,
|
jmontgom10/Mimir_pyPol | hackScript_setBadPixForPPOL.py | Python | mit | 905 | 0.001105 | # hack script to set the bad pixels to the | right value for PPOL
import os
import glob
import numpy as np
import astroimage as ai
# Mark every non-finite or wildly out-of-range pixel with PPOL's
# bad-pixel sentinel value in each background-free HWP image.
ai.set_instrument('Mimir')

BAD_PIX_VALUE = -1e6

bkgFreeDir = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611\\bkgFreeHWPimages'
for fitsPath in glob.glob(os.path.join(bkgFreeDir, '*.fits')):
    print('processing {}'.format(os.path.basename(fitsPath)))
    img = ai.reduced.ReducedScience.read(fitsPath)

    # Replace NaNs/infs first, then anything with an absurd magnitude
    # (> 1e5), so PPOL recognizes those pixels as bad (-1e6).
    pixels = img.data
    pixels[~np.isfinite(pixels)] = BAD_PIX_VALUE
    pixels[np.abs(pixels) > 1e5] = BAD_PIX_VALUE

    # Store the cleaned array and overwrite the file on disk.
    img.data = pixels
    img.write(fitsPath, dtype=np.float32, clobber=True)

print('Done!')
|
lfrdm/medpy | bin/medpy_anisotropic_diffusion.py | Python | gpl-3.0 | 4,093 | 0.00733 | #!/usr/bin/python
"""
Executes gradient anisotropic diffusion filter over an image.
Copyright (C) 2013 Oskar Maier
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# build-in modules
import argparse
import logging
import os
# third-party modules
# path changes
# own modules
from medpy.io import load, save, get_pixel_spacing
from medpy.core import Logger
from medpy.filter.smoothing import anisotropic_diffusion
# information
__author__ = "Oskar Maier"
__version__ = "r0.1.0, 2013-08-24"
__email__ = "oskar.maier@googlemail.com"
__status__ = "Release"
__description__ = """
Executes gradient anisotropic diffusion filter over an image.
This smoothing algorithm is edges preserving.
Note that the images voxel-spacing will be taken into account.
Copyright (C) 2013 Oskar Maier
This program comes with ABSOLUTELY NO WARRANTY; This is free software,
and you are welcome to redistribute it under certain conditions; see
the LICENSE file or <http://www.gnu.org/licenses/> for details.
"""
# code
def main():
    """Parse CLI arguments, run the anisotropic diffusion filter, save result."""
    # parse cmd arguments
    parser = getParser()
    # NOTE(review): parse_args() is invoked twice (here and inside
    # getArguments); harmless but redundant — confirm before removing.
    parser.parse_args()
    args = getArguments(parser)
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    # check if output image exists (will also be performed before saving, but as the smoothing might be very time intensity, a initial check can save frustration)
    if not args.force:
        if os.path.exists(args.output):
            # NOTE(review): parser.error() raises SystemExit itself, so the
            # surrounding `raise` never executes its own exception.
            raise parser.error('The output image {} already exists.'.format(args.output))
    # loading image
    data_input, header_input = load(args.input)
    # apply the anisotropic diffusion filter (voxel spacing is respected)
    logger.info('Applying anisotropic diffusion with settings: niter={} / kappa={} / gamma={}...'.format(args.iterations, args.kappa, args.gamma))
    data_output = anisotropic_diffusion(data_input, args.iterations, args.kappa, args.gamma, get_pixel_spacing(header_input))
    # save file
    save(data_output, args.output, header_input, args.force)
    logger.info('Successfully terminated.')
def getArguments(parser):
    "Provides additional validation of the arguments collected by argparse."
    # No extra validation is currently performed beyond argparse's own.
    return parser.parse_args()
def getParser():
    "Creates and returns the argparse parser object."
    parser = argparse.ArgumentParser(description=__description__)
    # Positional arguments: input and output volume paths.
    parser.add_argument('input', help='Source volume.')
    parser.add_argument('output', help='Target volume.')
    # Filter tuning parameters.
    parser.add_argument('-i', '--iterations', type=int, default=1, help='The number of smoothing iterations. Strong parameter.')
    parser.add_argument('-k', '--kappa', type=int, default=50, help='The algorithms kappa parameter. The higher the more edges are smoothed over.')
    parser.add_argument('-g', '--gamma', type=float, default=0.1, help='The algorithms gamma parameter. The higher, the stronger the plateaus between edges are smeared.')
    # Generic flags shared by the medpy command line tools.
    parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.')
    parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.')
    parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.')
    return parser
if __name__ == "__main__":
main()
|
linkedin/indextank-service | storefront/manage.py | Python | apache-2.0 | 776 | 0.002577 | #!/usr/bin/env python
from django.core.management import execute_manager
from os import environ
from sys import argv

# DJANGO_LOCAL switches the settings module into local-development mode.
environ['DJANGO_LOCAL'] = ''
# Guard: invoking `manage.py` with no subcommand used to raise IndexError
# on the argv[1] accesses below.
if len(argv) > 1:
    if argv[1] == 'runserver':
        environ['DJANGO_LOCAL'] = '1'
    # A 'local'-prefixed command (e.g. 'localshell') runs the underlying
    # command (prefix stripped) with local settings enabled.
    if argv[1].startswith('local'):
        environ['DJANGO_LOCAL'] = '1'
        argv[1] = argv[1][5:]

try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
|
EricMuller/mywebmarks-backend | requirements/twisted/Twisted-17.1.0/docs/core/benchmarks/linereceiver.py | Python | mit | 1,374 | 0.004367 | from __future__ import print_function
import math, time
from twisted.protocols import basic
class CollectingLineReceiver(basic.LineReceiver):
    """LineReceiver that appends every parsed line to ``self.lines``."""

    def __init__(self):
        self.lines = []
        # Bind the list's append directly as the lineReceived callback so
        # per-line collection overhead is minimal during benchmarking.
        self.lineReceived = self.lines.append
def deliver(proto, chunks):
    """Feed each chunk of bytes to *proto* in order.

    Uses an explicit loop instead of ``map`` for the side effect: ``map``
    built a throwaway list on Python 2 and, being lazy on Python 3, would
    never invoke ``dataReceived`` at all.
    """
    for chunk in chunks:
        proto.dataReceived(chunk)
def benchmark(chunkSize, lineLength, numLines):
    """Time CollectingLineReceiver parsing ``numLines`` lines of
    ``lineLength`` 'x' bytes, delivered in ``chunkSize``-byte chunks.

    Python 2 only: relies on ``xrange``, integer ``/`` division and
    ``time.clock()``.
    """
    bytes = ('x' * lineLength + '\r\n') * numLines
    # Integer division (Python 2); +1 includes a partial trailing chunk.
    chunkCount = len(bytes) / chunkSize + 1
    chunks = []
    for n in xrange(chunkCount):
        chunks.append(bytes[n*chunkSize:(n+1)*chunkSize])
    # Sanity check: re-joined chunks must reproduce the input exactly.
    assert ''.join(chunks) == bytes, (chunks, bytes)
    p = CollectingLineReceiver()
    # time.clock() measures CPU time (removed in Python 3.8).
    before = time.clock()
    deliver(p, chunks)
    after = time.clock()
    # Verify the receiver reassembled every line correctly.
    assert bytes.splitlines() == p.lines, (bytes.splitlines(), p.lines)
    print('chunkSize:', chunkSize, end=' ')
    print('lineLength:', lineLength, end=' ')
    print('numLines:', numLines, end=' ')
    print('CPU Time: ', after - before)
def main():
    """Run the benchmark grid: small workloads first, then larger ones
    with chunk sizes that do not divide the line length evenly."""
    for numLines in 100, 1000:
        for lineLength in (10, 100, 1000):
            for chunkSize in (1, 500, 5000):
                benchmark(chunkSize, lineLength, numLines)
    for numLines in 10000, 50000:
        for lineLength in (1000, 2000):
            for chunkSize in (51, 500, 5000):
                benchmark(chunkSize, lineLength, numLines)
if __name__ == '__main__':
main()
|
gorpon/misc | python/euclid.py | Python | gpl-3.0 | 1,512 | 0.003307 | #!/usr/bin/env python3
def is_prime(a):
    """Return True if *a* is a prime number.

    Replaces the one-liner ``all(a % i for i in range(2, a))`` (from
    stackoverflow), which wrongly reported 0 and 1 as prime and
    trial-divided every integer below *a*. This version rejects values
    below 2 and only tests odd divisors up to sqrt(a).
    """
    if a < 2:
        return False
    if a % 2 == 0:
        return a == 2
    for i in range(3, math.isqrt(a) + 1, 2):
        if a % i == 0:
            return False
    return True
def number_check(num):
    """
    Validate that *num* is a whole, positive, non-prime number.

    :return: True when the number is usable, False otherwise; a reason is
        printed for every rejection.
    """
    if not isinstance(num, int):
        print("num %s is not an integer" % str(num))
        return False
    elif num <= 0:
        print("num %s is not greater than zero" % str(num))
        return False
    elif is_prime(num):
        print("%s is a prime number!" % str(num))
        # Originally fell through returning None; make the rejection explicit.
        return False
    else:
        return True
def get_numbers():
    """
    Prompt for two different, whole, non-prime numbers and return them as
    a (larger, smaller) tuple.

    The original recursed on invalid input but discarded the recursive
    call's result, so bad values leaked through, and the final retry path
    returned None; this version loops until both inputs validate.
    """
    while True:
        print("please enter two different, whole, non-prime numbers")
        num1 = int(input("enter first number: "))
        if not number_check(num1):
            continue
        num2 = int(input("enter second number: "))
        if not number_check(num2):
            continue
        if num1 == num2:
            print("I didn't like your answers. try again.")
            continue
        # Return (larger, smaller) so get_gcd receives its args in order.
        return (num1, num2) if num1 > num2 else (num2, num1)
def get_gcd(x, y):
    """
    Compute the greatest common divisor via the Euclidean algorithm.

    :param x: the bigger of the two numbers
    :param y: the lesser of the two numbers
    :return: the greatest common divisor of the two numbers
    """
    # Iterative Euclid: replace (x, y) with (y, x mod y) until y divides x.
    while x % y:
        x, y = y, x % y
    return y
# Script entry point: interactively read two validated numbers (larger
# first) and print their greatest common divisor.
x, y = get_numbers()
print("the gcd is %s" % str(get_gcd(x, y)))
|
eunchong/build | third_party/twisted_10_2/twisted/names/common.py | Python | bsd-3-clause | 9,108 | 0.006478 | # -*- test-case-name: twisted.names.test -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Base functionality useful to various parts of Twisted Names.
"""
import socket
from twisted.names import dns
from twisted.names.error import DNSFormatError, DNSServerError, DNSNameError
from twisted.names.error import DNSNotImplementedError, DNSQueryRefusedError
from twisted.names.error import DNSUnknownError
from twisted.internet import defer, error
from twisted.python import failure
EMPTY_RESULT = (), (), ()
class ResolverBase:
    """
    L{ResolverBase} is a base class for L{IResolver} implementations which
    deals with a lot of the boilerplate of implementing all of the lookup
    methods.

    @cvar _errormap: A C{dict} mapping DNS protocol failure response codes
        to exception classes which will be used to represent those failures.
    """
    # Python 2 source: uses ``except KeyError, e`` and tuple-unpacking
    # function parameters below.
    _errormap = {
        dns.EFORMAT: DNSFormatError,
        dns.ESERVER: DNSServerError,
        dns.ENAME: DNSNameError,
        dns.ENOTIMP: DNSNotImplementedError,
        dns.EREFUSED: DNSQueryRefusedError}

    # Instance attribute set in __init__; class-level None is a placeholder.
    typeToMethod = None

    def __init__(self):
        # NOTE(review): ``typeToMethod`` on the right-hand side is a
        # module-level mapping of DNS record types to method *names*
        # (defined elsewhere in this module); each name is bound to the
        # corresponding method of this instance here.
        self.typeToMethod = {}
        for (k, v) in typeToMethod.items():
            self.typeToMethod[k] = getattr(self, v)

    def exceptionForCode(self, responseCode):
        """
        Convert a response code (one of the possible values of
        L{dns.Message.rCode} to an exception instance representing it.

        @since: 10.0
        """
        return self._errormap.get(responseCode, DNSUnknownError)

    def query(self, query, timeout = None):
        # Dispatch to the lookup method registered for this record type;
        # unknown types fail the returned Deferred with NotImplementedError.
        try:
            return self.typeToMethod[query.type](str(query.name), timeout)
        except KeyError, e:
            return defer.fail(failure.Failure(NotImplementedError(str(self.__class__) + " " + str(query.type))))

    def _lookup(self, name, cls, type, timeout):
        # Subclasses override this single method; every lookup* helper
        # below funnels through it.
        return defer.fail(NotImplementedError("ResolverBase._lookup"))

    def lookupAddress(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupAddress
        """
        return self._lookup(name, dns.IN, dns.A, timeout)

    def lookupIPV6Address(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupIPV6Address
        """
        return self._lookup(name, dns.IN, dns.AAAA, timeout)

    def lookupAddress6(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupAddress6
        """
        return self._lookup(name, dns.IN, dns.A6, timeout)

    def lookupMailExchange(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupMailExchange
        """
        return self._lookup(name, dns.IN, dns.MX, timeout)

    def lookupNameservers(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupNameservers
        """
        return self._lookup(name, dns.IN, dns.NS, timeout)

    def lookupCanonicalName(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupCanonicalName
        """
        return self._lookup(name, dns.IN, dns.CNAME, timeout)

    def lookupMailBox(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupMailBox
        """
        return self._lookup(name, dns.IN, dns.MB, timeout)

    def lookupMailGroup(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupMailGroup
        """
        return self._lookup(name, dns.IN, dns.MG, timeout)

    def lookupMailRename(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupMailRename
        """
        return self._lookup(name, dns.IN, dns.MR, timeout)

    def lookupPointer(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupPointer
        """
        return self._lookup(name, dns.IN, dns.PTR, timeout)

    def lookupAuthority(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupAuthority
        """
        return self._lookup(name, dns.IN, dns.SOA, timeout)

    def lookupNull(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupNull
        """
        return self._lookup(name, dns.IN, dns.NULL, timeout)

    def lookupWellKnownServices(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupWellKnownServices
        """
        return self._lookup(name, dns.IN, dns.WKS, timeout)

    def lookupService(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupService
        """
        return self._lookup(name, dns.IN, dns.SRV, timeout)

    def lookupHostInfo(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupHostInfo
        """
        return self._lookup(name, dns.IN, dns.HINFO, timeout)

    def lookupMailboxInfo(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupMailboxInfo
        """
        return self._lookup(name, dns.IN, dns.MINFO, timeout)

    def lookupText(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupText
        """
        return self._lookup(name, dns.IN, dns.TXT, timeout)

    def lookupSenderPolicy(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupSenderPolicy
        """
        return self._lookup(name, dns.IN, dns.SPF, timeout)

    def lookupResponsibility(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupResponsibility
        """
        return self._lookup(name, dns.IN, dns.RP, timeout)

    def lookupAFSDatabase(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupAFSDatabase
        """
        return self._lookup(name, dns.IN, dns.AFSDB, timeout)

    def lookupZone(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupZone
        """
        return self._lookup(name, dns.IN, dns.AXFR, timeout)

    def lookupNamingAuthorityPointer(self, name, timeout=None):
        """
        @see: twisted.names.client.lookupNamingAuthorityPointer
        """
        return self._lookup(name, dns.IN, dns.NAPTR, timeout)

    def lookupAllRecords(self, name, timeout = None):
        """
        @see: twisted.names.client.lookupAllRecords
        """
        return self._lookup(name, dns.IN, dns.ALL_RECORDS, timeout)

    def getHostByName(self, name, timeout = None, effort = 10):
        """
        @see: twisted.names.client.getHostByName
        """
        # XXX - respect timeout
        return self.lookupAllRecords(name, timeout
            ).addCallback(self._cbRecords, name, effort
            )

    def _cbRecords(self, (ans, auth, add), name, effort):
        # Python 2 tuple-unpacking parameter: receives the (answers,
        # authority, additional) triple from lookupAllRecords.
        result = extractRecord(self, dns.Name(name), ans + auth + add, effort)
        if not result:
            raise error.DNSLookupError(name)
        return result
def extractRecord(resolver, name, answers, level=10):
if not level:
return None
if hasattr(socket, 'inet_ntop'):
for r in answers:
if r.name == name and r.type == dns.A6:
return socket.inet_ntop(socket.AF_INET6, r.payload.address)
for r in answers:
if r.name == name and r.type == dns.AAAA:
return socket.inet_ntop(socket.AF_INET6, r.payload.address)
for r in answers:
if r.name == name and r.type == dns.A:
return socket.inet_ntop(socket.AF_INET, r.payload.address)
for r in answers:
if r.name == name and r.type == dns.CNAME:
result = extractRecord(
resolver, r.payload.name, answers, level - 1)
if not result:
return resolver.getHostByName(
str(r.payload.name), effort=level - 1)
return result
# No answers, but maybe there's a hint at who we should be asking about
# this
for r in answers:
if r.type == dns.NS:
from twisted.names import client
r = client.Resolver(servers=[(str(r.payload.name), dns.PORT)])
return r.lookupAddress(str(name)
).addCallback(
lambda (ans, |
rajalokan/keystone | keystone/conf/__init__.py | Python | apache-2.0 | 5,308 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_cache import core as cache
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from oslo_middleware import cors
from osprofiler import opts as profiler
from keystone.conf import assignment
from keystone.conf import auth
from keystone.conf import catalog
from keystone.conf import credential
from keystone.conf import default
from keystone.conf import domain_config
from keystone.conf import endpoint_filter
from keystone.conf import endpoint_policy
from keystone.conf import eventlet_server
from keystone.conf import federation
from keystone.conf import fernet_tokens
from keystone.conf import identity
from keystone.conf import identity_mapping
from keystone.conf import ldap
from keystone.conf import memcache
from keystone.conf import oauth1
from keystone.conf import paste_deploy
from keystone.conf import policy
from keystone.conf import resource
from keystone.conf import revoke
from keystone.conf import role
from keystone.conf import saml
from keystone.conf import security_compliance
from keystone.conf import shadow_users
from keystone.conf import signing
from keystone.conf import token
from keystone.conf import tokenless_auth
from keystone.conf import trust
CONF = cfg.CONF

# Every keystone.conf submodule that contributes option definitions; each
# one exposes register_opts(conf) and is registered by configure().
conf_modules = [
    assignment,
    auth,
    catalog,
    credential,
    default,
    domain_config,
    endpoint_filter,
    endpoint_policy,
    eventlet_server,
    federation,
    fernet_tokens,
    identity,
    identity_mapping,
    ldap,
    memcache,
    oauth1,
    paste_deploy,
    policy,
    resource,
    revoke,
    role,
    saml,
    security_compliance,
    shadow_users,
    signing,
    token,
    tokenless_auth,
    trust,
]

# Route keystone notifications through their own message exchange instead
# of the oslo.messaging default.
oslo_messaging.set_transport_defaults(control_exchange='keystone')
def set_default_for_default_log_levels():
    """Set the default for the default_log_levels option for keystone.

    Keystone uses some packages that other OpenStack services don't use that do
    logging. This will set the default_log_levels default level for those
    packages.

    This function needs to be called before CONF().
    """
    # Quiet the chatty third-party loggers keystone pulls in.
    extra_log_level_defaults = [
        'dogpile=INFO',
        'routes=INFO',
    ]
    log.register_options(CONF)
    log.set_defaults(default_log_levels=log.get_default_log_levels() +
                     extra_log_level_defaults)
def setup_logging():
    """Set up logging for the keystone package."""
    log.setup(CONF, 'keystone')
    # Route warnings.warn() output through the logging system.
    logging.captureWarnings(True)
def configure(conf=None):
    """Register all CLI and configuration options on *conf*.

    :param conf: an oslo.config ConfigOpts instance; defaults to the
        global CONF when omitted.
    """
    if conf is None:
        conf = CONF
    conf.register_cli_opt(
        cfg.BoolOpt('standard-threads', default=False,
                    help='Do not monkey-patch threading system modules.'))
    conf.register_cli_opt(
        cfg.StrOpt('pydev-debug-host',
                   help='Host to connect to for remote debugger.'))
    conf.register_cli_opt(
        cfg.PortOpt('pydev-debug-port',
                    help='Port to connect to for remote debugger.'))
    # Register every keystone.conf submodule's options.
    for module in conf_modules:
        module.register_opts(conf)
    # register any non-default auth methods here (used by extensions, etc)
    auth.setup_authentication()
    # add oslo.cache related config options
    cache.configure(conf)
def set_external_opts_defaults():
    """Update default configuration options for oslo.middleware."""
    # CORS defaults: headers keystone clients commonly send/read.
    cors.set_defaults(
        allow_headers=['X-Auth-Token',
                       'X-Openstack-Request-Id',
                       'X-Subject-Token',
                       'X-Project-Id',
                       'X-Project-Name',
                       'X-Project-Domain-Id',
                       'X-Project-Domain-Name',
                       'X-Domain-Id',
                       'X-Domain-Name'],
        expose_headers=['X-Auth-Token',
                        'X-Openstack-Request-Id',
                        'X-Subject-Token'],
        allow_methods=['GET',
                       'PUT',
                       'POST',
                       'DELETE',
                       'PATCH']
    )
    # configure OSprofiler options
    profiler.set_defaults(CONF, enabled=False, trace_sqlalchemy=False)
    # Oslo.cache is always enabled by default for request-local caching
    # TODO(morganfainberg): Fix this to not use internal interface when
    # oslo.cache has proper interface to set defaults added. This is
    # just a bad way to do this.
    opts = cache._opts.list_opts()
    for opt_list in opts:
        if opt_list[0] == 'cache':
            for o in opt_list[1]:
                if o.name == 'enabled':
                    o.default = True
def set_config_defaults():
    """Override all configuration default values for keystone."""
    # Apply keystone's log-level defaults, then external library defaults.
    set_default_for_default_log_levels()
    set_external_opts_defaults()
|
GALabs/StaticAid | static_aid/config.py | Python | mit | 3,707 | 0.004856 | from ConfigParser import ConfigParser, NoSectionError
from os.path import join, exists, realpath, curdir, dirname
from shutil import copyfile
### Application constants - these are not exposed to users via config files ###
# NOTE: Directories must match Gruntfile.js: jekyll > (serve|build) > options > (src|dest)
# Project root defaults to the current working directory.
ROOT = realpath(curdir)
CONFIG_DEFAULTS_FILE_PATH = join(ROOT, 'local_settings.default')
if not exists(CONFIG_DEFAULTS_FILE_PATH):
    # probably because we're debugging directly (PWD = dirname(__file__))
    ROOT = realpath(join(dirname(__file__), '..'))
    CONFIG_DEFAULTS_FILE_PATH = join(ROOT, 'local_settings.default')
CONFIG_FILE_PATH = join(ROOT, 'local_settings.cfg')
SAMPLE_DATA_DIR = join(ROOT, 'data')
SITE_SRC_DIR = join(ROOT, 'site')
# build dirs
BUILD_DIR = join(ROOT, 'build')
DATA_DIR = join(BUILD_DIR, 'data')
STAGING_DIR = join(BUILD_DIR, 'staging')
RAW_DATA_DIR = join(BUILD_DIR, 'raw')
SITE_BUILD_DIR = join(BUILD_DIR, 'site') # must match 'dest' settings in Gruntfile.js
# temp dir
TEMP_DIR = join(BUILD_DIR, 'tmp')
PID_FILE_PATH = join(TEMP_DIR, 'daemon.pid')
OBJECT_CACHE_DIR = join(TEMP_DIR, 'object_cache')
# Page size used when fetching rows from the data backends.
ROW_FETCH_LIMIT = 100
def _configSection(section):
    """Return the named config section as a plain dict ({} when missing).

    raw=True keeps '%' interpolation characters intact.
    """
    try:
        return {k:v for k, v in _config.items(section, raw=True)}
    except NoSectionError:
        return {}
def _stringToBoolean(string):
if string is None:
return None
k = string.lower()
result = {'true': True,
't': True,
'1': True,
'false': False,
'f': False,
'0':False,
}
if k in result:
return result[k]
return None
def _stringToList(string):
if string is None:
return None
return [i.strip() for i in string.strip().split(',')]
### Config file values ###
# read the config file
if not exists(CONFIG_FILE_PATH) an | d not exists(CONFIG_DEFAULTS_FILE_PATH):
print "Unable to find any config settings! Please create one of these two files:"
print "", CONFIG_FILE_PATH
print "", CONFIG_DEFAULTS_FILE_PATH
exit(1)
if not exists(CONFIG_FILE_PATH):
copyfile(CONFIG_DEFAULTS_FILE_PATH, CONFIG_FILE_PATH)
_config = ConfigPars | er()
_config.read(CONFIG_FILE_PATH)
# Extract the config values - reference these in calling code
# NOTE: keys from config files are forced to lower-case when they are read by ConfigParser
# which extractor backend to use for loading data
dataExtractor = _configSection('DataExtractor')
# set DEFAULT value if necessary
dataExtractor['dataSource'] = dataExtractor.get('datasource', 'DEFAULT').lower()
# baseURL, repository, user, password
archivesSpace = _configSection('ArchivesSpace')
if archivesSpace:
archivesSpace['repository_url'] = '%s/repositories/%s' % (archivesSpace.get('baseurl'), archivesSpace.get('repository'))
archivesSpace['breadcrumb_url'] = '%s/search/published_tree?node_uri=/repositories/%s' % (archivesSpace.get('baseurl'),
archivesSpace.get('repository'),
)
# baseURL, database, user, password
adlib = _configSection('Adlib')
sampleData = _configSection('SampleData')
sampleData['filename'] = join(SAMPLE_DATA_DIR, sampleData.get('filename', 'FILENAME_NOT_SET'))
# filename, level, format, datefmt
logging = _configSection('Logging')
# the data locations - collections, objects, trees, agents, people, subjects
destinations = _configSection('Destinations')
# a state file that stores the most recent export date
lastExportFilepath = join(ROOT, _config.get('LastExport', 'filepath'))
|
tigerking/pyvision | src/pyvision/data/ml/mulSVM.py | Python | bsd-3-clause | 5,495 | 0.014741 | '''
Created on Jan 21, 2010
@author: nayeem
'''
import pyvision as pv
import numpy as np
import scipy as sp
from pyvision.vector.SVM import SVM
import csv
import os.path
import sys
sys.setrecursionlimit(1500)
class multiSVM:
    '''
    Multi-class SVM experiment harness: loads the bundled iris data set,
    splits it into train/test halves and provides distance/kernel helpers.
    Python 2 code (print statement, file-iterator .next(), list-returning map).
    '''
    def __init__(self):
        '''
        Load the iris data and cache the train/test split on the instance.
        '''
        self.data = self.readData()
        self.train_data = self.data[0]
        self.train_labels = self.data[1]
        self.test_data = self.data[2]
        #self.runSVM()
    def trainData(self):
        # Accessor for the training samples (even-indexed iris rows).
        return self.train_data
    def trainLabels(self):
        # Accessor for the training labels.
        return self.train_labels
    def testData(self):
        # Accessor for the held-out test samples (odd-indexed iris rows).
        return self.test_data
    def runSVM(self):
        # Placeholder: constructs an SVM but performs no training yet.
        svm = SVM()
        print "I am in the SVM module now"
    def readData(self):
        """Parse pyvision's bundled iris.csv and split it 50/50.

        :return: (train_data, train_labels, test_data) numpy arrays;
            note test labels are not returned.
        """
        IRIS_PATH = os.path.join(pv.__path__[0],'data','ml','iris.csv')
        readcsv = csv.reader(open(IRIS_PATH,"rb"))
        data = []
        labels = []
        # Skip the CSV header row (Python 2 iterator protocol).
        readcsv.next()
        train_data = []
        test_data = []
        train_labels = []
        pred_labels = []
        for row in readcsv:
            # Columns 1-4 are the four numeric features; column 5 the label.
            data_point = map(float, row[1:5])
            label = row[5]
            data.append(data_point)
            labels.append(label)
        iris_data = np.array(data)
        iris_labels = np.array(labels)
        data_length = len(iris_data)
        # Even rows train, odd rows test.
        iris_training = np.arange(0, data_length, 2)
        iris_testing = iris_training + 1
        for i in iris_training:
            train_data.append(iris_data[i, :])
            train_labels.append(iris_labels[i])
        for i in iris_testing:
            test_data.append(iris_data[i, :])
        train_data = np.array(train_data)
        test_data = np.array(test_data)
        train_labels = np.array(train_labels)
        data = train_data, train_labels, test_data
        return data
    def mahalanobisDist(self,group1,group2):
        """Mahalanobis distance between the means of two sample groups.

        Uses a sample-count-weighted pooled covariance with biased (1/n)
        per-group covariances. Expects groups as (samples x features)
        arrays. Raises LinAlgError if the pooled covariance is singular.
        """
        mean1 = group1.mean(axis=0)
        mean2 = group2.mean(axis=0)
        gp1Centered = group1-mean1
        gp2Centered = group2-mean2
        n1 = np.size(gp1Centered,axis=0)
        n2 = np.size(gp2Centered,axis=0)
        # Biased covariance estimates (divide by n, not n-1).
        cov1 = (np.dot(gp1Centered.T,gp1Centered)) / n1
        cov2 = (np.dot(gp2Centered.T, gp2Centered)) / n2
        weighted1 = n1*cov1
        weighted2 = n2*cov2
        pooledCov = (np.add(weighted1,weighted2))/(n1+n2)
        meandiff = mean1-mean2
        invpooledCov = np.linalg.inv(pooledCov)
        prod1 = np.dot(meandiff,invpooledCov)
        prod = np.dot(prod1,meandiff.T)
        dist = np.sqrt(prod)
        return dist
    def rbfKernel(self,vecA,vecB,sigma):
        # Gaussian RBF kernel: exp(-||a-b||^2 / (2 sigma^2)).
        vec_diff = vecA-vecB
        return np.exp(-np.dot(vec_diff,vec_diff.T)/(2*sigma**2))
class Node:
    """Binary-tree node carrying a class list and the matching data."""

    def __init__(self):
        self.classList = []   # labels stored at this node
        self.classData = []   # samples stored at this node
        self.pos = 0          # branch hint: -1 -> descend left, else right
        self.leftChild = None
        self.rightChild = None


class Tree:
    """Binary tree where inserts with ``pos == -1`` descend left and all
    other inserts descend right until an empty child slot is found.

    NOTE: the original also had a shared mutable class attribute
    ``root = Node()`` that was immediately shadowed in ``__init__``;
    it was dead code and has been removed.
    """

    def __init__(self):
        self.root = None

    def insert(self, classlist, classData, pos):
        """Create a node for *classlist*/*classData* and attach it at the
        leaf reached by repeatedly following *pos* (-1 = left, else right)."""
        newNode = Node()
        newNode.classList = classlist
        newNode.classData = classData
        newNode.pos = pos
        if self.root is None:
            # BUGFIX: was ``self.root = self.newNode`` (AttributeError).
            self.root = newNode
            return
        curr = self.root
        while True:
            parent = curr
            if newNode.pos == -1:
                curr = curr.leftChild
                if curr is None:
                    # BUGFIX: was ``parent.leftChild = newNode()`` --
                    # a Node instance is not callable.
                    parent.leftChild = newNode
                    return
            else:
                curr = curr.rightChild
                if curr is None:
                    # BUGFIX: same instance-call bug as the left branch.
                    parent.rightChild = newNode
                    return
ms = multiSVM()
trainingdata = ms.trainData()
traininglabels = ms.trainLabels()
testdata = ms.testData()
#print 'training data:\n' + repr(traininglabels)
#length = len(traininglabels)
#rows,cols = np.shape(traininglabels)
#print traininglabels[length-1]
classes = np.unique(traininglabels) # Unique classes
num_classes = len(classes)
num_features = np.size(trainingdata,axis=1) # Columns of training Data
num_samples = np.size(trainingdata,axis=0) # Number of samples
print '#classes: '+repr(num_classes)
print '#num_features: ' + repr(num_features)
print '#num_samples: ' + repr(num_samples)
means = []
covs = []
class_data = []
for i in np.arange(0,num_classes):
print classes[i]
mask = traininglabels==classes[i]
numThisClass = | sum(mask)
print numThisClass
trThisClass = trainingdata[mask,:]
class_data.append(trThisClass)
centerThisClass = trThisClass.mean(axis=0)
print centerThisClass
means.append(centerThisClass)
print '**********************************************************************************'
covThisClass = np.cov(trThisClass)
covs.append(covThisClass)
# print '***************** | *****************************************************************'
# print np.shape(covThisClass)
invCovMatThisClass = np.linalg.inv(covThisClass)
print np.shape(invCovMatThisClass)
# assert(0)
|
saltstack/salt | salt/modules/apf.py | Python | apache-2.0 | 3,165 | 0.00158 | """
Support for Advanced Policy Firewall (APF)
==========================================
:maintainer: Mostafa Hussein <mostafa.hussein91@gmail.com>
:maturity: new
:depends: python-iptables
:platform: Linux
"""
import salt.utils.path
from salt.exceptions import CommandExecutionError
try:
import iptc
IPTC_IMPORTED = True
except ImportError:
IPTC_IMPORTED = False
def __virtual__():
    """
    Only load this execution module when the ``apf`` binary is on PATH and
    python-iptables imported successfully.
    """
    if salt.utils.path.which("apf") is None:
        return (False, "The apf execution module cannot be loaded: apf unavailable.")
    if not IPTC_IMPORTED:
        return (
            False,
            "The apf execution module cannot be loaded: python-iptables is missing.",
        )
    return True
def __apf_cmd(cmd):
    """
    Run *cmd* through the resolved ``apf`` binary and return its stdout.

    Raises CommandExecutionError on a non-zero exit, using stderr when
    available and falling back to stdout.
    """
    apf_cmd = "{} {}".format(salt.utils.path.which("apf"), cmd)
    result = __salt__["cmd.run_all"](apf_cmd)
    if result["retcode"] != 0:
        msg = result["stderr"] if result["stderr"] else result["stdout"]
        raise CommandExecutionError("apf failed: {}".format(msg))
    return result["stdout"]
def _status_apf():
    """
    Return True if apf is running (a "sanity" chain exists in the iptables
    filter table), otherwise return False.
    """
    table = iptc.Table(iptc.Table.FILTER)
    for chain in table.chains:
        if "sanity" in chain.name.lower():
            # Found the marker chain; no need to scan further.
            return True
    return False
def running():
    """
    Check apf status

    CLI Example:

    .. code-block:: bash

        salt '*' apf.running
    """
    # _status_apf() already returns a bool; the original
    # ``True if _status_apf() else False`` was redundant.
    return _status_apf()
def disable():
    """
    Stop (flush) all firewall rules

    CLI Example:

    .. code-block:: bash

        salt '*' apf.disable
    """
    # Only flush when apf is actually up; otherwise returns None.
    if _status_apf():
        return __apf_cmd("-f")
def enable():
    """
    Load all firewall rules

    CLI Example:

    .. code-block:: bash

        salt '*' apf.enable
    """
    # Nothing to do when apf is already running.
    if _status_apf():
        return None
    return __apf_cmd("-s")
def reload():
    """
    Stop (flush) & reload firewall rules

    CLI Example:

    .. code-block:: bash

        salt '*' apf.reload
    """
    # NOTE: shadows the ``reload`` builtin; kept for the salt CLI name.
    # NOTE(review): this only runs ``apf -r`` when the sanity chain is
    # absent (i.e. apf looks stopped) -- confirm that guard is intended.
    if not _status_apf():
        return __apf_cmd("-r")
def refresh():
    """
    Refresh & resolve dns names in trust rules

    CLI Example:

    .. code-block:: bash

        salt '*' apf.refresh
    """
    # ``apf -e`` re-resolves FQDN entries in the trust rules.
    return __apf_cmd("-e")
def allow(ip, port=None):
    """
    Add host (IP/FQDN) to allow_hosts.rules and immediately load new rule into firewall

    CLI Example:

    .. code-block:: bash

        salt '*' apf.allow 127.0.0.1
    """
    # NOTE(review): ``port`` is accepted but unused -- when a port is given
    # the function silently returns None without touching apf. Confirm
    # whether per-port allows should be supported or the parameter dropped.
    if port is None:
        return __apf_cmd("-a {}".format(ip))
def deny(ip):
    """
    Add host (IP/FQDN) to deny_hosts.rules and immediately load new rule into firewall

    CLI Example:

    .. code-block:: bash

        salt '*' apf.deny 1.2.3.4
    """
    cmd = "-d {}".format(ip)
    return __apf_cmd(cmd)
def remove(ip):
    """
    Remove host from [glob]*_hosts.rules and immediately remove rule from firewall

    CLI Example:

    .. code-block:: bash

        salt '*' apf.remove 1.2.3.4
    """
    cmd = "-u {}".format(ip)
    return __apf_cmd(cmd)
|
magic0704/oslo.db | oslo_db/tests/utils.py | Python | apache-2.0 | 1,310 | 0 | # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www. | apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo_config import cfg
from oslotest import base as test_base
from oslotes | t import moxstubout
import six
if six.PY3:
    # Python 3 removed contextlib.nested(); emulate it with ExitStack so
    # callers can still enter a variable number of context managers at once.
    @contextlib.contextmanager
    def nested(*contexts):
        with contextlib.ExitStack() as stack:
            yield [stack.enter_context(c) for c in contexts]
else:
    # Python 2: the stdlib helper still exists.
    nested = contextlib.nested
class BaseTestCase(test_base.BaseTestCase):
    """oslotest base case wired up with mox stubs and config cleanup."""

    def setUp(self, conf=cfg.CONF):
        # ``conf`` defaults to the global config object on purpose: tests
        # share it and the cleanup below resets any overrides they make.
        super(BaseTestCase, self).setUp()
        # MoxStubout fixture verifies/unsets all stubs on teardown.
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = moxfixture.mox
        self.stubs = moxfixture.stubs
        self.conf = conf
        self.addCleanup(self.conf.reset)
|
wilderrodrigues/cloudstack_integration_tests | cit/integration/acs/tests/test_reset_vm_on_reboot.py | Python | apache-2.0 | 5,776 | 0.002251 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for reset Vm on reboot
"""
#Import Local Modules
import marvin
from marvin.codes import FAILED
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
_multiprocess_shared_ = True
class TestResetVmOnReboot(cloudstackTestCase):
    """Verify that a VM created from a *volatile* service offering gets a
    fresh ROOT volume when it is rebooted."""

    @classmethod
    def setUpClass(cls):
        testClient = super(TestResetVmOnReboot, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Use 'local' storage; the default 'shared' does not work here.
        cls.services["service_offerings"]["small"]['storagetype'] = 'local'
        cls.services['ostype'] = 'CentOS 5.6 (64-bit)'
        # Get Zone, Domain and templates
        domain = get_domain(cls.apiclient)
        zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.services['mode'] = zone.networktype

        template = get_template(
            cls.apiclient,
            zone.id,
            cls.services["ostype"]
        )
        if template == FAILED:
            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

        # Bind the resolved zone/template into the 'small' VM description.
        cls.services["small"]["zoneid"] = zone.id
        cls.services["small"]["template"] = template.id

        # Create account, service offering (volatile!) and the test VM.
        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=domain.id
        )

        cls.small_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["service_offerings"]["small"],
            isvolatile="true"
        )

        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient,
            cls.services["small"],
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.small_offering.id,
            mode=cls.services["mode"]
        )
        # Deleting the account also removes the VM it owns.
        cls._cleanup = [
            cls.small_offering,
            cls.account
        ]

    @classmethod
    def tearDownClass(cls):
        # Re-acquire a client in case the one from setUpClass is stale.
        cls.apiclient = super(TestResetVmOnReboot, cls).getClsTestClient().getApiClient()
        cleanup_resources(cls.apiclient, cls._cleanup)
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []

    def tearDown(self):
        # Clean up any resources created by the individual test.
        cleanup_resources(self.apiclient, self.cleanup)
        return

    @attr(hypervisor="xenserver")
    @attr(tags=["advanced", "basic"])
    def test_01_reset_vm_on_reboot(self):
        # TODO: SIMENH: add new test to check volume contents
        """Test reset virtual machine on reboot

        Steps:
        1. list the VM's ROOT volume
        2. reboot the VM
        3. the ROOT volume id must differ and the VM must still be Running
        """
        volumelist_before_reboot = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall=True
        )
        self.assertNotEqual(
            volumelist_before_reboot,
            None,
            "Check if volume is in listvolumes"
        )
        volume_before_reboot = volumelist_before_reboot[0]

        self.debug("Rebooting vm %s " % (self.virtual_machine.id))
        cmd = rebootVirtualMachine.rebootVirtualMachineCmd()
        cmd.id = self.virtual_machine.id
        self.apiclient.rebootVirtualMachine(cmd)

        volumelist_after_reboot = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall=True
        )
        self.assertNotEqual(
            volumelist_after_reboot,
            None,
            "Check if volume is in listvolumes"
        )
        volume_after_reboot = volumelist_after_reboot[0]
        # Volatile offering => the ROOT volume must have been recreated.
        self.assertNotEqual(
            volume_after_reboot.id,
            volume_before_reboot.id,
            "Check whether volumes are different before and after reboot"
        )

        list_vm_response = VirtualMachine.list(
            self.apiclient,
            id=self.virtual_machine.id
        )
        self.assertEqual(
            isinstance(list_vm_response, list),
            True,
            # BUGFIX: message text was corrupted by a stray "|".
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            list_vm_response,
            None,
            "Check virtual machine is listVirtualMachines"
        )

        # BUGFIX: identifier was corrupted ("vm_respo | nse") in the source.
        vm_response = list_vm_response[0]
        self.assertEqual(
            vm_response.state,
            'Running',
            "Check the state of VM"
        )
        return
|
timkrentz/SunTracker | IMU/VTK-6.2.0/Web/Applications/Cone/server/vtk_web_cone.py | Python | mit | 3,893 | 0.002826 | r"""
This module is a VTK Web server application.
The following command line illustrate how to use it::
$ vtkpython .../vtk_web_cone.py
Any VTK Web executable script come with a set of standard arguments that
can be overriden if need be::
--host localhost
Interface on which the HTTP server will listen on.
--port 8080
Port number on which the HTTP server will listen to.
--content /path-to-web-content/
Directory that you want to server as static web content.
By default, this variable is empty which mean that we rely on another server
to deliver the static content and the current process only focus on the
WebSocket connectivity of clients.
--authKey vtk-secret
Secret key that should be provided by the client to allow it to make any
WebSocket communication. The client will assume if none is given that the
server expect "vtk-secret" as secret key.
"""
# import to process args
import sys
import os
# import vtk modules.
import vtk
from vtk.web import protocols, server
from vtk.web import wamp as vtk_wamp
try:
import argparse
except ImportError:
# since Python 2.6 and earlier don't have argparse, we simply provide
# the source for the same as _argparse and we use it instead.
import _argparse as argparse
# =============================================================================
# Create custom File Opener class to handle clients requests
# =============================================================================
class _WebCone(vtk_wamp.ServerProtocol):
    """WAMP server protocol serving one shared VTK cone render window."""

    # Application configuration, shared across all client sessions.
    view = None
    authKey = "vtkweb-secret"

    def initialize(self):
        global renderer, renderWindow, renderWindowInteractor, cone, mapper, actor

        # Register the standard VTK Web protocols used by the client.
        self.registerVtkWebProtocol(protocols.vtkWebMouseHandler())
        self.registerVtkWebProtocol(protocols.vtkWebViewPort())
        self.registerVtkWebProtocol(protocols.vtkWebViewPortImageDelivery())
        self.registerVtkWebProtocol(protocols.vtkWebViewPortGeometryDelivery())

        # Create the default pipeline only once for all sessions.
        if not _WebCone.view:
            # VTK specific code
            renderer = vtk.vtkRenderer()
            renderWindow = vtk.vtkRenderWindow()
            renderWindow.AddRenderer(renderer)

            renderWindowInteractor = vtk.vtkRenderWindowInteractor()
            renderWindowInteractor.SetRenderWindow(renderWindow)
            renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera()

            cone = vtk.vtkConeSource()
            mapper = vtk.vtkPolyDataMapper()
            actor = vtk.vtkActor()

            mapper.SetInputConnection(cone.GetOutputPort())
            # BUGFIX: this line carried a stray "|" corruption in the source.
            actor.SetMapper(mapper)

            renderer.AddActor(actor)
            renderer.ResetCamera()
            renderWindow.Render()

            # VTK Web application specific
            _WebCone.view = renderWindow
            self.Application.GetObjectIdMap().SetActiveObject("VIEW", renderWindow)
# ========================================================================= | ====
# Main: Parse args and start server
# =============================================================================
if __name__ == "__main__":
    # Create the argument parser
    parser = argparse.ArgumentParser(description="VTK/Web Cone web-application")

    # Add the standard vtk.web server arguments (--host, --port, ...)
    server.add_arguments(parser)

    # Extract arguments
    args = parser.parse_args()

    # Configure our current application
    _WebCone.authKey = args.authKey

    # Start the HTTP/WebSocket server with our protocol
    server.start_webserver(options=args, protocol=_WebCone)
|
mulkieran/blk-DAG-tools | version.py | Python | gpl-2.0 | 1,244 | 0 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will | be u | seful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
"""
version
================
Version information.
.. moduleauthor:: mulhern <amulhern@redhat.com>
"""
__version__ = '0.1'
__version_info__ = tuple(int(x) for x in __version__.split('.'))
|
merlinthered/sublime-rainmeter | newskintools.py | Python | mit | 4,688 | 0.004693 | import os
import re
import time
import sublime
import sublime_plugin
import rainmeter
class RainmeterNewSkinFileCommand(sublime_plugin.WindowCommand):
    """Open a new view and insert a skin skeleton"""

    def run(self):
        # Fresh buffer for the skin, pre-filled from the bundled snippet.
        skin_view = self.window.new_file()
        skin_view.run_command(
            "insert_snippet",
            {"name": "Packages/Rainmeter/Snippets/skin.sublime-snippet"})
        # Prefer a user-supplied syntax definition over the bundled one.
        if os.path.exists("Packages/User/Rainmeter.tmLanguage"):
            skin_view.set_syntax_file("Packages/User/Rainmeter.tmLanguage")
        else:
            skin_view.set_syntax_file("Packages/Rainmeter/Rainmeter.tmLanguage")
class RainmeterNewSkinCommand(sublime_plugin.WindowCommand):
    """Create a new skin, complete with folders, open it and refresh Rainmeter

    Prompts the user for the name of a skin and creates a new skin of that
    name in the skins folder, if it doesn't already exist. Then opens the skin
    file, inserts a basic skin skeleton and refreshes Rainmeter.
    """

    def run(self):
        # BUGFIX: the prompt caption carried a stray "|" corruption.
        self.window.show_input_panel("Enter Skin Name:",
                                     "",
                                     lambda name: self.create_skin(name),
                                     None,
                                     None)

    def create_skin(self, name):
        """Create the folder structure and ini file for skin *name*."""
        skinspath = rainmeter.skins_path()
        if not skinspath or not os.path.exists(skinspath):
            sublime.error_message(
                "Error while trying to create new skin: " +
                "Skins path could not be found. Please check the value" +
                " of your \"skins_path\" setting.")
            return

        name = os.path.normpath(name.strip("\\").strip("/")) + "\\"
        # Path where the new ini file will be created
        newskinpath = os.path.join(skinspath, name)
        # Path where the @Resources folder should be created
        # (first path segment of the entered name).
        basepath = os.path.join(skinspath,
                                re.match("(.*?)\\\\", name).group(1))
        try:
            os.makedirs(newskinpath)
        except os.error:
            sublime.error_message(
                "Error while trying to create new skin: " +
                "Directory " + newskinpath + " could not be created. " +
                "Does it already exist?")
            return

        # Check which folders should be created
        settings = sublime.load_settings("Rainmeter.sublime-settings")
        make_resources = settings.get(
            "rainmeter_new_skin_create_resources_folder",
            True)
        make_images = settings.get(
            "rainmeter_new_skin_create_images_folder",
            True)
        make_fonts = settings.get(
            "rainmeter_new_skin_create_fonts_folder",
            True)
        make_scripts = settings.get(
            "rainmeter_new_skin_create_scripts_folder",
            True)

        try:
            if make_resources:
                os.makedirs(os.path.join(basepath, "@Resources"))
            if make_images:
                os.makedirs(os.path.join(basepath, "@Resources\\Images"))
            if make_fonts:
                os.makedirs(os.path.join(basepath, "@Resources\\Fonts"))
            if make_scripts:
                os.makedirs(os.path.join(basepath, "@Resources\\Scripts"))
        except os.error:
            sublime.status_message("Did not create @Resources folder or" +
                                   " subfolders because they already exist")

        window = self.window
        filename = os.path.basename(os.path.normpath(name))
        # BUGFIX: close the handle used to touch the new skin file
        # (the original leaked it).
        open(os.path.join(newskinpath, filename + ".ini"), 'a').close()
        newview = window.open_file(os.path.join(newskinpath,
                                                filename + ".ini"))
        # We have to wait until the file is fully loaded (even if it's empty
        # because it was just created)
        sublime.set_timeout((lambda: self.open_skin_file(newview)), 100)

    def open_skin_file(self, view):
        """Once *view* has loaded, insert the skeleton and refresh Rainmeter."""
        if view.is_loading():
            # BUGFIX: the retry lambda passed ``self`` twice and referenced
            # the undefined name ``newview``; re-poll with the given view.
            sublime.set_timeout(lambda: self.open_skin_file(view), 100)
            return
        view.run_command(
            "insert_snippet",
            {"name": "Packages/Rainmeter/Snippets/skin.sublime-snippet"})
        if os.path.exists("Packages/User/Rainmeter.tmLanguage"):
            view.set_syntax_file("Packages/User/Rainmeter.tmLanguage")
        else:
            view.set_syntax_file("Packages/Rainmeter/Rainmeter.tmLanguage")
        sublime.run_command("rainmeter_refresh")
|
jmaher/treeherder | tests/webapp/api/test_performance_tags.py | Python | mpl-2.0 | 483 | 0.00207 | from django.urls import reverse
def test_perf_tags_get(authorized_sheriff_client, test_perf_tag, test_perf_tag_2):
    """Listing performance tags returns both fixtures with id and name."""
    resp = authorized_sheriff_client.get(reverse('performance-tags-list'))
    # BUGFIX: identifiers below were corrupted by stray "|" characters
    # ("res | p", "test_perf_tag.i | d") in the source.
    assert resp.status_code == 200

    # Parse the body once instead of on every assertion.
    tags = resp.json()
    assert len(tags) == 2

    assert tags[0]['id'] == test_perf_tag.id
    assert tags[0]['name'] == test_perf_tag.name
    assert tags[1]['id'] == test_perf_tag_2.id
    assert tags[1]['name'] == test_perf_tag_2.name
|
hftools/hftools | hftools/file_formats/hdf5/v_01.py | Python | bsd-3-clause | 4,699 | 0.003618 | # -*- coding: ISO-8859-1 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2014, HFTools Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
try:
import h5py
import_failed = False
except ImportError: # pragma: no cover
import_failed = True
h5py = None
import hftools
import numpy as np
from hftools.dataset import DimRep, hfarray, DataBlock
from hftools.dataset.dim import DimBase
from hftools.file_formats.common import Comments
from hftools.py3compat import string_types, cast_unicode, cast_bytes
def unpack_dim(dim):
    """Describe *dim* as a (class-name, name, unit) triple."""
    klass = dim.__class__.__name__
    return (klass, dim.name, dim.unit)
def save_hdf5(db, h5file, name="datablock", mode="w", compression="gzip", **kw):
    """Serialize DataBlock *db* into group *name* of an HDF5 file.

    *h5file* may be a filename (the file is opened and closed here) or an
    already-open h5py.File (left open). datetime64 arrays are stored as
    uint64 with the dtype name recorded in the dataset attributes so that
    read_hdf5 can restore them.
    """
    if isinstance(h5file, string_types):
        fil = h5py.File(h5file, mode)
    else:
        fil = h5file
    grp = fil.create_group(name)
    # HDF5 attributes cannot hold None; store the sentinel string "None".
    if db.blockname is None:
        blockname = "None"
    else:
        blockname = db.blockname
    grp.attrs["Blockname"] = blockname
    vardata = grp.create_group("vardata")
    ivardata = grp.create_group("ivardata")
    comments = grp.create_group("Comments")
    if db.comments:
        com = np.array([cast_bytes(x) for x in db.comments.fullcomments])
        comments.create_dataset("fullcomments", data=com, compression=compression)
    for k in db.vardata:
        datadtype = ""
        data = db[k]
        if data.ndim:
            if data.dtype.name.startswith("datetime64"):
                datadtype = data.dtype.name
                data = data.astype(np.uint64)
            dset = vardata.create_dataset(k, data=data, compression=compression, )
            # NOTE(review): ``name`` here shadows the function parameter of
            # the same name (the group has already been created, so it is
            # harmless, but confusing).
            klass, name, unit = zip(*[unpack_dim(dim) for dim in db[k].dims])
        else:
            if data.dtype.name.startswith("datetime64"):
                datadtype = data.dtype.name
                data = data.astype(np.uint64)
            dset = vardata.create_dataset(k, data=data)  # scalars cannot be compressed
            name = []
        dset.attrs[r"info\name"] = list(map(cast_bytes, name))
        dset.attrs[r"data\unit"] = cast_bytes(db[k].unit if db[k].unit is not None else "None")
        dset.attrs[r"data\dtype"] = datadtype
    for k in db.ivardata:
        datadtype = ""
        data = hfarray(db.ivardata[k])
        if data.dtype.name.startswith("datetime64"):
            datadtype = data.dtype.name
            data = data.astype(np.uint64)
        dset = ivardata.create_dataset(k, data=data, compression=compression)
        # Record enough metadata to rebuild the dimension object on read.
        klass, name, unit = unpack_dim(db.ivardata[k])
        dset.attrs[r"info\class"] = klass
        dset.attrs[r"info\unit"] = cast_bytes(unit if unit is not None else "None")
        dset.attrs[r"info\dtype"] = datadtype
    # Only close the file if we opened it ourselves.
    if isinstance(h5file, string_types):
        fil.close()
# Every Dim subclass exported by hftools.dataset, keyed by class name;
# read_hdf5 uses this to rebuild dimension objects from the stored name.
dims_dict = dict((name, dim) for name, dim in hftools.dataset.__dict__.items() if isinstance(dim, type) and issubclass(dim, DimBase))
def read_hdf5(h5file, name="datablock", **kw):
    """Read a DataBlock stored by save_hdf5 from group *name* of *h5file*.

    *h5file* may be a filename (opened read-only and closed here) or an
    already-open h5py.File (left open). Returns the reconstructed DataBlock.
    """
    # BUGFIX: the signature and the ``blockname = None`` line were
    # corrupted by stray "|" characters in the source.
    if isinstance(h5file, string_types):
        fil = h5py.File(h5file, "r")
    else:
        fil = h5file
    db = DataBlock()
    grp = fil[name]
    blockname = grp.attrs["Blockname"]
    if blockname.lower() == "none":
        # "None" is the sentinel save_hdf5 writes for a missing blockname.
        blockname = None
    db.blockname = blockname
    comments = grp["Comments"]
    if "fullcomments" in comments and len(comments["fullcomments"]):
        db.comments = Comments([cast_unicode(x).strip() for x in np.array(comments["fullcomments"])])
    else:
        db.comments = Comments()
    ivardata = grp["ivardata"]
    vardata = grp["vardata"]
    # Dimensions must be rebuilt first so vardata arrays can reference them.
    for k in ivardata:
        v = ivardata[k]
        datadtype = v.attrs[r"info\dtype"] or None
        dimcls = dims_dict.get(v.attrs[r"info\class"], DimRep)
        unit = str(v.attrs.get(r"info\unit", "none"))
        if unit.lower() == "none":
            unit = None
        vdata = np.array(np.array(v), dtype=datadtype)
        dim = dimcls(k, vdata, unit=unit)
        db[k] = dim
    for k in vardata:
        v = vardata[k]
        datadtype = v.attrs[r"data\dtype"] or None
        dims = tuple(db.ivardata[cast_unicode(dimname)] for dimname in v.attrs[r"info\name"])
        unit = cast_unicode(v.attrs.get(r"data\unit", "none"))
        if unit.lower() == "none":
            unit = None
        db[k] = hfarray(np.array(v), dtype=datadtype, dims=dims, unit=unit)
    # Only close the file if we opened it ourselves.
    if isinstance(h5file, string_types):
        fil.close()
    if kw.get("property_to_vars", False):
        db.values_from_property()
    return db
|
mengskysama/FoobarTTLyric | lrcserv.py | Python | gpl-3.0 | 8,866 | 0.010357 | # -*- coding: UTF-8 -*-
#auther mengskysama
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.httpclient
import lcs
from urllib import quote
from urllib import unquote
from tornado import gen
import ttlrcdump
listen_port = 38439
def ChooiseItem(xml, artist):
    # Parse a TTPlayer lyric-search XML response and pick the best <lrc>
    # entry for *artist*.  Returns {'id', 'artist', 'title'} on success or
    # False when nothing usable is found.  Python 2 byte-string handling.
    n = xml.find('<?xml')
    if n == -1:
        return False
    artist = ttlrcdump.FilterSearchStr(artist)
    # Pass 1: drop entries whose artist is too dissimilar from the query
    # (LCS overlap shorter than 5 chars and under a third of the query).
    n = 0
    pos = 0
    t = xml.count('id=')
    for n in range(0, t):
        begin = xml.find('artist="', pos)
        end = xml.find('" title', begin)
        _artist = ttlrcdump.FilterSearchStr(xml[begin+8:end])
        pos = end
        n += 1
        arith = lcs.arithmetic()
        samelen = len(arith.lcs(_artist,artist))
        # NOTE: Python 2 integer division on len(artist)/3.
        if samelen < 5 and samelen < len(artist)/3 :
            # Cut the whole <lrc ... lrc> element out of the XML.
            begin = xml.rfind('<lrc',0 ,pos)
            end = xml.find('lrc>', pos)
            xml = xml[:begin] + xml[end + 4:]
            pos = begin
            n -= 1
            t -= 1
    n = xml.find('id=')
    if n == -1:
        return False
    n = 0
    begin = xml.find('artist="', n)
    end = xml.find('" title', n)
    n = end
    # NOTE(review): begin+10 disagrees with the begin+8 used above for the
    # same attribute, and this _artist is never used afterwards -- looks
    # like dead/buggy code; confirm before changing.
    _artist = ttlrcdump.FilterSearchStr(xml[begin+10:end])
    # Pass 2: prefer entries whose title carries one of these markers
    # (bilingual / corrected lyric variants); fall back to the first <lrc>.
    strs = ('动新','動新','动基','对照','對照','中日','中英','修正','假名')
    for _str in strs:
        n = xml.find(_str)
        if n != -1:
            break
    if n == -1:
        n = xml.find('<lrc')
    else:
        n = xml.rfind('<lrc', 0, n)
    if n > -1:
        begin = xml.find('id="', n) + 4
        end = xml.find('"', begin)
        id = xml[begin:end]  # NOTE: shadows the builtin ``id``
        begin = xml.find('artist="', n) + 8
        end = xml.find('"', begin )
        # NOTE(review): these replace() calls look like XML entity
        # un-escaping ('&amp;' -> '&', '&quot;' -> '"', ...) whose escaped
        # forms were lost in this copy -- as written they are no-ops.
        # Restore the escaped first arguments before relying on them.
        artist = xml[begin:end].replace('&','&').replace(''',"'").replace('"','"').replace('<','<').replace('>','>')
        begin = xml.find('title="', n) + 7
        end = xml.find('"', begin)
        title = xml[begin:end].replace('&','&').replace(''',"'").replace('"','"').replace('<','<').replace('>','>')
        data = {'id':id, 'artist':artist, 'title':title}
        return data
    return False
def get_arg(req, arg):
    # Extract the value of parameter *arg* from a query-string-like *req*;
    # returns None when the parameter is absent.
    # NOTE: matches "arg=" anywhere in the string, not only at parameter
    # boundaries (same behavior as the original implementation).
    start = req.find('%s=' % arg)
    if start == -1:
        return None
    start += len(arg) + 1
    stop = req.find('&', start)
    if stop == -1:
        return req[start:]
    return req[start:stop]
@gen.coroutine
def handle_request(request):
if request.uri.startswith('/lrc'):
try:
id = get_arg(request.uri, 'id')
artist = unquote(get_arg(request.uri, 'artist'))
title = unquote(get_arg(request.uri, 'title'))
ttservernum = int(get_arg(request.uri, 'ttservernum'))
#print id.decode('utf-8').encode('gbk')
#print artist.decode('utf-8').encode('gbk')
#print title.decode('utf-8').encode('gbk')
print str(ttservernum)
http_client = tornado.httpclient.AsyncHTTPClient()
#print ttlrcdump.GetDownloadLrcReq(id, artist, title)
req = tornado.httpclient.HTTPRequest(ttlrcdump.GetDownloadLrcReq(ttservernum, id, artist, title))
res = yield http_client.fetch(req)
lrc = res.body.replace('>', '】')
lrc = lrc.replace('<', '【')
lrc = lrc.replace('\r\n', '<br />')
lrc = lrc.replace('\n', '<br />')
lrc = lrc.replace('\r', '<br />')
context = '<script type="text/javascript" src="/templates/ddjs/lrc_content_inner_1.js"></script></div>%s</li>'
context = context.replace('%s',lrc, 1)
#print context
request.write('HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s' % (len(context), context))
except tornado.httpclient.HTTPError, code:
print 'HTTPError except Code' + str(code)
except Exception,e:
print e |
finally:
request.finish()
elif (request.uri.f | ind('/?keyword=') != -1):
uri = request.uri.decode('gbk').replace('%20',' ')
if uri.find('&') != -1:
keyword = uri[10:uri.find('&')]
else:keyword = uri[10:]
#print repr(keyword)
keyword = keyword.encode('gbk')
#print repr(keyword)
keyword = keyword.decode('utf-8')
#print repr(keyword)
keyword = eval(repr(keyword)[1:])
#print repr(keyword)
keyword = keyword.decode('gbk').encode('utf-8')
#print keyword.decode('utf-8').encode('gbk')
#print repr(keyword)
try:
if keyword.count(' ') == 0:
keyword += ' '
n = 0
ttservernum = 0
cnt = keyword.count(' ')
for i in range(0, cnt):
#try to prase art and title
n = keyword.find(' ', n) + 1
artist = keyword[0:n-1]
title = keyword[n:]
#print 'title %s' % title
if title.startswith( '(') and i < cnt - 1:
#歌名一般不可能以括号开头
continue
#print 'guess art=%s' % artist.decode('utf-8').encode('gbk')
#print 'guess tit=%s' % title.decode('utf-8').encode('gbk')
trycnt = 0
if artist.find('and') == -1 and title.find('and') == -1:
trycnt = 1
while True:
reqartist = ''
reqtitle = ''
if trycnt == 0:
reqartist = artist.replace('and', '')
reqtitle = title.replace('and', '')
elif trycnt == 1:
reqartist = artist
reqtitle = title
http_client = tornado.httpclient.AsyncHTTPClient()
#print ttlrcdump.GetSearchLrcReq(ttservernum, artist, title)
ttservernum = ttlrcdump.GetServerNum()
req = tornado.httpclient.HTTPRequest(ttlrcdump.GetSearchLrcReq(ttservernum, reqartist, reqtitle))
res = yield http_client.fetch(req)
ret = ChooiseItem(res.body, artist)
if ret != False or trycnt > 0:
break
trycnt += 1
if ret != False:
break
if ret != False:
context = '<div class="newscont mb15" style="line-height:160%;margin-top:10px">' \
'歌手:<a class="mr">%s</a><br>' \
'专辑:<a class="mr"></a>' \
'歌曲:<a class="mr ">%s<span class="highlighter">a</span></a><br>' \
'查看:<a class="mr"href="%s" target="_blank">LRC' \
'<div style="clear:both;"></div>' \
'<div class="page wid f14">'
context = context.replace('%s', artist, 1)
uni_title = title.decode('utf-8')
strrep = ''
for i in range(0, len(uni_title)):
strrep += '<span class="highlighter">%s</span>' % uni_title[i].encode('utf-8')
context = context.replace('%s', strrep, 1)
context = context.replace('%s', "/lrc/?id=%s&artist=%s&title=%s&ttservernum=%s" % (str(ret['id']), quote(str(ret['artist'])), quote(str(ret['title'])), str(ttservernum)))
#print context.decode('utf-8').encode('gbk')
else:
context = 'Lrc Not Found'
request.write('HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s' % (len(context), context))
except tornado.httpclient.HTTPError, code:
print 'HTTPErro |
miurahr/django-admin-tools | admin_tools/dashboard/dashboards.py | Python | mit | 10,473 | 0.000382 | """
Module where admin tools dashboard classes are defined.
"""
from django.template.defaultfilters import slugify
try:
from importlib import import_module
except ImportError:
# Django < 1.9 and Python < 2.7
from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_text
from admin_tools.dashboard import modules
from admin_tools.utils import get_admin_site_name, uniquify
class Dashboard(object):
    """
    Base class for dashboards.
    The Dashboard class is a simple python list that has three additional
    properties:

    ``title``
        The dashboard title, by default, it is displayed above the dashboard
        in a ``h2`` tag. Default value: 'Dashboard'.

    ``template``
        The template to use to render the dashboard.
        Default value: 'admin_tools/dashboard/dashboard.html'

    ``columns``
        An integer that represents the number of columns for the dashboard.
        Default value: 2.

    If you want to customize the look of your dashboard and it's modules, you
    can declare css stylesheets and/or javascript files to include when
    rendering the dashboard (these files should be placed in your
    media path), for example::

        from admin_tools.dashboard import Dashboard

        class MyDashboard(Dashboard):
            class Media:
                css = ('css/mydashboard.css',)
                js = ('js/mydashboard.js',)

    Here's an example of a custom dashboard::

        from django.core.urlresolvers import reverse
        from django.utils.translation import ugettext_lazy as _
        from admin_tools.dashboard import modules, Dashboard

        class MyDashboard(Dashboard):

            # we want a 3 columns layout
            columns = 3

            def __init__(self, **kwargs):

                # append an app list module for "Applications"
                self.children.append(modules.AppList(
                    title=_('Applications'),
                    exclude=('django.contrib.*',),
                ))

                # append an app list module for "Administration"
                self.children.append(modules.AppList(
                    title=_('Administration'),
                    models=('django.contrib.*',),
                ))

                # append a recent actions module
                self.children.append(modules.RecentActions(
                    title=_('Recent Actions'),
                    limit=5
                ))

    Below is a screenshot of the resulting dashboard:

    .. image:: images/dashboard_example.png
    """
    title = _('Dashboard')
    template = 'admin_tools/dashboard/dashboard.html'
    columns = 2
    children = None

    class Media:
        # Extra static files to include when rendering; subclasses override.
        css = ()
        js = ()

    def __init__(self, **kwargs):
        # Allow any declared class attribute to be overridden via kwargs.
        for key in kwargs:
            if hasattr(self.__class__, key):
                setattr(self, key, kwargs[key])
        self.children = self.children or []

    def init_with_context(self, context):
        """
        Sometimes you may need to access context or request variables to build
        your dashboard, this is what the ``init_with_context()`` method is for.
        This method is called just before the display with a
        ``django.template.RequestContext`` as unique argument, so you can
        access to all context variables and to the ``django.http.HttpRequest``.
        """
        pass

    def get_id(self):
        """
        Internal method used to distinguish different dashboards in js code.
        """
        return 'dashboard'

    def _prepare_children(self):
        """ Enumerates children without explicit id """
        seen = set()
        for id, module in enumerate(self.children):
            # Fall back to the 1-based position when a module has no id;
            # uniquify() guarantees no two children share the same id.
            module.id = uniquify(module.id or str(id+1), seen)
            module._prepare_children()
class AppIndexDashboard(Dashboard):
    """
    Dashboard displayed on an application's index page.

    Very similar to :class:`~admin_tools.dashboard.Dashboard` except that
    its constructor receives two extra arguments:

    ``app_title``
        The title of the application.

    ``models``
        A list of dotted-path strings naming the available models for the
        current application, e.g.
        ``['yourproject.app.Model1', 'yourproject.app.Model2']``.

    Helper methods ``get_app_model_classes()`` and
    ``get_app_content_types()`` resolve those strings to model classes and
    content types.  Custom app index dashboards should inherit from this
    class instead of :class:`~admin_tools.dashboard.Dashboard`.
    """
    models = None
    app_title = None

    def __init__(self, app_title, models, **kwargs):
        kwargs.update({'app_title': app_title, 'models': models})
        super(AppIndexDashboard, self).__init__(**kwargs)

    def get_app_model_classes(self):
        """
        Helper method that returns a list of model classes for the current
        app, resolving each dotted path in ``self.models``.
        """
        model_classes = []
        for dotted_path in self.models:
            module_path, class_name = dotted_path.rsplit('.', 1)
            module = import_module(module_path)
            model_classes.append(getattr(module, class_name))
        return model_classes

    def get_app_content_types(self):
        """
        Return a list of all content_types for this app.
        """
        return [ContentType.objects.get_for_model(cls)
                for cls in self.get_app_model_classes()]

    def get_id(self):
        """
        Internal method used to distinguish different dashboards in js code.
        """
        # Bug fix: ``slugify`` was referenced without being imported and
        # raised NameError at runtime; it is now imported at module level.
        return '%s-dashboard' % slugify(force_text(self.app_title))
class DefaultIndexDashboard(Dashboard):
"""
The default dashboard displayed on the admin index page.
To change the default dashboard you'll have to type the following from the
commandline in your project root directory::
python manage.py customdashboard
And then set the ``ADMIN_TOOLS_INDEX_DASHBOARD`` settings variable to
point to your custom index dashboard class.
"""
def init_with_context(self, context):
site_name = get_admin_site_name(context)
# append a link list module for "quick links"
self.children.append(modules.LinkList(
_('Quick links'),
layout='inline',
draggable=False,
deletable=False,
collapsible=False,
children=[
[_('Return to site'), '/'],
[_('Change password'),
reverse('%s:password_change' % site_name)],
[_('Log out'), reverse('%s:logout' % site_name)],
]
))
# append an app list module for "Applications"
self.children.append(modules.AppList(
_('Applications'),
ex |
FescueFungiShare/hydroshare | hs_core/search_indexes.py | Python | bsd-3-clause | 13,185 | 0.000834 | from haystack import indexes
from hs_core.models import BaseResource
from django.db.models import Q
from datetime import datetime
class BaseResourceIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
short_id = indexes.CharField(model_attr='short_id')
doi = indexes.CharField(model_attr='doi', null=True)
author = indexes.CharField(faceted=True)
title = indexes.CharField(faceted=True)
abstract = indexes.CharField()
creators = indexes.MultiValueField(faceted=True)
contributors = indexes.MultiValueField()
subjects = indexes.MultiValueField(faceted=True)
public = indexes.BooleanField(faceted=True)
discoverable = indexes.BooleanField(faceted=True)
published = indexes.BooleanField(faceted=True)
created = indexes.DateTimeField(model_attr='created', faceted=True)
modified = indexes.DateTimeField(model_attr='updated', faceted=True)
organizations = indexes.MultiValueField(faceted=True)
author_emails = indexes.MultiValueField()
publisher = indexes.CharField(faceted=True)
rating = indexes.IntegerField(model_attr='rating_sum')
coverages = indexes.MultiValueField()
coverage_types = indexes.MultiValueField()
coverage_east = indexes.FloatField()
coverage_north = indexes.FloatField()
coverage_northlimit = indexes.FloatField()
coverage_eastlimit = indexes.FloatField()
coverage_southlimit = indexes.FloatField()
coverage_westlimit = indexes.FloatField()
coverage_start_date = indexes.DateField()
coverage_end_date = indexes.DateField()
formats = indexes.MultiValueField()
identifiers = indexes.MultiValueField()
language = indexes.CharField(faceted=True)
sources = indexes.MultiValueField()
relations = indexes.MultiValueField()
resource_type = indexes.CharField(faceted=True)
comments = indexes.MultiValueField()
comments_count = indexes.IntegerField(faceted=True)
owners_logins = indexes.MultiValueField(faceted=True)
owners_names = indexes.MultiValueField(faceted=True)
owners_count = indexes.IntegerField(faceted=True)
viewers_logins = indexes.MultiValueField(faceted=True)
viewers_names = indexes.MultiValueField(faceted=True)
viewers_count = indexes.IntegerField(faceted=True)
editors_logins = indexes.MultiValueField(faceted=True)
editors_names = indexes.MultiValueField(faceted=True)
editors_count = indexes.IntegerField(faceted=True)
def get_model(self):
    """Tell haystack which model this search index indexes."""
    return BaseResource
def index_queryset(self, using=None):
    """Index only resources whose access control is public or discoverable."""
    return self.get_model().objects.filter(Q(raccess__discoverable=True) |
                                           Q(raccess__public=True))
def prepare_title(self, obj):
    """Index the resource title, or the literal string 'none' when absent."""
    has_title = hasattr(obj, 'metadata') and obj.metadata.title.value is not None
    return obj.metadata.title.value if has_title else 'none'
def prepare_abstract(self, obj):
    """Index the abstract text, or the literal string 'none' when absent."""
    if not hasattr(obj, 'metadata'):
        return 'none'
    description = obj.metadata.description
    if description is None or description.abstract is None:
        return 'none'
    return description.abstract
def prepare_author(self, obj):
    """Index the first-listed creator's name, or 'none' when unavailable.

    Bug fix: ``creators.filter(order=1).first()`` returns None when no
    creator has order=1, which previously raised AttributeError on
    ``first_creator.name``.
    """
    if hasattr(obj, 'metadata'):
        first_creator = obj.metadata.creators.filter(order=1).first()
        if first_creator is not None and first_creator.name is not None:
            return first_creator.name
    return 'none'
def prepare_creators(self, obj):
    """Names of all creators that have a non-null name."""
    if not hasattr(obj, 'metadata'):
        return []
    named = obj.metadata.creators.all().exclude(name__isnull=True)
    return [creator.name for creator in named]
def prepare_contributors(self, obj):
    """Names of all contributors that have a non-null name."""
    if not hasattr(obj, 'metadata'):
        return []
    named = obj.metadata.contributors.all().exclude(name__isnull=True)
    return [contributor.name for contributor in named]
def prepare_subjects(self, obj):
    """Values of all subject keywords that have a non-null value."""
    if not hasattr(obj, 'metadata'):
        return []
    present = obj.metadata.subjects.all().exclude(value__isnull=True)
    return [subject.value for subject in present]
def prepare_organizations(self, obj):
    """Creator organizations; a single 'none' stands in for any creators
    that lack one (emitted at most once)."""
    organizations = []
    none_emitted = False
    if hasattr(obj, 'metadata'):
        for creator in obj.metadata.creators.all():
            if creator.organization is not None:
                organizations.append(creator.organization)
            elif not none_emitted:
                none_emitted = True
                organizations.append('none')
    return organizations
def prepare_publisher(self, obj):
    """The resource publisher, or the literal string 'none' when absent."""
    if hasattr(obj, 'metadata') and obj.metadata.publisher is not None:
        return obj.metadata.publisher
    return 'none'
def prepare_author_emails(self, obj):
    """Email addresses of all creators that have a non-null email."""
    if not hasattr(obj, 'metadata'):
        return []
    with_email = obj.metadata.creators.all().exclude(email__isnull=True)
    return [creator.email for creator in with_email]
def prepare_discoverable(self, obj):
    """True when the resource is public or discoverable."""
    return bool(hasattr(obj, 'raccess') and
                (obj.raccess.public or obj.raccess.discoverable))
def prepare_public(self, obj):
    """True when the resource is public."""
    return bool(hasattr(obj, 'raccess') and obj.raccess.public)
def prepare_published(self, obj):
    """True when the resource is formally published."""
    return bool(hasattr(obj, 'raccess') and obj.raccess.published)
def prepare_coverages(self, obj):
    """Raw coverage values for all coverage elements of the resource."""
    # TODO: reject empty coverages
    # NOTE(review): _value is presumably the serialized (string) form of
    # the coverage stored on the element -- confirm against the model.
    if hasattr(obj, 'metadata'):
        return [coverage._value for coverage in obj.metadata.coverages.all()]
    else:
        return []
def prepare_coverage_types(self, obj):
    """Type strings (e.g. 'point', 'box') of all coverage elements."""
    if not hasattr(obj, 'metadata'):
        return []
    return [coverage.type for coverage in obj.metadata.coverages.all()]
def prepare_coverage_east(self, obj):
    """East coordinate of the first spatial coverage.

    NOTE(review): returns the string 'none' from a FloatField prepare
    method for non-spatial coverage types, and implicitly returns None
    when the object has no metadata -- confirm haystack tolerates both.
    """
    if hasattr(obj, 'metadata'):
        for coverage in obj.metadata.coverages.all():
            if coverage.type == 'point':
                return float(coverage.value["east"])
            elif coverage.type == 'box':
                # Midpoint of the box's east/west limits.
                return (float(coverage.value["eastlimit"]) +
                        float(coverage.value["westlimit"])) / 2
            else:
                return 'none'
def prepare_coverage_north(self, obj):
    """North coordinate of the first spatial coverage.

    NOTE(review): same caveats as prepare_coverage_east -- returns 'none'
    for non-spatial types and implicit None without metadata.
    """
    if hasattr(obj, 'metadata'):
        for coverage in obj.metadata.coverages.all():
            if coverage.type == 'point':
                return float(coverage.value["north"])
            elif coverage.type == 'box':
                # Midpoint of the box's north/south limits.
                return (float(coverage.value["northlimit"]) +
                        float(coverage.value["southlimit"])) / 2
            else:
                return 'none'
def prepare_coverage_northlimit(self, obj):
    """North limit of the first box coverage ('none' for other types)."""
    if hasattr(obj, 'metadata'):
        for coverage in obj.metadata.coverages.all():
            if coverage.type == 'box':
                return coverage.value["northlimit"]
            else:
                return 'none'
def prepare_coverage_eastlimit(self, obj):
    """East limit of the first box coverage ('none' for other types)."""
    if hasattr(obj, 'metadata'):
        for coverage in obj.metadata.coverages.all():
            if coverage.type == 'box':
                return coverage.value["eastlimit"]
            else:
                return 'none'
def prepare_coverage_southlimit(self, obj):
if hasattr(obj, 'metadata'):
for coverage in obj.metadata.coverages.all():
if coverage.type == 'box':
return coverage.value["southlimit"]
else:
retu |
drewcsillag/skunkweb | pylibs/skunkdoc/scanners/STMLScanner.py | Python | gpl-2.0 | 9,219 | 0.009762 | #
# Copyright (C) 2001 Andrew T. Csillag <drew_csillag@geocities.com>
#
# You may distribute under the terms of either the GNU General
# Public License or the SkunkWeb License, as specified in the
# README file.
#
import globals
import os
from DT import DTTags, DTTagRegistry, DTLexer, DTParser, DTUtil
import DT
import common
import string
import types
# Map of recognised STML tag name -> isEmpty flag: 1 means the tag is an
# empty element taking no closing tag, 0 means it wraps a block.
tagConf = {
    'loop': 0, 'for': 0, 'val': 1, 'call': 1, 'continue': 1, 'break': 1,
    'else': 1, 'elif': 1, 'if': 0, 'while': 0, 'comment': 0, '#': 0,
    'raise': 1, 'except': 1, 'finally': 1, 'try': 0, 'default': 1,
    'halt': 1, 'spool': 0,
    # from templating persona
    'cache': 1, 'component': 1, 'datacomp': 1, 'include': 1,
    'brandcomponent': 1, 'branddatacomp': 1, 'brandinclude': 1,
    'date': 1, 'doc': 0, 'import': 1, 'msg': 1, 'redirect': 1,
    'sendmail': 1, 'http_get': 1, 'return': 1, 'set': 1, 'log': 1,
    'warn': 1, 'error': 1, 'debug': 1, 'catalog': 1, 'multicatalog': 1,
    'curl': 1, 'url': 1, 'img': 1, 'form': 1, 'retain': 1, 'hidden': 1,
    'args': 1, 'compargs': 1,
    # periscopio
    'ad': 1, 'esp': 0, 'por': 0, 'eng': 0,
    # sql
    'sql': 1,
}
class boinkTag(DTTags.DTTag):
    """tag class used for parsing STML"""

    def __init__(self, name, isEmpty):
        DTTags.DTTag.__init__(self, name, isEmpty)

    def parseBlock(self, text, taglist, start, tagreg, name):
        # Non-comment tags use the stock DTTag block parsing.
        if self.tagname not in ('#', 'comment'):
            return DTTags.DTTag.parseBlock(self, text, taglist, start,
                                           tagreg, name)
        # Comment tags borrow GenericCommentTag's implementation: fetch the
        # unbound function from the class dict and call it with this
        # instance (Python 2 idiom for calling another class's method).
        func = DTTags.GenericCommentTag.__dict__['parseBlock']
        return func(self, text, taglist, start, tagreg, name)
def _makeRegistry():
    """Build a DTTagRegistry holding a boinkTag for every configured tag."""
    registry = DTTagRegistry.DTTagRegistry()
    for tagname, is_empty in tagConf.items():
        registry.addTag(boinkTag(tagname, is_empty))
    return registry
def _parseSTMLDocument(filename, contents):
    """Lex and parse STML `contents`; return the parse tree, or None when
    lexing fails (callers treat None as "failed to parse")."""
    # tagRegistry is module-global
    try:
        tagList = DTLexer.doTag(contents, filename)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; narrowed to Exception while keeping the None result.
        return None
    newtaglist = []
    # Filter out tags not present in the registry; plain text is kept.
    for i in tagList:
        if type(i) != type('') and tagRegistry.has_key(i.tagname):
            newtaglist.append(i)
        elif type(i) == type(''):
            newtaglist.append(i)
    return DTParser.parseit(contents, newtaglist, tagRegistry, filename)
def _findTags(tree, kind):
    """Collect, depth-first, every DTToken in `tree` whose tagname is in
    `kind` (a string is treated as a one-element tuple)."""
    if type(kind) == types.StringType:
        kind = (kind,)
    found = []
    for child in tree.children:
        if isinstance(child, DT.DTLexer.DTToken) and child.tagname in kind:
            found.append(child)
        elif isinstance(child, DT.DTParser.Node):
            found.extend(_findTags(child, kind))
    return found
def _findDocStrings(tree):
    """Depth-first search for <doc> blocks; returns the containing nodes."""
    if not isinstance(tree, DT.DTParser.Node):
        return []
    first = tree.children[0]
    if isinstance(first, DT.DTLexer.DTToken) and first.tagname == 'doc':
        # This node *is* a doc block; do not recurse into it.
        return [tree]
    blocks = []
    for child in tree.children:
        if isinstance(child, DT.DTParser.Node):
            blocks.extend(_findDocStrings(child))
    return blocks
# For each dependency tag, the positional-argument spec handed to
# DTUtil.tagCall when extracting the dependency's attributes.
_depSpecs = {
    'datacomp': ['var', 'name'],
    'include': ['name'],
    'component': ['name'],
    'branddatacomp': ['var', 'name'],
    'brandinclude': ['name'],
    'brandcomponent': ['name'],
}
class STMLDocument:
    """Parsed representation of a single STML document.

    Parses ``source`` immediately and, on success, extracts the document's
    includes, components, data components, imports, cache directives,
    dependency list and doc string.
    """

    def __init__(self, name, source, kind):
        self.kind = kind      # document kind (e.g. derived from extension)
        self.name = name      # path of the document
        self.source = source  # raw STML text
        self.parseDocument()
        self.imports = []
        self.caches = []
        self.dependancies = []
        self.usedBy = []  # filled in by renderer (if applicable)
        if self.parsed is not None:
            self.getIncludes()
            self.getComponents()
            self.getDataComponents()
            self.getDependancies()
            self.getImports()
            self.getCaches()
            self.getDocString()
        else:
            if globals.VERBOSE:
                print 'document %s failed to parse' % name
            globals.ERRORS.append('document %s failed to parse' % name)
            self.includes = self.brandincludes = []
            self.components = self.brandcomponents = []
            self.datacomps = self.branddatacomps = []
            self.docString = 'THIS DOCUMENT FAILED TO PARSE!'

    def parseDocument(self):
        # Sets self.parsed to the parse tree, or None when parsing fails.
        try:
            self.parsed = _parseSTMLDocument(self.name, self.source)
        except:
            self.parsed = None

    def _getDependsOnFull(self, tagname):
        # Return [(tagname, name, kwargs), ...] for every matching tag.
        tags = _findTags(self.parsed, tagname)
        if not tags:
            return []
        l = []
        for tag in tags:
            #print 'TAG=', tag.tagname
            args = DTUtil.tagCall(tag, _depSpecs[tag.tagname], kwcol='kw')
            l.append((tag.tagname, args['name'], args['kw']))
        return l

    def _getDependsOn(self, tagname):
        # Just the dependency names.
        return map(lambda x: x[1], self._getDependsOnFull(tagname))

    def getDependancies(self):
        # [tagname, name, cache-argument-or-None] for all dependency kinds.
        self.dependancies = map(
            lambda (tn, n, a): [tn, n, a.get('cache')],
            self._getDependsOnFull(('include', 'component', 'datacomp',
                                    'brandinclude', 'brandcomponent',
                                    'branddatacomp')))

    def getIncludes(self):
        self.includes = self._getDependsOn('include')
        self.brandincludes = self._getDependsOn('brandinclude')

    def getComponents(self):
        self.components = self._getDependsOn('component')
        self.brandcomponents = self._getDependsOn('brandcomponent')

    def getDataComponents(self):
        self.datacomps = self._getDependsOn('datacomp')
        self.branddatacomps = self._getDependsOn('branddatacomp')

    def getImports(self):
        # Module names pulled in via import tags.
        tags = _findTags(self.parsed, 'import')
        for tag in tags:
            args = DTUtil.tagCall(tag, ['module', ('items', 'None')])
            self.imports.append(args['module'])

    def getCaches(self):
        # Cache directives as ('until', value) or ('duration', value) pairs.
        tags = _findTags(self.parsed, 'cache')
        if not tags:
            return
        for tag in tags:
            args = DTUtil.tagCall(tag,
                                  [('until', 'None'), ('duration', 'None')])
            if args['until'] != None:
                self.caches.append(('until', args['until']))
            else:
                self.caches.append(('duration', args['duration']))

    def getDocString(self):
        tags = _findDocStrings(self.parsed)  # Tags(self.parsed, 'doc')
        if not tags:
            self.docString = ''
            return
        try:
            # Join the doc block's text children, dropping open/close tags.
            tag = string.join(tags[0].children[1:-1], '')
        except AttributeError:
            print tags[0], dir(tags[0])
            print tags, dir(tags)
            raise
        self.docString = common.doDocString(tag)

    def relativize(self, targetFile):
        # Resolve targetFile relative to this document's directory;
        # absolute targets are returned unchanged.
        if targetFile[0] == '/':
            return targetFile
        fromFile = self.name
        path, filename = os.path.split(fromFile)
        newPath = os.path.normpath(path + '/' + targetFile)
        newPath = string.replace(newPath, '//', '/')
        return newPath

    def linkify(self, targetFile):
        # Convert a (relativized) path into a dotted link id.
        return string.replace(self.relativize(targetFile), '/', '.')
def processFile( filename, kind ):
contents = open( filename, 'r' ).read()
try:
tree = _parseSTMLDocument( filename, contents )
except:
if globals.VERBOSE:
print 'error while parsing', filename
globals.ERRORS.append('error while parsing %s'% filename)
raise
if not tree:
return
doc = STMLDocument(filename, kind)
# Create the module-global tag registry used by _parseSTMLDocument.
tagRegistry = _makeRegistry()
def getExtension(name):
    """Return the extension of `name` including the dot, or '' if none.

    Bug fix: the original used ``string.rfind`` and tested ``if ind:`` --
    rfind returns -1 (truthy) when no dot is present, so dotless names
    wrongly returned their last character, and leading-dot names
    ('.bashrc', index 0, falsy) returned ''.
    """
    ind = name.rfind('.')
    if ind != -1:
        return name[ind:]
    return ''
def isSTML(file):
    """True when `file` carries one of the STML document extensions."""
    stml_extensions = ('.html', '.comp', '.dcmp')
    return getExtension(file) in stml_extensions
def getSTMLKind(file):
ext = getExtension(file)
ret |
Nebucatnetzer/tamagotchi | pygame/lib/python3.4/site-packages/faker/providers/phone_number/zh_CN/__init__.py | Python | gpl-2.0 | 589 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .. import Provider as PhoneNumberProvider
class Provider(PhoneNumberProvider):
    """Chinese (zh_CN) mobile phone number provider."""

    # Known mobile prefixes.  NOTE(review): 145 appears twice in the
    # original data; preserved as-is to keep the sampling distribution.
    phonenumber_prefixes = [134, 135, 136, 137, 138, 139, 147, 150,
                            151, 152, 157, 158, 159, 182, 187, 188,
                            130, 131, 132, 145, 155, 156, 185, 186,
                            145, 133, 153, 180, 181, 189]

    # Each format is the 3-digit prefix followed by 8 random digits.
    formats = ["%d########" % prefix for prefix in phonenumber_prefixes]

    @classmethod
    def phonenumber_prefix(cls):
        """Return a randomly chosen mobile prefix."""
        return cls.random_element(cls.phonenumber_prefixes)
|
docker-solr/docker-solr | tools/serve_local.py | Python | apache-2.0 | 1,512 | 0.002646 | #!/usr/bin/env python3
#
# Script which serves local solr tgz files simulating an Apache mirror server
#
from http.server import BaseHTTPRequestHandler, HTTPServer
import os
import sys
PORT_NUMBER = 8083
#This class will handle any incoming request
class myHandler(BaseHTTPRequestHandler):
    """Serves files from ./downloads/, simulating an Apache mirror.

    A GET whose path ends in 'quit' shuts the server down; any other GET
    serves ./downloads/<basename-of-path> as application/gzip.
    """

    def do_GET(self):
        """Handle a GET request: shut down on *quit, else serve a file."""
        if self.path.endswith("quit"):
            print("Exiting")
            server.socket.close()  # `server` is the module-level HTTPServer
            sys.exit()
        # Only the basename is used, which also prevents path traversal.
        # (Renamed from `file`, which shadowed the builtin.)
        local_path = "./downloads/%s" % self.path.split("/")[-1]
        if not os.path.exists(local_path):
            self.send_response(404)
            self.end_headers()
            self.wfile.write(("File %s not found" % local_path).encode())
            return
        try:
            with open(local_path, 'rb') as f:
                size = os.path.getsize(local_path)
                self.send_response(200)
                self.send_header('Content-type', 'application/gzip')
                self.send_header('Content-length', size)
                self.end_headers()
                self.wfile.write(f.read())
                return
        except Exception as e:
            self.send_response(500)
            self.end_headers()
            self.wfile.write(("Error: %s" % e).encode())
            return
# Start serving; Ctrl-C (or a */quit request) shuts the server down.
# NOTE(review): consider wrapping in `if __name__ == "__main__":` so the
# module can be imported without starting the server.
try:
    server = HTTPServer(('', PORT_NUMBER), myHandler)
    print("Started local web server serving Solr artifacts on port %s" % PORT_NUMBER)
    server.serve_forever()
except KeyboardInterrupt:
    print("^C received, shutting down the web server")
    server.socket.close()
ValyrianTech/BitcoinSpellbook-v0.3 | trigger/recurringtrigger.py | Python | gpl-3.0 | 2,696 | 0.001855 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from datetime import datetime
from helpers.loghelpers import LOG
from .trigger import Trigger
from .triggertype import TriggerType
from validators.validators import valid_amount, valid_timestamp
class RecurringTrigger(Trigger):
    """A trigger that activates repeatedly on a fixed interval.

    Activation starts at ``begin_time``, repeats every ``interval`` seconds
    and optionally stops at ``end_time``.  ``next_activation`` tracks the
    next scheduled activation (all times are unix timestamps).
    """

    def __init__(self, trigger_id):
        super(RecurringTrigger, self).__init__(trigger_id=trigger_id)
        self.trigger_type = TriggerType.RECURRING
        self.next_activation = None  # unix timestamp of the next activation
        self.begin_time = None       # first activation time
        self.end_time = None         # optional last allowed activation time
        self.interval = None         # seconds between activations

    def conditions_fulfilled(self):
        """Return True when the trigger is due to activate now."""
        # Not fully configured yet: never fires.
        if self.interval is None or self.begin_time is None:
            return False
        if self.end_time is None:
            return self.next_activation <= int(time.time())
        elif self.end_time <= int(time.time()):
            # Past the end time: mark the trigger finished and stop firing.
            LOG.info('Recurring trigger %s has reached its end time' % self.id)
            self.status = 'Succeeded'
            self.save()
            return False
        return self.next_activation <= int(time.time()) <= self.end_time

    def activate(self):
        """Activate the trigger and schedule the next activation."""
        super(RecurringTrigger, self).activate()
        # Only advance while the next slot still falls before end_time.
        if self.end_time is None or self.next_activation + self.interval <= self.end_time:
            self.next_activation += self.interval  # Todo what if trigger was activated after interval has passed??
            LOG.info('Setting next activation of recurring trigger %s to %s' % (self.id, datetime.fromtimestamp(self.next_activation)))
            self.save()

    def configure(self, **config):
        """Apply configuration values, validating each before assignment."""
        super(RecurringTrigger, self).configure(**config)
        if 'interval' in config and valid_amount(config['interval']):
            self.interval = config['interval']
        if 'begin_time' in config and valid_timestamp(config['begin_time']):
            self.begin_time = config['begin_time']
        if 'end_time' in config and valid_timestamp(config['end_time']):
            self.end_time = config['end_time']
        if 'next_activation' in config and valid_timestamp(config['next_activation']):
            self.next_activation = config['next_activation']
        elif self.begin_time is not None:
            # Without an explicit next_activation, start at begin_time.
            self.next_activation = self.begin_time
            LOG.info('Setting first activation of recurring trigger %s to %s' % (self.id, datetime.fromtimestamp(self.next_activation)))
        # Recurring triggers may activate multiple times.
        self.multi = True

    def json_encodable(self):
        """Return a JSON-serializable dict representation of the trigger."""
        ret = super(RecurringTrigger, self).json_encodable()
        ret.update({
            'begin_time': self.begin_time,
            'end_time': self.end_time,
            'interval': self.interval,
            'next_activation': self.next_activation})
        return ret
return ret
|
alexrudnick/terere | bibletools/unify_bibles.py | Python | gpl-3.0 | 1,141 | 0.003506 | #!/usr/bin/env python3
"""
Given two Bibles all in one file (with books and verses in any order), with one
verse per line, with lines like this:
BOOK_chapter_verse{TAB}Text of verse goes here...
... print out which verses are present in the first Bible but missing in the
second, and vice-versa.
"""
import sys
import util
def set_of_verses(fn):
    """Return the set of verse identifiers found in the given filename.

    Each line is expected to be '<VERSE_ID><TAB><text>'.  Splitting with
    maxsplit=1 tolerates tab characters inside the verse text, which
    previously raised ValueError ("too many values to unpack").
    """
    out = set()
    with open(fn) as infile:
        for line in infile:
            line = line.strip()
            verse, text = line.split("\t", 1)
            if verse in out:
                util.dprint("WARNING duplicate verse {0} in {1}".format(
                    verse, fn))
            out.add(verse)
    return out
def main():
    """Print verses present in one Bible file but missing from the other."""
    left_verses = set_of_verses(sys.argv[1])
    right_verses = set_of_verses(sys.argv[2])
    print("[left but not right]")
    for verse in sorted(left_verses - right_verses):
        print(verse)
    print("[right but not left]")
    for verse in sorted(right_verses - left_verses):
        print(verse)

if __name__ == "__main__": main()
|
soul-F/forumdemo | article/views.py | Python | gpl-2.0 | 2,005 | 0.018734 | # coding: utf-8
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.shortcuts import render_to_response,redirect
from models import Article
from block.views import Demo
from django.contrib.auth.models import User
from django.template.context import RequestContext
from django.contrib import messages
from django.core.urlresolvers import reverse
# Create your views here.
def article_list(request, block_id):
    """List all articles in a block, most recently updated first."""
    block_id = int(block_id)
    block = Demo.objects.get(id=block_id)
    articles = Article.objects.filter(block=block).order_by("-update_timestamp")
    return render_to_response("article_list.html", {"articles": articles, "b": block},
                              context_instance=RequestContext(request))
@login_required
def create_article(request, block_id):
    """Show the creation form (GET) or create a new article (POST).

    On invalid input the form is re-rendered with an error message; on
    success the user is redirected to the block's article list.
    """
    block_id = int(block_id)
    block = Demo.objects.get(id=block_id)
    if request.method == "GET":
        return render_to_response("article_create.html", {"b": block},
                                  context_instance=RequestContext(request))
    else:
        title = request.POST["title"].strip()
        content = request.POST["content"].strip()
        if not title or not content:
            # Both title and content are required.
            messages.add_message(request, messages.ERROR, u"标题内容均不能为空")
            return render_to_response("article_create.html", {"b": block, "title": title, "content": content},
                                      context_instance=RequestContext(request))
        # owner = User.objects.all()[0] #RODO:
        new_article = Article(block=block, owner=request.user, title=title, content=content)
        new_article.save()
        messages.add_message(request, messages.INFO, u"成功发布文章")
        return redirect(reverse("article_list", args=[block.id]))
def article_detail(request, article_id):
    """Render the detail page for a single article."""
    article_id = int(article_id)
    article = Article.objects.get(id=article_id)
    # Consistency fix: pass a RequestContext like the other views in this
    # module so context processors (user, messages, ...) are available.
    return render_to_response("article_detail.html", {"article": article},
                              context_instance=RequestContext(request))
|
bongo-project/bongo | src/libs/python/bongo/external/email/message.py | Python | gpl-2.0 | 32,690 | 0.001835 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Basic message object for the email package object model."""
__all__ = ['Message']
import re
from cStringIO import StringIO
# Intrapackage imports
import bongo.external.email.charset
from bongo.external.email import utils
from bongo.external.email import errors
from bongo.external.email import payloads
SEMISPACE = '; '
# Regular expression used to split header parameters. BAW: this may be too
# simple. It isn't strictly RFC 2045 (section 5.1) compliant, but it catches
# most headers found in the wild. We may eventually need a full fledged
# parser eventually.
paramre = re.compile(r'\s*;\s*')
# Regular expression that matches `special' characters in parameters, the
# existance of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
# Helper functions
def _formatparam(param, value=None, quote=True):
    """Convenience function to format and return a key=value pair.

    This will quote the value if needed or if quote is true.  A 3-tuple
    value (charset, language, raw-value) is encoded per RFC 2231 and the
    parameter name gets a '*' suffix to mark the extended syntax.
    """
    if value is not None and len(value) > 0:
        # A tuple is used for RFC 2231 encoded parameter values where items
        # are (charset, language, value).  charset is a string, not a Charset
        # instance.
        if isinstance(value, tuple):
            # Encode as per RFC 2231
            param += '*'
            value = utils.encode_rfc2231(value[2], value[0], value[1])
        # BAW: Please check this.  I think that if quote is set it should
        # force quoting even if not necessary.
        if quote or tspecials.search(value):
            return '%s="%s"' % (param, utils.quote(value))
        else:
            return '%s=%s' % (param, value)
    else:
        # Bare parameter with no value.
        return param
def _parseparam(s):
plist = []
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and s.count('"', 0, end) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
if '=' in f:
i = f.index('=')
f = f[:i].strip().lower() + '=' + f[i+1:].strip()
plist.append(f.strip())
s = s[end:]
return plist
def _unquotevalue(value):
    """Unquote a parameter value, preserving the RFC 2231 triple form.

    Unlike utils.collapse_rfc2231_value() this does not try to convert the
    value to a unicode -- Message.get_param() and Message.get_params() are
    both defined to return the tuple in the face of RFC 2231 parameters.
    """
    if isinstance(value, tuple):
        charset, language, raw = value
        return charset, language, utils.unquote(raw)
    return utils.unquote(value)
class Message:
"""Basic message object.
A message object is defined as something that has a bunch of RFC 2822
headers and a payload. It may optionally have an envelope header
(a.k.a. Unix-From or From_ header). If the message is a container (i.e. a
multipart or a message/rfc822), then the payload is a list of Message
objects, otherwise it is a string.
Message objects implement part of the `mapping' interface, which assumes
there is exactly one occurrance of the header per message. Some headers
do in fact appear multiple times (e.g. Received) and for those headers,
you must use the explicit API to set or get all the headers. Not all of
the mapping methods are implemented.
"""
def __init__(self):
    self._headers = []     # list of header (name, value) pairs, in order
    self._unixfrom = None  # Unix From_ envelope line, if any
    self._payload = None   # str, list of Messages, or payloads.Payload
    self._charset = None   # default Charset for the body
    # Defaults for multipart messages
    self.preamble = self.epilogue = None
    self.defects = []      # parsing defects found for this message
    # Default content type
    self._default_type = 'text/plain'
def __str__(self):
    """Return the entire formatted message as a string.

    This includes the headers, body, and envelope header.
    """
    return self.as_string(unixfrom=True)
def as_string(self, unixfrom=False):
    """Return the entire formatted message as a string.

    Optional `unixfrom' when True, means include the Unix From_ envelope
    header.

    This is a convenience method and may not generate the message exactly
    as you intend because by default it mangles lines that begin with
    "From ".  For more flexibility, use the flatten() method of a
    Generator instance.
    """
    # Imported here, presumably to avoid a circular import at module load
    # time -- TODO confirm.
    from bongo.external.email.Generator import Generator
    fp = StringIO()
    g = Generator(fp)
    g.flatten(self, unixfrom=unixfrom)
    return fp.getvalue()
def is_multipart(self):
    """Return True if the message consists of multiple parts."""
    # A list payload holds sub-Message objects; anything else is scalar.
    return isinstance(self._payload, list)
#
# Unix From_ line
#
def set_unixfrom(self, unixfrom):
    # Store the Unix From_ envelope header line.
    self._unixfrom = unixfrom
def get_unixfrom(self):
    # Return the Unix From_ envelope header line, or None.
    return self._unixfrom
#
# Payload manipulation.
#
def attach(self, payload):
    """Add the given payload to the current payload.

    The current payload will always be a list of objects after this method
    is called.  If you want to set the payload to a scalar object, use
    set_payload() instead.
    """
    if self._payload is None:
        self._payload = []
    self._payload.append(payload)
def get_payload(self, i=None, decode=False):
    """Return a reference to the payload.

    The payload will either be a list object or a string.  If you mutate
    the list object, you modify the message's payload in place.  Optional
    i returns that index into the payload.

    Optional decode is a flag indicating whether the payload should be
    decoded or not, according to the Content-Transfer-Encoding header
    (default is False).  If the message is a multipart and the decode
    flag is True, then None is returned.
    """
    if i is None:
        if isinstance(self._payload, list):
            # Multipart: cannot decode the container itself.
            if decode:
                return None
            else:
                return self._payload
        elif isinstance(self._payload, str):
            # Encode through the message charset unless raw data is
            # wanted via decode=True.
            if self._charset and not decode:
                return self._charset.body_encode(self._payload)
            else:
                return self._payload
        elif isinstance(self._payload, payloads.Payload):
            # Stream the payload object and concatenate its chunks.
            # NOTE(review): any other payload type falls through and
            # implicitly returns None -- confirm that is intended.
            ret = ""
            for item in self.iter_payload(decode):
                ret += item
            return ret
    elif not isinstance(self._payload, list):
        # An index was given but the payload is not a multipart list.
        raise TypeError('Expected list, got %s' % type(self._payload))
    else:
        # Multipart container
        if decode:
            return None
        else:
            return self._payload[i]
def get_payload_obj(self):
    """Return the payload as a payloads.Payload (or list) suitable for
    iteration, wrapping a plain string in a MemoryPayload."""
    if self._payload == None:
        return None
    #XXX: document
    if isinstance(self._payload, str):
        # Wrap raw strings; charset defaults to us-ascii when unset.
        if self._charset:
            charset = self._charset.input_charset
        else:
            charset = "us-ascii"
        payload = payloads.MemoryPayload(charset, self._payload)
    elif isinstance(self._payload, payloads.Payload) or isinstance(self._payload, list):
        payload = self._payload
    else:
        print "unsupported payload type: %s" % (str(self._payload))
        raise TypeError('unsupported payload type for iterating')
    return payload
def set_payload(self, payload, charset=None):
"""Set the payload to the given value.
Optional charset sets the message's default character set. See
set_charset() for details.
"""
if isinstance(payload, payloads.Payload):
|
zhangvs1988/zhangyl-Djangodemo | article/migrations/0007_auto_20160811_1915.py | Python | gpl-3.0 | 419 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-11 11:15
fr | om __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration renaming Article.owner to Article.owne.

    NOTE(review): the new name 'owne' looks like a typo for 'owner' --
    confirm against the model definitions; applied migrations must not be
    rewritten, so any correction belongs in a follow-up migration.
    """

    dependencies = [
        ('article', '0006_article_owner'),
    ]

    operations = [
        migrations.RenameField(
            model_name='article',
            old_name='owner',
            new_name='owne',
        ),
    ]
class Wrapper(object):
    """Transparent proxy around a value held in a field of an owner object.

    Delegates attribute access and comparisons to the wrapped value and
    notifies the owning object (via ``_field_mutated``) when a mutating
    call such as ``append`` is performed.

    NOTE(review): ``__eq__`` is defined without ``__hash__``; on Python 3
    this would make instances unhashable -- confirm intent before porting.
    """

    def __init__(self, target, robj=None, fname=None):
        self._target = target  # the wrapped value
        self._owner = robj     # object owning the field, or None
        self._fname = fname    # name of the field on the owner

    def unwrap(self):
        """Return the wrapped value."""
        return self._target

    def append(self, *args, **kwargs):
        return self._handle_mutation_call('append', *args, **kwargs)

    def size(self): return self.__len__()

    def _handle_mutation_call(self, mname, *args, **kwargs):
        # Forward a mutating method call to the target, then notify the
        # owner so it can react to the field change.
        if not hasattr(self._target, mname):
            raise AttributeError(self._target, mname)
        ans = getattr(self._target, mname)(*args, **kwargs)
        if self._owner is not None:
            self._owner._field_mutated(self._fname, self._target)
        return ans

    def __len__(self):
        # Scalar fields have length 1 (0 when unset); collection fields
        # defer to the target's own length.
        ftype = self._owner.meta().field(self._fname)
        if self._target is None:
            return 0
        elif ftype.is_scalar():
            return 1
        else:
            return len(self._target)

    def __iter__(self):
        # Iterating a scalar yields the single value; collections are
        # iterated directly; an unset target yields nothing.
        ftype = self._owner.meta().field(self._fname)
        if self._target is None:
            pass
        elif ftype.is_scalar():
            yield self._target
        else:
            for x in self._target:
                yield x

    def __getattr__(self, name):
        # Fall back to the wrapped value for any unknown attribute.
        return getattr(self._target, name)

    def __add__(self, other):
        if isinstance(other, Wrapper):
            other = other.unwrap()
        return Wrapper.wrap(self._target + other)

    def __eq__(self, other): return self.__bop__("eq", other)
    def __ne__(self, other): return self.__bop__("ne", other)
    def __gt__(self, other): return self.__bop__("gt", other)
    def __lt__(self, other): return self.__bop__("lt", other)
    def __ge__(self, other): return self.__bop__("ge", other)
    def __le__(self, other): return self.__bop__("le", other)

    def __str__(self): return str(self._target)
    def __repr__(self): return repr(self._target)

    def __bop__(self, op, other):
        # Dispatch a binary comparison to the wrapped value, unwrapping
        # the other operand when it is also a Wrapper.
        if isinstance(other, Wrapper):
            other = other.unwrap()
        opname = "__%s__" % op
        return getattr(self.unwrap(), opname)(other)

    @staticmethod
    def wrap(*a, **kw):
        # Wrapping is currently disabled: the raw value is returned as-is.
        # return Wrapper(*a, **kw)
        return a[0]
def wrap(*a, **kw):
return Wrapper.wrap(*a, **kw)
def unwrap(val):
if isinstance(val, Wrapper):
return val.unwrap()
else:
return val
|
google-research/google-research | kws_streaming/train/train_test.py | Python | apache-2.0 | 5,152 | 0.00427 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests model train, based on tensorflow/examples/speech_commands."""
import os
from absl import flags
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from kws_streaming.models import model_flags
from kws_streaming.models import model_params
from kws_streaming.train import train
tf.disable_eager_execution()
FLAGS = flags.FLAGS
# Used to convert a dictionary into an object, for mocking parsed flags.
class DictStruct(object):
def __init__(self, **entries):
self.__dict__.update(entries)
class TrainTest(tf.test.TestCase, parameterized.TestCase):
def _GetWavData(self):
with self.cached_session():
sample_data = tf.zeros([32000, 2])
wav_encoder = tf.audio.encode_wav(sample_data, 16000)
wav_data = self.evaluate(wav_encoder)
return wav_data
def _SaveTestWavFile(self, filename, wav_data):
with open(filename, 'wb') as f:
f.write(wav_data)
def _SaveWavFolders(self, root_dir, labels, how_many):
wav_data = self._GetWavData()
for label in labels:
dir_name = os.path.join(root_dir, label)
os.mkdir(dir_name)
for i in range(how_many):
file_path = os.path.join(dir_name, 'some_audio_%d.wav' % i)
self._SaveTestWavFile(file_path, wav_data)
def _PrepareDummyTrainingData(self):
tmp_dir = FLAGS.test_tmpdir
# create data folder with subfolders,
# where every subfolder is a category/label with wav data inside
# we will automatically split these data into
# training, validation and testing sets
data_dir = os.path.join(tmp_dir, 'data1')
os.mkdir(data_dir)
self._SaveWavFolders(data_dir, ['a', 'b', 'c'], 100)
background_dir = os.path.join(data_dir, '_background_noise_')
os.mkdir(background_dir)
wav_data = self._GetWavData()
for i in range(10):
file_path = os.path.join(background_dir, 'background_audio_%d.wav' % i)
self._SaveTestWavFile(file_path, wav_data)
return data_dir
def _PrepareDummyTrainingDataSplit(self):
tmp_dir = FLAGS.test_tmpdir
# main wav data folder
data_dir = os.path.join(tmp_dir, 'data0')
os.mkdir(data_dir)
# create 4 subfolders: training, validation, testing, _background_noise_
# training data
training_dir = os.path.join(data_dir, 'training')
os.mkdir(training_dir)
self._SaveWavFolders(training_dir, ['a', 'b', 'c'], 100)
# validation data
validation_dir = os.path.join(data_dir, 'validation')
os.mkdir(validation_dir)
self._SaveWavFolders(validation_dir, ['a', 'b', 'c'], 100)
# testing data
testing_dir = os.path.join(data_dir, 'testing')
os.mkdir(testing_dir)
self._SaveWavFolders(testing_dir, ['a', 'b', 'c'], 100)
# _background_noise_ data
background_dir = os.path.join(data_dir, '_background_noise_')
os.mkdir(background_dir)
wav_data = self._GetWavData()
for i in range(10):
file_path = os.path.join(background_dir, 'background_audio_%d.wav' % i)
self._SaveTestWavFile(file_path, wav_data)
return data_dir
def | _PrepareDummyDir(self, dir_name):
path = os.path.join(FLAGS.test_tmpdir, dir_name)
os.mkdir(path)
return path
def _GetDefaultFlags(self, split_data):
params = model_params.dnn_params()
params.data_dir = self._PrepareDummyTrainingData(
) if spl | it_data == 1 else self._PrepareDummyTrainingDataSplit()
params.wanted_words = 'a,b,c'
params.split_data = split_data
params.summaries_dir = self._PrepareDummyDir('summaries' + str(split_data))
params.train_dir = self._PrepareDummyDir('train' + str(split_data))
params.how_many_training_steps = '2'
params.learning_rate = '0.01'
params.eval_step_interval = 1
params.save_step_interval = 1
params.clip_duration_ms = 100
params.batch_size = 1
return model_flags.update_flags(params)
@parameterized.named_parameters([
dict(testcase_name='default data split', split_data=1),
dict(testcase_name='user splits data', split_data=0)
])
def testTrain(self, split_data):
input_flags = self._GetDefaultFlags(split_data)
input_flags = model_flags.update_flags(input_flags)
train.train(input_flags)
self.assertTrue(
tf.io.gfile.exists(os.path.join(input_flags.train_dir, 'graph.pbtxt')))
self.assertTrue(
tf.io.gfile.exists(os.path.join(input_flags.train_dir, 'labels.txt')))
self.assertTrue(
tf.io.gfile.exists(
os.path.join(input_flags.train_dir, 'accuracy_last.txt')))
if __name__ == '__main__':
tf.test.main()
|
Psycojoker/dierentheater | scraper/management/commands/create_db_dumps.py | Python | agpl-3.0 | 932 | 0 | # lachambre.be to json sausage machine
# Copyright (C) 2011 Laurent Peuch <cortex@worlddomination.be>
#
# | This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, | or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from lachambre.utils import dump_db
class Command(BaseCommand):
def handle(self, *args, **options):
dump_db()
|
thomwiggers/onebot | onebot/plugins/execute.py | Python | bsd-3-clause | 1,513 | 0 | """
=====================================================
:mod:`onebot.plugins.execute` Run commands on connect
=====================================================
Run commands on connect
Config:
=======
.. code-block: ini
[bot]
includes=
onebot.plugins.execute
[onebot.plugins.execute]
commands=
NS IDENTIFY f00bar
PRIVMSG SomeBot :LetMeIn
irc3 also has a plugin now, might be more useful.
"""
import irc3
import time
class ExecutePlugin:
"""Execute commands after having connected"""
def __init__(self, bot):
self.bot = bot
self.log = bot.log.getChild(__name__)
config = bot.config.get(__name__, {})
self.commands = config.get("commands", [])
self.delayed_commands = config.get("delayed_commands", [])
@irc3.event(irc3.rfc.CONNECTED)
def connected(self, **kwargs):
self.log.info("Sending perform commands")
for command in self.commands:
self.log.debug("Sending command %s", command)
self.bot.send(command)
if not self.commands: # pragma: no cover
self.log.warning("No perform commands!")
self.log.debug("Waiting for delayed commands")
time.sleep(4)
self.log.info("Sending delayed commands")
for command in self.delayed_commands:
self.log.debug("Sending command %s", command)
self. | bot.send(command)
@classmethod
def reload(cls, old): # pragma: no cover
return cls(old. | bot)
|
szhem/spark | python/pyspark/streaming/tests.py | Python | apache-2.0 | 62,306 | 0.001653 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import sys
from itertools import chain
import time
import operator
import tempfile
import random
import struct
import shutil
from functools import reduce
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
if sys.version >= "3":
long = int
from pyspark.context import SparkConf, SparkContext, RDD
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.context import StreamingContext
from pyspark.streaming.kafka import Broker, KafkaUtils, OffsetRange, TopicAndPartition
from pyspark.streaming.flume import FlumeUtils
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream
from pyspark.streaming.listener import StreamingListener
class PySparkStreamingTestCase(unittest.TestCase):
timeout = 30 # seconds
duration = .5
@classmethod
def setUpClass(cls):
class_name = cls.__name__
conf = SparkConf().set("spark.default.parallelism", 1)
cls.sc = SparkContext(appName=class_name, conf=conf)
cls.sc.setCheckpointDir(tempfile.mkdtemp())
@classmethod
def tearDownClass(cls):
cls.sc.stop()
# Clean up in the JVM just in case there has been some issues in Python API
try:
jSparkContextOption = SparkContext._jvm.SparkContext.get()
if jSparkContextOption.nonEmpty():
jSparkContextOption.get().stop()
except:
pass
def setUp(self):
self.ssc = StreamingContext(self.sc, self.duration)
def tearDown(self):
if self.ssc is not None:
self.ssc.stop(False)
# Clean up in the JVM just in case there has been some issues in Python API
try:
jStreamingContextOption = StreamingContext._jvm.SparkContext.getActive()
if jStreamingContextOption.nonEmpty():
jStreamingContextOption.get().stop(False)
except:
pass
def wait_for(self, result, n):
start_time = time.time()
while len(result) < n and time.time() - start_time < self.timeout:
time.sleep(0.01)
if len(result) < n:
print("timeout after", self.timeout)
def _take(self, dstream, n):
"""
Return the first `n` elements in the stream (will start and stop).
"""
results = []
def take(_, rdd):
if rdd and len(results) < n:
results.extend(rdd.take(n - len(results)))
dstream.foreachRDD(take)
self.ssc.start()
self.wait_for(results, n)
return results
def _collect(self, dstream, n, block=True):
"""
Collect each RDDs into the returned list.
:return: list, which will have the collected items.
"""
result = []
def get_output(_, rdd):
if rdd and len(result) < n:
r = rdd.collect()
if r:
result.append(r)
dstream.foreachRDD(get_output)
if not block:
return result
self.ssc.start()
self.wait_for(result, n)
return result
def _test_func(self, input, func, expected, sort=False, input2=None):
"""
@param input: dataset for the test. This should be list of lists.
@param func: wrapped function. This function should return PythonDStream object.
@param expected: expected output for this testcase.
"""
if not isinstance(input[0], RDD):
input = [self.sc.parallelize(d, 1) for d in input]
input_stream = self.ssc.queueStream(input)
if input2 and not isinstance(input2[0], RDD):
input2 = [self.sc.parallelize(d, 1) for d in input2]
input_stream2 = self.ssc.queueStream(input2) if input2 is not None else None
# Apply test function to stream.
if input2:
stream = func(input_stream, input_stream2)
else:
stream = func(input_stream)
result = self._collect(stream, len(expected))
if sort:
self._sort_result_based_on_key(result)
self._sort_result_based_on_key(expected)
self.assertEqual(expected, result)
def _sort_result_based_on_key(self, outputs):
"""Sort the list based on first value."""
for output in outputs:
output.sort(key=lambda x: x[0])
class BasicOperationTests(PySparkStreamingTestCase):
def test_map(self):
"""Basic operation test for DStream.map."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.map(str)
expected = [list(map(str, x)) for x in input]
self._test_func(input, func, expected)
def test_flatMap(self):
"""Basic operation test for DStream.faltMap."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.flatMap(lambda x: (x, x * 2))
expected = [list(chain.from_iterable((map(lambda y: [y, y * 2], x))))
for x in input]
self._ | test_func(input, func, expected)
def test_filter(self):
"""Basic operation test for DStream.filter."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.filter(lambda x: x % 2 == 0) |
expected = [[y for y in x if y % 2 == 0] for x in input]
self._test_func(input, func, expected)
def test_count(self):
"""Basic operation test for DStream.count."""
input = [range(5), range(10), range(20)]
def func(dstream):
return dstream.count()
expected = [[len(x)] for x in input]
self._test_func(input, func, expected)
def test_reduce(self):
"""Basic operation test for DStream.reduce."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.reduce(operator.add)
expected = [[reduce(operator.add, x)] for x in input]
self._test_func(input, func, expected)
def test_reduceByKey(self):
"""Basic operation test for DStream.reduceByKey."""
input = [[("a", 1), ("a", 1), ("b", 1), ("b", 1)],
[("", 1), ("", 1), ("", 1), ("", 1)],
[(1, 1), (1, 1), (2, 1), (2, 1), (3, 1)]]
def func(dstream):
return dstream.reduceByKey(operator.add)
expected = [[("a", 2), ("b", 2)], [("", 4)], [(1, 2), (2, 2), (3, 1)]]
self._test_func(input, func, expected, sort=True)
def test_mapValues(self):
"""Basic operation test for DStream.mapValues."""
input = [[("a", 2), ("b", 2), ("c", 1), ("d", 1)],
[(0, 4), (1, 1), (2, 2), (3, 3)],
[(1, 1), (2, 1), (3, 1), (4, 1)]]
def func(dstream):
return dstream.mapValues(lambda x: x + 10)
expected = [[("a", 12), ("b", 12), ("c", 11), ("d", 11)],
[(0, 14), (1, 11), (2, 12), (3, 13)],
[(1, 11), (2, 11), (3, 11), (4, 11)]]
self._test_func(input, func, expected, sort=True)
def test_flatMapValues(self):
"""Basic |
anaruse/chainer | tests/chainer_tests/functions_tests/connection_tests/test_bilinear.py | Python | mit | 6,944 | 0 | import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
def _uniform(*shape):
return numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
@testing.parameterize(*testing.product({
'in_shapes': [((2,), (4,)), ((2, 1), (4, 2))],
'out_size': [3],
'batch_size': [2]
}))
class TestBilinearFunction(unittest.TestCase):
def setUp(self):
e1_shape = (self.bat | ch_size,) + self.in_shapes[0]
e2_shape = (self.batch_size,) + self.in_shapes[1]
e1_size = numpy.prod(self.in_shapes[0])
e2_size = numpy.prod(self.in_shapes[1])
self.e1 = _uniform | (*e1_shape)
self.e2 = _uniform(*e2_shape)
self.W = _uniform(e1_size, e2_size, self.out_size)
self.V1 = _uniform(e1_size, self.out_size)
self.V2 = _uniform(e2_size, self.out_size)
self.b = _uniform(self.out_size)
self.gy = _uniform(self.batch_size, self.out_size)
self.gge1 = _uniform(*self.e1.shape)
self.gge2 = _uniform(*self.e2.shape)
self.ggW = _uniform(*self.W.shape)
self.ggV1 = _uniform(*self.V1.shape)
self.ggV2 = _uniform(*self.V2.shape)
self.ggb = _uniform(*self.b.shape)
self.check_backward_options = {
'atol': 1e-5, 'rtol': 1e-4, 'dtype': numpy.float64}
self.check_double_backward_options = {
'atol': 1e-4, 'rtol': 1e-3, 'dtype': numpy.float64}
def check_forward(self, e1_data, e2_data, W_data, V1_data, V2_data,
b_data):
e1 = chainer.Variable(e1_data)
e2 = chainer.Variable(e2_data)
W = chainer.Variable(W_data)
e1_data = e1_data.reshape(e1_data.shape[0], -1)
e2_data = e2_data.reshape(e2_data.shape[0], -1)
xp = cuda.get_array_module(e1)
y_expect = xp.einsum('ij,ik,jkl->il', e1_data, e2_data, W_data)
flags = V1_data is None, V2_data is None, b_data is None
if any(flags):
if not all(flags):
raise ValueError(
'Test either all or none of the optional parameters.')
y = functions.bilinear(e1, e2, W)
else:
V1 = chainer.Variable(V1_data)
V2 = chainer.Variable(V2_data)
b = chainer.Variable(b_data)
y = functions.bilinear(e1, e2, W, V1, V2, b)
y_expect = xp.einsum('ij,ik,jkl->il', e1_data, e2_data, W_data)
y_expect += e1_data.dot(V1_data)
y_expect += e2_data.dot(V2_data)
y_expect += b_data
testing.assert_allclose(y_expect, cuda.to_cpu(y.data))
assert y.data.dtype == e1_data.dtype
def test_forward_cpu(self):
self.check_forward(self.e1, self.e2, self.W, self.V1, self.V2, self.b)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.e1), cuda.to_gpu(self.e2), cuda.to_gpu(self.W),
cuda.to_gpu(self.V1), cuda.to_gpu(self.V2), cuda.to_gpu(self.b))
def test_partial_backward_cpu(self):
gradient_check.check_backward(
functions.bilinear, (self.e1, self.e2, self.W), self.gy,
**self.check_backward_options)
@attr.gpu
def test_partial_backward_gpu(self):
gradient_check.check_backward(
functions.bilinear,
(cuda.to_gpu(self.e1), cuda.to_gpu(self.e2), cuda.to_gpu(self.W)),
cuda.to_gpu(self.gy), **self.check_backward_options)
def test_full_backward_cpu(self):
gradient_check.check_backward(
functions.bilinear,
(self.e1, self.e2, self.W, self.V1, self.V2, self.b), self.gy,
**self.check_backward_options)
@attr.gpu
def test_full_backward_gpu(self):
gradient_check.check_backward(
functions.bilinear,
(cuda.to_gpu(self.e1), cuda.to_gpu(self.e2), cuda.to_gpu(self.W),
cuda.to_gpu(self.V1), cuda.to_gpu(self.V2), cuda.to_gpu(self.b)),
cuda.to_gpu(self.gy), **self.check_backward_options)
def test_partial_double_backward_cpu(self):
gradient_check.check_double_backward(
functions.bilinear, (self.e1, self.e2, self.W), self.gy,
(self.gge1, self.gge2, self.ggW), **self.check_backward_options)
@attr.gpu
def test_partial_double_backward_gpu(self):
gradient_check.check_double_backward(
functions.bilinear,
(cuda.to_gpu(self.e1), cuda.to_gpu(self.e2), cuda.to_gpu(self.W)),
cuda.to_gpu(self.gy),
(cuda.to_gpu(self.gge1), cuda.to_gpu(self.gge2),
cuda.to_gpu(self.ggW)), **self.check_backward_options)
def test_full_double_backward_cpu(self):
def f(*inputs):
y = functions.bilinear(*inputs)
return y * y
gradient_check.check_double_backward(
f, (self.e1, self.e2, self.W, self.V1, self.V2, self.b),
self.gy,
(self.gge1, self.gge2, self.ggW, self.ggV1, self.ggV2, self.ggb),
**self.check_double_backward_options)
@attr.gpu
def test_full_double_backward_gpu(self):
def f(*inputs):
y = functions.bilinear(*inputs)
return y * y
gradient_check.check_double_backward(
f,
(cuda.to_gpu(self.e1), cuda.to_gpu(self.e2), cuda.to_gpu(self.W),
cuda.to_gpu(self.V1), cuda.to_gpu(self.V2), cuda.to_gpu(self.b)),
cuda.to_gpu(self.gy),
(cuda.to_gpu(self.gge1), cuda.to_gpu(self.gge2),
cuda.to_gpu(self.ggW), cuda.to_gpu(self.V1), cuda.to_gpu(self.V2),
cuda.to_gpu(self.ggb)), **self.check_double_backward_options)
@attr.slow
class TestBilinearFunctionLarge(unittest.TestCase):
def setUp(self):
self.e1 = _uniform(256, 256)
self.e2 = _uniform(256, 256)
self.w = _uniform(256, 256, 256)
self.v1 = _uniform(256, 256)
self.v2 = _uniform(256, 256)
self.b = _uniform(256)
def test_cpu(self):
chainer.functions.bilinear(
self.e1, self.e2, self.w, self.v1, self.v2, self.b)
@attr.gpu
def test_gpu(self):
chainer.functions.bilinear(*map(cuda.to_gpu, (
self.e1, self.e2, self.w, self.v1, self.v2, self.b)))
class TestBilinearFunctionInvalidArgument(unittest.TestCase):
def setUp(self):
e1 = _uniform(3, 2)
e2 = _uniform(3, 4)
W = _uniform(2, 4, 5)
V1 = _uniform(2, 5)
self.e1 = chainer.Variable(e1)
self.e2 = chainer.Variable(e2)
self.W = chainer.Variable(W)
self.V1 = chainer.Variable(V1)
def test_invalid_full_partial_ambiguous(self):
with self.assertRaises(ValueError):
functions.bilinear(self.e1, self.e2, self.W, self.V1)
testing.run_module(__name__, __file__)
|
mumuwoyou/vnpy-master | sonnet/python/modules/pondering_rnn.py | Python | mit | 7,717 | 0.00298 | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Cores for RNNs with varying number of unrolls.
This file contains implementations for:
* ACT (Adaptive Computation Time)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from sonnet.python.modules import basic
from sonnet.python.modules import rnn_core
from sonnet.python.ops import nest
import tensorflow as tf
def _nested_add(nested_a, nested_b):
"""Add two arbitrarily nested `Tensors`."""
return nest.map(lambda a, b: a + b, nested_a, nested_b)
def _nested_unary_mul(nested_a, p):
"""Multiply `Tensors` in arbitrarily nested `Tensor` `nested_a` with `p`."""
return nest.map(lambda a: p * a, nested_a)
def _nested_zeros_like(nested_a):
return nest.map(tf.zeros_like, nested_a)
class ACTCore(rnn_core.RNNCore):
"""Adaptive computation time core.
Implementation of the model described in "Adaptive Computation Time for
Recurrent Neural Networks" paper, https://arxiv.org/abs/1603.08983.
The `ACTCore` incorporates the pondering RNN of ACT, with different
computation times for each element in the mini batch. Each pondering step is
performed by the `core` passed to the constructor of `ACTCore`.
The output of the `ACTCore` is made of `(act_out, (iteration, remainder)`,
where
* `iteration` counts the number of pondering step in each batch element;
* `remainder` is the remainder as defined in the ACT paper;
* `act_out` is the weighted average output of all pondering steps (see ACT
paper for more info).
"""
def __init__(self, core, output_size, threshold, get_state_for_halting,
name="act_core"):
"""Constructor.
Args:
core: A `sonnet.RNNCore` object. This should only take a single `Tensor`
in input, and output only a single flat `Tensor`.
output_size: An integer. The size of each output in the sequence.
threshold: A float between 0 and 1. Probability to reach for ACT to stop
pondering.
get_state_for_halting: A callable that can take the `core` state and
return the input to the halting function.
name: A string. The name of this module.
Raises:
ValueError: if `threshold` is not between 0 and 1.
ValueError: if `core` has either nested outputs or outputs that are not
one dimensional.
"""
super(ACTCore, self).__init__(name=name)
self._core = core
self._output_size = output_size
self._threshold = threshold
self._get_state_for_halting = get_state_for_halting
if not isinstance(self._core.output_size, tf.TensorShape):
raise ValueError("Output of core should be single Tensor.")
if self._core.output_size.ndims != 1:
raise ValueError("Output of core should be 1D.")
if not 0 <= self._threshold <= 1:
raise ValueError("Threshold should be between 0 and 1, but found {}".
format(self._threshold))
def initial_state(self, *args, **kwargs):
return self._core.initial_state(*args, **kwargs)
@property
def output_size(self):
return tf.TensorShape([self._output_size]), (tf.TensorShape([1]),
tf.TensorShape([1]))
@property
def state_size(self):
return self._core.state_size
@property
def batch_size(self):
self._ensure_is_connected()
return self._batch_size
@property
def dtype(self):
self._ensure_is_connected()
return self._dtype
def _cond(self, unused_x, unused_cumul_out, unused_prev_state,
unused_cumul_state, cumul_halting, unused_iteration,
unused_remainder):
"""The `cond` of the `tf.while_loop`."""
return tf.reduce_any(cumul_halting < 1)
def _body(self, x, cumul_out, prev_state, cumul_state,
cumul_halting, iteration, remainde | r, halting_linear, x_ones):
"""The `body` of `tf.while_loop`."""
# Increase iteration count only for those elements that are still running.
all_ones = tf.constant(1, shape=(self._batch_size, 1), dtype=self._dtype)
is_iteration_over = tf.equal(cumul_halting, all_ones)
next_iteration = tf.where(is | _iteration_over, iteration, iteration + 1)
out, next_state = self._core(x, prev_state)
# Get part of state used to compute halting values.
halting_input = halting_linear(self._get_state_for_halting(next_state))
halting = tf.sigmoid(halting_input, name="halting")
next_cumul_halting_raw = cumul_halting + halting
over_threshold = next_cumul_halting_raw > self._threshold
next_cumul_halting = tf.where(over_threshold, all_ones,
next_cumul_halting_raw)
next_remainder = tf.where(over_threshold, remainder,
1 - next_cumul_halting_raw)
p = next_cumul_halting - cumul_halting
next_cumul_state = _nested_add(cumul_state,
_nested_unary_mul(next_state, p))
next_cumul_out = cumul_out + p * out
return (x_ones, next_cumul_out, next_state, next_cumul_state,
next_cumul_halting, next_iteration, next_remainder)
def _build(self, x, prev_state):
"""Connects the core to the graph.
Args:
x: Input `Tensor` of shape `(batch_size, input_size)`.
prev_state: Previous state. This could be a `Tensor`, or a tuple of
`Tensor`s.
Returns:
The tuple `(output, state)` for this core.
Raises:
ValueError: if the `Tensor` `x` does not have rank 2.
"""
x.get_shape().with_rank(2)
self._batch_size = x.get_shape().as_list()[0]
self._dtype = x.dtype
x_zeros = tf.concat(
[x, tf.zeros(
shape=(self._batch_size, 1), dtype=self._dtype)], 1)
x_ones = tf.concat(
[x, tf.ones(
shape=(self._batch_size, 1), dtype=self._dtype)], 1)
# Weights for the halting signal
halting_linear = basic.Linear(name="halting_linear", output_size=1)
body = functools.partial(
self._body, halting_linear=halting_linear, x_ones=x_ones)
cumul_halting_init = tf.zeros(shape=(self._batch_size, 1),
dtype=self._dtype)
iteration_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)
core_output_size = [x.value for x in self._core.output_size]
out_init = tf.zeros(shape=(self._batch_size,) + tuple(core_output_size),
dtype=self._dtype)
cumul_state_init = _nested_zeros_like(prev_state)
remainder_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)
(unused_final_x, final_out, unused_final_state, final_cumul_state,
unused_final_halting, final_iteration, final_remainder) = tf.while_loop(
self._cond, body, [x_zeros, out_init, prev_state, cumul_state_init,
cumul_halting_init, iteration_init, remainder_init])
act_output = basic.Linear(
name="act_output_linear", output_size=self._output_size)(final_out)
return (act_output, (final_iteration, final_remainder)), final_cumul_state
|
tensorflow/tensorflow | tensorflow/python/distribute/one_device_strategy.py | Python | apache-2.0 | 18,606 | 0.004998 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tf.distribute.Strategy for running on a single device."""
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import input_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute.v1 import input_lib as input_lib_v1
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# TODO(josh11b): Do we wrap values in types to generate errors if you are
# doing something that won't work with other DistributionStrategy
# implementations?
@tf_export("distribute.OneDeviceStrategy", v1=[])
class OneDeviceStrategy(distribute_lib.Strategy):
"""A distribution strategy for running on a single device.
Using this strategy will place any variables created in its scope on the
specified device. Input distributed through this strategy will be
prefetched to the specified device. Moreover, any functions called via
`strategy.run` will also be placed on the specified device
as well.
Typical usage of this strategy could be testing your code with the
tf.distribute.Strategy API before switching to other strategies which
actually distribute to multiple devices/machines.
For example:
```
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
with strategy.scope():
v = tf.Variable(1.0)
print(v.device) # /job:localhost/replica:0/task:0/device:GPU:0
def step_fn(x):
return x * 2
result = | 0
for i in range(10):
result += strategy.run(step_fn, args=(i,))
print(result) # 90
```
"""
def __init__(self, device):
"""Creates a `OneDeviceStrategy`.
Args:
device: Device string identifier for the device on which the variables
should be placed. See class docs for more details on how the device is
used. Examples: "/cpu:0", "/gpu:0", "/device:CPU:0", "/device:GPU:0"
"""
super(OneDeviceStrategy, self).__init__(OneDeviceExtended(self, device | ))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"OneDeviceStrategy")
def experimental_distribute_dataset(self, dataset, options=None): # pylint: disable=useless-super-delegation
"""Distributes a tf.data.Dataset instance provided via dataset.
In this case, there is only one device, so this is only a thin wrapper
around the input dataset. It will, however, prefetch the input data to the
specified device. The returned distributed dataset can be iterated over
similar to how regular datasets can.
NOTE: Currently, the user cannot add any more transformations to a
distributed dataset.
Example:
```
strategy = tf.distribute.OneDeviceStrategy()
dataset = tf.data.Dataset.range(10).batch(2)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
for x in dist_dataset:
print(x) # [0, 1], [2, 3],...
```
Args:
dataset: `tf.data.Dataset` to be prefetched to device.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
Returns:
A "distributed `Dataset`" that the caller can iterate over.
"""
return super(OneDeviceStrategy, self).experimental_distribute_dataset(
dataset, options)
def distribute_datasets_from_function(
self,
dataset_fn, # pylint: disable=useless-super-delegation
options=None):
"""Distributes `tf.data.Dataset` instances created by calls to `dataset_fn`.
`dataset_fn` will be called once for each worker in the strategy. In this
case, we only have one worker and one device so `dataset_fn` is called
once.
The `dataset_fn` should take an `tf.distribute.InputContext` instance where
information about batching and input replication can be accessed:
```
def dataset_fn(input_context):
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size)
return d.shard(
input_context.num_input_pipelines, input_context.input_pipeline_id)
inputs = strategy.distribute_datasets_from_function(dataset_fn)
for batch in inputs:
replica_results = strategy.run(replica_fn, args=(batch,))
```
IMPORTANT: The `tf.data.Dataset` returned by `dataset_fn` should have a
per-replica batch size, unlike `experimental_distribute_dataset`, which uses
the global batch size. This may be computed using
`input_context.get_per_replica_batch_size`.
Args:
dataset_fn: A function taking a `tf.distribute.InputContext` instance and
returning a `tf.data.Dataset`.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
Returns:
A "distributed `Dataset`", which the caller can iterate over like regular
datasets.
"""
return super(OneDeviceStrategy,
self).distribute_datasets_from_function(dataset_fn, options)
def experimental_local_results(self, value): # pylint: disable=useless-super-delegation
"""Returns the list of all local per-replica values contained in `value`.
In `OneDeviceStrategy`, the `value` is always expected to be a single
value, so the result is just the value in a tuple.
Args:
value: A value returned by `experimental_run()`, `run()`,
`extended.call_for_each_replica()`, or a variable created in `scope`.
Returns:
A tuple of values contained in `value`. If `value` represents a single
value, this returns `(value,).`
"""
return super(OneDeviceStrategy, self).experimental_local_results(value)
def run(self, fn, args=(), kwargs=None, options=None): # pylint: disable=useless-super-delegation
"""Run `fn` on each replica, with the given arguments.
In `OneDeviceStrategy`, `fn` is simply called within a device scope for the
given device, with the provided arguments.
Args:
fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
args: (Optional) Positional arguments to `fn`.
kwargs: (Optional) Keyword arguments to `fn`.
options: (Optional) An instance of `tf.distribute.RunOptions` specifying
the options to run `fn`.
Returns:
Return value from running `fn`.
"""
return super(OneDeviceStrategy, self).run(fn, args, kwargs, options)
def reduce(self, reduce_op, value, axis): # pylint: disable=useless-super-delegation
"""Reduce `value` across replicas.
In `OneDeviceStrategy`, there is only one replica, so if axis=None, value
is simply returned. If axis is specified as something other than None,
such as axis=0, value is reduced along that axis and returned.
Example:
```
t = tf.range(10)
result = strategy.reduce(tf.distribute.ReduceOp.SUM, t, axis=None).numpy()
# result: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
result = strategy.reduce(tf.distribute.ReduceOp.SUM, t, axis=0).numpy()
# result: 45
```
Args:
reduce_op: A `tf.distribute.ReduceOp` value specifying how values should
be combined.
value: A "per replica" value, e.g. returned by |
danjac/ownblock | ownblock/ownblock/apps/storage/migrations/0004_auto_20140911_0341.py | Python | mit | 493 | 0.002028 | # | -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import ownblock.apps.storage.models
class Migration(migrations.Migration):
dependencies = [
('storage', '0003_item_photo'),
]
operations = [
migrations.AlterField(
model_name='item',
name='photo',
field=models.ImageField(blank=True, upload_to=ownblock.apps.storage.models._upload_image_to, null=True | ),
),
]
|
laurentb/weboob | modules/caissedepargne/cenet/pages.py | Python | lgpl-3.0 | 14,121 | 0.002691 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
import re
import json
from datetime import datetime
from weboob.browser.pages import LoggedPage, HTMLPage, JsonPage
from weboob.browser.elements import DictElement, ItemElement, method
from weboob.browser.filters.standard import Date, CleanDecimal, CleanText, Format, Field, Env, Regexp, Currency
from weboob.browser.filters.json import Dict
from weboob.capabilities import NotAvailable
from weboob.capabilities.bank import Account, Loan
from weboob.capabilities.contact import Advisor
from weboob.capabilities.profile import Profile
from weboob.capabilities.bill import DocumentTypes, Subscription, Document
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
from weboob.exceptions import BrowserUnavailable
class Transaction(FrenchTransaction):
PATTERNS = [
(re.compile(r'^CB (?P<text>.*?) FACT (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2})', re.IGNORECASE), FrenchTransaction.TYPE_CARD),
(re.compile(r'^RET(RAIT)? DAB (?P<dd>\d+)-(?P<mm>\d+)-.*', re.IGNORECASE), FrenchTransaction.TYPE_WITHDRAWAL),
(re.compile(r'^RET(RAIT)? DAB (?P<text>.*?) (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2}) (?P<HH>\d{2})H(?P<MM>\d{2})', re.IGNORECASE), FrenchTransaction.TYPE_WITHDRAWAL),
(re.compile(r'^VIR(EMENT)?(\.PERIODIQUE)? (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_TRANSFER),
(re.compile(r'^PRLV (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_ORDER),
(re.compile(r'^CHEQUE.*', re.IGNORECASE), FrenchTransaction.TYPE_CHECK),
(re.compile(r'^(CONVENTION \d+ )?COTIS(ATION)? (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_BANK),
(re.compile(r'^\* (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_BANK),
(re.compile(r'^REMISE (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_DEPOSIT),
(re.compile(r'^(?P<text>.*)( \d+)? QUITTANCE .*', re.IGNORECASE), FrenchTransaction.TYPE_ORDER),
(re.compile(r'^CB [\d\*]+ TOT DIF .*', re.IGNORECASE), FrenchTransaction.TYPE_CARD_SUMMARY),
(re.compile(r'^CB [\d\*]+ (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_CARD),
(re.compile(r'^CB (?P<text>.*?) (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2})', re.IGNORECASE), FrenchTransaction.TYPE_CARD),
(re.compile(r'\*CB (?P<text>.*?) (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2})', re.IGNORECASE), FrenchTransaction.TYPE_CARD),
(re.compile(r'^FAC CB (?P<text>.*?) (?P<dd>\d{2})/(?P<mm>\d{2})', re.IGNORECASE), FrenchTransaction.TYPE_CARD),
]
class LoginPage(JsonPage):
def get_response(self):
return self.doc
class CenetLoginPage(HTMLPage):
def login(self, username, password, nuser, codeCaisse, _id, vkpass):
form = self.get_form(id='aspnetForm')
form['__EVENTTARGET'] = "btn_authentifier_securise"
form['__EVENTARGUMENT'] = '{"CodeCaisse":"%s","NumeroBad":"%s","NumeroUsager":"%s",\
"MotDePasse":"%s","IdentifiantClavier":"%s","ChaineConnexion":"%s"}' \
% (codeCaisse, username, nuser, password, _id, vkpass)
form.submit()
class CenetHomePage(LoggedPage, HTMLPage):
@method
class get_advisor(ItemElement):
klass = Advisor
obj_name = CleanText('//section[contains(@id, "ChargeAffaires")]//strong')
obj_email = CleanText('//li[contains(@id, "MailContact")]')
obj_phone = CleanText('//li[contains(@id, "TelAgence")]', replace=[('.', '')])
obj_mobile = NotAvailable
obj_agency = CleanText('//section[contains(@id, "Agence")]//strong')
obj_address = CleanText('//li[contains(@id, "AdresseAgence")]')
def obj_fax(self):
return CleanText('//li[contains(@id, "FaxAgence")]', replace=[('.', '')])(self) or NotAvailable
@method
class get_profile(ItemElement):
klass = Profile
obj_name = CleanText('//li[@class="identite"]/a/span')
class CenetJsonPage(JsonPage):
def __init__(self, browser, response, *args, **kwargs):
super(CenetJsonPage, self).__init__(browser, response, *args, **kwargs)
# Why you are so ugly....
self.doc = json.loads(self.doc['d'])
if self.doc['Erreur'] and (self.doc['Erreur']['Titre'] or self.doc['Erreur']['Code']):
self.logger.warning('error on %r: %s', self.url, self.doc['Erreur']['Titre'] or self.doc['Erreur']['Code'])
raise Brow | serUnavailable(self.d | oc['Erreur']['Titre'] or self.doc['Erreur']['Description'])
self.doc['DonneesSortie'] = json.loads(self.doc['DonneesSortie'])
class CenetAccountsPage(LoggedPage, CenetJsonPage):
ACCOUNT_TYPES = {'CCP': Account.TYPE_CHECKING}
@method
class get_accounts(DictElement):
item_xpath = "DonneesSortie"
class item(ItemElement):
klass = Account
obj_id = CleanText(Dict('Numero'))
obj_label = CleanText(Dict('Intitule'))
obj_iban = CleanText(Dict('IBAN'))
def obj_balance(self):
absolut_amount = CleanDecimal(Dict('Solde/Valeur'))(self)
if CleanText(Dict('Solde/CodeSens'))(self) == 'D':
return -absolut_amount
return absolut_amount
def obj_currency(self):
return CleanText(Dict('Devise'))(self).upper()
def obj_type(self):
return self.page.ACCOUNT_TYPES.get(Dict('TypeCompte')(self), Account.TYPE_UNKNOWN)
def obj__formated(self):
return self.el
class CenetLoanPage(LoggedPage, CenetJsonPage):
@method
class get_accounts(DictElement):
item_xpath = "DonneesSortie"
class item(ItemElement):
klass = Loan
obj_id = CleanText(Dict('IdentifiantUniqueContrat'), replace=[(' ', '-')])
obj_label = CleanText(Dict('Libelle'))
obj_total_amount = CleanDecimal(Dict('MontantInitial/Valeur'))
obj_currency = Currency(Dict('MontantInitial/Devise'))
obj_type = Account.TYPE_LOAN
obj_duration = CleanDecimal(Dict('Duree'))
obj_rate = CleanDecimal.French(Dict('Taux'))
obj_next_payment_amount = CleanDecimal(Dict('MontantProchaineEcheance/Valeur'))
def obj_balance(self):
balance = CleanDecimal(Dict('CapitalRestantDu/Valeur'))(self)
if balance > 0:
balance *= -1
return balance
def obj_subscription_date(self):
sub_date = Dict('DateDebutEffet')(self)
if sub_date:
date = CleanDecimal().filter(sub_date) / 1000
return datetime.fromtimestamp(date).date()
return NotAvailable
def obj_maturity_date(self):
mat_date = Dict('DateDerniereEcheance')(self)
if mat_date:
date = CleanDecimal().filter(mat_date) / 1000
return datetime.fromtimestamp(date).date()
return NotAvailable
def obj_next_payment_date(self):
next_date = Dict('DateProchaineEcheance')(self)
if next_date:
date = CleanDecimal().filter(next_date) / 1000
return datetime.fromtimestamp(date).date()
return NotAvailable
class CenetCardsPage(LoggedPage, CenetJsonPage):
def get_ |
Cinntax/home-assistant | homeassistant/components/ebox/sensor.py | Python | apache-2.0 | 4,756 | 0.000631 | """
Support for EBox.
Get data from 'My Usage Page' page: https://client.ebox.ca/myusage
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.ebox/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_USERNAME,
CONF_PASSWORD,
CONF_NAME,
CONF_MONITORED_VARIABLES,
)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.exceptions import PlatformNotReady
_LOGGER = logging.getLogger(__name__)
GIGABITS = "Gb"
PRICE = "CAD"
DAYS = "days"
PERCENT = "%"
DEFAULT_NAME = "EBox"
REQUESTS_TIMEOUT = 15
SCAN_INTERVAL = timedelta(minutes=15)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
SENSOR_TYPES = {
"usage": ["Usage", PERCENT, "mdi:percent"],
"balance": ["Balance", PRICE, "mdi:square-inc-cash"],
"limit": ["Data limit", GIGABITS, "mdi:download"],
"days_left": ["Days left", DAYS, "mdi:calendar-today"],
"before_offpeak_download": ["Download before offpeak", GIGABITS, "mdi:download"],
"before_offpeak_upload": ["Upload before offpeak", GIGABITS, "mdi:upload"],
"before_offpeak_total": ["Total before offpeak", GIGABITS, "mdi:download"],
"offpeak_download": ["Offpeak download", GIGABITS, "mdi:download"],
"offpeak_upload": ["Offpeak Upload", GIGABITS, "mdi:upload"],
"offpeak_total": ["Offpeak Total", GIGABITS, "mdi:download"],
"download": ["Download", GIGABITS, "mdi:download"],
"upload": ["Upload", GIGABITS, "mdi:upload"],
"total": ["Total", GIGABITS, "mdi:download"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_VARIABLES): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the EBox sensor."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
httpsession = hass.helpers.aiohttp_client.async_get_clientsession()
ebox_data = EBoxData(username, password, httpsession)
name = config.get(CONF_NAME)
from pyebox.client import PyEboxError
try:
await ebox_data.async_update()
except PyEboxError as exp:
_LOGGER.error("Failed login: %s", exp)
raise PlatformNotReady
sensors = []
for variable in config[CONF_MONITORED_VARIABLES]:
sensors.append(EBoxSensor(ebox_data, variable, name))
async_add_entities(sensors, True)
class EBoxSensor(Entity):
"""Implementation of a EBox sensor."""
def __init__(self, ebox_data, sensor_type, name):
"""Initialize the sensor."""
self.client_name = name
self.type = sensor_type
self._name = SENSOR_TYPES[sensor_type][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._icon = SENSOR_TYPES[sensor_type][2]
self.ebox_data = ebox_data
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
async def async_update(self):
"""Get the latest data from EBox and update the state."""
await self.ebox_data.async_update()
if self.type in self.ebox_data.data:
self._state = round(self.ebox_data.data[self.type], 2)
class EBoxData:
"""Get data from Ebox."""
def _ | _init__(self, username, password, httpsession):
"""Initialize the data object."""
from pyebox import EboxClient
self.client = EboxClient(username, password, REQUESTS_TIMEOUT, httpsession)
self.data = {}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from Ebox."""
from pyebox.client import PyEboxError
try:
await | self.client.fetch_data()
except PyEboxError as exp:
_LOGGER.error("Error on receive last EBox data: %s", exp)
return
# Update data
self.data = self.client.get_data()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.