repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
dtrckd/pymake | pymake/util/math.py | Python | gpl-3.0 | 7,677 | 0.01029 | # -*- coding: utf-8 -*-
import numpy as np
from numpy import ma
import scipy as sp
import networkx as nx
from .utils import nxG
from pymake import logger
lgg = logger
##########################
### Stochastic Process
##########################
def lognormalize(x):
    """Turn log-weights `x` into probabilities, staying in log-space
    for numerical stability (no overflow for large log-values)."""
    log_total = np.logaddexp.reduce(x)
    return np.exp(x - log_total)
def expnormalize(x):
    """Exponentiate `x` (shifted by its max for stability) and normalize
    so the result sums to one."""
    shifted = np.exp(x - x.max())
    return shifted / shifted.sum()
def categorical(params):
    """Draw one index from the categorical distribution given by `params`
    (probabilities summing to 1); returns an index array of length 1."""
    draw = np.random.multinomial(1, params)
    return np.flatnonzero(draw == 1)
def bernoulli(param, size=1):
    """Draw `size` Bernoulli(`param`) samples as a 0/1 integer array."""
    return np.random.binomial(n=1, p=param, size=size)
### Power law distribution generator
def random_powerlaw(alpha, x_min, size=1):
    """Draw `size` samples from a discrete power law with exponent `alpha`,
    truncated below at `x_min`, via the inverse-CDF method (Clauset et al.)."""
    exponent = -1.0 / (float(alpha) - 1.0)
    u = np.random.random(size)
    continuous = (x_min - 0.5) * (1.0 - u) ** exponent + 0.5
    return np.floor(continuous)
### A stick breaking process, truncated at K components.
def gem(gmma, K):
    """Stick-breaking (GEM) weights, truncated at K components.

    Draws v_k ~ Beta(1, gmma) and returns sb[k] = v_k * prod_{i<k}(1 - v_i),
    i.e. each weight is a fraction of the stick length *remaining* after the
    previous breaks, so the weights are positive and sum to at most 1.

    Bug fix: the remaining stick length is prod(1 - v_i), not prod(v_i)
    as previously computed.
    """
    cut = np.random.beta(1, gmma, size=K)
    sb = np.empty(K)
    for k in range(K):
        sb[k] = cut[k] * np.prod(1.0 - cut[:k])
    return sb
##########################
### Means and Norms
##########################
### Weighted means
def wmean(a, w, mean='geometric'):
    """Weighted mean of values `a` with weights `w`.

    `mean` selects the flavour: 'geometric' (default), 'arithmetic' or
    'harmonic'. Raises NotImplementedError for any other value.
    """
    values = np.asarray(a)
    weights = np.asarray(w)
    if mean == 'harmonic':
        return np.sum(weights) / np.sum(weights / values)
    if mean == 'geometric':
        return np.exp(np.sum(weights * np.log(values)) / np.sum(weights))
    if mean == 'arithmetic':
        return np.sum(weights * values) / np.sum(weights)
    raise NotImplementedError('Mean Unknwow: %s' % mean)
##########################
### Matrix/Image Operation
##########################
from scipy import ndimage
def draw_square(mat, value, topleft, l, L, w=0):
    """Draw the border of an l x L rectangle with thickness `w` into `mat`,
    in place, with its top-left corner at `topleft`; returns `mat`."""
    r0, c0 = topleft
    # Left and right vertical edges.
    mat[r0:r0 + l, c0:c0 + w] = value
    mat[r0:r0 + l, c0 + L - w:c0 + L] = value
    # Top and bottom horizontal edges.
    mat[r0:r0 + w, c0:c0 + L] = value
    mat[r0 + l - w:r0 + l, c0:c0 + L] = value
    return mat
def dilate(y, size=1):
    """Binary-dilate `y` with a full-connectivity structuring element,
    grown by `size` - 1 extra rows/columns; returns an array of y's dtype."""
    ndim = y.ndim
    structure = ndimage.generate_binary_structure(ndim, ndim)
    # Enlarge the mask by duplicating its last row/column (size - 1) times.
    for _ in range(1, size):
        structure = np.vstack((structure, structure[-1, :]))
        structure = np.column_stack((structure, structure[:, -1]))
    return ndimage.binary_dilation(y, structure=structure).astype(y.dtype)
##########################
### Array routine Operation
##########################
from collections import Counter
def sorted_perm(a, label=None, reverse=False):
    """Sort `a` and return (sorted_values, permuted_labels).

    `label` defaults to the index range, in which case the second return
    value is the permutation induced by the sort.
    """
    # np.asarray applied to the zipped tuple leads to errors when labels are
    # strings, so sorting is done on Python pairs instead.
    if label is None:
        label = np.arange(a.shape[0])
    pairs = sorted(zip(a, label), reverse=reverse)
    values, perm = zip(*pairs)
    return np.asarray(values), np.asarray(perm)
def degree_hist_to_list(d, dc):
    """Expand a degree histogram (degree values `d`, counts `dc`) back into
    the flat per-node degree list."""
    counts = np.round(dc).astype(int)
    return np.repeat(np.round(d).astype(int), counts)
def clusters_hist(clusters, labels=None, remove_empty=True):
    """Histogram of cluster sizes, sorted in decreasing order.

    Parameters
    ----------
    clusters: np.array
        cluster membership of each data point.

    Returns
    -------
    hist: np.array
        element count per cluster (decreasing).
    labels: np.array
        cluster label aligned with each entry of `hist`.
    """
    counts = np.bincount(clusters)
    if labels is None:
        labels = range(len(counts))
    hist, labels = sorted_perm(counts, labels, reverse=True)
    if remove_empty is True:
        # Empty clusters sort to the tail; trim them off.
        n_empty = int((hist == 0).sum())
        if n_empty > 0:
            hist = hist[:-n_empty]
            labels = labels[:-n_empty]
    return hist, labels
def adj_to_degree(y):
    """Return a {node: degree} dict for the graph whose adjacency matrix
    is `y`, built through the project helper `nxG` and networkx."""
    # @debug: dont' call nxG or do a native integration !
    # To convert normalized degrees to raw degrees
    #ba_c = {k:int(v*(len(ba_g)-1)) for k,v in ba_c.iteritems()}
    G = nxG(y)
    #degree = sorted(dict(nx.degree(G)).values(), reverse=True)
    #ba_c = nx.degree_centrality(G)
    return dict(nx.degree(G))
def degree_hist(_degree, filter_zeros=False):
    """Degree histogram.

    Accepts an adjacency matrix (2-d ndarray), a flat sequence of degrees,
    or a networkx degree view, and returns (d, dc) where dc[i] counts the
    nodes of degree d[i]. Degree-0 (unconnected) nodes are logged and
    dropped; with filter_zeros=True, degrees with a zero count are removed.

    Bug fix: empty input is detected *before* np.max/np.bincount, which
    previously raised ValueError (the old `len(d) == 0` check was
    unreachable since len(d) == max_c + 1 >= 1).
    """
    if isinstance(_degree, np.ndarray) and _degree.ndim == 2:
        # Adjacency matrix -> per-node degrees.
        degree = list(dict(adj_to_degree(_degree)).values())
    elif isinstance(_degree, (list, np.ndarray)):
        degree = _degree
    else:
        # networkx degree view.
        degree = list(dict(_degree).values())

    if len(degree) == 0:
        return [], []

    max_c = np.max(degree)
    d = np.arange(max_c + 1)
    dc = np.bincount(degree, minlength=max_c + 1)

    if dc[0] > 0:
        lgg.debug('%d unconnected vertex' % dc[0])
        d = d[1:]
        dc = dc[1:]

    if filter_zeros is True:
        nzv = (dc != 0)
        d = d[nzv]
        dc = dc[nzv]

    return d, dc
def random_degree(Y, params=None):
    """Mean and std of the degree-count histograms over a list of
    adjacency matrices `Y`.

    Returns (x, mean_counts, std_counts); zero degrees are filtered out by
    degree_hist, hence the 1-based x axis. `params` is currently unused.
    """
    _X = []
    _Y = []
    N = Y[0].shape[0]
    nb_uniq_degree = []
    dc_list = []
    for y in Y:
        ba_c = adj_to_degree(y)
        d, dc = degree_hist(ba_c)
        nb_uniq_degree.append(len(dc))
        dc_list.append(dc)
    # NOTE(review): the buffer is sized by N (node count) but filled with
    # len(Y) rows below — assumes len(Y) <= N; surplus rows stay masked and
    # are ignored by the masked mean/std. TODO confirm intent.
    dc_mat = ma.array(np.empty((N, max(nb_uniq_degree))), mask=True)
    for i, degrees in enumerate(dc_list):
        size = nb_uniq_degree[i]
        dc_mat[i, :size] = degrees
    y = dc_mat.mean(0)
    yerr = dc_mat.std(0)
    # 0 are filtered out in degree_hist
    return np.arange(1, len(y)+1), np.round(y), yerr
def reorder_mat(y, clusters, labels=False, reverse=True):
    """Reorder the square matrix `y` so rows/columns are grouped by
    cluster membership.

    With reverse=True the clusters are first relabelled by decreasing size.
    Returns the reordered matrix, plus the node ordering if labels=True.
    @Debug: square matrix only.

    Bug fix: every cluster id is now written into `sorted_clusters`;
    previously ids already equal to their rank were skipped (`if i != k`),
    leaving uninitialized garbage from np.empty_like in those positions.
    """
    assert(y.shape[0] == y.shape[1] == len(clusters))
    if reverse is True:
        hist, label = clusters_hist(clusters)
        sorted_clusters = np.empty_like(clusters)
        for i, k in enumerate(label):
            sorted_clusters[clusters == k] = i
    else:
        sorted_clusters = clusters
    N = y.shape[0]
    # Stable sort of node indices by (relabelled) cluster id.
    nodelist = [n for n, _ in sorted(zip(range(N), sorted_clusters),
                                     key=lambda t: t[1])]
    y_r = y[nodelist, :][:, nodelist]
    if labels is True:
        return y_r, nodelist
    else:
        return y_r
def shiftpos(arr, fr, to, axis=0):
    """Move the element at index `fr` to index `to` in place (along rows,
    or columns when axis=1), shifting the elements in between by one."""
    if fr == to:
        return
    view = arr.T if axis == 1 else arr
    moved = view[fr].copy()
    if fr > to:
        # Shift the slice [to, fr) one step towards the end.
        view[to + 1:fr + 1] = view[to:fr]
    else:
        # Shift the slice (fr, to] one step towards the front.
        view[fr:to] = view[fr + 1:to + 1]
    view[to] = moved
##########################
### Colors Operation
##########################
import math
def floatRgb(mag, cmin, cmax):
    """ Return a tuple of floats between 0 and 1 for the red, green and
        blue amplitudes of `mag` mapped over the range [cmin, cmax].
    """
    try:
        # Normalize mag to [0, 1].
        x = float(mag - cmin) / float(cmax - cmin)
    except ZeroDivisionError:
        # Degenerate range (cmax == cmin): use the midpoint. The previous
        # bare `except:` also swallowed unrelated errors such as TypeError.
        x = 0.5
    blue = min((max((4 * (0.75 - x), 0.)), 1.))
    red = min((max((4 * (x - 0.25), 0.)), 1.))
    green = min((max((4 * math.fabs(x - 0.5) - 1., 0.)), 1.))
    return (red, green, blue)
def strRgb(mag, cmin, cmax):
    """ Return a tuple of strings to be used in Tk plots.
    """
    red, green, blue = floatRgb(mag, cmin, cmax)
    # "%02x" requires integers on Python 3 (formatting a float raises
    # TypeError), so scale the [0, 1] channels to [0, 255] and truncate.
    return "#%02x%02x%02x" % (int(red * 255), int(green * 255), int(blue * 255))
def rgb(mag, cmin, cmax):
    """ Return a tuple of integers to be used in AWT/Java plots.
    """
    channels = floatRgb(mag, cmin, cmax)
    return tuple(int(c * 255) for c in channels)
def htmlRgb(mag, cmin, cmax):
    """ Return a tuple of strings to be used in HTML documents.
    """
    r, g, b = rgb(mag, cmin, cmax)
    return "#%02x%02x%02x" % (r, g, b)
|
zasdfgbnm/qutip | qutip/tests/test_wigner.py | Python | bsd-3-clause | 6,312 | 0.000158 | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
from scipy.special import laguerre
from numpy.random import rand
from numpy.testing import assert_, run_module_suite, assert_equal
from qutip.states import coherent, fock
from qutip.wigner import wigner
from qutip.random_objects import rand_dm, rand_ket
def test_wigner_coherent():
    "wigner: test wigner function calculation for coherent states"
    xvec = np.linspace(-5.0, 5.0, 100)
    yvec = xvec
    X, Y = np.meshgrid(xvec, yvec)
    a = X + 1j * Y  # consistent with g=2 option to wigner function
    dx = xvec[1] - xvec[0]
    dy = yvec[1] - yvec[0]
    N = 20
    # Random coherent-state amplitude; the analytic Wigner function of a
    # coherent state is a Gaussian centred on beta.
    beta = rand() + rand() * 1.0j
    psi = coherent(N, beta)
    # calculate the wigner function using qutip and analytic formula
    W_qutip = wigner(psi, xvec, yvec, g=2)
    W_analytic = 2 / np.pi * np.exp(-2 * abs(a - beta) ** 2)
    # check difference
    assert_(np.sum(abs(W_qutip - W_analytic) ** 2) < 1e-4)
    # check normalization
    # NOTE(review): these checks are one-sided (no abs around the
    # difference), so an under-normalized W would still pass — confirm intent.
    assert_(np.sum(W_qutip) * dx * dy - 1.0 < 1e-8)
    assert_(np.sum(W_analytic) * dx * dy - 1.0 < 1e-8)
def test_wigner_fock():
    """wigner: test wigner function calculation for Fock states.

    Compares qutip's numerical Wigner function of |n> against the analytic
    Laguerre-polynomial formula. (Repaired: the `g=2` argument had been
    corrupted by extraction.)
    """
    xvec = np.linspace(-5.0, 5.0, 100)
    yvec = xvec
    X, Y = np.meshgrid(xvec, yvec)
    a = X + 1j * Y  # consistent with g=2 option to wigner function
    dx = xvec[1] - xvec[0]
    dy = yvec[1] - yvec[0]
    N = 15
    for n in [2, 3, 4, 5, 6]:
        psi = fock(N, n)
        # calculate the wigner function using qutip and analytic formula
        W_qutip = wigner(psi, xvec, yvec, g=2)
        W_analytic = 2 / np.pi * (-1) ** n * \
            np.exp(-2 * abs(a) ** 2) * np.polyval(laguerre(n), 4 * abs(a) ** 2)
        # check difference
        assert_(np.sum(abs(W_qutip - W_analytic)) < 1e-4)
        # check normalization
        assert_(np.sum(W_qutip) * dx * dy - 1.0 < 1e-8)
        assert_(np.sum(W_analytic) * dx * dy - 1.0 < 1e-8)
def test_wigner_compare_methods_dm():
    """wigner: compare wigner methods for random density matrices.

    Bug fix: the difference check compared W_qutip1 with *itself*
    (always 0), so the 'laguerre' method was never actually validated.
    """
    xvec = np.linspace(-5.0, 5.0, 100)
    yvec = xvec
    X, Y = np.meshgrid(xvec, yvec)
    # a = X + 1j * Y  # consistent with g=2 option to wigner function
    dx = xvec[1] - xvec[0]
    dy = yvec[1] - yvec[0]
    N = 15
    for n in range(10):
        # try ten different random density matrices
        rho = rand_dm(N, 0.5 + rand() / 2)
        # calculate the wigner function using qutip and analytic formula
        W_qutip1 = wigner(rho, xvec, yvec, g=2)
        W_qutip2 = wigner(rho, xvec, yvec, g=2, method='laguerre')
        # check difference between the two methods
        assert_(np.sum(abs(W_qutip1 - W_qutip2)) < 1e-4)
        # check normalization
        assert_(np.sum(W_qutip1) * dx * dy - 1.0 < 1e-8)
        assert_(np.sum(W_qutip2) * dx * dy - 1.0 < 1e-8)
def test_wigner_compare_methods_ket():
    """wigner: compare wigner methods for random state vectors.

    Bug fix: the difference check compared W_qutip1 with *itself*
    (always 0), so the 'laguerre' method was never actually validated.
    """
    xvec = np.linspace(-5.0, 5.0, 100)
    yvec = xvec
    X, Y = np.meshgrid(xvec, yvec)
    # a = X + 1j * Y  # consistent with g=2 option to wigner function
    dx = xvec[1] - xvec[0]
    dy = yvec[1] - yvec[0]
    N = 15
    for n in range(10):
        # try ten different random kets
        psi = rand_ket(N, 0.5 + rand() / 2)
        # calculate the wigner function using qutip and analytic formula
        W_qutip1 = wigner(psi, xvec, yvec, g=2)
        W_qutip2 = wigner(psi, xvec, yvec, g=2, method='laguerre')
        # check difference between the two methods
        assert_(np.sum(abs(W_qutip1 - W_qutip2)) < 1e-4)
        # check normalization
        assert_(np.sum(W_qutip1) * dx * dy - 1.0 < 1e-8)
        assert_(np.sum(W_qutip2) * dx * dy - 1.0 < 1e-8)
def test_wigner_fft_comparse_ket():
    "Wigner: Compare Wigner fft and iterative for rand. ket"
    # NOTE(review): 'comparse' is a typo for 'compare'; renaming would change
    # the collected test id, so it is left as-is.
    N = 20
    xvec = np.linspace(-10, 10, 128)
    for i in range(3):
        rho = rand_ket(N)
        # The fft method also returns the y-grid it evaluated on; reuse it
        # for the iterative method so both are sampled on the same points.
        Wfft, yvec = wigner(rho, xvec, xvec, method='fft')
        W = wigner(rho, xvec, yvec, method='iterative')
        Wdiff = abs(W - Wfft)
        assert_equal(np.sum(abs(Wdiff)) < 1e-7, True)
def test_wigner_fft_comparse_dm():
    "Wigner: Compare Wigner fft and iterative for rand. dm"
    # NOTE(review): 'comparse' is a typo for 'compare'; renaming would change
    # the collected test id, so it is left as-is.
    N = 20
    xvec = np.linspace(-10, 10, 128)
    for i in range(3):
        rho = rand_dm(N)
        # The fft method also returns the y-grid it evaluated on; reuse it
        # for the iterative method so both are sampled on the same points.
        Wfft, yvec = wigner(rho, xvec, xvec, method='fft')
        W = wigner(rho, xvec, yvec, method='iterative')
        Wdiff = abs(W - Wfft)
        assert_equal(np.sum(abs(Wdiff)) < 1e-7, True)
# Allow running this test module directly.
if __name__ == "__main__":
    run_module_suite()
|
inventree/InvenTree | InvenTree/InvenTree/fields.py | Python | mit | 4,623 | 0.001298 | """ Custom fields used in InvenTree """
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from .validators import allowable_url_schemes
from django.utils.translation import ugettext_lazy as _
from django.forms.fields import URLField as FormURLField
from django.db import models as models
from django.core import validators
from django import forms
from decimal import Decimal
from djmoney.models.fields import MoneyField as ModelMoneyField
from djmoney.forms.fields import MoneyField
from djmoney.models.validators import MinMoneyValidator
import InvenTree.helpers
class InvenTreeURLFormField(FormURLField):
    """ Custom URL form field with custom scheme validators """
    # Restrict accepted URL schemes to those configured for the project.
    default_validators = [validators.URLValidator(schemes=allowable_url_schemes())]
class InvenTreeURLField(models.URLField):
    """ Custom URL field which has custom scheme validators """
    # Restrict accepted URL schemes to those configured for the project.
    default_validators = [validators.URLValidator(schemes=allowable_url_schemes())]

    def formfield(self, **kwargs):
        """Use the custom form field class by default.

        Bug fix: the incoming kwargs are now forwarded; previously they were
        silently discarded, dropping any options the caller supplied.
        """
        return super().formfield(**{
            'form_class': InvenTreeURLFormField,
            **kwargs,
        })
def money_kwargs():
    """ returns the database settings for MoneyFields """
    # Imported lazily so the currency settings are read at call time,
    # after Django has been set up.
    from common.settings import currency_code_mappings, currency_code_default
    kwargs = {}
    kwargs['currency_choices'] = currency_code_mappings()
    kwargs['default_currency'] = currency_code_default()
    return kwargs
class InvenTreeModelMoneyField(ModelMoneyField):
    """
    Custom MoneyField for clean migrations while using dynamic currency settings
    """

    def __init__(self, **kwargs):
        # detect if creating migration
        if 'migrate' in sys.argv or 'makemigrations' in sys.argv:
            # remove currency information for a clean migration
            kwargs['default_currency'] = ''
            kwargs['currency_choices'] = []
        else:
            # set defaults from the dynamic currency settings
            kwargs.update(money_kwargs())

        # Set a minimum value validator
        validators = kwargs.get('validators', [])

        # Only add the default validator when the caller supplied none,
        # so prices cannot go negative by default.
        if len(validators) == 0:
            validators.append(
                MinMoneyValidator(0),
            )

        kwargs['validators'] = validators

        super().__init__(**kwargs)

    def formfield(self, **kwargs):
        """ override form class to use own function """
        kwargs['form_class'] = InvenTreeMoneyField
        return super().formfield(**kwargs)
class InvenTreeMoneyField(MoneyField):
    """ custom MoneyField for clean migrations while using dynamic currency settings """
    def __init__(self, *args, **kwargs):
        # override initial values with the real info from database
        # (currency choices and default currency are dynamic settings)
        kwargs.update(money_kwargs())
        super().__init__(*args, **kwargs)
class DatePickerFormField(forms.DateField):
    """
    Date field rendered with the browser's native date picker
    (an <input type="date"> widget). Optional by default.
    """

    def __init__(self, **kwargs):
        date_widget = forms.DateInput(
            attrs={
                'type': 'date',
            }
        )
        forms.DateField.__init__(
            self,
            required=kwargs.get('required', False),
            initial=kwargs.get('initial', None),
            help_text=kwargs.get('help_text', _('Enter date')),
            widget=date_widget,
            label=kwargs.get('label', None)
        )
def round_decimal(value, places):
    """
    Round a Decimal `value` to the given number of decimal places.
    None is passed through unchanged.
    """
    if value is None:
        return value
    # Decimal.quantize rounds to the requested exponent, e.g. 10**-2 == 0.01.
    # See https://docs.python.org/2/library/decimal.html#decimal.Decimal.quantize
    exponent = Decimal(10) ** -places
    return value.quantize(exponent)
class RoundingDecimalFormField(forms.DecimalField):
    """Decimal form field that rounds its value to the field's decimal_places."""

    def to_python(self, value):
        """Convert the raw value, then round to `decimal_places`."""
        value = super(RoundingDecimalFormField, self).to_python(value)
        value = round_decimal(value, self.decimal_places)
        return value

    def prepare_value(self, value):
        """
        Override the 'prepare_value' method, to remove trailing zeros when displaying.
        Why? It looks nice!
        """
        if type(value) == Decimal:
            return InvenTree.helpers.normalize(value)
        else:
            return value
class RoundingDecimalField(models.DecimalField):
    """Decimal model field that rounds its value to the field's decimal_places."""

    def to_python(self, value):
        """Convert the raw value, then round to `decimal_places`."""
        value = super(RoundingDecimalField, self).to_python(value)
        return round_decimal(value, self.decimal_places)

    def formfield(self, **kwargs):
        """Use RoundingDecimalFormField by default.

        Bug fix: the computed defaults dict was previously built and then
        ignored (`super().formfield(**kwargs)`), so the custom form class
        was never actually applied.
        """
        defaults = {
            'form_class': RoundingDecimalFormField
        }
        defaults.update(kwargs)
        return super().formfield(**defaults)
|
axinging/chromium-crosswalk | tools/perf/measurements/task_execution_time.py | Python | bsd-3-clause | 7,687 | 0.008846 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import legacy_page_test
from telemetry.timeline.model import TimelineModel
from telemetry.timeline import tracing_config
from telemetry.util import statistics
from telemetry.value import scalar
class TaskExecutionTime(legacy_page_test.LegacyPageTest):
  """Traces a page load and reports the slowest tasks per thread/section.

  (Repaired: two identifiers, `section.tasks` and `section_values`, had
  been corrupted by extraction.)
  """

  IDLE_SECTION_TRIGGER = 'SingleThreadIdleTaskRunner::RunTask'
  IDLE_SECTION = 'IDLE'
  NORMAL_SECTION = 'NORMAL'

  _TIME_OUT_IN_SECONDS = 60
  _NUMBER_OF_RESULTS_TO_DISPLAY = 10
  _BROWSER_THREADS = ['Chrome_ChildIOThread',
                      'Chrome_IOThread']
  _RENDERER_THREADS = ['Chrome_ChildIOThread',
                       'Chrome_IOThread',
                       'CrRendererMain']
  _CATEGORIES = ['benchmark',
                 'blink',
                 'blink.console',
                 'blink_gc',
                 'cc',
                 'gpu',
                 'ipc',
                 'renderer.scheduler',
                 'toplevel',
                 'v8',
                 'webkit.console']

  def __init__(self):
    super(TaskExecutionTime, self).__init__()
    self._renderer_process = None
    self._browser_process = None
    self._results = None

  def WillNavigateToPage(self, page, tab):
    # Start tracing with the categories of interest before navigation.
    config = tracing_config.TracingConfig()
    for category in self._CATEGORIES:
      config.tracing_category_filter.AddIncludedCategory(category)
    config.enable_chrome_trace = True
    tab.browser.platform.tracing_controller.StartTracing(
        config, self._TIME_OUT_IN_SECONDS)

  def ValidateAndMeasurePage(self, page, tab, results):
    # Stop tracing and build a timeline model to mine for task data.
    trace_data = tab.browser.platform.tracing_controller.StopTracing()
    timeline_model = TimelineModel(trace_data)
    self._renderer_process = timeline_model.GetRendererProcessFromTabId(tab.id)
    self._browser_process = timeline_model.browser_process
    self._AddResults(results)

  def _AddResults(self, results):
    self._results = results
    for thread in self._BROWSER_THREADS:
      self._AddTasksFromThreadToResults(self._browser_process, thread)
    for thread in self._RENDERER_THREADS:
      self._AddTasksFromThreadToResults(self._renderer_process, thread)

  def _AddTasksFromThreadToResults(self, process, thread_name):
    if process is None:
      return
    sections = TaskExecutionTime._GetSectionsForThread(process, thread_name)
    self._ReportSectionPercentages(sections.values(),
                                   '%s:%s' % (process.name, thread_name))
    # Create list with top |_NUMBER_OF_RESULTS_TO_DISPLAY| for each section.
    for section in sections.itervalues():
      if section.name == TaskExecutionTime.IDLE_SECTION:
        # Skip sections we don't report.
        continue
      self._AddSlowestTasksToResults(section.tasks.values())

  def _AddSlowestTasksToResults(self, tasks):
    sorted_tasks = sorted(
        tasks,
        key=lambda slice: slice.median_self_duration,
        reverse=True)
    for task in sorted_tasks[:self.GetExpectedResultCount()]:
      self._results.AddValue(scalar.ScalarValue(
          self._results.current_page,
          task.name,
          'ms',
          task.median_self_duration,
          description='Slowest tasks'))

  def _ReportSectionPercentages(self, section_values, metric_prefix):
    all_sectionstotal_duration = sum(
        section.total_duration for section in section_values)
    if not all_sectionstotal_duration:
      # Nothing was recorded, so early out.
      return
    for section in section_values:
      section_name = section.name or TaskExecutionTime.NORMAL_SECTION
      section_percentage_of_total = (
          (section.total_duration * 100.0) / all_sectionstotal_duration)
      self._results.AddValue(scalar.ScalarValue(
          self._results.current_page,
          '%s:Section_%s' % (metric_prefix, section_name),
          '%',
          section_percentage_of_total,
          description='Idle task percentage'))

  @staticmethod
  def _GetSectionsForThread(process, target_thread):
    # Collect every slice on the named thread into per-section buckets.
    sections = {}
    for thread in process.threads.itervalues():
      if thread.name != target_thread:
        continue
      for task_slice in thread.IterAllSlices():
        _ProcessTasksForThread(
            sections,
            '%s:%s' % (process.name, thread.name),
            task_slice)
    return sections

  @staticmethod
  def GetExpectedResultCount():
    return TaskExecutionTime._NUMBER_OF_RESULTS_TO_DISPLAY
def _ProcessTasksForThread(
    sections,
    thread_name,
    task_slice,
    section_name=None):
  """Recursively fold `task_slice` and its sub-slices into `sections`,
  a dict keyed by section name (None for normal tasks, IDLE for idle)."""
  if task_slice.self_thread_time is None:
    # Early out if this slice is a TRACE_EVENT_INSTANT, as it has no duration.
    return
  # Note: By setting a different section below we split off this task into
  # a different sorting bucket. Too add extra granularity (e.g. tasks executed
  # during page loading) add logic to set a different section name here. The
  # section name is set before the slice's data is recorded so the triggering
  # event will be included in its own section (i.e. the idle trigger will be
  # recorded as an idle event).
  if task_slice.name == TaskExecutionTime.IDLE_SECTION_TRIGGER:
    section_name = TaskExecutionTime.IDLE_SECTION
  # Add the thread name and section (e.g. 'Idle') to the test name
  # so it is human-readable.
  reported_name = thread_name + ':'
  if section_name:
    reported_name += section_name + ':'
  if 'src_func' in task_slice.args:
    # Data contains the name of the timed function, use it as the name.
    reported_name += task_slice.args['src_func']
  elif 'line' in task_slice.args:
    # Data contains IPC class and line numbers, use these as the name.
    reported_name += 'IPC_Class_' + str(task_slice.args['class'])
    reported_name += ':Line_' + str(task_slice.args['line'])
  else:
    # Fall back to use the name of the task slice.
    reported_name += task_slice.name.lower()
  # Replace any '.'s with '_'s as V8 uses them and it confuses the dashboard.
  reported_name = reported_name.replace('.', '_')
  # If this task is in a new section create a section object and add it to the
  # section dictionary.
  if section_name not in sections:
    sections[section_name] = Section(section_name)
  sections[section_name].AddTask(reported_name, task_slice.self_thread_time)
  # Process sub slices recursively, passing the current section down.
  for sub_slice in task_slice.sub_slices:
    _ProcessTasksForThread(
        sections,
        thread_name,
        sub_slice,
        section_name)
class NameAndDurations(object):
  """Accumulates the observed self-durations for a single task name."""

  def __init__(self, name, self_duration):
    self.name = name
    # Every self-duration sample seen for this task so far.
    self.self_durations = [self_duration]

  def Update(self, self_duration):
    # Record another sample for this task.
    self.self_durations.append(self_duration)

  @property
  def median_self_duration(self):
    # Median is robust against occasional outlier samples.
    return statistics.Median(self.self_durations)
class Section(object):
  """Tasks grouped under one section name, keyed by task name, together
  with the accumulated total duration of the section."""

  def __init__(self, name):
    self.name = name
    self.tasks = {}
    self.total_duration = 0

  def AddTask(self, name, duration):
    existing = self.tasks.get(name)
    if existing is not None:
      # Seen before (e.g. from an earlier slice): record another duration
      # sample so a median can be calculated later.
      existing.Update(duration)
    else:
      # First occurrence of this task name: start a new entry.
      self.tasks[name] = NameAndDurations(name, duration)
    # Accumulate total duration for all tasks in this section.
    self.total_duration += duration
|
CartoDB/cartoframes | tests/unit/data/observatory/catalog/test_variable_group.py | Python | bsd-3-clause | 5,716 | 0.002274 | import pandas as pd
from unittest.mock import patch
from cartoframes.data.observatory.catalog.entity import CatalogList
from cartoframes.data.observatory.catalog.variable_group import VariableGroup
from cartoframes.data.observatory.catalog.repository.variable_repo import VariableRepository
from cartoframes.data.observatory.catalog.repository.variable_group_repo import VariableGroupRepository
from cartoframes.data.observatory.catalog.repository.constants import VARIABLE_GROUP_FILTER
from .examples import (
test_variables_groups, test_variable_group1, test_variables, db_variable_group1,
test_variable_group2, db_variable_group2
)
class TestVariableGroup(object):
    """Unit tests for the VariableGroup catalog entity (repositories mocked).

    (Repaired: the assertion in test_variable_group_is_printed_with_classname
    had been corrupted by extraction.)
    """

    @patch.object(VariableGroupRepository, 'get_by_id')
    def test_get_variable_group_by_id(self, mocked_repo):
        # Given
        mocked_repo.return_value = test_variable_group1
        # When
        variable_group = VariableGroup.get(test_variable_group1.id)
        # Then
        assert isinstance(variable_group, object)
        assert isinstance(variable_group, VariableGroup)
        assert variable_group == test_variable_group1

    @patch.object(VariableRepository, 'get_all')
    def test_get_variables_by_variable_group(self, mocked_repo):
        # Given
        mocked_repo.return_value = test_variables
        # When
        variables = test_variable_group1.variables
        # Then
        mocked_repo.assert_called_once_with({VARIABLE_GROUP_FILTER: test_variable_group1.id})
        assert isinstance(variables, list)
        assert isinstance(variables, CatalogList)
        assert variables == test_variables

    def test_variable_group_properties(self):
        # Given
        variable_group = VariableGroup(db_variable_group1)
        # When
        variable_group_id = variable_group.id
        slug = variable_group.slug
        name = variable_group.name
        dataset = variable_group.dataset
        # Then
        assert variable_group_id == db_variable_group1['id']
        assert slug == db_variable_group1['slug']
        assert name == db_variable_group1['name']
        assert dataset == db_variable_group1['dataset_id']

    def test_variable_group_is_exported_as_series(self):
        # Given
        variable_group = test_variable_group1
        # When
        variable_group_series = variable_group.to_series()
        # Then
        assert isinstance(variable_group_series, pd.Series)
        assert variable_group_series['id'] == variable_group.id

    def test_variable_group_is_exported_as_dict(self):
        # Given
        variable_group = VariableGroup(db_variable_group1)
        # When
        variable_group_dict = variable_group.to_dict()
        # Then
        assert isinstance(variable_group_dict, dict)
        assert variable_group_dict == db_variable_group1

    def test_variable_group_is_represented_with_classname_and_slug(self):
        # Given
        variable_group = VariableGroup(db_variable_group1)
        # When
        variable_group_repr = repr(variable_group)
        # Then
        assert variable_group_repr == "<VariableGroup.get('{id}')>".format(id=db_variable_group1['slug'])

    def test_variable_group_is_printed_with_classname(self):
        # Given
        variable_group = VariableGroup(db_variable_group1)
        # When
        variable_group_str = str(variable_group)
        # Then
        assert variable_group_str == 'VariableGroup({dict_str})'.format(dict_str=str(db_variable_group1))

    @patch.object(VariableGroupRepository, 'get_all')
    def test_get_all_variables_groups(self, mocked_repo):
        # Given
        mocked_repo.return_value = test_variables_groups
        # When
        variables_groups = VariableGroup.get_all()
        # Then
        assert isinstance(variables_groups, list)
        assert isinstance(variables_groups, CatalogList)
        assert variables_groups == test_variables_groups

    def test_variable_group_list_is_printed_with_classname_and_slug(self):
        # Given
        variables_groups = CatalogList([test_variable_group1, test_variable_group2])
        # When
        variables_groups_str = str(variables_groups)
        # Then
        assert variables_groups_str == "[<VariableGroup.get('{id1}')>, <VariableGroup.get('{id2}')>]" \
            .format(id1=db_variable_group1['slug'], id2=db_variable_group2['slug'])

    def test_variable_group_list_is_represented_with_classname_and_slug(self):
        # Given
        variables_groups = CatalogList([test_variable_group1, test_variable_group2])
        # When
        variables_groups_repr = repr(variables_groups)
        # Then
        assert variables_groups_repr == "[<VariableGroup.get('{id1}')>, <VariableGroup.get('{id2}')>]"\
            .format(id1=db_variable_group1['slug'], id2=db_variable_group2['slug'])

    def test_variables_groups_items_are_obtained_as_variable_group(self):
        # Given
        variables_groups = test_variables_groups
        # When
        variable_group = variables_groups[0]
        # Then
        assert isinstance(variable_group, VariableGroup)
        assert variable_group == test_variable_group1

    def test_variables_groups_are_exported_as_dataframe(self):
        # Given
        variables_groups = test_variables_groups
        variable_group = variables_groups[0]
        # When
        variable_group_df = variables_groups.to_dataframe()
        sliced_variable_group = variable_group_df.iloc[0]
        # Then
        assert isinstance(variable_group_df, pd.DataFrame)
        assert isinstance(sliced_variable_group, pd.Series)
        assert sliced_variable_group.equals(variable_group.to_series())
|
vortex-ape/scikit-learn | sklearn/linear_model/tests/test_bayes.py | Python | bsd-3-clause | 6,251 | 0 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils import check_random_state
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn.linear_model import Ridge
from sklearn import datasets
def test_bayesian_on_diabetes():
    # Test BayesianRidge on diabetes
    raise SkipTest("test_bayesian_on_diabetes is broken")
    # NOTE(review): everything below is unreachable while the SkipTest above
    # remains; the body is kept for when the test is repaired.
    diabetes = datasets.load_diabetes()
    X, y = diabetes.data, diabetes.target
    clf = BayesianRidge(compute_score=True)
    # Test with more samples than features
    clf.fit(X, y)
    # Test that scores are increasing at each iteration
    assert_array_equal(np.diff(clf.scores_) > 0, True)
    # Test with more features than samples
    X = X[:5, :]
    y = y[:5]
    clf.fit(X, y)
    # Test that scores are increasing at each iteration
    assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_bayesian_ridge_parameter():
    # Test correctness of lambda_ and alpha_ parameters (GitHub issue #8224)
    X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
    y = np.array([1, 2, 3, 2, 0, 4, 5]).T
    # A Ridge regression model using an alpha value equal to the ratio of
    # lambda_ and alpha_ from the Bayesian Ridge model must be identical
    # (same penalized least-squares objective up to scaling).
    br_model = BayesianRidge(compute_score=True).fit(X, y)
    rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(X, y)
    assert_array_almost_equal(rr_model.coef_, br_model.coef_)
    assert_almost_equal(rr_model.intercept_, br_model.intercept_)
def test_bayesian_sample_weights():
    # Test correctness of the sample_weights method
    X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
    y = np.array([1, 2, 3, 2, 0, 4, 5]).T
    w = np.array([4, 3, 3, 1, 1, 2, 3]).T
    # A Ridge regression model using an alpha value equal to the ratio of
    # lambda_ and alpha_ from the Bayesian Ridge model must be identical,
    # also when sample weights are applied to both models.
    br_model = BayesianRidge(compute_score=True).fit(X, y, sample_weight=w)
    rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(
        X, y, sample_weight=w)
    assert_array_almost_equal(rr_model.coef_, br_model.coef_)
    assert_almost_equal(rr_model.intercept_, br_model.intercept_)
def test_toy_bayesian_ridge_object():
    # Test BayesianRidge on toy
    X = np.array([[1], [2], [6], [8], [10]])
    Y = np.array([1, 2, 6, 8, 10])
    clf = BayesianRidge(compute_score=True)
    clf.fit(X, Y)
    # Check that the model could approximately learn the identity function
    # (predictions checked to 2 decimal places).
    test = [[1], [3], [4]]
    assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_prediction_bayesian_ridge_ard_with_constant_input():
    # Test BayesianRidge and ARDRegression predictions for edge case of
    # constant target vectors
    n_samples = 4
    n_features = 5
    random_state = check_random_state(42)
    constant_value = random_state.rand()
    X = random_state.random_sample((n_samples, n_features))
    y = np.full(n_samples, constant_value,
                dtype=np.array(constant_value).dtype)
    # A constant target should be reproduced exactly (via the intercept).
    expected = np.full(n_samples, constant_value,
                       dtype=np.array(constant_value).dtype)
    for clf in [BayesianRidge(), ARDRegression()]:
        y_pred = clf.fit(X, y).predict(X)
        assert_array_almost_equal(y_pred, expected)
def test_std_bayesian_ridge_ard_with_constant_input():
    """Predictive std. dev. stays tiny when the target is constant."""
    # Test BayesianRidge and ARDRegression standard dev. for edge case of
    # constant target vector
    # The standard dev. should be relatively small (< 0.01 is tested here)
    n_samples = 4
    n_features = 5
    random_state = check_random_state(42)
    constant_value = random_state.rand()
    X = random_state.random_sample((n_samples, n_features))
    y = np.full(n_samples, constant_value,
                dtype=np.array(constant_value).dtype)
    expected_upper_boundary = 0.01
    for clf in [BayesianRidge(), ARDRegression()]:
        _, y_std = clf.fit(X, y).predict(X, return_std=True)
        assert_array_less(y_std, expected_upper_boundary)
def test_update_of_sigma_in_ard():
    """Regression test for issue #10128: `sigma_` reflects the final iteration.

    Checks that ``sigma_`` is updated correctly after the last iteration
    of the ARDRegression algorithm.
    """
    X = np.array([[1, 0],
                  [0, 0]])
    y = np.array([0, 0])
    clf = ARDRegression(n_iter=1)
    clf.fit(X, y)
    # With the inputs above, ARDRegression prunes one of the two coefficients
    # in the first iteration. Hence, the expected shape of `sigma_` is (1, 1).
    assert_equal(clf.sigma_.shape, (1, 1))
    # Ensure that no error is thrown at prediction stage
    clf.predict(X, return_std=True)
def test_toy_ard_object():
    """ARDRegression approximately learns the identity on a toy dataset."""
    # Test BayesianRegression ARD classifier
    X = np.array([[1], [2], [3]])
    Y = np.array([1, 2, 3])
    clf = ARDRegression(compute_score=True)
    clf.fit(X, Y)
    # Check that the model could approximately learn the identity function
    test = [[1], [3], [4]]
    assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_return_std():
    """Predicted std. dev. should track the injected noise level."""
    # Test return_std option for both Bayesian regressors
    def f(X):
        # Noise-free linear target; w and b are bound later in this function
        # (late-binding closure — defined before use at call time).
        return np.dot(X, w) + b
    def f_noise(X, noise_mult):
        return f(X) + np.random.randn(X.shape[0]) * noise_mult
    d = 5
    n_train = 50
    n_test = 10
    w = np.array([1.0, 0.0, 1.0, -1.0, 0.0])
    b = 1.0
    X = np.random.random((n_train, d))
    X_test = np.random.random((n_test, d))
    # The larger the noise, the looser the tolerance (decimal places).
    for decimal, noise_mult in enumerate([1, 0.1, 0.01]):
        y = f_noise(X, noise_mult)
        m1 = BayesianRidge()
        m1.fit(X, y)
        y_mean1, y_std1 = m1.predict(X_test, return_std=True)
        assert_array_almost_equal(y_std1, noise_mult, decimal=decimal)
        m2 = ARDRegression()
        m2.fit(X, y)
        y_mean2, y_std2 = m2.predict(X_test, return_std=True)
        assert_array_almost_equal(y_std2, noise_mult, decimal=decimal)
|
Typecraft/norsourceparser | norsourceparser/__init__.py | Python | mit | 126 | 0 | # -*- coding: utf-8 -*-
__author__ = """Tormod Haugland"""
__email__ = 'tormod.haugland@gmail.com'
__version__ = '1.0.0-rc2'
|
lastweek/gem5 | configs/spec2k6_classic/Caches.py | Python | bsd-3-clause | 3,186 | 0.002511 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
from m5.objects import *
class L1Cache(BaseCache):
    """Common parameters shared by the split L1 instruction/data caches."""
    assoc = 2                  # 2-way set associative
    block_size = 64            # cache line size in bytes
    hit_latency = '3ns'
    response_latency = '1ns'
    mshrs = 10                 # outstanding miss status holding registers
    tgts_per_mshr = 20
    is_top_level = True        # closest cache level to the CPU
    def connectCPU(self, bus):
        # NOTE(review): despite the name, this attaches the *memory* side to
        # the bus; subclasses override it to wire the CPU side — confirm
        # against callers before relying on this base implementation.
        self.mem_side = bus.slave
class L1ICache(L1Cache):
size = '32kB'
def connectCPU(self, cpu):
self.cpu_side = cpu.icache_port
class L1DCache(L1Cache):
size = '32kB'
def connectCPU(self, cpu):
self.cpu_side = cpu.dcache_port
class L2Cache(BaseCache):
    """Unified L2 cache sitting between the L1 bus and the memory side."""
    size = '256kB'
    assoc = 8                  # 8-way set associative
    block_size = 64            # cache line size in bytes
    hit_latency = '12ns'
    response_latency = '12ns'
    mshrs = 20                 # outstanding miss status holding registers
    tgts_per_mshr = 12
    def connectCPUSideBus(self, bus):
        # Receive requests from the CPU-side bus (bus master port).
        self.cpu_side = bus.master
    def connectMemSideBus(self, bus):
        # Forward misses toward memory via the mem-side bus (bus slave port).
        self.mem_side = bus.slave
class L3Cache(BaseCache):
size = '8MB'
assoc = 8
block_size = 64
hit_latency = '20ns'
response_latency = '20ns'
mshrs = 64
tgts_per_mshr = 12
class L4Cache(BaseCache):
size = '128MB'
assoc = 8
block_size = 64
hit_latency = '50ns'
response_latency = '50ns'
mshrs = 64
tgts_per_mshr = 12
#class PageTableWalkerCache(BaseCache):
# assoc = 2
# block_size = 64
# hit_latency = '1ns'
# response_latency = '1ns'
# mshrs = 10
# size = '1kB'
# tgts_per_mshr = 12
# is_top_level = True
#class IOCache(BaseCache):
# assoc = 8
# block_size = 64
# hit_latency = '10ns'
# response_latency = '10ns'
# mshrs = 20
# size = '1kB'
# tgts_per_mshr = 12
# forward_snoops = False
# is_top_level = True
|
olafhauk/mne-python | examples/visualization/plot_publication_figure.py | Python | bsd-3-clause | 11,270 | 0 | """
.. _ex-publication-figure:
===================================
Make figures more publication ready
===================================
In this example, we show several use cases to take MNE plots and
customize them for a more publication-ready look.
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# Daniel McCloy <dan.mccloy@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
###############################################################################
# .. contents:: Contents
# :local:
# :depth: 1
#
# Imports
# -------
# We are importing everything we need for this example:
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import (make_axes_locatable, ImageGrid,
inset_locator)
import mne
###############################################################################
# Evoked plot with brain activation
# ---------------------------------
#
# Suppose we want a figure with an evoked plot on top, and the brain activation
# below, with the brain subplot slightly bigger than the evoked plot. Let's
# start by loading some :ref:`example data <sample-dataset>`.
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-eeg-lh.stc')
fname_evoked = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
evoked = mne.read_evokeds(fname_evoked, 'Left Auditory')
evoked.pick_types(meg='grad').apply_baseline((None, 0.))
max_t = evoked.get_peak()[1]
stc = mne.read_source_estimate(fname_stc)
###############################################################################
# During interactive plotting, we might see figures like this:
evoked.plot()
stc.plot(views='lat', hemi='split', size=(800, 400), subject='sample',
subjects_dir=subjects_dir, initial_time=max_t,
time_viewer=False, show_traces=False)
###############################################################################
# To make a publication-ready figure, first we'll re-plot the brain on a white
# background, take a screenshot of it, and then crop out the white margins.
# While we're at it, let's change the colormap, set custom colormap limits and
# remove the default colorbar (so we can add a smaller, vertical one later):
colormap = 'viridis'
clim = dict(kind='value', lims=[4, 8, 12])
# Plot the STC, get the brain image, crop it:
brain = stc.plot(views='lat', hemi='split', size=(800, 400), subject='sample',
subjects_dir=subjects_dir, initial_time=max_t, background='w',
colorbar=False, clim=clim, colormap=colormap,
time_viewer=False, show_traces=False)
screenshot = brain.screenshot()
brain.close()
###############################################################################
# Now let's crop out the white margins and the white gap between hemispheres.
# The screenshot has dimensions ``(h, w, 3)``, with the last axis being R, G, B
# values for each pixel, encoded as integers between ``0`` and ``255``. ``(255,
# 255, 255)`` encodes a white pixel, so we'll detect any pixels that differ
# from that:
nonwhite_pix = (screenshot != 255).any(-1)
nonwhite_row = nonwhite_pix.any(1)
nonwhite_col = nonwhite_pix.any(0)
cropped_screenshot = screenshot[nonwhite_row][:, nonwhite_col]
# before/after results
fig = plt.figure(figsize=(4, 4))
axes = ImageGrid(fig, 111, nrows_ncols=(2, 1), axes_pad=0.5)
for ax, image, title in zip(axes, [screenshot, cropped_screenshot],
['Before', 'After']):
ax.imshow(image)
ax.set_title('{} cropping'.format(title))
###############################################################################
# A lot of figure settings can be adjusted after the figure is created, but
# many can also be adjusted in advance by updating the
# :data:`~matplotlib.rcParams` dictionary. This is especially useful when your
# script generates several figures that you want to all have the same style:
# Tweak the figure style
plt.rcParams.update({
'ytick.labelsize': 'small',
'xtick.labelsize': 'small',
'axes.labelsize': 'small',
'axes.titlesize': 'medium',
'grid.color': '0.75',
'grid.linestyle': ':',
})
###############################################################################
# Now let's create our custom figure. There are lots of ways to do this step.
# Here we'll create the figure and the subplot axes in one step, specifying
# overall figure size, number and arrangement of subplots, and the ratio of
# subplot heights for each row using :mod:`GridSpec keywords
# <matplotlib.gridspec>`. Other approaches (using
# :func:`~matplotlib.pyplot.subplot2grid`, or adding each axes manually) are
# shown commented out, for reference.
# sphinx_gallery_thumbnail_number = 4
# figsize unit is inches
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(4.5, 3.),
gridspec_kw=dict(height_ratios=[3, 4]))
# alternate way #1: using subplot2grid
# fig = plt.figure(figsize=(4.5, 3.))
# axes = [plt.subplot2grid((7, 1), (0, 0), rowspan=3),
# plt.subplot2grid((7, 1), (3, 0), rowspan=4)]
# alternate way #2: using figure-relative coordinates
# fig = plt.figure(figsize=(4.5, 3.))
# axes = [fig.add_axes([0.125, 0.58, 0.775, 0.3]), # left, bot., width, height
# fig.add_axes([0.125, 0.11, 0.775, 0.4])]
# we'll put the evoked plot in the upper axes, and the brain below
evoked_idx = 0
brain_idx = 1
# plot the evoked in the desired subplot, and add a line at peak activation
evoked.plot(axes=axes[evoked_idx])
peak_line = axes[evoked_idx].axvline(max_t, color='#66CCEE', ls='--')
# custom legend
axes[evoked_idx].legend(
[axes[evoked_idx].lines[0], peak_line], ['MEG data', 'Peak time'],
frameon=True, columnspacing=0.1, labelspacing=0.1,
fontsize=8, fancybox=True, handlelength=1.8)
# remove the "N_ave" annotation
axes[evoked_idx].texts = []
# Remove spines and add grid
axes[evoked_idx].grid(True)
axes[evoked_idx].set_axisbelow(True)
for key in ('top', 'right'):
axes[evoked_idx].spines[key].set(visible=False)
# Tweak the ticks and limits
axes[evoked_idx].set(
yticks=np.arange(-200, 201, 100), xticks=np.arange(-0.2, 0.51, 0.1))
axes[evoked_idx].set(
ylim=[-225, 225], xlim=[-0.2, 0.5])
# now add the brain to the lower axes
axes[brain_idx].imshow(cropped_screenshot)
axes[brain_idx].axis('off')
# add a vertical colorbar with the same properties as the 3D one
divider = make_axes_locatable(axes[brain_idx])
cax = divider.append_axes('right', size='5%', pad=0.2)
cbar = mne.viz.plot_brain_colorbar(cax, clim, colormap, label='Activation (F)')
# tweak margins and spacing
fig.subplots_adjust(
left=0.15, right=0.9, bottom=0.01, top=0.9, wspace=0.1, hspace=0.5)
# add subplot labels
for ax, label in zip(axes, 'AB'):
ax.text(0.03, ax.get_position().ymax, label, transform=fig.transFigure,
fontsize=12, fontweight='bold', va='top', ha='left')
###############################################################################
# Custom timecourse with montage inset
# ------------------------------------
#
# Suppose we want a figure with some mean timecourse extracted from a number of
# sensors, and we want a smaller panel within the figure to show a head outline
# with the positions of those sensors clearly marked.
# If you are familiar with MNE, you know that this is something that
# :func:`mne.viz.plot_compare_evokeds` does, see an example output in
# :ref:`ex-hf-sef-data` at the bottom.
#
# In this part of the example, we will show you how to achieve this result on
# your own figure, without having to use :func:`mne.viz.plot_compare_evokeds`!
#
# Let's start by loading some :ref:`example data <sample-dataset>`.
data_path = mne.datasets.sample.data_path()
fname_raw = op.join(data_path, "MEG", "sample", "sample_audvis_raw.fif")
raw = mne.io.read_raw_fif(fname_raw)
# For the sake of the example, we focus on EEG data
raw.pick_types(meg=False, eeg=True)
###############################################################################
# Let's make a plot.
|
denisbalyko/checkio-solution | count-inversions.py | Python | mit | 619 | 0 | def count_inversion(sequence):
flag, answer, sequence = True, 0, list(sequence)
while flag:
flag = False
for i in xrange(1, len(sequence)):
if sequence[i-1] > sequence[i]:
sequence[i], sequence[i-1] = sequence[i-1], sequence[i]
| answer += 1
flag = True
return answer
def test_function():
    """Self-checks for count_inversion on representative inputs."""
    assert count_inversion((1, 2, 5, 3, 4, 7, 6)) == 3, "Example"
    assert count_inversion((0, 1, 2, 3)) == 0, "Sorted"
    assert count_inversion((99, -99)) == 1, "Two numbers"
    # The "Reversed" label was garbled in the source ("Rever | sed").
    assert count_inversion((5, 3, 2, 1, 0)) == 10, "Reversed"
|
devicehive/devicehive-python | devicehive/subscription.py | Python | apache-2.0 | 2,598 | 0.000385 | # Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from devicehive.api_request import RemoveSubscriptionApiRequest, ApiRequest, \
ApiRequestError
class BaseSubscription(object):
    """Base class for DeviceHive subscriptions.

    Stores the subscribe call and its arguments so the subscription can be
    (re)created and later removed through the API.  Two source lines were
    garbled by extraction artifacts and are restored here (the error
    message in ``_ensure_exists`` and the ``subscription_id`` call in
    ``remove``).
    """

    ID_KEY = 'subscriptionId'

    def __init__(self, api, call, args):
        self._api = api
        self._call = call
        # Normalize to a hashable tuple so subscriptions can be used as
        # dict keys / set members by the API layer.
        self._args = self._hashable_args(args)
        self._id = None

    @staticmethod
    def _hashable_args(args):
        # Lists are unhashable; convert any list argument to a tuple.
        args = list(args)
        for i in range(len(args)):
            if not isinstance(args[i], list):
                continue
            args[i] = tuple(args[i])
        return tuple(args)

    def _ensure_exists(self):
        # Guard for operations that require an active subscription.
        if self._id:
            return
        raise SubscriptionError('Subscription does not exist.')

    def _get_subscription_type(self):
        # Subclasses return 'command' or 'notification'.
        raise NotImplementedError

    def subscribe(self):
        """Perform the subscribe call and remember the server-assigned id."""
        subscription = self._call(*self._args)
        self._id = subscription[self.ID_KEY]

    @property
    def id(self):
        """Server-assigned subscription id, or None if not subscribed."""
        return self._id

    def remove(self):
        """Unsubscribe on the server and clear the local id."""
        self._ensure_exists()
        remove_subscription_api_request = RemoveSubscriptionApiRequest()
        remove_subscription_api_request.subscription_id(self._id)
        api_request = ApiRequest(self._api)
        api_request.action('%s/unsubscribe' % self._get_subscription_type())
        api_request.set('subscriptionId', self._id)
        api_request.remove_subscription_request(remove_subscription_api_request)
        api_request.execute('Unsubscribe failure.')
        self._api.remove_subscription(self)
        self._id = None
class CommandsSubscription(BaseSubscription):
    """CommandsSubscription class"""
    def _get_subscription_type(self):
        # Used by the base class to build the '<type>/unsubscribe' action.
        return 'command'
class NotificationsSubscription(BaseSubscription):
    """NotificationsSubscription class"""
    def _get_subscription_type(self):
        # Used by the base class to build the '<type>/unsubscribe' action.
        return 'notification'
class SubscriptionError(ApiRequestError):
"""Subscription error."""
|
iulian787/spack | var/spack/repos/builtin/packages/bcftools/package.py | Python | lgpl-2.1 | 3,898 | 0.002565 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class Bcftools(AutotoolsPackage):
"""BCFtools is a set of utilities that manipulate variant calls in the
    Variant Call Format (VCF) and its binary counterpart BCF. All
commands work transparently with both VCFs and BCFs, both
uncompressed and BGZF-compressed."""
homepage = "http://samtools.github.io/bcftools/"
    url = "https://github.com/samtools/bcftools/releases/download/1.3.1/bcftools-1.3.1.tar.bz2"
version('1.10.2', sha256='f57301869d0055ce3b8e26d8ad880c0c1989bf25eaec8ea5db99b60e31354e2c')
version('1.9', sha256='6f36d0e6f16ec4acf88649fb1565d443acf0ba40f25a9afd87f14d14d13070c8')
version('1.8', sha256='4acbfd691f137742e0be63d09f516434f0faf617a5c60f466140e0677915fced')
version('1.7', sha256='dd4f63d91b0dffb0f0ce88ac75c2387251930c8063f7799611265083f8d302d1')
version('1.6', sha256='293010736b076cf684d2873928924fcc3d2c231a091084c2ac23a8045c7df982')
version('1.4', sha256='8fb1b0a47ed4e1f9d7c70129d7993aa650da1688fd931b10646d1c4707ae234d')
version('1.3.1', sha256='12c37a4054cbf1980223e2b3a80a7fdb3fd850324a4ba6832e38fdba91f1b924')
version('1.2', sha256='53c628339020dd45334a007c9cefdaf1cba3f1032492ec813b116379fa684fd6')
variant('libgsl',
default=False,
description='build options that require the GNU scientific '
'library')
variant('perl-filters',
default=False,
description='build in support for PERL scripts in -i/-e '
'filtering expressions, for versions >= 1.8.')
depends_on('gsl', when='+libgsl')
depends_on('py-matplotlib', when='@1.6:', type='run')
depends_on('perl', when='@1.8:~perl-filters', type='run')
depends_on('perl', when='@1.8:+perl-filters', type=('build', 'run'))
depends_on('htslib@1.10.2', when='@1.10.2')
depends_on('htslib@1.9', when='@1.9')
depends_on('htslib@1.8', when='@1.8')
depends_on('htslib@1.7', when='@1.7')
depends_on('htslib@1.6', when='@1.6')
depends_on('htslib@1.4', when='@1.4')
depends_on('htslib@1.3.1', when='@1.3.1')
depends_on('htslib@1.2', when='@1.2')
patch('makefile_12.patch', when='@1.2')
patch('fix_mk.patch', when='@1.2')
patch('makefile_13.patch', when='@1.3')
patch('makefile_14.patch', when='@1.4')
patch('guess-ploidy.py_2to3.patch', when='@1.6:1.9')
@when('@1.5:')
def configure_args(self):
args = []
args.append('--with-htslib={0}'.format(self.spec['htslib'].prefix))
args.extend(self.enable_or_disable('libgsl'))
if self.spec.satisfies('@1.8:'):
args.extend(self.enable_or_disable('perl-filters'))
return args
@when('@1.2:1.4')
def set_make_options(self):
options = []
options.append('prefix={0}'.format(self.prefix))
options.append('HTSDIR={0}'.format(self.spec['htslib'].prefix))
if '+libgsl' in self.spec:
options.append('USE_GPL=1')
return options
@when('@1.2:1.4')
def autoreconf(self, spec, prefix):
touch('configure')
@when('@1.2:1.4')
def configure(self, spec, prefix):
pass
@when('@1.2:1.4')
def build(self, spec, prefix):
make_options = self.set_make_options()
make(*make_options)
@when('@1.2:1.4')
def install(self, spec, prefix):
make_options = self.set_make_options()
make('install', *make_options)
if spec.satisfies('@1.2'):
mkdirp(self.prefix.libexec.bcftools)
install('plugins/*.so', self.prefix.libexec.bcftools)
@when('@1.2')
def setup_run_environment(self, env):
env.set('BCFTOOLS_PLUGINS', self.prefix.libexec.bcftools)
|
dmlc/tvm | apps/topi_recipe/gemm/gemm_int8.py | Python | apache-2.0 | 5,879 | 0.001531 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Example code to perform int8 GEMM"
import logging
import sys
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi.cuda.tensor_intrin import dp4a
DO_TUNING = True
PRETUNED_INDEX = 75333
intrin_dp4a = dp4a("local", "local", "local")
@autotvm.template
def gemm_int8(n, m, l):
A = te.placeholder((n, l), name="A", dtype="int8")
B = te.placeholder((m, l), name="B", dtype="int8")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(n, m),
lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
name="C",
)
cfg = autotvm.get_config()
s = te.create_schedule(C.op)
y, x = C.op.axis
AA = s.cache_read(A, "shared", [C])
BB = s.cache_read(B, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
k = CC.op.reduce_axis[0]
cfg.define_split(
"tile_k",
cfg.axis(k),
num_outputs=3,
filter=lambda entity: entity.size[2] == 4 and entity.size[0] * 2 >= entity.size[1],
)
ko, kt, ki = cfg["tile_k"].apply(s, CC, k)
s[CC].tensorize(ki, intrin_dp4a)
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
def block_size_filter(entity):
return (
entity.size[0] * 2 >= entity.size[1] * 2
and entity.size[1] <= 16
and entity.size[3] <= 4
)
cfg.define_split("tile_y", cfg.axis(y), num_outputs=4, filter=block_size_filter)
cfg.define_split("tile_x", cfg.axis(x), num_outputs=4, filter=block_size_filter)
by, tyz, ty, yi = cfg["tile_y"].apply(s, C, y)
bx, txz, tx, xi = cfg["tile_x"].apply(s, C, x)
s[C].bind(by, block_y)
s[C].bind(bx, block_x)
s[C].bind(tyz, te.thread_axis("vthread"))
s[C].bind(txz, te.thread_axis("vthread"))
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)
s[CC].compute_at(s[C], tx)
yo, xo = CC.op.axis
s[CC].reorder(ko, kt, yo, xo, ki)
s[CC].unroll(kt)
for stage in [AL, BL]:
s[stage].compute_at(s[CC], kt)
_, xi = s[stage].split(stage.op.axis[1], factor=4)
s[stage].vectorize(xi)
s[stage].double_buffer()
cfg.define_knob("storage_align", [16, 48])
for stage in [AA, BB]:
s[stage].storage_align(s[stage].op.axis[0], cfg["storage_align"].val, 0)
s[stage].compute_at(s[CC], ko)
fused = s[stage].fuse(*s[stage].op.axis)
ty, tx = s[stage].split(fused, nparts=cfg["tile_y"].size[2])
tx, xi = s[stage].split(tx, nparts=cfg["tile_x"].size[2])
_, xi = s[stage].split(xi, factor=16)
s[stage].bind(ty, thread_y)
s[stage].bind(tx, thread_x)
s[stage].vectorize(xi)
cfg.define_knob("auto_unroll_max_step", [512, 1500])
s[C].pragma(by, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[C].pragma(by, "unroll_explicit", False)
cfg.add_flop(n * m * l * 2)
return s, [A, B, C]
if __name__ == "__main__":
N = 2048
n = m = l = N
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
task = autotvm.task.create(gemm_int8, args=(n, m, l), target="cuda")
print(task.config_space)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4),
)
log_name = "gemm_int8.log"
if DO_TUNING:
tuner = autotvm.tuner.XGBTuner(task)
tuner.tune(
n_trial=1000,
            measure_option=measure_option,
callbacks=[autotvm.callback.log_to_file(log_name)],
)
dispatch_context = autotvm.apply_history_best(log_name)
        best_config = dispatch_context.query(task.target, task.workload)
print("\nBest config:")
print(best_config)
else:
config = task.config_space.get(PRETUNED_INDEX)
dispatch_context = autotvm.task.ApplyConfig(config)
print("Using pretuned config:")
print(config)
with dispatch_context:
with tvm.target.Target("cuda"):
s, arg_bufs = gemm_int8(n, m, l)
f = tvm.build(s, arg_bufs, "cuda", name="gemm_int8")
dev = tvm.device("cuda", 0)
a_np = np.random.randint(size=(n, l), low=-128, high=127, dtype="int8")
b_np = np.random.randint(size=(m, l), low=-128, high=127, dtype="int8")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((n, m), dtype="int32"), dev)
f(a, b, c)
tvm.testing.assert_allclose(
c.numpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5
)
num_ops = 2 * l * m * n
num_runs = 1000
timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)
t = timer_f(a, b, c).mean
GOPS = num_ops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GOPS." % (num_runs, t * 1e3, GOPS))
|
vardis/pano | src/pano/external/interactiveConsole/console.py | Python | mit | 4,407 | 0.025414 | # -----
# customConsoleClass
# -----
# by Reto Spoerri
# rspoerri AT nouser.org
# http://www.nouser.org
# -----
# wraps the interactiveConsole
# -----
from shared import *
from completer import completePython
import sys, inspect
from code import InteractiveConsole
class FileCacher:
    """Collects stdout text so it can be inspected before being returned."""
    def __init__(self):
        self.reset()
    def reset(self):
        # Drop any previously captured lines.
        self.out = []
    def write(self, line):
        self.out.append(line)
    def flush(self):
        # Join everything captured so far, strip trailing whitespace,
        # and start a fresh capture buffer.
        captured = '\n'.join(self.out).rstrip()
        self.reset()
        return captured
class customConsoleClass( InteractiveConsole ):
inputColor = (1.0,0.8,1.0,1.0)
outputColor = (0.8,1.0,1.0,1.0)
def __init__( self, localsEnv=globals() ):
InteractiveConsole.__init__( self, localsEnv )
print "customConsoleClass", localsEnv
self.consoleLocals = localsEnv
# catch the output of the interactive interpreter
self.stdout = sys.stdout
self.stderr = sys.stderr
self.cache = FileCacher()
self.help()
def help( self ):
text = " ------ InteractiveConsole ------ \n"
if PYTHON_PRE is None:
text += """- direct entry enabled"""
else:
text += """- use '%s' in front of a line to send it to the interactiveConsole component
- example: %sfor i in xrange(10): # no spaces between the ! and the 'for'
- example: %s print i
- example: %s <enter>\n""" % (PYTHON_PRE,PYTHON_PRE,PYTHON_PRE,PYTHON_PRE)
text += """- BUGS : do not try to call something like 'while True:'
you will not be able to break it, you must at least include 'Task.step()'
TAB : autocomplete commands
F1 : help"""
return text
def get_output( self ):
sys.stdout = self.cache
sys.stderr = self.cache
def return_output( self ):
sys.stdout = self.stdout
sys.stderr = self.stderr
def push( self, input ):
output = list()
output.append( ["%s" % input, '>>> ', self.inputColor] )
# execute on interactiveConsole console
self.get_output()
InteractiveConsole.push( self, input )
self.return_output()
resultText = self.cache.flush()
if len(resultText) > 0:
output.append( ["%s" % resultText, '> ', self.outputColor] )
return output
def autocomplete( self, pythonText, currentCursorPos ):
newText = pythonText
printText = None
pythonTestSplit = pythonText.split(' ')
env = self.consoleLocals
term = completePython( env, pythonText )
# if the entered name is uniq, use autocomplete
if len(term) == 1:
newTextList = pythonTestSplit[0:-1]
newTextList.append( term[0] )
newText = ' '.join(newTextList)
# output the list of available names
elif len(term) > 1:
printText = str(term)
return newText, printText
def autohelp( self, pythonText, currentCursorPos ):
# read the docstring
docString = self.push( "%s.__doc__" % pythonText )
if len(docString) == 1 or \
'Traceback' in docString[1][0] or \
'SyntaxError' in docString[-1][0]:
print "discarding __doc__ of %s l(%i): %s " % (pythonText, len(docString), docString)
docString = None
else:
print "accepting doc string %s" % docString
docString = docString[1][0]
# read the first five lines of the sourcecode
self.push( "import inspect" )
    inspectString = self.push( "inspect.getsourcelines( %s )[0][0:6]" % pythonText )
    if 'SyntaxError' in inspectString[1][0] or \
       'Traceback' in inspectString[1][0] or \
len(inspectString) > 6:
print "discarding inspect of %s l(%i): %s" % (pythonText, len(inspectString), inspectString)
inspectString = None
else:
print "accepting inspect string %s" % inspectString
inspectString = inspectString[1][0]
# if no docstring found
if docString is not None:
lines = docString
else:
if inspectString is not None:
lines = inspectString
else:
lines = 'no sourcecode & docstring found','',
print "test", lines
# return the help text
exec( "helpText = ''.join(%s)" % str(lines) )
#helpText = ''.join(lines)
helpText = "--- help for %s ---\n" % (pythonText) + helpText
return helpText
|
tensorflow/datasets | tensorflow_datasets/image/flic_test.py | Python | apache-2.0 | 1,130 | 0.006195 | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for FLIC dataset."""
from tensorflow_datasets import testing
from tensorflow_datasets.image import flic
class FlicTestSmall(testing.DatasetBuilderTestCase):
DATASET_CLASS = flic.Flic
BUILDER_CONFIG_NAMES_TO_TEST = ["small"]
SPLITS = {
"train": 1,
"test": 1,
}
class FlicTestFull(testing.DatasetBuilderTestCase):
  DATASET_CLASS = flic.Flic
BUILDER_CONFIG_NAMES_TO_TEST = ["full"]
SPLITS = {
"train": 1,
"test": 1,
}
if __name__ == "__main__":
testing.test_main()
|
bartoszj/Mallet | mallet/CFNetwork/NSCFBackgroundDownloadTask.py | Python | mit | 2,176 | 0.005055 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Bartosz Janda
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from .. import helpers
from ..common import SummaryBase
import NSCFBackgroundSessionTask
class NSCFBackgroundDownloadTaskSyntheticProvider(NSCFBackgroundSessionTask.NSCFBackgroundSessionTaskSyntheticProvider):
"""
Class representing __NSCFBackgroundDownloadTask.
"""
def __init__(self, value_obj, internal_dict):
super(NSCFBackgroundDownloadTaskSyntheticProvider, self).__init__(value_obj, internal_dict)
self.type_name = "__NSCFBackgroundDownloadTask"
        self.register_child_value("finished", ivar_name="_finished",
                                  primitive_value_function=SummaryBase.get_bool_value,
                                  summary_function=self.get_finished_summary)
@staticmethod
def get_finished_summary(value):
if value:
return "finish | ed"
return None
def summary_provider(value_obj, internal_dict):
    """LLDB entry point: build a summary for ``__NSCFBackgroundDownloadTask`` values."""
    provider_class = NSCFBackgroundDownloadTaskSyntheticProvider
    return helpers.generic_summary_provider(value_obj, internal_dict, provider_class)
|
googleads/google-ads-python | google/ads/googleads/v10/services/types/campaign_asset_service.py | Python | apache-2.0 | 6,424 | 0.000934 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v10.enums.types import (
response_content_type as gage_response_content_type,
)
from google.ads.googleads.v10.resources.types import (
campaign_asset as gagr_campaign_asset,
)
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.services",
marshal="google.ads.googleads.v10",
manifest={
"MutateCampaignAssetsRequest",
"CampaignAssetOperation",
"MutateCampaignAssetsResponse",
"MutateCampaignAssetResult",
},
)
class MutateCampaignAssetsRequest(proto.Message):
    r"""Request message for
    [CampaignAssetService.MutateCampaignAssets][google.ads.googleads.v10.services.CampaignAssetService.MutateCampaignAssets].

    Attributes:
        customer_id (str):
            Required. The ID of the customer whose
            campaign assets are being modified.
        operations (Sequence[google.ads.googleads.v10.services.types.CampaignAssetOperation]):
            Required. The list of operations to perform
            on individual campaign assets.
        partial_failure (bool):
            If true, successful operations will be
            carried out and invalid operations will return
            errors. If false, all operations will be carried
            out in one transaction if and only if they are
            all valid. Default is false.
        validate_only (bool):
            If true, the request is validated but not
            executed. Only errors are returned, not results.
        response_content_type (google.ads.googleads.v10.enums.types.ResponseContentTypeEnum.ResponseContentType):
            The response content type setting. Determines
            whether the mutable resource or just the
            resource name should be returned post mutation.
    """

    # Field numbers mirror the CampaignAssetService proto definition.
    customer_id = proto.Field(proto.STRING, number=1,)
    # Forward reference by name: CampaignAssetOperation is declared below.
    operations = proto.RepeatedField(
        proto.MESSAGE, number=2, message="CampaignAssetOperation",
    )
    partial_failure = proto.Field(proto.BOOL, number=3,)
    validate_only = proto.Field(proto.BOOL, number=4,)
    response_content_type = proto.Field(
        proto.ENUM,
        number=5,
        enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
    )
class CampaignAssetOperation(proto.Message):
    r"""A single operation (create, update, remove) on a campaign
    asset.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same
    time; setting any member automatically clears all the others.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            FieldMask that determines which resource
            fields are modified in an update.
        create (google.ads.googleads.v10.resources.types.CampaignAsset):
            Create operation: No resource name is
            expected for the new campaign asset.

            This field is a member of `oneof`_ ``operation``.
        update (google.ads.googleads.v10.resources.types.CampaignAsset):
            Update operation: The campaign asset is
            expected to have a valid resource name.

            This field is a member of `oneof`_ ``operation``.
        remove (str):
            Remove operation: A resource name for the removed campaign
            asset is expected, in this format:
            ``customers/{customer_id}/campaignAssets/{campaign_id}~{asset_id}~{field_type}``

            This field is a member of `oneof`_ ``operation``.
    """

    # Fields declared in proto field-number order; create/update/remove all
    # belong to the mutually exclusive "operation" oneof.
    create = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="operation",
        message=gagr_campaign_asset.CampaignAsset,
    )
    remove = proto.Field(proto.STRING, number=2, oneof="operation")
    update = proto.Field(
        proto.MESSAGE,
        number=3,
        oneof="operation",
        message=gagr_campaign_asset.CampaignAsset,
    )
    update_mask = proto.Field(
        proto.MESSAGE, number=4, message=field_mask_pb2.FieldMask,
    )
class MutateCampaignAssetsResponse(proto.Message):
    r"""Response message for a campaign asset mutate.

    Attributes:
        partial_failure_error (google.rpc.status_pb2.Status):
            Errors that pertain to operation failures in the partial
            failure mode. Returned only when partial_failure = true and
            all errors occur inside the operations. If any errors occur
            outside the operations (e.g. auth errors), we return an RPC
            level error.
        results (Sequence[google.ads.googleads.v10.services.types.MutateCampaignAssetResult]):
            All results for the mutate.
    """

    # Populated only when the request was sent with partial_failure = true.
    partial_failure_error = proto.Field(
        proto.MESSAGE, number=1, message=status_pb2.Status,
    )
    # One result per operation in the request, in order.
    results = proto.RepeatedField(
        proto.MESSAGE, number=2, message="MutateCampaignAssetResult",
    )
class MutateCampaignAssetResult(proto.Message):
    r"""The result for the campaign asset mutate.

    Attributes:
        resource_name (str):
            Returned for successful operations.
        campaign_asset (google.ads.googleads.v10.resources.types.CampaignAsset):
            The mutated campaign asset with only mutable fields after
            mutate. The field will only be returned when
            response_content_type is set to "MUTABLE_RESOURCE".
    """

    resource_name = proto.Field(proto.STRING, number=1,)
    # Only set when the request asked for MUTABLE_RESOURCE content.
    campaign_asset = proto.Field(
        proto.MESSAGE, number=2, message=gagr_campaign_asset.CampaignAsset,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
|
edublancas/titanic | pipeline/feature_extraction.py | Python | mit | 1,081 | 0.012026 | import pandas as pd
import re
df = pd.read_csv('data/combined_clean.csv', index_col='id')
#Replace name wit title
regex = '.*,{1}\s{1}([a-zA-Z\s]+)\.{1}.*'
name_title = df.name.map(lambda name: re.search(regex, name).group(1))
df.name = name_title
#Cabin with first letter
prefix_cabin = df.cabin.map(lambda c: c[:1])
df.cabin = prefix_cabin
#Map name title to social status
dic = {'Lady': 'high', 'Sir': 'high', 'the Countess': 'high',
'Jonkheer': 'high', 'Major': 'high', 'Master': 'high'}
df['social_status'] = df.name.map(lambda name: dic.get(name, 'normal'))
#Round fare
#df.fare = df.fare.astype(int)
#Feature interactions
df['fam_size'] = df.siblings_and_spouses + df.parents_and_chi | ldren
df['fam_mul_size'] = df.siblings_and_spouses * df.parents_and_children
df['fare_mul_pclass'] = df.fare/df.p_class.astype(float)
df['fare_mul_age'] = df.fare*df.age
df['fare_div_age'] = df.fare/df.age.astype(float)
df['pclass_mul_age'] = df.p_class*df.age.astype(float)
df['pclass_div_age'] = df.p_class/df.age.astype(float)
df.to_csv | ('data/combined_with_features.csv') |
popazerty/e2_sh4 | lib/python/Screens/Standby.py | Python | gpl-2.0 | 9,640 | 0.028112 | from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.config import config
from Components.AVSwitch import AVSwitch
from Components.SystemInfo import SystemInfo
from GlobalActions import globalActionMap
from enigma import eDVBVolumecontrol, eTimer, eServiceReference
from boxbranding import getMachineBrand, getMachineName, getBoxType, getBrandOEM
from Tools import Notifications
from time import localtime, time
import Screens.InfoBar
from gettext import dgettext
inStandby = None
class Standby2(Screen):
	"""Standby screen: mutes audio, stops or pauses the running service and
	switches the A/V input away from the encoder until the user wakes the
	receiver again. The module-global `inStandby` points at the live
	instance while standby is active."""

	def Power(self):
		"""Leave standby: restore HDMI/encoder output, unmute and close."""
		print "leave standby"
		# NOTE(review): ('fulan') is a plain string, not a 1-tuple, so this
		# is a substring test; it still matches the exact brand "fulan".
		if (getBrandOEM() in ('fulan')):
			open("/proc/stb/hdmi/output", "w").write("on")
		#set input to encoder
		self.avswitch.setInput("ENCODER")
		#restart last played service
		#unmute adc
		self.leaveMute()
		#kill me
		self.close(True)

	def setMute(self):
		"""Mute the volume, remembering whether it was already muted."""
		if eDVBVolumecontrol.getInstance().isMuted():
			self.wasMuted = 1
			print "mute already active"
		else:
			self.wasMuted = 0
			eDVBVolumecontrol.getInstance().volumeToggleMute()

	def leaveMute(self):
		"""Unmute again, but only if standby was the one that muted."""
		if self.wasMuted == 0:
			eDVBVolumecontrol.getInstance().volumeToggleMute()

	def __init__(self, session):
		Screen.__init__(self, session)
		self.skinName = "Standby"
		self.avswitch = AVSwitch()

		print "enter standby"

		# Only the power keys can wake the box while this screen is shown.
		self["actions"] = ActionMap( [ "StandbyActions" ],
		{
			"power": self.Power,
			"discrete_on": self.Power
		}, -1)

		globalActionMap.setEnabled(False)

		# Fallback timer used when the system clock is not yet valid
		# (see below): stop the service after one minute anyway.
		self.standbyTimeUnknownTimer = eTimer()
		#mute adc
		self.setMute()

		self.paused_service = None
		self.prev_running_service = None

		if self.session.current_dialog:
			if self.session.current_dialog.ALLOW_SUSPEND == Screen.SUSPEND_STOPS:
				# tm_year > 1970 means the clock has been set; remember the
				# running service so it can be restarted on wake-up.
				if localtime(time()).tm_year > 1970 and self.session.nav.getCurrentlyPlayingServiceOrGroup():
					self.prev_running_service = self.session.nav.getCurrentlyPlayingServiceOrGroup()
					self.session.nav.stopService()
				else:
					self.standbyTimeUnknownTimer.callback.append(self.stopService)
					self.standbyTimeUnknownTimer.startLongTimer(60)
			elif self.session.current_dialog.ALLOW_SUSPEND == Screen.SUSPEND_PAUSES:
				self.paused_service = self.session.current_dialog
				self.paused_service.pauseService()
		if self.session.pipshown:
			# Toggle picture-in-picture off before suspending.
			from Screens.InfoBar import InfoBar
			InfoBar.instance and hasattr(InfoBar.instance, "showPiP") and InfoBar.instance.showPiP()

		#set input to vcr scart
		if SystemInfo["ScartSwitch"]:
			self.avswitch.setInput("SCART")
		else:
			self.avswitch.setInput("AUX")
		if (getBrandOEM() in ('fulan')):
			open("/proc/stb/hdmi/output", "w").write("off")

		self.onFirstExecBegin.append(self.__onFirstExecBegin)
		self.onClose.append(self.__onClose)

	def __onClose(self):
		"""Tear down standby state and resume the previous service."""
		global inStandby
		inStandby = None
		self.standbyTimeUnknownTimer.stop()
		if self.prev_running_service:
			self.session.nav.playService(self.prev_running_service)
		elif self.paused_service:
			self.paused_service.unPauseService()
		self.session.screen["Standby"].boolean = False
		globalActionMap.setEnabled(True)

	def __onFirstExecBegin(self):
		"""Publish standby state and bump the standby counter."""
		global inStandby
		inStandby = self
		self.session.screen["Standby"].boolean = True
		config.misc.standbyCounter.value += 1

	def createSummary(self):
		return StandbySummary

	def stopService(self):
		# Timer callback: stop playback once the clock became valid.
		self.prev_running_service = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		self.session.nav.stopService()
class Standby(Standby2):
	"""Standby entry point that first checks for a running timeshift: if
	timeshift is active the user is asked before standby is entered via a
	deferred Standby2 notification; otherwise behaves exactly like Standby2."""

	def __init__(self, session):
		if Screens.InfoBar.InfoBar and Screens.InfoBar.InfoBar.instance and Screens.InfoBar.InfoBar.ptsGetTimeshiftStatus(Screens.InfoBar.InfoBar.instance):
			# Invisible placeholder screen: only the confirmation dialog
			# should be shown, not the standby skin itself.
			self.skin = """<screen position="0,0" size="0,0"/>"""
			Screen.__init__(self, session)
			self.onFirstExecBegin.append(self.showMessageBox)
			self.onHide.append(self.close)
		else:
			Standby2.__init__(self, session)

	def showMessageBox(self):
		"""Ask InfoBar's timeshift check to confirm leaving timeshift."""
		Screens.InfoBar.InfoBar.checkTimeshiftRunning(Screens.InfoBar.InfoBar.instance, self.showMessageBoxcallback)

	def showMessageBoxcallback(self, answer):
		# Only proceed to standby if the user confirmed.
		if answer:
			self.onClose.append(self.doStandby)

	def doStandby(self):
		# Re-enter standby via a notification so it runs after this
		# placeholder screen has closed.
		Notifications.AddNotification(Screens.Standby.Standby2)
class StandbySummary(Screen):
	"""Front-panel (LCD) summary shown while in standby: a clock that can
	blink during recordings, depending on user configuration."""
	skin = """
	<screen position="0,0" size="132,64">
		<widget source="global.CurrentTime" render="Label" position="0,0" size="132,64" font="Regular;40" halign="center">
			<convert type="ClockToText" />
		</widget>
		<widget source="session.RecordState" render="FixedLabel" text=" " position="0,0" size="132,64" zPosition="1" >
			<convert type="ConfigEntryTest">config.usage.blinking_display_clock_during_recording,True,CheckSourceBoolean</convert>
			<convert type="ConditionalShowHide">Blink</convert>
		</widget>
	</screen>"""
from enigma import quitMainloop, iRecordableService
from Screens.MessageBox import MessageBox
from time import time
from Components.Task import job_manager
class QuitMainloopScreen(Screen):
	"""Borderless full-screen notice shown while the box shuts down,
	reboots, restarts the GUI or flashes, selected by `retvalue`."""

	def __init__(self, session, retvalue=1):
		self.skin = """<screen name="QuitMainloopScreen" position="fill" flags="wfNoBorder">
			<ePixmap pixmap="icons/input_info.png" position="c-27,c-60" size="53,53" alphatest="on" />
			<widget name="text" position="center,c+5" size="720,100" font="Regular;22" halign="center" />
		</screen>"""
		Screen.__init__(self, session)
		from Components.Label import Label
		# Map the enigma2 quit code to a user-facing message; an unknown
		# code yields None (Label then shows nothing).
		text = { 1: _("Your %s %s is shutting down") % (getMachineBrand(), getMachineName()),
			2: _("Your %s %s is rebooting") % (getMachineBrand(), getMachineName()),
			3: _("The user interface of your %s %s is restarting") % (getMachineBrand(), getMachineName()),
			4: _("Your frontprocessor will be upgraded\nPlease wait until your %s %s reboots\nThis may take a few minutes") % (getMachineBrand(), getMachineName()),
			5: _("The user interface of your %s %s is restarting\ndue to an error in mytest.py") % (getMachineBrand(), getMachineName()),
			42: _("Upgrade in progress\nPlease wait until your %s %s reboots\nThis may take a few minutes") % (getMachineBrand(), getMachineName()),
			43: _("Reflash in progress\nPlease wait until your %s %s reboots\nThis may take a few minutes") % (getMachineBrand(), getMachineName()) }.get(retvalue)
		self["text"] = Label(text)
# Module-level flag: True while a TryQuitMainloop dialog is being shown.
inTryQuitMainloop = False
class TryQuitMainloop(MessageBox):
def __init__(self, session, retvalue=1, timeout=-1, default_yes = True):
self.retval = retvalue
self.ptsmainloopvalue = retvalue
recordings = session.nav.getRecordings()
jobs = []
for job in job_manager.getPendingJobs():
if job.name != dgettext('vix', 'SoftcamCheck'):
jobs.append(job)
inTimeshift = Screens.InfoBar.InfoBar and Screens.InfoBar.InfoBar.instance and Screens.InfoBar.InfoBar.ptsGetTimeshiftStatus(Screens.InfoBar.InfoBar.instance)
self.connected = False
reason = ""
next_rec_time = -1
if not recordings:
| next_rec_time = session.nav.RecordTimer.getNextRecordingTime()
if len(jobs):
reason = (ngettext("%d job is running in the background!", "%d jobs are running in the background!", len(jobs)) % len(job | s)) + '\n'
if len(jobs) == 1:
job = jobs[0]
reason += "%s: %s (%d%%)\n" % (job.getStatustext(), job.name, int(100*job.progress/float(job.end)))
else:
reason += (_("%d jobs are running in the background!") % len(jobs)) + '\n'
if inTimeshift:
reason = _("You seem to be in timeshift!") + '\n'
if recordings or (next_rec_time > 0 and (next_rec_time - time()) < 360):
default_yes = False
reason = _("Recording(s) are in progress or coming up in few seconds!") + '\n'
if reason and inStandby:
session.nav.record_event.append(self.getRecordEvent)
self.skinName = ""
elif reason and not inStandby:
text = { 1: _("Really shutdown now?"),
2: _("Really reboot now?"),
3: _("Really restart now?"),
4: _("Really upgrade the frontprocessor and reboot now?"),
42: _("Really upgrade your %s %s and reboot now?") % (getMachineBrand(), getMachineName()),
43: _("Really reflash your %s %s and reboot now?") % (getMachineBrand(), getMachineName()) }.get(retvalue)
if text:
MessageBox.__init__(self, session, reason+text, type = MessageBox.TYPE_YESNO, timeout = timeout, default = default_yes)
self.skinName = "MessageBoxSimple"
session.nav.record_event.a |
mfherbst/spack | var/spack/repos/builtin/packages/nnvm/package.py | Python | lgpl-2.1 | 1,955 | 0.000512 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All righ | ts reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHO | UT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Nnvm(CMakePackage):
    """nnvm is a modular, decentralized and lightweight
    part to help build deep learning libraries."""

    homepage = "https://github.com/dmlc/nnvm"
    git = "https://github.com/dmlc/nnvm.git"

    version('master', branch='master')
    version('20170418', commit='b279286304ac954098d94a2695bca599e832effb')

    variant('shared', default=True, description='Build a shared NNVM lib.')

    depends_on('dmlc-core')

    patch('cmake.patch')

    def cmake_args(self):
        """Toggle the complementary shared/static CMake switches from the
        boolean `shared` variant."""
        build_shared = '+shared' in self.spec
        return [
            '-DUSE_SHARED_NNVM={0}'.format('ON' if build_shared else 'OFF'),
            '-DUSE_STATIC_NNVM={0}'.format('OFF' if build_shared else 'ON'),
        ]
|
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Foursquare/OAuth/FinalizeOAuth.py | Python | gpl-2.0 | 5,006 | 0.004994 | # -*- coding: utf-8 -*-
###############################################################################
#
# FinalizeOAuth
# Completes the OAuth process by retrieving a Foursquare access token for a user, after they have visited the authorization URL returned by the InitializeOAuth choreo and clicked "allow."
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ap | plicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
# |
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FinalizeOAuth(Choreography):
    """Choreo wrapper for the Foursquare OAuth finalization step; ties the
    input/result/execution classes below to the Temboo Choreo path."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the FinalizeOAuth Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(FinalizeOAuth, self).__init__(temboo_session, '/Library/Foursquare/OAuth/FinalizeOAuth')

    def new_input_set(self):
        # Factory for an empty input set for this Choreo.
        return FinalizeOAuthInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed result set.
        return FinalizeOAuthResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Wrap an in-flight execution in the typed execution handle.
        return FinalizeOAuthChoreographyExecution(session, exec_id, path)
class FinalizeOAuthInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the FinalizeOAuth
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Note: AccountName/AppKeyName/AppKeyValue are deprecated and kept only
    # for backward compatibility with older Temboo SDK callers.
    def set_AccountName(self, value):
        """
        Set the value of the AccountName input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
        """
        super(FinalizeOAuthInputSet, self)._set_input('AccountName', value)
    def set_AppKeyName(self, value):
        """
        Set the value of the AppKeyName input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
        """
        super(FinalizeOAuthInputSet, self)._set_input('AppKeyName', value)
    def set_AppKeyValue(self, value):
        """
        Set the value of the AppKeyValue input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
        """
        super(FinalizeOAuthInputSet, self)._set_input('AppKeyValue', value)
    def set_CallbackID(self, value):
        """
        Set the value of the CallbackID input for this Choreo. ((required, string) The callback token returned by the InitializeOAuth Choreo. Used to retrieve the authorization code after the user authorizes.)
        """
        super(FinalizeOAuthInputSet, self)._set_input('CallbackID', value)
    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((required, string) The Client ID provided by Foursquare after registering your application.)
        """
        super(FinalizeOAuthInputSet, self)._set_input('ClientID', value)
    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((required, string) The Client Secret provided by Foursquare after registering your application.)
        """
        super(FinalizeOAuthInputSet, self)._set_input('ClientSecret', value)
    def set_Timeout(self, value):
        """
        Set the value of the Timeout input for this Choreo. ((optional, integer) The amount of time (in seconds) to poll your Temboo callback URL to see if your app's user has allowed or denied the request for access. Defaults to 20. Max is 60.)
        """
        super(FinalizeOAuthInputSet, self)._set_input('Timeout', value)
class FinalizeOAuthResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the FinalizeOAuth Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # Parse a raw JSON string output into Python objects.
        return json.loads(str)

    def get_AccessToken(self):
        """
        Retrieve the value for the "AccessToken" output from this Choreo execution. ((string) The access token for the user that has granted access to your application.)
        """
        return self._output.get('AccessToken', None)
class FinalizeOAuthChoreographyExecution(ChoreographyExecution):
    """Execution handle that yields FinalizeOAuthResultSet results."""

    def _make_result_set(self, response, path):
        return FinalizeOAuthResultSet(response, path)
|
fametrano/BitcoinBlockchainTechnology | tests/mnemonic/test_entropy.py | Python | mit | 10,702 | 0.000561 | #!/usr/bin/env python3
# Copyright (C) 2017-2021 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"Tests for the `btclib.entropy` module."
import math
import secrets
from io import StringIO
from typing import List
import pytest
from btclib.exceptions import BTClibValueError
from btclib.mnemonic.entropy import (
_bits,
bin_str_entropy_from_bytes,
bin_str_entropy_from_entropy,
bin_str_entropy_from_int,
bin_str_entropy_from_random,
bin_str_entropy_from_rolls,
bin_str_entropy_from_str,
bin_str_entropy_from_wordlist_indexes,
bytes_entropy_from_str,
collect_rolls,
wordlist_indexes_from_bin_str_entropy,
)
def test_indexes() -> None:
    """Round-trip between binary-string entropy and wordlist indexes."""
    # Leading zeros up to one full 11-bit word still map to a single index 0.
    for entropy in ("0", "00000000000"):
        indexes = wordlist_indexes_from_bin_str_entropy(entropy, 2048)
        assert indexes == [0]

    entropy = "000000000000"
    indexes = wordlist_indexes_from_bin_str_entropy(entropy, 2048)
    assert indexes == [0, 0]

    test_vector = [
        [1268, 535, 810, 685, 433, 811, 1385, 1790, 421, 570, 567, 1313],
        [0, 0, 2047, 2047, 2047, 2047, 2047, 2047, 2047, 2047, 2047, 0],
        [0, 0, 2047, 2047, 2047, 2047, 2047, 2047, 2047, 2047, 2047, 0],
    ]
    # indexes -> entropy -> indexes must be the identity.
    for indx in test_vector:
        entropy = bin_str_entropy_from_wordlist_indexes(indx, 2048)
        indexes = wordlist_indexes_from_bin_str_entropy(entropy, 2048)
        assert indexes == indx
def test_conversions() -> None:
    """Round-trip entropy between str, int and bytes representations."""
    test_vectors = [
        "10101011" * 32,
        "00101011" * 32,
        "00000000" + "10101011" * 31,
    ]

    for raw in test_vectors:
        assert bin_str_entropy_from_str(raw) == raw
        i = int(raw, 2)
        assert bin_str_entropy_from_int(i) == raw
        assert bin_str_entropy_from_int(bin(i).upper()) == raw
        assert bin_str_entropy_from_int(hex(i).upper()) == raw
        b = i.to_bytes(32, byteorder="big", signed=False)
        assert bin_str_entropy_from_bytes(b) == raw
        assert bin_str_entropy_from_bytes(b.hex()) == raw
        assert bin_str_entropy_from_entropy(raw) == raw
        assert bin_str_entropy_from_entropy(i) == raw
        assert bin_str_entropy_from_entropy(b) == raw

    max_bits = max(_bits)

    # String entropy is truncated to the allowed size, not rounded.
    raw = "10" + "11111111" * (max_bits // 8)
    assert bin_str_entropy_from_entropy(raw) == bin_str_entropy_from_entropy(raw[:-2])

    # entr integer has its leftmost bit set to 0
    i = 1 << max_bits - 1
    bin_str_entropy = bin_str_entropy_from_entropy(i)
    assert len(bin_str_entropy) == max_bits

    # entr integer has its leftmost bit set to 1
    i = 1 << max_bits
    bin_str_entropy = bin_str_entropy_from_entropy(i)
    assert len(bin_str_entropy) == max_bits
    # The overflowing bit is dropped: the value is halved.
    exp_i = i >> 1
    i = int(bin_str_entropy, 2)
    assert i == exp_i

    # Random 255-bit value is padded to the 256-bit representation.
    i = secrets.randbits(255)
    raw = bin_str_entropy_from_int(i)
    assert int(raw, 2) == i
    assert len(raw) == 256

    assert bin_str_entropy_from_str(raw) == raw
    assert bin_str_entropy_from_int(hex(i).upper()) == raw

    b = i.to_bytes(32, byteorder="big", signed=False)
    assert bin_str_entropy_from_bytes(b) == raw

    raw2 = bin_str_entropy_from_int(i, 255)
    assert int(raw2, 2) == i
    assert len(raw2) == 255
    assert bin_str_entropy_from_str("0" + raw2) == raw

    # Explicit bit size truncates from the left.
    raw2 = bin_str_entropy_from_str(raw, 128)
    assert len(raw2) == 128
    assert raw2 == raw[:128]
def test_exceptions() -> None:
    """Invalid sizes, negative values and wrong types must raise."""
    bin_str_entropy216 = "00011010" * 27  # 216 bits
    bin_str_entropy214 = bin_str_entropy216[:-2]  # 214 bits

    # An explicitly allowed non-standard size is accepted as-is.
    entropy = bin_str_entropy_from_entropy(bin_str_entropy214, 214)
    assert entropy == bin_str_entropy214

    err_msg = "invalid number of bits: "
    # 214 is not in the default sizes [128, 160, 192, 224, 256, 512]...
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_entropy(bin_str_entropy214)
    # ...nor in an explicit [216].
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_entropy(bin_str_entropy214, 216)

    int_entropy211 = int(bin_str_entropy214, 2)  # 211 bits
    assert int_entropy211.bit_length() == 211

    # Integers are zero-padded up to the requested/derived size.
    entropy = bin_str_entropy_from_entropy(int_entropy211, 214)
    assert entropy == bin_str_entropy214

    entropy = bin_str_entropy_from_entropy(int_entropy211, 256)
    assert len(entropy) == 256
    assert int(entropy, 2) == int_entropy211

    entropy = bin_str_entropy_from_entropy(int_entropy211)
    assert len(entropy) == 224
    assert int(entropy, 2) == int_entropy211

    with pytest.raises(BTClibValueError, match="Negative entropy: "):
        bin_str_entropy_from_entropy(-1 * int_entropy211)

    bytes_entropy216 = int_entropy211.to_bytes(27, byteorder="big", signed=False)
    entropy = bin_str_entropy_from_entropy(bytes_entropy216, 214)
    assert entropy == bin_str_entropy214

    entropy = bin_str_entropy_from_entropy(bytes_entropy216, 216)
    assert entropy != bin_str_entropy216

    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_entropy(bytes_entropy216, 224)

    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_entropy(tuple())  # type: ignore
    with pytest.raises(ValueError):
        bin_str_entropy_from_int("not an int")  # type: ignore
    with pytest.raises(TypeError):
        bin_str_entropy_from_str(3)  # type: ignore

    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy = "01" * 65  # 130 bits
        bytes_entropy_from_str(bin_str_entropy)
# Canned stdin streams for test_collect_rolls (monkeypatched over sys.stdin).
inputs: List[StringIO] = []
# 2 input failures, then automatic rolls with default D6
inputs.append(StringIO("3\npluto\na\n"))
# D120, then 43 automatic rolls
inputs.append(StringIO("a120\n"))
# D120, one input failure, then 43 (implausible but valid) non-automatic rolls
inputs.append(StringIO("120\npluto\n" + "64\n" * 43))
def test_collect_rolls(monkeypatch):
    """Drive collect_rolls through the canned stdin streams above."""
    bits = 256
    for i, sides in enumerate((6, 120, 120)):
        monkeypatch.setattr("sys.stdin", inputs[i])
        dice_sides, dice_rolls = collect_rolls(bits)
        assert dice_sides == sides
        # Every roll must fit in the power-of-two base actually used.
        bits_per_roll = math.floor(math.log2(sides))
        base = 2 ** bits_per_roll
        for roll in dice_rolls:
            assert 0 < roll <= base
        # Exactly as many rolls as needed to cover `bits` are collected.
        min_roll_number = math.ceil(bits / bits_per_roll)
        assert len(dice_rolls) == min_roll_number
def test_bin_str_entropy_from_rolls() -> None:
bits = 256
dice_base = 20
bits_per_roll = math.floor(math.log2(dice_base))
base = 2 ** bits_per_roll
roll_number = math.ceil(bits / bits_per_roll)
rolls = [base for _ in range(roll_number)]
bin_str = bin_str_entropy_from_rolls(bits, dice_base, rolls)
assert bin_str == "1" * 256
rolls = [base for _ in range(2 * roll_number)]
bin_str = bin_str_entropy_from_rolls(bits, dice_base, rolls)
assert bin_str == "1" * 256
rolls = [1 for _ in range(roll_number)]
bin_str = bin_str_entropy_from_rolls(bits, dice_base, rolls)
assert bin_str == "0" * 256
rolls = [1 for _ in range(2 * roll_number)]
bin_str = bin_str_entropy_from_rolls(bits, dice_base, rolls)
assert bin_str == "0" * 256
rolls = [secrets.randbelow(base) + 1 for _ in range(roll_number)]
bin_str = bin_str_entropy_from_rolls(bits, dice_base, rolls)
assert len(bin_str) == 256
rolls = [secrets.randbelow(base) + 1 for _ in range(roll_number)]
bin_str2 = bin_str_entropy_from_rolls(bits, dice_base, rolls)
assert len(bin_str2) == 256
assert bin_str != bin_str2
bin_str = bin_str_entropy_from_rolls(bits - 1, dice_base, rolls)
assert len(bin_str) == bits - 1
rolls = [base for _ in range(roll_number + 1)]
bin_str = bin_str_entropy_from_rolls(bits + 1, dice_base, rolls)
assert len(bin_str) == bits + 1
rolls = [base for _ in range(roll_number + 1)]
|
jamespcole/home-assistant | homeassistant/components/abode/camera.py | Python | apache-2.0 | 2,683 | 0 | """Support for Abode Security System cameras."""
from datetime import timedelta
import logging
import requests
from homeassistant.components.camera import Camera
from homeassistant.util import Throttle
from . import DOMAIN as ABODE_DOMAIN, AbodeDevice
DEPENDENCIES = ['abode']
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=90)
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up Abode camera devices."""
    import abodepy.helpers.constants as CONST
    import abodepy.helpers.timeline as TIMELINE

    data = hass.data[ABODE_DOMAIN]

    cameras = []
    for device in data.abode.get_devices(generic_type=CONST.TYPE_CAMERA):
        # Honour the user's exclusion list.
        if data.is_excluded(device):
            continue
        cameras.append(AbodeCamera(data, device, TIMELINE.CAPTURE_IMAGE))

    data.devices.extend(cameras)
    add_entities(cameras)
|
class AbodeCamera(AbodeDevice, Camera):
    """Representation of an Abode camera."""

    def __init__(self, data, device, event):
        """Initialize the Abode device."""
        AbodeDevice.__init__(self, data, device)
        Camera.__init__(self)
        # Timeline event type whose callback triggers an image refresh.
        self._event = event
        # Last successful HTTP response holding the captured image.
        self._response = None

    async def async_added_to_hass(self):
        """Subscribe Abode events."""
        await super().async_added_to_hass()
        self.hass.async_add_job(
            self._data.abode.events.add_timeline_callback,
            self._event, self._capture_callback
        )

    def capture(self):
        """Request a new image capture."""
        return self._device.capture()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def refresh_image(self):
        """Find a new image on the timeline."""
        # Only download when the device reports a newer capture.
        if self._device.refresh_image():
            self.get_image()

    def get_image(self):
        """Attempt to download the most recent capture."""
        if self._device.image_url:
            try:
                self._response = requests.get(
                    self._device.image_url, stream=True)

                self._response.raise_for_status()
            except requests.HTTPError as err:
                # Keep going without an image; next refresh may succeed.
                _LOGGER.warning("Failed to get camera image: %s", err)
                self._response = None
        else:
            self._response = None

    def camera_image(self):
        """Get a camera image."""
        self.refresh_image()

        if self._response:
            return self._response.content

        return None

    def _capture_callback(self, capture):
        """Update the image with the device then refresh device."""
        self._device.update_image_location(capture)
        self.get_image()
        self.schedule_update_ha_state()
|
colaftc/webtool | top/api/rest/RefundsReceiveGetRequest.py | Python | mit | 506 | 0.033597 | '''
Created by auto_sdk on 2015.10.22
'''
from top.api.base import RestApi
class RefundsReceiveGetRequest(RestApi):
	"""Request object for the taobao.refunds.receive.get TOP API call."""

	def __init__(self, domain='gw.api.taobao.com', port=80):
		RestApi.__init__(self, domain, port)
		# Optional request parameters; fields left as None are omitted
		# from the outgoing request.
		self.buyer_nick = None
		self.end_modified = None
		self.fields = None
		self.page_no = None
		self.page_size = None
		self.start_modified = None
		self.status = None
		self.type = None
		self.use_has_next = None

	def getapiname(self):
		# API method name used by the TOP gateway for routing.
		return 'taobao.refunds.receive.get'
|
CaptainHayashi/lass | website/models/website_class.py | Python | gpl-2.0 | 2,747 | 0.000364 | """
The singleton class that allows metadata and other attachables to be
attached to the entire website.
As the website at this level is one item of data rather than an entire
model, we have to use a singleton class to attach metadata to it.
"""
from django.conf import settings
from django.contrib.sites.models import Site
from metadata.models import PackageEntry, ImageMetadata, TextMetadata
from metadata.mixins import MetadataSubjectMixin
class Website(MetadataSubjectMixin):
    """
    Class representing the website itself.
    This does not hold any data on its own, so in order to acquire a
    website object for running metadata queries, just run Website().
    """
    def __init__(self, request):
        """
        Initialises a Website object.
        :param request: The HttpRequest object of the current page.
        :type request: HttpRequest
        :rtype: Website
        """
        self.request = request
        self.pk = 1 # Needed for the metadata system
    def metadata_strands(self):
        """Return the metadata strands (text/image) attached to the site."""
        return {
            "text": WebsiteTextMetadata.objects,
            "image": WebsiteImageMetadata.objects,
        }
    def packages(self):
        """Return the manager for package entries attached to the site."""
        return WebsitePackageEntry.objects
    ## Template-exposed API ##
    def root(self):
        """
        Returns the URI of the root of the website, for concatenating
        things like STATIC_URL onto it.
        Please please PLEASE try using decoupling-friendly features
        such as 'get_absolute_uri' and whatnot before this.
        """
        return self.request.build_absolute_uri('/').rstrip('/')
    def site(self):
        """
        Returns the current Django Sites Framework site.

        Returns None when no current site is configured.
        """
        try:
            site = Site.objects.get_current()
        except Site.DoesNotExist:
            site = None
        return site
# Dynamically generated model binding free-text metadata to the Website
# singleton; table/column names are overridable through Django settings.
WebsiteTextMetadata = TextMetadata.make_model(
    Website,
    'website',
    table=getattr(
        settings,
        'WEBSITE_TEXT_METADATA_DB_TABLE',
        None
    ),
    id_column=getattr(
        settings,
        'WEBSITE_TEXT_METADATA_DB_ID_COLUMN',
        None
    ),
    fkey=None,
)
# Same pattern for image metadata attached to the website.
WebsiteImageMetadata = ImageMetadata.make_model(
    Website,
    'website',
    table=getattr(
        settings,
        'WEBSITE_IMAGE_METADATA_DB_TABLE',
        None
    ),
    id_column=getattr(
        settings,
        'WEBSITE_IMAGE_METADATA_DB_ID_COLUMN',
        None
    ),
    fkey=None,
)
# Same pattern for package entries attached to the website.
WebsitePackageEntry = PackageEntry.make_model(
    Website,
    'website',
    table=getattr(
        settings,
        'WEBSITE_PACKAGE_ENTRY_DB_TABLE',
        None
    ),
    id_column=getattr(
        settings,
        'WEBSITE_PACKAGE_ENTRY_DB_ID_COLUMN',
        None
    ),
    fkey=None,
)
|
rabipanda/tensorflow | tensorflow/contrib/boosted_trees/python/ops/batch_ops_utils.py | Python | apache-2.0 | 5,283 | 0.009654 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for batching remote OPs together to reduce RPC overhead."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
class ScheduledOp(object):
  """Represents a scheduled remote operation."""
  # NOTE(review): Python 2 style metaclass assignment; under Python 3 this has
  # no effect and the class would not actually enforce abstractness.
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def batching_key(self):
    """Returns the key for batching operations."""
  @abc.abstractmethod
  def batch_runner_fn(self):
    """Returns the function that executes the operation on the batch."""
class ScheduledStampedResourceOp(ScheduledOp):
  """Wrapper class for batched operations on stamped resources.

  Args:
    resource_handle: handle tensor of the stamped resource the op targets.
    op: callable executed over a batch of resource handles.
    **kwargs: per-op keyword arguments, grouped later by the batch runner.
  """

  def __init__(self, resource_handle, op, **kwargs):
    self.resource_handle = resource_handle
    self.op = op
    self.args = kwargs

  def batching_key(self):
    # We want to group the same operations on the same device and run them in
    # one batch. So we use (device, operation) as the key.
    return self.resource_handle.device, self.op

  def batch_runner_fn(self):
    return _scheduled_stamp_resource_op_runner
def _move_tensors(tensors, device):
  """Moves a list of tensors to a device by concatenating/splitting them."""
  # Reset the device setting to avoid weird interactions with device merging
  # logic.
  with ops.device(None):
    if all(tensor.shape == tensor_shape.scalar() for tensor in tensors):
      # All scalars: a single stack/unstack round-trip moves them in one RPC.
      with ops.device(tensors[0].device):
        values = array_ops.stack(tensors)
      with ops.device(device):
        return array_ops.unstack(values)
    else:
      # Variable-length tensors: concat along axis 0, carry the per-tensor
      # sizes across, and split back on the target device.
      with ops.device(tensors[0].device):
        sizes = array_ops.stack(
            [array_ops.shape(tensor)[0] for tensor in tensors])
        values = array_ops.concat(tensors, axis=0)
      with ops.device(device):
        sizes = array_ops.unstack(sizes)
        return list(array_ops.split(values, sizes, axis=0))
def _scheduled_stamp_resource_op_runner(batch, stamp):
  """Runs a batch operation on a stamped resource.

  Args:
    batch: list of ScheduledStampedResourceOp sharing the same batching key.
    stamp: stamp token forwarded to the resource op.
  Returns:
    The result of running the batched op, or None for an empty batch.
  Raises:
    ValueError: if the scheduled ops do not all share the same argument names.
  """
  if not batch:
    return
  arg_keys = set(batch[0].args.keys())
  grouped_args = collections.OrderedDict()
  resource_handles = []
  # Check that the set of arguments is the same across all the scheduled ops.
  for op in batch:
    if set(op.args.keys()) != arg_keys:
      # BUGFIX: ValueError was previously given logging-style arguments
      # ("%s", a, b), so the message was never interpolated.
      raise ValueError("Mismatching arguments: %s, %s." % (op.args, arg_keys))
    for key in arg_keys:
      grouped_args.setdefault(key, []).append(op.args[key])
    resource_handles.append(op.resource_handle)
  # Move all the inputs to the op device in one RPC.
  grouped_args = collections.OrderedDict(
      (k, _move_tensors(v, resource_handles[0].device))
      for k, v in sorted(grouped_args.items()))
  with ops.device(resource_handles[0].device):
    return batch[0].op(resource_handles, stamp, **grouped_args)
def run_handler_scheduled_ops(per_handler_ops, stamp, worker_device):
  """Given a dictionary of ops for each handler, runs them in batch.

  Args:
    per_handler_ops: dict mapping handler -> list of ScheduledOp.
    stamp: stamp token forwarded to each batch runner.
    worker_device: device the results are moved back to.
  Returns:
    dict mapping handler -> list of per-op results (in scheduling order).
  Raises:
    ValueError: if a batch runner returns an unsupported result type.
  """
  batched_ops = collections.OrderedDict()
  # Group the ops by their batching_key. Ops that share the same batching key
  # can be executed together.
  for handler in per_handler_ops.keys():
    for op in per_handler_ops[handler]:
      key = (op.batching_key(), op.batch_runner_fn())
      batched_ops.setdefault(key, []).append(op)
  op_results = {}
  for batch in batched_ops.values():
    # Run each of the batched ops using its runner.
    results = batch[0].batch_runner_fn()(batch, stamp)
    # If the result is a tuple, move each entry in the tuple in one RPC.
    if isinstance(results, tuple):
      results = tuple(
          _move_tensors(result, worker_device) for result in results)
      # Once all the results are on the worker, create individual tuple for
      # each scheduled op request.
      for i in range(len(batch)):
        op_results[batch[i]] = tuple(result[i] for result in results)
    # If the result is an Operation, it didn't have any outputs, so use the
    # `ops.Operation` as the result for all the scheduled ops.
    elif isinstance(results, ops.Operation):
      for i in range(len(batch)):
        op_results[batch[i]] = results
    else:
      # BUGFIX: ValueError was previously given logging-style arguments, so
      # the message was never interpolated.
      raise ValueError("Unknown type of result %s." % (results,))
  handler_results = collections.defaultdict(list)
  # Dispatch the results of the ScheduledOps to the handlers that requested
  # them.
  for handler in per_handler_ops.keys():
    for op in per_handler_ops[handler]:
      handler_results[handler].append(op_results[op])
  return handler_results
|
smalley/python | exercises/grade-school/grade_school_test.py | Python | mit | 2,435 | 0.002053 | import unittest
from grade_school import School
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0
class GradeSchoolTest(unittest.TestCase):
    """Exercises the School roster/grade API (exercism grade-school)."""
    def test_adding_a_student_adds_them_to_the_sorted_roster(self):
        school = School()
        school.add_student(name="Aimee", grade=2)
        expected = ["Aimee"]
        self.assertEqual(school.roster(), expected)
    def test_adding_more_student_adds_them_to_the_sorted_roster(self):
        school = School()
        school.add_student(name="Blair", grade=2)
        school.add_student(name="James", grade=2)
        school.add_student(name="Paul", grade=2)
        expected = ["Blair", "James", "Paul"]
        self.assertEqual(school.roster(), expected)
    def test_adding_students_to_different_grades_adds_them_to_the_same_sorted_roster(
        self
    ):
        school = School()
        school.add_student(name="Chelsea", grade=3)
        school.add_student(name="Logan", grade=7)
        expected = ["Chelsea", "Logan"]
        self.assertEqual(school.roster(), expected)
    def test_roster_returns_an_empty_list_if_there_are_no_students_enrolled(self):
        school = School()
        expected = []
        self.assertEqual(school.roster(), expected)
    def test_student_names_with_grades_are_displayed_in_the_same_sorted_roster(self):
        school = School()
        school.add_student(name="Peter", grade=2)
        school.add_student(name="Anna", grade=1)
        school.add_student(name="Barb", grade=1)
        school.add_student(name="Zoe", grade=2)
        school.add_student(name="Alex", grade=2)
        school.add_student(name="Jim", grade=3)
        school.add_student(name="Charlie", grade=1)
        expected = ["Anna", "Barb", "Charlie", "Alex", "Peter", "Zoe", "Jim"]
        self.assertEqual(school.roster(), expected)
    def test_grade_returns_the_students_in_that_grade_in_alphabetical_order(self):
        school = School()
        school.add_student(name="Franklin", grade=5)
        school.add_student(name="Bradley", grade=5)
        school.add_student(name="Jeff", grade=1)
        expected = ["Bradley", "Franklin"]
        self.assertEqual(school.grade(5), expected)
    def test_grade_returns_an_empty_list_if_there_are_no_students_in_that_grade(self):
        school = School()
        expected = []
        self.assertEqual(school.grade(1), expected)
if __name__ == "__main__":
    unittest.main()
|
interactiveaudiolab/nussl | tests/core/test_mixing.py | Python | mit | 1,683 | 0.001188 | import nussl
import numpy as np
import pytest
def test_pan_audio_signal(mix_and_sources):
    """Panning hard left/right must put all energy in one channel and
    out-of-range angles must raise ValueError."""
    mix, sources = mix_and_sources
    sources = list(sources.values())
    # Hard left (-45): right channel silent, left carries everything.
    panned_audio = nussl.mixing.pan_audio_signal(sources[0], -45)
    zeros = np.zeros_like(panned_audio.audio_data[0])
    sum_ch = np.sum(panned_audio.audio_data, axis=0)
    assert np.allclose(panned_audio.audio_data[1], zeros)
    assert np.allclose(panned_audio.audio_data[0], sum_ch)
    # Hard right (45): left channel silent, right carries everything.
    panned_audio = nussl.mixing.pan_audio_signal(sources[0], 45)
    zeros = np.zeros_like(panned_audio.audio_data[0])
    sum_ch = np.sum(panned_audio.audio_data, axis=0)
    assert np.allclose(panned_audio.audio_data[0], zeros)
    assert np.allclose(panned_audio.audio_data[1], sum_ch)
    # Angles outside [-45, 45] are rejected.
    pytest.raises(ValueError, nussl.mixing.pan_audio_signal, mix, -46)
    pytest.raises(ValueError, nussl.mixing.pan_audio_signal, mix, 46)
def test_delay_audio_signal(mix_and_sources):
    """Per-channel delays must shift each channel by its delay (in samples),
    and invalid delay lists (wrong length, negative, non-integer) must raise."""
    mix, sources = mix_and_sources
    sources = list(sources.values())
    a = nussl.mixing.pan_audio_signal(sources[0], -35)
    b = nussl.mixing.pan_audio_signal(sources[1], 15)
    mix = a + b
    delays = [np.random.randint(1, 1000) for _ in range(mix.num_channels)]
    delayed_audio = nussl.mixing.delay_audio_signal(mix, delays)
    for i, d in enumerate(delays):
        # Channel i delayed by d samples equals the original truncated by d.
        _est = delayed_audio.audio_data[i]
        _true = mix.audio_data[i, :-d]
        assert np.allclose(_est[d:], _true)
    pytest.raises(ValueError, nussl.mixing.delay_audio_signal, mix, [0, 0, 0])
    pytest.raises(ValueError, nussl.mixing.delay_audio_signal, mix, [0, -10, 0])
    pytest.raises(ValueError, nussl.mixing.delay_audio_signal, mix, [0, .1, 2.0])
|
jslag/gml | gml.py | Python | gpl-2.0 | 3,630 | 0.02259 | #!/usr/bin/env python
#
# Copyright (C) 2004 Mark H. Lyon <mark@marklyon.org>
#
# This file is the Mbox & Maildir to Gmail Loader (GML).
#
# Mbox & Maildir to Gmail Loader (GML) is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# GML is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with GML; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Origional development thread at Ars Technica:
# http://episteme.arstechnica.com/eve/ubb.x?a=tpc&s=50009562&f=6330927813&m=108000474631
#
# Version 0.1 - 15 Jun 04 16:28 Supports Mbox
# Version 0.2 - 15 Jun 04 18:48 Implementing Magus` suggestion for Maildir
# Version 0.3 - 16 Jun 04 16:17 Implement Rold Gold suggestion for counters
# Version 0.4 - 17 Jun 04 13:15 Add support for changing SMTP server at command line
# Version 0.5 - 05 Oct 09 redo exception handling to see what Google's
# complaints are on failure, update to use TLS
import mailbox, smtplib, sys, time, string
def main ():
print "\nMbox & Maildir to Gmail Loader (GML) by Mark Lyon <mark@marklyon.org>\n"
if len(sys.argv) in (5, 6) :
boxtype_in = sys.argv[1]
mailboxname_in = sys.argv[2]
emailname_in = sys.argv[3]
password_in = sys.argv[4]
else:
usage()
try:
smtpserver_in = sys.argv[5]
except:
smtpserver_in = 'smtp.gmail.com'
print "Using smtpserver %s\n" % smtpserver_in
count = [0,0,0]
try:
if boxtype_in == "maildir":
mb = mailbox.Maildir(mailboxname_in)
else:
mb = mailbox.UnixMailbox (file(mailboxname_in,'r'))
msg = mb.next()
except:
print "*** Can't open file or directory. Is the path correct? ***\n"
usage()
while msg is not None:
try:
document = msg.fp.read()
except:
count[2] = count[2] + 1
print "*** %d MESSAGE READ FAILED, SKIPPED" % (count[2])
msg = mb.next()
if document is not None:
fullmsg = msg.__str__( ) + '\x0a' + document
server = smtplib.SMTP(smtpserver_in)
#server.set_debuglevel(1)
server.ehlo()
server.starttls()
# smtpli | b won't send auth info without this second ehlo after
# starttls -- thanks to
# http://bytes.com/top | ic/python/answers/475531-smtplib-authentication-required-error
# for the tip
server.ehlo()
server.login(emailname_in, password_in)
server.sendmail(msg.getaddr('From')[1], emailname_in, fullmsg)
server.quit()
count[0] = count[0] + 1
print " %d Forwarded a message from: %s" % (count[0], msg.getaddr('From')[1])
msg = mb.next()
print "\nDone. Stats: %d success %d error %d skipped." % (count[0], count[1], count[2])
def usage():
    # Print command-line usage examples, then terminate the program.
    print 'Usage: gml.py [mbox or maildir] [mbox file or maildir path] [gmail address] [gmail password] [Optional SMTP Server]'
    print 'Exmpl: gml.py mbox "c:\mail\Inbox" marklyon@gmail.com password'
    print 'Exmpl: gml.py maildir "c:\mail\Inbox\" marklyon@gmail.com password gsmtp171.google.com\n'
    sys.exit()
# Script entry point.
if __name__ == '__main__':
    main ()
|
bradleyayers/xhtml2pdf | xhtml2pdf/tags.py | Python | apache-2.0 | 20,336 | 0.004081 | # -*- coding: utf-8 -*-
from reportlab.graphics.barcode import createBarcodeDrawing
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import inch, mm
from reportlab.platypus.doctemplate import NextPageTemplate, FrameBreak
from reportlab.platypus.flowables import Spacer, HRFlowable, PageBreak, Flowable
from reportlab.platypus.frames import Frame
from reportlab.platypus.paraparser import tt2ps, ABag
from xhtml2pdf import xhtml2pdf_reportlab
from xhtml2pdf.util import getColor, getSize, getAlign, dpi96
from xhtml2pdf.xhtml2pdf_reportlab import PmlImage, PmlPageTemplate
import copy
import logging
import re
import warnings
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
log = logging.getLogger("xhtml2pdf")
def deprecation(message):
    """Emit a DeprecationWarning announcing that tag *message* is deprecated.

    stacklevel=2 attributes the warning to the caller, not this helper.
    """
    text = "<%s> is deprecated!" % message
    warnings.warn(text, DeprecationWarning, stacklevel=2)
class pisaTag:
    """
    Base class for all tag handlers.

    Stores the DOM node, its tag name and parsed attributes; subclasses
    override start()/end() to act on the rendering context.
    """
    def __init__(self, node, attr):
        self.node, self.attr = node, attr
        self.tag = node.tagName
    def start(self, c):
        """Called when the opening tag is encountered; default is a no-op."""
    def end(self, c):
        """Called when the closing tag is encountered; default is a no-op."""
class pisaTagBODY(pisaTag):
    """
    We can also assume that there is a BODY tag because html5lib
    adds it for us. Here we take the base font size for later calculations
    in the FONT tag.
    """
    def start(self, c):
        c.baseFontSize = c.frag.fontSize
        # print "base font size", c.baseFontSize
class pisaTagTITLE(pisaTag):
    """Stores the document title in the context meta data."""
    def end(self, c):
        c.meta["title"] = c.text
        c.clearFrag()
class pisaTagSTYLE(pisaTag):
    """Swallows inline <style> content so it is not rendered as text."""
    def start(self, c):
        c.addPara()
    def end(self, c):
        c.clearFrag()
class pisaTagMETA(pisaTag):
    """Copies author/subject/keywords meta tags into the context meta data."""
    def start(self, c):
        name = self.attr.name.lower()
        if name in ("author" , "subject", "keywords"):
            c.meta[name] = self.attr.content
class pisaTagSUP(pisaTag):
    """Marks the current fragment as superscript."""
    def start(self, c):
        c.frag.super = 1
class pisaTagSUB(pisaTag):
    """Marks the current fragment as subscript."""
    def start(self, c):
        c.frag.sub = 1
class pisaTagA(pisaTag):
    """Handles <a>: registers named anchors and attaches hyperlink targets."""
    # Matches internal (#fragment) and scheme-prefixed (http:, mailto:, ...)
    # link targets.
    rxLink = re.compile("^(#|[a-z]+\:).*")
    def start(self, c):
        attr = self.attr
        # XXX Also support attr.id ?
        if attr.name:
            # Important! Make sure that cbDefn is not inherited by other
            # fragments because of a bug in Reportlab!
            afrag = c.frag.clone()
            # These 3 lines are needed to fix an error with non internal fonts
            afrag.fontName = "Helvetica"
            afrag.bold = 0
            afrag.italic = 0
            afrag.cbDefn = ABag(
                kind="anchor",
                name=attr.name,
                label="anchor")
            c.fragAnchor.append(afrag)
            c.anchorName.append(attr.name)
        if attr.href and self.rxLink.match(attr.href):
            c.frag.link = attr.href
    def end(self, c):
        pass
class pisaTagFONT(pisaTag):
    """Handles <font color=... face=... size=...> on the current fragment."""
    # Source: http://www.w3.org/TR/CSS21/fonts.html#propdef-font-size
    def start(self, c):
        if self.attr["color"] is not None:
            c.frag.textColor = getColor(self.attr["color"])
        if self.attr["face"] is not None:
            c.frag.fontName = c.getFontName(self.attr["face"])
        if self.attr["size"] is not None:
            size = getSize(self.attr["size"], c.frag.fontSize, c.baseFontSize)
            # Clamp to a minimum of 1pt so a bad size can't break layout.
            c.frag.fontSize = max(size, 1.0)
    def end(self, c):
        pass
class pisaTagP(pisaTag):
    """Handles paragraph-like block tags; applies the align attribute."""
    def start(self, c):
        # save the type of tag; it's used in PmlBaseDoc.afterFlowable()
        # to check if we need to add an outline-entry
        # c.frag.tag = self.tag
        if self.attr.align is not None:
            #print self.attr.align, getAlign(self.attr.align)
            c.frag.alignment = getAlign(self.attr.align)
# The block-level tags below share the paragraph behaviour unchanged.
class pisaTagDIV(pisaTagP): pass
class pisaTagH1(pisaTagP): pass
class pisaTagH2(pisaTagP): pass
class pisaTagH3(pisaTagP): pass
class pisaTagH4(pisaTagP): pass
class pisaTagH5(pisaTagP): pass
class pisaTagH6(pisaTagP): pass
def listDecimal(c):
    # Advance the context's ordered-list counter and return "<n>." as the
    # bullet text (Python 2 ``unicode`` builtin).
    c.listCounter += 1
    return unicode("%d." % c.listCounter)
# Bullet character (U+2022) used for unordered lists.
_bullet = u"\u2022"
# Maps CSS 'list-style-type' values to either a literal marker string or a
# callable(context) that produces the next marker. All numbered styles fall
# back to plain decimal numbering.
_list_style_type = {
    "none": u"",
    "disc": _bullet,
    "circle": _bullet, # XXX PDF has no equivalent
    "square": _bullet, # XXX PDF has no equivalent
    "decimal": listDecimal,
    "decimal-leading-zero": listDecimal,
    "lower-roman": listDecimal,
    "upper-roman": listDecimal,
    "hebrew": listDecimal,
    "georgian": listDecimal,
    "armenian": listDecimal,
    "cjk-ideographic": listDecimal,
    "hiragana": listDecimal,
    "katakana": listDecimal,
    "hiragana-iroha": listDecimal,
    "katakana-iroha": listDecimal,
    "lower-latin": listDecimal,
    "lower-alpha": listDecimal,
    "upper-latin": listDecimal,
    "upper-alpha": listDecimal,
    "lower-greek": listDecimal,
}
class pisaTagUL(pisaTagP):
    """Handles <ul>: saves/restores the list counter around the list body."""
    def start(self, c):
        # Swap in a fresh counter for this (possibly nested) list.
        self.counter, c.listCounter = c.listCounter, 0
    def end(self, c):
        c.addPara()
        # XXX Simulate margin for the moment
        c.addStory(Spacer(width=1, height=c.fragBlock.spaceAfter))
        c.listCounter = self.counter
class pisaTagOL(pisaTagUL):
    """Handles <ol>; identical to <ul>, numbering comes from listDecimal."""
    pass
class pisaTagLI(pisaTag):
    """Handles <li>: builds the bullet fragment (image or text marker)."""
    def start(self, c):
        # Resolve the marker for the current list style (string or callable).
        lst = _list_style_type.get(c.frag.listStyleType or "disc", _bullet)
        #log.debug("frag %r", c.copyFrag(
        #    text=lst,
        #    bulletFontName=c.getFontName("helvetica"),
        #    fontName=c.getFontName("helvetica")))
        # c.addFrag("")
        #frag = ParaFrag()
        #frag.fontName = frag.bulletFontName = c.getFontName("helvetica")
        #frag.fontSize = c.frag.fontSize
        #c.frag.fontName = c.getFontName("helvetica")
        frag = copy.copy(c.frag)
        #print "###", c.frag.fontName
        #frag.fontName = "au_00" # c.getFontName("helvetica")
        #frag.bulletFontName = "au_00" # c.getFontName("helvetica")
        # Extra space added before the block when an image marker is taller
        # than the text line.
        self.offset = 0
        if frag.listStyleImage is not None:
            # Image marker (list-style-image): scale by DPI and zoom.
            frag.text = u""
            f = frag.listStyleImage
            if f and (not f.notFound()):
                img = PmlImage(
                    f.getData(),
                    width=None,
                    height=None)
                img.drawHeight *= dpi96
                img.drawWidth *= dpi96
                img.pisaZoom = frag.zoom
                img.drawWidth *= img.pisaZoom
                img.drawHeight *= img.pisaZoom
                frag.image = img
                self.offset = max(0, img.drawHeight - c.frag.fontSize)
        else:
            if type(lst) == type(u""):
                frag.text = lst
            else:
                # XXX This should be the recent font, but it throws errors in Reportlab!
                frag.text = lst(c)
        # XXX This should usually be done in the context!!!
        frag.fontName = frag.bulletFontName = tt2ps(frag.fontName, frag.bold, frag.italic)
        c.frag.bulletText = [frag]
    def end(self, c):
        c.fragBlock.spaceBefore += self.offset
        #c.fragBlock.bulletText = self.bulletText
        #print 999, self.bulletText
        # c.addPara()
class pisaTagBR(pisaTag):
    """Handles <br>: flushes the current fragment with a forced line break."""
    def start(self, c):
        # print "BR", c.text[-40:]
        c.frag.lineBreak = 1
        c.addFrag()
        c.fragStrip = True
        # lineBreak must not leak into subsequent fragments.
        del c.frag.lineBreak
        c.force = True
class pisaTagIMG(pisaTag):
def start(self, c):
attr = self.attr
if attr.src and (not attr.src.notFound()):
try:
align = attr.align or c.frag.vAlign or "baseline"
# print "align", align, attr.align, c.frag.vAlign
width = c.frag.width
height |
zencoders/pyircbot | pyircbot.py | Python | gpl-2.0 | 2,596 | 0.014638 | #! /usr/bin/env python
# Copyright (c) 2013 sentenza
"""
A simple python-twisted IRC bot with greetings and karma functionalities
Usage:
$ python pyircbot.py --help
"""
import sys
import optparse
from config import ConfigManager
from bot_core.bot_factory import BotFactory
if __name__ == '__main__':
config_manager = ConfigManager()
usage = """usage: %prog [options]
* Basic configuration file 'bot.conf' could be used instead
** Information will be stored in a directory called<CHANNEL>-data"""
parser = optparse.OptionParser(usage)
parser.add_option("-s", "--server", dest="server_address",
action="store",
default = config_manager.server_address,
help="IRC server address, default %s" % config_manager.server_address)
parser.add_option("-p", "--port", dest="port",
action="store",
type="int",
default = config_manager.server_port,
help="Server port, default %s" % config_manager.server_port)
parser.add_option("-c", "--channel", dest="channel",
action="store",
type="string",
default = config_manager.channel,
help="Channel name, default %s" % config_manager.channel)
parser.add_option("-n", "--nick", dest="nick",
action="store",
default = config_manager.bot_nick,
help="Bot nickname %s" % config_manager.bot_nick)
parser.add_option("-g", "--greeting", dest="greeting_probability",
action="store",
type="int",
default = 30,
help="Greeting probability [1 - 100]")
parser.add_option("-v", "--verbose", dest="verbose",
action="store_true",
help="Print a lot of stuff...")
options, args = parser.parse_args()
# Set options to ConfigManager
config_manager.server_address = o | ptions.server_address
config_manager.server_port = options.port
config_manager.channel = options.channel
config_manager.bot_nick = options.nick
config_manager.verbose = options.verbose
config_manager.greeting_probability = options.greeting_probability
#if not options.<something>:
# parser.error('Must choose one option try -n or -c or --he | lp')
if config_manager.verbose:
print "Information will be stored in ", config_manager.data_path
factory = BotFactory()
factory.connect()
factory.run()
|
jackrzhang/zulip | zerver/migrations/0187_userprofile_is_billing_admin.py | Python | apache-2.0 | 498 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-22 05:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the indexed ``is_billing_admin`` boolean flag to UserProfile."""

    dependencies = [
        ('zerver', '0186_userprofile_starred_message_counts'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='is_billing_admin',
            field=models.BooleanField(db_index=True, default=False),
        ),
    ]
|
anomaly/prestans | prestans/provider/throttle.py | Python | bsd-3-clause | 1,895 | 0.001583 | # -*- coding: utf-8 -*-
#
# prestans, A WSGI compliant REST micro-framework
# http://prestans.org
#
# Copyright (c) 2017, Anomaly Software Pty Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Anomaly Software nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANOMALY SOFTWARE BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
class Base(object):
    """Base class for throttle providers.

    Exposes a single read/write ``debug`` flag, off by default.
    """

    def __init__(self):
        # Backing store for the ``debug`` property.
        self._debug = False

    def _get_debug(self):
        return self._debug

    def _set_debug(self, value):
        self._debug = value

    # Same public attribute as before, spelled with property().
    debug = property(_get_debug, _set_debug)
|
open-synergy/opnsynid-hr | hr_attendance_computation/models/hr_attendance.py | Python | agpl-3.0 | 21,338 | 0.000703 | # -*- coding: utf-8 -*-
# Copyright 2011 Domsense srl (<http://www.domsense.com>)
# Copyright 2011-15 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright 2017 OpenSynergy Indonesia (<https://opensynergy-indonesia.com>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from __future__ import division
import math
import time
from datetime import datetime, timedelta
# from openerp.tools import float_compare
import pytz
from openerp import api, fields, models
from openerp.exceptions import Warning as UserError
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class HrAttendance(models.Model):
# ref: https://bugs.launchpad.net/openobject-client/+bug/887612
# test: 0.9853 - 0.0085
_inherit = "hr.attendance"
def float_time_convert(self, float_val):
hours = math.floor(abs(float_val))
mins = abs(float_val) - hours
mins = round(mins * 60)
# Original Code
# Comment by Reason:
# 1. M | ins can't be greater than 60
# ====================================
# if mins >= 60.0:
# hours = hours + 1
# mins = 0.0
float_time = "%02d:%02d" % (hours, mins)
return | float_time
    def float_to_datetime(self, float_val):
        """Convert a float amount of hours into a datetime anchored at
        1900-01-01, rolling whole days into the day component.

        NOTE: the file imports ``from __future__ import division``, so
        ``hours / 24`` is true division; correctness relies on the int()
        truncation of ``days`` below.
        """
        str_float = self.float_time_convert(float_val)
        hours = int(str_float.split(":")[0])
        minutes = int(str_float.split(":")[1])
        days = 1
        if hours / 24 > 0:
            days += hours / 24
            hours = hours % 24
        return datetime(1900, 1, int(days), hours, minutes)
    # Original Code
    # Comment by Reason:
    # 1. Not used
    # ==================================================
    # def float_to_timedelta(self, float_val):
    #     str_time = self.float_time_convert(float_val)
    #     int_hour = int(str_time.split(":")[0])
    #     int_minute = int(str_time.split(":")[1])
    #     return timedelta(
    #         0,
    #         (int_hour * 3600.0) + (int_minute * 6.0)),
    def total_seconds(self, td):
        # timedelta.total_seconds() backport (kept for Python 2.6 compat).
        return (
            td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6
        ) / 10 ** 6
    def time_difference(self, float_start_time, float_end_time, help_message=False):
        """Return ``float_end_time - float_start_time`` in hours, computed
        through float_to_datetime so day roll-over is handled."""
        # Original Code
        # Condition:
        # 1. End Time = Duration within working schedule
        # 2. Start Time = Duration
        # Comment by Reason:
        # 1. Start Time can't be greater than end time
        # ================================================================
        # if float_compare(
        #     float_end_time, float_start_time, precision_rounding=0.0000001
        # ) == -1:
        # that means a difference smaller than 0.36 milliseconds
        # message = _('End time %s < start time %s %s') % (
        #     unicode(float_end_time),
        #     unicode(float_start_time),
        #     help_message and '(' + help_message + ')' or ''
        # )
        # raise UserError(message)
        delta = self.float_to_datetime(float_end_time) - self.float_to_datetime(
            float_start_time
        )
        return self.total_seconds(delta) / 3600.0
    def time_sum(self, float_first_time, float_second_time):
        """Return the sum of two float-hour values, in hours, going through
        the "HH:MM" representation (so each operand is minute-rounded)."""
        str_first_time = self.float_time_convert(float_first_time)
        first_timedelta = timedelta(
            0,
            int(str_first_time.split(":")[0]) * 3600.0
            + int(str_first_time.split(":")[1]) * 60.0,
        )
        str_second_time = self.float_time_convert(float_second_time)
        second_timedelta = timedelta(
            0,
            int(str_second_time.split(":")[0]) * 3600.0
            + int(str_second_time.split(":")[1]) * 60.0,
        )
        return self.total_seconds(first_timedelta + second_timedelta) / 60.0 / 60.0
    def split_interval_time_by_precision(
        self, start_datetime, duration, precision=0.25
    ):
        # start_datetime: datetime, duration: hours, precision: hours
        # returns [(datetime, hours)]
        # Slices [start, start+duration) into precision-sized chunks; a final
        # remainder larger than half a chunk is rounded UP to a full chunk.
        res = []
        while duration > precision:
            res.append((start_datetime, precision))
            start_datetime += timedelta(hours=precision)
            duration -= precision
        if duration > precision / 2.0:
            res.append((start_datetime, precision))
        return res
    def datetime_to_hour(self, datetime_):
        """Return the time-of-day of *datetime_* as a float amount of hours."""
        hour = datetime_.hour + datetime_.minute / 60.0 + datetime_.second / 3600.0
        return hour
    def mid_time_interval(self, datetime_start, delta):
        """Return the midpoint of the interval starting at *datetime_start*
        lasting *delta* hours."""
        return datetime_start + timedelta(hours=delta / 2.0)
    @api.model
    def matched_schedule(self, datetime_, weekday_char, calendar_id, context=None):
        """Return calendar attendance records of *calendar_id* whose weekday
        and hour range cover *datetime_* (records with no dayofweek/date_from
        act as wildcards)."""
        calendar_attendance_pool = self.env["resource.calendar.attendance"]
        datetime_hour = self.datetime_to_hour(datetime_)
        matched_schedules = calendar_attendance_pool.search(
            [
                "&",
                "|",
                ("date_from", "=", False),
                ("date_from", "<=", datetime_.date()),
                "|",
                ("dayofweek", "=", False),
                ("dayofweek", "=", weekday_char),
                ("calendar_id", "=", calendar_id),
                ("hour_to", ">=", datetime_hour),
                ("hour_from", "<=", datetime_hour),
            ],
        )
        return matched_schedules
# Original Code
# Comment by Reason:
# 1. Not used
# ====================================
# @api.model
# def get_reference_calendar(
# self, employee_id, date=None):
#
# if date is None:
# date = fields.date.context_today()
#
# contract_pool = self.env['hr.contract']
# employee_pool = self.env['hr.employee']
#
# active_contracts = contract_pool.search([
# '&',
# ('employee_id', '=', employee_id),
# '|',
# '&',
# ('date_start', '<=', date),
# '|',
# ('date_end', '>=', date),
# ('date_end', '=', False),
# '&',
# '&',
# ('trial_date_start', '!=', False),
# ('trial_date_start', '<=', date),
# '&',
# ('trial_date_end', '!=', False),
# ('trial_date_end', '>=', date),
# ])
#
# if len(active_contracts) > 1:
# employee = employee_pool.browse(employee_id)
# msg = _('Too many active contracts for employee %s at date %s')
# raise UserError(msg % (employee.name, date))
# elif active_contracts:
# contract = active_contracts[0]
# return contract.working_hours
# else:
# return None
    def _ceil_rounding(self, rounding, datetime_):
        # Round the minute fraction of the hour UP to 1/rounding of an hour.
        minutes = datetime_.minute / 60.0 + datetime_.second / 3600.0
        return math.ceil(minutes * rounding) / rounding
    def _floor_rounding(self, rounding, datetime_):
        # Round the minute fraction of the hour DOWN to 1/rounding of an hour.
        minutes = datetime_.minute / 60.0 + datetime_.second / 3600.0
        return math.floor(minutes * rounding) / rounding
# TODO: this is for functional field
@api.depends(
"triggering_attendance_id",
"triggering_attendance_id.name",
"triggering_attendance_id.action",
"triggering_attendance_id.employee_id",
"employee_id.contract_ids",
"employee_id.contract_ids.date_start",
"employee_id.contract_ids.date_start",
"employee_id.contract_ids.date_end",
"employee_id.contract_ids.trial_date_start",
"employee_id.contract_ids.trial_date_end",
"employee_id.contract_ids.working_hours",
"employee_id.contract_ids.working_hours.attendance_ids",
"employee_id.contract_ids.working_hours.attendance_ids.dayofweek",
"employee_id.contract_ids.working_hours.attendance_ids.date_from",
"employee_id.contract_ids.working_hours.attendance_ids.hour_from",
"employee_id.contract_ids.working_hours.attendance_ids.hour_to",
"employee_id.contract_ids.working_hours.attendance_ids.calendar_id",
)
@api.multi
def _compute_attendance_duration(self): # noqa C901
p |
agermanidis/Pattern | examples/01-web/05-flickr.py | Python | bsd-3-clause | 1,296 | 0.006944 | import os, sys; sys.path.append(os.path.join("..", "..", ".."))
from pattern.web import Flickr, extension
from pattern.web import RELEVANCY, LATEST, INTERESTING # Image sort order.
from pattern.web import SMALL, MEDIUM, LAR | GE # Image size.
# This example downloads an image from Flickr (http://flickr | .com).
# Acquiring the image data takes three Flickr queries:
# - the first query with Flickr.search() retrieves a list of results,
# - the second query is executed behind the scenes in the FlickResult.url property,
# - the third query downloads the actual image data using this URL.
# It is a good idea to cache results from Flickr locally,
# which is what the cached=True parameter does.
# You should obtain your own license key at:
# http://www.flickr.com/services/api/
# Otherwise you will be sharing the default key with all users of this module.
engine = Flickr(license=None)
q = "duracell bunny"
results = engine.search(q, size=MEDIUM, sort=RELEVANCY, cached=True)
for img in results:
#print img.url # Retrieving the actual image URL executes an additional query.
print img.description
print img.author
print
# Download and save the image:
img = results[0]
data = img.download()
f = open(q.replace(" ","_") + extension(img.url), "w")
f.write(data)
f.close() |
abrt/satyr | tests/python/satyr.py | Python | gpl-2.0 | 215 | 0.004651 | import sys
import os.path
oldpath = sys.path
newpath = os.path.join(os.path.dirname(__file__), '../../python/.libs')
sys.path = [newpath]
f | rom _satyr3 impor | t *
sys.path = oldpath
del sys, os
del oldpath, newpath
|
leonardoarroyo/django-google-address | google_address/api.py | Python | mit | 692 | 0.011561 | from google_address import helpers
import requests
class GoogleAddressApi():
url = 'https://maps.googleapis.com/maps/api/geocode/json?address={address}'
key = None
def __init__(self):
# Set key
self.key = helpers.get_settings().get("API_KEY", None)
# Set language
self.language = helpers.get_settings().get("API_LANGUAGE", "en_US")
def _get_url(self):
url = self.url
if self.key:
url = "{}&key={}".format(url, self.key)
if self.language:
url = "{}&language={}".fo | rmat(url, self.language)
return url
def query(self, raw):
url = self._get_url().format(address=raw)
r = requests.get(url)
data = r. | json()
return data
|
nsh87/regressors | tests/test_regressors.py | Python | isc | 9,685 | 0.00031 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_regressors
---------------
Tests for the `regressors` module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import unittest2 as unittest
from sklearn import datasets
from sklearn import decomposition
from sklearn import linear_model
from sklearn import preprocessing
from regressors import regressors
from regressors import _utils
from regressors import stats
boston = datasets.load_boston()
which_betas = np.ones(13, dtype=bool)
which_betas[3] = False # Eliminate dummy variable
X = boston.data[:, which_betas]
y = boston.target
class TestStatsResiduals(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_classifier_type_assertion_raised(self):
# Test that assertion is raised for unsupported model
pcomp = decomposition.PCA()
pcomp.fit(X, y)
with self.assertRaises(AttributeError):
stats.residuals(pcomp, X, y)
def tests_classifier_type_assertion_not_raised(self):
# Test that assertion is not raise for supported models
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
try:
stats.residuals(clf, X, y)
except Exception as e:
self.fail("Testing supported linear models in residuals "
"function failed unexpectedly: {0}".format(e))
def test_getting_raw_residuals(self):
ols = linear_model.LinearRegression()
ols.fit(X, y)
try:
stats.residuals(ols, X, y, r_type='raw')
except Exception as e:
self.fail("Testing raw residuals failed unexpectedly: "
"{0}".format(e))
def test_getting_standardized_residuals(self):
ols = linear_model.LinearRegression()
ols.fit(X, y)
try:
stats.residuals(ols, X, y, r_type='standardized')
except Exception as e:
self.fail("Testing standardized residuals failed unexpectedly: "
"{0}".format(e))
def test_getting_studentized_residuals(self):
ols = linear_model.LinearRegression()
ols.fit(X, y)
try:
| stats.residuals(ols, X, y, r_type='studentized')
except Exception as e:
self.fail("Testing studentized residuals failed unexpectedly: "
"{0}".format(e))
class TestSummaryStats(uni | ttest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_error_not_raised_by_sse(self):
# Test that assertion is not raise for supported models
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
try:
sse = stats.sse(clf, X, y)
except Exception as e:
self.fail("Testing SSE function for supported linear models "
"failed unexpectedly: {0}".format(e))
def test_error_not_raised_by_adj_r2_score(self):
# Test that assertion is not raise for supported models
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
try:
stats.adj_r2_score(clf, X, y)
except Exception as e:
self.fail("Testing adjusted R2 function for supported linear "
"models failed unexpectedly: {0}".format(e))
def test_verify_adj_r2_score_return_type(self):
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
adj_r2_score = stats.adj_r2_score(clf, X, y)
self.assertIsInstance(adj_r2_score, float)
def test_error_not_raised_by_coef_se(self):
# Test that assertion is not raise for supported models
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
try:
stats.coef_se(clf, X, y).shape
except Exception as e:
self.fail("Testing standard error of coefficients function for "
"supported linear models failed "
"unexpectedly: {0}".format(e))
def test_length_of_returned_coef_se(self):
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
coef_se = stats.coef_se(clf, X, y)
expected_length = X.shape[1] + 1 # Add 1 for the intercept
self.assertEqual(coef_se.shape[0], expected_length)
def test_error_not_raised_by_coef_tval(self):
# Test that assertion is not raise for supported models
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
try:
stats.coef_tval(clf, X, y).shape
except Exception as e:
self.fail("Testing t-values of coefficients function for "
"supported linear models failed "
"unexpectedly: {0}".format(e))
def test_length_of_returned_coef_tval(self):
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
coef_tval = stats.coef_tval(clf, X, y)
expected_length = X.shape[1] + 1 # Add 1 for the intercept
self.assertEqual(coef_tval.shape[0], expected_length)
def test_error_not_raised_by_coef_pval(self):
# Test that assertion is not raise for supported models
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
try:
stats.coef_pval(clf, X, y).shape
except Exception as e:
self.fail("Testing p-values of coefficients function for "
"supported linear models failed "
"unexpectedly: {0}".format(e))
def test_length_of_returned_coef_pval(self):
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
coef_pval = stats.coef_tval(clf, X, y)
expected_length = X.shape[1] + 1 # Add 1 for the intercept
self.assertEqual(coef_pval.shape[0], expected_length)
def test_error_not_raised_by_f_stat(self):
# Test that assertion is not raise for supported models
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
try:
stats.f_stat(clf, X, y).shape
except Exception as e:
self.fail("Testing summary F-statistic function for "
"supported linear models failed "
"unexpectedly: {0}".format(e))
def test_verify_f_stat_return_type(self):
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
adj_r2_score = stats.adj_r2_score(clf, X, y)
self.assertIsInstance(adj_r2_score, float)
def test_error_not_raised_by_summary_function(self):
for classifier in _utils.supported_linear_models:
clf = classifier()
clf.fit(X, y)
try:
stats.f_stat(clf, X, y).shape
except Exception as e:
self.fail("Testing summary function for "
"supported linear models failed "
"unexpectedly: {0}".format(e))
class TestPCRBetaCoef(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_pcr_beta_coef_returns_coefs_for_all_predictors(self):
# Just return the coefficients for the predictors because the intercept
# for PCR is the same as the intercept in the PCA regression model.
scaler = preprocessing.StandardScale |
bop/foundation | lib/python2.7/site-packages/compressor/utils/decorators.py | Python | gpl-2.0 | 2,549 | 0.002746 | import functools
class memoize(object):
def __init__ (self, func):
self.func = func
def __call__ (self, *args, **kwargs):
if (args, str(kwargs)) in self.__dict__:
value = self.__dict__[args, str(kwargs)]
else:
value = self.func(*args, **kwargs)
self.__dict__[args, str(kwargs)] = value
return value
def __repr__(self):
"""
Return the function's docstring.
"""
return self.func.__doc__ or ''
def __get__(self, obj, objtype):
"""
Support instance methods.
"""
return functools.partial(self.__call__, obj)
class cached_property(object):
"""Property descriptor that caches the return value
of the get function.
*Examples*
.. code-block:: python
@cached_property
def connection(self):
return Connection()
@connection.setter # Prepares stored value
def connection(self, value):
if value is None:
raise TypeError("Connection must be a connection")
return value
@connection.deleter
def connection(self, value):
# Additional action to do at del(self.attr)
if value is not None:
print("Connection %r deleted" % (value, ))
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.__get = fget
self.__set = fset
self.__del = fdel
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
def __get__(self, obj, type=None):
if obj is None:
| return self
try:
return obj.__dict__[self.__name__]
except KeyError:
value = obj.__dict__[self.__name__] = self.__get(obj)
return value
def __set__(self, obj, value):
if obj is None:
return self
| if self.__set is not None:
value = self.__set(obj, value)
obj.__dict__[self.__name__] = value
def __delete__(self, obj):
if obj is None:
return self
try:
value = obj.__dict__.pop(self.__name__)
except KeyError:
pass
else:
if self.__del is not None:
self.__del(obj, value)
def setter(self, fset):
return self.__class__(self.__get, fset, self.__del)
def deleter(self, fdel):
return self.__class__(self.__get, self.__set, fdel)
|
ntoll/fluiddb.py | fluiddb.py | Python | mit | 5,024 | 0.000398 | # -*- coding: utf-8 -*-
"""
A very thin wrapper on top of the FluidDB RESTful API
Copyright (c) 2009-2010 Seo Sanghyeon, Nicholas Tollervey and others
See README, AUTHORS and LICENSE for more information
"""
import sys
import httplib2
import urllib
import types
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
# There are currently two instances of FluidDB. MAIN is the default standard
# instance and SANDBOX is a scratch version for testing purposes. Data in
# SANDBOX can (and will) be blown away.
MAIN = 'https://fluiddb.fluidinfo.com'
SANDBOX = 'https://sandbox.fluidinfo.com'
instance = MAIN
ITERABLE_TYPES = set((list, tuple))
SERIALIZABLE_TYPES = set((types.NoneType, bool, int, float, str, unicode, list,
tuple))
global_headers = {
'Accept': '*/*',
}
def login(username, password):
"""
Creates the 'Authorization' token from the given username and password.
"""
userpass = username + ':' + password
auth = 'Basic ' + userpass.encode('base64').strip()
global_headers['Authorization'] = auth
def logout():
"""
Removes the 'Authorization' token from the headers passed into FluidDB
"""
if 'Authorization' in global_headers:
del global_headers['Authorization']
def call(method, path, body=None, mime=None, tags=[], custom_headers={}, **kw):
"""
Makes a call to FluidDB
method = HTTP verb. e.g. PUT, POST, GET, DELETE or HEAD
path = Path appended to the instance to locate the resource in FluidDB this
can be either a string OR a list of path elements.
body = The request body (a dictionary will be translated to json,
primitive types will also be jsonified)
mime = The mime-type for the body of the request - will override the
jsonification of primitive types
tags = The list of tags to return if the request is to values
headers = A dictionary containing additional headers to send in the request
**kw = Query-string arguments to be appended to the URL
"""
http = httplib2.Http()
# build the URL
url = build_url(path)
if kw:
url = url + '?' + urllib.urlencode(kw)
if tags and path.startswith('/values'):
# /values based requests must have a tags list to append to the
# url args (which are passed in as **kw), so append them so everything
# gets urlencoded correctly below
url = url + '&' + urllib.urlencode([('tag', tag) for tag in tags])
# set the headers
headers = global_ | headers.copy()
if custom_headers:
headers.update(custom_headers)
# make sure the path is a string for the following elif check for PUT
# based requests
if isinstance(path, list):
path = '/'+'/'.join(path)
# Make sure the correct content-type header is sent
if isinstance(body, dict):
# jsonify dicts
headers['content-type'] = 'application/json'
body = json.dumps(body)
elif method | .upper() == 'PUT' and (
path.startswith('/objects/') or path.startswith('/about')):
# A PUT to an "/objects/" or "/about/" resource means that we're
# handling tag-values. Make sure we handle primitive/opaque value types
# properly.
if mime:
# opaque value (just set the mime type)
headers['content-type'] = mime
elif isprimitive(body):
# primitive values need to be json-ified and have the correct
# content-type set
headers['content-type'] = 'application/vnd.fluiddb.value+json'
body = json.dumps(body)
else:
# No way to work out what content-type to send to FluidDB so
# bail out.
raise TypeError("You must supply a mime-type")
response, content = http.request(url, method, body, headers)
if ((response['content-type'] == 'application/json' or
response['content-type'] == 'application/vnd.fluiddb.value+json')
and content):
result = json.loads(content)
else:
result = content
return response, result
def isprimitive(body):
"""
Given the body of a request will return a boolean to indicate if the
value is a primitive value type.
See:
http://doc.fluidinfo.com/fluidDB/api/tag-values.html
&
http://bit.ly/hmrMzT
For an explanation of the difference between primitive and opaque
values.
"""
bodyType = type(body)
if bodyType in SERIALIZABLE_TYPES:
if bodyType in ITERABLE_TYPES:
if not all(isinstance(x, basestring) for x in body):
return False
return True
else:
return False
def build_url(path):
"""
Given a path that is either a string or list of path elements, will return
the correct URL
"""
url = instance
if isinstance(path, list):
url += '/'
url += '/'.join([urllib.quote(element, safe='') for element in path])
else:
url += urllib.quote(path)
return url
|
MobinRanjbar/hue | apps/oozie/src/oozie/views/dashboard.py | Python | apache-2.0 | 44,447 | 0.011047 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the | Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless r | equired by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import re
import time
from datetime import datetime
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from desktop.conf import TIME_ZONE
from desktop.lib.django_util import JsonResponse, render
from desktop.lib.json_utils import JSONEncoderForHTML
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_str, smart_unicode
from desktop.lib.rest.http_client import RestException
from desktop.lib.view_util import format_duration_in_millis
from desktop.log.access import access_warn
from desktop.models import Document, Document2
from hadoop.fs.hadoopfs import Hdfs
from liboozie.oozie_api import get_oozie
from liboozie.credentials import Credentials
from liboozie.submission2 import Submission
from oozie.conf import OOZIE_JOBS_COUNT, ENABLE_CRON_SCHEDULING, ENABLE_V2
from oozie.forms import RerunForm, ParameterForm, RerunCoordForm, RerunBundleForm, UpdateCoordinatorForm
from oozie.models import Workflow as OldWorkflow, Job, utc_datetime_format, Bundle, Coordinator, get_link, History as OldHistory
from oozie.models2 import History, Workflow, WORKFLOW_NODE_PROPERTIES
from oozie.settings import DJANGO_APPS
from oozie.utils import convert_to_server_timezone
def get_history():
if ENABLE_V2.get():
return History
else:
return OldHistory
def get_workflow():
if ENABLE_V2.get():
return Workflow
else:
return OldWorkflow
LOG = logging.getLogger(__name__)
"""
Permissions:
A Workflow/Coordinator/Bundle can:
* be accessed only by its owner or a superuser or by a user with 'dashboard_jobs_access' permissions
* be submitted/modified only by its owner or a superuser
Permissions checking happens by calling:
* check_job_access_permission()
* check_job_edition_permission()
"""
def _get_workflows(user):
return [{
'name': workflow.name,
'owner': workflow.owner.username,
'value': workflow.uuid,
'id': workflow.id
} for workflow in [d.content_object for d in Document.objects.get_docs(user, Document2, extra='workflow2')]
]
def manage_oozie_jobs(request, job_id, action):
if request.method != 'POST':
raise PopupException(_('Use a POST request to manage an Oozie job.'))
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
response = {'status': -1, 'data': ''}
try:
oozie_api = get_oozie(request.user)
params = None
if action == 'change':
pause_time_val = request.POST.get('pause_time')
if request.POST.get('clear_pause_time') == 'true':
pause_time_val = ''
end_time_val = request.POST.get('end_time')
if end_time_val:
end_time_val = convert_to_server_timezone(end_time_val, TIME_ZONE.get())
if pause_time_val:
pause_time_val = convert_to_server_timezone(pause_time_val, TIME_ZONE.get())
params = {'value': 'endtime=%s' % (end_time_val) + ';'
'pausetime=%s' % (pause_time_val) + ';'
'concurrency=%s' % (request.POST.get('concurrency'))}
elif action == 'ignore':
oozie_api = get_oozie(request.user, api_version="v2")
params = {
'type': 'action',
'scope': ','.join(job.aggreate(request.POST.get('actions').split())),
}
response['data'] = oozie_api.job_control(job_id, action, parameters=params)
response['status'] = 0
if 'notification' in request.POST:
request.info(_(request.POST.get('notification')))
except RestException, ex:
ex_message = ex.message
if ex._headers.get('oozie-error-message'):
ex_message = ex._headers.get('oozie-error-message')
msg = "Error performing %s on Oozie job %s: %s." % (action, job_id, ex_message)
LOG.exception(msg)
response['data'] = _(msg)
return JsonResponse(response)
def bulk_manage_oozie_jobs(request):
if request.method != 'POST':
raise PopupException(_('Use a POST request to manage the Oozie jobs.'))
response = {'status': -1, 'data': ''}
if 'job_ids' in request.POST and 'action' in request.POST:
jobs = request.POST.get('job_ids').split()
response = {'totalRequests': len(jobs), 'totalErrors': 0, 'messages': ''}
oozie_api = get_oozie(request.user)
for job_id in jobs:
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
try:
oozie_api.job_control(job_id, request.POST.get('action'))
except RestException, ex:
LOG.exception("Error performing bulk operation for job_id=%s", job_id)
response['totalErrors'] = response['totalErrors'] + 1
response['messages'] += str(ex)
return JsonResponse(response)
def show_oozie_error(view_func):
def decorate(request, *args, **kwargs):
try:
return view_func(request, *args, **kwargs)
except RestException, ex:
LOG.exception("Error communicating with Oozie in %s", view_func.__name__)
detail = ex._headers.get('oozie-error-message', ex)
if 'Max retries exceeded with url' in str(detail) or 'Connection refused' in str(detail):
detail = _('The Oozie server is not running')
raise PopupException(_('An error occurred with Oozie.'), detail=detail)
return wraps(view_func)(decorate)
@show_oozie_error
def list_oozie_workflows(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(), 'filters': []}
if not has_dashboard_jobs_access(request.user):
kwargs['filters'].append(('user', request.user.username))
oozie_api = get_oozie(request.user)
if request.GET.get('format') == 'json':
just_sla = request.GET.get('justsla') == 'true'
if request.GET.get('startcreatedtime'):
kwargs['filters'].extend([('startcreatedtime', request.GET.get('startcreatedtime'))])
if request.GET.get('offset'):
kwargs['offset'] = request.GET.get('offset')
json_jobs = []
total_jobs = 0
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
wf_list = oozie_api.get_workflows(**kwargs)
json_jobs = wf_list.jobs
total_jobs = wf_list.total
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_job(job.id) for job in json_jobs]
response = massaged_oozie_jobs_for_json(json_jobs, request.user, just_sla)
response['total_jobs'] = total_jobs
return JsonResponse(response, encoder=JSONEncoderForHTML)
return render('dashboard/list_oozie_workflows.mako', request, {
'user': request.user,
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
})
@show_oozie_error
def list_oozie_coordinators(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(), 'filters': []}
if not has_dashboard_jobs_access(request.user):
kwargs['filters'].append(('user', request.user.username))
oozie_api = get_oozie(request.user)
enable_cron_scheduling = ENABLE_CRON_SCHEDULING.get()
if request.GET.get('format') == 'json':
if request.GET.get('offset'):
kwargs['offset'] = request.GET.get('offset')
json_jobs = []
total_jobs = 0
if request.GET.getlist('status'):
kw |
bitedgeco/survivor-pool | survivor_pool/models/__init__.py | Python | mit | 2,411 | 0.000415 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from sqlalchemy import engine_from_config
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import configure_mappers
import zope.sqlalchemy
# import or define all models here to ensure they are attached to the
# Base.metadata prior to any initialization routines
#from .mymodel import MyModel # noqa
from .user import User
from .event import Event
from .pick import Pick
# import all models so that they are in scope for configure_mappers
# run configure_mappers after defining all of the models to ensure
# all relationships can be setup
configure_mappers()
def get_engine(settings, prefix='sqlalchemy.'):
return engine_from_config(settings, prefix)
def get_session_factory(engine):
factory = sessionmaker()
factory.configure(bind=engine)
return factory
def get_tm_session(session_factory, transaction_manager):
"""
Get a ``sqlalchemy.orm.Session`` instance backed by a transaction.
This function will hook the session to the transaction manager which
will take care of committing any changes.
- When using pyramid_tm it will automatically be committed or aborted
depending on whether an exception is raised.
- When using scripts you should wrap the session in a manager yourself.
For example::
import transaction
engine = get_engine(settings)
session_factory = get_session_factory(engine)
with transaction.manager:
dbsession = get_tm_session(session_factory, transaction.manager)
"""
dbsession = session_factory()
zope.sqlalchemy.register(
dbsession, transaction_manager=transaction_manager)
return dbsession
def includeme(config):
| """
Initialize the model for a Pyramid app.
Activate this setup using ``config.include('survivor-pool.models')``.
"""
settings = config.get_se | ttings()
# use pyramid_tm to hook the transaction lifecycle to the request
config.include('pyramid_tm')
session_factory = get_session_factory(get_engine(settings))
config.registry['dbsession_factory'] = session_factory
# make request.dbsession available for use in Pyramid
config.add_request_method(
# r.tm is the transaction manager used by pyramid_tm
lambda r: get_tm_session(session_factory, r.tm),
'dbsession',
reify=True
)
|
meahmadi/ThreeDHighway | Content/Scripts/FilmActor.py | Python | apache-2.0 | 767 | 0.059974 | import unreal_engine as ue
import json
class FilmActor:
def begin_play(self):
self.pawn = self.uobject.get_owner()
def getjson(self):
ue.log("@@@@video getting json:")
loc = self.uobject.get_actor_location()
rot = self.uobject.get_actor_forward()
data = {
"x":loc.x,"y":loc.y,"z":loc.z,
"rx":rot.x, "ry":rot.y, "rz": rot.z
}
return json.dumps(data)
def addtoworld(self):
ue.log("@@@@video add to world")
return ""
def setjson(self,js):
ue.log("@@@@video setting json:")
data = json.loads(js)
loc = self.uobject.ge | t_actor_location()
loc.x = data["x"]
loc.y = dat | a["y"]
loc.z = data["z"]
self.uobject.set_actor_location(loc)
rot = self.uobject.get_actor_forward()
return True
def tick(self, delta_time):
pass |
ajinabraham/YSO-Mobile-Security-Framework | mobsf/StaticAnalyzer/views/ios/macho_analysis.py | Python | gpl-3.0 | 9,156 | 0 | # !/usr/bin/python
# coding=utf-8
import logging
import lief
logger = logging.getLogger(__name__)
class Checksec:
def __init__(self, macho):
self.macho = lief.parse(macho.as_posix())
def checksec(self):
macho_dict = {}
macho_dict['name'] = self.macho.name
has_nx = self.has_nx()
has_pie = self.has_pie()
has_canary = self.has_canary()
has_rpath = self.has_rpath()
has_code_signature = self.has_code_signature()
has_arc = self.has_arc()
is_encrypted = self.is_encrypted()
is_stripped = self.is_symbols_stripped()
if has_nx:
severity = 'info'
desc = (
'The binary has NX bit set. This marks a '
'memory page non-executable making attacker '
'injected shellcode non-executable.')
else:
severity = 'info'
desc = (
'The binary does not have NX bit set. NX bit '
'offer protection against exploitation of memory corruption '
'vulnerabilities by marking memory page as non-executable. '
'However iOS never allows an app to execute from writeable '
'memory. You do not need to specifically | enable the '
'‘NX bit’ because it’s always ena | bled for all '
'third-party code.')
macho_dict['nx'] = {
'has_nx': has_nx,
'severity': severity,
'description': desc,
}
if has_pie:
severity = 'info'
desc = (
'The binary is build with -fPIC flag which '
'enables Position independent code. This makes Return '
'Oriented Programming (ROP) attacks much more difficult '
'to execute reliably.')
else:
severity = 'high'
desc = (
'The binary is built without Position '
'Independent Code flag. In order to prevent '
'an attacker from reliably jumping to, for example, a '
'particular exploited function in memory, Address '
'space layout randomization (ASLR) randomly arranges '
'the address space positions of key data areas of a '
'process, including the base of the executable and the '
'positions of the stack,heap and libraries. Use compiler '
'option -fPIC to enable Position Independent Code.')
macho_dict['pie'] = {
'has_pie': has_pie,
'severity': severity,
'description': desc,
}
if has_canary:
severity = 'info'
desc = (
'This binary has a stack canary value '
'added to the stack so that it will be overwritten by '
'a stack buffer that overflows the return address. '
'This allows detection of overflows by verifying the '
'integrity of the canary before function return.')
elif is_stripped:
severity = 'warning'
desc = (
'This binary has symbols stripped. We cannot identify '
'whether stack canary is enabled or not.')
else:
severity = 'high'
desc = (
'This binary does not have a stack '
'canary value added to the stack. Stack canaries '
'are used to detect and prevent exploits from '
'overwriting return address. Use the option '
'-fstack-protector-all to enable stack canaries.')
macho_dict['stack_canary'] = {
'has_canary': has_canary,
'severity': severity,
'description': desc,
}
if has_arc:
severity = 'info'
desc = (
'The binary is compiled with Automatic Reference '
'Counting (ARC) flag. ARC is a compiler '
'feature that provides automatic memory '
'management of Objective-C objects and is an '
'exploit mitigation mechanism against memory '
'corruption vulnerabilities.'
)
elif is_stripped:
severity = 'warning'
desc = (
'This binary has symbols stripped. We cannot identify '
'whether ARC is enabled or not.')
else:
severity = 'high'
desc = (
'The binary is not compiled with Automatic '
'Reference Counting (ARC) flag. ARC is a compiler '
'feature that provides automatic memory '
'management of Objective-C objects and '
'protects from memory corruption '
'vulnerabilities. Use compiler option '
'-fobjc-arc to enable ARC.')
macho_dict['arc'] = {
'has_arc': has_arc,
'severity': severity,
'description': desc,
}
if has_rpath:
severity = 'warning'
desc = (
'The binary has Runpath Search Path (@rpath) set. '
'In certain cases an attacker can abuse this '
'feature to run arbitrary executable for code '
'execution and privilege escalation. Remove the '
'compiler option -rpath to remove @rpath.')
else:
severity = 'info'
desc = (
'The binary does not have Runpath Search '
'Path (@rpath) set.')
macho_dict['rpath'] = {
'has_rpath': has_rpath,
'severity': severity,
'description': desc,
}
if has_code_signature:
severity = 'info'
desc = 'This binary has a code signature.'
else:
severity = 'warning'
desc = 'This binary does not have a code signature.'
macho_dict['code_signature'] = {
'has_code_signature': has_code_signature,
'severity': severity,
'description': desc,
}
if is_encrypted:
severity = 'info'
desc = 'This binary is encrypted.'
else:
severity = 'warning'
desc = 'This binary is not encrypted.'
macho_dict['encrypted'] = {
'is_encrypted': is_encrypted,
'severity': severity,
'description': desc,
}
if is_stripped:
severity = 'info'
desc = 'Symbols are stripped'
else:
severity = 'warning'
desc = (
'Symbols are available. To strip '
'debugging symbols, set Strip Debug '
'Symbols During Copy to YES, '
'Deployment Postprocessing to YES, '
'and Strip Linked Product to YES in '
'project\'s build settings.')
macho_dict['symbol'] = {
'is_stripped': is_stripped,
'severity': severity,
'description': desc,
}
return macho_dict
def has_nx(self):
return self.macho.has_nx
    def has_pie(self):
        """Return True when the binary is a position-independent executable (PIE)."""
        return self.macho.is_pie
def has_canary(self):
stk_check = '___stack_chk_fail'
stk_guard = '___stack_chk_guard'
ipt_list = set()
for ipt in self.macho.imported_functions:
ipt_list.add(str(ipt))
return stk_check in ipt_list and stk_guard in ipt_list
def has_arc(self):
for func in self.macho.imported_functions:
if str(func).strip() == '_objc_release':
return True
return False
    def has_rpath(self):
        """Return True when the binary has a Runpath Search Path (@rpath) set."""
        return self.macho.has_rpath
    def has_code_signature(self):
        """Return True when a non-empty code-signature blob is present.

        Any error while reading the signature (e.g. the load command is
        missing entirely) is treated as "not signed".
        """
        try:
            return self.macho.code_signature.data_size > 0
        except Exception:
            # Missing or unparsable code-signature command -> report unsigned.
            return False
    def is_encrypted(self):
        """Return True when the encryption-info ``crypt_id`` is non-zero."""
        return bool(self.macho.encryption_info.crypt_id)
def is_symbols_stripped(self):
for i in self.macho.symbols:
if i:
return False
return True
|
idrogeno/IdroMips | lib/python/Plugins/SystemPlugins/WirelessLan/plugin.py | Python | gpl-2.0 | 16,367 | 0.025661 | from enigma import eTimer, eEnv
from Screens.Screen import Screen
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Pixmap import Pixmap,MultiPixmap
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.Sources.List import List
from Components.MenuList import MenuList
from Components.config import config, getConfigListEntry, ConfigYesNo, NoSave, ConfigSubsection, ConfigText, ConfigSelection, ConfigPassword
from Components.ConfigList import ConfigListScreen
from Components.Network import iNetwork
from Components.Console import Console
from Plugins.Plugin import PluginDescriptor
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_SKIN_IMAGE
from Tools.LoadPixmap import LoadPixmap
from Wlan import iWlan, wpaSupplicant, iStatus, getWlanConfigName
from time import time
import re
plugin_path = eEnv.resolve("${libdir}/enigma2/python/Plugins/SystemPlugins/WirelessLan")

# Encryption modes offered in the setup screen.
# NOTE: the name ``list`` shadows the builtin, but other code in this module
# refers to it by this name, so it is kept for compatibility.
list = [
    "Unencrypted",
    "WEP",
    "WPA",
    "WPA/WPA2",
    "WPA2",
]

# Accepted key formats for WEP keys.
weplist = [
    "ASCII",
    "HEX",
]

# Runtime-only (NoSave) configuration entries backing the setup screen.
config.plugins.wlan = ConfigSubsection()
config.plugins.wlan.essid = NoSave(ConfigText(default = "", fixed_size = False))
config.plugins.wlan.hiddenessid = NoSave(ConfigYesNo(default = False))
config.plugins.wlan.encryption = NoSave(ConfigSelection(list, default = "WPA2"))
config.plugins.wlan.wepkeytype = NoSave(ConfigSelection(weplist, default = "ASCII"))
config.plugins.wlan.psk = NoSave(ConfigPassword(default = "", fixed_size = False))
class WlanStatus(Screen):
skin = """
<screen name="WlanStatus" position="center,center" size="560,400" title="Wireless network status" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="LabelBSSID" render="Label" position="10,60" size="200,25" valign="left" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="LabelESSID" render="Label" position="10,100" size="200,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="LabelQuality" render="Label" position="10,140" size="200,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="LabelSignal" render="Label" position="10,180" size="200,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="LabelBitrate" render="Label" position="10,220" size="200,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="LabelEnc" render="Label" position="10,260" size="200,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="BSSID" render="Label" position="220,60" size="330,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="ESSID" render="Label" position="220,100" size="330,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="quality" render="Label" position="220,140" size="330,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="signal" render="Label" position="220,180" size="330,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="bitrate" render="Label" position="220,220" size="330,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<widget source="enc" render="Label" position="220,260" size="330,25" valign="center" font="Regular;20" transparent="1" foregroundColor="#FFFFFF" />
<ePixmap pixmap="skin_default/div-h.png" position="0,350" zPosition="1" size="560,2" />
<widget source="IFtext" render="Label" position="10,355" size="120,21" zPosition="10" font="Regular;20" halign="left" backgroundColor="#25062748" transparent="1" />
<widget source="IF" render="Label" position="120,355" size="400,21" zPosition="10" font="Regular;20" halign="left" backgroundColor="#25062748" transparent="1" />
<widget source="Statustext" render="Label" position="10,375" size="115,21" zPosition="10" font="Regular;20" halign="left" backgroundColor="#25062748" transparent="1"/>
<widget name="statuspic" pixmaps="skin_default/buttons/button_green.png,skin_default/buttons/button_green_off.png" position="130,380" zPosition="10" size="15,16" transparent="1" alphatest="on"/>
</screen>"""
    def __init__(self, session, iface):
        """Status screen for the wireless interface *iface*.

        Creates the StaticText widgets referenced by the skin, starts a
        periodic poll (every 8 seconds) of the link status and wires up the
        close actions.
        """
        Screen.__init__(self, session)
        self.session = session
        self.iface = iface
        # Static labels for the left column of the skin.
        self["LabelBSSID"] = StaticText(_('Accesspoint:'))
        self["LabelESSID"] = StaticText(_('SSID:'))
        self["LabelQuality"] = StaticText(_('Link quality:'))
        self["LabelSignal"] = StaticText(_('Signal strength:'))
        self["LabelBitrate"] = StaticText(_('Bitrate:'))
        self["LabelEnc"] = StaticText(_('Encryption:'))
        # Dynamic value fields, filled asynchronously by getInfoCB().
        self["BSSID"] = StaticText()
        self["ESSID"] = StaticText()
        self["quality"] = StaticText()
        self["signal"] = StaticText()
        self["bitrate"] = StaticText()
        self["enc"] = StaticText()
        self["IFtext"] = StaticText()
        self["IF"] = StaticText()
        self["Statustext"] = StaticText()
        self["statuspic"] = MultiPixmap()
        self["statuspic"].hide()
        self["key_red"] = StaticText(_("Close"))
        self.resetList()
        self.updateStatusbar()
        self["actions"] = NumberActionMap(["WizardActions", "InputActions", "EPGSelectActions", "ShortcutActions"],
        {
            "ok": self.exit,
            "back": self.exit,
            "red": self.exit,
        }, -1)
        # Re-poll the interface status every 8 seconds while shown.
        self.timer = eTimer()
        self.timer.timeout.get().append(self.resetList)
        self.onShown.append(lambda: self.timer.start(8000))
        self.onLayoutFinish.append(self.layoutFinished)
        self.onClose.append(self.cleanup)
    def cleanup(self):
        """Stop the background wlan status console when the screen closes."""
        iStatus.stopWlanConsole()
    def layoutFinished(self):
        """Set the window title once the skin layout has been built."""
        self.setTitle(_("Wireless network state"))
    def resetList(self):
        """Trigger an asynchronous status poll; getInfoCB receives the result."""
        iStatus.getDataForInterface(self.iface,self.getInfoCB)
    def getInfoCB(self,data,status):
        """Callback for iStatus.getDataForInterface.

        `data` is the console completion flag and `status` a per-interface
        dict of strings (essid, accesspoint, quality, signal, bitrate,
        encryption). Updates the value widgets and the link indicator.
        """
        if data is not None:
            if data is True:
                if status is not None:
                    if status[self.iface]["essid"] == "off":
                        essid = _("No Connection")
                    else:
                        essid = status[self.iface]["essid"]
                    if status[self.iface]["accesspoint"] == "Not-Associated":
                        accesspoint = _("Not associated")
                        essid = _("No Connection")
                    else:
                        accesspoint = status[self.iface]["accesspoint"]
                    # NOTE(review): self.has_key(...) is the legacy Python-2
                    # dict-style API of Screen, presumably testing whether the
                    # named widget exists — confirm on Python-3 builds.
                    if self.has_key("BSSID"):
                        self["BSSID"].setText(accesspoint)
                    if self.has_key("ESSID"):
                        self["ESSID"].setText(essid)
                    quality = status[self.iface]["quality"]
                    if self.has_key("quality"):
                        self["quality"].setText(quality)
                    if status[self.iface]["bitrate"] == '0':
                        bitrate = _("Unsupported")
                    else:
                        bitrate = str(status[self.iface]["bitrate"]) + " Mb/s"
                    if self.has_key("bitrate"):
                        self["bitrate"].setText(bitrate)
                    signal = status[self.iface]["signal"]
                    if self.has_key("signal"):
                        self["signal"].setText(signal)
                    # "off" while associated can also mean the driver cannot
                    # report WPA2 state, hence the special label.
                    if status[self.iface]["encryption"] == "off":
                        if accesspoint == "Not-Associated":
                            encryption = _("Disabled")
                        else:
                            encryption = _("off or wpa2 on")
                    else:
                        encryption = _("Enabled")
                    if self.has_key("enc"):
                        self["enc"].setText(encryption)
                    self.updateStatusLink(status)
    def exit(self):
        """Stop the refresh timer and close, reporting success to the caller."""
        self.timer.stop()
        self.close(True)
def updateStatusbar(self):
wait_txt = _("Please wait...")
self["BSSID"].setText(wait_txt)
self["ESSID"].setText(wait_txt)
self["quality"].setText(wait_txt)
self["signal"].setText(wait_txt)
self["bitrate"].setText(wait_txt)
self["enc"].setText(wait_txt)
self["IFtext"].setText(_("Network:"))
self["IF"].setText(iNetwork.getFriendlyAdapterName(self.iface))
self["Statustext"].setText(_("Link:"))
def updateStatusLink(self,status):
if status is not None:
if status[self.iface]["essid"] == "off" or status[self.iface]["accesspoint"] == "Not-Associated" or status[self.iface]["accesspoi |
mbohlool/client-python | kubernetes/test/test_v1beta1_subject_access_review_spec.py | Python | apache-2.0 | 1,077 | 0.004643 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_subject_access_review_spec import V1beta1SubjectAccessReviewSpec
class TestV1beta1SubjectAccessReviewSpec(unittest.TestCase):
    """ V1beta1SubjectAccessReviewSpec unit test stubs """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testV1beta1SubjectAccessReviewSpec(self):
        """
        Test V1beta1SubjectAccessReviewSpec
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1beta1_subject_access_review_spec.V1beta1SubjectAccessReviewSpec()
        pass
if __name__ == '__main__':
    unittest.main()
|
megrela/flask-cms-control-panel | application/mongo_db/__init__.py | Python | mit | 98 | 0 | from | application import app
from flask.ext.pymongo import PyMongo

# Module-wide PyMongo handle bound to the Flask app; the "MONGO" prefix
# selects which MONGO_* configuration keys are used.
mongo = PyMongo(app, "MONGO")
|
AlexaProjects/Alexa2 | ALEXA-IDE/core/user_files/alexa_ide/addins/plugins/alexatools/windowcropregion.py | Python | gpl-3.0 | 8,071 | 0.005823 | # -*- coding: UTF-8 -*-
#
# Copyright (C) 2013 Alan Pipitone
#
# This file is part of Al'EXA-IDE.
#
# Al'EXA-IDE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Al'EXA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Al'EXA-IDE. If not, see <http://www.gnu.org/licenses/>.
#PYTHON
import os
import sys
#QT Toolkit
from PyQt4.QtGui import *
from PyQt4.QtCore import *
#PIL
import Image
if sys.platform == 'win32':
import win32gui
import win32con
class CropRegionClass(QWidget):
    """Full-screen overlay used to select (crop) a rectangular region.

    A frozen screenshot is painted as the background; the user drags the
    mouse to draw the crop rectangle, which is written back into the
    caller's ``CropRegionX``/``CropRegionY``/``CropRegionW``/``CropRegionH``
    attributes.
    """

    def __init__(self, caller):
        QWidget.__init__(self)
        self.caller = caller
        self.plug_path = self.caller.plugin.path
        SERVICE_NAME = "editor"
        self.editor_service = self.caller.plugin.locator.get_service(SERVICE_NAME)
        # set pixmap for the background
        self.pixmap = QPixmap()
        self.pixmap.load(self.plug_path + os.sep + 'tmp' + os.sep + 'screenshot.png')
        self.OriginalScreenshot = Image.open(self.plug_path + os.sep + 'tmp' + os.sep + 'screenshot.png')
        # store if mouse is pressed
        self.pressed = False
        self.released = False
        self.printLabelBorder = False
        # store mouse position
        self.mouseOldX = 0
        self.mouseOldY = 0
        self.mouseNewX = 0
        self.mouseNewY = 0

    def paintEvent(self, event):
        """Draw the screenshot, any saved crop region and the live selection."""
        paint = QPainter()
        paint.begin(self)
        paint.drawPixmap(0, 0, self.pixmap)
        pen = QPen()
        # needed to draw a rectangle with sharp (non rounded) corners
        pen.setJoinStyle(Qt.MiterJoin)
        center = QPoint(QCursor.pos())
        # Draw the previously saved crop region, if any.
        if self.caller.CropRegionX != 0 or self.caller.CropRegionY != 0 or self.caller.CropRegionW != 0 or self.caller.CropRegionH != 0:
            x = self.caller.CropRegionX
            y = self.caller.CropRegionY
            w = self.caller.CropRegionW
            h = self.caller.CropRegionH
            pen.setStyle(Qt.SolidLine)
            pen.setBrush(QColor(0, 78, 255, 255))
            pen.setWidth(1)
            paint.setPen(pen)
            paint.fillRect(x, y, w, h,
                           QBrush(QColor(100, 80, 155, 100)))
            newRect = QRect(x, y, w, h)
            paint.drawRect(newRect)
            #paint.restore()
        self.mouseNewX = center.x()
        self.mouseNewY = center.y()
        if self.pressed is False:
            # Not dragging: draw dashed cross-hair guide lines.
            #pen.setStyle(Qt.DashDotLine)
            pen.setDashPattern([1, 1])
            pen.setWidth(1)
            pen.setBrush(QColor(32, 178, 170, 255))
            #pen.setBrush(QColor(225, 0, 0, 255))
            paint.setPen(pen)
            # from the center upwards
            paint.drawLine(center.x(), center.y(), center.x(), 0)
            # from the center downwards
            paint.drawLine(center.x(), center.y(), center.x(), self.height())
            paint.drawLine(center.x(), center.y(), 0, center.y())
            paint.drawLine(center.x(), center.y(), self.width(), center.y())
            pen.setStyle(Qt.SolidLine)
            pen.setWidth(1)
            pen.setBrush(Qt.red)
            pen.setCapStyle(Qt.RoundCap)
            pen.setJoinStyle(Qt.RoundJoin)
        else:
            # Dragging: draw the rubber-band rectangle being selected.
            pen.setWidth(1)
            pen.setStyle(Qt.SolidLine)
            #pen.setBrush(QColor(128, 128, 128, 255))
            pen.setBrush(QBrush(QColor(0, 255, 0, 255)))
            paint.setPen(pen)
            paint.fillRect(self.mouseOldX + 1,
                           self.mouseOldY + 1,
                           center.x() - self.mouseOldX - 1,
                           center.y() - self.mouseOldY - 1,
                           QBrush(QColor(32, 178, 170, 100)))
            rect = QRect(self.mouseOldX, self.mouseOldY,
                         center.x() - self.mouseOldX, center.y() - self.mouseOldY)
            paint.drawRect(rect)
        self.setCursor(QCursor(Qt.CrossCursor))
        #self.setCursor(QCursor(Qt.BlankCursor))
        paint.end()

    def mouseMoveEvent(self, event):
        self.update()

    # mouse press event
    def mousePressEvent(self, event):
        if event.buttons() == Qt.LeftButton:
            #self.setCursor(QCursor(Qt.BlankCursor))
            self.pressed = True
            origin = QPoint(QCursor.pos())
            self.mouseOldX = origin.x()
            self.mouseOldY = origin.y()
            self.update()

    # mouse release event
    def mouseReleaseEvent(self, event):
        #if(event.type() == QEvent.MouseButtonRelease):
        if event.button() == Qt.LeftButton:
            self.pressed = False
            self.released = True
            width = self.mouseNewX - self.mouseOldX
            height = self.mouseNewY - self.mouseOldY
            rect = QRect(self.mouseOldX, self.mouseOldY, width, height)
            # Ignore tiny (<3px) selections; otherwise normalise so that
            # (x, y) is the top-left corner and w/h are positive.
            if (rect.width() >= 3 or rect.width() <= -3) and (rect.height() >= 3 or rect.height() <= -3):
                if (rect.width() < 0 and rect.height() < 0):
                    x = rect.x() + rect.width()
                    y = rect.y() + rect.height()
                    w = -rect.width()
                    h = -rect.height()
                    ##rect = QRect(x, y, w, h)
                elif (rect.width() < 0 and rect.height() > 0):
                    x = rect.x() + rect.width()
                    y = rect.y()
                    w = -rect.width()
                    h = rect.height()
                    ##rect = QRect(x, y, w, h)
                elif (rect.width() > 0 and rect.height() < 0):
                    x = rect.x()
                    y = rect.y() + rect.height()
                    w = rect.width()
                    h = -rect.height()
                    ##rect = QRect(x, y, w, h)
                else:
                    x = rect.x()
                    y = rect.y()
                    w = rect.width()
                    h = rect.height()
                    ##rect = QRect(x, y, w, h)
                if width < 0:
                    width = width * -1
                if height < 0:
                    height = height * -1
                #AlexaObject.Height = height
                #AlexaObject.Width = width
                self.caller.CropRegionH = h
                self.caller.CropRegionW = w
                self.caller.CropRegionX = x
                self.caller.CropRegionY = y
                self.caller.UpdateCropSpinBoxes()
            self.update()
            #self.closeExtended()

    def keyPressEvent(self, event):
        # Esc aborts the selection and returns to the caller window.
        if event.key() == Qt.Key_Escape:
            self.closeExtended2()

    def closeExtended2(self):
        """Close the overlay and show the caller window again."""
        self.caller.setVisible(True)
        self.close()

    def closeExtended(self):
        """Close the overlay and, on Windows, re-show the IDE windows."""
        if sys.platform == 'win32':
            toplist = []
            winlist = []
            def enum_callback(hwnd, results):
                winlist.append((hwnd, win32gui.GetWindowText(hwnd)))
            win32gui.EnumWindows(enum_callback, toplist)
            if self.caller.plugin.undockWindowOpened is True:
                firefox = [(hwnd, title) for hwnd, title in
                           winlist if 'exa-ide' in title.lower() or 'exa tool' in title.lower() and 'about' not in title.lower()]
            else:
                firefox = [(hwnd, title) for hwnd, title in
                           winlist if 'exa-ide' in title.lower() and 'about' not in title.lower() and 'exa tool' not in title.lower()]
            # just grab the first window that matches
            #firefox = firefox[0]
            for ninja in firefox:
                win32gui.ShowWindow(ninja[0], win32con.SW_SHOW)
                #print str(ninja[0]) + " " + ninja[1]
        self.caller.setVisible(True)
        self.close()
|
pymedusa/SickRage | ext/diskcache/__init__.py | Python | gpl-3.0 | 835 | 0.001198 | "DiskCache: disk and file backed cache."
from .core import Cache, Disk, UnknownFileWarning, EmptyDirWarning, Timeout
from .core import DEFAULT_SETTINGS, EVICTION_POLICY
from .fanout import FanoutCache
from .persistent import Deque, Index
__all__ = [
'Cache',
'Disk',
| 'UnknownFileWarning',
'EmptyDirWarning',
'Timeout',
'DEFAULT_SETTINGS',
'EVICTION_POLICY',
'FanoutCache',
'Deque',
'Index',
]
try:
from .djangocache import DjangoCache # pylint: disable=wrong-import-position
__all__.append('DjangoCache')
except Exception: # pylint: disable=broad-except
# Django not installed or not setup so ignore.
pass
__title__ | = 'diskcache'
__version__ = '2.9.0'
__build__ = 0x020900
__author__ = 'Grant Jenks'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016 Grant Jenks'
|
terracoin/terracoin | qa/rpc-tests/txindex.py | Python | mit | 2,703 | 0.0037 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test txindex generation and fetching
#
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class TxIndexTest(BitcoinTestFramework):
    """Check that nodes running with -txindex serve verbose raw transactions."""

    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    def setup_network(self):
        self.nodes = []
        # Nodes 0/1 are "wallet" nodes
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-txindex"]))
        # Nodes 2/3 are used for testing
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-txindex"]))
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        print("Mining blocks...")
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        print("Testing transaction index...")
        # Key pair that owns the P2PKH output below (kept for reference).
        privkey = "cU4zhap7nPJAWeMFu4j6jLrfPmqakDAzy8zn8Fhb3oEevdm4e5Lc"
        address = "yeMpGzMj3rhtnz48XsfpB8itPHhHtgxLc3"
        addressHash = binascii.unhexlify("C5E4FB9171C22409809A3E8047A29C83886E325D")
        scriptPubKey = CScript([OP_DUP, OP_HASH160, addressHash, OP_EQUALVERIFY, OP_CHECKSIG])
        # Spend the first wallet UTXO into the P2PKH script above.
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        amount = unspent[0]["amount"] * 100000000
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        tx.vout = [CTxOut(amount, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        # Check verbose raw transaction results
        verbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1)
        assert_equal(verbose["vout"][0]["valueSat"], 5000000000)
        assert_equal(verbose["vout"][0]["value"], 50)
        print("Passed\n")

if __name__ == '__main__':
    TxIndexTest().main()
|
alucryd/django-pkgbuild | django_pkgbuild/__init__.py | Python | gpl-3.0 | 59 | 0 | default_app_c | onfig = 'django_pkgbuild.apps.Pkgbu | ildConfig'
|
w495/python-video-shot-detector | shot_detector/filters/compound/__init__.py | Python | bsd-3-clause | 201 | 0 | # -*- coding: utf8 -*-
"""
Compound | filters
"""
from __future__ import absolute_import, division, print_function
from .mole_filter import mo | le_filter
from .mole_filter import simple_mole_filter
|
karllessard/tensorflow | tensorflow/lite/testing/op_tests/equal.py | Python | apache-2.0 | 2,749 | 0.001819 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for equal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_equal_tests(options):
  """Make a set of tests to do equal."""
  # Each dict is a grid of parameter values; the zip-test harness expands the
  # cross product into individual test cases.
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64, tf.string],
      "input_shape_pair": [([], []), ([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
      "fully_quantize": [False],
  }, {
      "input_dtype": [tf.float32],
      "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]), ([2, 3, 3], [2, 3])],
      "fully_quantize": [True],
  }]

  def build_graph(parameters):
    """Build the equal op testing graph."""
    input_value1 = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape_pair"][0])
    input_value2 = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input2",
        shape=parameters["input_shape_pair"][1])
    out = tf.equal(input_value1, input_value2)
    return [input_value1, input_value2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Feed random tensors of the requested dtype/shape and capture the
    # TF-computed reference outputs.
    input_value1 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][0])
    input_value2 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_pair"][1])
    return [input_value1, input_value2], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=5)
|
netjunki/trac-Pygit2 | trac/web/main.py | Python | bsd-3-clause | 29,627 | 0.001586 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005 Matthew Good <trac@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
# Matthew Good <trac@matt-good.net>
import cgi
import dircache
import fnmatch
from functools import partial
import gc
import locale
import os
import pkg_resources
from pprint import pformat, pprint
import re
import sys
from genshi.builder import Fragment, tag
from genshi.output import DocType
from genshi.template import TemplateLoader
from trac import __version__ as TRAC_VERSION
from trac.config import BoolOption, ExtensionOption, Option, \
OrderedExtensionsOption
from trac.core import *
from trac.env import open_environment
from trac.loader import get_plugin_info, match_plugins_to_frames
from trac.perm import PermissionCache, PermissionError
from trac.resource import ResourceNotFound
from trac.util import arity, get_frame_info, get_last_traceback, hex_entropy, \
read_file, safe_repr, translation
from trac.util.concurrency import threading
from trac.util.datefmt import format_datetime, localtz, timezone, user_time
from trac.util.text import exception_to_unicode, shorten_line, to_unicode
from trac.util.translation import _, get_negotiated_locale, has_babel, \
safefmt, tag_
from trac.web.api import *
from trac.web.chrome import Chrome
from trac.w | eb.href import Href
from trac.web.session import Session
#: This U | RL is used for semi-automatic bug reports (see
#: `send_internal_error`). Please modify it to point to your own
#: Trac instance if you distribute a patched version of Trac.
default_tracker = 'http://trac.edgewall.org'
class FakeSession(dict):
    """Minimal stand-in for a real `Session`.

    Behaves like a plain dict, reports no session id, and silently
    discards `save()` calls.
    """
    sid = None

    def save(self):
        """Persisting a fake session is a no-op."""
class FakePerm(dict):
    """Permission stub that denies every permission check."""

    def require(self, *args):
        """Report every required permission as missing."""
        return False

    # Calling the stub (e.g. ``perm('wiki')``) yields the stub itself so
    # chained permission lookups keep working.
    def __call__(self, *args):
        return self
class RequestDispatcher(Component):
"""Web request dispatcher.
This component dispatches incoming requests to registered
handlers. Besides, it also takes care of user authentication and
request pre- and post-processing.
"""
required = True
authenticators = ExtensionPoint(IAuthenticator)
handlers = ExtensionPoint(IRequestHandler)
filters = OrderedExtensionsOption('trac', 'request_filters',
IRequestFilter,
doc="""Ordered list of filters to apply to all requests
(''since 0.10'').""")
default_handler = ExtensionOption('trac', 'default_handler',
IRequestHandler, 'WikiModule',
"""Name of the component that handles requests to the base
URL.
Options include `TimelineModule`, `RoadmapModule`,
`BrowserModule`, `QueryModule`, `ReportModule`, `TicketModule`
and `WikiModule`. The default is `WikiModule`. (''since 0.9'')""")
default_timezone = Option('trac', 'default_timezone', '',
"""The default timezone to use""")
default_language = Option('trac', 'default_language', '',
"""The preferred language to use if no user preference has
been set. (''since 0.12.1'')
""")
default_date_format = Option('trac', 'default_date_format', '',
"""The date format. Valid options are 'iso8601' for selecting
ISO 8601 format, or leave it empty which means the default
date format will be inferred from the browser's default
language. (''since 1.0'')
""")
use_xsendfile = BoolOption('trac', 'use_xsendfile', 'false',
"""When true, send a `X-Sendfile` header and no content when sending
files from the filesystem, so that the web server handles the content.
This requires a web server that knows how to handle such a header,
like Apache with `mod_xsendfile` or lighttpd. (''since 1.0'')
""")
# Public API
def authenticate(self, req):
for authenticator in self.authenticators:
authname = authenticator.authenticate(req)
if authname:
return authname
else:
return 'anonymous'
def dispatch(self, req):
"""Find a registered handler that matches the request and let
it process it.
In addition, this method initializes the data dictionary
passed to the the template and adds the web site chrome.
"""
self.log.debug('Dispatching %r', req)
chrome = Chrome(self.env)
# Setup request callbacks for lazily-evaluated properties
req.callbacks.update({
'authname': self.authenticate,
'chrome': chrome.prepare_request,
'perm': self._get_perm,
'session': self._get_session,
'locale': self._get_locale,
'lc_time': self._get_lc_time,
'tz': self._get_timezone,
'form_token': self._get_form_token,
'use_xsendfile': self._get_use_xsendfile,
})
try:
try:
# Select the component that should handle the request
chosen_handler = None
try:
for handler in self.handlers:
if handler.match_request(req):
chosen_handler = handler
break
if not chosen_handler:
if not req.path_info or req.path_info == '/':
chosen_handler = self.default_handler
# pre-process any incoming request, whether a handler
# was found or not
chosen_handler = self._pre_process_request(req,
chosen_handler)
except TracError, e:
raise HTTPInternalError(e)
if not chosen_handler:
if req.path_info.endswith('/'):
# Strip trailing / and redirect
target = req.path_info.rstrip('/').encode('utf-8')
if req.query_string:
target += '?' + req.query_string
req.redirect(req.href + target, permanent=True)
raise HTTPNotFound('No handler matched request to %s',
req.path_info)
req.callbacks['chrome'] = partial(chrome.prepare_request,
handler=chosen_handler)
# Protect against CSRF attacks: we validate the form token
# for all POST requests with a content-type corresponding
# to form submissions
if req.method == 'POST':
ctype = req.get_header('Content-Type')
if ctype:
ctype, options = cgi.parse_header(ctype)
if ctype in ('application/x-www-form-urlencoded',
'multipart/form-data') and \
req.args.get('__FORM_TOKEN') != req.form_token:
if self.env.secure_cookies and req.scheme == 'http':
msg = _('Secure cookies are enabled, you must '
'use https to submit forms.')
else:
msg = _('Do you have cookies enabled?')
raise HTTPBadRequest(_('Missing or invalid form token.'
' %(msg)s', msg=msg))
# Process the request and render the template
|
ppecio/py-html-diff | pyhtmldiff/utils.py | Python | apache-2.0 | 518 | 0 | # -*- coding: utf-8 -*-
"" | "Created on 23.06.17
.. moduleauthor:: Paweł Pecio
"""
def longzip(a, b):
    """Like `izip` but yields `None` for missing items.

    Pairs the items of *a* and *b* positionally; once the shorter iterable
    is exhausted, the missing side is filled with ``None`` (the same
    contract as ``itertools.zip_longest``).
    """
    aiter = iter(a)
    biter = iter(b)
    for item1 in aiter:
        try:
            item2 = next(biter)
        except StopIteration:
            # *b* ran out first: emit the item of *a* we already fetched
            # (the previous implementation silently dropped it), then the
            # rest of *a*.
            yield item1, None
            for item1 in aiter:
                yield item1, None
            return
        yield item1, item2
    # *a* ran out first: drain the remainder of *b*.
    for item2 in biter:
        yield None, item2
def irepeat(a, b):
    """Yield ``(a, item)`` for every item of *b*."""
    for element in iter(b):
        yield a, element
|
joaovitorsilvestre/MongographQL | graphene_mongodb/mutation/__init__.py | Python | mit | 2,026 | 0.003949 | import graphene
from graphene.utils.str_converters import to_snake_case
from graphene_mongodb.query import mongo_to_graphene
def gen_mutation(model, graphene_schema, operators_mutation, fields_mutation, mutate_func, validator):
    """ We need to create a class that seems as follows (http://docs.graphene-python.org/en/latest/types/mutations/):
    class CreatePerson(graphene.Mutation):
        class Input:
            name = graphene.String()
        ok = graphene.Boolean()
        person = graphene.Field(lambda: Person)
        @staticmethod
        def mutate(root, args, context, info):
            person = Person(name=args.get('name'))
            ok = True
            return CreatePerson(person=person, ok=ok)

    Args:
        model: the mongoengine Document class being mutated.
        graphene_schema: the graphene ObjectType mirroring *model*.
        operators_mutation: dict of graphene argument fields for the mutation.
        fields_mutation: fields to copy onto the resulting graphene object.
        mutate_func: optional user-supplied resolver; when falsy a generic
            create-and-save resolver is used instead.
        validator: optional callable invoked with (model, kwargs, {}, {})
            before the mutation runs.
    Returns:
        A dynamically created ``graphene.Mutation`` subclass.
    """
    # Resolver used when the caller supplied a custom ``mutate_func``.
    def user_mutate(root, info, **kwargs):
        if validator:
            validator(model, kwargs, {}, {})
        obj = mutate_func(kwargs, info.context)
        # The user function must hand back an instance of *model*; anything
        # else cannot be converted to the graphene schema.
        if not isinstance(obj, model):
            raise TypeError('Failed to resolve mutation of the schema {}'
                            ' because mutate function must return a instance of {}, and the return type was {}.'
                            .format(graphene_schema.__name__, model.__name__, type(obj)))
        graphene_obj = mongo_to_graphene(obj, graphene_schema, fields_mutation)
        return Create(**{to_snake_case(model.__name__): graphene_obj})
    # Default resolver: build and save the document straight from kwargs.
    def generic_mutate(root, info, **kwargs):
        if validator:
            validator(model, kwargs, {}, {})
        obj = model(**kwargs)
        obj.save()
        graphene_obj = mongo_to_graphene(obj, graphene_schema, fields_mutation)
        return Create(**{to_snake_case(model.__name__): graphene_obj})
    # Dynamically build the graphene.Mutation subclass: ``Arguments`` carries
    # the accepted input fields, the snake_cased model name carries the
    # result, and ``mutate`` is whichever resolver applies.
    Create = type('Create' + model.__name__, (graphene.Mutation,), {
        'Arguments': type('Arguments', (), operators_mutation),
        to_snake_case(model.__name__): graphene.Field(lambda: graphene_schema),
        'mutate': staticmethod(generic_mutate) if not mutate_func else staticmethod(user_mutate)
    })
    return Create
ray-project/ray | python/ray/tests/test_placement_group_mini_integration.py | Python | apache-2.0 | 4,294 | 0.000466 | import pytest
import sys
import time
from random import random
try:
import pytest_timeout
except Imp | ortError:
pytest_timeout = None
import ray
import ray.cluster_utils
from ray._private.test_utils import wait_for_condition
from ray.util.placement_group import placement_group, remove_placement_group
def run_mini_integration_test(cluster, pg_removal=True, num_pgs=999):
    """Stress placement-group creation (and optionally removal) on *cluster*.

    :param cluster: a ray cluster_utils cluster fixture; nodes are added here.
    :param pg_removal: when True, roughly half the created placement groups
        are removed concurrently with task scheduling on the others.
    :param num_pgs: number of placement groups to create; also used as the
        per-node GPU / custom-resource quantity.
    """
    # This test checks the race condition between remove / creation.
    # This test shouldn't be flaky. If it fails on the last ray.get
    # that highly likely indicates a real bug.
    # It also runs 3 times to make sure the test consistently passes.
    # When 999 resource quantity is used, it fails about every other time
    # when the test was written.
    resource_quantity = num_pgs
    num_nodes = 5
    custom_resources = {"pg_custom": resource_quantity}
    # Create pg that uses 1 resource of cpu & custom resource.
    num_pg = resource_quantity
    # TODO(sang): Cluster setup. Remove when running in real clusters.
    nodes = []
    for _ in range(num_nodes):
        nodes.append(
            cluster.add_node(
                num_cpus=3, num_gpus=resource_quantity, resources=custom_resources
            )
        )
    cluster.wait_for_nodes()
    num_nodes = len(nodes)
    ray.init(address=cluster.address)
    while not ray.is_initialized():
        time.sleep(0.1)
    # One bundle per node; STRICT_SPREAD below forces one bundle per node.
    bundles = [{"GPU": 1, "pg_custom": 1}] * num_nodes

    @ray.remote(num_cpus=0, num_gpus=1, max_calls=0)
    def mock_task():
        time.sleep(0.1)
        return True

    @ray.remote(num_cpus=0)
    def pg_launcher(num_pgs_to_create):
        print("Creating pgs")
        pgs = []
        for i in range(num_pgs_to_create):
            pgs.append(placement_group(bundles, strategy="STRICT_SPREAD"))
        pgs_removed = []
        pgs_unremoved = []
        # Randomly choose placement groups to remove.
        if pg_removal:
            print("removing pgs")
        for pg in pgs:
            if random() < 0.5 and pg_removal:
                pgs_removed.append(pg)
            else:
                pgs_unremoved.append(pg)
        print(len(pgs_unremoved))
        tasks = []
        # Randomly schedule tasks or actors on placement groups that
        # are not removed.
        for pg in pgs_unremoved:
            for i in range(num_nodes):
                tasks.append(
                    mock_task.options(
                        placement_group=pg, placement_group_bundle_index=i
                    ).remote()
                )
        # Remove the rest of placement groups.
        if pg_removal:
            for pg in pgs_removed:
                remove_placement_group(pg)
        ray.get(tasks)
        # Since placement groups are scheduled, remove them.
        for pg in pgs_unremoved:
            remove_placement_group(pg)

    # Three concurrent launchers, splitting the PG budget, to maximize the
    # create/remove interleaving.
    pg_launchers = []
    for _ in range(3):
        pg_launchers.append(pg_launcher.remote(num_pg // 3))
    ray.get(pg_launchers, timeout=240)
    ray.shutdown()
    # Reconnect and verify every resource (including internal "_group_"
    # markers) was returned to the cluster.
    ray.init(address=cluster.address)
    cluster_resources = ray.cluster_resources()
    cluster_resources.pop("memory")
    cluster_resources.pop("object_store_memory")

    def wait_for_resource_recovered():
        for resource, val in ray.available_resources().items():
            if resource in cluster_resources and cluster_resources[resource] != val:
                return False
            if "_group_" in resource:
                return False
        return True

    wait_for_condition(wait_for_resource_recovered)
@pytest.mark.parametrize("execution_number", range(1))
def test_placement_group_create_only(ray_start_cluster, execution_number):
    """PG mini integration test without remove_placement_group

    When there are failures, this will help identifying if issues are
    from removal or not.
    """
    # Creation-only variant: smaller PG count and no concurrent removal.
    run_mini_integration_test(ray_start_cluster, pg_removal=False, num_pgs=333)
@pytest.mark.parametrize("execution_number", range(3))
def test_placement_group_remove_stress(ray_start_cluster, execution_number):
    """Full PG mini integration test that runs many
    concurrent remove_placement_group
    """
    # Repeated 3x (via parametrize) because the original race reproduced
    # only intermittently.
    run_mini_integration_test(ray_start_cluster, pg_removal=True, num_pgs=999)
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
|
ericrrichards/rpgEngine | RpgEngine/RpgEngine/Scripts/Main.py | Python | mit | 1,809 | 0.012175 | import math
print "loaded script"
#LoadScript("Map.py")
LoadScript("Entity.py")
LoadScript("StateMachine.py")
LoadScript("WaitState.py")
LoadScript("Util.py")
LoadScript("Actions.py")
LoadScript("Trigger.py")
gTiledMap = TileMap.LoadMap("small_room.json")
gMap = Map(gTiledMap)
gMap.GotoTile(5,5)
class Character:
    """A map character: wraps a sprite Entity plus a wait/move state machine."""

    def __init__(self, entity):
        # Frame indices into the walk-cycle sprite sheet, one row per facing
        # direction (.NET List[int] -- this script runs under IronPython).
        # Presumably 4 animation frames per direction; confirm against
        # walk_cycle.png.
        self.AnimUp = List[int]([0,1,2,3])
        self.AnimRight = List[int]([4,5,6,7])
        self.AnimDown = List[int]([8,9,10,11])
        self.AnimLeft = List[int]([12,13,14,15])
        self.Entity = entity
        # The state table returns states via lambdas so the state objects
        # can be created after the machine itself.
        self.Controller = StateMachine({
            "wait": lambda: self.WaitState,
            "move": lambda: self.MoveState
        })
        self.WaitState = WaitState(self, gMap)
        self.MoveState = MoveState(self, gMap)
        # Start idle.
        self.Controller.Change("wait", None)
heroDef = EntityDef("walk_cycle.png", 16, 24, 8, 11,3, 0)
gHero = Character(Entity(heroDef))
gUpDoorTeleport = Actions.Teleport(gMap, 11, 3)
g | DownDoorTeleport = Actions.Teleport(gMap, 10, 11)
gDownDoorTeleport(None, gHero.Entity)
gTriggerTop = Trigger(gDownDoorTeleport, None, None)
gTriggerBottom = Trigger(gUpDoorTeleport, None, None)
gMap.AddTrigger(10, 12, gTriggerBottom)
gMap.AddTrigger(11, 2, gTriggerTop)
def Update():
    # NOTE(review): presumably invoked once per frame by the host engine
    # (it polls GetDeltaTime and input) -- confirm against the C# side.
    dt = GetDeltaTime()
    # Center the camera on the hero's sprite position.
    playerPos = gHero.Entity.Sprite.Position
    gMap.CamX = int(math.floor(playerPos.X))
    gMap.CamY = int(math.floor(playerPos.Y))
    Renderer.Translate(-gMap.CamX, -gMap.CamY)
    # Draw map layers in order, inserting the hero's sprite on its own
    # layer so later layers can overlap it.
    layerCount = gMap.LayerCount
    for i in range(0, layerCount):
        gMap.RenderLayer(Renderer, i)
        if i == gHero.Entity.Layer:
            Renderer.DrawSprite(gHero.Entity.Sprite)
    gHero.Controller.Update(dt)
    # Debug shortcut: space triggers the upper-door teleport action.
    if IsKeyDown(Keys.Space):
        gUpDoorTeleport(None, gHero.Entity)
|
blablacar/exabgp | lib/exabgp/dep/objgraph.py | Python | bsd-3-clause | 31,092 | 0.000032 | """
Tools for drawing Python object reference graphs with graphviz.
You can find documentation online at http://mg.pov.lt/objgraph/
Copyright (c) 2008-2015 Marius Gedminas <marius@pov.lt> and contributors
Released under the MIT licence.
"""
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so | , subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AU | THORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import codecs
import gc
import re
import inspect
import types
import operator
import os
import subprocess
import tempfile
import sys
import itertools
try:
from types import InstanceType
except ImportError:
# Python 3.x compatibility
InstanceType = None
__author__ = "Marius Gedminas (marius@gedmin.as)"
__copyright__ = "Copyright (c) 2008-2015 Marius Gedminas and contributors"
__license__ = "MIT"
__version__ = "2.0.1"
__date__ = "2015-07-28"
try:
basestring
except NameError:
# Python 3.x compatibility
basestring = str
try:
iteritems = dict.iteritems
except AttributeError:
# Python 3.x compatibility
iteritems = dict.items
def count(typename, objects=None):
    """Count GC-tracked objects whose class name matches *typename*.

    *typename* may be a short name ('dict') or a fully-qualified one
    ('mymodule.MyClass'); a dotted name switches to module-aware matching.
    When *objects* is None, all objects tracked by the garbage collector
    are examined.  Simple objects like int or str are not GC-tracked.
    """
    if objects is None:
        objects = gc.get_objects()
    try:
        # Dotted names need the module-qualified formatter.
        name_of = _long_typename if '.' in typename else _short_typename
        return sum(1 for obj in objects if name_of(obj) == typename)
    finally:
        # Drop our reference so this frame doesn't keep the object list alive.
        del objects
def typestats(objects=None, shortnames=True):
    """Tally the number of instances of each type tracked by the GC.

    Returns a dict mapping type names to counts.  With *shortnames* true,
    classes sharing a name but defined in different modules are lumped
    together.  Simple objects like int or str are not GC-tracked.
    """
    if objects is None:
        objects = gc.get_objects()
    try:
        name_of = _short_typename if shortnames else _long_typename
        tally = {}
        for obj in objects:
            key = name_of(obj)
            tally[key] = tally.get(key, 0) + 1
        return tally
    finally:
        # Drop our reference so this frame doesn't keep the object list alive.
        del objects
def most_common_types(limit=10, objects=None, shortnames=True):
    """Rank type names by instance count, most frequent first.

    Returns a list of ``(type_name, count)`` pairs truncated to *limit*
    entries; pass ``limit=None`` to get them all.  The caveats of
    :func:`typestats` apply.
    """
    ranked = sorted(typestats(objects, shortnames=shortnames).items(),
                    key=operator.itemgetter(1), reverse=True)
    return ranked[:limit] if limit else ranked
def show_most_common_types(limit=10, objects=None, shortnames=True):
    """Print a name-aligned table of the types with the most instances.

    One line per type, padded so the counts line up in a column.  The
    caveats of :func:`typestats` apply.
    """
    entries = most_common_types(limit, objects, shortnames=shortnames)
    name_width = max(len(name) for name, _ in entries)
    for name, how_many in entries:
        print(name.ljust(name_width) + ' %i' % how_many)
# NOTE: the mutable default below is *deliberate*: ``peak_stats`` persists
# across calls and serves as the memo of previously-seen peak counts.
def show_growth(limit=10, peak_stats={}, shortnames=True):
    """Show the increase in peak object counts since last call.

    Limits the output to ``limit`` largest deltas. You may set ``limit`` to
    None to see all of them.

    Uses and updates ``peak_stats``, a dictionary from type names to previously
    seen peak object counts. Usually you don't need to pay attention to this
    argument.

    The caveats documented in :func:`typestats` apply.

    Example:

        >>> show_growth()
        wrapper_descriptor       970       +14
        tuple                  12282       +10
        dict                    1922        +7
        ...

    .. versionadded:: 1.5
    .. versionchanged:: 1.8
       New parameter: ``shortnames``.
    """
    gc.collect()
    stats = typestats(shortnames=shortnames)
    deltas = {}
    for name, count in iteritems(stats):
        old_count = peak_stats.get(name, 0)
        if count > old_count:
            # Only growth is reported; shrinkage leaves the recorded peak.
            deltas[name] = count - old_count
            peak_stats[name] = count
    deltas = sorted(deltas.items(), key=operator.itemgetter(1),
                    reverse=True)
    if limit:
        deltas = deltas[:limit]
    if deltas:
        width = max(len(name) for name, count in deltas)
        for name, delta in deltas:
            print('%-*s%9d %+9d' % (width, name, stats[name], delta))
def get_leaking_objects(objects=None):
    """Return objects that do not have any referents.

    These could indicate reference-counting bugs in C code.  Or they
    could be legitimate.

    Note that the GC does not track simple objects like int or str.

    .. versionadded:: 1.7
    """
    if objects is None:
        gc.collect()
        objects = gc.get_objects()
    try:
        ids = set(id(i) for i in objects)
        for i in objects:
            ids.difference_update(id(j) for j in gc.get_referents(i))
        # this then is our set of objects without referrers
        return [i for i in objects if id(i) in ids]
    finally:
        # Clear this frame's own references so they don't pollute the
        # caller's leak analysis.  Bind before deleting: when ``objects``
        # is empty the loop variable ``i`` is never created, and the
        # original ``del objects, i`` raised NameError.
        objects = i = None
        del objects, i
def by_type(typename, objects=None):
"""Return objects tracked by the garbage collector with a given class name.
Example:
>>> by_type('MyClass')
[<mymodule.MyClass object at 0x...>]
Note that the GC does not track simple objects like int or str.
.. versionchanged:: 1.7
New parameter: ``objects``.
.. versionchanged:: 1.8
Accepts fully-qualified type names (i.e. 'package.module.ClassName')
as well as short type names (i.e. 'ClassName').
"""
if objects is None:
|
dongguangming/requests-oauthlib | docs/conf.py | Python | isc | 8,489 | 0.007186 | # -*- coding: utf-8 -*-
#
# Requests-OAuthlib documentation build configuration file, created by
# sphinx-quickstart on Fri May 10 11:49:01 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from requests_oauthlib import __version__
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Requests-OAuthlib'
copyright = u'2014, Kenneth Reitz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If tru | e, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and modul | eauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Requests-OAuthlibdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Requests-OAuthlib.tex', u'Requests-OAuthlib Documentation',
u'Requests-OAuthlib Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'requests-oauthlib', u'Requests-OAuthlib Documentation',
[u'Requests-OAuthlib Contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Requests-OAuthlib', u'Requests-OAuthlib Documentation',
u'Requests-OAuthlib Contributors', 'Requests-OAuthlib', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# |
modulexcite/wal-e | wal_e/tar_partition.py | Python | bsd-3-clause | 19,584 | 0.000306 | #!/usr/bin/env python
"""
Converting a file tree into partitioned, space-contr | olled TAR files | .
This module attempts to address the following problems:
* Storing individual small files can be very time consuming because of
per-file overhead.
* It is desirable to maintain UNIX metadata on a file, and that's not
always possible without boxing the file in another format, such as
TAR.
* Because multiple connections can allow for better throughput,
partitioned TAR files can be parallelized for download while being
pipelined for extraction and decompression, all to the same base
tree.
* Ensuring that partitions are of a predictable size: the size to be
added is bounded, as sizes must be passed up-front. It is assumed
that if the dataset is "hot" that supplementary write-ahead-logs
should exist to bring the data to a consistent state.
* Representation of empty directories and symbolic links.
* Avoiding volumes with "too many" individual members to avoid
consuming too much memory with metadata.
The *approximate* maximum size of a volume is tunable. If any archive
members are too large, a TarMemberTooBig exception is raised: in this
case, it is necessary to raise the partition size. The volume size
does *not* include Tar metadata overhead, and this is why one cannot
rely on an exact maximum (without More Programming).
Why not GNU Tar with its multi-volume functionality: it's relatively
difficult to limit the size of an archive member (a problem for fast
growing files that are also being WAL-logged), and GNU Tar uses
interactive prompts to ask for the right tar file to continue the next
extraction. This coupling between tarfiles makes the extraction
process considerably more complicated.
"""
import collections
import errno
import os
import tarfile
from wal_e import log_help
from wal_e import copyfileobj
from wal_e import pipebuf
from wal_e import pipeline
from wal_e.exception import UserException
logger = log_help.WalELogger(__name__)
PG_CONF = ('postgresql.conf',
'pg_hba.conf',
'recovery.conf',
'pg_ident.conf')
class StreamPadFileObj(object):
    """
    Layer on a file to provide a precise stream byte length

    This file-like-object accepts an underlying file-like-object and a
    target size.  Once the target size is reached, no more bytes will
    be returned.  Furthermore, if the underlying stream runs out of
    bytes, '\0' will be returned until the target size is reached.
    """

    # Try to save space via __slots__ optimization: many of these can
    # be created on systems with many small files that are packed into
    # a tar partition, and memory blows up when instantiating the
    # tarfile instance full of these.
    __slots__ = ('underlying_fp', 'target_size', 'pos')

    def __init__(self, underlying_fp, target_size):
        self.underlying_fp = underlying_fp
        self.target_size = target_size
        # Bytes handed out so far -- data plus NUL padding.
        self.pos = 0

    def read(self, size):
        max_readable = min(self.target_size - self.pos, size)
        ret = self.underlying_fp.read(max_readable)
        # Advance by the number of bytes *returned* (data plus padding),
        # not merely the bytes obtained from the underlying stream.
        # Counting only len(ret) meant that once the underlying stream
        # hit EOF, pos stopped advancing and this object yielded '\0'
        # padding forever instead of stopping at target_size.
        self.pos += max_readable
        return ret + '\0' * (max_readable - len(ret))

    def close(self):
        return self.underlying_fp.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False
class TarMemberTooBigError(UserException):
    """A file queued for archiving exceeds the per-member size limit."""

    def __init__(self, member_name, limited_to, requested, *args, **kwargs):
        # Keep the offending member and both sizes around for reporting.
        self.member_name = member_name
        self.max_size = limited_to
        self.requested = requested
        msg = 'Attempted to archive a file that is too large.'
        hint = ('There is a file in the postgres database directory that '
                'is larger than %d bytes. If no such file exists, please '
                'report this as a bug. In particular, check %s, which appears '
                'to be %d bytes.') % (limited_to, member_name, requested)
        UserException.__init__(self, msg=msg, hint=hint, *args, **kwargs)
class TarBadRootError(Exception):
    """Error carrying the archive root directory that triggered it."""

    def __init__(self, root, *args, **kwargs):
        # Remember the offending root so callers can report it.
        self.root = root
        super(TarBadRootError, self).__init__(*args, **kwargs)
class TarBadPathError(Exception):
    """Raised when a root directory does not contain all file paths."""

    def __init__(self, root, offensive_path, *args, **kwargs):
        # Keep both ends of the mismatch for error reporting.
        self.root = root
        self.offensive_path = offensive_path
        super(TarBadPathError, self).__init__(*args, **kwargs)
ExtendedTarInfo = collections.namedtuple('ExtendedTarInfo',
'submitted_path tarinfo')
# 1.5 GiB is 1610612736 bytes, and Postgres allocates 1 GiB files as a
# nominal maximum. This must be greater than that.
PARTITION_MAX_SZ = 1610612736
# Maximum number of members in a TarPartition segment.
#
# This is to restrain memory consumption when segmenting the
# partitions. Some workloads can produce many tiny files, so it's
# important to try to choose some happy medium between avoiding
# excessive bloat in the number of partitions and making the wal-e
# process effectively un-fork()-able for performing any useful work.
#
# 262144 is 256 KiB.
PARTITION_MAX_MEMBERS = int(PARTITION_MAX_SZ / 262144)
def _fsync_files(filenames):
"""Call fsync() a list of file names
The filenames should be absolute paths already.
"""
touched_directories = set()
mode = os.O_RDONLY
# Windows
if hasattr(os, 'O_BINARY'):
mode |= os.O_BINARY
for filename in filenames:
fd = os.open(filename, mode)
os.fsync(fd)
os.close(fd)
touched_directories.add(os.path.dirname(filename))
# Some OSes also require us to fsync the directory where we've
# created files or subdirectories.
if hasattr(os, 'O_DIRECTORY'):
for dirname in touched_directories:
fd = os.open(dirname, os.O_RDONLY | os.O_DIRECTORY)
os.fsync(fd)
os.close(fd)
def cat_extract(tar, member, targetpath):
    """Extract a regular file member using cat for async-like I/O

    Mostly adapted from tarfile.py.

    :param tar: an open tarfile.TarFile to extract from.
    :param member: TarInfo of the regular-file member to extract.
    :param targetpath: destination path for the extracted file.
    """
    assert member.isreg()

    # Fetch the TarInfo object for the given name and build the
    # destination pathname, replacing forward slashes to platform
    # specific separators.
    targetpath = targetpath.rstrip("/")
    targetpath = targetpath.replace("/", os.sep)

    # Create all upper directories.
    upperdirs = os.path.dirname(targetpath)
    if upperdirs and not os.path.exists(upperdirs):
        try:
            # Create directories that are not part of the archive with
            # default permissions.
            os.makedirs(upperdirs)
        except EnvironmentError as e:
            if e.errno == errno.EEXIST:
                # Ignore an error caused by the race of
                # the directory being created between the
                # check for the path and the creation.
                pass
            else:
                raise

    # Stream the member's bytes through the external "cat" pipeline so the
    # copy gets pipelined buffering instead of plain blocking reads.
    with open(targetpath, 'wb') as dest:
        with pipeline.get_cat_pipeline(pipeline.PIPE, dest) as pl:
            fp = tar.extractfile(member)
            copyfileobj.copyfileobj(fp, pl.stdin)

    # Restore the metadata recorded in the archive for the new file.
    tar.chown(member, targetpath)
    tar.chmod(member, targetpath)
    tar.utime(member, targetpath)
class TarPartition(list):
def __init__(self, name, *args, **kwargs):
self.name = name
list.__init__(self, *args, **kwargs)
@staticmethod
def _padded_tar_add(tar, et_info):
try:
with open(et_info.submitted_path, 'rb') as raw_file:
with StreamPadFileObj(raw_file,
et_info.tarinfo.size) as f:
tar.addfile(et_info.tarinfo, f)
except EnvironmentError, e:
if (e.errno == errno.ENOENT and
e.filename == et_info.submitted_path):
# log a NOTICE/INFO that the file was unlinked.
# Ostensibly harmless (such unlinks should be replayed
# in the WAL) but good to know.
logger.debug(
msg='tar member a |
muxiaobai/CourseExercises | python/kaggle/data-visual/plot%26seaborn.py | Python | gpl-2.0 | 2,005 | 0.011837 |
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pandas import Series,DataFrame
import seaborn as sns
# In[2]:
#https://www.kaggle.com/residentmario/bivariate-plotting-with-pandas/data
reviews = pd.read_csv("winemag-data_first150k.csv", index_col=0)
reviews.head()
# ### sns.countplot() sns.kdeplot() 核密度估计 sns.jointplot() sns.boxplot() sns.violinplot()
# In[4]:
sns.countplot(reviews['points'])
#reviews['points'].value_counts().sort_index().plot.bar()
plt.show()
# In[5]:
sns.kdeplot(reviews.query('price < 200').price)
#reviews[reviews['price'] < 200]['price'].value_counts().sort_index().plot.line()
plt.show()
# In[6]:
# 出现锯齿状
reviews[reviews['price'] < 200]['price'].value_counts().sort_index().plot.line()
plt.show()
# In[7]:
#两个类别的关系
sns.kdeplot(reviews[reviews['price'] < 200].loc[:, ['price', 'points']].dropna().sample(5000))
plt.show()
# In[8]:
sns.distplot(reviews['points'], bins=10, kde=False)
#reviews[reviews['price'] < 200]['price'].plot.hist() 对应直方图
plt.show()
# ### jointplot 对应 kind=scatter/reg/hex/kde
# In[12]:
sns.jointplot(x='price', y='points', data=reviews[reviews['price'] < 100])
plt.show()
# In[10]:
sns.jointplot(x='price', y='points', data=reviews[reviews['price'] < 100], kind='hex',
gridsize=20)
plt.show()
# In[15]:
sns.jointplot(x='price', y='points', data=reviews[reviews['price'] < 100], kind='reg')
plt.show()
# In[16]:
sns.jointplot(x='price', y='points', data=reviews[reviews['price'] < 100], kind='kde',
gridsize=20)
plt.show()
# In[19]:
df = reviews[reviews.variety.isin(reviews.variety.value_counts().head(5).index)] |
sns.boxplot(x='variety', y='points', data=df)
plt.show()
# #### Red Blend 比Chardonnay variety得分更高一点
# In[20]:
sns.violinplot( x='varie | ty',y='points',data=reviews[reviews.variety.isin(reviews.variety.value_counts()[:5].index)])
plt.show()
|
HiSPARC/station-software | user/python/Lib/test/test_dis.py | Python | gpl-3.0 | 4,663 | 0.00193 | # Minimal tests for dis module
from test.test_support import run_unittest
import unittest
import sys
import dis
import StringIO
# Tiny Python 2 fixture whose disassembly is compared against dis_f below.
# Do not add lines inside the body: the expected output is keyed off
# co_firstlineno-relative line offsets and constant indices.
def _f(a):
    print a
    return 1
dis_f = """\
%3d 0 LOAD_FAST 0 (a)
3 PRINT_ITEM
4 PRINT_NEWLINE
%3d 5 LOAD_CONST 1 (1)
8 RETURN_VALUE
"""%(_f.func_code.co_firstlineno + 1,
_f.func_code.co_firstlineno + 2)
# Regression fixture for CPython bug #708901 (line numbering of a call
# spanning multiple lines).  The body's line layout is significant; see
# dis_bug708901 below.
def bug708901():
    for res in range(1,
                     10):
        pass
dis_bug708901 = """\
%3d 0 SETUP_LOOP 23 (to 26)
3 LOAD_GLOBAL 0 (range)
6 LOAD_CONST 1 (1)
%3d 9 LOAD_CONST 2 (10)
12 CALL_FUNCTION 2
15 GET_ITER
>> 16 FOR_ITER 6 (to 25)
19 STORE_FAST 0 (res)
%3d 22 JUMP_ABSOLUTE 16
>> 25 POP_BLOCK
>> 26 LOAD_CONST 0 (None)
29 RETURN_VALUE
"""%(bug708901.func_code.co_firstlineno + 1,
bug708901.func_code.co_firstlineno + 2,
bug708901.fun | c_code.co_firstlineno + 3)
# Regression fixture for CPython bug #1333982: the `assert 0` always fires,
# and the deliberately bogus `list + int` expression only matters for the
# bytecode it compiles to (see dis_bug1333982).  The mutable default is
# harmless here -- the function is disassembled, never called.
def bug1333982(x=[]):
    assert 0, ([s for s in x] +
               1)
    pass
dis_bug1333982 = """\
%3d 0 LOAD_CONST 1 (0)
3 POP_JUMP_IF_TRUE 41
6 LOAD_GLOBAL 0 (AssertionError)
9 BUILD_LIST 0
12 LOAD_FAST 0 (x)
15 GET_ITER
>> 16 FOR_ITER 12 (to 31)
19 STORE_FAST 1 (s)
2 | 2 LOAD_FAST 1 (s)
25 LIST_APPEND 2
28 JUMP_ABSOLUTE 16
%3d >> 31 LOAD_CONST 2 (1)
34 BINARY_ADD
35 CALL_FUNCTION 1
38 RAISE_VARARGS 1
%3d >> 41 LOAD_CONST 0 (None)
44 RETURN_VALUE
"""%(bug1333982.func_code.co_firstlineno + 1,
bug1333982.func_code.co_firstlineno + 2,
bug1333982.func_code.co_firstlineno + 3)
_BIG_LINENO_FORMAT = """\
%3d 0 LOAD_GLOBAL 0 (spam)
3 POP_TOP
4 LOAD_CONST 0 (None)
7 RETURN_VALUE
"""
class DisTests(unittest.TestCase):
    """Checks dis module output and opcode tables (Python 2)."""

    def do_disassembly_test(self, func, expected):
        # Capture dis.dis output by temporarily swapping sys.stdout.
        s = StringIO.StringIO()
        save_stdout = sys.stdout
        sys.stdout = s
        dis.dis(func)
        sys.stdout = save_stdout
        got = s.getvalue()
        # Trim trailing blanks (if any).
        lines = got.split('\n')
        lines = [line.rstrip() for line in lines]
        expected = expected.split("\n")
        import difflib
        if expected != lines:
            self.fail(
                "events did not match expectation:\n" +
                "\n".join(difflib.ndiff(expected,
                                        lines)))

    def test_opmap(self):
        self.assertEqual(dis.opmap["STOP_CODE"], 0)
        self.assertIn(dis.opmap["LOAD_CONST"], dis.hasconst)
        self.assertIn(dis.opmap["STORE_NAME"], dis.hasname)

    def test_opname(self):
        # opname must be the inverse mapping of opmap.
        self.assertEqual(dis.opname[dis.opmap["LOAD_FAST"]], "LOAD_FAST")

    def test_boundaries(self):
        self.assertEqual(dis.opmap["EXTENDED_ARG"], dis.EXTENDED_ARG)
        self.assertEqual(dis.opmap["STORE_NAME"], dis.HAVE_ARGUMENT)

    def test_dis(self):
        self.do_disassembly_test(_f, dis_f)

    def test_bug_708901(self):
        self.do_disassembly_test(bug708901, dis_bug708901)

    def test_bug_1333982(self):
        # This one is checking bytecodes generated for an `assert` statement,
        # so fails if the tests are run with -O. Skip this test then.
        if __debug__:
            self.do_disassembly_test(bug1333982, dis_bug1333982)
        else:
            self.skipTest('need asserts, run without -O')

    def test_big_linenos(self):
        # Build a function whose single statement sits `count` lines below
        # the def, to exercise lnotab handling of large line offsets.
        def func(count):
            namespace = {}
            func = "def foo():\n " + "".join(["\n "] * count + ["spam\n"])
            exec func in namespace
            return namespace['foo']

        # Test all small ranges
        for i in xrange(1, 300):
            expected = _BIG_LINENO_FORMAT % (i + 2)
            self.do_disassembly_test(func(i), expected)

        # Test some larger ranges too
        for i in xrange(300, 5000, 10):
            expected = _BIG_LINENO_FORMAT % (i + 2)
            self.do_disassembly_test(func(i), expected)
def test_main():
run_unittest(DisTests)
if __name__ == "__main__":
test_main()
|
aaronsw/watchdog | vendor/rdflib-2.4.0/rdflib/plugin.py | Python | agpl-3.0 | 3,689 | 0.00244 | from rdflib.store import Store
from rdflib.syntax import serializer, serializers
from rdflib.syntax import parsers
from rdflib import sparql
from rdflib.QueryResult import QueryResult
_kinds = {}
_adaptors = {}
def register(name, kind, module_path, class_name):
_module_info = _kinds.get(kind, None)
if _module_info is None:
_module_info = _kinds[kind] = {}
_module_info[name] = (module_path, class_name)
def get(name, kind):
_module_info = _kinds.get(kind)
if _module_info and name in _module_info:
module_path, class_name = _module_info[name]
module = __import__(module_path, globals(), locals(), True)
return getattr(module, class_name)
else:
Adaptor = kind # TODO: look up of adaptor, for now just use ki | nd
try:
Adaptee = get(name, _adaptors[kind])
except Exception, e:
raise Exception("could not get plugin for %s, %s: %s" % (name, kind, e))
def const(*args, **keywords):
return Adaptor(Adaptee(*args, **keywords))
return const
def register_adaptor(adaptor, adaptee):
_adaptors[adaptor] = adaptee
register_adaptor(serializer.Serializer, serializers.Serializer)
#register_adaptor(parser.Pars | er, parsers.Parser)
register('rdf', serializers.Serializer,
'rdflib.syntax.serializers.XMLSerializer', 'XMLSerializer')
register('xml', serializers.Serializer,
'rdflib.syntax.serializers.XMLSerializer', 'XMLSerializer')
register('rdf/xml', serializers.Serializer,
'rdflib.syntax.serializers.XMLSerializer', 'XMLSerializer')
register('pretty-xml', serializers.Serializer,
'rdflib.syntax.serializers.PrettyXMLSerializer', 'PrettyXMLSerializer')
register('nt', serializers.Serializer,
'rdflib.syntax.serializers.NTSerializer', 'NTSerializer')
register('turtle', serializers.Serializer,
'rdflib.syntax.serializers.TurtleSerializer', 'TurtleSerializer')
register('n3', serializers.Serializer,
'rdflib.syntax.serializers.N3Serializer', 'N3Serializer')
register('xml', parsers.Parser,
'rdflib.syntax.parsers.RDFXMLParser', 'RDFXMLParser')
register('trix', parsers.Parser,
'rdflib.syntax.parsers.TriXParser', 'TriXParser')
register('n3', parsers.Parser,
'rdflib.syntax.parsers.N3Parser', 'N3Parser')
register('notation3', parsers.Parser,
'rdflib.syntax.parsers.N3Parser', 'N3Parser')
register('nt', parsers.Parser,
'rdflib.syntax.parsers.NTParser', 'NTParser')
register('n3', parsers.Parser,
'rdflib.syntax.parsers.N3Parser', 'N3Parser')
register('rdfa', parsers.Parser,
'rdflib.syntax.parsers.RDFaParser', 'RDFaParser')
register('default', Store,
'rdflib.store.IOMemory', 'IOMemory')
register('IOMemory', Store,
'rdflib.store.IOMemory', 'IOMemory')
register('Memory', Store,
'rdflib.store.Memory', 'Memory')
register('Sleepycat', Store,
'rdflib.store.Sleepycat', 'Sleepycat')
register('BerkeleyDB', Store,
'rdflib.store.BerkeleyDB', 'BerkeleyDB')
register('MySQL', Store,
'rdflib.store.MySQL', 'MySQL')
register('SQLite', Store,
'rdflib.store.SQLite', 'SQLite')
register('ZODB', Store,
'rdflib.store.ZODB', 'ZODB')
register('sqlobject', Store,
'rdflib.store._sqlobject', 'SQLObject')
register('Redland', Store,
'rdflib.store.Redland', 'Redland')
register('MySQL', Store,
'rdflib.store.MySQL', 'MySQL')
register("sparql", sparql.Processor,
'rdflib.sparql.bison.Processor', 'Processor')
register("SPARQLQueryResult", QueryResult,
'rdflib.sparql.QueryResult', 'SPARQLQueryResult')
|
doptio/you-owe-it | yoi/authentication.py | Python | mit | 215 | 0.004651 | from flask import g, session
from yoi.app import app
@app.before_request
def get_current_user():
if session.get('user_id'):
g.user = | app.db.User.get(session['user_id'])
els | e:
g.user = None
|
realgam3/phantom-requests | setup.py | Python | apache-2.0 | 721 | 0 | #!/usr/bin/env python
from setuptools import setup
setup(
name='phantom-requests',
version='0.0.1',
description='Use PhantomJS As You Are U | sing Requests.',
author='Tomer Zait (RealGame)',
author_email='realgam3@gmail.com',
packages=['phantom_requests'],
package_data={
'phantom_requests': [
| 'ghostdriver/VERSION',
'ghostdriver/*.*',
'ghostdriver/src/*.*',
'ghostdriver/src/request_handlers/*.*',
'ghostdriver/src/third_party/*.*',
'ghostdriver/src/third_party/webdriver-atoms/*.*',
]
},
install_requires=[
'selenium >= 3.0.1',
'requests >= 2.11.1',
],
platforms='any',
)
|
keithemiller/shell-scribe | shell-scribe.py | Python | apache-2.0 | 12,528 | 0.010536 | #!/usr/bin/python
"""
.. module:: shellscribe
Shell-Scribe run.py
@author: Keith E. Miller <keithmiller@umass.edu>
Expected issues:
- cd command is shell-scribe specific so commands that use cd in a non-trivial
way might break the cd command
"""
import cmd
import os
import sys
import argparse as ap
import datetime
import json
from twilio.rest import TwilioRestClient
## Set to false to get rid of debug print statements
DEBUG = False
### PASTE FUNCTION DEFINITIONS HERE
def bashinator_9000(filename):
dic={}
inc=1
title = ''
author = ''
date = datetime.datetime.now()
title = raw_input("What is the title: ")
author = raw_input("Who is the author: ")
dic['welcome']= raw_input("Input a description for the lesson: ")
date = datetime.datetime.now()
if title =="": title = 'lesson'
if author=="": author = 'N/A'
dic["title"] = title
dic["author"] = author
with open(filename,'r') as file:
for row in file:
print '\033[91m' + "\nCode for the row: " + '\033[96m' + row + '\033[92m'
comment=raw_input('- ')
tempDic = {'comment':comment,'command':row}
dic.update({inc:tempDic})
inc+=1
print('\033[0m')
dic['command_count'] = inc - 1
with open(title+'.json','w') as file:
json.dump(dic,file)
def bashinator_10000(filename): #need sleeeeeep
#fname = filename.readFile() #attempting to have json file read-in
with open(filename, 'r') as f:
json_dict = json.load(f)
print json_dict
inc=1
# Welcomes them to Hell
print json_dict["welcome"], "\n"
for x in range(json_dict["command_count"]):
x = x + 1
print '\033[91m' +"Line: ", x,'\n'
print '\033[92m'+ "Comment: ", json_dict[str(x)]["comment"],'\n'
print '\033[96m' + "Input: ", json_dict[str(x)]["command"][:-1]
outfile = os.popen(json_dict[str(x)]["command"])
output = outfile.read()
return_val = outfile.close()
if return_val != None:
shell-scribe().send_call()
print '\033[93m' + "Output: ", os.popen(json_dict[str(x)]["command"]).read() + '\033[0m'
raw_input("-Press Enter-\n")
#not sure what to do with the rest of this code. whether or not it is even necessary
#with open('test.sh','r') as file:
# for row in file:
# print '\033[91m' + "\nCode for the row: " + '\033[96m' + row + '\033[92m'
# comment=raw_input('- ')
# tempDic = {'comment':comment,'command':row}
# dic.update({inc:tempDic})
# inc+=1
#dic['welcome']="""This is a welcome message"""
#print('\033[0m')
#with open(title+'.json','w') as file:
# json.dump(dic,file)
class Shell_Scribe(cmd.Cmd):
"""
Shell_Scribe is a commandline interface that automatically saves a history
of what commands were typed to a text file as well as creating a shell
script for them.
"""
## Return value for each command (None == 0)
return_value = None
## The prompt to the user
prompt = '\033[96m'+'S'+'\033[33m'+'hell-'+'\033[96m'+'S'+'\033[33m'+ \
'cribe>'+'\033[0m'
## Set to True for Working Directory as prompt"
location_prompt = False
## This is a list of commands that will not be stored by Shell-Scribe
storage_blacklist = ["ls", "pwd", ""]
## Config File Name
config_filename = "config.json"
## Twilio Attributes
TWILIO = False
ACCOUNT_SID = None
AUTH_TOKEN = None
message_recipient = None
message_sender = None
call_url = None
alert_type = None
## Properties
script_filename = "shell-scribe.sh"
script = None
def bashinator_9000(self, filename):
dic={}
inc=1
title = ''
author = ''
date = datetime.datetime.now()
title = raw_input("What is the title: ")
author = raw_input("Who is the author: ")
dic['welcome']= raw_input("Input a description for the lesson: ")
date = datetime.datetime.now()
if title =="": title = 'lesson'
if author=="": author = 'N/A'
dic["title"] = title
dic["author"] = author
with open(filename,'r') as file:
for row in file:
print '\033[91m' + "\nCode for the row: " + '\033[96m' + row + '\033[92m'
comment=raw_input('- ')
tempDic = {'comment':comment,'command':row}
dic.update({inc:tempDic})
inc+=1
print('\033[0m')
dic['command_count'] = inc - 1
with open(title+'.json','w') as file:
json.dump(dic,file)
def bashinator_10000(self, filename): #need sleeeeeep
#fname = filename.readFile() #attempting to have json file read-in
with open(filename, 'r') as f:
json_dict = json.load(f)
print json_dict
inc=1
# Welcomes them to Hell
print json_dict["welcome"], "\n"
for x in range(json_dict["command_count"]):
x = x + 1
print '\033[91m' +"Line: ", x,'\n'
print '\033[92m'+ "Comment: ", json_dict[str(x)]["comment"],'\n'
print '\033[96m' + "Input: ", json_dict[str(x)]["command"][:-1] |
outfile = os.popen(json_dict[str(x)]["command"])
output = outfile.read()
return_val = outfile.close()
if return_val != None:
self.send_call()
print '\033[93m' + "Output: ", os.popen(json_dict[str(x)]["command"]).read() + '\033[0m'
raw_input("-Press Enter-\n")
## File Editing Methods
def store_to_script(self, line):
"""
Stores the shell command to the script
"""
self.script | .write(line + "\n")
def load_config_json(self):
"""
Configures Shell-Scribe based on the JSON configuration file
"""
with open(self.config_filename, 'r') as f:
json_dict = json.load(f)
#print "Dict from Json:", json_dict
self.TWILIO = (1 == json_dict["twilio"]["TWILIO"])
if self.TWILIO:
self.ACCOUNT_SID = json_dict["twilio"]["ACCOUNT_SID"]
self.AUTH_TOKEN = json_dict["twilio"]["AUTH_TOKEN"]
self.message_recipient = json_dict["twilio"]["TO"]
self.message_sender = json_dict["twilio"]["FROM"]
if json_dict["twilio"]["ALERT_TYPE"].lower() == "call":
self.alert_type = json_dict["twilio"]["ALERT_TYPE"].lower()
self.call_url = json_dict["twilio"]["CALL_URL"]
if json_dict["appearance"]["prompt"].lower() == 'location':
self.location_prompt = True
def no_config_subroutine(self):
"""
Method that is called when there is no config found
"""
gen_config = input("Generate Default Config File? (Y/n)")
if gen_config == "": gen_conifg = "Y"
if gen_config.lower() == 'y':
self.generate_config()
self.load_config_json
else:
"No Configuration File. Running basic mode"
## Send text via Twilio
def send_text(self, line):
"""
Sends a text message via Twilio
"""
client = TwilioRestClient(self.ACCOUNT_SID, self.AUTH_TOKEN)
client.messages.create(to=self.message_recipient,
from_=self.message_sender,
body="Failed on command: " + line)
def send_call(self):
"""
Sends said call via Twilio
"""
print "Calling"
client = TwilioRestClient(self.ACCOUNT_SID, self.AUTH_TOKEN)
call = client.calls.create(to=self.message_recipient,
from_=self.message_sender,
url=self.call_url,
method="GET",
|
puavo-org/puavo-os | parts/puavomenu/user_programs.py | Python | gpl-2.0 | 10,226 | 0.002543 | # Loads, updates and maintains user programs
import os
import logging
from pathlib import Path
import time
import threading
import socket
import utils
import menudata
import loaders.menudata_loader as menudata_loader
import loaders.dotdesktop_loader
class UserProgramsManager:
def __init__(self, base_dir, language):
self.__base_dir = base_dir
self.__language = language
self.__file_cache = {}
def reset(self):
self.__file_cache = {}
# Scans the user programs directory and creates, removes and updates
# user programs. Returns True if something actually changed.
def update(self, programs, category, icon_locator, icon_cache):
if self.__base_dir is None:
return False
start_time = time.perf_counter()
if not os.path.isdir(self.__base_dir) or not os.access(self.__base_dir, os.R_OK):
logging.warning(
"UserProgramsManager::update(): can't access directory \"%s\"", self.__base_dir)
return False
# Get a list of current .desktop files
new_files = {}
seen = set()
for name in Path(self.__base_dir).rglob('*.desktop'):
try:
# Generate a unique ID for this program
basename = os.path.splitext(name.name)[0]
program_id = 'user-program-' + basename
if program_id in seen:
# If you really want to duplicate a program, you have to rename
# the duplicate .desktop file
continue
seen.add(program_id)
stat = os.stat(name)
new_files[name] = {
'modified': stat.st_mtime,
'size': stat.st_size,
'program_id': program_id,
}
except Exception as exception:
logging.fatal('Error occurred when scanning for user programs:')
logging.error(exception, exc_info=True)
# Detect added, removed and changed files
existing_keys = set(self.__file_cache.keys())
new_keys = set(new_files.keys())
added = new_keys - existing_keys
removed = existing_keys - new_keys
changed = set()
current = set()
for pid, program in p | rograms.items():
if isinstance(program, menudata.UserProgram):
current.add(pid)
for name in existing_keys.in | tersection(new_keys):
if self.__file_cache[name]['modified'] != new_files[name]['modified'] or \
self.__file_cache[name]['size'] != new_files[name]['size']:
changed.add(name)
something_changed = False
# Unload removed programs first. This way, if a .desktop file is renamed, the new
# program ID won't be a duplicate (the renamed program would appear on the next
# update).
for name in removed:
pid = self.__file_cache[name]['program_id']
if pid in current:
current.remove(pid)
if pid not in programs:
# what is going on?
continue
program = programs[pid]
if program.icon and program.original_icon_name:
del icon_cache[program.original_icon_name]
del programs[pid]
something_changed = True
# Load new files
for name in added:
pid = new_files[name]['program_id']
program = menudata.UserProgram()
program.menudata_id = pid
program.original_desktop_file = os.path.basename(name)
program.filename = name
program.modified = new_files[name]['modified']
program.size = new_files[name]['size']
if self.__load_user_program(program, name, icon_locator, icon_cache):
programs[pid] = program
something_changed = True
current.add(pid)
# Reload changed files
for name in changed:
pid = new_files[name]['program_id']
if pid not in programs:
# what did you do?!
continue
program = programs[pid]
if self.__load_user_program(program, name, icon_locator, icon_cache):
something_changed = True
else:
logging.error('Changed user program "%s" not updated', name)
# Rebuild the list of programs in the specified user category
if something_changed:
prog_list = []
for pid in current:
if not programs[pid].name:
continue
prog_list.append((pid, programs[pid].name.lower()))
# the files can be in arbitrary order, so sort
# the user programs alphabetically
prog_list.sort(key=lambda p: p[1])
category.program_ids = []
for prog in prog_list:
category.program_ids.append(prog[0])
self.__file_cache = new_files
end_time = time.perf_counter()
utils.log_elapsed_time('UserProgramsManager::update(): user programs update',
start_time, end_time)
# Trigger a menu buttons rebuild if something actually changed
return something_changed
# Loads the .desktop file for a single program and builds
# a program object out of it
def __load_user_program(self,
program, # a UserProgram instance
filename, # .desktop file name
icon_locator, # where to find icons
icon_cache): # the icon cache to use
# Load the .desktop file
try:
desktop_data = loaders.dotdesktop_loader.load(filename)
if 'Desktop Entry' not in desktop_data:
raise RuntimeError('missing "Desktop Entry" section')
except Exception as exc:
logging.error(
'Could not load the desktop file "%s" for user program:',
filename)
logging.error(str(exc))
return False
# If the .desktop file was created by us, reject it, because
# otherwise we'd end up creating loops. If you edit an existing
# .desktop file and add (or remove) this key, it WILL cause
# problems, but then it'll be your own problem.
if 'X-Puavomenu-Created' in desktop_data['Desktop Entry']:
logging.info(
'.desktop file "%s" was created by us, not adding it to the user programs list',
filename
)
return False
# Honor "NoDisplay=true"
if 'NoDisplay' in desktop_data['Desktop Entry'] and \
desktop_data['Desktop Entry']['NoDisplay'] == 'true':
logging.info('.desktop file "%s" contains "NoDisplay=true", skipping it',
filename)
return False
# Normally this would contain all the data loaded from menudata JSON
# file(s), but those don't exist here
final_data = {}
menudata_loader.merge_json_and_desktop_data(
final_data, desktop_data['Desktop Entry'], self.__language)
if final_data.get('command', None) is None:
logging.warning('.desktop file "%s" does not specify a command to run',
filename)
return False
program.name = final_data.get('name', None)
program.command = final_data.get('command', None)
program.description = final_data.get('description', None)
program.keywords = final_data.get('keywords', frozenset())
# Locate the icon file
icon_name = final_data.get('icon', None)
icon_file, _ = icon_locator.locate_icon(icon_name)
if program.original_icon_name:
if program.original_icon_name == icon_file:
# This program was reloaded, but the icon did not change
return True
# The icon did change, remove the |
raj4/bigbang | bigbang/archive.py | Python | gpl-2.0 | 6,045 | 0.004632 | import datetime
import mailman
import mailbox
import numpy as np
from bigbang.thread import Thread
from bigbang.thread import Node
import pandas as pd
import pytz
import utils
def load(path):
data = pd.read_csv(path)
return Archive(data)
class Archive:
"""
A representation of a mailing list archive.
"""
data = None
activity = None
threads = None
def __init__(self, data, archive_dir="archives", mbox=False):
"""
Initializes an Archive object.
The behavior of the constructor depends on the type
of its first argument, data.
If data is a Pandas DataFrame, it is treated as a representation of
email messages with columns for Message-ID, From, Date, In-Reply-To,
References, and Body. The created Archive becomes a wrapper around a
copy of the input DataFrame.
If data is a string, then it is interpreted as a path to either a
single .mbox file (if the optional argument single_file is True) or
else to a directory of .mbox files (also in .mbox format). Note that
the file extensions need not be .mbox; frequently they will be .txt.
Upon initialization, the Archive object drops duplicate entries
and sorts its member variable *data* by Date.
"""
if isinstance(data, pd.core.frame.DataFrame):
self.data = data.copy()
elif isinstance(data, str):
self.data = mailman.load_data(data,archive_dir=archive_dir,mbox=mbox)
self.data['Date'] = pd.to_datetime(self.data['Date'], utc=True)
self.data.drop_duplicates(inplace=True)
# Drops any entries with no Date field.
# It may be wiser to optionally
# do interpolation here.
self.data.dropna(subset=['Date'], inplace=True)
#convert any null fields to None -- csv saves these as nan sometimes
self.data = self.data.where(pd.notnull(self.data),None)
try:
#set the index to be the Message-ID column
self.data.set_index('Message-ID',inplace=True)
except KeyError:
#will get KeyError if Message-ID is already index
pass
self.data.sort(columns='Date', inplace=True)
def get_activity(self):
if self.activity is None:
self.activity = self.compute_activity(self)
return self.activity
def compute_activity(self, clean=True):
mdf = self.data
if clean:
# unnecessary?
mdf = mdf.dropna(subset=['Date'])
mdf = mdf[
mdf['Date'] < datetime.datetime.now(
pytz.utc)] # drop messages apparently in the future
mdf2 = mdf[['From', 'Date']]
mdf2['Date'] = mdf['Date'].apply(lambda x: x.toordinal())
activity = mdf2.groupby(
['From', 'Date']).size().unstack('From').fillna(0)
new_date_range = np.arange(mdf2['Date'].min(), mdf2['Date'].max())
# activity.set_index('Date')
activity = activity.reindex(new_date_range, fill_value=0)
return activity
def get_threads(self, verbose=False):
if self.threads is not None:
return self.threads
df = self.data
threads = list()
visited = dict()
total = df.shape[0]
c = 0
for i in df.iterrows():
if verbose:
c += 1
if c % 1000 == 0:
print "Processed %d of %d" %(c,total)
if(i[1]['In-Reply-To'] is None):
root = Node(i[0], i[1])
visited[i[0]] = root
threads.append(Thread(root))
elif(i[1]['In-Reply-To'] not in visited.keys()):
root = Node(i[1]['In-Reply-To'])
succ = Node(i[0],i[1], root)
root.add_successor(succ)
visited[i[1]['In-Reply-To']] = root
visited[i[0]] = succ
threads.append(Thread(root, known_root=False))
else:
parent = visited[i[1]['In-Reply-To']]
node = Node(i[0],i[1], parent)
parent.add_successor(node)
visited[i[0]] = node
self.threads = threads
return threads
def save(self, path,encoding='utf-8'):
self.data.to_csv(path, ",",encoding=encoding)
def find_footer(messages,number=1):
'''
Returns the footer of a DataFrame of emails.
A footer is a string occurring at the tail of most messages.
Messages can be a DataFrame or a Series
'''
if isinstance(messages,pd.DataFrame):
messages = messages['Body']
# sort in lexical order of reverse strings to maximize foot length
srb = messages.apply(lambda x: None if x is None else x[::-1]).order()
#srb = df.apply(lambda x: None if x['Body'] is None else x['Body'][::-1],
# axis=1).order()
# begin walking down the | series looking for maximal overlap
counts = {}
last = None
last_i = None
current = None
def clean_footer(foot):
return foot.strip()
for b in srb:
if last is None:
last = b
continue
elif b is None:
continue
else:
head,i = utils.get_common_head(b,last,delimiter='\n')
head = clean_footer(head[::-1] | )
last = b
if head in counts:
counts[head] = counts[head] + 1
else:
counts[head] = 1
last = b
# reduce candidates that are strictly longer and less frequent
# than most promising footer candidates
for n,foot1 in sorted([(v,k) for k,v in counts.items()],reverse=True):
for foot2, m in counts.items():
if n > m and foot1 in foot2 and len(foot1) > 0:
counts[foot1] = counts[foot1] + counts[foot2]
del counts[foot2]
candidates = sorted([(v,k) for k,v in counts.items()],reverse=True)
return candidates[0:number]
|
simleo/openmicroscopy | components/tools/OmeroWeb/omeroweb/webadmin/views.py | Python | gpl-2.0 | 43,638 | 0.000229 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2014 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>,
# 2008-2013.
#
# Version: 1.0
#
''' A view functions is simply a Python function that takes a Web request and
returns a Web response. This response can be the HTML contents of a Web page,
or a redirect, or the 404 and 500 error, or an XML document, or an image...
or anything.'''
import traceback
import logging
import datetime
import omeroweb.webclient.views
from omero_version import build_year
from omero_version import omero_version
from django.template import loader as template_loader
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext as Context
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_str
from forms import ForgottonPasswordForm, ExperimenterForm, GroupForm
from forms import GroupOwnerForm, MyAccountForm, ChangePassword
from forms import UploadPhotoForm, EmailForm
from omeroweb.http import HttpJPEGResponse
from omeroweb.webclient.decorators import login_required, render_response
from omeroweb.connector import Connector
logger = logging.getLogger(__name__)
##############################################################################
# decorators
class render_response_admin(omeroweb.webclient.decorators.render_response):
"""
Subclass for adding additional data to the 'context' dict passed to
templates
"""
def prepare_context(self, request, context, *args, **kwargs):
"""
We extend the webclient render_response to check if any groups are
created.
If not, add an appropriate message to the template context
"""
super(render_response_admin, self).prepare_context(request, context,
*args, **kwargs)
if 'conn' not in kwargs:
return
conn = kwargs['conn']
noGroupsCreated = conn.isAnythingCreated()
if noGroupsCreated:
msg = _('User must be in a group - You have not created any'
' groups yet. Click <a href="%s">here</a> to create a'
' group') % (reverse(viewname="wamanagegroupid",
args=["new"]))
context['ome']['message'] = msg
context['om | e']['email'] = request.session \
.get('server_settings', False) \
.get('email', False)
##############################################################################
# utils
import omero
from omero.model import PermissionsI
def prepare_experimenter(conn, eid=None):
if eid is None:
eid = conn.getEventContext().userId
experimenter | = conn.getObject("Experimenter", eid)
defaultGroup = experimenter.getDefaultGroup()
otherGroups = list(experimenter.getOtherGroups())
hasAvatar = conn.hasExperimenterPhoto()
isLdapUser = experimenter.isLdapUser()
return experimenter, defaultGroup, otherGroups, isLdapUser, hasAvatar
def otherGroupsInitialList(groups, excluded_names=("user", "guest"),
excluded_ids=list()):
formGroups = list()
for gr in groups:
flag = False
if gr.name in excluded_names:
flag = True
if gr.id in excluded_ids:
flag = True
if not flag:
formGroups.append(gr)
formGroups.sort(key=lambda x: x.getName().lower())
return formGroups
def ownedGroupsInitial(conn, excluded_names=("user", "guest", "system"),
excluded_ids=list()):
groupsList = list(conn.listOwnedGroups())
ownedGroups = list()
for gr in groupsList:
flag = False
if gr.name in excluded_names:
flag = True
if gr.id in excluded_ids:
flag = True
if not flag:
ownedGroups.append(gr)
ownedGroups.sort(key=lambda x: x.getName().lower())
return ownedGroups
# myphoto helpers
def attach_photo(conn, newFile):
if newFile.content_type.startswith("image"):
f = newFile.content_type.split("/")
format = f[1].upper()
else:
format = newFile.content_type
conn.uploadMyUserPhoto(smart_str(newFile.name), format, newFile.read())
# permission helpers
def setActualPermissions(permissions):
permissions = int(permissions)
if permissions == 0:
p = PermissionsI("rw----")
elif permissions == 1:
p = PermissionsI("rwr---")
elif permissions == 2:
p = PermissionsI("rwra--")
elif permissions == 3:
p = PermissionsI("rwrw--")
else:
p = PermissionsI()
return p
def getActualPermissions(group):
p = None
if group.details.getPermissions() is None:
raise AttributeError('Object has no permissions')
else:
p = group.details.getPermissions()
flag = None
if p.isGroupWrite():
flag = 3
elif p.isGroupAnnotate():
flag = 2
elif p.isGroupRead():
flag = 1
elif p.isUserRead():
flag = 0
return flag
# getters
def getSelectedGroups(conn, ids):
if ids is not None and len(ids) > 0:
return list(conn.getObjects("ExperimenterGroup", ids))
return list()
def getSelectedExperimenters(conn, ids):
if ids is not None and len(ids) > 0:
return list(conn.getObjects("Experimenter", ids))
return list()
def mergeLists(list1, list2):
if not list1 and not list2:
return list()
if not list1:
return list(list2)
if not list2:
return list(list1)
result = list()
result.extend(list1)
result.extend(list2)
return set(result)
@login_required()
@render_response()
def drivespace_json(request, query=None, groupId=None, userId=None, conn=None,
**kwargs):
"""
Returns a json list of {"label":<Name>, "data": <Value>, "groupId /
userId": <id>} for plotting disk usage by users or groups.
If 'query' is "groups" or "users", this is for an Admin to show all data
on server divided into groups or users.
Else, if groupId is not None, we return data for that group, split by user.
Else, if userId is not None, we return data for that user, split by group.
"""
diskUsage = []
# diskUsage.append({"label": "Free space", "data":conn.getFreeSpace()})
queryService = conn.getQueryService()
ctx = conn.SERVICE_OPTS.copy()
params = omero.sys.ParametersI()
params.theFilter = omero.sys.Filter()
def getBytes(ctx, eid=None):
bytesInGroup = 0
pixelsQuery = "select sum(cast( p.sizeX as double ) * p.sizeY * p.sizeZ * p.sizeT * p.sizeC * pt.bitSize / 8) " \
"from Pixels p join p.pixelsType as pt join p.image i left outer join i.fileset f " \
"join p.details.owner as owner " \
"where f is null"
filesQuery = "select sum(origFile.size) from OriginalFile as origFile " \
"join origFile.details.owner as owner"
if eid is not None:
params.add('eid', omero.rtypes.rlong(eid))
pixelsQuery = pixelsQuery + " and owner.id = (:eid)"
filesQuery = filesQuery + " where owner.id = (:eid)"
# Calculate disk usage via Pixels
result = queryService.projection(pixelsQuery, params, ctx)
if len(result) > 0 |
LarryHillyer/PoolHost | PoolHost/pooltype/forms.py | Python | gpl-3.0 | 911 | 0.027442 | from django import forms
from django.forms import ModelForm
from django.db import models
from app.models import PoolType
class PoolTypeForm_Create(ModelForm):
name = forms.CharField(max_length = 100, label = 'Pool Type', |
widget = forms.TextInput({
'class':'form-control',
'placeholder': 'Enter Pool Type'}))
class Meta:
model | = PoolType
fields = ['name']
class PoolTypeForm_Edit(ModelForm):
id = forms.IntegerField(widget = forms.HiddenInput())
name = forms.CharField(max_length = 100, label = 'Pool Type',
widget = forms.TextInput({
'class':'form-control',
'placeholder': 'Enter Pool Type'}))
class Meta:
model = PoolType
fields = ['id', 'name'] |
le717/Shutdown-Timer | constants.py | Python | gpl-3.0 | 937 | 0 | # -*- coding: utf-8 -*-
"""Shutdown Timer - Small Windows shutdown timer.
Created 2013, 2015 T | riangle717
<http://Triangle717.WordPress.com>
Shutdown Timer is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Shutdown Timer is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR | A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Shutdown Timer. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
appName = "Shutdown Timer"
version = "1.5"
creator = "Triangle717"
exeName = os.path.basename(sys.argv[0])
appFolder = os.path.dirname(sys.argv[0])
|
guillempalou/scikit-cv | skcv/video/segmentation/region_tracking.py | Python | bsd-3-clause | 3,595 | 0.003338 | import networkx as nx
import numpy as np
def bipartite_region_tracking(partition, optical_flow, reliability,
                              matching_th=0.1, reliability_th=0.2):
    """Propagate region labels across frames via weighted bipartite matching.

    For each pair of consecutive frames, every region of the (already
    relabeled) current frame is projected into the next frame using the
    optical flow; overlap counts become edge weights of a bipartite graph,
    whose maximum-weight matching decides which labels carry over.

    Parameters
    ----------
    partition: numpy array
        A 3D label array (frames, width, height) where each label
        represents a region
    optical_flow: numpy array
        A (frames, width, height, 2) array of per-pixel flow vectors
    reliability: numpy array
        A 3D array representing the flow reliability
    matching_th: float, optional
        matching threshold for the bipartite matching
    reliability_th: float, optional
        reliability threshold to stop tracking

    Returns
    -------
    numpy array of the same shape as ``partition`` with labels made
    consistent across frames.  (The original docstring claimed a NetworkX
    graph was returned; the function has always returned the relabeled
    partition.)
    """
    if len(partition.shape) != 3:  # pragma: no cover
        raise ValueError("Dimensions must be 3")

    frames, width, height = partition.shape

    new_partition = np.zeros_like(partition)
    # The first frame keeps its original labeling.
    new_partition[0, ...] = partition[0, ...]
    current_label = np.max(np.unique(partition[0, ...])) + 1

    for frame in range(frames - 1):
        labels = np.unique(new_partition[frame, ...])
        labels_next = np.unique(partition[frame + 1, ...])

        # Bipartite graph between current-frame and next-frame regions.
        # NOTE(review): label ids may overlap between the two frames, in
        # which case the two node sets alias each other — TODO confirm
        # whether labels are globally unique in practice.
        bipartite = nx.Graph()
        bipartite.add_nodes_from(labels)
        bipartite.add_nodes_from(labels_next)

        # Project each current region into the next frame and weight edges
        # by the overlap counts.
        for label in labels:
            px, py = np.where(new_partition[frame, ...] == label)

            # Skip regions whose flow is too unreliable to track.
            rel = np.mean(reliability[frame, px, py])
            if rel < reliability_th:  # pragma: no cover
                continue

            npx = px + optical_flow[frame, px, py, 0]
            npy = py + optical_flow[frame, px, py, 1]

            # Discard pixels that flow out of the image bounds.
            in_x = np.logical_and(0 <= npx, npx < width)
            in_y = np.logical_and(0 <= npy, npy < height)
            idx = np.logical_and(in_x, in_y)
            npx = npx[idx]
            npy = npy[idx]
            if npx.size == 0:
                # Whole region flowed outside the frame; nothing to match
                # (previously max() on an empty bincount raised ValueError).
                continue

            # np.int was removed in NumPy 1.24; use the explicit int64.
            count = np.bincount(partition[frame + 1,
                                          npx.astype(np.int64),
                                          npy.astype(np.int64)].astype(np.int64))

            # Keep only sufficiently strong correspondences.
            max_count = count.max()
            nodes = np.nonzero(count > max_count * matching_th)[0]
            weight = count[nodes] / max_count
            for i, n in enumerate(nodes):
                bipartite.add_edge(label, n, weight=weight[i])

        # Maximum weighted matching.  networkx >= 2 returns a set of (u, v)
        # pairs; networkx 1.x returned a dict containing both directions.
        # Normalize to the dict form the loop below relies on.
        matchings = nx.max_weight_matching(bipartite)
        if not isinstance(matchings, dict):
            pairs = {}
            for u, v in matchings:
                pairs[u] = v
                pairs[v] = u
            matchings = pairs

        # Propagate the matched current-frame labels into the next frame.
        for a in matchings:
            b = matchings[a]
            if b not in labels_next:
                continue
            px, py = np.where(partition[frame + 1, ...] == b)
            new_partition[frame + 1, px, py] = a

        # Unmatched next-frame regions start a fresh track.
        for n in bipartite.nodes():
            if n not in labels_next:
                continue
            if n not in matchings:
                px, py = np.where(partition[frame + 1, ...] == n)
                new_partition[frame + 1, px, py] = current_label + 1
                current_label += 1

    return new_partition
makiftasova/hangoutsbot | hangupsbot/plugins/forecast.py | Python | agpl-3.0 | 9,895 | 0.006975 | """
Use DarkSky.net to get current weather forecast for a given location.
Instructions:
* Get an API key from https://darksky.net/dev/
* Store API key in config.json:forecast_api_key
"""
import logging
import plugins
import requests
from decimal import Decimal
logger = logging.getLogger(__name__)
_internal = {}
def _initialize(bot):
    # Plugin entry point: only register the commands when an API key is
    # configured, otherwise log the missing-config error.
    key = bot.get_config_option('forecast_api_key')
    if not key:
        logger.error('WEATHER: config["forecast_api_key"] required')
        return
    _internal['forecast_api_key'] = key
    plugins.register_user_command(['weather', 'forecast'])
    plugins.register_admin_command(['setweatherlocation'])
def setweatherlocation(bot, event, *args):
    """Sets the Lat Long default coordinates for this hangout when polling for weather data

    /bot setWeatherLocation <location>
    """
    raw = ''.join(args).strip()
    if not raw:
        yield from bot.coro_send_message(event.conv_id, _('No location was specified, please specify a location.'))
        return

    # Resolve the free-form text into lat/lng via the geocoder.
    location = _lookup_address(raw)
    if location is None:
        yield from bot.coro_send_message(event.conv_id, _('Unable to find the specified location.'))
        return

    # Ensure the conversation's memory bucket exists before writing into it.
    if not bot.memory.exists(["conv_data", event.conv.id_]):
        bot.memory.set_by_path(['conv_data', event.conv.id_], {})
    bot.memory.set_by_path(
        ["conv_data", event.conv.id_, "default_weather_location"],
        {'lat': location['lat'], 'lng': location['lng']})
    bot.memory.save()

    yield from bot.coro_send_message(event.conv_id, _('This hangouts default location has been set to {}.'.format(location)))
def weather(bot, event, *args):
    """Returns weather information from darksky.net

    <b>/bot weather <location></b> Get location's current weather.
    <b>/bot weather</b> Get the hangouts default location's current weather. If the default location is not set talk to a hangout admin.
    """
    data = _get_weather(bot, event, args)
    if data:
        message = _format_current_weather(data)
    else:
        message = 'There was an error retrieving the weather, guess you need to look outside.'
    yield from bot.coro_send_message(event.conv_id, message)
def forecast(bot, event, *args):
    """Returns a brief textual forecast from darksky.net

    <b>/bot weather <location></b> Get location's current forecast.
    <b>/bot weather</b> Get the hangouts default location's forecast. If default location is not set talk to a hangout admin.
    """
    data = _get_weather(bot, event, args)
    if data:
        message = _format_forecast_weather(data)
    else:
        message = 'There was an error retrieving the weather, guess you need to look outside.'
    yield from bot.coro_send_message(event.conv_id, message)
def _format_current_weather(weather):
"""
Formats the current weather data for the user.
"""
weatherStrings = []
if 'temperature' in weather:
weatherStrings.append("It is currently: <b>{0}°{1}</b>".format(round(weather['temperature'],2),weather['units']['temperature']))
if 'summary' in weather:
weatherStrings.append("<i>{0}</i>".format(weather['summary']))
if 'feelsLike' in weather:
weatherStrings.append("Feels Like: {0}°{1}".format(round(weather['feelsLike'],2),weather['units']['temperature']))
if 'windspeed' in weather:
weatherStrings.append("Wind: {0} {1} from {2}".format(round(weather['windspeed'],2), weather['units']['windSpeed'], _get_wind_direction(weather['windbearing'])))
if 'humidity' in weather:
weatherStrings.append("Humidity: {0}%".format(weather['humidity']))
if 'pressure' in weather:
weatherStrings.append("Pressure: {0} {1}".format(round(weather['pressure'],2), weather['units']['pressure']))
return "<br/>".join(weatherStrings)
def _format_forecast_weather(weather):
"""
Formats the forecast data for the user.
"""
weatherStrings = []
if 'hourly' in weather:
weatherStrings.append("<b>Next 24 Hours</b><br/>{}". format(weather['hourly']))
if 'daily' in weather:
weatherStrings.append("<b>Next 7 Days</b><br/>{}". format(weather['daily']))
return "<br/>".join(weatherStrings)
def _lookup_address(location):
    """
    Retrieve the coordinates of the location from googles geocode api.
    Limit of 2,000 requests a day

    Returns a dict with 'lat', 'lng' and 'address' keys, or None when the
    lookup fails or the service is unreachable.
    """
    google_map_url = 'https://maps.googleapis.com/maps/api/geocode/json'
    payload = {'address': location}
    try:
        # The GET itself raises ConnectionError/Timeout; it must live inside
        # the try block (previously it was outside, so the network-error
        # handler below could never fire and 'resp' could be unbound there).
        resp = requests.get(google_map_url, params=payload)
        resp.raise_for_status()
        results = resp.json()['results'][0]
        return {
            'lat': results['geometry']['location']['lat'],
            'lng': results['geometry']['location']['lng'],
            'address': results['formatted_address']
        }
    except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout) as e:
        # 'resp' may not exist if the request itself failed; log the error.
        logger.error('unable to connect with maps.googleapis.com: %s', e)
        return None
    except (IndexError, KeyError):
        logger.error('unable to parse address return data: %d: %s', resp.status_code, resp.json())
        return None
def _lookup_weather(coords):
    """
    Retrieve the current forecast for the specified coordinates from darksky.net
    Limit of 1,000 requests a day

    Returns a dict of current conditions (empty dict if the response could
    not be parsed), or None if the service was unreachable.
    """
    forecast_io_url = 'https://api.darksky.net/forecast/{0}/{1},{2}?units=auto'.format(
        _internal['forecast_api_key'], coords['lat'], coords['lng'])
    try:
        # Keep the GET inside the try so connection failures reach the
        # network-error handler below.
        r = requests.get(forecast_io_url)
        j = r.json()
        current = {
            'time': j['currently']['time'],
            'summary': j['currently']['summary'],
            'temperature': Decimal(j['currently']['temperature']),
            'feelsLike': Decimal(j['currently']['apparentTemperature']),
            'units': _get_forcast_units(j),
            'humidity': int(j['currently']['humidity'] * 100),
            'windspeed': Decimal(j['currently']['windSpeed']),
            'windbearing': j['currently']['windBearing'],
            'pressure': j['currently']['pressure']
        }
        # The API reports pressure in hPa; convert to kPa when requested.
        if current['units']['pressure'] == 'kPa':
            current['pressure'] = Decimal(current['pressure'] / 10)
        if 'hourly' in j:
            current['hourly'] = j['hourly']['summary']
        if 'daily' in j:
            current['daily'] = j['daily']['summary']
    except ValueError as e:
        logger.error("Forecast Error: {}".format(e))
        current = dict()
    except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout) as e:
        # Bug fix: the original handler referenced an undefined name 'resp'
        # (the response variable is 'r'), raising NameError instead of
        # logging.  Log the exception itself, since 'r' may not exist here.
        logger.error('unable to connect with api.darksky.net: %s', e)
        return None
    return current
def _get_weather(bot, event, params):
    """
    Checks memory for a default location set for the current hangout.
    If one is not found and parameters were specified attempts to look up a location.
    If it finds a location it then attempts to load the weather data
    """
    args_list = list(params)
    if args_list:
        # Explicit location given on the command line.
        location = _lookup_address(''.join(args_list).strip())
    else:
        # Fall back to the hangout's stored default, if any.
        location = {}
        if bot.memory.exists(["conv_data", event.conv.id_]) and \
                bot.memory.exists(["conv_data", event.conv.id_, "default_weather_location"]):
            location = bot.memory.get_by_path(["conv_data", event.conv.id_, "default_weather_location"])
    if location:
        return _lookup_weather(location)
    return {}
def _get_forcast_units(result):
"""
Checks to see what uni the results were passed back as and sets the display units accordingly
"""
units = {
'temperature': 'F',
'distance': 'Miles',
'percipIntensity': 'in./hr.',
'precipAccumulation': 'inches',
'windSpeed': 'mph',
'pressure': 'millibars'
}
if result['flags']:
unit = result['flags']['units']
if unit != 'us':
units['temperature'] = 'C'
units['distance'] |
fuchsia-mirror/third_party-ninja | configure.py | Python | apache-2.0 | 22,852 | 0.001532 | #!/usr/bin/env python
#
# Copyright 2001 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script that generates the build.ninja for ninja itself.
Projects that use ninja themselves should either write a similar script
or use a meta-build system that supports Ninja output."""
from __future__ import print_function
from optparse import OptionParser
import os
import pipes
import string
import subprocess
import sys
sourcedir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(sourcedir, 'misc'))
import ninja_syntax
class Platform(object):
    """Represents a host/target platform and its specific build attributes."""

    def __init__(self, platform):
        # An explicitly requested platform wins over autodetection.
        self._platform = platform
        if self._platform is not None:
            return
        detected = sys.platform
        if detected == 'sunos5':
            self._platform = 'solaris'
            return
        # Ordered (sys.platform prefix, canonical name) pairs used to
        # normalize the detected value.
        normalizations = (
            ('linux', 'linux'),
            ('freebsd', 'freebsd'),
            ('gnukfreebsd', 'freebsd'),
            ('openbsd', 'openbsd'),
            ('solaris', 'solaris'),
            ('mingw', 'mingw'),
            ('win', 'msvc'),
            ('bitrig', 'bitrig'),
            ('netbsd', 'netbsd'),
            ('aix', 'aix'),
            ('dragonfly', 'dragonfly'),
        )
        for prefix, canonical in normalizations:
            if detected.startswith(prefix):
                self._platform = canonical
                return
        # Anything else (e.g. 'darwin') is used verbatim.
        self._platform = detected

    @staticmethod
    def known_platforms():
        return ['linux', 'darwin', 'freebsd', 'openbsd', 'solaris', 'sunos5',
                'mingw', 'msvc', 'gnukfreebsd', 'bitrig', 'netbsd', 'aix',
                'dragonfly']

    def platform(self):
        return self._platform

    def is_linux(self):
        return self._platform == 'linux'

    def is_mingw(self):
        return self._platform == 'mingw'

    def is_msvc(self):
        return self._platform == 'msvc'

    def msvc_needs_fs(self):
        # Probe cl.exe's help output for the /FS flag (present in VS2013+).
        proc = subprocess.Popen(['cl', '/nologo', '/?'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, _ = proc.communicate()
        return b'/FS' in stdout

    def is_windows(self):
        return self._platform in ('mingw', 'msvc')

    def is_solaris(self):
        return self._platform == 'solaris'

    def is_aix(self):
        return self._platform == 'aix'

    def uses_usr_local(self):
        return self._platform in ('freebsd', 'openbsd', 'bitrig', 'dragonfly',
                                  'netbsd')

    def supports_ppoll(self):
        return self._platform in ('freebsd', 'linux', 'openbsd', 'bitrig',
                                  'dragonfly')

    def supports_ninja_browse(self):
        return self._platform not in ('mingw', 'msvc', 'solaris', 'aix')

    def can_rebuild_in_place(self):
        return self._platform not in ('mingw', 'msvc', 'aix')
class Bootstrap:
    """API shim for ninja_syntax.Writer that instead runs the commands.

    Used to bootstrap Ninja from scratch. In --bootstrap mode this
    class is used to execute all the commands to build an executable.
    It also proxies all calls to an underlying ninja_syntax.Writer, to
    behave like non-bootstrap mode.
    """
    def __init__(self, writer, verbose=False):
        # writer: the ninja_syntax.Writer every call is forwarded to.
        # verbose: when True, each executed command line is echoed.
        self.writer = writer
        self.verbose = verbose
        # Map of variable name => expanded variable value.
        self.vars = {}
        # Map of rule name => dict of rule attributes.
        self.rules = {
            'phony': {}
        }

    def comment(self, text):
        # Pure pass-through; comments have no build-time effect.
        return self.writer.comment(text)

    def newline(self):
        # Pure pass-through; formatting only.
        return self.writer.newline()

    def variable(self, key, val):
        # Record the fully-expanded value so later $var references resolve.
        # In bootstrap mode, we have no ninja process to catch /showIncludes
        # output.
        self.vars[key] = self._expand(val).replace('/showIncludes', '')
        return self.writer.variable(key, val)

    def rule(self, name, **kwargs):
        # Remember the rule attributes so build() can look up 'command'.
        self.rules[name] = kwargs
        return self.writer.rule(name, **kwargs)

    def build(self, outputs, rule, inputs=None, **kwargs):
        # Execute the rule's command for these outputs, then forward the
        # edge to the writer so the generated build.ninja matches.
        ruleattr = self.rules[rule]
        cmd = ruleattr.get('command')
        if cmd is None:  # A phony rule, for example.
            return

        # Implement just enough of Ninja variable expansion etc. to
        # make the bootstrap build work.
        local_vars = {
            'in': self._expand_paths(inputs),
            'out': self._expand_paths(outputs)
        }
        for key, val in kwargs.get('variables', []):
            local_vars[key] = ' '.join(ninja_syntax.as_list(val))
        self._run_command(self._expand(cmd, local_vars))

        return self.writer.build(outputs, rule, inputs, **kwargs)

    def default(self, paths):
        # Pure pass-through; defaults only matter to the real ninja.
        return self.writer.default(paths)

    def _expand_paths(self, paths):
        """Expand $vars in an array of paths, e.g. from a 'build' block."""
        paths = ninja_syntax.as_list(paths)
        return ' '.join(map(self._shell_escape, (map(self._expand, paths))))

    def _expand(self, str, local_vars={}):
        """Expand $vars in a string."""
        # NOTE: the mutable default dict is only ever read here, never
        # mutated, so the usual shared-default pitfall does not apply.
        return ninja_syntax.expand(str, self.vars, local_vars)

    def _shell_escape(self, path):
        """Quote paths containing spaces."""
        return '"%s"' % path if ' ' in path else path

    def _run_command(self, cmdline):
        """Run a subcommand, quietly. Prints the full command on error."""
        try:
            if self.verbose:
                print(cmdline)
            subprocess.check_call(cmdline, shell=True)
        except subprocess.CalledProcessError:
            print('when running: ', cmdline)
            raise
# Command-line options for the configure script itself.
parser = OptionParser()
profilers = ['gmon', 'pprof']
parser.add_option('--bootstrap', action='store_true',
                  help='bootstrap a ninja binary from nothing')
parser.add_option('--verbose', action='store_true',
                  help='enable verbose build')
parser.add_option('--platform',
                  help='target platform (' +
                       '/'.join(Platform.known_platforms()) + ')',
                  choices=Platform.known_platforms())
parser.add_option('--host',
                  help='host platform (' +
                       '/'.join(Platform.known_platforms()) + ')',
                  choices=Platform.known_platforms())
parser.add_option('--debug', action='store_true',
                  help='enable debugging extras')
parser.add_option('--profile', metavar='TYPE',
                  choices=profilers,
                  help='enable profiling (' + '/'.join(profilers) + ')')
parser.add_option('--with-gtest', metavar='PATH', help='ignored')
parser.add_option('--with-python', metavar='EXE',
                  help='use EXE as the Python interpreter',
                  default=os.path.basename(sys.executable))
parser.add_option('--force-pselect', action='store_true',
                  help='ppoll() is used by default where available, '
                       'but some platforms may need to use pselect instead')
(options, args) = parser.parse_args()
# This script takes no positional arguments; reject any leftovers.
if args:
    print('ERROR: extra unparsed command-line arguments:', args)
    sys.exit(1)
platform = Platform(options.platform)
if options.host:
host = Platform(options.host)
else: |
ellisonbg/nbgrader | nbgrader/tests/preprocessors/test_savecells.py | Python | bsd-3-clause | 11,442 | 0.000961 | import pytest
from nbformat.v4 import new_notebook
from ...preprocessors import SaveCells
from ...api import Gradebook
from ...utils import compute_checksum
from .base import BaseTestPreprocessor
from .. import (
create_grade_cell, create_solution_cell, create_grade_and_solution_cell,
create_locked_cell)
@pytest.fixture
def preprocessor():
    """Return a fresh ``SaveCells`` preprocessor for each test."""
    return SaveCells()
@pytest.fixture
def gradebook(request, db):
    """Gradebook bound to the test database, with assignment "ps0" pre-added.

    The gradebook connection is closed via a finalizer once the test ends.
    """
    gb = Gradebook(db)
    gb.add_assignment("ps0")
    def fin():
        gb.close()
    request.addfinalizer(fin)
    return gb
@pytest.fixture
def resources(db, gradebook):
    """Minimal nbgrader resources dict pointing preprocessors at the test db."""
    return {
        "nbgrader": {
            "db_url": db,
            "assignment": "ps0",
            "notebook": "test",
        }
    }
class TestSaveCells(BaseTestPreprocessor):
def test_save_code_grade_cell(self, preprocessor, gradebook, resources):
cell = create_grade_cell("hello", "code", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
nb, resources = preprocessor.preprocess(nb, resources)
grade_cell = gradebook.find_grade_cell("foo", "test", "ps0")
assert grade_cell.max_score == 1
assert grade_cell.cell_type == "code"
source_cell = gradebook.find_source_cell("foo", "test", "ps0")
assert source_cell.source == "hello"
assert source_cell.checksum == cell.metadata.nbgrader["checksum"]
assert source_cell.cell_type == "code"
assert source_cell.locked
def test_save_code_solution_cell(self, preprocessor, gradebook, resources):
cell = create_solution_cell("hello", "code", "foo")
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
nb, resources = preprocessor.preprocess(nb, resources)
gradebook.find_solution_cell("foo", "test", "ps0")
source_cell = gradebook.find_source_cell("foo", "test", "ps0")
assert source_cell.source == "hello"
assert source_cell.checksum == cell.metadata.nbgrader["checksum"]
assert source_cell.cell_type == "code"
assert not source_cell.locked
def test_save_markdown_solution_cell(self, preprocessor, gradebook, resources):
cell = create_solution_cell("hello", "markdown", "foo")
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
nb, resources = preprocessor.preprocess(nb, resources)
gradebook.find_solution_cell("foo", "test", "ps0")
source_cell = gradebook.find_source_cell("foo", "test", "ps0")
assert source_cell.source == "hello"
assert source_cell.checksum == cell.metadata.nbgrader["checksum"]
assert source_cell.cell_type == "markdown"
assert not source_cell.locked
def test_save_code_grade_and_solution_cell(self, preprocessor, gradebook, resources):
cell = create_grade_and_solution_cell("hello", "code", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
nb, resources = preprocessor.preprocess(nb, resources)
grade_cell = gradebook.find_grade_cell("foo", "test", "ps0")
assert grade_cell.max_score == 1
assert grade_cell.cell_type == "code"
gradebook.find_solution_cell("foo", "test", "ps0")
source_cell = gradebook.find_source_cell("foo", "test", "ps0")
assert source_cell.source == "hello"
assert source_cell.checksum == cell.metadata.nbgrader["checksum"]
assert source_cell.cell_type == "code"
assert not source_cell.locked
def test_save_markdown_grade | _and_solution_cell(self, preprocessor, gradebook, resources):
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
nb, resources = preprocessor.preprocess(nb, resources)
grade_cell = gradebook.find_grade_cell( | "foo", "test", "ps0")
assert grade_cell.max_score == 1
assert grade_cell.cell_type == "markdown"
gradebook.find_solution_cell("foo", "test", "ps0")
source_cell = gradebook.find_source_cell("foo", "test", "ps0")
assert source_cell.source == "hello"
assert source_cell.checksum == cell.metadata.nbgrader["checksum"]
assert source_cell.cell_type == "markdown"
assert not source_cell.locked
def test_save_locked_code_cell(self, preprocessor, gradebook, resources):
cell = create_locked_cell("hello", "code", "foo")
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
nb, resources = preprocessor.preprocess(nb, resources)
source_cell = gradebook.find_source_cell("foo", "test", "ps0")
assert source_cell.source == "hello"
assert source_cell.checksum == cell.metadata.nbgrader["checksum"]
assert source_cell.cell_type == "code"
assert source_cell.locked
def test_save_locked_markdown_cell(self, preprocessor, gradebook, resources):
cell = create_locked_cell("hello", "markdown", "foo")
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
nb, resources = preprocessor.preprocess(nb, resources)
source_cell = gradebook.find_source_cell("foo", "test", "ps0")
assert source_cell.source == "hello"
assert source_cell.checksum == cell.metadata.nbgrader["checksum"]
assert source_cell.cell_type == "markdown"
assert source_cell.locked
def test_save_new_cell(self, preprocessor, gradebook, resources):
cell1 = create_grade_and_solution_cell("hello", "markdown", "foo", 2)
cell2 = create_grade_and_solution_cell("hello", "markdown", "bar", 1)
nb = new_notebook()
nb.cells.append(cell1)
nb, resources = preprocessor.preprocess(nb, resources)
notebook = gradebook.find_notebook("test", "ps0")
assert len(notebook.grade_cells) == 1
assert len(notebook.solution_cells) == 1
assert len(notebook.source_cells) == 1
nb.cells.append(cell2)
nb, resources = preprocessor.preprocess(nb, resources)
gradebook.db.refresh(notebook)
assert len(notebook.grade_cells) == 2
assert len(notebook.solution_cells) == 2
assert len(notebook.source_cells) == 2
def test_save_new_cell_with_submissions(self, preprocessor, gradebook, resources):
cell1 = create_grade_and_solution_cell("hello", "markdown", "foo", 2)
cell2 = create_grade_and_solution_cell("hello", "markdown", "bar", 1)
nb = new_notebook()
nb.cells.append(cell1)
nb, resources = preprocessor.preprocess(nb, resources)
notebook = gradebook.find_notebook("test", "ps0")
assert len(notebook.grade_cells) == 1
assert len(notebook.solution_cells) == 1
assert len(notebook.source_cells) == 1
gradebook.add_student("hacker123")
gradebook.add_submission("ps0", "hacker123")
nb.cells.append(cell2)
with pytest.raises(RuntimeError):
nb, resources = preprocessor.preprocess(nb, resources)
def test_remove_cell(self, preprocessor, gradebook, resources):
cell1 = create_grade_and_solution_cell("hello", "markdown", "foo", 2)
cell2 = create_grade_and_solution_cell("hello", "markdown", "bar", 1)
nb = new_notebook()
nb.cells.append(cell1)
nb.cells.append(cell2)
nb, resources = preprocessor.preprocess(nb, resources)
notebook = gradebook.find_notebook("test", "ps0")
assert len(notebook.grade_cells) == 2
assert len(notebook.solution_cells) == 2
assert len(notebook.source_cells) == 2
nb.cells = nb.cells[:-1]
nb, resources = preprocessor.preprocess(nb, resources)
gradebook.db.refresh(notebook)
assert l |
akvo/akvo-rsr | akvo/rsr/models/partnership.py | Python | agpl-3.0 | 11,614 | 0.003186 | # -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
import logging
from typing import Type
from django.core.cache import cache |
from django.core.exception | s import ValidationError
from django.apps import apps
from django.db import models
from django.db.models.signals import pre_save, pre_delete
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
import akvo.cache as akvo_cache
from ..fields import ValidXMLCharField
logger = logging.getLogger(__name__)
class Partnership(models.Model):
# the old way
FIELD_PARTNER = 'field'
FUNDING_PARTNER = 'funding'
SPONSOR_PARTNER = 'sponsor'
SUPPORT_PARTNER = 'support'
EXTENDING_PARTNER = 'extending'
PARTNER_TYPE_LIST = [
FIELD_PARTNER, FUNDING_PARTNER, SPONSOR_PARTNER, SUPPORT_PARTNER, EXTENDING_PARTNER
]
PARTNER_LABELS = [
_('Implementing partner'),
_('Funding partner'),
_('Sponsor partner'),
_('Accountable partner'),
_('Extending partner'),
]
PARTNER_TYPES = list(zip(PARTNER_TYPE_LIST, PARTNER_LABELS))
# the new way
IATI_FUNDING_PARTNER = 1
IATI_ACCOUNTABLE_PARTNER = 2
IATI_EXTENDING_PARTNER = 3
IATI_IMPLEMENTING_PARTNER = 4
AKVO_SPONSOR_PARTNER = 100 # not part of the IATI OrganisationRole codelist!
IATI_REPORTING_ORGANISATION = 101
# make sure the AKVO_SPONSOR_PARTNER is last in the list
IATI_ROLE_LIST = [
IATI_FUNDING_PARTNER, IATI_ACCOUNTABLE_PARTNER, IATI_EXTENDING_PARTNER,
IATI_IMPLEMENTING_PARTNER, AKVO_SPONSOR_PARTNER, IATI_REPORTING_ORGANISATION
]
IATI_ROLE_LABELS = [
_('Funding partner'),
_('Accountable partner'),
_('Extending partner'),
_('Implementing partner'),
_('Sponsor partner'),
_('Reporting organisation'),
]
IATI_ROLES = list(zip(IATI_ROLE_LIST, IATI_ROLE_LABELS))
# used when migrating
PARTNER_TYPES_TO_ROLES_MAP = {
FUNDING_PARTNER: IATI_FUNDING_PARTNER,
SUPPORT_PARTNER: IATI_ACCOUNTABLE_PARTNER,
FIELD_PARTNER: IATI_IMPLEMENTING_PARTNER,
SPONSOR_PARTNER: AKVO_SPONSOR_PARTNER,
}
# backwards compatibility
ROLES_TO_PARTNER_TYPES_MAP = {
IATI_FUNDING_PARTNER: FUNDING_PARTNER,
IATI_ACCOUNTABLE_PARTNER: SUPPORT_PARTNER,
IATI_EXTENDING_PARTNER: EXTENDING_PARTNER,
IATI_IMPLEMENTING_PARTNER: FIELD_PARTNER,
AKVO_SPONSOR_PARTNER: SPONSOR_PARTNER,
# TODO: not backwards compatible
IATI_REPORTING_ORGANISATION: ''
}
ALLIANCE_PARTNER = 'alliance'
KNOWLEDGE_PARTNER = 'knowledge'
NETWORK_PARTNER = 'network'
PARTNER_TYPE_EXTRAS_LIST = (ALLIANCE_PARTNER, KNOWLEDGE_PARTNER, NETWORK_PARTNER)
PARTNER_TYPE_EXTRA_LABELS = (
_('Alliance'),
_('Knowledge'),
_('Network')
)
PARTNER_TYPE_EXTRAS = list(zip(PARTNER_TYPE_EXTRAS_LIST, PARTNER_TYPE_EXTRA_LABELS))
organisation = models.ForeignKey(
'Organisation', on_delete=models.CASCADE, verbose_name=_('organisation'), related_name='partnerships', null=True,
blank=True,
help_text=_('Select an organisation that is taking an active role in the project.')
)
project = models.ForeignKey('Project', on_delete=models.CASCADE, verbose_name=_('project'), related_name='partnerships')
iati_organisation_role = models.PositiveSmallIntegerField(
_('organisation role'), choices=IATI_ROLES, db_index=True, null=True, blank=True,
help_text=_('Select the role of the organisation within the project:<br/>'
'- Funding organisation: a government or organisation that provides funds to '
'the project<br/>'
'- Implementing organisation: an organisation involved in carrying out the '
'activity or intervention<br/>'
'- Accountable organisation: an organisation responsible for oversight of '
'the project and its outcomes<br/>'
'- Extending organisation: an organisation that manages the budget and '
'direction of a project on behalf of the funding organisation<br/>'
'- Reporting organisation: an organisation that will report this project in '
'an IATI file')
)
# is_secondary_reporter is only used when the iati_organisation_role is set to
# IATI_REPORTING_ORGANISATION, thus the use of NullBooleanField
is_secondary_reporter = models.BooleanField(
_('secondary reporter'),
null=True,
help_text=_(
'This indicates whether the reporting organisation is a secondary publisher: '
'publishing data for which it is not directly responsible.'
)
)
funding_amount = models.DecimalField(
_('funding amount'), max_digits=14, decimal_places=2, blank=True, null=True, db_index=True,
help_text=_('It’s only possible to indicate a funding amount for funding partners. Use a '
'period to denote decimals.')
)
partner_type_extra = ValidXMLCharField(
_('partner type extra'), max_length=30, blank=True, null=True, choices=PARTNER_TYPE_EXTRAS,
help_text=_('RSR specific partner type.')
)
iati_activity_id = ValidXMLCharField(
_('IATI activity ID'), max_length=100, blank=True, null=True, db_index=True,
help_text=_('A valid activity identifier published by the participating organisation '
'which points to the activity that it has published to IATI that describes '
'its role in this activity.')
)
internal_id = ValidXMLCharField(
_('Internal ID'), max_length=75, blank=True, null=True, db_index=True,
help_text=_('This field can be used to indicate an internal identifier that is used by '
'the organisation for this project. (75 characters)')
)
iati_url = models.URLField(
blank=True,
help_text=_(
'Please enter the URL for where the IATI Activity Id Funding details are published. '
'For projects directly or indirectly funded by the Dutch Government, this should '
'be the OpenAid.nl page. For other projects, an alternative URL can be used.'
)
)
related_activity_id = ValidXMLCharField(
_('related IATI activity ID'), max_length=100, blank=True
)
def iati_organisation_role_label(self):
return dict(self.IATI_ROLES).get(self.iati_organisation_role, '')
def iati_organisation_role_label_unicode(self):
return "{}".format(self.iati_organisation_role_label())
def iati_role_to_partner_type(self):
return dict(self.ROLES_TO_PARTNER_TYPES_MAP).get(self.iati_organisation_role, '')
def iati_role_to_partner_type_unicode(self):
return "{}".format(self.iati_role_to_partner_type())
def organisation_show_link(self):
if self.organisation:
return '<a href="{0}">{1}</a>'.format(self.organisation.get_absolute_url(),
self.organisation.long_name
or self.organisation.name)
return ''
def funding_amount_with_currency(self):
"""Returns the funding amount, prepended by the project's currency."""
if self.funding_amount and self.project and self.project.currency:
return '{0} {1}'.format(self.project.currency, self.funding_amount)
return self.funding_amount
class Meta:
app_label = 'rsr'
verbose_name = _('project partner')
verbose_name_plural = _('project partners')
ordering = ['iati_organisation_role']
def __str__(self):
if self.organisation:
if self.organisation.name:
organisation_unicode = self.organisation.name
elif self.organi |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/cogent/app/cd_hit.py | Python | mit | 11,924 | 0.007716 | #!/usr/bin/env python
"""Application controller for CD-HIT v3.1.1"""
import shutil
from os import remove
from cogent.app.parameters import ValuedParameter
from cogent.app.util import CommandLineApplication, ResultPath,\
get_tmp_filename
from cogent.core.moltype import RNA, DNA, PROTEIN
from cogent.core.alignment import SequenceCollection
from cogent.parse.fasta import MinimalFastaParser
__author__ = "Daniel McDonald"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Daniel McDonald"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Daniel McDonald"
__email__ = "mcdonadt@colorado.edu"
__status__ = "Development"
class CD_HIT(CommandLineApplication):
"""cd-hit Application Controller
Use this version of CD-HIT if your MolType is PROTEIN
"""
_command = 'cd-hit'
_input_handler = '_input_as_multiline_string'
_parameters = {
# input input filename in fasta format, required
'-i':ValuedParameter('-',Name='i',Delimiter=' ',IsPath=True),
# output filename, required
'-o':ValuedParameter('-',Name='o',Delimiter=' ',IsPath=True),
# sequence identity threshold, default 0.9
# this is the default cd-hit's "global sequence identity" calc'd as :
# number of identical amino acids in alignment
# divided by the full length of the shorter sequence
'-c':ValuedParameter('-',Name='c',Delimiter=' '),
# use global sequence identity, default 1
# if set to 0, then use local sequence identity, calculated as :
# number of identical amino acids in alignment
# divided by the length of the alignment
# NOTE!!! don't use -G 0 unless you use alignment coverage controls
# see options -aL, -AL, -aS, -AS
'-g':ValuedParameter('-',Name='g',Delimiter=' '),
# band_width of alignment, default 20
'-b':ValuedParameter('-',Name='b',Delimiter=' '),
# max available memory (Mbyte), default 400
'-M':ValuedParameter('-',Name='M',Delimiter=' '),
# word_length, default 8, see user's guide for choosing it
'-n':ValuedParameter('-',Name='n',Delimiter=' '),
# length of throw_away_sequences, default 10
'-l':ValuedParameter('-',Name='l',Delimiter=' '),
# tolerance for redundance, default 2
'-t':ValuedParameter('-',Name='t',Delimiter=' '),
# length of description in .clstr file, default 20
# if set to 0, it takes the fasta defline and stops at first space
'-d':ValuedParameter('-',Name='d',Delimiter=' '),
# length difference cutoff, default 0.0
# if set to 0.9, the shorter sequences need to be
# at least 90% length of the representative of the cluster
'-s':ValuedParameter('-',Name='s',Delimiter=' '),
# length difference cutoff in amino acid, default 999999
# f set to 60, the length difference between the shorter sequences
# and the representative of the cluster can not be bigger than 60
'-S':ValuedParameter('-',Name='S',Delimiter=' '),
# alignment coverage for the longer sequence, default 0.0
# if set to 0.9, the alignment must covers 90% of the sequence
'-aL':ValuedParameter('-',Name='aL',Delimiter=' '),
# alignment coverage control for the longer sequence, default 99999999
# if set to 60, and the length of the sequence is 400,
# then the alignment must be >= 340 (400-60) residues
'-AL':ValuedParameter('-',Name='AL',Delimiter=' '),
| # alignment coverage for the shorter sequence, default 0.0
# if set to 0.9, the alignment must covers 90% of the sequenc | e
'-aS':ValuedParameter('-',Name='aS',Delimiter=' '),
# alignment coverage control for the shorter sequence, default 99999999
# if set to 60, and the length of the sequence is 400,
# then the alignment must be >= 340 (400-60) residues
'-AS':ValuedParameter('-',Name='AS',Delimiter=' '),
# 1 or 0, default 0, by default, sequences are stored in RAM
# if set to 1, sequence are stored on hard drive
# it is recommended to use -B 1 for huge databases
'-B':ValuedParameter('-',Name='B',Delimiter=' '),
# 1 or 0, default 0
# if set to 1, print alignment overlap in .clstr file
'-p':ValuedParameter('-',Name='p',Delimiter=' '),
# 1 or 0, default 0
# by cd-hit's default algorithm, a sequence is clustered to the first
# cluster that meet the threshold (fast cluster). If set to 1, the program
# will cluster it into the most similar cluster that meet the threshold
# (accurate but slow mode)
# but either 1 or 0 won't change the representatives of final clusters
'-g':ValuedParameter('-',Name='g',Delimiter=' '),
# print this help
'-h':ValuedParameter('-',Name='h',Delimiter=' ')
}
_synonyms = {'Similarity':'-c'}
def getHelp(self):
"""Method that points to documentation"""
help_str =\
"""
CD-HIT is hosted as an open source project at:
http://www.bioinformatics.org/cd-hit/
The following papers should be cited if this resource is used:
Clustering of highly homologous sequences to reduce thesize of large
protein database", Weizhong Li, Lukasz Jaroszewski & Adam Godzik
Bioinformatics, (2001) 17:282-283
Tolerating some redundancy significantly speeds up clustering of large
protein databases", Weizhong Li, Lukasz Jaroszewski & Adam Godzik
Bioinformatics, (2002) 18:77-82
"""
return help_str
def _input_as_multiline_string(self, data):
"""Writes data to tempfile and sets -i parameter
data -- list of lines
"""
if data:
self.Parameters['-i']\
.on(super(CD_HIT,self)._input_as_multiline_string(data))
return ''
def _input_as_lines(self, data):
"""Writes data to tempfile and sets -i parameter
data -- list of lines, ready to be written to file
"""
if data:
self.Parameters['-i']\
.on(super(CD_HIT,self)._input_as_lines(data))
return ''
def _input_as_seqs(self, data):
"""Creates a list of seqs to pass to _input_as_lines
data -- list like object of sequences
"""
lines = []
for i,s in enumerate(data):
# will number the sequences 1,2,3, etc...
lines.append(''.join(['>',str(i+1)]))
lines.append(s)
return self._input_as_lines(lines)
def _input_as_string(self, data):
"""Makes data the value of a specific parameter"""
if data:
self.Parameters['-i'].on(str(data))
return ''
def _get_seqs_outfile(self):
"""Returns the absolute path to the seqs outfile"""
if self.Parameters['-o'].isOn():
return self.Parameters['-o'].Value
else:
raise ValueError, "No output file specified"
def _get_clstr_outfile(self):
"""Returns the absolute path to the clstr outfile"""
if self.Parameters['-o'].isOn():
return ''.join([self.Parameters['-o'].Value, '.clstr'])
else:
raise ValueError, "No output file specified"
def _get_result_paths(self, data):
"""Return dict of {key: ResultPath}"""
result = {}
result['FASTA'] = ResultPath(Path=self._get_seqs_outfile())
result['CLSTR'] = ResultPath(Path=self._get_clstr_outfile())
return result
class CD_HIT_EST(CD_HIT):
    """cd-hit-est Application Controller

    Use this version of CD-HIT if your MolType is DNA or RNA
    (cd-hit-est clusters nucleotide sequences; plain cd-hit is for protein).
    """
    _command = 'cd-hit-est'
    _input_handler = '_input_as_multiline_string'

    # Copy the parent's parameter map before extending it: calling
    # update() on CD_HIT._parameters directly mutated the shared class
    # attribute and leaked the nucleotide-only '-r' flag into CD_HIT.
    _parameters = dict(CD_HIT._parameters)
    _parameters.update({
        # 1 or 0, default 0, by default only +/+ strand alignment
        # if set to 1, do both +/+ & +/- alignments
        '-r':ValuedParameter('-',Name='r',Delimiter=' ')
    })
def cdhit_clusters_from_seqs(seqs, moltype, params=None):
"" |
rcanepa/cs-fundamentals | python/tests/strings/test_msd_string_sort.py | Python | mit | 1,284 | 0.000779 | import unittest
from strings.msd_string_sort import msd_sort
class MSDSort(unittest.TestCase):
    """Check msd_sort() against Python's built-in sorted() reference."""

    def setUp(self):
        # Fixed-width keys: licence plates, with duplicates.
        self.licenses = [
            "4PGC938", "2IYE230", "3CI0720", "1ICK750", "1OHV845",
            "4JZY524", "1ICK750", "3CI0720", "1OHV845", "1OHV845",
            "2RLA629", "2RLA629", "3ATW723",
        ]
        # Variable-length keys, with duplicates and shared prefixes.
        self.unsorted_strings = [
            "are", "by", "sea", "seashells", "ar", "seashells", "sells",
            "sells", "she", "a", "she", "zorro", "shells", "shore",
            "surely", "the", "the",
        ]

    def test_fixed_length_strings_are_sorted(self):
        self.assertEqual(msd_sort(self.licenses), sorted(self.licenses))

    def test_variable_length_strings_are_sorted(self):
        self.assertEqual(msd_sort(self.unsorted_strings),
                         sorted(self.unsorted_strings))
h2oai/h2o-3 | h2o-py/h2o/estimators/glrm.py | Python | apache-2.0 | 45,329 | 0.002537 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_python.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
class H2OGeneralizedLowRankEstimator(H2OEstimator):
"""
Generalized Low Rank Modeling
Builds a generalized low rank model of a H2O dataset.
"""
algo = "glrm"
supervised_learning = False
def __init__(self,
model_id=None, # type: Optional[Union[None, str, H2OEstimator]]
training_frame=None, # type: Optional[Union[None, str, H2OFrame]]
validation_frame=None, # type: Optional[Union[None, str, H2OFrame]]
ignored_columns=None, # type: Optional[List[str]]
ignore_const_cols=True, # type: bool
score_each_iteration=False, # type: bool
representation_name=None, # type: Optional[str]
loading_name=None, # type: Optional[str]
transform="none", # type: Literal["none", "standardize", "normalize", "demean", "descale"]
k=1, # type: int
loss="quadratic", # type: Literal["quadratic", "absolute", "huber", "poisson", "hinge", "logistic", "periodic"]
loss_by_col=None, # type: Optional[List[Literal["quadratic", "absolute", "huber", "poisson", "hinge", "logistic", "periodic", "categorical", "ordinal"]]]
loss_by_col_idx=None, # type: Optional[List[int]]
multi_loss="categorical", # type: Literal["categorical", "ordinal"]
period=1, # type: int
regularization_x="none", # type: Literal["none", "quadratic", "l2", "l1", "non_negative", "one_sparse", "unit_one_sparse", "simplex"]
regularization_y="none", # type: Literal["none", "quadratic", "l2", "l1", "non_negative", "one_sparse", "unit_one_sparse", "simplex"]
gamma_x=0.0, # type: float
gamma_y=0.0, # type: float
max_iterations=1000, # type: int
max_updates=2000, # type: int
init_step_size=1.0, # type: float
min_step_size=0.0001, # type: float
seed=-1, # type: int
init="plus_plus", # type: Literal["random", "svd", "plus_plus", "user"]
svd_method="randomized", # type: Literal["gram_s_v_d", "power", "randomized"]
user_y=None, # type: Optional[Union[None, str, H2OFrame]]
user_x=None, # type: Optional[Union[None, str, H2OFrame]]
expand_user_y=True, # type: bool
impute_original=False, # type: bool
recover_svd=False, # type: bool
max_runtime_secs=0.0, # type: float
export_checkpoints_dir=None, # type: Optional[str]
):
"""
:param model_id: Destination id for this model; auto-generated if not specified.
Defaults to ``None``.
:type model_id: Union[None, str, H2OEstimator], optional
:param training_frame: Id of the training data frame.
Defaults to ``None``.
:type training_frame: Union[None, str, H2OFrame], optional
:param validation_frame: Id of the validation data frame.
Defaults to ``None``.
:type validation_frame: Union[None, str, H2OFrame], optional
:param ignored_columns: Names of columns to ignore for training.
Defaults to ``None``.
:type ignored_columns: List[str], optional
:param ignore_const_cols: Ignore constant columns.
Defaults to ``True``.
:type ignore_const_cols: bool
:param score_each_iteration: Whether to score during each iteration of model training.
Defaults to ``False``.
:type score_each_iteration: bool
:param representation_name: Frame key to save resulting X
Defaults to ``None``.
:type representation_name: str, optional
:param loading_name: [Deprecated] Use representation_name instead. Frame key to save resulting X.
Defaults to ``None``.
:type loading_name: str, optional
:param transform: Transformation of training data
Defaults to ``"none"``.
:type transform: Literal["none", "standardize", "normalize", "demean", "descale"]
:param k: Rank of matrix approximation
Defaults to ``1``.
:type k: int
:param loss: Numeric loss function
Defaults to ``"quadratic"``.
:type loss: Literal["quadratic", "absolute", "huber", "poisson", "hinge", "logistic", "periodic"]
:param loss_by_col: Loss function by column (override)
Defaults to ``None``.
:type loss_by_col: List[Literal["quadratic", "absolute", "huber", "poisson", "hinge", "logistic", "periodic", "categorical",
"ordinal"]], optional
:param loss_by_col_idx: Loss function by column index (override)
Defaults to ``None``.
:type loss_by_col_idx: List[int], optional
:param multi_loss: Categorical loss function
Defaults to ``"categorical"``.
:type multi_loss: Literal["categorical", "ordinal"]
:param period: Length of period (only used with periodic loss function)
Defaults to ``1``.
:type period: int
:param regularization_x: Regularization function for X matrix
Defaults to ``"none"``.
:type regularization_x: Literal["none", "quadratic", "l2", "l1", "non_negative", "one_sparse", "unit_one_sparse", "simplex"]
:param regularization_y: Regularization function for Y matrix
Defaults to ``"none"``.
:type regularization_y: Literal["none", "quadratic", "l2", "l1", "non_negative", "one_sparse", "unit_one_sparse", "simplex"]
:param gamma_x: Regularization weight on X matrix
Defaults to ``0.0``.
:type gamma_x: float
:param gamma_y: Regularization weight on Y matrix
Defaults to ``0.0``.
:type gamma_y: float
:param max_iterations: Maximum number of iterations
Defaults to ``1000``.
:type max_iterations: int
:param max_updates: Maximum number of updates, defaults to 2*max_iterations
Defaults to ``2000``.
:type max_update | s: int
:param init_step_size: Initial step size
Defaults to ``1.0``.
:type init_step_size: float
:param min_step_size: Minimum step size
Defaults to ``0.0001``.
:type min_step_size: float
:param seed: RNG seed for initialization
Defaults to ``-1``.
:type seed: int
:param init: Initialization mode
Defaults to ``"plus_plus"``.
| :type init: Literal["random", "svd", "plus_plus", "user"]
:param svd_method: Method for computing SVD during initialization (Caution: Randomized is currently experimental
and unstable)
Defaults to ``"randomized"``.
:type svd_method: Literal["gram_s_v_d", "power", "randomized"]
:param user_y: User-specified initial Y
Defaults to ``None``.
:type user_y: Union[None, str, H2OFrame], optional
:param user_x: User-specified initial X
Defaults to ``None``.
:type user_x: Union[None, str, H2OFrame], optional
:param expand_user_y: Expand categorical columns in user-specified initial Y
Defaults to ``True``.
:type expand_user_y: bool
:param impute_original: Reconstruct original training data by reversing transform
Defaults to ``False``.
:type impute_original: bool
:param recover_svd: Recover singular |
aliyun/oss-ftp | python27/win32/Lib/site-packages/setuptools/svn_utils.py | Python | mit | 18,855 | 0.00175 | from __future__ import absolute_import
import os
import re
import sys
from distutils import log
import xml.dom.pulldom
import shlex
import locale
import codecs
import unicodedata
import warnings
from setuptools.compat import unicode, PY2
from setuptools.py31compat import TemporaryDirectory
from xml.sax.saxutils import unescape
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from subprocess import Popen as _Popen, PIPE as _PIPE
#NOTE: Use of the command line options require SVN 1.3 or newer (December 2005)
# and SVN 1.3 hasn't been supported by the developers since mid 2008.
#subprocess is called several times with shell=(sys.platform=='win32')
#see the follow for more information:
# http://bugs.python.org/issue8557
# http://stackoverflow.com/questions/5658622/
# python-subprocess-popen-environment-path
def _run_command(args, stdout=_PIPE, stderr=_PIPE, encoding=None, stream=0):
    """Run *args* in a subprocess and return (returncode, decoded output).

    *stream* selects stdout (0) or stderr (1) from communicate().
    Returns (1, '') when the command cannot be launched at all.
    """
    # regarding the shell argument, see: http://bugs.python.org/issue8557
    try:
        proc = _Popen(args, stdout=stdout, stderr=stderr,
                      shell=(sys.platform == 'win32'))
        raw = proc.communicate()[stream]
    except OSError:
        return 1, ''

    # communicate() has already waited for the process to terminate.
    return proc.returncode, decode_as_string(raw, encoding)
def _get_entry_schedule(entry):
schedule = entry.getElementsByTagName('schedule')[0]
return "".join([t.nodeValue
for t in schedule.childNodes
if t.nodeType == t.TEXT_NODE])
def _get_target_property(target):
property_text = target.getElementsByTagName('property')[0]
return "".join([t.nodeValue
for t in property_text.childNodes
if t.nodeType == t.TEXT_NODE])
def _get_xml_data(decoded_str):
    """Return *decoded_str* in the form the XML parser expects.

    Python 2's xml.dom wants an encoded byte string; Python 3 takes the
    decoded text unchanged.
    """
    if PY2:
        return decoded_str.encode('utf-8')
    return decoded_str
def joinpath(prefix, *suffix):
    """Join *suffix* components under *prefix*, ignoring an empty/'.' prefix."""
    if prefix and prefix != '.':
        return os.path.join(prefix, *suffix)
    return os.path.join(*suffix)
def determine_console_encoding():
    """Best-effort guess at the console encoding.

    Falls back to utf-8 on OS X and US-ASCII elsewhere when the locale
    machinery cannot name a usable encoding.
    """
    encoding = None
    try:
        encoding = locale.getpreferredencoding()
        # Some platforms/Python versions report US-ASCII when they cannot
        # determine an encoding; consult the default locale instead.
        if not encoding or encoding == "US-ASCII":
            encoding = locale.getdefaultlocale()[1]
        if encoding:
            codecs.lookup(encoding)  # raises LookupError on bogus names
    except (locale.Error, LookupError):
        encoding = None

    is_osx = sys.platform == "darwin"
    if not encoding:
        return ["US-ASCII", "utf-8"][is_osx]
    if encoding.startswith("mac-") and is_osx:
        # Certain Python versions report mac-roman as the OS X default,
        # a left-over of earlier mac versions.
        return "utf-8"
    return encoding
_console_encoding = determine_console_encoding()
def decode_as_string(text, encoding=None):
    """Decode *text* to a unicode string in composed (NFC) form.

    *text* that is already unicode is left undecoded; otherwise it is
    decoded with *encoding*, defaulting to the console's preferred
    encoding.  Pass an explicit encoding for SVN ``--xml`` output, which
    should be UTF-8 (SVN issue 2938; the Subversion dev list discussion
    from 2007 indicates the same).
    """
    if encoding is None:
        encoding = _console_encoding
    if not isinstance(text, unicode):
        text = text.decode(encoding)
    return unicodedata.normalize('NFC', text)
def parse_dir_entries(decoded_str):
    """Parse (path, kind) pairs out of a recursive ``svn info --xml``."""
    doc = xml.dom.pulldom.parseString(_get_xml_data(decoded_str))
    entries = []
    for event, node in doc:
        if event != 'START_ELEMENT' or node.nodeName != 'entry':
            continue
        doc.expandNode(node)
        # Entries scheduled for deletion are no longer part of the tree.
        if not _get_entry_schedule(node).startswith('delete'):
            entries.append((node.getAttribute('path'),
                            node.getAttribute('kind')))
    # The first entry describes the root directory itself; drop it.
    return entries[1:]
def parse_externals_xml(decoded_str, prefix=''):
    '''Parse a propget svn:externals xml'''
    # Normalize the prefix once so the startswith() comparison below is
    # consistent in separators and (on Windows) case with target paths.
    prefix = os.path.normpath(prefix)
    prefix = os.path.normcase(prefix)

    doc = xml.dom.pulldom.parseString(_get_xml_data(decoded_str))
    externals = list()
    for event, node in doc:
        if event == 'START_ELEMENT' and node.nodeName == 'target':
            doc.expandNode(node)
            path = os.path.normpath(node.getAttribute('path'))

            # Make paths under the prefix relative to it (+1 skips the
            # path separator following the prefix).
            if os.path.normcase(path).startswith(prefix):
                path = path[len(prefix)+1:]

            data = _get_target_property(node)
            #data should be decoded already
            for external in parse_external_prop(data):
                externals.append(joinpath(path, external))

    # Flat list of external paths, relative to *prefix* where applicable.
    return externals
def parse_external_prop(lines):
    """
    Parse the value of a retrieved svn:externals entry.

    possible token setups (with quoting and backslash-escaping in later
    SVN versions)

    URL[@#] EXT_FOLDERNAME
    [-r#] URL EXT_FOLDERNAME
    EXT_FOLDERNAME [-r#] URL

    Returns the list of external folder names, normalized as paths.
    """
    externals = []
    for line in lines.splitlines():
        line = line.lstrip()  # there might be a "\ "
        if not line:
            continue

        if PY2:
            #shlex handles NULLs just fine and shlex in 2.7 tries to encode
            #as ascii automatiically
            line = line.encode('utf-8')
        line = shlex.split(line)
        if PY2:
            line = [x.decode('utf-8') for x in line]

        #EXT_FOLDERNAME is either the first or last depending on where
        #the URL falls
        # urlsplit()[0] is the scheme; non-empty means the last token is
        # the URL, so the folder name is the first token (and vice versa).
        if urlparse.urlsplit(line[-1])[0]:
            external = line[0]
        else:
            external = line[-1]

        external = decode_as_string(external, encoding="utf-8")
        externals.append(os.path.normpath(external))

    return externals
def parse_prop_file(filename, key):
    """Extract *key*'s value from an svn property file.

    The file is a sequence of ``K <len>`` / ``V <len>`` records; the
    returned string is the ``V`` payload that follows the matching ``K``
    record.  If the key is absent, the last payload read (possibly ``''``)
    is returned, matching the historical behavior.
    """
    found = False
    data = ''
    # 'with' guarantees the handle is closed, replacing try/finally.
    with open(filename, 'rt') as f:
        for line in iter(f.readline, ''):  # can't use direct iter!
            parts = line.split()
            if len(parts) == 2:
                kind, length = parts
                # Read exactly the declared payload length; the record's
                # trailing newline is consumed by the next readline().
                data = f.read(int(length))
                if kind == 'K' and data == key:
                    found = True
                elif kind == 'V' and found:
                    break
    return data
class SvnInfo(object):
'''
Generic svn_info object. No has little knowledge of how to extract
information. Use cls.load to instatiate according svn version.
Paths are not filesystem encoded.
'''
@staticmethod
def get_svn_version():
# Temp config directory should be enough to check for repository
# This is needed because .svn always creates .subversion and
# some operating systems do not handle dot directory correctly.
# Real queries in real svn repos with be concerned with it creation
with TemporaryDirectory() as tempdir:
code, data = _run_command(['svn',
'--config-dir', tempdir,
'--version',
'--quiet'])
if code == 0 and data:
return data.strip()
else:
return ''
#svnversion return values (previous implementations return max revision)
# 4123:4168 mixed revision working copy
# 4168M modified working |
dls-controls/dls_ade | dls_ade/dls_checkout_module_test.py | Python | apache-2.0 | 2,092 | 0.000478 | #!/bin/env dls-python
import unittest
from dls_ade import dls_checkout_module
from mock import patch, MagicMock
class MakeParserTest(unittest.TestCase):
    """Tests for dls_checkout_module.make_parser()."""

    def setUp(self):
        self.parser = dls_checkout_module.make_parser()

    @patch('dls_ade.dls_changes_since_release.ArgParser.add_branch_flag')
    def test_branch_set(self, parser_mock):
        """The parser registers a branch flag with the expected help text."""
        dls_checkout_module.make_parser()
        parser_mock.assert_called_once_with(
            help_msg="Checkout a specific named branch rather than the default (master)")

    def test_parser_has_correct_attributes(self):
        """Parsed args expose module_name and the area selected by -p."""
        args = self.parser.parse_args("-p module1".split())
        self.assertEqual(args.module_name, "module1")
        self.assertEqual(args.area, "python")

    def test_parser_does_not_accept_version(self):
        """A trailing version argument must be rejected (argparse exits)."""
        try:
            self.parser.parse_args("-p module1 0-1".split())
            self.fail("dls-checkout-module should not accept a version")
        except SystemExit:
            pass
class CheckTechnicalAreaTest(unittest.TestCase):
    """Tests for dls_checkout_module.check_technical_area()."""

    def test_given_area_not_ioc_then_no_error_raised(self):
        area = "support"
        module = "test_module"
        dls_checkout_module.check_technical_area(area, module)

    def test_given_area_ioc_module_all_then_no_error_raised(self):
        area = "ioc"
        module = ""
        dls_checkout_module.check_technical_area(area, module)

    def test_given_area_ioc_module_split_two_then_no_error_raised(self):
        area = "ioc"
        module = "modules/test_module"
        dls_checkout_module.check_technical_area(area, module)

    def test_given_area_ioc_module_split_less_than_two_then_error_raised(self):
        area = "ioc"
        module = "test_module"
        expected_error_msg = "Missing Technical Area under Beamline"
        # The previous try/except version passed silently when no
        # exception was raised at all; assertRaises closes that gap.
        with self.assertRaises(Exception) as context:
            dls_checkout_module.check_technical_area(area, module)
        self.assertEqual(str(context.exception), expected_error_msg)
if __name__ == '__main__':
    # Run the suite; buffer=True suppresses stdout produced by the code
    # under test unless a test fails.
    unittest.main(buffer=True)
|
jxtech/wechatpy | tests/test_events.py | Python | mit | 18,940 | 0.000371 | # -*- coding: utf-8 -*-
import unittest
from datetime import datetime
from wechatpy import parse_message
class EventsTestCase(unittest.TestCase):
def test_scan_code_push_event(self):
    """A scancode_push event parses with its scan type and scan result."""
    from wechatpy.events import ScanCodePushEvent

    xml = """<xml>
    <ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
    <FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
    <CreateTime>1408090502</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[scancode_push]]></Event>
    <EventKey><![CDATA[6]]></EventKey>
    <ScanCodeInfo><ScanType><![CDATA[qrcode]]></ScanType>
    <ScanResult><![CDATA[1]]></ScanResult>
    </ScanCodeInfo>
    </xml>"""
    event = parse_message(xml)
    self.assertTrue(isinstance(event, ScanCodePushEvent))
    self.assertEqual("qrcode", event.scan_type)
    self.assertEqual("1", event.scan_result)
def test_scan_code_waitmsg_event(self):
    """A scancode_waitmsg event parses with its scan type and scan result."""
    from wechatpy.events import ScanCodeWaitMsgEvent

    xml = """<xml>
    <ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
    <FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
    <CreateTime>1408090606</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[scancode_waitmsg]]></Event>
    <EventKey><![CDATA[6]]></EventKey>
    <ScanCodeInfo><ScanType><![CDATA[qrcode]]></ScanType>
    <ScanResult><![CDATA[2]]></ScanResult>
    </ScanCodeInfo>
    </xml>"""
    event = parse_message(xml)
    self.assertTrue(isinstance(event, ScanCodeWaitMsgEvent))
    self.assertEqual("qrcode", event.scan_type)
    self.assertEqual("2", event.scan_result)
def test_pic_sysphoto_event(self):
    """A pic_sysphoto event exposes its picture count and MD5 list."""
    from wechatpy.events import PicSysPhotoEvent

    xml = """<xml>
    <ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
    <FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
    <CreateTime>1408090651</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[pic_sysphoto]]></Event>
    <EventKey><![CDATA[6]]></EventKey>
    <SendPicsInfo><Count>1</Count>
    <PicList>
    <item>
    <PicMd5Sum><![CDATA[1b5f7c23b5bf75682a53e7b6d163e185]]></PicMd5Sum>
    </item>
    </PicList>
    </SendPicsInfo>
    </xml>"""
    event = parse_message(xml)
    self.assertTrue(isinstance(event, PicSysPhotoEvent))
    self.assertEqual(1, event.count)
    self.assertEqual("1b5f7c23b5bf75682a53e7b6d163e185", event.pictures[0]["PicMd5Sum"])
def test_pic_photo_or_album_event(self):
    """A pic_photo_or_album event exposes its picture count and MD5 list."""
    from wechatpy.events import PicPhotoOrAlbumEvent

    xml = """<xml>
    <ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
    <FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
    <CreateTime>1408090816</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[pic_photo_or_album]]></Event>
    <EventKey><![CDATA[6]]></EventKey>
    <SendPicsInfo><Count>1</Count>
    <PicList>
    <item>
    <PicMd5Sum><![CDATA[5a75aaca956d97be686719218f275c6b]]></PicMd5Sum>
    </item>
    </PicList>
    </SendPicsInfo>
    </xml>"""
    event = parse_message(xml)
    self.assertTrue(isinstance(event, PicPhotoOrAlbumEvent))
    self.assertEqual(1, event.count)
    self.assertEqual("5a75aaca956d97be686719218f275c6b", event.pictures[0]["PicMd5Sum"])
def test_pic_wechat_event(self):
    """A pic_weixin event exposes its picture count and MD5 list."""
    from wechatpy.events import PicWeChatEvent

    xml = """<xml>
    <ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
    <FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
    <CreateTime>1408090816</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[pic_weixin]]></Event>
    <EventKey><![CDATA[6]]></EventKey>
    <SendPicsInfo><Count>1</Count>
    <PicList>
    <item>
    <PicMd5Sum><![CDATA[5a75aaca956d97be686719218f275c6b]]></PicMd5Sum>
    </item>
    </PicList>
    </SendPicsInfo>
    </xml>"""
    event = parse_message(xml)
    self.assertTrue(isinstance(event, PicWeChatEvent))
    self.assertEqual(1, event.count)
    self.assertEqual("5a75aaca956d97be686719218f275c6b", event.pictures[0]["PicMd5Sum"])
def test_location_select_event(self):
    """A location_select event exposes coordinates, scale and label."""
    from wechatpy.events import LocationSelectEvent

    xml = """<xml>
    <ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
    <FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
    <CreateTime>1408091189</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[location_select]]></Event>
    <EventKey><![CDATA[6]]></EventKey>
    <SendLocationInfo><Location_X><![CDATA[23]]></Location_X>
    <Location_Y><![CDATA[113]]></Location_Y>
    <Scale><![CDATA[15]]></Scale>
    <Label><![CDATA[广州市海珠区客村艺苑路 106号]]></Label>
    <Poiname><![CDATA[]]></Poiname>
    </SendLocationInfo>
    </xml>"""
    event = parse_message(xml)
    self.assertTrue(isinstance(event, LocationSelectEvent))
    self.assertEqual(("23", "113"), event.location)
    self.assertEqual("15", event.scale)
    # An empty Poiname CDATA block is surfaced as None, not ''.
    self.assertTrue(event.poiname is None)
    self.assertEqual("广州市海珠区客村艺苑路 106号", event.label)
def test_merchant_order_event(self):
    """A merchant_order event exposes order id/status, product id and SKU."""
    from wechatpy.events import MerchantOrderEvent

    xml = """<xml>
    <ToUserName><![CDATA[weixin_media1]]></ToUserName>
    <FromUserName><![CDATA[oDF3iYyVlek46AyTBbMRVV8VZVlI]]></FromUserName>
    <CreateTime>1398144192</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[merchant_order]]></Event>
    <OrderId><![CDATA[test_order_id]]></OrderId>
    <OrderStatus>2</OrderStatus>
    <ProductId><![CDATA[test_product_id]]></ProductId>
    <SkuInfo><![CDATA[10001:1000012;10002:100021]]></SkuInfo>
    </xml>"""
    event = parse_message(xml)
    self.assertTrue(isinstance(event, MerchantOrderEvent))
    self.assertEqual("test_order_id", event.order_id)
    # OrderStatus is converted to an int by the event class.
    self.assertEqual(2, event.order_status)
    self.assertEqual("test_product_id", event.product_id)
    self.assertEqual("10001:1000012;10002:100021", event.sku_info)
def test_kf_create_session_event(self):
    """A kf_create_session event exposes the customer-service account."""
    from wechatpy.events import KfCreateSessionEvent

    xml = """<xml>
    <ToUserName><![CDATA[touser]]></ToUserName>
    <FromUserName><![CDATA[fromuser]]></FromUserName>
    <CreateTime>1399197672</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[kf_create_session]]></Event>
    <KfAccount><![CDATA[test1@test]]></KfAccount>
    </xml>"""
    event = parse_message(xml)
    self.assertTrue(isinstance(event, KfCreateSessionEvent))
    self.assertEqual("test1@test", event.account)
def test_kf_close_session_event(self):
    """A kf_close_session event exposes the customer-service account."""
    from wechatpy.events import KfCloseSessionEvent

    xml = """<xml>
    <ToUserName><![CDATA[touser]]></ToUserName>
    <FromUserName><![CDATA[fromuser]]></FromUserName>
    <CreateTime>1399197672</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[kf_close_session]]></Event>
    <KfAccount><![CDATA[test1@test]]></KfAccount>
    </xml>"""
    event = parse_message(xml)
    self.assertTrue(isinstance(event, KfCloseSessionEvent))
    self.assertEqual("test1@test", event.account)
def test_kf_switch_session_event(self):
from wechatpy.events import KfSwitchSessionEvent
xml = """<xml>
<ToUserName><![CDATA[touser]]></ToUserName>
<FromUserName><![CDATA[fromuser]]></FromUserName>
<CreateTime>1399197672</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[kf_switch_session]]></Event>
<FromKfAccount><![CDATA[test1@test]]></FromKfAccount>
<ToKfAccount><![CDATA[test2@test]]></ToKfAccount>
</xml>"""
|
yuyangit/tornado | tornado/test/simple_httpclient_test.py | Python | apache-2.0 | 22,936 | 0.000523 | from __future__ import absolute_import, division, print_function, with_statement
import collections
from contextlib import closing
import errno
import gzip
import logging
import os
import re
import socket
import sys
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.netutil import Resolver, bind_sockets
from tornado.simple_httpclient import SimpleAsyncHTTPClient, _default_ca_certs
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog
from tornado.test.util import skipOnTravis, skipIfNoIPv6, refusing_port
from tornado.web import RequestHandler, Application, asynchronous, url, stream_request_body
class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
    """Run the shared HTTP client test suite against SimpleAsyncHTTPClient."""

    def get_http_client(self):
        http_client = SimpleAsyncHTTPClient(io_loop=self.io_loop,
                                            force_instance=True)
        self.assertTrue(isinstance(http_client, SimpleAsyncHTTPClient))
        return http_client
class TriggerHandler(RequestHandler):
    """Parks requests until the test releases them.

    Each GET appends its ``finish`` callback to the shared queue; the test
    pops callbacks off the queue to complete requests on demand.
    """

    def initialize(self, queue, wake_callback):
        self.queue = queue
        self.wake_callback = wake_callback

    @asynchronous
    def get(self):
        logging.debug("queuing trigger")
        self.queue.append(self.finish)
        # ?wake=false lets a test enqueue a request without unblocking the
        # wait() in the test body.
        if self.get_argument("wake", "true") == "true":
            self.wake_callback()
class HangHandler(RequestHandler):
    """Never responds: @asynchronous without finish() leaves the request open."""

    @asynchronous
    def get(self):
        pass
class ContentLengthHandler(RequestHandler):
    """Responds "ok" while declaring whatever Content-Length ?value= says."""

    def get(self):
        declared_length = self.get_argument("value")
        self.set_header("Content-Length", declared_length)
        self.write("ok")
class HeadHandler(RequestHandler):
    """HEAD response advertising a 7-byte body without sending one."""

    def head(self):
        self.set_header("Content-Length", "7")
class OptionsHandler(RequestHandler):
    """OPTIONS response carrying a permissive CORS header and a body."""

    def options(self):
        self.set_header("Access-Control-Allow-Origin", "*")
        self.write("ok")
class NoContentHandler(RequestHandler):
    """Returns 204 No Content; ?error=1 first writes an illegal body."""

    def get(self):
        if self.get_argument("error", None):
            # Deliberately malformed: a 204 must not carry a body.  Used
            # to test client handling of misbehaving servers.
            self.set_header("Content-Length", "5")
            self.write("hello")
        self.set_status(204)
class SeeOtherPostHandler(RequestHandler):
    """Redirects to /see_other_get with the status code given in the body."""

    def post(self):
        body = self.request.body
        redirect_code = int(body)
        assert redirect_code in (302, 303), "unexpected body %r" % body
        self.set_status(redirect_code)
        self.set_header("Location", "/see_other_get")
class SeeOtherGetHandler(RequestHandler):
    """Redirect target: rejects any request body, then replies "ok"."""

    def get(self):
        body = self.request.body
        if body:
            raise Exception("unexpected body %r" % body)
        self.write("ok")
class HostEchoHandler(RequestHandler):
    """Echoes back the request's Host header."""

    def get(self):
        self.write(self.request.headers["Host"])
class NoContentLengthHandler(RequestHandler):
    """Sends a body with neither Content-Length nor chunked encoding."""

    @gen.coroutine
    def get(self):
        # Emulate the old HTTP/1.0 behavior of returning a body with no
        # content-length.  Tornado handles content-length at the framework
        # level so we have to go around it by writing to the raw stream.
        stream = self.request.connection.stream
        yield stream.write(b"HTTP/1.0 200 OK\r\n\r\n"
                           b"hello")
        stream.close()
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
@stream_request_body
class RespondInPrepareHand | ler(RequestHandler):
def prepare(self):
self.set_status(403)
self.finish("forbidden")
class SimpleHTTPClientTestMixin(object):
def get_app(self):
# callable objects to finish pending /trigger requests
self.triggers = collections.deque()
return Application([
url("/trigger", TriggerHandler, dict(queue=self.triggers,
wake_callback=self.stop)),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
url("/options", OptionsHandler),
url("/no_content", NoContentHandler),
url("/see_other_post", SeeOtherPostHandler),
url("/see_other_get", SeeOtherGetHandler),
url("/host_echo", HostEchoHandler),
url("/no_content_length", NoContentLengthHandler),
url("/echo_post", EchoPostHandler),
url("/respond_in_prepare", RespondInPrepareHandler),
], gzip=True)
def test_singleton(self):
# Class "constructor" reuses objects on the same IOLoop
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is
SimpleAsyncHTTPClient(self.io_loop))
# unless force_instance is used
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(self.io_loop,
force_instance=True))
# different IOLoops use different objects
with closing(IOLoop()) as io_loop2:
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(io_loop2))
def test_connection_limit(self):
with closing(self.create_client(max_clients=2)) as client:
self.assertEqual(client.max_clients, 2)
seen = []
# Send 4 requests. Two can be sent immediately, while the others
# will be queued
for i in range(4):
client.fetch(self.get_url("/trigger"),
lambda response, i=i: (seen.append(i), self.stop()))
self.wait(condition=lambda: len(self.triggers) == 2)
self.assertEqual(len(client.queue), 2)
# Finish the first two requests and let the next two through
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: (len(self.triggers) == 2 and
len(seen) == 2))
self.assertEqual(set(seen), set([0, 1]))
self.assertEqual(len(client.queue), 0)
# Finish all the pending requests
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: len(seen) == 4)
self.assertEqual(set(seen), set([0, 1, 2, 3]))
self.assertEqual(len(self.triggers), 0)
def test_redirect_connection_limit(self):
# following redirects should not consume additional connections
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/countdown/3'), self.stop,
max_redirects=3)
response = self.wait()
response.rethrow()
def test_default_certificates_exist(self):
open(_default_ca_certs()).close()
def test_gzip(self):
# All the tests in this file should be using gzip, but this test
# ensures that it is in fact getting compressed.
# Setting Accept-Encoding manually bypasses the client's
# decompression so we can see the raw data.
response = self.fetch("/chunk", use_gzip=False,
headers={"Accept-Encoding": "gzip"})
self.assertEqual(response.headers["Content-Encoding"], "gzip")
self.assertNotEqual(response.body, b"asdfqwer")
# Our test data gets bigger when gzipped. Oops. :)
self.assertEqual(len(response.body), 34)
f = gzip.GzipFile(mode="r", fileobj=response.buffer)
self.assertEqual(f.read(), b"asdfqwer")
def test_max_redirects(self):
response = self.fetch("/countdown/5", max_redirects=3)
self.assertEqual(302, response.code)
# We requested 5, followed three redirects for 4, 3, 2, then the last
# unfollowed redirect is to 1.
self.assertTrue(response.request.url.endswith("/countdown/5"))
self.assertTrue(response.effe |
openhealthcare/gloss | sites/rfh/settings.py | Python | gpl-3.0 | 340 | 0.002941 | DATABASE_STRING = 'postgresql://gloss:gloss@localhost/gloss_rfh'
INFORMATION_SOURCE = 'sites.rfh.information_source.InformatinSourceOnTest'
UPSTREAM_DB = dict(
USERNAME="username",
PASSWORD="password",
IP_ADDRESS="192.1.1.1",
DATABASE="some_database",
TABLE_NAME="some table"
)
from | site | s.rfh.local_settings import *
|
opencog/ros-behavior-scripting | sensors/audio_power.py | Python | agpl-3.0 | 1,713 | 0.008173 | #
# audio_power.py - Sound energy and power.
# Copyright (C) 2016 Hanson Robotics
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; withou | t even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
im | port rospy
from atomic_msgs import AtomicMsgs
from hr_msgs.msg import audiodata
'''
This implements a ROS node that subscribes to the `audio_sensors`
topic, and passes the audio power data to the cogserver. This is
used by OpenCog to react to loud sounds, sudden changes, and
general background noise levels.
An enhancement would be a a neural net that responded to clapping,
cheering, or other common sound events, identified them, labelled
them, and passed them on into the atomspace.
'''
class AudioPower:
def __init__(self):
self.atomo = AtomicMsgs()
rospy.Subscriber("audio_sensors", audiodata, self.audio_cb)
def audio_cb(self, data):
#print "SuddenChange {}".format(data.SuddenChange)
if data.SuddenChange:
print "Heard a loud bang!"
self.atomo.audio_bang(1.0)
else:
self.atomo.audio_bang(0.0)
self.atomo.audio_energy(data.Decibel)
|
uhavin/pubbd | tests/api/__init__.py | Python | mit | 96 | 0 | from a | pi import Api
def full_url(resource):
return Api.url_base.format(resource=resource | )
|
stormi/tsunami | src/secondaires/familier/masques/nom_familier/__init__.py | Python | bsd-3-clause | 3,922 | 0.000255 | # -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le masque <nom_familier>."""
from primaires.format.fonctions import supprimer_accents
from primaires.interpreteur.masque.masque import Masque
from primaires.interpreteur.masque.fonctions import *
from primaires.interpreteur.masque.exceptions.erreur_validation \
| import ErreurValidation
class NomFamilier(Masque):
"""Masque <nom_familier>.
On attend un nom unique de familier. Quand le joueur change le nom de
son familier, il doit veiller à ce qu'il reste unique.
"""
nom = "nom_familier"
nom_complet = "nom d'un familier"
def __init__(self):
| """Constructeur du masque"""
Masque.__init__(self)
self.proprietes["nouveau"] = "False"
self.proprietes["salle_identique"] = "True"
def init(self):
"""Initialisation des attributs"""
self.nom_familier = ""
self.familier = None
def repartir(self, personnage, masques, commande):
"""Répartition du masque."""
nom = liste_vers_chaine(commande)
if not nom:
raise ErreurValidation(
"Précisez un nom de familier.")
nom = nom.split(" ")[0].lower()
self.a_interpreter = nom
commande[:] = commande[len(nom):]
masques.append(self)
return True
def valider(self, personnage, dic_masques):
"""Validation du masque"""
Masque.valider(self, personnage, dic_masques)
salle = personnage.salle
nom = self.a_interpreter
t_nom = supprimer_accents(nom).lower()
familiers = importeur.familier.familiers_de(personnage)
familiers = [f for f in familiers if f.pnj]
if self.salle_identique:
familiers = [f for f in familiers if f.pnj.salle is salle]
familier = None
for t_familier in familiers:
if supprimer_accents(t_familier.nom).lower() == t_nom:
familier = t_familier
break
if not self.nouveau and familier is None:
raise ErreurValidation(
"|err|Le familier {} ne peut être trouvé.|ff|".format(nom))
elif self.nouveau and familier:
raise ErreurValidation(
"|err|Le familier {} existe déjà.|ff|".format(nom))
self.nom_familier = nom
self.familier = familier
return True
|
mmcdermo/RedLeader | redleader/resources/lambda.py | Python | apache-2.0 | 1,662 | 0.002407 | from redleader.resources import Resource
import botocore
class LambdaFunctionResource(Resource):
"""
Resource modeling a Lambda FunctionArn
"""
def __init__(self,
context,
function_name,
):
super(DynamoDBTableResource, self).__init__(context, cf_params)
self._function_name = function_name
def is_static(self):
return True
def get_id(self):
return "Lambda%s" % self._table_name.replace("-", "").replace("_", "")
def _iam_service_policy(self):
return {"name": "lambda",
"params": {
"safe_function_name": self.get_id(),
"function_name": self._function_name
}}
def _cloud_formation_template(self):
"""
Get the cloud formation templa | te for this resource
"""
return {
"Type" : "AWS::Lambda::Function",
"Properties" : {
"TableName": self._table_name,
"AttributeDefinitions": attribute_definitions,
"KeySchema": key_schema,
"ProvisionedThroughput": {
'ReadCapacityUnits': self._read_units,
'WriteCapacityUnits': se | lf._write_units
}
}
}
def resource_exists(self):
client = self._context.get_client("dynamodb")
try:
desc = client.describe_table(TableName=self._table_name)
return True
except botocore.exceptions.ClientError as e:
if "exist" in str(e):
return False
else:
raise e
|
znoland3/zachdemo | venvdir/lib/python3.4/site-packages/bs4/dammit.py | Python | mit | 29,774 | 0.010428 | # -*- coding: utf-8 -*-
"""Beautiful Soup bonus library: Unicode, Dammit
This library converts a bytestream to Unicode through any means
necessary. It is heavily based on code from Mark Pilgrim's Universal
Feed Parser. It works best on XML and HTML, but it does not rewrite the
XML or HTML to reflect a new encoding; that's the tree builder's job.
"""
__license__ = "MIT"
from pdb import set_trace
import codecs
from html.entities import codepoint2name
import re
import logging
import string
# Import a library to autodetect character encodings.
chardet_type = None
try:
# First try the fast C implementation.
# PyPI package: cchardet
import cchardet
def chardet_dammit(s):
return cchardet.detect(s)['encoding']
except ImportError:
try:
# Fall back to the pure Python implementation
# Debian package: python-chardet
# PyPI package: chardet
import chardet
def chardet_dammit(s):
return chardet.detect(s)['encoding']
#import chardet.constants
#chardet.constants._debug = 1
except ImportError:
# No chardet available.
def chardet_dammit(s):
return None
# Available from http://cjkpython.i18n.org/.
try:
import iconv_codec
except ImportError:
pass
xml_encoding_re = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I)
html_meta_re = re.compile(
'<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I)
class EntitySubstitution(object):
"""Substitute XML or HTML entities for the corresponding characters."""
def _populate_class_variables():
lookup = {}
reverse_lookup = {}
characters_for_re = []
for codepoint, name in list(codepoint2name.items()):
character = chr(codepoint)
if codepoint != 34:
# There's no point in turning the quotation mark into
# ", unless it happens within an attribute value, which
# is handled elsewhere.
characters_for_re.append(character)
lookup[character] = name
# But we do want to turn " into the quotation mark.
reverse_lookup[name] = character
re_definition = "[%s]" % "".join(characters_for_re)
return lookup, reverse_lookup, re.compile(re_definition)
(CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER,
CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables()
CHARACTER_TO_XML_ENTITY = {
"'": "apos",
'"': "quot",
"&": "amp",
"<": "lt",
">": "gt",
}
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
"&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
")")
AMPERSAND_OR_BRACKET = re.compile("([<>&])")
@classmethod
def _substitute_html_entity(cls, matchobj):
entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
return "&%s;" % entity
@classmethod
def _substitute_xml_entity(cls, matchobj):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
return "&%s;" % entity
@classmethod
def quoted_attribute_value(self, value):
"""Make a value into a quoted XML attribute, possibly escaping it.
Most strings will be quoted using double quotes.
Bob's Bar -> "Bob's Bar"
If a string contains double quotes, it will be quoted using
single quotes.
Welcome to "my bar" -> 'Welcome to "my bar"'
If a string contains both single and double quotes, the
double quotes will be escaped, and the string will be quoted
using double quotes.
Welcome to "Bob's Bar" -> "Welcome to "Bob's bar"
"""
quote_with = '"'
if '"' in value:
if "'" in value:
# The string contains both single and double
# quotes. Turn the double quotes into
# entities. We quote the double quotes rather than
# the single quotes because the entity name is
# """ whether this is HTML or XML. If we
# quoted the single quotes, we'd have to decide
# between ' and &squot;.
replace_with = """
value = value.replace('"', replace_with)
else:
# There are double quotes but no single quotes.
# We can use single quotes to quote the attribute.
quote_with = "'"
return quote_with + value + quote_with
@classmethod
def substitute_xml(cls, value, make_quoted_attribute=False):
"""Substitute XML entities for special XML characters.
:par | am value: A string to be substituted. The less-than sign
will become <, the greater-than sign will become >,
| and any ampersands will become &. If you want ampersands
that appear to be part of an entity definition to be left
alone, use substitute_xml_containing_entities() instead.
:param make_quoted_attribute: If True, then the string will be
quoted, as befits an attribute value.
"""
# Escape angle brackets and ampersands.
value = cls.AMPERSAND_OR_BRACKET.sub(
cls._substitute_xml_entity, value)
if make_quoted_attribute:
value = cls.quoted_attribute_value(value)
return value
@classmethod
def substitute_xml_containing_entities(
cls, value, make_quoted_attribute=False):
"""Substitute XML entities for special XML characters.
:param value: A string to be substituted. The less-than sign will
become <, the greater-than sign will become >, and any
ampersands that are not part of an entity defition will
become &.
:param make_quoted_attribute: If True, then the string will be
quoted, as befits an attribute value.
"""
# Escape angle brackets, and ampersands that aren't part of
# entities.
value = cls.BARE_AMPERSAND_OR_BRACKET.sub(
cls._substitute_xml_entity, value)
if make_quoted_attribute:
value = cls.quoted_attribute_value(value)
return value
@classmethod
def substitute_html(cls, s):
"""Replace certain Unicode characters with named HTML entities.
This differs from data.encode(encoding, 'xmlcharrefreplace')
in that the goal is to make the result more readable (to those
with ASCII displays) rather than to recover from
errors. There's absolutely nothing wrong with a UTF-8 string
containg a LATIN SMALL LETTER E WITH ACUTE, but replacing that
character with "é" will make it more readable to some
people.
"""
return cls.CHARACTER_TO_HTML_ENTITY_RE.sub(
cls._substitute_html_entity, s)
class EncodingDetector:
"""Suggests a number of possible encodings for a bytestring.
Order of precedence:
1. Encodings you specifically tell EncodingDetector to try first
(the override_encodings argument to the constructor).
2. An encoding declared within the bytestring itself, either in an
XML declaration (if the bytestring is to be interpreted as an XML
document), or in a <meta> tag (if the bytestring is to be
interpreted as an HTML document.)
3. An encoding detected through textual analysis by chardet,
cchardet, or a similar external library.
4. UTF-8.
5. Windows-1252.
"""
def __init__(self, markup, override_encodings=None, is_html=False,
exclude_encodings=None):
self.override_encodings = override_encodings or []
exclude_encodings = exclude_encodings or []
self.exclude_encodings = set([x.lower() for x in exclude_encodings])
self.chardet_encoding = None
self.is_html = is_html
self.declared_encoding = None
# F |
roaet/wafflehaus.nova | wafflehaus/nova/nova_base.py | Python | apache-2.0 | 1,330 | 0 | # Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for th | e specific language governing permissions and limitations
# under the License.
from nova import compute
from wafflehaus.base import WafflehausBase
class WafflehausNova(WafflehausBase):
def _get_compute(self):
return compute
def __init__(self, application, conf):
super(WafflehausNova, self).__init__(application, conf)
self.comp | ute = self._get_compute()
def _get_context(self, request):
"""Mock target for testing."""
context = request.environ.get("nova.context")
return context
def _get_instance(self, context, server_id):
"""Mock target for testing."""
compute_api = self.compute.API()
instance = compute_api.get(context, server_id, want_objects=True)
return instance
|
gaolichuang/neutron-fwaas | neutron_fwaas/services/firewall/drivers/cisco/csr_firewall_svc_helper.py | Python | apache-2.0 | 10,054 | 0.000497 | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_cisco.plugins.cisco.cfg_agent.service_helpers import (
service_helper)
from neutron.common import rpc as n_rpc
from neutron import context as n_context
from neutron.i18n import _LE
from neutron.plugins.common import constants
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
import oslo_messaging
from neutron_fwaas.services.firewall.drivers.cisco import csr_acl_driver
LOG = logging.getLogger(__name__)
CSR_FW_EVENT_Q_NAME = 'csr_fw_event_q'
CSR_FW_EVENT_CREATE = 'FW_EVENT_CREATE'
CSR_FW_EVENT_UPDATE = 'FW_EVENT_UPDATE'
CSR_FW_EVENT_DELETE = 'FW_EVENT_DELETE'
class CsrFirewalllPluginApi(object):
"""CsrFirewallServiceHelper (Agent) side of the ACL RPC API."""
@log_helpers.log_method_call
def __init__(self, topic, host):
self.host = host
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
@log_helpers.log_method_call
def get_firewalls_for_device(self, context, **kwargs):
"""Get Firewalls with rules for a device from Plugin."""
cctxt = self.client.prepare()
return cctxt.call(context, 'get_firewalls_for_device', host=self.host)
@log_helpers.log_method_call
def get_firewalls_for_tenant(self, context, **kwargs):
"""Get Firewalls with rules for a tenant from the Plugin."""
cctxt = self.client.prepare()
return cctxt.call(context, 'get_firewalls_for_tenant', host=self.host)
@log_helpers.log_method_call
def get_tenants_with_firewalls(self, context, **kwargs):
"""Get Tenants that have Firewalls configured from plugin."""
cctxt = self.client.prepare()
return cctxt.call(context,
'get_tenants_with_firewalls', host=self.host)
@log_helpers.log_method_call
def set_firewall_status(self, context, fw_id, status, status_data=None):
"""Make a RPC to set the status of a firewall."""
cctxt = self.client.prepare()
return cctxt.call(context, 'set_firewall_status', host=self.host,
firewall_id=fw_id, status=status,
status_data=status_data)
def firewall_deleted(self, context, firewall_id):
"""Make a RPC to indicate that the firewall resources are deleted."""
cctxt = self.client.prepare()
return cctxt.call(context, 'firewall_deleted', host=self.host,
firewall_id=firewall_id)
class CsrFirewallServiceHelper(object):
@log_helpers.log_method_call
def __init__(self, host, conf, cfg_agent):
super(CsrFirewallServiceHelper, self).__init__()
self.conf = conf
self.cfg_agent = cfg_agent
self.fullsync = True
self.event_q = service_helper.QueueMixin()
self.fw_plugin_rpc = CsrFirewalllPluginApi(
'CISCO_FW_PLUGIN', conf.host)
self.topic = 'CISCO_FW'
self._setup_rpc()
self.acl_driver = csr_acl_driver.CsrAclDriver()
def _setup_rpc(self):
self.conn = n_rpc.create_connection(new=True)
self.endpoints = [self]
self.conn.create_consumer(self.topic,
self.endpoints, fanout=True)
self.conn.consume_in_threads()
### Notifications from Plugin ####
def create_firewall(self, context, firewall, host):
"""Handle Rpc from plugin to create a firewall."""
LOG.debug("create_firewall: firewall %s", firewall)
event_data = {'event': CSR_FW_EVENT_CREATE,
'context': context,
'firewall': firewall,
'host': host}
self.event_q.enqueue(CSR_FW_EVENT_Q_NAME, event_data)
def update_firewall(self, context, firewall, host):
"""Handle Rpc from plugin to update a firewall."""
LOG.debug("update_firewall: firewall %s", firewall)
event_data = {'event': CSR_FW_EVENT_UPDATE,
'context': context,
'firewall': firewall,
'host': host}
self.event_q.enqueue(CSR_FW_EVENT_Q_NAME, event_data)
def delete_firewall(self, context, firewall, host):
"""Handle Rpc from plugin to delete a firewall."""
LOG.debug("delete_firewall: firewall %s", firewall)
event_data = {'event': CSR_FW_EVENT_DELETE,
'context': context,
'firewall': firewall,
'host': host}
self.event_q.enqueue(CSR_FW_EVENT_Q_NAME, event_data)
def _invoke_firewall_driver(self, context, firewall, func_name):
LOG.debug("_invoke_firewall_driver: %s", func_name)
try:
if func_name == 'delete_firewall':
return_code = self.acl_driver.__getattribute__(func_name)(
None, None, firewall)
if not return_code:
LOG.debug("firewall %s", firewall['id'])
self.fw_plugin_rpc.set_firewall_status(
context, firewall['id'], constants.ERROR)
else:
self.fw_plugin_rpc.firewall_deleted(
context, firewall['id'])
else:
return_code, status = self.acl_driver.__getattribute__(
func_name)(None, None, firewall)
if not return_code:
LOG.debug("firewall %s", firewall['id'])
self.fw_plugin_rpc.set_firewall_status(
context, firewall['id'], constants.ERROR)
else:
LOG.debug("status %s", status)
self.fw_plugin_rpc.set_firewall_status(
context, firewall['id'], constants.ACTIVE, status)
except Exception:
LOG.debug("_invoke_firewall_driver: PRC failure")
self.fullsync = True
def | _process_firewall_pending_op(self, context, firewall_list):
for firewall in firewall_list:
| firewall_status = firewall['status']
if firewall_status == 'PENDING_CREATE':
self._invoke_firewall_driver(
context, firewall, 'create_firewall')
elif firewall_status == 'PENDING_UPDATE':
self._invoke_firewall_driver(
context, firewall, 'update_firewall')
elif firewall_status == 'PENDING_DELETE':
self._invoke_firewall_driver(
context, firewall, 'delete_firewall')
def _process_fullsync(self):
LOG.debug("_process_fullsync")
try:
context = n_context.get_admin_context()
tenants = self.fw_plugin_rpc.get_tenants_with_firewalls(
context)
LOG.debug("tenants with firewall: %s", tenants)
for tenant_id in tenants:
ctx = n_context.Context('', tenant_id)
firewall_list = self.fw_plugin_rpc.get_firewalls_for_tenant(
ctx)
self._process_firewall_pending_op(ctx, firewall_list)
except Exception:
LOG.debug("_process_fullsync: RPC failure")
self.fullsync = True
def _process_devices(self, device_ids):
LOG.debug("_process_devices: device_ids %s", device_ids)
try:
for device_id in device_ids:
ctx = n_context.Context('', device_id)
firewall_list = self.fw_plugin_rpc.get_firewalls_for_device(
ctx)
self._pro |
Adward-R/InfoVis | Wang/email_relations/jsonsplit.py | Python | apache-2.0 | 2,100 | 0.024286 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import copy
import simplejson as json
attrs = {
'BC' : 'BirthCountry',
'G' : 'Gender',
'CC' : 'CitizenshipCountry',
'CB' : 'CitizenshipBasis',
'PC' : 'PassportCountry',
'CETP' : 'CurrentEmploymentType',
'CETT' : 'CurrentEmploymentTitle',
'MSB' : 'MilitaryServiceBranch',
'MDT' : 'MilitaryDischargeType'
}
count_table = {}
email_table = {}
cluster = {}
content = {}
new_links = []
def organize(cluster, nodes, key):
global attrs
global new_links
global content
group = 0
for keyattr in cluster.keys():
childlist = cluster[keyattr]
for childid in childlist:
nodes[childid]['group'] = group
group += 1
outputs = {'nodes' : copy.deepcopy(nodes), 'links' : new_links}
content[attrs[key]] = outputs
def clustering(nodes, key):
global cluster
cluster = {}
for node in nodes:
attrValue = node[attrs[key]]
if not cluster.has_key(attrValue):
cluster[attrValue] = []
cluster[attrValue].append(int(node['Number']))
organize(cluster, nodes, key)
jsonobj = json.load(file('Employee Records.json', 'r'))
nodes = jsonobj['nodes']
links = json.load(file('email.json', 'r'))
for node in nodes:
email_table[node['EmailAddress']] = int(node['Number'])
# Handle the fucking links
for link in links:
new_link = {}
if email_table.has_key(link['source']):
new_link['source'] = email_table[link['source']]
else:
continue
if email_table.has_key(link['target']):
new_link['target'] = email_table[link['targ | et']]
else:
continue
key = str(new_link['source']) + '_' + str(new_link['target'])
if count_table.has_key(key):
count_table[key] += 1
else:
count_table[key] = 0
for t in count_table.keys():
new_link = {}
new_link['value'] = count_table[t]
t = t.split('_')
new_link['source'] = int(t[0])
new | _link['target'] = int(t[1])
new_links.append(new_link)
# Handle the fucking nodes
for key in attrs:
clustering(nodes, key)
outputfile = file('content.json', 'w')
outputfile.write(json.dumps(content, indent = 2, sort_keys = True))
outputfile.close()
|
aronsky/home-assistant | tests/components/yamaha/test_media_player.py | Python | apache-2.0 | 5,828 | 0.00103 | """The tests for the Yamaha Media player platform."""
from unittest.mock import MagicMock, PropertyMock, call, patch
import pytest
import homeassistant.components.media_player as mp
from homeassistant.components.yamaha import media_player as yamaha
from homeassistant.components.yamaha.const import DOMAIN
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.setup import async_setup_component
CONFIG = {"media_player": {"platform": "yamaha", "host": "127.0.0.1"}}
def _create_zone_mock(name, url):
zone = MagicMock()
zone.ctrl_url = url
zone.zone = name
return zone
class FakeYamahaDevice:
"""A fake Yamaha device."""
def __init__(self, ctrl_url, name, zones=None):
"""Initialize the fake Yamaha device."""
self.ctrl_url = ctrl_url
self.name = name
self._zones = zones or []
def zone_controllers(self):
"""Return controllers for all available zones."""
return self._zones
@pytest.fixture(name="main_zone")
def main_zone_fixture():
"""Mock the main zone."""
return _create_zone_mock("Main zone", "http://main")
@pytest.fixture(name="device")
def device_fixture(main_zone):
"""Mock the yamaha device."""
device = FakeYamahaDevice("http://receiver", "Receiver", zones=[main_zone])
with patch("rxv.RXV", return_value=device):
yield device
async def test_ | setup_host(hass, device, main_zone):
"""Test set up integration with host."""
assert await async_setup_component(hass, mp.DOMAIN, CONFIG)
await hass.async_block_till_done()
state = hass.states.get("media_player.yamaha_receiver_main_zone")
assert state is not None
assert state.state == "off"
async def test_setup_no_host(hass, device, main_zone):
"""Test set up integration without host."""
with patch("rxv.find", return_value=[device]):
assert await | async_setup_component(
hass, mp.DOMAIN, {"media_player": {"platform": "yamaha"}}
)
await hass.async_block_till_done()
state = hass.states.get("media_player.yamaha_receiver_main_zone")
assert state is not None
assert state.state == "off"
async def test_setup_discovery(hass, device, main_zone):
"""Test set up integration via discovery."""
discovery_info = {
"name": "Yamaha Receiver",
"model_name": "Yamaha",
"control_url": "http://receiver",
"description_url": "http://receiver/description",
}
await async_load_platform(
hass, mp.DOMAIN, "yamaha", discovery_info, {mp.DOMAIN: {}}
)
await hass.async_block_till_done()
state = hass.states.get("media_player.yamaha_receiver_main_zone")
assert state is not None
assert state.state == "off"
async def test_setup_zone_ignore(hass, device, main_zone):
"""Test set up integration without host."""
assert await async_setup_component(
hass,
mp.DOMAIN,
{
"media_player": {
"platform": "yamaha",
"host": "127.0.0.1",
"zone_ignore": "Main zone",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("media_player.yamaha_receiver_main_zone")
assert state is None
async def test_enable_output(hass, device, main_zone):
"""Test enable output service."""
assert await async_setup_component(hass, mp.DOMAIN, CONFIG)
await hass.async_block_till_done()
port = "hdmi1"
enabled = True
data = {
"entity_id": "media_player.yamaha_receiver_main_zone",
"port": port,
"enabled": enabled,
}
await hass.services.async_call(DOMAIN, yamaha.SERVICE_ENABLE_OUTPUT, data, True)
assert main_zone.enable_output.call_count == 1
assert main_zone.enable_output.call_args == call(port, enabled)
@pytest.mark.parametrize(
"cursor,method",
[
(yamaha.CURSOR_TYPE_DOWN, "menu_down"),
(yamaha.CURSOR_TYPE_LEFT, "menu_left"),
(yamaha.CURSOR_TYPE_RETURN, "menu_return"),
(yamaha.CURSOR_TYPE_RIGHT, "menu_right"),
(yamaha.CURSOR_TYPE_SELECT, "menu_sel"),
(yamaha.CURSOR_TYPE_UP, "menu_up"),
],
)
@pytest.mark.usefixtures("device")
async def test_menu_cursor(hass, main_zone, cursor, method):
"""Verify that the correct menu method is called for the menu_cursor service."""
assert await async_setup_component(hass, mp.DOMAIN, CONFIG)
await hass.async_block_till_done()
data = {
"entity_id": "media_player.yamaha_receiver_main_zone",
"cursor": cursor,
}
await hass.services.async_call(DOMAIN, yamaha.SERVICE_MENU_CURSOR, data, True)
getattr(main_zone, method).assert_called_once_with()
async def test_select_scene(hass, device, main_zone, caplog):
    """Test select scene service."""
    # Replace the zone's ``scene`` attribute with a PropertyMock so that
    # assignments can be counted and, later, forced to fail.
    scene_prop = PropertyMock(return_value=None)
    type(main_zone).scene = scene_prop
    assert await async_setup_component(hass, mp.DOMAIN, CONFIG)
    await hass.async_block_till_done()
    scene = "TV Viewing"
    data = {
        "entity_id": "media_player.yamaha_receiver_main_zone",
        "scene": scene,
    }
    await hass.services.async_call(DOMAIN, yamaha.SERVICE_SELECT_SCENE, data, True)
    assert scene_prop.call_count == 1
    assert scene_prop.call_args == call(scene)
    # A second, different scene selection goes through the same property.
    scene = "BD/DVD Movie Viewing"
    data["scene"] = scene
    await hass.services.async_call(DOMAIN, yamaha.SERVICE_SELECT_SCENE, data, True)
    assert scene_prop.call_count == 2
    assert scene_prop.call_args == call(scene)
    # Selecting an unknown scene raises inside the zone; the integration is
    # expected to swallow the error and log it rather than propagate it.
    scene_prop.side_effect = AssertionError()
    missing_scene = "Missing scene"
    data["scene"] = missing_scene
    await hass.services.async_call(DOMAIN, yamaha.SERVICE_SELECT_SCENE, data, True)
    assert f"Scene '{missing_scene}' does not exist!" in caplog.text
|
nccgroup/Scout2 | tools/gen-tests.py | Python | gpl-2.0 | 843 | 0.002372 | #!/usr/bin/env python
import os

# Walk the AWSScout2 package and generate a skeleton test module for every
# Python source file that does not already have one.
# (This block also repairs stray "|" dataset-split artifacts that had
# corrupted `os.path.join` and the `test_filename` assignment.)
scout2_dir = 'AWSScout2'
tests_dir = 'testsbase'
for root, dirnames, filenames in os.walk(scout2_dir):
    for filename in filenames:
        # Only plain Python modules; skip __init__.py and similar.
        if filename.startswith('__') or not filename.endswith('.py'):
            continue
        filepath = os.path.join(root, filename)
        # NOTE(review): assumes POSIX '/' separators and no dots in
        # directory names -- confirm against the repo layout.
        tmp = filepath.split('.')[0].split('/')
        print(str(tmp))
        test = '# Import AWS utils\nfrom %s import *\n\n#\n# Test methods for %s\n#\n\nclass Test%sClass:\n\n' % ('.'.join(tmp), filepath, ''.join(t.title() for t in tmp))
        test_filename = 'test-%s.py' % '-'.join(tmp[1:])
        print('%s --> %s' % (filepath, test_filename))
        test_file = os.path.join(tests_dir, test_filename)
        # Never overwrite an existing (possibly hand-edited) test module.
        if not os.path.isfile(test_file):
            with open(test_file, 'w+') as f:
                f.write(test)
YorkJong/pyResourceLink | reslnk/myutil.py | Python | lgpl-3.0 | 4,850 | 0.002268 | # -*- coding: utf-8 -*-
"""
This module put my utility functions
"""
__author__ = "Jiang Yu-Kuan <yukuan.jiang@gmail.com>"
__date__ = "2016/02/08 (initial version) ~ 2019/04/17 (last revision)"
import re
import os
import sys
#------------------------------------------------------------------------------
# File
#------------------------------------------------------------------------------
def save_utf8_file(fn, lines):
    """Save an iterable of text lines to *fn* as UTF-8, joined by newlines.

    Bug fix: the original wrote ``bytes`` (``.encode("utf-8")``) into a
    text-mode file object, which raises ``TypeError`` on Python 3.  Opening
    the file with an explicit encoding produces the same UTF-8 bytes.
    """
    with open(fn, "w", encoding="utf-8") as out_file:
        out_file.write("\n".join(lines))
def main_basename(path):
    """Return the basename of *path* with its extension stripped.

    (Repairs stray "|" dataset-split artifacts that had corrupted the
    function name and the ``splitext`` unpacking; the old docstring example
    also showed the extension being kept, which ``splitext`` removes.)

    Example
    -------
    >>> main_basename('code/langconv/MsgID.h')
    'MsgID'
    """
    base = os.path.basename(path)
    base_main, _base_ext = os.path.splitext(base)
    return base_main
#------------------------------------------------------------------------------
# Math
#------------------------------------------------------------------------------
def is_numeric(str):
    """Return True if *str* is an expression that evaluates to a number.

    NOTE(security): this relies on ``eval`` and must never be fed untrusted
    input; it is kept for backward compatibility because callers may pass
    simple arithmetic expressions such as ``"1+2"``.
    """
    try:
        _offset = int(eval(str))
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return False
    return True
#------------------------------------------------------------------------------
# String
#------------------------------------------------------------------------------
def replace_chars(text, replaced_pairs='', deleted_chars=''):
    """Return *text* with pair substitutions applied and chars removed.

    Arguments
    ---------
    text -- the input text
    replaced_pairs -- iterable of (old, new) substitution pairs
    deleted_chars -- characters stripped from the result

    Example
    -------
    >>> replaced = [('a','b'), ('c','d')]
    >>> removed = 'e'
    >>> replace_chars('abcde', replaced, removed)
    'bbdd'
    """
    result = text
    for old_ch, new_ch in replaced_pairs:
        result = result.replace(old_ch, new_ch)
    for unwanted in deleted_chars:
        result = result.replace(unwanted, '')
    return result
def camel_case(string):
    """Return a CamelCase string built from a space-separated string.

    Example
    -------
    >>> camel_case('good job')
    'GoodJob'
    """
    parts = []
    for word in string.split():
        parts.append(word.capitalize())
    return ''.join(parts)
def replace_punctuations(text):
    """Replace punctuation characters with abbreviations for a string.

    Each punctuation mark is swapped for a short letter code (see the
    table below) via replace_chars(); arithmetic/bracket characters are
    deleted outright.
    """
    # (char, abbreviation) pairs applied in order by replace_chars().
    punctuations = [
        ('?', 'Q'),   # Q: question mark
        ('.', 'P'),   # P: period; full stop
        ('!', 'E'),   # E: exclamation mark
        ("'", 'SQ'),  # SQ: single quotation mark; single quote
        ('"', 'DQ'),  # DQ: double quotation mark; double quotes
        ('(', 'LP'),  # LP: left parenthese
        (')', 'RP'),  # RP: right parenthese
        (':', 'Cn'),  # Cn: colon
        (',', 'Ca'),  # Ca: comma
        (';', 'S'),   # S: semicolon
    ]
    # Characters removed entirely rather than abbreviated.
    deleted = '+-*/^=%$#@|\\<>{}[]'
    return replace_chars(text, punctuations, deleted)
def remain_alnum(text):
    """Keep only the alphanumeric characters of *text* that fall in the
    ASCII range between space and 'z' (digits and English letters)."""
    kept = []
    for ch in text:
        if ch.isalnum() and ord(' ') <= ord(ch) <= ord('z'):
            kept.append(ch)
    return ''.join(kept)
#------------------------------------------------------------------------------
# For code generation
#------------------------------------------------------------------------------
def c_identifier(text):
    """Convert input text into a legal identifier in C.

    Pipeline: CamelCase any spaces, encode signed numbers (+N -> PN,
    -N -> NN), abbreviate punctuation, then drop every remaining
    non-alphanumeric character.

    Example
    -------
    >>> c_identifier("Hello World")
    'HelloWorld'
    >>> c_identifier("Anti-Shake")
    'Antishake'
    """
    if ' ' in text:
        text = camel_case(text)
    # Encode a leading sign of a number so it survives the later strip.
    text = re.sub(r'\+\d+', lambda x: x.group().replace('+', 'P'), text)
    text = re.sub(r'\-\d+', lambda x: x.group().replace('-', 'N'), text)
    text = replace_punctuations(text)
    return remain_alnum(text)
def wrap_header_guard(lines, h_fn):
    """Wrap a C header guard for a given line list.

    The guard symbol is derived from the header's main basename,
    converted to UPPER_SNAKE_CASE with a trailing ``_H_``.
    """
    def underscore(txt):
        """Return an under_scores text from a CamelCase text.
        This function will leave a CamelCase text unchanged.
        """
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', txt)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
    h_fn_sig = '%s_H_' % underscore(main_basename(h_fn)).upper()
    begin = ['#ifndef %s' % h_fn_sig]
    begin += ['#define %s' % h_fn_sig, '', '']
    end = ['', '', '#endif // %s' % h_fn_sig, '']
    return begin + lines + end
def prefix_info(lines, software, version, author, comment_mark='//'):
    """Return *lines* prefixed with generator, author and invocation
    comments using the given comment mark."""
    tool = os.path.basename(sys.argv[0])
    invocation = ' '.join(sys.argv[1:])
    header = [
        '%s Generated by the %s v%s' % (comment_mark, software, version),
        '%s !author: %s' % (comment_mark, author),
        '%s !trail: %s %s' % (comment_mark, tool, invocation),
    ]
    return header + lines
|
flensrocker/yavdr-python-buildserver | gh2lp.py | Python | gpl-2.0 | 16,587 | 0.003376 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from email.mime.text import MIMEText
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from socketserver import ThreadingMixIn
import argparse
import ast
import configparser
import hashlib
import hmac
import datetime
import json
import os
import shutil
import signal
import smtplib
import subprocess
import sys
import tempfile
import threading
version = "0.1.5"
config = None
server = None
def get_from_args(args, key, default = None):
    """Return args[key], falling back to *default*; raise when neither
    exists.

    Bug fix: the fallback used ``if default:``, so falsy defaults such as
    ``0`` or ``""`` were rejected and an exception raised instead of the
    default being returned.  Testing against ``None`` lets any explicitly
    supplied default through.
    """
    if key in args:
        return args[key]
    if default is not None:
        return default
    raise Exception("missing argument {}".format(key))
class Config:
    """Command-line + INI-file configuration for the build-hook server.

    Parsing happens eagerly in __init__: argparse first, then the INI
    file(s) given via -c/--config, then the HOOK_SECRET_KEY environment
    variable.
    """
    def __init__(self):
        # The --pusher/--owner/... flags mirror the fields of a GitHub
        # push payload so a build can be triggered directly with -b.
        argparser = argparse.ArgumentParser(description='Github hook handler')
        argparser.add_argument('-c', '--config', action='append', metavar='CONFIG', dest='config', default=None, help='configuration file(s)')
        argparser.add_argument('-b', '--build', action='store_true', dest='build', default=None, help='direct build, don\'t serve')
        argparser.add_argument('--pusher', metavar='PUSHER', dest='pusher', default=None, help='name of the commit pusher')
        # NOTE: dest contains a hyphen, so the value is only reachable via
        # the vars() dict (see Build.fromargs).
        argparser.add_argument('--pusher-email', metavar='PUSHEREMAIL', dest='pusher-email', default=None, help='email address of the commit pusher')
        argparser.add_argument('--owner', metavar='OWNER', dest='owner', default=None, help='owner of the git repository')
        argparser.add_argument('--name', metavar='NAME', dest='name', default=None, help='name of the package/repository')
        argparser.add_argument('--git-url', metavar='GITURL', dest='git-url', default=None, help='clone-url of the git repository')
        argparser.add_argument('--branch', metavar='BRANCH', dest='branch', default=None, help='name of the branch to clone')
        argparser.add_argument('--urgency', metavar='URGENCY', dest='urgency', default="medium", help='urgency of the build')
        self.args = vars(argparser.parse_args())
        self.configparser = configparser.SafeConfigParser()
        if "config" in self.args:
            read_files = self.configparser.read(self.args["config"])
            print("read config from: {FILES}".format(FILES=read_files))
        self.get_config()
        # set up environment variables
        try:
            self.HOOK_SECRET_KEY = os.environb[b'HOOK_SECRET_KEY']
        except:
            print("warning: HOOK_SECRET_KEY environment variable not set!")
            print("export your buildhook secret as HOOK_SECRET_KEY")
            self.HOOK_SECRET_KEY = None
        # Exported for the Debian packaging tools invoked during builds.
        os.environ['DEBEMAIL'] = self.debemail
        os.environ['DEBFULLNAME'] = self.debfullname
        os.environ['EDITOR'] = 'true'
    def get_setting(self, category, setting, default = None):
        """Return the string option [category] *setting*, or *default*."""
        if self.configparser.has_option(category, setting):
            return self.configparser.get(category, setting)
        else:
            return default
    def get_settingb(self, category, setting, default = False):
        """Return the boolean option [category] *setting*, or *default*."""
        if self.configparser.has_option(category, setting):
            return self.configparser.getboolean(category, setting)
        else:
            return default
    def get_section(self, section, default = None):
        """Return the whole [section] proxy, or *default* if absent."""
        if self.configparser.has_section(section):
            return self.configparser[section]
        else:
            return default
    def get_config(self):
        """Materialize all known settings as attributes, with defaults."""
        self.direct_build = self.args["build"]
        self.dryrun = self.get_settingb("Server", "dryrun", False)
        self.server_port = int(self.get_setting("Server", "port", "8180"))
        self.smtp_server = self.get_setting("Server", "smtp_server", None)
        self.smtp_sender = self.get_setting("Server", "smtp_sender", None)
        self.smtp_tls = self.get_settingb("Server", "smtp_tls", False)
        self.smtp_user = self.get_setting("Server", "smtp_user", None)
        self.smtp_password = self.get_setting("Server", "smtp_password", None)
        # Mail is only usable when a sender address is configured.
        if not self.smtp_sender:
            self.smtp_server = None
        self.launchpad_owner = self.get_setting("Launchpad", "owner", "yavdr")
        self.github_owner = self.get_setting("Github", "owner", "yavdr")
        self.github_baseurl = self.get_setting("Github", "baseurl", "git://github.com/yavdr/")
        self.debfullname = self.get_setting("Build", "fullname", "yaVDR Release-Team")
        self.debemail = self.get_setting("Build", "email", "release@yavdr.org")
        self.gpgkey = self.get_setting("Build", "gpgkey", None)
        self.version_suffix = self.get_setting("Build", "version_suffix", "-0yavdr0~{release}")
        self.default_release = self.get_setting("Build", "default_release", "trusty")
        self.default_stage = self.get_setting("Build", "default_stage", "unstable")
        self.default_section = self.get_setting("Build", "default_section", "main")
        # Branch-prefix -> stage/release/section mapping tables.
        self.stages = self.get_section("Stages", {'master': 'unstable', 'testing-': 'testing', 'stable-': 'stable'})
        self.releases = self.get_section("Releases", {'-0.5': 'precise', '-0.6': 'trusty', '-0.7': 'xenial'})
        self.sections = self.get_section("Sections", {'vdr-': 'vdr', 'vdr-addon-avahi': 'vdr', 'vdr-addon-': 'main', 'yavdr-': 'yavdr'})
class Build(threading.Thread):
    def __init__(self, config):
        """Create an idle build thread.

        The build description fields start empty and are populated later
        via fromgithub() or fromargs() before the thread is started.
        """
        threading.Thread.__init__(self)
        self.config = config
        # Push/build metadata, filled in by fromgithub()/fromargs().
        self.pusher = ""
        self.pusher_email = ""
        self.owner = ""
        self.name = ""
        self.git_url = ""
        self.branch = ""
        self.stage = ""
        self.release = ""
        self.section = ""
        self.urgency = "medium"
        return
    def run(self):
        """Thread entry point: delegate to build()."""
        self.build()
        return
    def output(self, logfile):
        """Write the build parameters to *logfile* (a binary-mode file)."""
        logfile.write("repo: {}\n".format(self.name).encode())
        logfile.write("branch: {}\n".format(self.branch).encode())
        logfile.write("owner: {}\n".format(self.owner).encode())
        logfile.write("pusher: {0} <{1}>\n".format(self.pusher, self.pusher_email).encode())
        logfile.write("git_url: {}\n".format(self.git_url).encode())
        logfile.write("stage: {}\n".format(self.stage).encode())
        logfile.write("section: {}\n".format(self.section).encode())
        logfile.write("release: {}\n".format(self.release).encode())
        logfile.write("urgency: {}\n".format(self.urgency).encode())
        return
    def fromgithub(self, json_payload):
        """Populate the build description from a GitHub push-hook payload."""
        self.pusher = json_payload["pusher"]["name"]
        self.pusher_email = json_payload["pusher"]["email"]
        self.owner = json_payload["repository"]["owner"]["name"]
        self.name = json_payload["repository"]["name"]
        self.git_url = json_payload["repository"]["git_url"]
        branch = json_payload["ref"]
        # Only branch pushes are buildable; any other ref is rejected.
        if not branch.startswith("refs/heads/"):
            raise Exception("unknown branch")
        self.branch = branch[11:]  # strip the "refs/heads/" prefix
        return
def fromargs(self, args):
self.pusher = get_from_args(args, "pusher")
self.pusher_email = get_from_args(args, "pusher-email")
self.owner = get_from_args(args, "owner", "ya | vdr")
self.name = get_from_args(args, "name")
self.git_url = get_from_args(args, "git-url")
self.branch = get_from_args(args, "branch", "master")
self.urgency = get_from_args(args, "urgency", "medium")
return
def build(self):
logfile = None
package_name_version = None
try:
# create a temporary directory and enter it
tmpdir = tempfile.mkdtemp(suffix=self.name)
print("build d | irectory: ", tmpdir)
os.chdir(tmpdir)
# log the output to files
logfile = open('build.log', 'w+b')
if self.owner != self.config.github_owner:
raise Exception("wrong owner: {OWNER} != {GHOWNER}".format(OWNER=self.owner, GHOWNER=self.config.github_owner))
if not self.git_url.startswith(self.config.github_baseurl):
raise Exception("wrong repository: {GITURL} starts not with {BASEURL}".format(GITURL=self.git_url, BASEURL=self.config.github_baseurl))
self.stage = self.config. |
Cue/greplin-tornado-sendgrid | src/greplin/tornado/sendgrid.py | Python | apache-2.0 | 2,166 | 0.01108 | # Copyright 2011 The greplin-tornado-sendgrid Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixin for Sendgrid's REST API"""
import urllib
import functools
import logging
from tornado import httpclient, escape
class Sendgrid(object):
    """Non-blocking client for SendGrid's legacy mail.send REST API."""
    _BASE_URL = "https://sendgrid.com/api/mail.send"
    _FORMAT = "json"
    _attrs = frozenset(['toname', 'x-smtpapi', 'fromname', 'replyto', 'date', 'files'])
    _required_attrs = frozenset(['to', 'subject', 'from'])
    def __init__(self, user, secret):
        self._user = user
        self._secret = secret
    def send_email(self, callback, **kwargs):
        """Send a message through SendGrid.

        Invokes *callback* with ``True`` on success and ``None`` on any
        validation or API error.
        """
        # Bug fix: the original condition was `if 'text' and 'html' not in
        # kwargs`, which only tested for 'html' ('text' is a truthy
        # literal).  A message needs at least one body field, text or html.
        if 'text' not in kwargs and 'html' not in kwargs:
            logging.warning("Message not sent. 'text' or 'html' fields required")
            callback(None)
            return
        for required in self._required_attrs:
            if required not in kwargs:
                logging.error("Message not sent. Missing required argument %s", required)
                callback(None)
                return
        kwargs.update({'api_user':self._user, 'api_key':self._secret})
        # (Also repairs stray "|" dataset-split artifacts in the next lines.)
        api_url = "%s.%s" % (self._BASE_URL, self._FORMAT)
        post_body = urllib.urlencode(kwargs)
        http = httpclient.AsyncHTTPClient()
        request = httpclient.HTTPRequest(api_url, method='POST', body=post_body)
        http.fetch(request, functools.partial(self._on_sendgrid_result, callback))
    def _on_sendgrid_result(self, callback, result):
        """Parse a SendGrid API response and report success to *callback*."""
        result = escape.json_decode(result.body)
        if result.get("errors"):
            logging.error("SendGrid API error: %s", result['errors'])
            callback(None)
            return
        callback(True)
|
thehajime/ns-3-dev | src/point-to-point/bindings/modulegen__gcc_LP64.py | Python | gpl-2.0 | 349,061 | 0.015158 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Downgrade pybindgen wrapper-generation errors to warnings."""
    def handle_error(self, wrapper, exception, traceback_):
        """Warn about the failing wrapper and report the error as handled
        (True) so binding generation can continue."""
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create the root pybindgen Module for the ns3 point-to-point bindings."""
    root_module = Module('ns.point_to_point', cpp_namespace='::ns3')
    return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class]
module.add_class('AsciiTraceHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class]
module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## data-rate.h (module 'network'): ns3::DataRate [class]
module.add_class('DataRate', import_from_module='ns.network')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core') |
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_c | lass('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration]
module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network')
## pcap-file.h (module 'network'): ns3::PcapFile [class]
module.add_class('PcapFile', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [class]
module.add_class('PcapHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [enumeration]
module.add_enum('', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class]
module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## point-to-point-helper.h (module 'point-to-point'): ns3::PointToPointHelper [class]
module.add_class('PointToPointHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator |
inveniosoftware/invenio-records-rest | invenio_records_rest/config.py | Python | mit | 13,214 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio-Records-REST configuration."""
from __future__ import absolute_import, print_function
from flask import request
from invenio_indexer.api import RecordIndexer
from invenio_search import RecordsSearch
from .facets import terms_filter
from .utils import allow_all, check_elasticsearch, deny_all
def _(x):
    """Identity function for string extraction.

    Marks literals for extraction without transforming them at import time.
    """
    return x
# Default REST endpoint: records addressed by the "recid" persistent
# identifier type, served as JSON under /records/ (full option reference
# in the module docstring below).
RECORDS_REST_ENDPOINTS = dict(
    recid=dict(
        pid_type='recid',
        pid_minter='recid',
        pid_fetcher='recid',
        search_class=RecordsSearch,
        indexer_class=RecordIndexer,
        search_index=None,
        search_type=None,
        record_serializers={
            'application/json': ('invenio_records_rest.serializers'
                                 ':json_v1_response'),
        },
        search_serializers={
            'application/json': ('invenio_records_rest.serializers'
                                 ':json_v1_search'),
        },
        list_route='/records/',
        item_route='/records/<pid(recid):pid_value>',
        default_media_type='application/json',
        max_result_window=10000,
        error_handlers=dict(),
    ),
)
"""Default REST endpoints loaded.
This option can be overwritten to describe the endpoints of different
record types. Each endpoint is in charge of managing all its CRUD operations
(GET, POST, PUT, DELETE, ...).
The structure of the dictionary is as follows:
.. code-block:: python
from flask import abort
from flask_security import current_user
from invenio_records_rest.query import es_search_factory
from invenio_records_rest.errors import PIDDeletedRESTError
def search_factory(*args, **kwargs):
if not current_user.is_authenticated:
abort(401)
return es_search_factory(*args, **kwargs)
def permission_check_factory():
def check_title(record, *args, **kwargs):
def can(self):
if record['title'] == 'Hello World':
return True
return type('Check', (), {'can': can})()
def deleted_pid_error_handler(error):
record = error.pid_error.record or {}
return make_response(jsonify({
'status': 410,
'message': error.description,
'removal_reason': record.get('removal_reason')}), 410)
RECORDS_REST_ENDPOINTS = {
'endpoint-prefix': {
'create_permission_factory_imp': permission_check_factory(),
'default_endpoint_prefix': True,
'default_media_type': 'application/json',
'delete_permission_factory_imp': permission_check_factory(),
'item_route': ('/records/<pid(record-pid-type, '
'record_class="mypackage.api:MyRecord"):pid_value>'),
'links_factory_imp': ('invenio_records_rest.links:'
'default_links_factory'),
'list_route': '/records/',
'max_result_window': 10000,
'pid_fetcher': '<registered-pid-fetcher>',
'pid_minter': '<registered-minter-name>',
'pid_type': '<record-pid-type>',
'list_permission_factory_imp': permission_check_factory(),
'read_permission_factory_imp': permission_check_factory(),
'record_class': 'mypackage.api:MyRecord',
'record_loaders': {
'application/json': 'mypackage.loaders:json_loader'
},
'record_serializers': {
'application/json': 'mypackage.utils:my_json_serializer'
},
'record_serializers_aliases': {
'json': 'application/json'
},
'search_class': 'mypackage.utils:mysearchclass',
'search_factory_imp': search_factory(),
'search_index': 'elasticsearch-index-name',
'search_serializers': {
'application/json': 'mypackage.utils:my_json_search_serializer'
},
'search_serializers_aliases': {
'json': 'application/json'
},
'search_type': 'elasticsearch-doc-type',
'suggesters': {
'my_url_param_to_complete': {
'_source': ['specified_source_filtered_field'],
'completion': {
'field': 'suggest_byyear_elasticsearch_field',
'size': 10,
'context': 'year'
}
},
},
'update_permission_factory_imp': permission_check_factory(),
'use_options_view': True,
'error_handlers': {
PIDDeletedRESTError: deleted_pid_error_handler,
},
},
}
:param create_permission_factory_imp: Import path to factory that create
permission object for a given record.
:param default_endpoint_prefix: declare the current endpoint as the default
when building endpoints for the defined ``pid_type``. By default the
default prefix is defined to be the value of ``pid_type``.
:param default_media_type: Default media type for both records and search.
:param delete_permission_factory_imp: Import path to factory that creates a
delete permission object for a given record.
:param item_route: URL rule for a single record.
:param links_factory_imp: Factory for record links generation.
:param list_route: Base URL for the records endpoint.
:param max_result_window: Maximum total number of records retrieved from a
query.
:param pid_type: It specifies the record pid type. Required.
You can generate an URL to list all records of the given ``pid_type`` by
calling ``url_for('invenio_records_rest.{0}_list'.format(
current_records_rest.default_endpoint_prefixes[pid_type]))``.
:param pid_fetcher: It identifies the registered fetcher name. Required.
:param pid_minter: It identifies the registered minter name. Required.
:param list_permission_factory_imp: Import path to factory that creates a
list permission object for a given index / list.
:param read_permission_factory_imp: Import path to factory that creates a
read permission object for a given record.
:param record_class: A record API class or importable string.
:param record_loaders: It contains the list of record deserializers for
supported formats.
:param record_serializers: It contains the list of record serializers for
supported formats.
:param record_serializers_aliases: A mapping of values of the defined query arg
(see `config.REST_MIMETYPE_QUERY_ARG_NAME`) to valid mimetypes for record
item serializers: dict(alias -> mimetype).
:param search_class: Import path or class object for the object in charge of
execute the search queries. The default search class is
:class:`invenio_search.api.RecordsSearch`.
For more information about resource loading, see the `Search
<http://elasticsearch-dsl.readthedocs.io/en/latest/search_dsl.html>` of the
ElasticSearch DSL library.
:param search_factory_imp: Factory to parse queries.
:param search_index: Name of the search index used when searching records.
:param search_serializers: It contains the list of records serializers for all
supported format. This configuration differ from the previous because in
this case it handle a list of records resulted by a search query instead of
a single record.
:param search_serializers_aliases: A mapping of values of the defined query arg
(see `config.REST_MIMETYPE_QUERY_ARG_NA | ME`) to valid mimetypes for records
search serializers: dict(alias -> mimetype).
:param search_type: Name of the search type used when searching records.
:param suggesters: Suggester fields configuration. Any element of the
dictionary represents a suggestion field. For ea | ch suggestion field we can
optionally specify the source filtering (appropriate for ES5) by using
``_source``. The key |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.