repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
georgemarshall/django-cryptography | tests/fields/test_pickle.py | Python | bsd-3-clause | 3,763 | 0.000531 | import json
import pickle
from django.core import exceptions, serializers
from django.db import IntegrityError
from django.test import TestCase
from django.utils import timezone
from django_cryptography.fields import PickledField
from .models import PickledModel, NullablePickledModel
class TestSaveLoad(TestCase):
    """Round-trip persistence tests for PickledField values."""

    def _roundtrip(self, value):
        # Save a model holding ``value`` and fetch it back through the ORM.
        obj = PickledModel(field=value)
        obj.save()
        return obj, PickledModel.objects.get()

    def test_integer(self):
        obj, fetched = self._roundtrip(42)
        self.assertEqual(obj.field, fetched.field)

    def test_string(self):
        obj, fetched = self._roundtrip('Hello, world!')
        self.assertEqual(obj.field, fetched.field)

    def test_datetime(self):
        obj, fetched = self._roundtrip(timezone.now())
        self.assertEqual(obj.field, fetched.field)

    def test_default_null(self):
        # A nullable field left unset comes back as None.
        obj = NullablePickledModel()
        obj.save()
        fetched = NullablePickledModel.objects.get(pk=obj.pk)
        self.assertEqual(fetched.field, None)
        self.assertEqual(obj.field, fetched.field)

    def test_null_handling(self):
        # None round-trips on the nullable model ...
        obj = NullablePickledModel(field=None)
        obj.save()
        fetched = NullablePickledModel.objects.get()
        self.assertEqual(obj.field, fetched.field)
        # ... but violates the NOT NULL constraint on the required model.
        with self.assertRaises(IntegrityError):
            PickledModel(field=None).save()
class TestQuerying(TestCase):
    """Lookup behaviour of PickledField: exact/isnull/in work, others raise."""

    def setUp(self):
        field_values = ([1], [2], [2, 3], [20, 30, 40], None)
        self.objs = [
            NullablePickledModel.objects.create(field=value)
            for value in field_values
        ]

    def test_exact(self):
        matched = NullablePickledModel.objects.filter(field__exact=[1])
        self.assertSequenceEqual(matched, self.objs[:1])

    def test_isnull(self):
        matched = NullablePickledModel.objects.filter(field__isnull=True)
        self.assertSequenceEqual(matched, self.objs[-1:])

    def test_in(self):
        matched = NullablePickledModel.objects.filter(field__in=[[1], [2]])
        self.assertSequenceEqual(matched, self.objs[:2])

    def test_unsupported(self):
        # Containment lookups are rejected for pickled data.
        with self.assertRaises(exceptions.FieldError):
            NullablePickledModel.objects.filter(field__contains=[2])
class TestMigrations(TestCase):
def test_deconstruct(self):
field = PickledField()
| name, path, args, kwargs = field.deconstruct()
self.assertEqual | ("django_cryptography.fields.PickledField", path)
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
class TestSerialization(TestCase):
    """Checks the JSON serializer round-trip for PickledField contents."""

    # Expected JSON fixture: the value [1, 2, None] pickled and base64-encoded
    # (see test_dumping). The encoded payload depends on the pickle protocol,
    # which changed with protocol 5 in Python 3.8, so pick the fixture that
    # matches the running interpreter.
    test_data = (
        # Python 3.4
        '[{"fields": {"field": "gANdcQAoSwFLAk5lLg=="}, "model": "fields.pickledmodel", "pk": null}]'
    ) if pickle.HIGHEST_PROTOCOL < 5 else (
        # Python 3.8
        '[{"fields": {"field": "gASVCgAAAAAAAABdlChLAUsCTmUu"}, "model": "fields.pickledmodel", "pk": null}]'
    )

    def test_dumping(self):
        # Serializing a model instance must produce the expected payload.
        # Compare parsed JSON, not raw strings, to ignore key ordering.
        instance = PickledModel(field=[1, 2, None])
        data = serializers.serialize('json', [instance])
        self.assertEqual(json.loads(self.test_data), json.loads(data))

    def test_loading(self):
        # Deserializing the fixture must restore the original Python list.
        instance = list(serializers.deserialize('json',
                                                self.test_data))[0].object
        self.assertEqual([1, 2, None], instance.field)
class TestValidation(TestCase):
    """Field-level validation of PickledField."""

    def test_validate(self):
        # None must pass clean() without raising.
        PickledField().clean(None, None)
|
OBIGOGIT/etch | binding-python/runtime/src/main/python/etch/binding/util/URLSerializer.py | Python | apache-2.0 | 2,249 | 0.004891 | """
# Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
"""
from __future__ import absolute_import
from ..msg.Fie | ld import *
from ..msg.ImportExportHelper import *
from ..msg.StructValue import *
from ..msg.Type import *
from ..msg.ValueFactory import *
from ..support.Class2TypeMap import *
from ..support.Validator_string import *
from ...util.URL import *
class URLSerializer(ImportExportHelper):
"""
An etch serializer for URL
"""
FIELD_NAME = "urlStr"
|
    @classmethod
    def init(cls, typ, class2type):
        """Register this serializer with the value factory.

        Defines the custom ``urlStr`` field on ``typ`` so that the importer
        can find it, and maps the URL class to ``typ``.

        :param typ: the etch Type representing URL on the wire
        :param class2type: mapping from Python classes to etch types
        """
        field = typ.getField(cls.FIELD_NAME)
        class2type.put(URL, typ)
        typ.setComponentType(URL)
        # This serializer handles both import and export for URL structs.
        typ.setImportExportHelper( URLSerializer(typ, field))
        # NOTE(review): Validator_string.get(0) presumably validates a scalar
        # (0-dimensional) string -- confirm against Validator_string.
        typ.putValidator( field, Validator_string.get(0))
        # Freeze the type so no further fields/validators can be added.
        typ.lock()
    def __init__(self, typ, field):
        """Bind the serializer to its etch type and the field holding the URL string."""
        self.__type = typ
        self.__field = field
def importValue(self, struct):
return URL(struct.get(field))
    def exportValue(self, vf, value):
        """Pack ``value`` (a URL) into a StructValue for the wire.

        :param vf: the value factory used to build the struct
        :param value: the URL instance to export
        """
        struct = StructValue(self.__type, vf)
        # Store the URL's string form under the serializer's field.
        struct.put(self.__field, repr(value))
        return struct
|
kdart/pycopia3 | QA/pycopia/QA/db/tui/eventloop.py | Python | apache-2.0 | 2,784 | 0.000359 | #!/usr/bin/python3.4 |
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An urwid event loop that integrates with the Pycopia event loop and timer.
"""
from urwid.main_loop import ExitMainLoop
from pycopia import asyncio
from pycopia.OS import scheduler
class PycopiaEventLoop(object):
    """Adapter exposing urwid's event-loop interface on top of the Pycopia
    poller and scheduler."""

    def alarm(self, seconds, callback):
        """Schedule ``callback`` (no arguments) to fire once, ``seconds``
        floating-point seconds from now.

        Returns a handle usable with remove_alarm().
        """
        sched = scheduler.get_scheduler()
        return sched.add(callback, seconds)

    def remove_alarm(self, handle):
        """Cancel a pending alarm.

        Returns True if the alarm existed, False otherwise.
        """
        sched = scheduler.get_scheduler()
        return sched.remove(handle)

    def watch_file(self, fd, callback):
        """Invoke ``callback`` (no arguments) whenever ``fd`` has input.

        Returns a handle usable with remove_watch_file().
        """
        asyncio.poller.register_fd(fd, asyncio.EPOLLIN, callback)
        return fd

    def remove_watch_file(self, handle):
        """Stop watching an input file.

        Returns True if the input file was registered, False otherwise.
        """
        return asyncio.poller.unregister_fd(handle)

    def enter_idle(self, callback):
        """Register ``callback`` to run when the loop goes idle.

        Returns a handle usable with remove_enter_idle().
        """
        return asyncio.poller.register_idle(callback)

    def remove_enter_idle(self, handle):
        """Deregister an idle callback.

        Returns True if the handle was removed.
        """
        return asyncio.poller.unregister_idle(handle)

    def run(self):
        """Run the poller loop until a callback raises.

        ExitMainLoop is treated as a clean shutdown request; any other
        exception propagates to the caller.
        """
        try:
            asyncio.poller.loop()
        except ExitMainLoop:
            pass
|
mjames-upc/python-awips | dynamicserialize/dstypes/com/raytheon/uf/common/management/response/diagnostic/__init__.py | Python | bsd-3-clause | 327 | 0.003058 | ##
##
# File auto-generated by PythonFileGenerator
__all__ = | [
'ClusterMembersResponse',
'ContextsResponse',
'StatusResponse'
]
|
from .ClusterMembersResponse import ClusterMembersResponse
from .ContextsResponse import ContextsResponse
from .StatusResponse import StatusResponse
|
swayf/pyLoad | module/plugins/hoster/MegavideoCom.py | Python | agpl-3.0 | 3,767 | 0.007966 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from time import time
from module.plugins.Hoster import Hoster
from module.unescape import unescape
class MegavideoCom(Hoster):
__name__ = "MegavideoCom"
__type__ = "hoster"
__pattern__ = r"http://(www\.)?megavideo.com/\?v=.*"
__version__ = "0.2"
__description__ = """Megavideo.com Download Hoster"""
__author_name__ = ("jeix","mkaay")
__author_mail__ = ("jeix@hasnomail.de","mkaay@mkaay.de")
    def setup(self):
        # Cached page HTML; fetched lazily by download_html().
        self.html = None
    def process(self, pyfile):
        """Plugin entry point: check availability, resolve the file name,
        then start the download."""
        self.pyfile = pyfile
        if not self.file_exists():
            self.offline()
        self.pyfile.name = self.get_file_name()
        self.download( self.get_file_url() )
def download_html(self):
url = self.pyfile.url
self.html = self.req.load(url)
def get_file_url(self):
""" returns the absolute downloadable filepath
"""
if self.html is None:
| self.download_html()
# get id
id = re.search("previewplayer/\\?v=(.*?)&width", self.html).group(1)
# check for hd link and return if there
if "flashvars.hd = \"1\";" in self.html:
content = | self.req.load("http://www.megavideo.com/xml/videolink.php?v=%s" % id)
return unescape(re.search("hd_url=\"(.*?)\"", content).group(1))
# else get normal link
s = re.search("flashvars.s = \"(\\d+)\";", self.html).group(1)
un = re.search("flashvars.un = \"(.*?)\";", self.html).group(1)
k1 = re.search("flashvars.k1 = \"(\\d+)\";", self.html).group(1)
k2 = re.search("flashvars.k2 = \"(\\d+)\";", self.html).group(1)
return "http://www%s.megavideo.com/files/%s/" % (s, self.__decrypt(un, int(k1), int(k2)))
def __decrypt(self, input, k1, k2):
req1 = []
req3 = 0
for c in input:
c = int(c, 16)
tmp = "".join([str((c >> y) & 1) for y in range(4 -1, -1, -1)])
req1.extend([int(x) for x in tmp])
req6 = []
req3 = 0
while req3 < 384:
k1 = (k1 * 11 + 77213) % 81371
k2 = (k2 * 17 + 92717) % 192811
req6.append((k1 + k2) % 128)
req3 += 1
req3 = 256
while req3 >= 0:
req5 = req6[req3]
req4 = req3 % 128
req8 = req1[req5]
req1[req5] = req1[req4]
req1[req4] = req8
req3 -= 1
req3 = 0
while req3 < 128:
req1[req3] = req1[req3] ^ (req6[req3+256] & 1)
req3 += 1
out = ""
req3 = 0
while req3 < len(req1):
tmp = req1[req3] * 8
tmp += req1[req3+1] * 4
tmp += req1[req3+2] * 2
tmp += req1[req3+3]
out += "%X" % tmp
req3 += 4
return out.lower()
    def get_file_name(self):
        """Extract the video title from the page and turn it into a .flv name."""
        if self.html is None:
            self.download_html()
        # The title is embedded in the page's flashvars block.
        name = re.search("flashvars.title = \"(.*?)\";", self.html).group(1)
        # Python 2 idiom: force the title to ASCII (dropping anything else),
        # unescape HTML entities, map '+' separators back to spaces, and
        # append the ".flv" extension.
        name = "%s.flv" % unescape(name.encode("ascii", "ignore")).decode("utf-8").encode("ascii", "ignore").replace("+", " ")
        return name
def file_exists(self):
""" returns True or False
"""
if self.html is None:
self.download_html()
if re.search(r"Dieses Video ist nicht verfügbar.", self.html) is not None or \
re.search(r"This video is unavailable.", self.html) is not None:
return False
else:
return True
|
hacklabr/mapasculturais-openid | iddacultura/settings/staging.py | Python | gpl-2.0 | 270 | 0 | # coding: utf-8
from .base import *
DEBUG = True
TEMPLATE_DEBUG = | DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db. | backends.postgresql_psycopg2',
'NAME': 'openid-staging',
}
}
STATIC_ROOT = join(SITE_ROOT, '../webfiles-staging/static/')
|
all-umass/manifold_spanning_graphs | viz.py | Python | mit | 2,551 | 0.02548 | import numpy as np
from matplotlib import pyplot
def scatterplot(X, marker='.', title=None, fig=None, ax=None, **kwargs):
    """Scatter-plot a point set X of 1, 2, or 3 dimensions.

    A figure/axes pair is created when none is supplied; extra keyword
    arguments are forwarded to ``ax.scatter``. Returns ``pyplot.show`` so
    the caller can trigger display.
    """
    ndim = len(X.shape)
    assert ndim in (1, 2), 'Only valid for 1 or 2-d arrays of points'
    assert ndim == 1 or X.shape[1] in (1, 2, 3), 'Only valid for [1-3] dimensional points'
    is_3d = ndim == 2 and X.shape[1] == 3
    is_1d = ndim == 1 or X.shape[1] == 1
    if ax is None:
        fig = pyplot.gcf() if fig is None else fig
        if is_3d:
            from mpl_toolkits.mplot3d import Axes3D
            ax = Axes3D(fig)
        else:
            ax = fig.add_subplot(111)
    elif is_3d:
        assert hasattr(ax, 'zaxis'), 'Must provide an Axes3D axis'
    # Dispatch on dimensionality.
    if is_1d:
        ax.scatter(X, marker=marker, **kwargs)
    elif is_3d:
        ax.scatter(X[:, 0], X[:, 1], X[:, 2], marker=marker, **kwargs)
    else:
        ax.scatter(X[:, 0], X[:, 1], marker=marker, **kwargs)
    if title:
        ax.set_title(title)
    return pyplot.show
def show_neighbor_graph(X, W, title=None, fig=None, ax=None,
edge_style='r-', vertex_style='o', vertex_colors='b',
vertex_sizes=20, vertex_edgecolor='none'):
'''Plot the neighbor connections between points in a data set.'''
assert X.shape[1] in (2,3), 'can only show neighbor graph for 2d or 3d data'
is_3d = (X.shape[1] == 3)
if ax is None:
if is_3d:
from mpl_toolkits.mplot3d import Axes3D
if fig is None:
fig = pyplot.gcf()
ax = Axes3D(fig)
else:
ax = pyplot.gca()
pairs = np.transpose(np.nonzero(W))
t = X[pairs]
# this uses the 'None trick', to insert discontinuties in the line plot
tX = np.empty((t.shape[0], t.shape[1]+1))
tX[:,:-1] = t[:,:,0]
tX[:,-1] = None
tY = tX.copy()
tY[:,:-1] = t[:,:,1]
if is_3d:
| tZ = tX.copy()
tZ[:,:-1] = t[:,:,2]
# needs to be a real array, so we use .ravel() | instead of .flat
ax.plot(tX.ravel(), tY.ravel(), tZ.ravel(), edge_style, zorder=1)
if vertex_style is not None:
ax.scatter(X[:,0], X[:,1], X[:,2], marker=vertex_style, zorder=2,
edgecolor=vertex_edgecolor, c=vertex_colors, s=vertex_sizes)
else:
# tX.flat looks like: [x1,x2,NaN, x3,x4,Nan, ...]
ax.plot(tX.flat, tY.flat, edge_style, zorder=1)
if vertex_style is not None:
ax.scatter(X[:,0], X[:,1], marker=vertex_style, zorder=2,
edgecolor=vertex_edgecolor, c=vertex_colors, s=vertex_sizes)
if title:
ax.set_title(title)
return pyplot.show
|
gusseppe/pymach | pymach/core/fselect.py | Python | mit | 2,360 | 0.003814 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Gusseppe Bravo <gbravor@uni.pe>
# License: BSD 3 clause
"""
This module provides a few of useful functions (actually, methods)
for feature selection the dataset which is to be studied.
"""
from __future__ import print_function
import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_regression
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from skl | earn.decomposition import PCA
from sklearn.feature_selection import SelectFromModel
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
# __all__ = [
# 'pipeline']
class Select:
    """Feature-selection stage: assembles a scikit-learn FeatureUnion of
    feature selectors chosen from the experiment configuration."""
    # data = None

    def __init__(self, definer):
        """Capture the experiment configuration.

        :param definer: object carrying problem_type, infer_algorithm and
            n_features for the current experiment.
        """
        self.definer = definer
        self.problem_type = definer.problem_type
        self.infer_algorithm = definer.infer_algorithm
        # self.response = definer.response
        # self.data_path = definer.data_path
        self.n_features = definer.n_features

    def pipeline(self):
        """ This function chooses the best way to find features"""
        transformers = []
        # NOTE(review): ``custom`` and ``n_features`` are only consumed by the
        # commented-out experiments below (kbest/pca); they are dead code on
        # the active path.
        custom = self.CustomFeature()
        #transformers.append(('custom', custom))
        n_features = int(self.n_features/2)
        #kbest = SelectKBest(score_func=chi2, k=n_features)
        #transformers.append(('kbest', kbest))
        # pca = PCA(n_components=n_features, svd_solver='randomized', whiten=True)
        # transformers.append(('pca', pca))
        # Model-based selection: keep features whose tree importance passes
        # SelectFromModel's default threshold.
        if self.definer.problem_type == 'classification':
            extraTC = SelectFromModel(ExtraTreesClassifier(criterion='entropy'))
        else:
            extraTC = SelectFromModel(ExtraTreesRegressor())
        transformers.append(('extraTC', extraTC))
        #scaler = StandardScaler()
        #transformers.append(('scaler', scaler))
        #binarizer = Binarizer()
        return FeatureUnion(transformers)

    class CustomFeature(TransformerMixin):
        """Identity (pass-through) transformer kept as a template for
        custom feature engineering."""

        def transform(self, X, **transform_params):
            #X = pd.DataFrame(X)
            return X

        def fit(self, X, y=None, **fit_params):
            return self
|
rytis/Apache-access-log-parser | plugins/plugin_geoip_stats.py | Python | apache-2.0 | 767 | 0.005215 | #!/usr/bin/env python
from manager import Plugin
from operator import itemgetter
import GeoIP
class GeoIPStats(Plugin):
    def __init__(self, **kwargs):
        # Memory-cached GeoIP database plus a country -> request-count tally.
        self.gi = GeoIP.new(GeoIP.GEOIP_MEMORY_CACHE)
        self.countries = {}
def process(self, **kwargs):
if 'remote_host' in kwargs:
country = self.gi.country_name_by_addr(kwargs['remote_host'])
if country in self.countries:
self.countries[country] += 1
else:
self.countries[country] = 1
def report(self, **kwargs):
print "== Requests by country =="
for (country, c | ount) in sorted(self.countries.iteritems(), key=itemgetter(1), reverse=True):
p | rint " %10d: %s" % (count, country)
|
HaebinShin/tensorflow | tensorflow/contrib/learn/python/learn/estimators/svm_test.py | Python | apache-2.0 | 10,014 | 0.001897 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.SVM."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class SVMTest(tf.test.TestCase):
  def testRealValuedFeaturesPerfectlySeparable(self):
    """Tests SVM classifier with real valued features.

    Three linearly separable points and no regularization: the hinge loss
    should reach 0 and accuracy 1.
    """

    def input_fn():
      # Three examples, two real-valued features each, binary labels.
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'feature1': tf.constant([[0.0], [1.0], [3.0]]),
          'feature2': tf.constant([[1.0], [-1.2], [1.0]]),
      }, tf.constant([[1], [0], [1]])

    feature1 = tf.contrib.layers.real_valued_column('feature1')
    feature2 = tf.contrib.layers.real_valued_column('feature2')
    svm_classifier = tf.contrib.learn.SVM(feature_columns=[feature1, feature2],
                                          example_id_column='example_id',
                                          l1_regularization=0.0,
                                          l2_regularization=0.0)
    svm_classifier.fit(input_fn=input_fn, steps=30)
    metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
    loss = metrics['loss']
    accuracy = metrics['accuracy']
    # The points are not only separable but there exist weights (for instance
    # w1=0.0, w2=1.0) that satisfy the margin inequalities (y_i* w^T*x_i >=1).
    # The unregularized loss should therefore be 0.0.
    self.assertAlmostEqual(loss, 0.0, places=3)
    self.assertAlmostEqual(accuracy, 1.0, places=3)
  def testRealValuedFeaturesWithL2Regularization(self):
    """Tests SVM classifier with real valued features and L2 regularization.

    L2 shrinks the weights, producing a small non-zero loss while keeping
    all predictions correct.
    """

    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'feature1': tf.constant([[0.5], [1.0], [1.0]]),
          'feature2': tf.constant([[1.0], [-1.0], [0.5]]),
      }, tf.constant([[1], [0], [1]])

    feature1 = tf.contrib.layers.real_valued_column('feature1')
    feature2 = tf.contrib.layers.real_valued_column('feature2')
    svm_classifier = tf.contrib.learn.SVM(feature_columns=[feature1, feature2],
                                          example_id_column='example_id',
                                          l1_regularization=0.0,
                                          l2_regularization=1.0)
    svm_classifier.fit(input_fn=input_fn, steps=30)
    metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
    loss = metrics['loss']
    accuracy = metrics['accuracy']
    # The points are in general separable. Also, if there was no regularization,
    # the margin inequalities would be satisfied too (for instance by w1=1.0,
    # w2=5.0). Due to regularization, smaller weights are chosen. This leads
    # to a small but non-zero unregularized loss. Still, all the predictions
    # will be correct, resulting in perfect accuracy.
    self.assertGreater(loss, 0.01)
    self.assertLess(loss, 0.1)
    self.assertAlmostEqual(accuracy, 1.0, places=3)
def testRealValuedFeaturesWithMildL1Regularization(self):
"""Tests SVM classifier with real valued features and L2 regularization."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'feature1': tf.constant([[0.5], [1.0], [1.0]]),
'feature2': tf.constant([[1.0], [-1.0], [0.5]]),
}, tf.constant([[1], [0], [1]])
feature1 = tf.contrib.layers.real_valued_column('feature1')
feature2 = tf.contrib.layers.real_valued_column('feature2')
svm_classifier = tf.contrib.learn.SVM(feature_columns=[feature1, feature2],
example_id_column='example_id',
l1_regularization=0.5,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
metrics = svm_classifier.evaluate(in | put_fn=input_fn, steps=1)
loss = metrics['loss']
accuracy = | metrics['accuracy']
# Adding small L1 regularization favors even smaller weights. This results
# to somewhat moderate unregularized loss (bigger than the one when there is
# no L1 regularization. Still, since L1 is small, all the predictions will
# be correct resulting to perfect accuracy.
self.assertGreater(loss, 0.1)
self.assertAlmostEqual(accuracy, 1.0, places=3)
  def testRealValuedFeaturesWithBigL1Regularization(self):
    """Tests SVM classifier with real valued features and large L1
    regularization, which drives the weights to zero."""

    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'feature1': tf.constant([[0.5], [1.0], [1.0]]),
          'feature2': tf.constant([[1.0], [-1.0], [0.5]]),
      }, tf.constant([[1], [0], [1]])

    feature1 = tf.contrib.layers.real_valued_column('feature1')
    feature2 = tf.contrib.layers.real_valued_column('feature2')
    svm_classifier = tf.contrib.learn.SVM(feature_columns=[feature1, feature2],
                                          example_id_column='example_id',
                                          l1_regularization=3.0,
                                          l2_regularization=1.0)
    svm_classifier.fit(input_fn=input_fn, steps=30)
    metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
    loss = metrics['loss']
    accuracy = metrics['accuracy']
    # When L1 regularization parameter is large, the loss due to regularization
    # outweighs the unregularized loss. In this case, the classifier will favor
    # very small weights (in current case 0) resulting in both big unregularized
    # loss and bad accuracy.
    self.assertAlmostEqual(loss, 1.0, places=3)
    self.assertAlmostEqual(accuracy, 1 / 3, places=3)
  def testSparseFeatures(self):
    """Tests SVM classifier with (hashed) sparse features.

    Mixes a dense real-valued column with a hashed categorical column and
    expects the model to classify all three examples correctly.
    """

    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'price': tf.constant([[0.8], [0.6], [0.3]]),
          # One categorical value per example, as a sparse tensor.
          'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
                                     indices=[[0, 0], [1, 0], [2, 0]],
                                     shape=[3, 1]),
      }, tf.constant([[0], [1], [1]])

    price = tf.contrib.layers.real_valued_column('price')
    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    svm_classifier = tf.contrib.learn.SVM(feature_columns=[price, country],
                                          example_id_column='example_id',
                                          l1_regularization=0.0,
                                          l2_regularization=1.0)
    svm_classifier.fit(input_fn=input_fn, steps=30)
    accuracy = svm_classifier.evaluate(input_fn=input_fn, steps=1)['accuracy']
    self.assertAlmostEqual(accuracy, 1.0, places=3)
def testBucketizedFeatures(self):
"""Tests SVM classifier with bucketized features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[600.0], [800.0], [400.0]]),
'sq_footage': tf.constant([[1000.0], [800.0], [500.0]]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0])
svm_classifier = tf.contrib.learn.SVM(
feature_columns=[pr |
Acidity/PyPermissions | pypermissions/decorators.py | Python | mit | 5,369 | 0.007078 | from pypermissions.permission import PermissionSet
def _prepare_runtime_permission(self, perm=None, runkw=None, args=None, kwargs=None):
"""This function parses the provided string arguments to decorators into the actual values for use when the
decorator is being evaluated. This allows for permissions to be created that rely on arguments that are provided to
the function.
:param perm: The permission string to parse
:param runkw: The run-time components to be inserted into the permission
:param args: The arguments provided to the decorated function
:param kwargs: The keyword arguments provided to the decorated function
:rtype: :py:class:`str`
"""
permission = perm
if not permission:
return False
for key, value in runkw.iteritems():
val_split = value.split('.')
for attr in val_split:
if attr == "self":
value = self
continue
elif attr in kwargs:
value = kwargs.get(attr)
continue
value = getattr(value, attr)
permission = permission.replace('{'+key+'}', value)
return permission
def set_has_permission(perm=None, perm_set=None, on_failure=None, perm_check=None, **runkw):
    """Guard a method behind a permission check on ``perm_set``.

    ``perm`` may contain ``{tag}`` placeholders that are filled in at call
    time from ``runkw`` (see ``_prepare_runtime_permission``). When no
    permission resolves, the wrapped method runs unconditionally. When
    ``perm_set`` is missing, or ``perm_check(perm_set, permission)`` is
    falsy, ``on_failure`` is invoked with the same arguments instead of the
    wrapped method.

    :param perm: permission template; ``{}`` tags are resolved at run time
    :param perm_set: the permission set interrogated for the permission
    :param on_failure: fallback called in place of the decorated method
    :param perm_check: the PermissionSet predicate used for the check
    :param runkw: tag -> dotted-path mappings for run-time resolution
    """
    def decorator(function):
        def check_permission(self, *args, **kwargs):
            permission = _prepare_runtime_permission(self, perm, runkw, args, kwargs)
            if not permission:
                # No permission resolved: open access.
                return function(self, *args, **kwargs)
            allowed = perm_set and perm_check(perm_set, permission)
            target = function if allowed else on_failure
            return target(self, *args, **kwargs)
        return check_permission
    return decorator
def set_grants_permission(perm=None, perm_set=None, on_failure=None, **runkw):
    """Shortcut for :func:`set_has_permission` that checks with
    ``PermissionSet.grants_permission``.

    Verifies that ``perm_set`` grants the (run-time resolved) permission
    ``perm``; on failure ``on_failure`` is called with the decorated
    function's arguments instead of the function itself.

    :param perm: permission template; ``{}`` tags are resolved at run time
    :param perm_set: the permission set being checked for the permission
    :param on_failure: fallback called when the check fails
    :param runkw: tag -> dotted-path mappings for run-time resolution
    """
    return set_has_permission(
        perm=perm,
        perm_set=perm_set,
        on_failure=on_failure,
        perm_check=PermissionSet.grants_permission,
        **runkw
    )
def set_has_any_permission(perm=None, perm_set=None, on_failure=None, **runkw):
"""This decorator checks if the provided permission set has a permission of the form specified. It allows for the
permission to rely on runtime information via runkw; which be used to modify perm based on arguments provided to the
decorated function. For many use cases, this can be extended by decorating it with a custom decorator that will
capture the current user making the function call, and providing their permissions as the perm_set. The function
provided for use when the check fails will be called with the decorated functions arguments.
:param perm: The permission to be checked. May contain {} tags to be replaced at run time.
:param perm_set: The permission set being checked for the permission.
:param on_failure: A function that gets called instead of the decorated function when perm_set does not have the
specified permission.
:param runkw: The mappings to be used to create the actual permission at run t | ime.
"""
return set_has_permission(perm, perm_set, on_failure, perm_ | check=PermissionSet.has_any_permission, **runkw)
|
mmccoo/kicad_mmccoo | toggle_visibility/__init__.py | Python | apache-2.0 | 606 | 0.006601 | import pcbnew
from . import toggle_visibility
class ToggleVisibilityPlugin(pcbnew.ActionPlugin):
def defaults(self):
self.na | me = "Toggle visibility of value/reference (of selected modules)"
self.category = "A descriptive category name"
self.description = "This plugin toggles the visibility of any selected module v | alues/references"
def Run(self):
# The entry function of the plugin that is executed on user action
toggle_visibility.ToggleVisibility()
ToggleVisibilityPlugin().register() # Instantiate and register to Pcbnew
print("done adding toggle")
|
youkochan/shadowsocks-analysis | shadowsocks/manager.py | Python | apache-2.0 | 12,796 | 0.000315 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
import signal
import sys
import os
import datetime
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
BUF_SIZE = 1506
STAT_SEND_LIMIT = 100
LIMIT_MULTIPLE = 1048576 # 1M = 1048576 BYTES
class Manager(object):
    def __init__(self, config):
        """Build the manager: bind the control socket and open one relay
        pair per configured port.

        :param config: parsed server configuration; must contain
            'manager_address', 'port_password', 'port_limit' and
            'server_port'.
        """
        self._config = config
        # port -> per-user config dict
        self._port_info = {}
        self._relays = {}  # (tcprelay, udprelay)
        self._loop = eventloop.EventLoop()
        self._dns_resolver = asyncdns.DNSResolver()
        self._dns_resolver.add_to_loop(self._loop)
        self._last_day = datetime.date.today().day
        self._statistics = collections.defaultdict(int)
        # _statistics_sum records each port's total traffic used per day;
        # refreshed at midnight (translated from the original Chinese note).
        self._statistics_sum = collections.defaultdict(int)
        self._control_client_addr = None
        self._control_client_url = None
        try:
            manager_address = config['manager_address']
            if ':' in manager_address:
                # host:port form -> UDP socket; resolve to pick the family.
                addr = manager_address.rsplit(':', 1)
                addr = addr[0], int(addr[1])
                addrs = socket.getaddrinfo(addr[0], addr[1])
                if addrs:
                    family = addrs[0][0]
                else:
                    logging.error('invalid address: %s', manager_address)
                    exit(1)
            else:
                # Plain path -> UNIX-domain datagram socket.
                addr = manager_address
                self._control_client_url = addr
                family = socket.AF_UNIX
            self._control_socket = socket.socket(family,
                                                 socket.SOCK_DGRAM)
            self._control_socket.bind(addr)
            self._control_socket.setblocking(False)
        except (OSError, IOError) as e:
            logging.error(e)
            logging.error('can not bind to manager address')
            exit(1)
        self._loop.add(self._control_socket,
                       eventloop.POLL_IN, self)
        self._loop.add_periodic(self.handle_periodic)
        port_password = config['port_password']
        port_limit = config['port_limit']
        # These keys are consumed here; each per-user copy below gets its
        # own 'server_port'/'password' (note: mutates the caller's dict).
        del config['port_password']
        del config['port_limit']
        del config['server_port']
        if port_limit is None:
            port_limit = {}
        for port, password in port_password.items():
            a_config = config.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            if port in port_limit:
                # Limits are configured in MB; convert to bytes
                # (LIMIT_MULTIPLE = 1048576).
                a_config['limit'] = int(port_limit[port]) * LIMIT_MULTIPLE
            self.add_user(a_config)
def add_user(self, config):
port = int(config['server_port'])
if port in self._port_info:
logging.error("user already exists at %s:%d" % (config['server'], port))
return
logging.info("adding user at %s:%d" % (config['server'], port))
self._port_info[port] = config
self.add_port(config)
def remove_user(self, config):
port = int(config['server_port'])
if port not in self._port_info:
logging.error("user not exist at %s:%d" % (config['server'], port))
return
logging.info("removing user at %s:%d" % (config['server'], port))
if port in self._relays:
self.remove_port(config)
del self._port_info[port]
def add_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.error("port already opened at %s:%d" % (config['server'], port))
return
logging.info("opening port at %s:%d" % (config['server'], port))
t = tcprelay.TCPRelay(config, self._dns_resolver, False,
self.stat_callback)
u = udprelay.UDPRelay(config, self._dns_resolver, False,
self.stat_callback)
t.add_to_loop(self._loop)
u.add_to_loop(self._loop)
self._relays[port] = (t, u)
def remove_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.info("closing port at %s:%d" % (config['server'], port))
t, u = servers
t.close(next_tick=False)
u.close(next_tick=False)
del self._relays[port]
else:
logging.error("port not open at %s:%d" % (config['server'], port))
def handle_event(self, sock, fd, event):
if sock == self._control_socket and event == eventloop.POLL_IN:
data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
parsed = self._parse_command(data)
if parsed:
command, config = parsed
a_config = self._config.copy()
if config:
# let the command override the configuration file
a_config.update(config)
command = command.strip()
# 加一些检测命令的语句,防止错误的指令轻易地使服务器崩溃
try:
if command == 'add':
assert 'server_port' in a_config
assert 'password' in a_config
assert type(a_config['server_port']) is int
assert type(a_config['password']) is str
self.add_user(a_config)
self._send_control_data(b'ok')
elif command == 'remove':
assert 'server_port' in a_config
assert type(a_config['server_port']) is int
| self.remove_user(a_config)
self._send_control_data(b'ok')
elif command == 'ping':
self._send_control_data(b'pong')
else:
logging.error('unknown command %s', command)
self._send_ | control_data(b'unknown command')
except AssertionError:
self._send_control_data(b'error command')
else:
self._send_control_data(b'error command')
def _parse_command(self, data):
# commands:
# add: {"server_port": 8000, "password": "foobar"}
# remove: {"server_port": 8000"}
data = common.to_str(data)
parts = data.split(':', 1)
if len(parts) < 2:
return data, None
command, config_json = parts
try:
config = shell.parse_json_in_str(config_json)
return command, config
except Exception as e:
logging.error(e)
return None
    def stat_callback(self, port, data_len):
        # Traffic hook called by the relays: accumulate both the periodic
        # counter (reported by handle_periodic) and the daily running total.
        self._statistics[port] += data_len
        self._statistics_sum[port] += data_len
def handle_periodic(self):
r = {}
i = 0
def send_data(data_dict):
if data_dict:
# use compact JSON format (without space)
data = common.to_bytes(json.dumps(data_dict,
separators=(',', ':')))
self._send_control_data(b'stat: ' + data)
if self._control_client_addr:
for k, v in self._statistics.items():
r[k] = v
i += 1
# split the data into segments that fit in UDP packets
if i >= STAT_SEND_LIMIT:
|
tantale/bumpversion_demo | setup.py | Python | mit | 356 | 0.002809 | from distutils.core import setup
# Package metadata for the bumpversion demonstration project.
setup(
    name='bumpversion_demo',
    version='0.1.0',
    packages=[''],
    url='https://github.com/tantale/bumpversion_demo',
    license='MIT License',
    author='Tantale',
    author_email='tantale.solution@gmail.com',
    description='Demonstration of ``bumpversion`` usage in the context of a Python project.'
)
|
rocket-league-replays/rocket-league-replays | rocket_league/apps/replays/migrations/0014_auto_20151013_2217.py | Python | gpl-3.0 | 1,589 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add per-player match statistics and platform-identity fields."""

    dependencies = [
        ('replays', '0013_replaypack_file'),
    ]

    operations = [
        migrations.AddField(
            model_name='player',
            name='assists',
            field=models.PositiveIntegerField(default=0, blank=True),
        ),
        migrations.AddField(
            model_name='player',
            name='bot',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='player',
            name='goals',
            field=models.PositiveIntegerField(default=0, blank=True),
        ),
        migrations.AddField(
            model_name='player',
            name='online_id',
            field=models.BigIntegerField(null=True, blank=True),
        ),
        migrations.AddField(
            model_name='player',
            name='platform',
            field=models.CharField(max_length=100, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='player',
            name='saves',
            field=models.PositiveIntegerField(default=0, blank=True),
        ),
        migrations.AddField(
            model_name='player',
            name='score',
            field=models.PositiveIntegerField(default=0, blank=True),
        ),
        migrations.AddField(
            model_name='player',
            name='shots',
            field=models.PositiveIntegerField(default=0, blank=True),
        ),
    ]
|
Exteris/lsdeflate | tests/context.py | Python | gpl-3.0 | 122 | 0.016393 | import os
import sys

# Make the package under test importable from the tests directory.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import lsdeflate
|
team-vigir/vigir_behaviors | behaviors/vigir_behavior_simple_joint_control_test/src/vigir_behavior_simple_joint_control_test/simple_joint_control_test_sm.py | Python | bsd-3-clause | 26,416 | 0.025515 | #!/usr/bin/env python
######################################################## | ###
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
import roslib; roslib.load_manifest('vigir_ | behavior_simple_joint_control_test')
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, Logger
from vigir_flexbe_states.check_current_control_mode_state import CheckCurrentControlModeState
from vigir_flexbe_states.change_control_mode_action_state import ChangeControlModeActionState
from vigir_flexbe_states.moveit_move_group_state import MoveitMoveGroupState
from flexbe_states.decision_state import DecisionState
from flexbe_states.calculation_state import CalculationState
from flexbe_states.wait_state import WaitState
from vigir_flexbe_states.execute_trajectory_state import ExecuteTrajectoryState
from flexbe_states.flexible_calculation_state import FlexibleCalculationState
from vigir_flexbe_states.update_dynamic_parameter_state import UpdateDynamicParameterState
from vigir_flexbe_states.read_dynamic_parameter_state import ReadDynamicParameterState
from flexbe_states.start_record_logs_state import StartRecordLogsState
from flexbe_states.stop_record_logs_state import StopRecordLogsState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
import time
import os
import rospy
# [/MANUAL_IMPORT]
'''
Created on Mon Nov 03 2014
@author: Philipp and Spyros
'''
class SimpleJointControlTestSM(Behavior):
'''
Get step response of joint controllers by varying PID gains.
'''
    def __init__(self):
        # NOTE(review): this file is generated -- manual changes outside the
        # [MANUAL] tags are lost on regeneration.
        super(SimpleJointControlTestSM, self).__init__()
        self.name = 'Simple Joint Control Test'
        # parameters of this behavior
        self.add_parameter('topics_to_record', '')
        self.add_parameter('joint_upper_bounds', 0.6)
        self.add_parameter('joint_lower_bounds', 0.4)
        self.add_parameter('real_robot', True)
        # references to used behaviors
        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]
        # Joint-limit tables as [lower, upper] pairs in radians (presumably;
        # TODO confirm units against the robot model):
        # 0-5 left arm
        # 6-11 right arm
        # for each: wrx, wry, elx, ely, shx, shz
        # simulation
        self._joint_limits_sim = [ \
            [-0.44, 1.57], \
            [-1.57, 1.57], \
            [0.00, 2.35], \
            [0.00, 3.14], \
            [-1.40, 1.75], \
            [-1.96, 1.96], \
            \
            [-1.57, 0.44], \
            [-1.57, 1.57], \
            [-2.35, 0.00], \
            [0.00, 3.14], \
            [-1.75, 1.40], \
            [-1.96, 1.96] \
        ]
        # real robot
        self._joint_limits_rob = [ \
            [-1.18, 1.18], \
            [0.00, 3.14], \
            [0.00, 2.36], \
            [0.00, 3.14], \
            [-1.57, 1.57], \
            [-1.57, 0.79], \
            \
            [-1.18, 1.18], \
            [0.00, 3.14], \
            [-2.36, 0.00], \
            [0.00, 3.14], \
            [-1.57, 1.57], \
            [-1.57, 0.79], \
        ]
        # Selected at runtime from the two tables above (see create()).
        self._joint_limits = []
        # joint order: shz, shx, ely, elx, wry, wrx
        self._joint_configs_down = []
        self._joint_configs_up = []
        # Trajectory controller handles, ordered to match the limit tables.
        self._traj_controllers = [ \
            UpdateDynamicParameterState.LEFT_ARM_WRX, \
            UpdateDynamicParameterState.LEFT_ARM_WRY, \
            UpdateDynamicParameterState.LEFT_ARM_ELX, \
            UpdateDynamicParameterState.LEFT_ARM_ELY, \
            UpdateDynamicParameterState.LEFT_ARM_SHX, \
            UpdateDynamicParameterState.LEFT_ARM_SHZ, \
            \
            UpdateDynamicParameterState.RIGHT_ARM_WRX, \
            UpdateDynamicParameterState.RIGHT_ARM_WRY, \
            UpdateDynamicParameterState.RIGHT_ARM_ELX, \
            UpdateDynamicParameterState.RIGHT_ARM_ELY, \
            UpdateDynamicParameterState.RIGHT_ARM_SHX, \
            UpdateDynamicParameterState.RIGHT_ARM_SHZ \
        ]
        # [/MANUAL_INIT]
# Behavior comments:
def create(self):
joint_names_left = ["l_arm_shz", "l_arm_shx", "l_arm_ely", "l_arm_elx", "l_arm_wry", "l_arm_wrx"]
joint_names_right = ["r_arm_shz", "r_arm_shx", "r_arm_ely", "r_arm_elx", "r_arm_wry", "r_arm_wrx"]
wait_time = 3.0
bagfolder = "" # calculated
gains_list = {'pid_gains': ['p', 'i', 'd'], 'bdi_gains': ['k_qd_p', 'ff_qd_d'], 'vigir_gains': ['ff_bang', 'ff_effort', 'ff_friction']}
# x:30 y:365, x:130 y:365
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
_state_machine.userdata.joints_left_up = [] # calculated
_state_machine.userdata.joints_right_up = [] # calculated
_state_machine.userdata.joint_index = 0
_state_machine.userdata.zero_time = [0.02]
_state_machine.userdata.joint_positions_up = [] # calculated
_state_machine.userdata.joint_positions_down = [] # calculated
_state_machine.userdata.joint_index = 0
_state_machine.userdata.none = None
_state_machine.userdata.init_time = [3.0]
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# 'Basic' configuration for SIMULATION
#_state_machine.userdata.joints_left_up = [0.00, 0.18, 1.57, 1.18, 0.00, 0.57]
#_state_machine.userdata.joints_right_up = [0.00, -0.18, 1.57, -1.18, 0.00, -0.57]
logs_folder = os.path.expanduser('~/joint_control_tests/')
if not os.path.exists(logs_folder):
os.makedirs(logs_folder)
bagfolder = os.path.join(logs_folder, "run_" + time.strftime("%Y-%m-%d-%H_%M"))
os.makedirs(bagfolder)
self._joint_limits = self._joint_limits_rob if self.real_robot else self._joint_limits_sim
# standard config
joints_left_up = [0] * 6
for i in range(6):
joint_range = self._joint_limits[i][1] - self._joint_limits[i][0]
joints_left_up[5-i] = self._joint_limits[i][0] + joint_range * 0.5
joints_right_up = [0] * 6
for i in range(6):
joint_range = self._joint_limits[i+6][1] - self._joint_limits[i+6][0]
joints_right_up[5-i] = self._joint_limits[i+6][0] + joint_range * 0.5
_state_machine.userdata.joints_left_up = joints_left_up
_state_machine.userdata.joints_right_up = joints_right_up
rospy.loginfo('Average left joint positions: ' + ' '.join(map(str, joints_left_up)))
rospy.loginfo('Average right joint positions: ' + ' '.join(map(str, joints_right_up)))
# left
for i in range(6):
joint_config_up = list(_state_machine.userdata.joints_left_up)
joint_config_down = list(_state_machine.userdata.joints_left_up)
joint_range = self._joint_limits[i][1] - self._joint_limits[i][0]
joint_config_up[5-i] = self._joint_limits[i][0] + joint_range * self.joint_upper_bounds
joint_config_down[5-i] = self._joint_limits[i][0] + joint_range * self.joint_lower_bounds
self._joint_configs_up.append([joint_config_up])
self._joint_configs_down.append([joint_config_down])
rospy.loginfo('Left Joint Config Up: ' + ' '.join(map(str, joint_config_up)))
rospy.loginfo('Left Joint Config Dn: ' + ' '.join(map(str, joint_config_down)))
# right
for i in range(6):
joint_config_up = list(_state_machine.userdata.joints_right_up)
joint_config_down = list(_state_machine.userdata.joints_right_up)
joint_range = self._joint_limits[i+6][1] - self._joint_limits[i+6][0]
joint_config_up[5-i] = self._joint_limits[i+6][0] + joint_range * self.joint_upper_bounds
joint_config_down[5-i] = self._joint_limits[i+6][0] + joint_range * self.joint_lower_bounds
self._joint_configs_up.append([joint_config_up])
self._joint_configs_down.append([joint_config_down])
rospy.loginfo('Right Joint Config Up: ' + ' '.join(map(str, joint_config_up)))
rospy.loginfo('Right Joint Config Dn: ' + ' '.join(map(str, joint_config_down)))
# [/MANUAL_CREATE]
# x:30 y:365, x:130 y:365
_sm_move_joint_down_0 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['joint_index', 'joint_positions_down', 'zero_time', 'joints_right_up', 'joints_left_up', 'init_time'])
with _sm_move_joint_down_0:
# x:71 y:145
OperatableStateMachine.add('Move_Left_Arm_Back',
MoveitMoveGroupState(planning_group="l_arm_group", joint_names=joint_names_left),
transitions={'reached': 'Move_Right_Arm_Back', 'failed': 'failed'},
autonomy={'reached': Autonomy.Low, 'failed': Autonomy.High},
remapping={'target_joint_config': 'joints_left_up'})
# x:639 y:69
OperatableStateMachine.add('Move_Left_Joint_Down',
ExecuteTrajectoryState(controller=ExecuteTrajectoryState.CONTROLLER_LEFT_ARM, joint_names=joint_na |
VolodyaEsk/selenium_python_tony_project | tests/other/waits.py | Python | gpl-2.0 | 1,361 | 0.002204 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver. | common.by import By
from selenium.common.exceptions import TimeoutException
import unittest
class WaitForElements(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Firefox()
cls.driver.maximize_window()
cls.wait = WebDriverWait(cls.driver, 10)
def test_wait_for_photos_button(self):
self.driver.get("http://travelingtony.weebly.com/")
button_locator = 'span.wsite-button-inner'
see_button = self.driver.find_element_by_cs | s_selector(button_locator)
# see_button = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, button_locator)))
print see_button
def test_wait_for_search_field(self):
self.driver.get("http://travelingtony.weebly.com/")
search_locator = 'input.wsite-search-input'
search_field = self.driver.find_element_by_css_selector(search_locator)
# search_field = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, search_locator)))
print search_field
@classmethod
def tearDownClass(cls):
cls.driver.quit()
if __name__ == '__main__':
unittest.main()
|
antoinearnoud/openfisca-france | openfisca_france/scripts/performance/measure_tests_performance.py | Python | agpl-3.0 | 1,894 | 0.00528 | # -*- coding: utf-8 -*-
"""
This files tests the performance of the test runner of openfisca-run-test on a subset of YAML tests.
It is placed in openfisca-france because it is the largest set we currently have.
Usage example:
python openfisca_france/scripts/performance/measure_tests_performance.py
"""
import os
import time
import logging
import pkg_resources
from openfisca_core.tools.test_runner import run_tests
from openfisca_france import CountryTaxBenefitSyst | em
# Create logger
logging.basicConfig(level = logging.INFO)
logger = logging.getLogger(__name__)
# Baselines for comparison - unit : seconds
BASELINE_TBS_LOAD_TIME = 9.10831403732
BASELINE_YAML_TESTS_TIME = 271.448431969

# Time tax benefit system loading
start_time_tbs = time.time()
tbs = CountryTaxBenefitSystem()
time_spent_tbs = time.time() - start_time_tbs

# Locate the YAML test suite inside the installed distribution.
openfisca_france_dir = pkg_resources.get_distribution('OpenFisca-France').location
yaml_tests_dir = os.path.join(openfisca_france_dir, 'tests', 'mes-aides.gouv.fr')

# Time openfisca-run-test runner
start_time_tests = time.time()
run_tests(tbs, yaml_tests_dir)
time_spent_tests = time.time() - start_time_tests
def compare_performance(baseline, test_result):
    """Log how *test_result* compares to *baseline* (both in seconds).

    A change of more than 20% in either direction is reported; smaller
    variations are treated as noise.
    """
    delta = (test_result - baseline) * 100 / baseline
    if test_result > baseline * 1.2:
        logger.warning("The performance seems to have worsened by {} %.".format(delta))
    elif test_result < baseline * 0.8:
        # delta is negative on this branch; report the improvement as a
        # positive percentage.
        logger.info("The performance seems to have been improved by {} %.".format(-delta))
    else:
        # use the module logger for consistency with the other branches
        logger.info("The performance seems steady ({} %).".format(delta))
# Report the measured timings and compare them against the recorded baselines.
logger.info("Generate Tax Benefit System: --- {}s seconds ---".format(time_spent_tbs))
compare_performance(BASELINE_TBS_LOAD_TIME, time_spent_tbs)
logger.info("Pass Mes-aides tests: --- {}s seconds ---".format(time_spent_tests))
compare_performance(BASELINE_YAML_TESTS_TIME, time_spent_tests)
|
JoZie/denite-make | rplugin/python3/denite/source/make.py | Python | mit | 5,366 | 0.010622 | # ============================================================================
# FILE: make.py
# AUTHOR: Johannes Ziegenbalg <Johannes dot Ziegenbalg at gmail.com>
# License: MIT license
# ============================================================================
import re
import shlex
import os, sys, stat
from os import path
from .base import Base
from denite.util import globruntime, abspath
from denite.process import Process
# TODO:
# syntax highliting
# write filter
# Test Test Test!
class Source(Base):
    """Denite candidate source wrapping an asynchronous ``make`` run.

    Compiler output is parsed into jumpable warning/error candidates. A
    generated wrapper script prefixes every output line with the PID of the
    emitting make process, so "Entering directory" messages can be matched
    to diagnostics from the same (possibly parallel) process.
    """

    def __init__(self, vim):
        super().__init__(vim)

        self.name = 'make'
        self.kind = 'file'
        self.vars = {
            'shell' : ['bash', '-c'],
            'command' : "make",
            # Matches PID-prefixed "make: Entering directory '...'" lines.
            'regex_enter' : re.compile(
                "(\d+:)*(?P<process>\d+:).*"
                "(?P<cd_op>Entering) directory "
                "\'(?P<dir>.*)\'"),
            # Matches PID-prefixed "file:line:col: warning|error: msg" lines.
            'regex_err' : re.compile(
                "(\d+:)*(?P<process>\d+:)"
                "(?P<file>.*)"
                ":(?P<line>\d+):(?P<col>\d+): "
                "(?P<tag>warning|error): "
                "(?P<message>.*)")
        }
        # Maps "pid:" prefix -> current working directory of that process.
        self.__dir_map = {}
        # NOTE(review): fixed world-writable path in /tmp is predictable and
        # racy on multi-user machines; a mkstemp-based path would be safer.
        self.__wrapper = '/tmp/denite-make-wrapper.sh'
        self.__last_message = { 'following_lines' : 0 }

    def on_init(self, context):
        context['__proc'] = None
        self.__create_make_wrapper()

        # Optional positional args: precommand, make args, working directory.
        context['__precommand'] = context['args'][0] if len(
            context['args']) > 0 else ""
        context['__make_args'] = context['args'][1] if len(
            context['args']) > 1 else ""
        directory = context['args'][2] if len(
            context['args']) > 2 else context['path']
        context['__make_dir'] = abspath(self.vim, directory)

        context['__command'] = '{} {} {}'.format(
            context['__precommand'],
            self.__wrapper,
            context['__make_args'])

    def on_close(self, context):
        # Kill a still-running make and remove the temporary wrapper script.
        if context['__proc']:
            context['__proc'].kill()
            context['__proc'] = None
        if path.exists(self.__wrapper):
            os.remove(self.__wrapper)

    def gather_candidates(self, context):
        if context['__proc']:
            return self.__async_gather_candidates(context, 0.03)

        # BUGFIX: build a fresh argument list. The previous code appended to
        # self.vars['shell'] in place, growing the shared list on every run.
        args = self.vars['shell'] + [context['__command']]

        context['__proc'] = Process(args, context, context['__make_dir'])
        return self.__async_gather_candidates(context, 0.1)

    def __async_gather_candidates(self, context, timeout):
        outs, err = context['__proc'].communicate(timeout=timeout)
        context['is_async'] = not context['__proc'].eof()
        if context['__proc'].eof():
            context['__proc'] = None
        if err:
            return [{'word': x} for x in err]
        if not outs:
            return []
        candidates = [self.__convert(context, x) for x in outs]
        return [c for c in candidates if c is not None]

    def __convert(self, context, line):
        # Strip the wrapper script name and the PID prefix for display.
        clean_line = re.sub('(/tmp/)*denite-make-wrapper.sh', self.vars['command'], line)
        clean_line = re.sub('^(\d+:)+', '', clean_line)

        match = self.vars['regex_err'].search(line)
        if match:
            message = match.groupdict()
            err_dir = self.__dir_map[message['process']]
            message['full_file'] = path.relpath(err_dir + "/" + message['file'], context['path'])
            # Attribute the next two (continuation) lines to this diagnostic.
            message['following_lines'] = 2
            self.__last_message = message
            return {
                'word' : '[{0}] {1}:{2}:{3}'.format(
                    message['tag'],
                    message['full_file'],
                    message['line'],
                    message['col']),
                'abbr' : clean_line,
                'action__path' : message['full_file'],
                'action__line' : message['line'],
                'action__col' : message['col']
            }

        if self.__last_message['following_lines'] > 0:
            self.__last_message['following_lines'] -= 1
            return {
                'word' : '[{0}] {1}:{2}:{3}'.format(
                    self.__last_message['tag'],
                    self.__last_message['full_file'],
                    self.__last_message['line'],
                    self.__last_message['col']),
                'abbr' : clean_line,
                'action__path' : self.__last_message['full_file'],
                'action__line' : self.__last_message['line'],
                'action__col' : self.__last_message['col']
            }

        match = self.vars['regex_enter'].search(line)
        if match:
            message = match.groupdict()
            # Remember which directory this make process is working in.
            self.__dir_map[message['process']] = message['dir']

        return {
            'word' : clean_line,
            'abbr' : clean_line,
            'action__path' : ''
        }

    def __create_make_wrapper(self):
        # The wrapper prefixes each output line with "<pid>:" and merges
        # stderr into stdout so diagnostics can be attributed per process.
        script = [
            '#!/bin/bash',
            'PREFIX=$$:',
            'exec -a $0 ' + self.vars['command'] + ' "$@" 2>&1 | sed "s/^/$PREFIX/"'
        ]
        with open(self.__wrapper, 'w') as wrapper:
            wrapper.write("\n".join(script))
        os.chmod(self.__wrapper, 0o0777)
|
WGS-TB/MentaLiST | scripts/create_new_scheme_with_novel.py | Python | mit | 2,432 | 0.00699 | #!/usr/bin/env python
import logging
logger = logging.getLogger()
import argparse
import collections
import sys
import os
from Bio import SeqIO
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Adds novel alleles to an existing MLST scheme.")
    parser.add_argument("-n", "--novel", type=str, help="FASTA with novel alleles.")
    parser.add_argument("-o", "--output", type=str, help="Output folder for new scheme.")
    # NOTE(review): --id is accepted but currently unused; numbering always
    # continues from the largest existing allele id per locus (see below).
    parser.add_argument("-i", "--id", type=int, default=1000, help="Start numbering new alleles on this value, later will implement from last allele id +1.")
    # parser.add_argument("-t", "--threads", type=int, default=4, help="number of threads")
    parser.add_argument("files", nargs="+", help="MLST Fasta files")
    parser.add_argument('-ll', '--loglevel', type=str, default="INFO", choices=['DEBUG','INFO','WARNING','ERROR','CRITICAL'], help='Set the logging level')
    param = parser.parse_args()
    logging.basicConfig(level=param.loglevel, format='%(asctime)s (%(relativeCreated)d ms) -> %(levelname)s:%(message)s', datefmt='%I:%M:%S %p')

    # Group the novel allele records by locus (FASTA record id).
    logger.info("Opening the novel alleles file ...")
    novel = collections.defaultdict(list)
    for seq_record in SeqIO.parse(param.novel, "fasta"):
        novel[seq_record.id].append(seq_record)

    # Create the output folder if it does not exist.
    if not os.path.isdir(param.output):
        os.makedirs(param.output)

    logger.info("Opening the MLST schema and adding novel alleles ...")
    for f in param.files:
        logger.debug("Opening file %s ..." % f)
        file_no_ext, ext = os.path.splitext(f)
        locus = os.path.basename(file_no_ext)
        record_list = [seq_record for seq_record in SeqIO.parse(f, "fasta")]
        # If there are novel alleles for this locus, append them:
        if len(novel[locus]) > 0:
            # Novel alleles get ids following the largest existing one;
            # ids are assumed to end in "_<number>".
            id_list = [int(s.id.split("_")[-1]) for s in record_list]
            next_id = max(id_list) + 1
            for record in novel[locus]:
                record.id += "_%d" % next_id
                record.name = record.description = ""
                next_id += 1
                record_list.append(record)
        # Save the (possibly extended) locus FASTA into the new scheme.
        SeqIO.write(record_list, os.path.join(param.output, os.path.basename(f)), "fasta")
    logger.info("Done.")
|
vsoch/expfactory-python | expfactory/testing/test_views.py | Python | mit | 2,582 | 0.020139 | #!/usr/bin/python
"""
Test experiments
"""
from expfactory.utils import copy_directory, get_installdir
from expfactory.vm import custom_battery_download
from expfactory.experiment import load_experiment
from expfactory.views import *
import tempfile
import unittest
import shutil
import json
import os
import re
class TestViews(unittest.TestCase):
    """Unit tests for the expfactory view helpers."""

    def setUp(self):
        self.pwd = get_installdir()
        self.tmpdir = tempfile.mkdtemp()
        self.experiment = os.path.abspath("%s/testing/data/test_task/" % self.pwd)
        # use a context manager so the config file handle is not leaked
        with open("%s/testing/data/test_task/config.json" % self.pwd, "rb") as config_file:
            self.config = json.load(config_file)

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def test_embed_experiment(self):
        html_snippet = embed_experiment(self.experiment)
        self.assertIsNotNone(re.search("<!DOCTYPE html>", html_snippet))
        self.assertIsNotNone(re.search("style.css", html_snippet))
        self.assertIsNotNone(re.search("experiment.js", html_snippet))
        self.assertIsNotNone(re.search("test_task", html_snippet))

    def test_experiment_web(self):
        generate_experiment_web(self.tmpdir)
        self.assertTrue(os.path.exists("%s/templates" % self.tmpdir))
        self.assertTrue(os.path.exists("%s/static" % self.tmpdir))
        self.assertTrue(os.path.exists("%s/data" % self.tmpdir))
        self.assertTrue(os.path.exists("%s/index.html" % self.tmpdir))
        self.assertTrue(os.path.exists("%s/table.html" % self.tmpdir))

    def test_get_experiment_html(self):
        html_snippet = get_experiment_html(self.config, self.experiment)
        self.assertIsNotNone(re.search("<!DOCTYPE html>", html_snippet))
        self.assertIsNotNone(re.search("style.css", html_snippet))
        self.assertIsNotNone(re.search("experiment.js", html_snippet))
        self.assertIsNotNone(re.search("test_task", html_snippet))

    def test_cognitiveatlas_hierarchy(self):
        html_snippet = get_cognitiveatlas_hierarchy(get_html=True)
        self.assertIsNotNone(re.search("<!DOCTYPE html>", html_snippet))
        self.assertIsNotNone(re.search("The Experiment Factory", html_snippet))

    def test_tmp_experiment(self):
        battery = custom_battery_download("%s/battery" % self.tmpdir, repos=["battery"])
        tmp_exp = tmp_experiment(self.experiment, "%s/battery" % self.tmpdir)
        self.assertTrue(os.path.exists("%s/battery/static" % tmp_exp))
        self.assertTrue(os.path.exists("%s/battery/index.html" % tmp_exp))
        shutil.rmtree(tmp_exp)

if __name__ == '__main__':
    unittest.main()
|
pbrunet/pythran | pythran/run.py | Python | bsd-3-clause | 5,948 | 0.000168 | #!/usr/bin/env python
""" Script to run Pythran file compilation with specified g++ like flags. """
import argparse
import logging
import os
import sys
import pythran
from distutils.errors import CompileError
logger = logging.getLogger("pythran")
def convert_arg_line_to_args(arg_line):
    """Read arguments from a file in a prettier way.

    Splitting on whitespace with str.split() never yields empty strings,
    so every token can be forwarded directly (the old blank-token check
    was dead code).
    """
    for arg in arg_line.split():
        yield arg
def compile_flags(args):
    """
    Build a dictionary with an entry for cppflags, ldflags, and cxxflags.

    These options are filled according to the command-line defined options;
    the 'opts' entry is only present when optimization passes were given.
    """
    options = dict(
        define_macros=args.defines,
        include_dirs=args.include_dirs,
        extra_compile_args=args.extra_flags,
        library_dirs=args.libraries_dir,
        extra_link_args=args.extra_flags,
    )
    if args.opts:
        options['opts'] = args.opts
    return options
def run():
    """Parse pythran's command line and drive the compilation.

    Exits with status 1 on any expected failure (bad input, translation or
    compilation error), logging a themed diagnostic first.
    """
    parser = argparse.ArgumentParser(prog='pythran',
                                     description='pythran: a python to C++ '
                                                 'compiler',
                                     epilog="It's a megablast!",
                                     fromfile_prefix_chars="@")
    parser.add_argument('input_file', type=str,
                        help='the pythran module to compile, '
                             'either a .py or a .cpp file')
    parser.add_argument('-o', dest='output_file', type=str,
                        help='path to generated file')
    parser.add_argument('-E', dest='translate_only', action='store_true',
                        help='only run the translator, do not compile')
    parser.add_argument('-e', dest='raw_translate_only', action='store_true',
                        help='similar to -E, '
                             'but does not generate python glue')
    parser.add_argument('-v', dest='verbose', action='store_true',
                        help='be verbose')
    parser.add_argument('-V', '--version',
                        action='version',
                        version=pythran.version.__version__)
    parser.add_argument('-p', dest='opts', metavar='pass',
                        action='append',
                        help='any pythran optimization to apply before code '
                             'generation',
                        default=list())
    parser.add_argument('-I', dest='include_dirs', metavar='include_dir',
                        action='append',
                        help='any include dir relevant to the underlying C++ '
                             'compiler',
                        default=list())
    parser.add_argument('-L', dest='libraries_dir', metavar='ldflags',
                        action='append',
                        help='any search dir relevant to the linker',
                        default=list())
    parser.add_argument('-D', dest='defines', metavar='macro_definition',
                        action='append',
                        help='any macro definition relevant to '
                             'the underlying C++ compiler',
                        default=list())

    parser.convert_arg_line_to_args = convert_arg_line_to_args

    # unknown options are forwarded verbatim to the C++ compiler/linker
    args, extra = parser.parse_known_args(sys.argv[1:])
    args.extra_flags = extra

    if args.raw_translate_only:
        args.translate_only = True

    if args.verbose:
        logger.setLevel(logging.INFO)

    try:
        if not os.path.exists(args.input_file):
            raise ValueError("input file `{0}' not found".format(
                args.input_file))

        module_name, ext = os.path.splitext(os.path.basename(args.input_file))

        # FIXME: do we want to support other ext than .cpp?
        if ext not in ['.cpp', '.py']:
            raise SyntaxError("Unsupported file extension: '{0}'".format(ext))

        if ext == '.cpp':
            if args.translate_only:
                raise ValueError("Do you really ask for Python-to-C++ "
                                 "on this C++ input file: '{0}'?".format(
                                     args.input_file))
            pythran.compile_cxxfile(module_name,
                                    args.input_file, args.output_file,
                                    **compile_flags(args))
        else:  # assume we have a .py input file here
            pythran.compile_pythranfile(args.input_file,
                                        output_file=args.output_file,
                                        cpponly=args.translate_only,
                                        **compile_flags(args))
    except IOError as e:
        logger.critical("I've got a bad feeling about this...\n"
                        "E: " + str(e))
        sys.exit(1)
    except ValueError as e:
        logger.critical("Chair to keyboard interface error\n"
                        "E: " + str(e))
        sys.exit(1)
    except SyntaxError as e:
        logger.critical("I am in trouble. Your input file does not seem "
                        "to match Pythran's constraints...\n"
                        "E: " + str(e))
        sys.exit(1)
    except CompileError as e:
        logger.critical("Cover me Jack. Jack? Jaaaaack!!!!\n"
                        "E: " + str(e))
        sys.exit(1)
    except NotImplementedError as e:
        logger.critical("MAYDAY, MAYDAY, MAYDAY; pythran compiler; "
                        "code area out of control\n"
                        "E: not implemented feature needed, "
                        "bash the developers")
        raise  # Why ? we may instead display the stacktrace and exit?
    except EnvironmentError as e:
        logger.critical("By Jove! Your environment does not seem "
                        "to provide all what we need\n"
                        "E: " + str(e))
        sys.exit(1)

if __name__ == '__main__':
    run()
|
MaxHalford/xam | xam/nlp/nb_svm.py | Python | mit | 695 | 0.001439 | import numpy as np
from scipy import sparse
fr | om skl | earn import utils
from sklearn import linear_model
class NBSVMClassifier(linear_model.LogisticRegression):
    """NB-SVM style classifier: a logistic regression fitted on the input
    scaled element-wise by the naive-Bayes log-count ratio ``r_`` learned
    in :meth:`fit`. Requires binary labels (0/1).
    """
    def predict(self, X):
        # Apply the learned NB scaling before delegating to the parent.
        return super().predict(X.multiply(self.r_))
    def predict_proba(self, X):
        # Same scaling as predict(); probabilities come from the parent.
        return super().predict_proba(X.multiply(self.r_))
    def fit(self, X, y, sample_weight=None):
        # assumes X is a sparse non-negative count matrix (e.g. bag of
        # words) -- TODO confirm against callers
        X, y = utils.check_X_y(X, y, accept_sparse='csr', order='C')
        def pr(x, y_i, y):
            # Laplace-smoothed mean feature count within class y_i.
            p = x[y == y_i].sum(0)
            return (p+1) / ((y==y_i).sum()+1)
        # Log-count ratio between the positive (1) and negative (0) class.
        self.r_ = sparse.csr_matrix(np.log(pr(X, 1, y) / pr(X, 0, y)))
        return super().fit(X.multiply(self.r_), y, sample_weight)
|
fbradyirl/home-assistant | homeassistant/components/clementine/__init__.py | Python | apache-2.0 | 32 | 0 | " | ""The clementine component."" | "
|
melon-li/openstack-dashboard | horizon/test/tests/tables.py | Python | apache-2.0 | 65,049 | 0 | # encoding=utf-8
#
# Copyright 2012 Nebula, Inc.
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import forms
from django import http
from django import shortcuts
from django.template import defaultfilters
from mox3.mox import IsA # noqa
import six
from horizon import tables
from horizon.tables import formset as table_formset
from horizon.tables import views as table_views
from horizon.test import helpers as test
class FakeObject(object):
    """Lightweight stand-in record used as table row data in the tests."""

    def __init__(self, id, name, value, status, optional=None, excluded=None):
        # Row attributes the table columns read, assigned in bulk.
        self.id, self.name, self.value = id, name, value
        self.status, self.optional, self.excluded = status, optional, excluded
        # Constant attribute exercised by column lookups.
        self.extra = "extra"

    def __str__(self):
        return u"{0}: {1}".format(self.__class__.__name__, self.name)
# Primary dataset: mixed up/down statuses; row 2 carries HTML to exercise
# escaping, rows 3/4 omit optional fields, row 4 uses non-ASCII text.
TEST_DATA = (
    FakeObject('1', 'object_1', 'value_1', 'up', 'optional_1', 'excluded_1'),
    FakeObject('2', 'object_2', '<strong>evil</strong>', 'down', 'optional_2'),
    FakeObject('3', 'object_3', 'value_3', 'up'),
    FakeObject('4', u'öbject_4', u'välue_1', u'üp', u'öptional_1',
               u'exclüded_1'),
)

# Single "down" row (also what MyRow.get_data always returns on ajax update).
TEST_DATA_2 = (
    FakeObject('1', 'object_1', 'value_1', 'down', 'optional_1', 'excluded_1'),
)

# Single "up" row.
TEST_DATA_3 = (
    FakeObject('1', 'object_1', 'value_1', 'up', 'optional_1', 'excluded_1'),
)

# Numeric values, for summation/average column tests.
TEST_DATA_4 = (
    FakeObject('1', 'object_1', 2, 'up'),
    FakeObject('2', 'object_2', 4, 'up'),
)

# Over-long status string, for the truncate=35 column option.
TEST_DATA_5 = (
    FakeObject('1', 'object_1', 'value_1',
               'A Status that is longer than 35 characters!', 'optional_1'),
)

# Values including 'DELETED', for row-selectability tests.
TEST_DATA_6 = (
    FakeObject('1', 'object_1', 'DELETED', 'down'),
    FakeObject('2', 'object_2', 'CREATED', 'up'),
    FakeObject('3', 'object_3', 'STANDBY', 'standby'),
)

# Single row for cell-wrapping tests.
TEST_DATA_7 = (
    FakeObject('1', 'wrapped name', 'wrapped value', 'status',
               'not wrapped optional'),
)
class MyLinkAction(tables.LinkAction):
    """Row link action that points every row at the login view."""
    name = "login"
    verbose_name = "Log In"
    url = "login"
    attrs = {
        "class": "ajax-modal",
    }

    def get_link_url(self, datum=None, *args, **kwargs):
        # Same target for every datum: the resolved "login" URL.
        return reverse(self.url)
class MyAction(tables.Action):
    """Delete-style action, hidden for rows whose status is "down"."""
    name = "delete"
    verbose_name = "Delete Me"
    verbose_name_plural = "Delete Them"

    def allowed(self, request, obj=None):
        # Allowed when no datum is given (table-level) or status != 'down'.
        return getattr(obj, 'status', None) != 'down'

    def handle(self, data_table, request, object_ids):
        # Redirect carrying the selected ids so tests can assert on them.
        return shortcuts.redirect('http://example.com/?ids=%s'
                                  % ",".join(object_ids))
class MyColumn(tables.Column):
    """Column subclass used to verify that Meta.column_class is honoured."""
    pass
class MyRowSelectable(tables.Row):
    """Ajax row whose checkbox is suppressed for rows with value 'DELETED'."""
    ajax = True

    def can_be_selected(self, datum):
        return datum.value != 'DELETED'
class MyRow(tables.Row):
    """Ajax-updatable row used by the test tables."""
    ajax = True

    @classmethod
    def get_data(cls, request, obj_id):
        # Always returns the first TEST_DATA_2 object, regardless of obj_id.
        return TEST_DATA_2[0]
class MyBatchAction(tables.BatchAction):
    """Minimal batch action; its action() is intentionally a no-op."""
    name = "batch"
    action_present = "Batch"
    action_past = "Batched"
    data_type_singular = "Item"
    data_type_plural = "Items"

    def action(self, request, object_ids):
        pass
class MyBatchActionWithHelpText(MyBatchAction):
    """Batch action variant that also carries a help tooltip."""
    name = "batch_help"
    # Distinct name/labels so it can coexist with MyBatchAction in one table.
    help_text = "this is help."
    action_present = "BatchHelp"
    action_past = "BatchedHelp"
class MyToggleAction(tables.BatchAction):
    """Two-state batch action whose label depends on the row's status.

    ``action_present``/``action_past`` are (down-label, up-label) pairs;
    index 1 is selected when the row is currently down.
    """
    name = "toggle"
    action_present = ("Down", "Up")
    action_past = ("Downed", "Upped")
    data_type_singular = "Item"
    data_type_plural = "Items"

    def allowed(self, request, obj=None):
        if not obj:
            return False
        # Remember the row state so action() can pick the past-tense label.
        self.down = getattr(obj, 'status', None) == 'down'
        if self.down:
            self.current_present_action = 1
        # Only rows that are explicitly 'down' or 'up' can be toggled.
        return self.down or getattr(obj, 'status', None) == 'up'

    def action(self, request, object_ids):
        if self.down:
            # up it
            self.current_past_action = 1
class MyDisabledAction(MyToggleAction):
    """Variant of the toggle action that is never allowed (always hidden)."""
    def allowed(self, request, obj=None):
        return False
class MyFilterAction(tables.FilterAction):
    """Client-side filter matching the query string as a case-insensitive
    substring of each object's name."""

    def filter(self, table, objs, filter_string):
        query = filter_string.lower()
        # The builtin filter() is deliberately used so the return type
        # matches the original (list on py2, iterator on py3).
        return filter(lambda obj: query in obj.name.lower(), objs)
class MyServerFilterAction(tables.FilterAction):
    """Server-side filter action offering 'name' and 'status' fields."""
    filter_type = 'server'
    # (field id, label, third flag) — presumably marks 'status' as an
    # API-side filter; confirm against FilterAction's filter_choices contract.
    filter_choices = (('name', 'Name', False),
                      ('status', 'Status', True))
    needs_preloading = True

    def filter(self, table, items, filter_string):
        # Only the 'name' field is actually filtered here; any other
        # selected field returns the items unchanged.
        filter_field = table.get_filter_field()
        if filter_field == 'name' and filter_string:
            return [item for item in items
                    if filter_string in item.name]
        return items
class MyUpdateAction(tables.UpdateAction):
    """Inline-edit handler that always permits editing; the update is a no-op."""
    def allowed(self, *args):
        return True

    def update_cell(self, *args):
        pass
class MyUpdateActionNotAllowed(MyUpdateAction):
    """Same inline-edit handler, but with editing always forbidden."""
    def allowed(self, *args):
        return False
def get_name(obj):
    """Display transform for the name column: prefix the raw name."""
    return "custom {0}".format(obj.name)
def get_link(obj):
    """Row link callable: every object links to the resolved login view."""
    return reverse('login')
class MyTable(tables.DataTable):
    """Primary test table: exercises column options (links, summation,
    truncation, inline edit) and row/table actions over FakeObject data."""
    # Per-status cell attributes (tooltip text + colour), looked up via
    # cell_attributes_getter on the status column below.
    tooltip_dict = {'up': {'title': 'service is up and running',
                           'style': 'color:green;cursor:pointer'},
                    'down': {'title': 'service is not available',
                             'style': 'color:red;cursor:pointer'}}
    id = tables.Column('id', hidden=True, sortable=False)
    # Rendered through get_name() and editable inline via MyUpdateAction.
    name = tables.Column(get_name,
                         verbose_name="Verbose Name",
                         sortable=True,
                         form_field=forms.CharField(required=True),
                         form_field_attributes={'class': 'test'},
                         update_action=MyUpdateAction)
    value = tables.Column('value',
                          sortable=True,
                          link='http://example.com/',
                          attrs={'class': 'green blue'},
                          summation="average",
                          link_classes=('link-modal',),
                          link_attrs={'data-type': 'modal dialog',
                                      'data-tip': 'click for dialog'})
    status = tables.Column('status', link=get_link, truncate=35,
                           cell_attributes_getter=tooltip_dict.get)
    optional = tables.Column('optional', empty_value='N/A')
    excluded = tables.Column('excluded')

    class Meta(object):
        name = "my_table"
        verbose_name = "My Table"
        status_columns = ["status"]
        # 'excluded' is declared above but deliberately left out of the
        # rendered column list.
        columns = ('id', 'name', 'value', 'optional', 'status')
        row_class = MyRow
        column_class = MyColumn
        table_actions = (MyFilterAction, MyAction, MyBatchAction,
                         MyBatchActionWithHelpText)
        row_actions = (MyAction, MyLinkAction, MyBatchAction, MyToggleAction,
                       MyBatchActionWithHelpText)
class MyServerFilterTable(MyTable):
    """Same table as MyTable, but with the server-side filter action."""
    class Meta(object):
        name = "my_table"
        verbose_name = "My Table"
        status_columns = ["status"]
        columns = ('id', 'name', 'value', 'optional', 'status')
        row_class = MyRow
        column_class = MyColumn
        table_actions = (MyServerFilterAction, MyAction, MyBatchAction)
        row_actions = (MyAction, MyLinkAction, MyBatchAction, MyToggleAction,
                       MyBatchActionWithHelpText)
class MyTableSelectable(MyTable):
class Meta(object):
name = "my_table"
columns = ('id', 'name', 'value', 'status')
row_class = MyRowSelectable
status_columns = ["status"]
mu |
shashank971/edx-platform | openedx/core/djangoapps/credit/models.py | Python | agpl-3.0 | 24,732 | 0.001536 | # -*- coding: utf-8 -*-
"""
Models for Credit Eligibility for courses.
Credit courses allow students to receive university credit for
successful completion of a course on EdX
"""
import datetime
from collections import defaultdict
import logging
import pytz
from django.conf import settings
from django.core.cache import cache
from django.dispatch import receiver
from django.db import models, transaction, IntegrityError
from django.core.validators import RegexValidator
from simple_history.models import HistoricalRecords
from jsonfield.fields import JSONField
from model_utils.models import TimeStampedModel
from xmodule_django.models import CourseKeyField
from django.utils.translation import ugettext_lazy
log = logging.getLogger(__name__)
class CreditProvider(TimeStampedModel):
    """
    This model represents an institution that can grant credit for a course.

    Each provider is identified by unique ID (e.g., 'ASU'). CreditProvider also
    includes a `url` where the student will be sent when he/she will try to
    get credit for course. Eligibility duration will be use to set duration
    for which credit eligible message appears on dashboard.
    """
    # Public identifier for the provider, e.g. "ASU".
    # NOTE(review): the commas inside the regex character class are literal,
    # so ',' is also accepted — this contradicts the validator message;
    # confirm intent before tightening the pattern.
    provider_id = models.CharField(
        max_length=255,
        unique=True,
        validators=[
            RegexValidator(
                regex=r"^[a-z,A-Z,0-9,\-]+$",
                message="Only alphanumeric characters and hyphens (-) are allowed",
                code="invalid_provider_id",
            )
        ],
        help_text=ugettext_lazy(
            "Unique identifier for this credit provider. "
            "Only alphanumeric characters and hyphens (-) are allowed. "
            "The identifier is case-sensitive."
        )
    )
    # Inactive providers are excluded from the cached provider list below.
    active = models.BooleanField(
        default=True,
        help_text=ugettext_lazy("Whether the credit provider is currently enabled.")
    )
    display_name = models.CharField(
        max_length=255,
        help_text=ugettext_lazy("Name of the credit provider displayed to users")
    )
    enable_integration = models.BooleanField(
        default=False,
        help_text=ugettext_lazy(
            "When true, automatically notify the credit provider "
            "when a user requests credit. "
            "In order for this to work, a shared secret key MUST be configured "
            "for the credit provider in secure auth settings."
        )
    )
    # NOTE(review): "this will the the end-point" typo in the user-facing
    # help_text; fixing it is schema-neutral but still requires a migration.
    provider_url = models.URLField(
        default="",
        help_text=ugettext_lazy(
            "URL of the credit provider. If automatic integration is "
            "enabled, this will the the end-point that we POST to "
            "to notify the provider of a credit request. Otherwise, the "
            "user will be shown a link to this URL, so the user can "
            "request credit from the provider directly."
        )
    )
    provider_status_url = models.URLField(
        default="",
        help_text=ugettext_lazy(
            "URL from the credit provider where the user can check the status "
            "of his or her request for credit. This is displayed to students "
            "*after* they have requested credit."
        )
    )
    provider_description = models.TextField(
        default="",
        help_text=ugettext_lazy(
            "Description for the credit provider displayed to users."
        )
    )
    fulfillment_instructions = models.TextField(
        null=True,
        blank=True,
        help_text=ugettext_lazy(
            "Plain text or html content for displaying further steps on "
            "receipt page *after* paying for the credit to get credit for a "
            "credit course against a credit provider."
        )
    )
    eligibility_email_message = models.TextField(
        default="",
        help_text=ugettext_lazy(
            "Plain text or html content for displaying custom message inside "
            "credit eligibility email content which is sent when user has met "
            "all credit eligibility requirements."
        )
    )
    receipt_email_message = models.TextField(
        default="",
        help_text=ugettext_lazy(
            "Plain text or html content for displaying custom message inside "
            "credit receipt email content which is sent *after* paying to get "
            "credit for a credit course."
        )
    )
    thumbnail_url = models.URLField(
        default="",
        max_length=255,
        help_text=ugettext_lazy(
            "Thumbnail image url of the credit provider."
        )
    )

    # Cache key under which the serialized list of active providers is stored;
    # invalidated by the post_save/post_delete signal handler below.
    CREDIT_PROVIDERS_CACHE_KEY = "credit.providers.list"

    @classmethod
    def get_credit_providers(cls, providers_list=None):
        """
        Retrieve a list of all credit providers or filter on providers_list,
        represented as dictionaries.

        Arguments:
            providers_list (list of strings or None): contains list of ids if
                required results to be filtered, None for all providers.

        Returns:
            list of providers represented as dictionaries.
        """
        # Attempt to retrieve the credit provider list from the cache if provider_list is None
        # The cache key is invalidated when the provider list is updated
        # (a post-save signal handler on the CreditProvider model)
        # This doesn't happen very often, so we would expect a *very* high
        # cache hit rate.
        credit_providers = cache.get(cls.CREDIT_PROVIDERS_CACHE_KEY)
        if credit_providers is None:
            # Cache miss: construct the provider list and save it in the cache
            credit_providers = CreditProvider.objects.filter(active=True)
            credit_providers = [
                {
                    "id": provider.provider_id,
                    "display_name": provider.display_name,
                    "url": provider.provider_url,
                    "status_url": provider.provider_status_url,
                    "description": provider.provider_description,
                    "enable_integration": provider.enable_integration,
                    "fulfillment_instructions": provider.fulfillment_instructions,
                    "thumbnail_url": provider.thumbnail_url,
                }
                for provider in credit_providers
            ]
            cache.set(cls.CREDIT_PROVIDERS_CACHE_KEY, credit_providers)
        # Filtering is applied to the cached list, never to the queryset.
        if providers_list:
            credit_providers = [provider for provider in credit_providers if provider['id'] in providers_list]
        return credit_providers

    @classmethod
    def get_credit_provider(cls, provider_id):
        """
        Retrieve a credit provider with provided 'provider_id'.

        Returns None if no active provider matches.
        """
        try:
            return CreditProvider.objects.get(active=True, provider_id=provider_id)
        except cls.DoesNotExist:
            return None

    def __unicode__(self):
        """Unicode representation of the credit provider. """
        return self.provider_id
# Keep the cached provider list consistent whenever a provider row changes.
@receiver(models.signals.post_save, sender=CreditProvider)
@receiver(models.signals.post_delete, sender=CreditProvider)
def invalidate_provider_cache(sender, **kwargs):  # pylint: disable=unused-argument
    """Invalidate the cache of credit providers. """
    cache.delete(CreditProvider.CREDIT_PROVIDERS_CACHE_KEY)
class CreditCourse(models.Model):
"""
Model for tracking a credit course.
"""
course_key = CourseKeyField(max_length=255, db_index=True, unique=True)
enabled = models.BooleanField(default=False)
CREDIT_COURSES_CACHE_KEY = "credit.courses.set"
@classmethod
def is_credit_course(cls, course_key):
"""
Check whether the course has been configured for credit.
Args:
course_key (CourseKey): Identifier of the course.
Returns:
bool: True iff this is a credit course.
"""
credit_courses = cache.get(cls.CREDIT_COURSES_CACHE_KEY)
if credit_courses is None:
credit_courses = set(
unicode(course.course_key)
for course in cls.objects.filter(enabled=True)
)
cache.set(cls.CREDIT_COURSES_CACHE_KEY, credit_courses)
|
chengdujin/newsman | newsman/tools/text_based_feeds/feed_title_change.py | Python | agpl-3.0 | 797 | 0.002509 | import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append('../..')
from config.settings import Collection, db
from config.settings import FEED_REGISTRAR
def _convert(language, country):
    """Update feed titles in the feed registrar collection from a list file.

    Reads 'feed_lists/<language>_<country>_feed_titles', where each line has
    the form '<feed url>*|*<title>', and writes the title onto the matching
    feed document (matched by language, country and feed_link).
    """
    feeds = Collection(db, FEED_REGISTRAR)
    f = open('feed_lists/%s_%s_feed_titles' % (language, country), 'r')
    data = f.readlines()
    f.close()
    for d in data:
        # Echo each raw line for progress visibility.
        print d
        url, title = d.strip().split('*|*')
        # NOTE(review): the update() result is assigned but never used.
        item = feeds.update({'language': language, 'countries': country,
                             'feed_link': url.strip()},
                            {'$set': {'feed_title': title.strip()}})
if __name__ == "__main__":
    # Usage: python feed_title_change.py <language> <country>
    if len(sys.argv) > 1:
        _convert(sys.argv[1], sys.argv[2])
    else:
        print 'Please indicate a language and country'
|
RedhawkSDR/framework-codegen | redhawk/codegen/jinja/python/ports/frontend.py | Python | lgpl-3.0 | 2,269 | 0.004848 | #
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK core.
#
# REDHAWK core is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
from redhawk.codegen.lang.idl import IDLInterface
fro | m redhawk.codegen.jinja.ports import PortGenerator
from redhawk.codegen.jinja.ports import PortFactory
from generator import PythonPortGenerator
from redhawk.codegen.lang import python
class FrontendPortFactory(PortFactory):
    """Creates port generators for interfaces in the FRONTEND IDL namespace."""

    NAMESPACE = 'FRONTEND'

    def match(self, port):
        """Return True if the port's repid belongs to the FRONTEND namespace."""
        return IDLInterface(port.repid()).namespace() == self.NAMESPACE

    def generator(self, port):
        """Return a FrontendPortGenerator for the given port."""
        # The original computed IDLInterface(...).interface() into an unused
        # local; the generator derives everything it needs from the port.
        return FrontendPortGenerator(port)
class FrontendPortGenerator(PythonPortGenerator):
    """Generates Python constructor/loader code for FRONTEND ports."""

    def className(self):
        """Return the fully-qualified class name of the generated port."""
        return "frontend." + self.templateClass()

    def templateClass(self):
        """Return the port class name: 'In'/'Out' by direction + interface."""
        if self.direction == 'uses':
            porttype = 'Out'
        else:
            porttype = 'In'
        porttype += self.interface + 'Port'
        return porttype

    def _ctorArgs(self, port):
        # Port name as a quoted Python string literal.
        return [python.stringLiteral(port.name())]

    def constructor(self, name):
        """Return the Python expression that instantiates this port.

        FEI input ports additionally receive the owning component ('self')
        as a second constructor argument.
        """
        fei_ports = ['InDigitalTunerPort', 'InFrontendTunerPort',
                     'InAnalogTunerPort', 'InGPSPort', 'InRFInfoPort',
                     'InRFSourcePort', 'InNavDataPort']
        for _port in fei_ports:
            if _port in self.className():
                return '%s(%s, self)' % (self.className(),
                                         ', '.join(self._ctorArgs(name)))
        return '%s(%s)' % (self.className(), ', '.join(self._ctorArgs(name)))

    def loader(self):
        """Return the Jinja2 template loader for this package."""
        # Bug fix: jinja2 was referenced without being imported anywhere in
        # this module, so calling loader() raised NameError.
        import jinja2
        return jinja2.PackageLoader(__package__)
|
was4444/chromium.src | tools/perf/profile_creators/extension_profile_extender_unittest.py | Python | bsd-3-clause | 1,275 | 0.009412 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import tempfile
from profile_creators import extension_profile_extender
from telemetry import decorators
from telemetry.testing import options_for_unittests
from telemetry.testing import page_test_test_case
class ExtensionProfileExtenderUnitTest(page_test_test_case.PageTestTestCase):
  """Smoke test for creating an extension profile.

  Creates an extension profile and verifies that it has non-empty contents.
  """
  # Should be enabled on mac, disabled because flaky: https://crbug.com/586362.
  @decorators.Disabled('all')  # Extension generation only works on Mac for now.
  def testExtensionProfileCreation(self):
    tmp_dir = tempfile.mkdtemp()
    files_in_crx_dir = 0
    try:
      options = options_for_unittests.GetCopy()
      options.output_profile_path = tmp_dir
      extender = extension_profile_extender.ExtensionProfileExtender(options)
      extender.Run()
      # The extender is expected to leave .crx files in this subdirectory.
      crx_dir = os.path.join(tmp_dir, 'external_extensions_crx')
      files_in_crx_dir = len(os.listdir(crx_dir))
    finally:
      # Always clean up the temporary profile, even if Run() raised.
      shutil.rmtree(tmp_dir)
    self.assertGreater(files_in_crx_dir, 0)
|
Aloomaio/googleads-python-lib | examples/adwords/v201806/account_management/get_account_hierarchy.py | Python | apache-2.0 | 3,452 | 0.009849 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets the account hierarchy under the current account.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
PAGE_SIZE = 500
def DisplayAccountTree(account, accounts, links, depth=0):
  """Displays an account tree.

  Recursively prints the account and its children, indented two dashes
  per level of depth.

  Args:
    account: dict The account to display.
    accounts: dict Map from customerId to account.
    links: dict Map from customerId to child links.
    depth: int Depth of the current account in the tree.
  """
  # Parenthesized single-argument print works under both Python 2 and 3;
  # the original bare print statement was Python-2-only.
  prefix = '-' * depth * 2
  print('%s%s, %s' % (prefix, account['customerId'], account['name']))
  if account['customerId'] in links:
    for child_link in links[account['customerId']]:
      child_account = accounts[child_link['clientCustomerId']]
      DisplayAccountTree(child_account, accounts, links, depth + 1)
def main(client):
  """Fetch the managed-customer graph and print it as an indented tree.

  Args:
    client: adwords.AdWordsClient An initialized AdWords client.
  """
  # Initialize appropriate service.
  managed_customer_service = client.GetService(
      'ManagedCustomerService', version='v201806')

  # Construct selector to get all accounts.
  offset = 0
  selector = {
      'fields': ['CustomerId', 'Name'],
      'paging': {
          'startIndex': str(offset),
          'numberResults': str(PAGE_SIZE)
      }
  }
  more_pages = True
  accounts = {}
  child_links = {}
  parent_links = {}
  root_account = None

  # Page through the account graph PAGE_SIZE entries at a time.
  while more_pages:
    # Get serviced account graph.
    page = managed_customer_service.get(selector)
    if 'entries' in page and page['entries']:
      # Create map from customerId to parent and child links.
      if 'links' in page:
        for link in page['links']:
          if link['managerCustomerId'] not in child_links:
            child_links[link['managerCustomerId']] = []
          child_links[link['managerCustomerId']].append(link)
          if link['clientCustomerId'] not in parent_links:
            parent_links[link['clientCustomerId']] = []
          parent_links[link['clientCustomerId']].append(link)
      # Map from customerID to account.
      for account in page['entries']:
        accounts[account['customerId']] = account
    offset += PAGE_SIZE
    selector['paging']['startIndex'] = str(offset)
    more_pages = offset < int(page['totalNumEntries'])

  # Find the root account: the only account that is nobody's client.
  for customer_id in accounts:
    if customer_id not in parent_links:
      root_account = accounts[customer_id]

  # Display account tree.
  if root_account:
    print 'CustomerId, Name'
    DisplayAccountTree(root_account, accounts, child_links, 0)
  else:
    print 'Unable to determine a root account'
if __name__ == '__main__':
  # Initialize client object; credentials are read from "googleads.yaml"
  # (see the module docstring).
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client)
|
berquist/pyquante2 | pyquante2/dft/dft.py | Python | bsd-3-clause | 899 | 0.032258 | import numpy as np
from pyquante2.dft.functionals import xs,cvwn5
# Maybe move these to the functionals module and import from there?
# Lookup tables from functional name to its exchange (xname) and correlation
# (cname) implementations; None means the functional has no correlation part.
xname = dict(lda=xs,xs=xs,svwn=xs)
cname = dict(lda=cvwn5,svwn=cvwn5,xs=None)
def get_xc(grid, D, **kwargs):
    """Evaluate the exchange-correlation energy and matrix on a DFT grid.

    Args:
        grid: integration grid providing getdens(), points (weights in
            column 3) and basis-function amplitudes (bfamps).
        D: density matrix.
        xcname (kwarg): functional name, one of the keys of xname/cname
            (default 'lda').

    Returns:
        (Exc, Vxc): the XC energy and the XC potential matrix.
    """
    functional = kwargs.get('xcname', 'lda')
    # Restricted LDA-type functionals only: no gradient corrections and no
    # spin polarization yet.
    exchange = xname[functional]
    correlation = cname[functional]

    rho = grid.getdens(D)
    fx, dfxa = exchange(rho)
    if correlation:
        # Both spin channels are given the same density.
        fc, dfca, dfcb = correlation(rho, rho)
    else:
        fc = dfca = dfcb = 0

    weights = grid.points[:, 3]
    Vxc = np.einsum('g,g,gI,gJ->IJ', weights, dfxa + dfca,
                    grid.bfamps, grid.bfamps)
    # fx is per spin channel while fc already covers both spins, hence the
    # factor of two on fx.
    Exc = np.dot(weights, 2 * fx + fc)
    return Exc, Vxc
|
jwren/intellij-community | python/testData/codeInsight/smartEnter/firstClauseAfterEmptyMatchStatementWithSubjectAndColon.py | Python | apache-2.0 | 15 | 0.2 | ma | tch x<caret>: | |
jmacmahon/invenio | modules/bibformat/lib/elements/bfe_oai_identifier.py | Python | gpl-2.0 | 1,610 | 0.004969 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012 CERN.
#
# Invenio is free software; you can redistribute it and/ | or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have recei | ved a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints record OAI identifier
"""
import cgi
from invenio.config import CFG_OAI_ID_FIELD
def format_element(bfo, instance_prefix="", separator=", ", instance_suffix=""):
    """
    Return the record's OAI identifier(s), HTML-escaped and decorated.

    @param instance_prefix: text printed before each identifier (pre-escaped)
    @param separator: text printed between identifiers (pre-escaped)
    @param instance_suffix: text printed after each identifier (pre-escaped)
    """
    decorated = []
    for value in bfo.fields(CFG_OAI_ID_FIELD):
        if value:
            # NOTE(review): cgi.escape was removed in Python 3.8; this module
            # targets the legacy Python 2 runtime used by Invenio.
            decorated.append(instance_prefix + cgi.escape(value) + instance_suffix)
    return separator.join(decorated)
def escape_values(bfo):
    """
    Tell BibFormat whether this element's output needs escaping.

    Returns 0 (no): identifiers are already escaped inside format_element.
    """
    return 0
|
plotly/octogrid | octogrid/builder/builder.py | Python | mit | 2,032 | 0.000984 | # -*- coding: utf-8 -*-
"""
octogrid.builder.builder
This module helps i | n generating a GML file from the graph content
"""
from ..store.store import cache_file, copy_file
from ..utils.utils import username_to_file
# Fixed GML syntax fragments used to assemble the output: the outer
# 'graph [ ... ]' wrapper, per-node and per-edge record delimiters, and a
# Creator signature line placed at the top of the file.
FILE_PREFIX = 'graph\n[\n'
FILE_SUFFIX = ']\n'
NODE_PREFIX = '\tnode\n\t[\n'
NODE_SUFFIX = '\t]\n'
EDGE_PREFIX = '\tedge\n\t[\n'
EDGE_SUFFIX = '\t]\n'
SIGNATURE = 'Creator "octogrid [https://git.io/vzhM0]"\n'
def format_node(id, label):
    """
    Build the GML fragment for one node from its id and label lines.
    """
    return ''.join((NODE_PREFIX, id, label, NODE_SUFFIX))
def format_edge(source, target):
    """
    Build the GML fragment for one edge from its source and target lines.
    """
    return ''.join((EDGE_PREFIX, source, target, EDGE_SUFFIX))
def format_content(node, edge):
    """
    Assemble the complete GML file: signature, wrapper, nodes, then edges.
    """
    return ''.join((SIGNATURE, FILE_PREFIX, node, edge, FILE_SUFFIX))
def reuse_gml(username):
    """
    Use the cached copy for this username
    """
    # Copies the previously cached GML file back into the working location.
    copy_file(username_to_file(username))
def generate_gml(username, nodes, edges, cache=False):
    """
    Generate a GML format file representing the given graph attributes.

    Nodes are numbered 1..len(nodes) in list order; each edge is a pair of
    node names translated to those numeric ids. The file is written to
    username_to_file(username) and optionally cached.
    """
    # Map each node name to its 1-based GML id once, instead of calling
    # nodes.index() for every edge endpoint (which was O(n) per lookup).
    # Note: an unknown edge endpoint now raises KeyError instead of
    # ValueError.
    node_ids = {node: position + 1 for position, node in enumerate(nodes)}

    # file segment that represents all the nodes in graph
    node_content = ""
    for node, node_id in ((node, node_ids[node]) for node in nodes):
        id_line = "\t\tid %d\n" % node_id
        label_line = "\t\tlabel \"%s\"\n" % node
        node_content += format_node(id_line, label_line)

    # file segment that represents all the edges in graph
    edge_content = ""
    for edge in edges:
        source_line = "\t\tsource %d\n" % node_ids[edge[0]]
        target_line = "\t\ttarget %d\n" % node_ids[edge[1]]
        edge_content += format_edge(source_line, target_line)

    # formatted file content
    content = format_content(node_content, edge_content)

    with open(username_to_file(username), 'w') as f:
        f.write(content)

    # save the file for further use
    if cache:
        cache_file(username_to_file(username))
|
facebookexperimental/eden | eden/scm/edenscm/mercurial/scmutil.py | Python | gpl-2.0 | 49,777 | 0.000743 | # Portions Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import glob
import hashlib
import os
import re
import socket
import subprocess
import time
import traceback
import weakref
from . import (
encoding,
error,
match as matchmod,
pathutil,
phases,
pycompat,
revsetlang,
similar,
smartset,
url,
util,
vfs,
visibility,
winutil,
)
from .i18n import _
from .node import hex, nullid, short, wdirid, wdirrev
from .pycompat import basestring, encodeutf8, isint
if pycompat.iswindows:
from . import scmwindows as scmplatform
else:
from . import scmposix as scmplatform
termsize = scmplatform.termsize
# pyre-fixme[39]: `Tuple[Any, ...]` is not a valid parent class.
class status(tuple):
    """Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    """

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored, clean):
        groups = (modified, added, removed, deleted, unknown, ignored, clean)
        # Every entry in every group must already be a str path.
        for group in groups:
            assert all(isinstance(f, str) for f in group)
        return tuple.__new__(cls, groups)

    @property
    def modified(self):
        """files whose content has changed"""
        return self[0]

    @property
    def added(self):
        """files newly added"""
        return self[1]

    @property
    def removed(self):
        """files that were removed"""
        return self[2]

    @property
    def deleted(self):
        """files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        """
        return self[3]

    @property
    def unknown(self):
        """files not in the dirstate that are not ignored"""
        return self[4]

    @property
    def ignored(self):
        """files not in the dirstate that are ignored (by _dirignore())"""
        return self[5]

    @property
    def clean(self):
        """files left unmodified"""
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = (
            "<status modified=%r, added=%r, removed=%r, deleted=%r, "
            "unknown=%r, ignored=%r, clean=%r>"
        )
        return template % self
def nochangesfound(ui, repo, excluded=None):
    """Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    """
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            # Only count changesets in the secret phase (or higher): they
            # were deliberately withheld from the exchange.
            if ctx.phase() >= phases.secret:
                secretlist.append(n)
    if secretlist:
        ui.status(
            _("no changes found (ignored %d secret changesets)\n") % len(secretlist)
        )
    else:
        ui.status(_("no changes found\n"))
def callcatch(ui, func):
"""call func() with global exception handling
return func() if no exception happens. otherwise do some error handling
and return an exit code accordingly. does not handle all exceptions.
"""
try:
try:
return func()
except Exception as ex: # re-raises
ui.traceback()
# Log error info for all non-zero exits.
_uploadtraceback(ui, str(ex), util.smartformatexc())
raise
finally:
# Print 'remote:' messages before 'abort:' messages.
# This also avoids sshpeer.__del__ during Py_Finalize -> GC
# on Python 3, which can cause deadlocks waiting for the
# stderr reading thread.
from . import sshpeer
sshpeer.cleanupall()
# Global exception handling, alphabetically
# Mercurial-specific first, followed by built-in and library exceptions
except error.LockHeld as inst:
if inst.errno == errno.ETIMEDOUT:
reason = _("timed out waiting for lock held by %s") % inst.lockinfo
else:
reason = _("lock held by %r") % inst.lockinfo
ui.warn(_("%s: %s\n") % (inst.desc or inst.filename, reason), error=_("abort"))
if not inst.lockinfo:
ui.warn(_("(lock might be very busy)\n"))
except error.LockUnavailable as inst:
ui.warn(
_("could not lock %s: %s\n")
% (inst.desc or inst.filename, encoding.strtolocal(inst.strerror)),
error=_("abort"),
)
except error.OutOfBandError as inst:
if inst.args:
msg = _("remote error:\n")
else:
msg = _("remote error\n")
ui.warn(msg, error=_("abort"))
if inst.args:
ui.warn("".join(inst.args))
if inst.hint:
ui.warn("(%s)\n" % inst.hint)
except error.RepoError as inst:
ui.warn(_("%s!\n") % inst, error=_("abort"))
inst.printcontext(ui)
if inst.hint:
ui.warn(_("(%s)\n") % inst.hint)
except error.ResponseError as inst:
ui.warn(inst.args[0], error=_("abort"))
if not isinstance(inst.args[1], basestring):
ui.warn(" %r\n" % (inst.args[1],))
elif not inst.args[1]:
ui.warn(_(" empty string\n"))
else:
ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
except error.CensoredNodeError as inst:
ui.warn(_("file censored %s!\n") % inst, error=_("abort"))
except error.CommitLookupError as inst:
ui.warn(_("%s!\n") % inst.args[0], error=_("abort"))
except error.CertificateError as inst:
# This error is definitively due to a problem with the user's client
# certificate, so print the configured remediation message.
helptext = ui.config("help", "tlsauthhelp")
if helptext is None:
helptext = _("(run 'hg config auth' to see configured certificates)")
ui.warn(
_("%s!\n\n%s\n") % (inst.args[0], helptext),
erro | r=_("certificate error"),
)
except error.TlsError as inst:
# This is a generic TLS error that may or may not be due to the user's
# client certificate, so print a more | generic message about TLS errors.
helptext = ui.config("help", "tlshelp")
if helptext is None:
helptext = _("(is your client certificate valid?)")
ui.warn(
_("%s!\n\n%s\n") % (inst.args[0], helptext),
error=_("tls error"),
)
except error.RevlogError as inst:
ui.warn(_("%s!\n") % inst, error=_("abort"))
inst.printcontext(ui)
except error.InterventionRequired as inst:
ui.warn("%s\n" % inst)
if inst.hint:
ui.warn(_("(%s)\n") % inst.hint)
return 1
except error.WdirUnsupported:
ui.warn(_("working directory revision cannot be specified\n"), error=_("abort"))
except error.Abort as inst:
ui.warn(_("%s\n") % inst, error=_("abort"), component=inst.component)
inst.printcontext(ui)
if inst.hint:
ui.warn(_("(%s)\n") % inst.hint)
return inst.exitcode
except (error.IndexedLogError, error.MetaLogError) as inst:
ui.warn(_("internal storage is corrupted\n"), error=_("abort"))
ui.warn(_(" %s\n\n") % str(inst).replace("\n", "\n "))
ui.warn(_("(this usually happens after hard reboot or system crash)\n"))
ui.warn(_("(try '@prog@ doctor' to attempt to fix it)\n"))
except error.RustError as inst:
if ui.config("ui", "traceback") and inst.args[0].has_metadata():
fault = inst.args[0 |
chilcote/unearth | artifacts/mac_address.py | Python | apache-2.0 | 879 | 0.001138 | from SystemConfiguration import (
SCDynamicStoreCopyValue,
SCDynamic | StoreCreate,
SCNetworkInterfaceCopyAll,
SCNetworkInterfaceGetBSDName,
SCNetworkInterfaceGetHardwareAddressString,
)
factoid = "mac_addr | ess"
def fact():
    """Returns the mac address of this Mac"""
    primary_MAC = "None"
    net_config = SCDynamicStoreCreate(None, "net", None, None)
    # Look up which interface currently carries the default IPv4 route.
    states = SCDynamicStoreCopyValue(net_config, "State:/Network/Global/IPv4")
    primary_interface = states["PrimaryInterface"]
    # Find the SCNetworkInterface whose BSD name (e.g. "en0") matches it.
    primary_devices = [
        x
        for x in SCNetworkInterfaceCopyAll()
        if SCNetworkInterfaceGetBSDName(x) == primary_interface
    ]
    if primary_devices:
        primary_MAC = SCNetworkInterfaceGetHardwareAddressString(primary_devices[0])
    # Falls back to the string "None" when no matching device was found.
    return {factoid: primary_MAC}
if __name__ == "__main__":
    # When run directly, print the value wrapped in <result> tags
    # (presumably the Jamf extension-attribute convention — confirm).
    print("<result>%s</result>" % fact()[factoid])
|
michelesr/gasistafelice | gasistafelice/rest/views/blocks/users.py | Python | agpl-3.0 | 4,621 | 0.01082 |
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from rest.views.blocks.base import BlockSSDataTables, ResourceBlockAction
from consts import EDIT, CONFIRM, EDIT_MULTIPLE, VIEW
from lib.shortcuts import render_to_response, render_to_xml_response, render_to_context_response
from gf.gas.forms.base import SingleUserForm
from django.forms.formsets import formset_factory
from lib.formsets import BaseFormSetWithRequest
from flexi_auth.models import ObjectWithContext
from gf.base.models import Person
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
class Block(BlockSSDataTables):
    """Admin datatable block listing auth Users; subclasses supply the queryset."""
    BLOCK_NAME = "users"
    #FIXME minor: BLOCK_DESCRIPTION = _lazy("Users")
    #FIXME minor: _lazy is appropriate, but there is probably some bug elsewhere...now use ugettext it is safe in our case
    BLOCK_DESCRIPTION = _("Users")
    BLOCK_VALID_RESOURCE_TYPES = [] #KO: because we NEED subclasses
    # DataTables column index -> field name used for ordering/rendering.
    COLUMN_INDEX_NAME_MAP = {
        0: 'pk',
        1: 'username',
        2: 'first_name',
        3: 'last_name',
        4: 'email',
        5: 'last_login',
        6: 'date_joined',
        7: 'is_active',
        8: 'person'
    }
    def _get_user_actions(self, request):
        """Return the Show/Edit block actions for users holding EDIT permission."""
        user_actions = []
        if request.user.has_perm(EDIT, obj=ObjectWithContext(request.resource)):
            user_actions += [
                ResourceBlockAction(
                    block_name = self.BLOCK_NAME,
                    resource = request.resource,
                    name=VIEW, verbose_name=_("Show"),
                    popup_form=False,
                    method="get",
                ),
                ResourceBlockAction(
                    block_name = self.BLOCK_NAME,
                    resource = request.resource,
                    name=EDIT_MULTIPLE, verbose_name=_("Edit"),
                    popup_form=False,
                    method="get",
                ),
            ]
        return user_actions
    def _get_resource_list(self, request):
        """Rather than adding a 'users' method to the resource,
        we compute users list here, because users may be not still bound to
        the correspondent Person. This block is in fact used only for Admin
        purposes during a specific stage of the registration process.
        """
        raise ProgrammingError("You must use a subclass to retrieve users list")
    def _get_edit_multiple_form_class(self):
        # One SingleUserForm per user in the subclass-provided queryset.
        qs = self._get_resource_list(self.request)
        return formset_factory(
                    form=SingleUserForm,
                    formset=BaseFormSetWithRequest,
                    extra=qs.count() #0
        )
    def _get_records(self, request, querySet):
        """Return records of rendered table fields."""
        # First pass: build the formset's bound data, one 'form-N-*' prefix
        # per user in the queryset.
        data = {}
        i = 0
        c = querySet.count()
        map_info = { }
        av = True  # NOTE(review): unused local, kept as-is
        for i,el in enumerate(querySet):
            key_prefix = 'form-%d' % i
            try:
                # Cache the related Person; it may not exist yet during
                # the registration stage this block is used for.
                el._cached_p = el.person
            except Person.DoesNotExist as e:
                el._cached_p = None
            data.update({
                '%s-id' % key_prefix : el.pk,
                '%s-pk' % key_prefix : el.pk,
                '%s-is_active' % key_prefix : bool(el.is_active),
                '%s-person' % key_prefix : el._cached_p,
            })
            map_info[el.pk] = {'formset_index' : i}
        # Management-form bookkeeping required by Django formsets.
        data['form-TOTAL_FORMS'] = c
        data['form-INITIAL_FORMS'] = c
        data['form-MAX_NUM_FORMS'] = 0
        formset = self._get_edit_multiple_form_class()(request, data)
        # Second pass: one display record per user, pairing model values
        # with the matching bound form fields.
        records = []
        for i, el in enumerate(querySet):
            form = formset[map_info[el.pk]['formset_index']]
            if el._cached_p:
                person = el._cached_p
                person_urn = el._cached_p.urn
            else:
                person = form['person']
                person_urn = None
            records.append({
                'id' : "%s %s" % (form['pk'], form['id']),
                'username' : el.username,
                'first_name' : el.first_name,
                'last_name' : el.last_name,
                'email' : el.email,
                'last_login' : el.last_login,
                'date_joined' : el.date_joined,
                 | 'is_active' : form['is_active'],
                'person' : person,
                'person_urn': person_urn,
            })
        return formset, records, {}
| |
tomchuk/meetup | meetup/todo/serializers.py | Python | mit | 182 | 0 | from rest_framework import seri | alizers
from .models import Todo
class TodoSerializer(ser | ializers.ModelSerializer):
    """Serialize every field of the Todo model for the REST API."""
    class Meta:
        model = Todo
        fields = '__all__'
|
cherokee/pyscgi | tests/test5.py | Python | bsd-3-clause | 850 | 0.024706 | import os
import CTK
UPLOAD_DIR = "/tmp"
|
def ok (filename, target_dir, target_file, params):
    """Upload-complete handler: render an HTML page confirming the upload."""
    txt = "<h1>It worked!</h1>"
    # Shell out to `ls -l` on the stored file to show size/permissions.
    # NOTE(review): the ' | ' inside os.popen below looks like corruption in this copy.
    txt += "<pre>%s</pre>" %(os. | popen("ls -l '%s'" %(os.path.join(target_dir, target_file))).read())
    txt += "<p>Params: %s</p>" %(str(params))
    txt += "<p>Filename: %s</p>" %(filename)
    return txt
class default:
    """Root page: two CTK Uploader widgets (direct and temporal) for manual testing."""
    def __init__ (self):
        self.page = CTK.Page ()
        self.page += CTK.RawHTML ("<h1>Direct Upload with params</h1>")
        # Direct upload: 'ok' handler also receives extra params {'var': 'foo'}.
        self.page += CTK.Uploader({'handler': ok, 'target_dir': UPLOAD_DIR}, {'var':'foo'})
        self.page += CTK.RawHTML ("<h1>Temporal Upload without params</h1>")
        # Temporal upload: file is staged first (direct=False), no extra params.
        self.page += CTK.Uploader({'handler': ok, 'target_dir': UPLOAD_DIR}, direct=False)
    def __call__ (self):
        # Invoked per request by CTK.publish; renders the composed page.
        return self.page.Render()
# Serve the page at the site root and run the SCGI server on port 8000.
CTK.publish ('', default)
CTK.run (port=8000)
|
alirizakeles/tendenci | tendenci/apps/categories/urls.py | Python | gpl-3.0 | 227 | 0.004405 | from django.conf.urls import patterns, url
# Single endpoint: edit the categories of any model instance, addressed by
# app label / model name / primary key.
# NOTE(review): the stray ' | ' tokens below look like corruption in this copy.
urlpa | tterns = patterns('tendenci.apps.categories.views',
    url(r'^update/(?P<app_label>\w+)/(?P<model>\w+)/(?P<p | k>[\w\d]+)/$',
        'edit_categories', name="category.update"),
)
|
Cal-CS-61A-Staff/ok | migrations/versions/7e1d5c529924_custom_submission_times_for_backup.py | Python | apache-2.0 | 1,201 | 0.011657 | """Custom submission times for Backup
Revision ID: 7e1d5c529924
Revises: 6ce2cf5c4534
Create Date: 2016-10-25 20:31:14.423524
"""
# revision identifiers, used by Alembic.
revision = '7e1d5c529924'
down_revision = '6ce2cf5c4534'
from alembic import op
import sqlalchemy as sa
import server
from sqlalchemy.dialects import mysql
def upgrade():
    """Add backup.creator_id (FK -> user) and backup.custom_submission_time; drop backup.extension."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('backup', sa.Column('creator_id', sa.Integer(), nullable=True))
    op.add_column('backup', sa.Column('custom_submission_time', sa.DateTime(timezone=True), nullable=True))
    # NOTE(review): the ' | ' inside the constraint name looks like corruption in this copy.
    op.create_foreign_key(op.f('fk_backup_creato | r_id_user'), 'backup', 'user', ['creator_id'], ['id'])
    op.drop_column('backup', 'extension')
    ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): restore backup.extension, drop the FK and the new columns."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('backup', sa.Column('extension', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True))
    # NOTE(review): the ' | ' inside the table name looks like corruption in this copy.
    op.drop_constraint(op.f('fk_backup_creator_id_user'), 'backu | p', type_='foreignkey')
    op.drop_column('backup', 'custom_submission_time')
    op.drop_column('backup', 'creator_id')
    ### end Alembic commands ###
|
cxchope/YashiLogin | tests/test_searchuser.py | Python | mit | 448 | 0.004739 | # -*- coding | :utf-8 -*-
import test_core
impo | rt sys
import demjson
# Banner for this test case ("search user").
test_core.title("搜索用户")
# Load the shared test configuration file.
f = open("testconfig.json", 'r')
lines = f.read()
f.close()
jsonfiledata = demjson.decode(lines)
if jsonfiledata["url"] == "":
    # Config incomplete: report the error and bail out.
    test_core.terr("错误: 'testconfig.json' 配置不完全。")
    exit()
# POST a username search to search.php; the search term comes from argv[1].
uurl = jsonfiledata["url"]+"search.php"
udataarr = {
    'type': "username",
    'word': sys.argv[1]
}
test_core.postarray(uurl,udataarr,True)
|
UdK-VPT/Open_eQuarter | oeq_tb/resources.py | Python | gpl-2.0 | 5,957 | 0.000839 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.12.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x04\x0a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x02\x15\
\x16\x11\x2c\x9d\x48\x83\xbb\x00\x00\x03\x8a\x49\x44\x41\x54\x48\
\xc7\xad\x95\x4b\x68\x5c\x55\x18\xc7\x7f\xe7\xdc\x7b\x67\xe6\xce\
\x4c\x66\x26\x49\xd3\x24\x26\xa6\xc6\xf8\x40\x21\xa5\x04\xb3\x28\
\xda\x98\x20\xa5\x0b\xad\x55\xa8\x2b\xc5\x50\x1f\xa0\x6e\x34\x2b\
\x45\x30\x14\x02\xba\x52\x69\x15\x17\x66\x63\x45\x97\x95\xa0\xad\
\x0b\xfb\xc0\x06\x25\xb6\x71\x61\x12\x41\x50\xdb\x2a\x21\xd1\xe2\
\x24\xf3\x9e\xc9\xcc\xbd\xe7\x1c\x17\x35\x43\x1e\x33\x21\xb6\xfd\
\x56\x87\xf3\x9d\xfb\xfb\x1e\xf7\xff\x9d\x23\x8c\x31\x43\x95\xf4\
\x85\x1e\x3f\x3b\x35\xac\xfd\xcc\x43\xdc\xa4\x49\x3b\xfe\x9d\x1d\
\xdb\x7b\x22\x90\x78\xf8\xb2\x28\xa7\xbe\x7d\xc1\x4b\x9d\x79\xdf\
\x18\x15\xe5\x16\x99\x10\x56\xde\x69\xdc\x3f\x22\xfd\xec\xd4\xf0\
\xad\x04\x03\x18\xa3\xa2\x7e\x76\x6a\x58\xde\x68\x2b\xb4\x36\xf8\
\xbe\xc6\x18\x53\xdb\xef\xe7\xfa\xec\xed\x67\x63\x10\x42\x00\xf0\
\xfb\xd5\x65\x2a\x15\x45\xc7\x6d\x0d\x00\xc4\xa2\xc1\xaa\x6f\x0d\
\x3e\x6c\xab\xc2\x1c\x56\xa4\x77\x4b\xb0\xf2\x35\x15\x5f\x21\x85\
\xe0\xc8\x6b\x5f\x92\x2d\x37\x33\x39\xf9\x03\x27\x8e\x1f\xa2\xf7\
\xbe\x9d\x04\x1c\x0b\x37\xe4\xac\xff\xa6\x30\x87\xbd\xba\x00\x6a\
\x06\x79\xe5\xf5\xaf\x89\xd9\x92\xc5\xcc\x0a\xd9\x7c\x19\xcf\xe9\
\xe2\xe4\xa9\x2f\x78\x7c\xff\x01\x72\x85\x0a\x2b\x65\x1f\xa5\x4c\
\xb5\xb2\x55\x16\x80\xbd\x31\xda\xda\x20\x1f\x7d\x3e\xcd\xc2\xfd\
\x59\xa6\x93\x39\x92\xd1\x22\xea\x9b\x16\xce\x9d\x3f\xce\xe0\x83\
\x03\x24\x82\x59\x3a\xdb\x7b\x88\xc7\x82\x68\x63\x58\xc9\xcc\x62\
\x8c\x21\x18\xb0\x6a\xc3\x37\x06\x49\x16\xff\x24\x6b\xa5\x49\xbb\
\x25\xbc\xa2\xa6\x21\xbb\x40\x7f\xdf\x00\x83\xbd\x01\x8e\x3c\xd5\
\x45\xd7\x8e\x6b\x9c\x9c\x98\x25\x1a\xb6\xe8\xbe\x3d\xc2\xdd\x77\
\x44\x48\xc4\x1c\x22\xe1\xeb\x58\x59\xaf\xcf\xd3\x33\x29\x2e\x34\
\x2d\x91\x93\x3e\xbe\x34\x78\x01\xc5\xe2\x61\xc5\xae\x72\x8e\x70\
\xc8\xc2\x0d\x5a\xbc\xf5\xee\x2f\x9c\xfa\x3e\x86\x69\x7a\x8e\xcf\
\x26\xe6\xf9\x63\xa1\x44\xa1\xa4\xd0\xda\x6c\x0d\x2f\x15\x7c\xb4\
\x67\x28\x59\x0a\xcf\xd6\x54\xe2\x06\x13\x87\x2b\x6f\x68\xa6\x27\
\xaf\x31\x32\x36\xc7\xb2\x7f\x17\xef\x7d\x7c\x8c\x33\x67\xcf\x12\
\x70\x24\x4a\x69\xd6\x6a\x46\xd6\xd3\x70\x72\xa9\x82\x67\x34\x45\
\xad\x28\xdb\x1a\x15\x34\x98\xff\x46\xed\xef\x37\x0d\x99\xbf\x4a\
\x3c\x30\x38\xc0\xc8\x4b\xaf\x92\x5a\x9c\xe2\xe0\x23\x6d\x74\xb4\
\xba\x84\x5d\x0b\x29\x45\x7d\xb8\x94\x82\x96\xb6\x10\xf3\xc5\x12\
\x2a\xef\x53\x11\x1a\x63\xad\x3f\x93\x19\x85\xf1\xb1\x77\x58\x5a\
\xf8\x99\x97\x9f\xe9\xa6\x75\x47\x90\xc6\xb8\x43\xd8\xb5\xb6\xce\
\xfc\xfa\xfd\x00\xfb\x3e\xf4\xc8\x05\x35\xba\x5e\xeb\x46\x21\xf9\
\xcf\x0a\xa9\x8c\x87\xe3\x48\xdc\x90\xb5\x6e\x98\x6a\xaa\x65\xf2\
\x52\x92\x43\x2f\x5e\xc2\x8c\x02\x1a\x10\xf5\x07\xac\xc3\x75\x70\
\x83\x92\x80\xb3\xf9\xd0\x26\xf8\x8f\xb3\x29\xc6\x3e\xb8\x8c\x19\
\x35\x75\x6b\x7b\x7e\x3c\xca\x45\x0c\x7e\x49\x31\xf4\x58\x3b\xf7\
\xf6\x34\x90\x88\x39\x04\x1c\x59\x1f\xfe\xdb\xd5\x3c\x5f\x9d\x4b\
\x32\xfd\x44\xb2\xba\xd7\xfa\xb6\x60\xcf\xde\x16\xdc\x90\x45\x4c\
\x4a\x2a\x9e\x62\xfe\x4e\xc5\xc8\xc1\x4e\xda\x76\x86\xe8\xe9\x0a\
\xe3\xd8\x92\x58\xd4\xc6\xb2\x44\x6d\x78\x2a\x53\xe1\xca\x7c\x99\
\x63\x5d\xbf\x56\x9d\xbd\x9f\x44\x18\x7a\xba\x95\x27\x0f\xb4\xd3\
\xdc\x18\xc0\xf3\x0d\x52\x40\xd8\xb5\xb0\xa4\x20\x14\xb2\x70\x6c\
\x81\x63\xcb\xaa\x42\xd6\xfd\xb7\xf4\xec\xa3\x06\xa0\x50\x52\xd8\
\x4e\x1b\x7e\x4a\xd3\x31\xf9\x29\xcf\xfe\xd4\x49\x7f\x5f\x13\xfb\
\xfa\x9b\x71\x43\x92\x58\xd4\x21\x18\x90\xac\xde\xb0\x42\x50\x13\
\x58\x33\xf3\x88\x6b\xa1\xfd\x65\x96\xf2\x79\xc6\x43\x7b\xd8\x75\
\x38\xcc\x3d\xdd\xd1\xaa\xcf\x71\xe4\xff\x7f\x91\x56\x33\xaf\xea\
\x37\xe7\xa1\x94\x21\x16\xb5\xd1\x06\x2c\x29\x36\xf5\x72\x9b\x96\
\x95\xc0\xc4\xda\x9d\x78\x83\x43\x53\x22\x80\x65\x09\x1c\xfb\x86\
\xc1\x00\xe7\x25\x70\x14\x48\x6f\x1e\x22\x51\xe3\x75\xd9\xb6\xa5\
\x81\xa3\x32\xb1\xfb\xf4\x0c\x30\xb8\xb1\x82\x9b\xb0\x09\x60\x30\
\xb1\xfb\xf4\xcc\xbf\xa0\xe9\x6e\xae\x5a\xdf\x4b\x81\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x06\
\x07\x5c\x76\xa2\
\x00\x6f\
\x00\x65\x00\x71\x00\x5f\x00\x74\x00\x62\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x26\x00\x00\x00\x | 00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x69\x23\xc2\x96\x6e\
"
# Pick the resource-struct format matching the installed Qt: versions
# before 5.8 only understand the v1 layout; newer Qt expects v2.
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    """Register the embedded resource data with Qt's resource system."""
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded resource data from Qt's resource system."""
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
# Register at import time (standard behaviour of pyrcc-generated modules).
qInitResources()
|
scryver/gampy | gampy/engine/events/time.py | Python | gpl-3.0 | 2,008 | 0.005976 | __author__ = 'michiel'
import time
class Time:
    """Small clock helper holding the per-frame delta time."""

    def __init__(self):
        # Seconds elapsed during the most recent tick; updated via the setter.
        self._delta = 0.0

    @staticmethod
    def get_time():
        """Return a monotonic timestamp in seconds."""
        return time.monotonic()

    @staticmethod
    def sleep():
        """Yield the CPU for roughly one millisecond."""
        time.sleep(0.001)

    @property
    def delta(self):
        """Seconds elapsed during the last tick."""
        return self._delta

    @delta.setter
    def delta(self, value):
        self._delta = value
class Timing:
    """Decorator that records per-function call durations via a coroutine collector."""
    def __init__(self):
        # name -> {'timings': [ms, ...], 'count': int, 'total': ms}
        self.timings = {}
        self.col = self.__collector()
        next(self.col) #coroutine syntax: advance to the first yield
    def __collector(self):
        # Coroutine receiving (name, elapsed_ms) tuples and aggregating them.
        while True:
            (name, t) = (yield) #coroutine syntax
            if name in self.timings:
                self.timings[name]['timings'] += [t]
                self.timings[name]['count'] += 1
                self.timings[name]['total'] += t
            else:
                self.timings[name] = {} #if this entry doesn't exist yet
                self.timings[name]['timings'] = [t]
                self.timings[name]['count'] = 1
                self.timings[name]['total'] = t
    def __call__(self, func):
        """Turn the object into a decorator"""
        def wrapper(*arg, **kwargs):
            t1 = time.time() #start time
            res = func(*arg, **kwargs) #call the originating function
            # NOTE(review): the stray '|' below looks like corruption in this copy.
            | t2 = time.time() #stop time
            t = (t2-t1)*1000.0 #time in milliseconds
            data = (func.__name__, t)
            self.col.send(data) #collect the data
            return res
        return wrapper
    def __str__(self):
        # One line per timed function: "<name> | average/total/count".
        s = 'Timings:\n'
        for key in self.timings.keys():
            s += '{timingKey} | '.format(timingKey=key)
            ts = self.timings[key]['timings']
            count = self.timings[key]['count']
            total = self.timings[key]['total']
            s += 'average: {avg} | total: {tot} | count: {cnt}\n'.format(avg=total / count, tot=total, cnt=count)
        return '{}'.format(s) |
google/google-ctf | third_party/edk2/BaseTools/Source/Python/CommonDataClass/FdfClass.py | Python | apache-2.0 | 7,994 | 0.008506 | ## @file
# classes represent data in FDF
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
## FD data in FDF
#
#
class FDClassObject:
    """Plain data holder for one [FD] (flash device) section of an FDF file.

    Every field starts empty; the FDF parser fills them in afterwards.
    """
    def __init__(self):
        self.FdUiName = ''
        self.CreateFileName = None
        # Flash geometry, optionally backed by PCDs.
        self.BaseAddress = self.BaseAddressPcd = None
        self.Size = self.SizePcd = None
        self.ErasePolarity = None
        self.BlockSizeList = []      # (blockSize, numBlocks, pcd) 3-tuples
        self.DefineVarDict = {}      # DEFINE var -> value
        self.SetVarDict = {}         # SET var -> value
        self.RegionList = []         # region objects laid out in this device
## FFS data in FDF
#
#
class FfsClassObject:
    """Common FFS-file attributes shared by FILE statements and INF rules."""
    def __init__(self):
        # GUID and alignment are unset until parsed.
        self.NameGuid = None
        self.Fixed = self.CheckSum = False
        self.Alignment = None
        self.SectionList = []
## FILE statement data in FDF
#
#
class FileStatementClassObject(FfsClassObject):
    """Data holder for a FILE statement: FFS attributes plus FV/FD placement."""
    def __init__(self):
        FfsClassObject.__init__(self)
        # Unset until parsed from the FILE statement.
        self.FvFileType = None
        self.FileName = None
        self.KeyStringList = []
        self.FvName = self.FdName = None
        self.DefineVarDict = {}
        self.KeepReloc = None
## INF statement data in FDF
#
#
class FfsInfStatementClassObject(FfsClassObject):
    """Data holder for an INF statement inside an [FV] section."""
    def __init__(self):
        FfsClassObject.__init__(self)
        # Module identity, all unset until the INF line is parsed.
        self.Rule = self.Version = self.Ui = self.InfFileName = None
        self.BuildNum = ''
        self.KeyStringList = []
        self.KeepReloc = None
        self.UseArch = None
## section data in FDF
#
#
class SectionClassObject:
    """Base for all FDF section objects; carries only the alignment."""
    def __init__(self):
        # None means no explicit alignment was given in the FDF.
        self.Alignment = None
## Depex expression section in FDF
#
#
class DepexSectionClassObject(SectionClassObject):
    """Depex-expression section.

    NOTE(review): does not call SectionClassObject.__init__, so instances
    carry no Alignment attribute — preserved exactly as in the original.
    """
    def __init__(self):
        self.DepexType = None
        self.Expression = None
        self.ExpressionProcessed = False
## Compress section data in FDF
#
#
class CompressSectionClassObject(SectionClassObject):
    """Compressed section wrapping a list of child sections."""
    def __init__(self):
        SectionClassObject.__init__(self)
        self.CompType = None      # compression type identifier
        self.SectionList = []     # child sections to compress
## Data section data in FDF
#
#
class DataSectionClassObject (SectionClassObject):
    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        SectionClassObject.__init__(self)
        # NOTE(review): the stray '|' line below looks like corruption in this copy.
        |
        self.SecType = None        # section type, unset until parsed
        self.SectFileName = None   # file providing the section data
        self.SectionList = []
        self.KeepReloc = True      # relocation info kept by default
## Rule section data in FDF
#
#
class EfiSectionClassObject (SectionClassObject):
    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        SectionClassObject.__init__(self)
        self.SectionType = None
        self.Optional = False
        self.FileType = None
        self.StringData = None
        # NOTE(review): the ' | ' below looks like corruption in this copy (FileName).
        self.F | ileName = None
        self.FileExtension = None
        self.BuildNum = None
        self.KeepReloc = None
## FV image section data in FDF
#
#
class FvImageSectionClassObject(SectionClassObject):
    """Firmware-volume image section."""
    def __init__(self):
        SectionClassObject.__init__(self)
        # The FV may be supplied inline, by name, or by a file reference.
        self.Fv = self.FvName = None
        self.FvFileType = None
        self.FvFileName = self.FvFileExtension = None
        self.FvAddr = None
## GUIDed section data in FDF
#
#
class GuidSectionClassObject(SectionClassObject):
    """GUID-defined section wrapping child sections."""
    def __init__(self):
        SectionClassObject.__init__(self)
        self.NameGuid = None
        self.SectionList = []
        self.SectionType = None
        # Flags default to off; extra header size of -1 means "not yet known".
        self.ProcessRequired = self.AuthStatusValid = False
        self.ExtraHeaderSize = -1
        self.FvAddr = []
        self.FvParentAddr = None
        self.IncludeFvSection = False
## UI section data in FDF
#
#
class UiSectionClassObject(SectionClassObject):
    """UI (human-readable name) section."""
    def __init__(self):
        SectionClassObject.__init__(self)
        # Either an inline string or a file provides the UI text.
        self.StringData = None
        self.FileName = None
## Version section data in FDF
#
#
class VerSectionClassObject(SectionClassObject):
    """Version section: build number plus inline string or file source."""
    def __init__(self):
        SectionClassObject.__init__(self)
        self.BuildNum = None
        self.StringData = None
        self.FileName = None
## Rule data in FDF
#
#
class RuleClassObject:
    """Base data holder for a [Rule] section in an FDF file."""
    def __init__(self):
        # Rule header fields, unset until parsed.
        self.Arch = self.ModuleType = self.TemplateName = None
        self.NameGuid = None
        self.Fixed = False
        self.Alignment = self.SectAlignment = None
        self.CheckSum = False
        self.FvFileType = None      # FFS file type produced by this rule
        self.KeyStringList = []
        self.KeepReloc = None
## Complex rule data in FDF
#
#
class RuleComplexFileClassObject(RuleClassObject):
    """Rule building an FFS file from an explicit list of sections."""
    def __init__(self):
        RuleClassObject.__init__(self)
        self.SectionList = []
## Simple rule data in FDF
#
#
class RuleSimpleFileClassObject(RuleClassObject):
    """Rule building an FFS file from a single leaf file."""
    def __init__(self):
        RuleClassObject.__init__(self)
        self.FileName = None
        self.SectionType = ''
        self.FileExtension = None
## File extension rule data in FDF
#
#
class RuleFileExtensionClassObject(RuleClassObject):
    """Rule matched by output file extension."""
    def __init__(self):
        RuleClassObject.__init__(self)
        self.FileExtension = None
## Capsule data in FDF
#
#
class CapsuleClassObject:
    """Data holder for a [Capsule] section in an FDF file."""
    def __init__(self):
        # Capsule identity, unset until parsed.
        self.SpecName = self.UiCapsuleName = None
        self.CreateFile = None
        self.GroupIdNumber = None
        # DEFINE / SET / token statements: name -> value.
        self.DefineVarDict = {}
        self.SetVarDict = {}
        self.TokensDict = {}
        # Payload contents.
        self.CapsuleDataList = []
        self.FmpPayloadList = []
## OptionROM data in FDF
#
#
class OptionRomClassObject:
    """Data holder for an [OptionRom] section."""
    def __init__(self):
        self.DriverName = None   # output OptionROM driver name
        self.FfsList = []        # FFS statements packed into the ROM
|
roadmapper/ansible | lib/ansible/modules/cloud/vmware/vmware_guest_serial_port.py | Python | gpl-3.0 | 19,614 | 0.003161 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Anusha Hegde <anushah@vmware.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_guest_serial_port
short_description: Manage serial ports on an existing VM
version_added: "2.10"
description:
- "This module can be used to manage serial ports on an existing VM"
options:
name:
description:
- Name of the virtual machine.
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
type: str
uuid:
description:
- UUID of the instance to manage the serial ports, this is VMware's unique identifier.
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
type: str
moid:
description:
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
- This is required if C(name) or C(uuid) is not supplied.
type: str
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: no
type: bool
backings:
type: list
description:
- A list of backings for serial ports.
- 'C(backing_type) (str): is required to add or reconfigure or remove an existing serial port.'
- | 'Valid attributes are:'
- ' - C(backing_type) (str): Backing type is required for the serial ports to be added or reconfigured or removed.'
- ' - C(state) (str): is required to identify whether we are adding, modifying or removing the serial port.
- choices:
| - C(present): modify an existing serial port. C(backing_type) is required to determine the port.
The first matching C(backing_type) and either of C(service_uri) or C(pipe_name) or C(device_name) or C(file_path) will be modified.
If there is only one device with a backing type, the secondary details are not needed.
We will match the last such device with the given backing type.
- C(absent): remove an existing serial port. C(backing_type) is required to determine the port.
The first matching C(backing_type) and either of C(service_uri) or C(pipe_name) or C(device_name) or C(file_path) will be removed.
If there is only one device with a backing type, the secondary details are not needed.
We will match the last such device with the given backing type.'
- ' - C(yield_on_poll) (bool): Enables CPU yield behavior. Default value is true.'
- ' - C(direction) (str): Required when I(backing_type=network).
The direction of the connection.
- choices:
- client
- server'
- ' - C(service_uri) (str): Required when I(backing_type=network).
Identifies the local host or a system on the network, depending on the value of I(direction).
If you use the virtual machine as a server, the URI identifies the host on which the virtual machine runs.
In this case, the host name part of the URI should be empty, or it should specify the address of the local host.
If you use the virtual machine as a client, the URI identifies the remote system on the network.'
- ' - C(endpoint) (str): Required when I(backing_type=pipe).
When you use serial port pipe backing to connect a virtual machine to another process, you must define the endpoints.'
- ' - C(no_rx_loss) (bool): Required when I(backing_type=pipe).
Enables optimized data transfer over the pipe.
- choices:
- client
- server'
- ' - C(pipe_name) (str): Required when I(backing_type=pipe).'
- ' - C(device_name) (str): Required when I(backing_type=device).'
- ' - C(file_path) (str): Required when I(backing_type=file).
File path for the host file used in this backing. Fully qualified path is required, like <datastore_name>/<file_name>'
extends_documentation_fragment:
- vmware.documentation
author:
- Anusha Hegde (@anusha94)
'''
EXAMPLES = '''
# Create serial ports
- name: Create multiple serial ports with Backing type - network, pipe, device and file
vmware_guest_serial_port:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
name: "test_vm1"
backings:
- type: 'network'
direction: 'client'
service_uri: 'tcp://6000'
yield_on_poll: True
- type: 'pipe'
pipe_name: 'serial_pipe'
endpoint: 'client'
- type: 'device'
device_name: '/dev/char/serial/uart0'
- type: 'file'
file_path: '[datastore1]/file1'
yield_on_poll: True
register: create_multiple_ports
# Modify existing serial port
- name: Modify Network backing type
vmware_guest_serial_port:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
name: '{{ name }}'
backings:
- type: 'network'
state: 'present'
direction: 'server'
service_uri: 'tcp://6000'
delegate_to: localhost
# Remove serial port
- name: Remove pipe backing type
vmware_guest_serial_port:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
name: '{{ name }}'
backings:
- type: 'pipe'
state: 'absent'
delegate_to: localhost
'''
RETURN = r'''
serial_port_data:
description: metadata about the virtual machine's serial ports after managing them
returned: always
type: dict
sample: [
{
"backing_type": "network",
"direction": "client",
"service_uri": "tcp://6000"
},
{
"backing_type": "pipe",
"direction": "server",
"pipe_name": "serial pipe"
},
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task
from ansible.module_utils._text import to_native
try:
from pyVmomi import vim
except ImportError:
pass
class PyVmomiHelper(PyVmomi):
""" This class is a helper to create easily VMware Spec for PyVmomiHelper """
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
self.change_applied = False # a change was applied meaning at least one task succeeded
self.config_spec = vim.vm.ConfigSpec()
self.config_spec.deviceChange = []
self.serial_ports = []
def check_vm_state(self, vm_obj):
"""
To add serial port, the VM must be in powered off state
Input:
- vm: Virtual Machine
Output:
- True if vm is in poweredOff state
- module fails otherwise
"""
if vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff:
return True
else:
self.module.fail_json(msg="A serial device cannot be added to a VM in the current state(" + vm_obj.runtime.powerState + ")."
+ "Please use the vmware_guest_powerstate module to power off the VM")
def get_serial_port_config_spec(self, vm_obj):
"""
Variables changed:
- self.config_spec
- self.change_applied
"""
# create serial config spec for adding, editing, removing
for backing in self.params.get('backings'):
backing_keys = backing.keys()
serial_port = get_serial_port(vm_obj, backing)
if serial_port is None and 'state' not in backing_keys:
# if serial port is None and state is not mentioned
# create a new serial port
serial_port_spec = self.create_serial_port(backing)
|
sumit4iit/django-guardian | guardian/tests/other_test.py | Python | bsd-2-clause | 12,625 | 0.005861 |
from itertools import chain
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.test import TestCase
import guardian
from guardian.backends import ObjectPermissionBackend
from guardian.exceptions import GuardianError
from guardian.exceptions import NotUserNorGroup
from guardian.exceptions import ObjectNotPersisted
from guardian.exceptions import WrongAppError
from guardian.models import GroupObjectPermission
from guardian.models import UserObjectPermission
from guardian.models import AnonymousUser
from guardian.models import Group
from guardian.models import Permission
from guardian.models import User
class UserPermissionTests(TestCase):
    """Per-object permission assignment/removal for individual users."""
    fixtures = ['tests.json']
    def setUp(self):
        # One known user plus three ContentType rows used as guarded objects.
        self.user = User.objects.get(username='jack')
        self.ctype = ContentType.objects.create(name='foo', model='bar',
            app_label='fake-for-guardian-tests')
        self.obj1 = ContentType.objects.create(name='ct1', model='foo',
            app_label='guardian-tests')
        self.obj2 = ContentType.objects.create(name='ct2', model='bar',
            app_label='guardian-tests')
    def test_assignement(self):
        # Assigning grants the perm both bare and app-label-qualified.
        self.assertFalse(self.user.has_perm('change_contenttype', self.ctype))
        UserObjectPermission.objects.assign('change_contenttype', self.user,
            self.ctype)
        self.assertTrue(self.user.has_perm('change_contenttype', self.ctype))
        self.assertTrue(self.user.has_perm('contenttypes.change_contenttype',
            self.ctype))
    def test_assignement_and_remove(self):
        # remove_perm undoes a prior assign.
        UserObjectPermission.objects.assign('change_contenttype', self.user,
            self.ctype)
        self.assertTrue(self.user.has_perm('change_contenttype', self.ctype))
        UserObjectPermission.objects.remove_perm('change_contenttype',
            self.user, self.ctype)
        self.assertFalse(self.user.has_perm('change_contenttype', self.ctype))
    def test_ctypes(self):
        # Permissions are scoped per object, not per content type.
        UserObjectPermission.objects.assign('change_contenttype', self.user, self.obj1)
        self.assertTrue(self.user.has_perm('change_contenttype', self.obj1))
        self.assertFalse(self.user.has_perm('change_contenttype', self.obj2))
        UserObjectPermission.objects.remove_perm('change_contenttype', self.user, self.obj1)
        UserObjectPermission.objects.assign('change_contenttype', self.user, self.obj2)
        self.assertTrue(self.user.has_perm('change_contenttype', self.obj2))
        self.assertFalse(self.user.has_perm('change_contenttype', self.obj1))
        UserObjectPermission.objects.assign('change_contenttype', self.user, self.obj1)
        UserObjectPermission.objects.assign('change_contenttype', self.user, self.obj2)
        self.assertTrue(self.user.has_perm('change_contenttype', self.obj2))
        self.assertTrue(self.user.has_perm('change_contenttype', self.obj1))
        UserObjectPermission.objects.remove_perm('change_contenttype', self.user, self.obj1)
        UserObjectPermission.objects.remove_perm('change_contenttype', self.user, self.obj2)
        self.assertFalse(self.user.has_perm('change_contenttype', self.obj2))
        self.assertFalse(self.user.has_perm('change_contenttype', self.obj1))
    def test_get_for_object(self):
        # get_for_object returns exactly the perms assigned on that object.
        perms = UserObjectPermission.objects.get_for_object(self.user, self.ctype)
        self.assertEqual(perms.count(), 0)
        to_assign = sorted([
            'delete_contenttype',
            'change_contenttype',
        ])
        for perm in to_assign:
            UserObjectPermission.objects.assign(perm, self.user, self.ctype)
        perms = UserObjectPermission.objects.get_for_object(self.user, self.ctype)
        codenames = sorted(chain(*perms.values_list('permission__codename')))
        self.assertEqual(to_assign, codenames)
    def test_assign_validation(self):
        # Wrong-model codename raises; mismatched perm/content-type on create
        # fails model validation.
        # NOTE(review): the ' | ' inside the group name below looks like
        # corruption in this copy.
        self.assertRaises(Permission.DoesNotExist,
            UserObjectPermission.objects.assign, 'change_group', self.user,
            self.user)
        group = Group.objects.create(name='test_g | roup_assign_validation')
        ctype = ContentType.objects.get_for_model(group)
        perm = Permission.objects.get(codename='change_user')
        create_info = dict(
            permission = perm,
            user = self.user,
            content_type = ctype,
            object_pk = group.pk
        )
        self.assertRaises(ValidationError, UserObjectPermission.objects.create,
            **create_info)
    def test_unicode(self):
        # NOTE(review): the ' | ' inside the assign call below looks like
        # corruption in this copy.
        obj_perm = UserObjectPermission.objects.assi | gn("change_user",
            self.user, self.user)
        self.assertTrue(isinstance(obj_perm.__unicode__(), unicode))
    def test_errors(self):
        # All manager entry points reject objects that were never saved.
        not_saved_user = User(username='not_saved_user')
        self.assertRaises(ObjectNotPersisted,
            UserObjectPermission.objects.assign,
            "change_user", self.user, not_saved_user)
        self.assertRaises(ObjectNotPersisted,
            UserObjectPermission.objects.remove_perm,
            "change_user", self.user, not_saved_user)
        self.assertRaises(ObjectNotPersisted,
            UserObjectPermission.objects.get_for_object,
            "change_user", not_saved_user)
class GroupPermissionTests(TestCase):
fixtures = ['tests.json']
    def setUp(self):
        # "jack" comes from the fixture; place him in a fresh group so that
        # group-level object permissions become visible through the user.
        self.user = User.objects.get(username='jack')
        self.group, created = Group.objects.get_or_create(name='jackGroup')
        self.user.groups.add(self.group)
        # Arbitrary ContentType rows used purely as permission target objects.
        self.ctype = ContentType.objects.create(name='foo', model='bar',
            app_label='fake-for-guardian-tests')
        self.obj1 = ContentType.objects.create(name='ct1', model='foo',
            app_label='guardian-tests')
        self.obj2 = ContentType.objects.create(name='ct2', model='bar',
            app_label='guardian-tests')
def test_assignement(self):
self.assertFalse(self.user.has_perm('change_contenttype', self.ctype))
self.assertFalse(self.user.has_perm('contenttypes.change_contenttype',
self.ctype))
GroupObjectPermission.objects.assign('change_contenttype', self.group,
self.ctype)
self.assertTrue(self.user.has_perm('change_contenttype', self.ctype))
self.assertTrue(self.user.has_perm('contenttypes.change_contenttype',
self.ctype))
def test_assignement_and_remove(self):
GroupObjectPermission.objects.assign('change_contenttype', self.group,
self.ctype)
self.assertTrue(self.user.has_perm('change_contenttype', self.ctype))
GroupObjectPermission.objects.remove_perm('change_contenttype',
self.group, self.ctype)
self.assertFalse(self.user.has_perm('change_contenttype', self.ctype))
    def test_ctypes(self):
        # Perms are scoped per object: granting on obj1 says nothing about obj2.
        GroupObjectPermission.objects.assign('change_contenttype', self.group,
            self.obj1)
        self.assertTrue(self.user.has_perm('change_contenttype', self.obj1))
        self.assertFalse(self.user.has_perm('change_contenttype', self.obj2))
        # Move the permission from obj1 to obj2.
        GroupObjectPermission.objects.remove_perm('change_contenttype',
            self.group, self.obj1)
        GroupObjectPermission.objects.assign('change_contenttype', self.group,
            self.obj2)
        self.assertTrue(self.user.has_perm('change_contenttype', self.obj2))
        self.assertFalse(self.user.has_perm('change_contenttype', self.obj1))
        # Grant on both objects simultaneously.
        GroupObjectPermission.objects.assign('change_contenttype', self.group,
            self.obj1)
        GroupObjectPermission.objects.assign('change_contenttype', self.group,
            self.obj2)
        self.assertTrue(self.user.has_perm('change_contenttype', self.obj2))
        self.assertTrue(self.user.has_perm('change_contenttype', self.obj1))
        # Revoke both; no permissions remain.
        GroupObjectPermission.objects.remove_perm('change_contenttype',
            self.group, self.obj1)
        GroupObjectPermission.objects.remove_perm('change_contenttype',
            self.group, self.obj2)
        self.assertFalse(self.user.has_perm('change_contenttype', self.obj2))
        self.assertFalse(self.user.has_perm('change_contenttype', self.obj1))
def test_get_for_object(self):
|
doismellburning/django | django/contrib/messages/storage/cookie.py | Python | bsd-3-clause | 6,545 | 0.000764 | import json
from django.conf import settings
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import SimpleCookie
from django.utils.crypto import salted_hmac, constant_time_compare
from django.utils.safestring import SafeData, mark_safe
from django.utils import six
class MessageEncoder(json.JSONEncoder):
    """JSON encoder that flattens ``Message`` objects into compact lists.

    A Message becomes ``[message_key, is_safedata, level, message(, extra_tags)]``
    so the cookie payload stays as small as possible.
    """
    message_key = '__json_message'

    def default(self, obj):
        if not isinstance(obj, Message):
            return super(MessageEncoder, self).default(obj)
        # 0/1 rather than False/True keeps the serialized form shorter.
        safe_flag = 1 if isinstance(obj.message, SafeData) else 0
        encoded = [self.message_key, safe_flag, obj.level, obj.message]
        if obj.extra_tags:
            encoded.append(obj.extra_tags)
        return encoded
class MessageDecoder(json.JSONDecoder):
    """JSON decoder that revives ``Message`` instances produced by MessageEncoder."""

    def process_messages(self, obj):
        # Walk containers recursively, rebuilding a Message wherever the
        # encoder's sentinel key appears at the head of a list.
        if isinstance(obj, dict):
            return {key: self.process_messages(value)
                    for key, value in six.iteritems(obj)}
        if not (isinstance(obj, list) and obj):
            return obj
        if obj[0] != MessageEncoder.message_key:
            return [self.process_messages(item) for item in obj]
        if len(obj) == 3:
            # Compatibility with previously-encoded messages (no safe flag).
            return Message(*obj[1:])
        if obj[1]:
            obj[3] = mark_safe(obj[3])
        return Message(*obj[2:])

    def decode(self, s, **kwargs):
        raw = super(MessageDecoder, self).decode(s, **kwargs)
        return self.process_messages(raw)
class CookieStorage(BaseStorage):
    """
    Stores messages in a cookie.
    """
    cookie_name = 'messages'
    # uwsgi's default configuration enforces a maximum size of 4kb for all the
    # HTTP headers. In order to leave some room for other cookies and headers,
    # restrict the session cookie to 1/2 of 4kb. See #18781.
    max_cookie_size = 2048
    # Sentinel appended to the message list when not everything fit in the cookie.
    not_finished = '__messagesnotfinished__'

    def _get(self, *args, **kwargs):
        """
        Retrieves a list of messages from the messages cookie. If the
        not_finished sentinel value is found at the end of the message list,
        remove it and return a result indicating that not all messages were
        retrieved by this storage.
        """
        data = self.request.COOKIES.get(self.cookie_name)
        # _decode returns None for missing, tampered or malformed data.
        messages = self._decode(data)
        all_retrieved = not (messages and messages[-1] == self.not_finished)
        if messages and not all_retrieved:
            # remove the sentinel value
            messages.pop()
        return messages, all_retrieved

    def _update_cookie(self, encoded_data, response):
        """
        Either sets the cookie with the encoded data if there is any data to
        store, or deletes the cookie.
        """
        if encoded_data:
            response.set_cookie(self.cookie_name, encoded_data,
                domain=settings.SESSION_COOKIE_DOMAIN,
                secure=settings.SESSION_COOKIE_SECURE or None,
                httponly=settings.SESSION_COOKIE_HTTPONLY or None)
        else:
            response.delete_cookie(self.cookie_name,
                domain=settings.SESSION_COOKIE_DOMAIN)

    def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
        """
        Stores the messages to a cookie, returning a list of any messages which
        could not be stored.

        If the encoded data is larger than ``max_cookie_size``, removes
        messages until the data fits (these are the messages which are
        returned), and add the not_finished sentinel value to indicate as much.
        """
        unstored_messages = []
        encoded_data = self._encode(messages)
        if self.max_cookie_size:
            # data is going to be stored eventually by SimpleCookie, which
            # adds its own overhead, which we must account for.
            cookie = SimpleCookie()  # create outside the loop

            def stored_length(val):
                return len(cookie.value_encode(val)[1])

            # Drop messages one at a time (oldest or newest first, per
            # remove_oldest) and re-encode until the payload fits.
            while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
                if remove_oldest:
                    unstored_messages.append(messages.pop(0))
                else:
                    unstored_messages.insert(0, messages.pop())
                encoded_data = self._encode(messages + [self.not_finished],
                    encode_empty=unstored_messages)
        self._update_cookie(encoded_data, response)
        return unstored_messages

    def _hash(self, value):
        """
        Creates an HMAC/SHA1 hash based on the value and the project setting's
        SECRET_KEY, modified to make it unique for the present purpose.
        """
        # key_salt namespaces the HMAC so the signature can't be reused for
        # other salted_hmac consumers in the project.
        key_salt = 'django.contrib.messages'
        return salted_hmac(key_salt, value).hexdigest()

    def _encode(self, messages, encode_empty=False):
        """
        Returns an encoded version of the messages list which can be stored as
        plain text.

        Since the data will be retrieved from the client-side, the encoded data
        also contains a hash to ensure that the data was not tampered with.
        """
        if messages or encode_empty:
            encoder = MessageEncoder(separators=(',', ':'))
            value = encoder.encode(messages)
            # Format is "<hmac>$<json>"; verified in _decode below.
            return '%s$%s' % (self._hash(value), value)

    def _decode(self, data):
        """
        Safely decodes an encoded text stream back into a list of messages.

        If the encoded text stream contained an invalid hash or was in an
        invalid format, ``None`` is returned.
        """
        if not data:
            return None
        bits = data.split('$', 1)
        if len(bits) == 2:
            hash, value = bits
            # constant_time_compare avoids leaking hash information through
            # timing differences on attacker-controlled input.
            if constant_time_compare(hash, self._hash(value)):
                try:
                    # If we get here (and the JSON decode works), everything is
                    # good. In any other case, drop back and return None.
                    return json.loads(value, cls=MessageDecoder)
                except ValueError:
                    pass
        # Mark the data as used (so it gets removed) since something was wrong
        # with the data.
        self.used = True
        return None
|
CivicTechTO/django-councilmatic | councilmatic_core/migrations/0006_bill_subject.py | Python | mit | 422 | 0 | # -*- coding: utf | -8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: adds the optional free-text ``subject``
    # column to the Bill model (blank allowed, max 255 characters).

    dependencies = [
        ('councilmatic_core', '0005_auto_20151215_1430'),
    ]

    operations = [
        migrations.AddField(
            model_name='bill',
            name='subject',
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
]
|
gtfierro/BAS | web/smapgeo/__init__.py | Python | gpl-3.0 | 1,039 | 0 | # def inject_app_defaults(application):
# """Inject an application's default settings"""
# try:
# __import__('%s.settings' % application)
# import sys
# # Import our defaults, project defaults, and project settings
# | _app_settings = sys.modules['%s.settings' % application]
# _def_settings = sys.modules['django.conf.global_settings']
# _settings = sys.modules['django.conf'].settings
# # Add the values from the application.settings module
# for _k in dir(_app_settings):
# if _k.isupper():
# # Add the value to the default settings module
# s | etattr(_def_settings, _k, getattr(_app_settings, _k))
# # Add the value to the settings, if not already present
# if not hasattr(_settings, _k):
# setattr(_settings, _k, getattr(_app_settings, _k))
# except ImportError:
# # Silently skip failing settings modules
# pass
# inject_app_defaults(__name__)
|
UrLab/incubator | manmail/admin.py | Python | agpl-3.0 | 3,491 | 0.002312 | from django.contrib import admin
from django.contrib import messages
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from .models import Email
from users.models import User
@admin.register(Email)
class EmailAdmin(admin.ModelAdmin):
    """Admin for newsletter emails with a multi-user approval workflow.

    An email may only be broadcast once at least
    ``settings.MINIMAL_MAIL_APPROVERS`` users have approved it and it has
    not been sent before.
    """
    list_display = ('subject', 'sent', 'created', 'modified', 'get_approvers', 'is_sendable')
    list_filter = ('sent', 'created', 'modified')
    search_fields = ('subject', 'content', )
    readonly_fields = ('sent', 'approvers', 'markdown_content')
    actions = ['approve', "send_email", "send_test_email"]

    def get_approvers(self, obj):
        # Changelist column: comma-separated usernames of everyone who approved.
        return ", ".join([u.username for u in obj.approvers.all()])
    get_approvers.short_description = "Approbateurs"

    def is_sendable(self, obj):
        # Sendable = enough approvals and not already sent.
        return obj.approvers.count() >= settings.MINIMAL_MAIL_APPROVERS and not obj.sent
    is_sendable.short_description = "Est envoyable"

    def approve(self, request, queryset):
        # Admin action: record the current admin user as an approver.
        # Exactly one email must be selected.
        if not queryset.count() == 1:
            self.message_user(request, message="Vous ne devez séléctionner qu'un email à approuver", level=messages.ERROR)
            return
        email = queryset.first()
        email.approvers.add(request.user)
        self.message_user(request, "L'email a été approuvé.")
    approve.short_description = "Approuver cet email"

    def send_email(self, request, queryset):
        # Admin action: broadcast the email to every newsletter subscriber.
        if not queryset.count() == 1:
            self.message_user(request, message="Vous ne devez séléctionner qu'un email à envoyer", level=messages.ERROR)
            return
        email = queryset.first()
        if email.sent:
            self.message_user(request, message="Cet email a déjà été envoyé", level=messages.ERROR)
            return
        if email.approvers.count() < settings.MINIMAL_MAIL_APPROVERS:
            self.message_user(request, message="Ce message n'a pas assez d'approbateurs", level=messages.ERROR)
            return
        # Subscribers go in BCC so they never see each other's address.
        recipients = [u.email for u in User.objects.filter(newsletter=True)]
        message = EmailMultiAlternatives(
            subject=email.subject,
            body=email.content,
            from_email='Newsletter UrLab <contact@urlab.be>',
            to=["UrLab <contact@urlab.be>"],
            bcc=recipients,
        )
        # Plain-text body plus a rendered-markdown HTML alternative.
        message.attach_alternative(email.markdown_content(), "text/html")
        message.send()
        # Mark as sent so the same email cannot be broadcast twice.
        email.sent = True
        email.save()
        self.message_user(request, "L'email a été énvoyé.")
    send_email.short_description = "Envoyer cet email A TOUT LE MONDE"

    def send_test_email(self, request, queryset):
        # Admin action: send a preview only to the requesting user's address;
        # does NOT mark the email as sent, so it can still be broadcast later.
        if not queryset.count() == 1:
            self.message_user(request, message="Vous ne devez séléctionner qu'un email à envoyer", level=messages.ERROR)
            return
        email = queryset.first()
        if email.sent:
            self.message_user(request, message="Cet email a déjà été envoyé", level=messages.ERROR)
            return
        message = EmailMultiAlternatives(
            subject=email.subject,
            body=email.content,
            from_email='Newsletter UrLab <contact@urlab.be>',
            to=["contact-test@urlab.be"],
            bcc=[request.user.email],
        )
        message.attach_alternative(email.markdown_content(), "text/html")
        message.send()
        self.message_user(request, "L'email a été énvoyé à votre adresse")
    send_test_email.short_description = "Envoyer cet email A MOI UNIQUEMENT"
|
kjflyback/June-work | dailywork/forms.py | Python | apache-2.0 | 771 | 0.022049 | from flask_wtf import Form
from wtforms import TextField, Boole | anField, TextAreaField
from wtforms.validators import Req | uired
class LoginForm(Form):
    """OpenID login form: an OpenID identifier plus a remember-me flag."""
    # SECURITY NOTE(review): hard-coded secret committed to source control.
    # Secrets belong in app configuration, not in the form class; also verify
    # whether a per-form SECRET_KEY attribute is actually honoured by Flask-WTF.
    SECRET_KEY = "xman"
    openid = TextField('openid', validators = [Required()])
    remember_me = BooleanField('remember_me', default = False)
class PostForm(Form):
    """Client record form; only ``clientname`` is required, all other
    fields are optional free text."""
    clientname = TextField('clientname', validators = [Required()])
    clienttype = TextField('clienttype')
    clientinterface=TextField('clientinterface')
    telephone = TextField('telephone')
    phone= TextField('phone')
    group=TextField('group')
    comment = TextAreaField('comment')
class ClearForm():
    """Plain namespace of empty strings mirroring PostForm's fields.

    NOTE(review): not a WTForms form -- presumably used to blank out the
    template context after a submit; confirm against the view code.
    """
    clientname = ''
    clienttype = ''
    clientinterface=''
    telephone = ''
    phone= ''
    group=''
    comment = ''
|
mindnuts/proventeq-test-repo | src/python/ios_webview.py | Python | apache-2.0 | 1,607 | 0.001867 | """
Simple iOS WebView tests.
"""
import unittest
import os
from random import randint
from appium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
class WebVie | wIOSTests(unittest.TestCase):
def setUp(self):
# set up appium
app = os.path.join(os.path.dirname(__file__),
'../../apps/WebViewApp/build/Release-iphonesimulator',
'WebVie | wApp.app')
app = os.path.abspath(app)
self.driver = webdriver.Remote(
command_executor='http://127.0.0.1:4723/wd/hub',
desired_capabilities={
'app': app,
'deviceName': 'iPhone Simulator',
'platformName': 'iOS',
'platformVersion': '7.1'
})
def tearDown(self):
self.driver.quit()
def test_get_url(self):
url_el = self.driver.find_element_by_xpath('//UIAApplication[1]/UIAWindow[1]/UIATextField[1]')
url_el.send_keys('http://www.google.com')
go_el = self.driver.find_element_by_accessibility_id('Go')
go_el.click()
sleep(1)
self.driver.switch_to.context('WEBVIEW')
search = self.driver.find_element_by_name('q')
search.send_keys('sauce labs')
search.send_keys(Keys.RETURN)
# allow the page to load
sleep(1)
self.assertEquals('sauce labs - Google Search', self.driver.title)
if __name__ == '__main__':
    # Build and run the suite explicitly so direct invocation is verbose.
    suite = unittest.TestLoader().loadTestsFromTestCase(WebViewIOSTests)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
romain-intel/bcc | tools/lib/ugc.py | Python | apache-2.0 | 7,598 | 0.001579 | #!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# ugc Summarize garbage collection events in high-level languages.
# For Linux, uses BCC, eBPF.
#
# USAGE: ugc [-v] [-m] [-M MSEC] [-F FILTER] {java,python,ruby,node} pid
#
# Copyright 2016 Sasha Goldshtein
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 19-Oct-2016 Sasha Goldshtein Created this.
from __future__ import print_function
import argparse
from bcc import BPF, USDT, utils
import ctypes as ct
import time
import os
# Languages with USDT GC probes this tool knows how to attach to.
languages = ["java", "python", "ruby", "node"]

examples = """examples:
./ugc -l java 185 # trace Java GCs in process 185
./ugc -l ruby 1344 -m # trace Ruby GCs reporting in ms
./ugc -M 10 -l java 185 # trace only Java GCs longer than 10ms
"""
parser = argparse.ArgumentParser(
    description="Summarize garbage collection events in high-level languages.",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
# -l may be omitted; the language is then auto-detected from the pid below.
parser.add_argument("-l", "--language", choices=languages,
    help="language to trace")
parser.add_argument("pid", type=int, help="process id to attach to")
parser.add_argument("-v", "--verbose", action="store_true",
    help="verbose mode: print the BPF program (for debugging purposes)")
parser.add_argument("-m", "--milliseconds", action="store_true",
    help="report times in milliseconds (default is microseconds)")
parser.add_argument("-M", "--minimum", type=int, default=0,
    help="display only GCs longer than this many milliseconds")
parser.add_argument("-F", "--filter", type=str,
    help="display only GCs whose description contains this text")
args = parser.parse_args()

usdt = USDT(pid=args.pid)

# Shared C declarations: the perf event emitted to userspace and the
# per-thread entry record keyed by pid/tgid. Per-language handler code is
# appended to this program later via Probe.generate().
program = """
struct gc_event_t {
    u64 probe_index;
    u64 elapsed_ns;
    u64 field1;
    u64 field2;
    u64 field3;
    u64 field4;
    char string1[32];
    char string2[32];
};
struct entry_t {
    u64 start_ns;
    u64 field1;
    u64 field2;
};
BPF_PERF_OUTPUT(gcs);
BPF_HASH(entry, u64, struct entry_t);
"""
class Probe(object):
    """A begin/end USDT probe pair plus the C snippets and formatter for it."""

    def __init__(self, begin, end, begin_save, end_save, formatter):
        # begin/end: USDT probe names; begin_save/end_save: C fragments spliced
        # into the entry/exit handlers; formatter: renders a GCEvent for display.
        self.begin = begin
        self.end = end
        self.begin_save = begin_save
        self.end_save = end_save
        self.formatter = formatter

    def generate(self):
        """Return the C entry/exit handler pair for this probe pair.

        The %d spliced below converts args.minimum (ms) to ns so short GCs
        are discarded in kernel space.
        """
        text = """
int trace_%s(struct pt_regs *ctx) {
    u64 pid = bpf_get_current_pid_tgid();
    struct entry_t e = {};
    e.start_ns = bpf_ktime_get_ns();
    %s
    entry.update(&pid, &e);
    return 0;
}
int trace_%s(struct pt_regs *ctx) {
    u64 elapsed;
    struct entry_t *e;
    struct gc_event_t event = {};
    u64 pid = bpf_get_current_pid_tgid();
    e = entry.lookup(&pid);
    if (!e) {
        return 0; // missed the entry event on this thread
    }
    elapsed = bpf_ktime_get_ns() - e->start_ns;
    if (elapsed < %d) {
        return 0;
    }
    event.elapsed_ns = elapsed;
    %s
    gcs.perf_submit(ctx, &event, sizeof(event));
    return 0;
}
    """ % (self.begin, self.begin_save, self.end,
           args.minimum * 1000000, self.end_save)
        return text

    def attach(self):
        # Bail out of the whole tool if the target lacks the expected probes.
        usdt.enable_probe_or_bail(self.begin, "trace_%s" % self.begin)
        usdt.enable_probe_or_bail(self.end, "trace_%s" % self.end)

    def format(self, data):
        """Render a decoded GCEvent into a human-readable description."""
        return self.formatter(data)
probes = []
language = args.language
if not language:
    language = utils.detect_language(languages, args.pid)

#
# Java
#
if language == "java":
    # Oddly, the gc__begin/gc__end probes don't really have any useful
    # information, while the mem__pool* ones do. There's also a bunch of
    # probes described in the hotspot_gc*.stp file which aren't there
    # when looking at a live Java process.
    begin_save = """
    bpf_usdt_readarg(6, ctx, &e.field1); // used bytes
    bpf_usdt_readarg(8, ctx, &e.field2); // max bytes
    """
    end_save = """
    event.field1 = e->field1; // used bytes at start
    event.field2 = e->field2; // max bytes at start
    bpf_usdt_readarg(6, ctx, &event.field3); // used bytes at end
    bpf_usdt_readarg(8, ctx, &event.field4); // max bytes at end
    u64 manager = 0, pool = 0;
    bpf_usdt_readarg(1, ctx, &manager); // ptr to manager name
    bpf_usdt_readarg(3, ctx, &pool); // ptr to pool name
    bpf_probe_read(&event.string1, sizeof(event.string1), (void *)manager);
    bpf_probe_read(&event.string2, sizeof(event.string2), (void *)pool);
    """
    def formatter(e):
        # BUG FIX: the description was previously built but never returned,
        # so Probe.format() yielded None and the tool printed "None".
        return "%s %s used=%d->%d max=%d->%d" % \
            (e.string1, e.string2, e.field1, e.field3, e.field2, e.field4)
    probes.append(Probe("mem__pool__gc__begin", "mem__pool__gc__end",
                        begin_save, end_save, formatter))
    probes.append(Probe("gc__begin", "gc__end",
                        "", "", lambda _: "no additional info available"))
#
# Python
#
elif language == "python":
    begin_save = """
    int gen = 0;
    bpf_usdt_readarg(1, ctx, &gen);
    e.field1 = gen;
    """
    end_save = """
    long objs = 0;
    bpf_usdt_readarg(1, ctx, &objs);
    event.field1 = e->field1;
    event.field2 = objs;
    """
    def formatter(event):
        # BUG FIX: add the missing return (previously yielded None).
        return "gen %d GC collected %d objects" % \
            (event.field1, event.field2)
    probes.append(Probe("gc__start", "gc__done",
                        begin_save, end_save, formatter))
#
# Ruby
#
elif language == "ruby":
    # Ruby GC probes do not have any additional information available.
    probes.append(Probe("gc__mark__begin", "gc__mark__end",
                        "", "", lambda _: "GC mark stage"))
    probes.append(Probe("gc__sweep__begin", "gc__sweep__end",
                        "", "", lambda _: "GC sweep stage"))
#
# Node
#
elif language == "node":
    end_save = """
    u32 gc_type = 0;
    bpf_usdt_readarg(1, ctx, &gc_type);
    event.field1 = gc_type;
    """
    # Node reports the GC phase as a bitmask; decode it to a readable list.
    descs = {"GC scavenge": 1, "GC mark-sweep-compact": 2,
             "GC incremental mark": 4, "GC weak callbacks": 8}
    probes.append(Probe("gc__start", "gc__done", "", end_save,
                        lambda e: str.join(", ",
                                           [desc for desc, val in descs.items()
                                            if e.field1 & val != 0])))
else:
    print("No language detected; use -l to trace a language.")
    exit(1)
# Splice each probe's C handlers into the shared program and enable the
# corresponding USDT probes in the target process.
for probe in probes:
    program += probe.generate()
    probe.attach()

if args.verbose:
    print(usdt.get_text())
    print(program)

bpf = BPF(text=program, usdt_contexts=[usdt])
print("Tracing garbage collections in %s process %d... Ctrl-C to quit." %
    (language, args.pid))
time_col = "TIME (ms)" if args.milliseconds else "TIME (us)"
print("%-8s %-8s %-40s" % ("START", time_col, "DESCRIPTION"))
class GCEvent(ct.Structure):
    # Python-side mirror of ``struct gc_event_t`` in the BPF program above;
    # field order and sizes must match the C definition exactly.
    _fields_ = [
        ("probe_index", ct.c_ulonglong),
        ("elapsed_ns", ct.c_ulonglong),
        ("field1", ct.c_ulonglong),
        ("field2", ct.c_ulonglong),
        ("field3", ct.c_ulonglong),
        ("field4", ct.c_ulonglong),
        ("string1", ct.c_char * 32),
        ("string2", ct.c_char * 32)
    ]
# Wall-clock reference for the relative START column printed below.
start_ts = time.time()

def print_event(cpu, data, size):
    """Perf-buffer callback: decode one GC event and print a summary line."""
    event = ct.cast(data, ct.POINTER(GCEvent)).contents
    elapsed = event.elapsed_ns / 1000000 if args.milliseconds else \
        event.elapsed_ns / 1000
    description = probes[event.probe_index].format(event)
    # -F/--filter: only show events whose description contains the text.
    if args.filter and args.filter not in description:
        return
    print("%-8.3f %-8.2f %s" % (time.time() - start_ts, elapsed, description))

bpf["gcs"].open_perf_buffer(print_event)
while 1:
    bpf.kprobe_poll()
|
dials/dials | util/filter_reflections.py | Python | bsd-3-clause | 38,269 | 0.001934 | """
Methods for filtering reflection tables for bad data.
The filtering methods are combined into functions to perform the relevant
filtering on a reflection table(s), to produce a filtered reflection table
ready for export or further processing.
The set of classes defined in this module have filtering methods implemented as
classmethods/staticmethods, to allow easy use of individual methods. The
different classes are to handle filtering of different intensity types - profile,
scale, sum, profile + sum, etc. All functions and classmethods/staticmethods act on
a reflection table, returning a reflection table that is typically a new object,
due to the use of flex selections. Each filtering method raises a ValueError if
no reflections remain after filtering.
Functions:
- filter_reflection_table:
performs a full filtering algorithm for a given intensity ch | oice
- sum_partial_reflections:
combines matching partials, replacing them with a single combined value
filter_reflection_table takes in the following parameters: min_isigi=float,
filter_ice_rings=bool, combine_partials=bool, partiality_threshold=float,
intensity_choice=strings (passed in as a list e.g. ['sum', 'profile'])
Classes:
- FilteringReductionMethods:
a collection of staticmethods applicable to any kind of intensity values
- FilterForExportA | lgorithm:
defines a full, general filtering algorithm for a reflection table
- PrfIntensityReducer:
implements methods specific to filtering of profile fitted (prf) intensities
- SumIntensityReducer
implements methods specific to filtering of summation (sum) intensities
- SumAndPrfIntensityReducer
implements filtering methods when using prf intensities if present, else
sum (per reflection)
- ScaleIntensityReducer
implements filtering methods for intensities output from scaling
- AllSumPrfScaleIntensityReducer
implements filtering methods for using all of prf, sum and scale intensities
"""
from __future__ import annotations
import logging
from collections import defaultdict
from typing import Any, List, Type
from cctbx import crystal, miller
from dials.algorithms.scaling.outlier_rejection import reject_outliers
from dials.array_family import flex
from dials.util import tabulate
from dials.util.batch_handling import assign_batches_to_reflections
logger = logging.getLogger("dials")
class NoProfilesException(Exception):
"""Custom exception when no integrated_prf reflections found."""
pass
def filter_reflection_table_selection(
    reflection_table, intensity_choice, *args, **kwargs
):
    """Return a boolean mask marking the reflections that survive filtering."""
    n_refl = reflection_table.size()
    # Tag each reflection so survivors can be traced back after filtering.
    reflection_table["original_index"] = flex.size_t(range(0, n_refl))
    mask = flex.bool(n_refl, False)
    surviving = filter_reflection_table(
        reflection_table, intensity_choice, *args, **kwargs
    )
    mask.set_selected(surviving["original_index"], True)
    return mask
def filter_reflection_table(
    reflection_table: flex.reflection_table,
    intensity_choice: List[str],
    *args: Any,
    **kwargs: Any,
) -> flex.reflection_table:
    """Filter the data and delete unneeded intensity columns.

    A list of which intensities to filter on e.g "sum", "scale", "profile" or
    allowed combinations. If a combination is given, only those reflections
    which have valid reflections for the multiple intensity types are retained.
    Strict checks are made that the requested intensity choice(s) has the
    required data in the reflection table.

    Args:
        reflection_table: a single reflection table object
        intensity_choice[List]: a list of the which intensities to filter on

    Returns:
        A reflection table filtered based on the arguments (of reduced size
        compared to the input table.)

    Raises:
        ValueError: if invalid intensity_choice given, if one step of filtering
            causes no reflections to remain, if no profile reflections remain
            after filtering and the choice is "profile".
    """
    if not isinstance(intensity_choice, list):
        raise ValueError("intensity_choice must be List[str]")
    # Map the requested intensity combination onto the reducer class that
    # implements the corresponding filtering algorithm.
    if intensity_choice == ["scale"]:
        reducer: Type[FilterForExportAlgorithm] = ScaleIntensityReducer
    elif intensity_choice == ["sum"]:
        reducer = SumIntensityReducer
    elif intensity_choice == ["profile"]:
        reducer = PrfIntensityReducer
    elif all(i in intensity_choice for i in ["sum", "scale", "profile"]):
        reducer = AllSumPrfScaleIntensityReducer
    elif all(i in intensity_choice for i in ["sum", "profile"]):
        reducer = SumAndPrfIntensityReducer
    elif "sum|profile" in intensity_choice:
        # "sum|profile" selects, per reflection, the profile-fitted intensity
        # when present, falling back to the summation intensity otherwise.
        # (Previously written as all(i in ... for i in ["sum|profile"]), which
        # is just a verbose single-element membership test.)
        reducer = SumORPrfIntensityReducer
    elif all(i in intensity_choice for i in ["sum", "scale"]):
        reducer = SumAndScaleIntensityReducer
    else:
        raise ValueError(
            (
                "Unrecognised intensity choice for filter_reflection_table,\n"
                "value read: {}\n"
                "must be one of: 'scale', 'profile', 'sum', 'profile sum', \n"
                " 'sum scale', 'profile sum scale'\n"
                "(if parsing from command line, multiple choices passed as e.g. profile+sum"
            ).format(intensity_choice)
        )
    # Validate that the reflection table has the columns we need
    required_columns_lookup = {
        "scale": {"inverse_scale_factor", "intensity.scale.value"},
        "profile": {"intensity.prf.value"},
        "sum": {"intensity.sum.value"},
    }
    for intensity_kind, required_columns in required_columns_lookup.items():
        if intensity_kind in intensity_choice:
            missing_columns = required_columns - set(reflection_table.keys())
            if missing_columns:
                raise ValueError(
                    "Cannot export intensity kind '{}'; missing column(s): {}".format(
                        intensity_kind, ", ".join(missing_columns)
                    )
                )
    # Do the filtering, but with an exception for the case of no profile fitted
    # reflections - in this case, try to reprocess without profile fitted.
    try:
        reflection_table = reducer.filter_for_export(reflection_table, *args, **kwargs)
    except NoProfilesException as e:
        logger.warning(e, exc_info=True)
        # Drop "profile" from the requested combination and retry; if nothing
        # remains there is no usable intensity source at all.
        if "profile" in intensity_choice:
            intensity_choice.remove("profile")
        else:
            intensity_choice = None
        if intensity_choice:
            logger.info(
                "Attempting to reprocess with intensity choice: %s",
                " + ".join(i for i in intensity_choice),
            )
            reflection_table = filter_reflection_table(
                reflection_table, intensity_choice, *args, **kwargs
            )
        else:
            raise ValueError(
                "Unable to process data due to absence of profile fitted reflections"
            )
    return reflection_table
def filtered_arrays_from_experiments_reflections(
experiments,
reflections,
outlier_rejection_after_filter=False,
partiality_threshold=0.99,
return_batches=False,
):
"""Create a list of filtered arrays from experiments and reflections.
A partiality threshold can be set, and if outlier_rejection_after_filter
is True, and intensity.scale values are not present, then a round of
outlier rejection will take place.
Raises:
ValueError: if no datasets remain after filtering.
"""
miller_arrays = []
ids_to_del = []
if return_batches:
assert all(expt.scan is not None for expt in experiments)
batch_offsets = [expt.scan.get_batch_offset() for expt in experiments]
reflections = assign_batches_to_reflections(reflections, batch_offsets)
batch_arrays = []
for idx, (expt, refl) in enumerate(zip(experiments, reflections)):
crystal_symmetry = crystal.symmetry(
unit_cell=expt.crystal.get_unit_cell(),
space_group=expt.crystal.get_space_group(),
)
# want to use |
junneyang/taskflow | taskflow/tests/unit/worker_based/test_pipeline.py | Python | apache-2.0 | 3,812 | 0.000525 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import futurist
from futurist import waiters
from oslo_utils import uuidutils
from taskflow.engines.action_engine import executor as base_executor
from taskflow.engines.worker_based import endpoint
from taskflow.engines.worker_based import executor as worker_executor
from taskflow.engines.worker_based import server as worker_server
from taskflow import test
from taskflow.tests import utils as test_utils
from taskflow.types import failure
from taskflow.utils import threading_utils
TEST_EXCHANGE, TEST_TOPIC = ('test-exchange', 'test-topic')
WAIT_TIMEOUT = 1.0
POLLING_INTERVAL = 0.01
class TestPipeline(test.TestCase):
    """Round-trip tests: a worker-based executor driving an in-process
    server over kombu's in-memory transport."""

    def _fetch_server(self, task_classes):
        # One endpoint per task class so the server can dispatch requests to it.
        endpoints = []
        for cls in task_classes:
            endpoints.append(endpoint.Endpoint(cls))
        server = worker_server.Server(
            TEST_TOPIC, TEST_EXCHANGE,
            futurist.ThreadPoolExecutor(max_workers=1), endpoints,
            transport='memory',
            transport_options={
                'polling_interval': POLLING_INTERVAL,
            })
        # The server runs in a daemon thread; the caller starts and joins it.
        server_thread = threading_utils.daemon_thread(server.start)
        return (server, server_thread)

    def _fetch_executor(self):
        executor = worker_executor.WorkerTaskExecutor(
            uuidutils.generate_uuid(),
            TEST_EXCHANGE,
            [TEST_TOPIC],
            transport='memory',
            transport_options={
                'polling_interval': POLLING_INTERVAL,
            })
        return executor

    def _start_components(self, task_classes):
        """Start a server + executor pair and register LIFO cleanups
        (server.stop, then thread join, then executor.stop)."""
        server, server_thread = self._fetch_server(task_classes)
        executor = self._fetch_executor()
        self.addCleanup(executor.stop)
        self.addCleanup(server_thread.join)
        self.addCleanup(server.stop)
        executor.start()
        server_thread.start()
        # Block until the server is ready to accept work.
        server.wait()
        return (executor, server)

    def test_execution_pipeline(self):
        executor, server = self._start_components([test_utils.TaskOneReturn])
        self.assertEqual(0, executor.wait_for_workers(timeout=WAIT_TIMEOUT))
        t = test_utils.TaskOneReturn()
        progress_callback = lambda *args, **kwargs: None
        f = executor.execute_task(t, uuidutils.generate_uuid(), {},
                                  progress_callback=progress_callback)
        waiters.wait_for_any([f])
        # The future resolves to an (event, result) pair.
        event, result = f.result()
        self.assertEqual(1, result)
        self.assertEqual(base_executor.EXECUTED, event)

    def test_execution_failure_pipeline(self):
        task_classes = [
            test_utils.TaskWithFailure,
        ]
        executor, server = self._start_components(task_classes)
        t = test_utils.TaskWithFailure()
        progress_callback = lambda *args, **kwargs: None
        f = executor.execute_task(t, uuidutils.generate_uuid(), {},
                                  progress_callback=progress_callback)
        waiters.wait_for_any([f])
        action, result = f.result()
        # Task failures come back wrapped in a Failure, not raised directly.
        self.assertIsInstance(result, failure.Failure)
        self.assertEqual(RuntimeError, result.check(RuntimeError))
        self.assertEqual(base_executor.EXECUTED, action)
|
kosior/taktyk | taktyk/entry.py | Python | mit | 2,324 | 0.001721 | import os
from inspect import signature
from . import db
from . import settings
class Entry:
    """A wykop entry or comment, together with metadata about its media file.

    ``entry_id`` is set only when the object represents a comment; in that
    case ``id_`` is the comment's own id.
    """

    def __init__(self, id_=None, author=None, date=None, body=None, body_html=None, url=None,
                 plus=None, media_url=None, tags=None, is_nsfw=None, entry_id=None, type_=None):
        self.id_ = id_
        self.author = author
        self.date = date
        self.body = body
        self.body_html = body_html
        self.url = url
        self.plus = plus
        self.media_url = media_url
        self.tags = tags
        self.is_nsfw = is_nsfw
        self.entry_id = entry_id  # only for comment
        self.type_ = type_

    def __iter__(self):
        return self.attrs_gen()

    def attrs_gen(self):
        # The first 11 __init__ parameters (id_ .. entry_id) define the
        # attributes exported by iteration, in declaration order.
        names = list(signature(self.__init__).parameters.keys())[:11]
        return (getattr(self, name) for name in names)

    def __str__(self):
        # Comments render as "<entry_id>_<id>", entries as plain "<id>".
        if self.entry_id:
            return '{}_{}'.format(self.entry_id, self.id_)
        return str(self.id_)

    def download_info(self):
        """Bundle the fields needed by the media downloader."""
        return {
            'id_': str(self),
            'media_url': self.media_url,
            'is_nsfw': self.is_nsfw,
            'local_file_path': self.local_file_path,
        }

    @property
    def comments_count(self):
        # Comments (entry_id set) have no comment count of their own.
        if self.entry_id:
            return None
        return db.DB.count_comments(self.id_)

    @property
    def media_ext(self):
        """File extension of media_url; '.webm' is assumed for gfycat links."""
        if not self.media_url:
            return None
        ext = os.path.splitext(self.media_url)[1]
        if len(ext) > 4 and '?' in ext:
            # Strip query strings glued onto the extension ('.jpg?x=1' -> '.jpg').
            ext = ext.split('?')[0]
        elif not ext and 'gfycat.com' in self.media_url:
            ext = '.webm'
        return ext

    @property
    def local_file_path(self):
        """Relative path where the media file is stored, or '' without media."""
        ext = self.media_ext
        if not (self.media_url and ext):
            return ''
        base = settings.FILES_DIR_NAME
        if self.is_nsfw:
            base = os.path.join(base, settings.NSFW_DIR_NAME)
        if self.entry_id:  # comment media live in a dedicated subdirectory
            return os.path.join(base, settings.COMMENTS_DIR_NAME,
                                '{}_{}{}'.format(self.entry_id, self.id_, ext))
        return os.path.join(base, '{}{}'.format(self.id_, ext))
|
uwosh/uwosh.initiatives | uwosh/initiatives/browser/initiatives.py | Python | gpl-2.0 | 1,839 | 0.014138 | from Products.Five.browser import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets.common import ViewletBase
from Products.CMFCore.utils import getToolByName
from AccessControl import getSecurityManager
from zope.component import getMultiAdapter
from zope.app.component.hooks import getSite
class InitiativesViewlet(ViewletBase):
    """Viewlet that lists published Initiative objects.

    Depending on the ``only_show_in_site_root`` property it renders either
    only on the portal root or everywhere; ``self.should_display`` gates the
    template.
    """

    render = ViewPageTemplateFile('initiatives_viewlet.pt')

    def update(self):
        """Collect published initiatives and decide whether to render."""
        super(InitiativesViewlet, self).update()
        context_state = getMultiAdapter((self.context, self.request), name=u'plone_context_state')
        site = getSite()
        props = getToolByName(self.context, 'portal_properties').uwosh_initiatives
        if props.getProperty('only_show_in_site_root', True):
            #must be site root
            self.should_display = context_state.is_portal_root()
        else:
            self.should_display = True
        if self.should_display:
            self.sitetitle = site.title
            catalog = getToolByName(self.context, 'portal_catalog')
            self.view_all_url = props.getProperty('view_all_url', None)
            # Fall back to the conventional /initiatives folder if present.
            if self.view_all_url is None and 'initiatives' in site.objectIds():
                self.view_all_url = site.absolute_url() + '/initiatives'
            self.initiatives = catalog(
                portal_type = 'Initiative',
                review_state = 'published',
                sort_on = 'getObjPositionInParent',
                sort_order = 'ascending',
                getShowInitiative='True'
            )
            if len(self.initiatives) == 0:
                self.should_display = False
            # NOTE(review): refreshing the catalog on every render looks
            # expensive -- confirm this is intentional.
            catalog.refreshCatalog()
|
rhelmer/socorro | socorro/app/socorro_app.py | Python | mpl-2.0 | 19,914 | 0.001506 | #! /usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""This module defines the class hierarchy for all Socorro applications.
The base of the hierarchy is "SocorroApp" which defines the interface and some
of the base methods.
Derived from the base "SocorroApp" is the "App" class. This class adds logging
configuration requirements to the application. App is the class from which
all the Socorro Apps derive.
Also derived from the base class "SocorroApp" is "SocorroWelcomeApp", an app
that serves as a dispatcher for all other Socorro apps. Rather than forcing
the user to know what and where all the other Socorro apps are, this app adds
an "application" config requirement as a commandline arguement. The user may
specify the app name that they want to run. Running --help on this app, will
also list all the Socorro Apps.
If a configuration file exists that includes a not-commented-out 'application'
parameter, it can be given directly to the "SocorroWelcomeApp". In that case,
the "SocorroWelcomeApp" becomes the app requested in the config file.
"""
import logging
import logging.handlers
import functools
import signal
import os
import sys
import re
import threading
import socorro.app.for_application_defaults
from socorro.app.for_application_defaults import (
ApplicationDefaultsProxy,
)
from configman import (
ConfigurationManager,
Namespace,
RequiredConfig,
ConfigFileFutureProxy,
environment,
command_line,
)
from configman.converters import py_obj_to_str
#------------------------------------------------------------------------------
# every socorro app has a class method called 'get_application_defaults' from
# which configman extracts the preferred configuration default values.
#
# The Socorro App class hierarchy will create a 'values_source_list' with the
# App's preferred config at the base. These become the defaults over which
# the configuration values from config file, environment, and command line are
# overlaid.
#
# In the case where the actual app is not specified until configman is already
# invoked, application defaults cannot be determined until configman
# has already started the overlay process. To resolve this
# chicken/egg problem, we create a ApplicationDefaultsProxy class that stands
# in values_source list (the list of places that overlay config values come
# from). Since the ApplicationDefaultsProxy also serves as the 'from_string'
# converter for the Application config option, it can know when the target
# application has been determined, fetch the defaults. Since the
# ApplicationDefaultsProxy object is already in the values source list, it can
# then start providing overlay values immediately.
# Configman knows nothing about how the ApplicationDefaultsProxy object works,
# so we must regisiter it as a new values overlay source class. We do that
# by manually inserting inserting the new class into Configman's
# handler/dispatcher. That object associates config sources with modules that
# are able to implement Configman's overlay handlers.
from configman.value_sources import type_handler_dispatch
# register our new type handler with configman
type_handler_dispatch[ApplicationDefaultsProxy].append(
    socorro.app.for_application_defaults
)
#------------------------------------------------------------------------------
# create the app default proxy object; it also serves as the 'from_string'
# converter for the 'application' config option (see comments above)
application_defaults_proxy = ApplicationDefaultsProxy()
#------------------------------------------------------------------------------
# for use with SIGHUP for apps that run as daemons; True means "run (again)"
restart = True
#------------------------------------------------------------------------------
def respond_to_SIGHUP(signal_number, frame, logger=None):
    """Signal handler used by daemonized Socorro apps.

    Flags the module-level ``restart`` so the outer run loop will reread its
    configuration and rebuild its resources, then raises KeyboardInterrupt so
    the current run unwinds and closes everything cleanly.
    """
    global restart
    if logger:
        logger.info('detected SIGHUP')
    restart = True
    raise KeyboardInterrupt
#--------------------------------------------------------------------------
def klass_to_pypath(klass):
    """Return a properly qualified dotted path for *klass*.

    Classes defined in the script being executed report their module as
    '__main__', a name configman cannot re-import.  For those, derive the
    real dotted module name by stripping a ``sys.path`` root from the
    script's file path; when no root matches, fall back to
    ``py_obj_to_str``.
    """
    if klass.__module__ != '__main__':
        return "%s.%s" % (klass.__module__, klass.__name__)
    # Drop the ".py" suffix to get an importable path fragment.
    script_path = sys.modules['__main__'].__file__[:-3]
    dotted_module = ''
    for path_root in sys.path:
        stripped = script_path.replace(path_root, '')
        if stripped == script_path:
            continue
        dotted_module = stripped.replace('/', '.').strip('.')
        break
    if dotted_module == '':
        return py_obj_to_str(klass)
    return "%s.%s" % (dotted_module, klass.__name__)
#==============================================================================
class SocorroApp(RequiredConfig):
"""The base class for all Socorro applications"""
app_name = 'SocorroAppBaseClass'
app_version = "1.0"
app_description = 'base class for app system'
required_config = Namespace()
#--------------------------------------------------------------------------
    def __init__(self, config):
        # The fully-resolved configman configuration for this app instance.
        self.config = config
#--------------------------------------------------------------------------
@staticmethod
def get_application_defaults():
"""this method allows an app to inject defaults into the configuration
that can override defaults not under the direct control of the app.
For example, if an app were to use a class that had a config default
of X and that was not appropriate as a default for this app, then
this method could be used to override that default"""
return {}
#--------------------------------------------------------------------------
def main(self): # pragma: no cover
"""derived classes must override this function with business logic"""
raise NotImplementedError(
"A definition of 'main' in a derived class is required"
)
#--------------------------------------------------------------------------
@classmethod
def run(klass, config_path=None, values_source_list=None):
global restart
restart = True
while restart:
# the SIGHUP hand | ler will change that back to True if it wants
# the app to restart and run again.
restart = False
app_exit_code = klass._do_run(
config_path=config_path,
values_source_list=values_source_list
)
r | eturn app_exit_code
#--------------------------------------------------------------------------
@classmethod
def _do_run(klass, config_path=None, values_source_list=None):
# while this method is defined here, only derived classes are allowed
# to call it.
if klass is SocorroApp:
raise NotImplementedError(
"The SocorroApp class has no useable 'main' method"
)
if config_path is None:
confi |
PyCQA/astroid | astroid/brain/brain_multiprocessing.py | Python | lgpl-2.1 | 3,516 | 0.000853 | # Copyright (c) 2016, 2018, 2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2020-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 David Gilman <davidgilman1@gmail.com>
# Copyright (c) 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
from astroid.bases import BoundMethod
from astroid.brain.helpers import register_module_extender
from astroid.builder import parse
from astroid.exceptions import InferenceError
from astroid.manager import AstroidManager
from astroid.nodes.scoped_nodes import FunctionDef
def _multiprocessing_transform():
    """Return a fake ``multiprocessing`` module AST for astroid inference."""
    module = parse(
        """
    from multiprocessing.managers import SyncManager
    def Manager():
        return SyncManager()
    """
    )
    # Multiprocessing uses a getattr lookup inside contexts,
    # in order to get the attributes they need. Since it's extremely
    # dynamic, we use this approach to fake it.
    node = parse(
        """
    from multiprocessing.context import DefaultContext, BaseContext
    default = DefaultContext()
    base = BaseContext()
    """
    )
    try:
        context = next(node["default"].infer())
        base = next(node["base"].infer())
    except (InferenceError, StopIteration):
        # Inference failed; expose only the bare Manager() stub.
        return module
    for node in (context, base):
        for key, value in node.locals.items():
            if key.startswith("_"):
                continue
            value = value[0]
            if isinstance(value, FunctionDef):
                # We need to rebound this, since otherwise
                # it will have an extra argument (self).
                value = BoundMethod(value, node)
            module[key] = value
    return module
def _multiprocessing_managers_transform():
    """Return a fake ``multiprocessing.managers`` module AST for inference."""
    return parse(
        """
    import array
    import threading
    import multiprocessing.pool as pool
    import queue
    class Namespace(object):
        pass
    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)
    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)
    class SyncManager(object):
        Queue = JoinableQueue = queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args
        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    """
    )
# Hook both fake-module builders into astroid's module-extension machinery.
register_module_extender(
    AstroidManager(), "multiprocessing.managers", _multiprocessing_managers_transform
)
register_module_extender(
    AstroidManager(), "multiprocessing", _multiprocessing_transform
)
|
Jamlum/pytomo | pytomo/dns/rdtypes/ANY/X25.py | Python | gpl-2.0 | 2,219 | 0.008562 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import

from . import exception as dns_exception
from . import rdata as dns_rdata
from . import tokenizer as dns_tokenizer
class X25(dns_rdata.Rdata):
    """X25 record
    @ivar address: the PSDN address
    @type address: string
    @see: RFC 1183"""
    __slots__ = ['address']
    def __init__(self, rdclass, rdtype, address):
        super(X25, self).__init__(rdclass, rdtype)
        self.address = address
    def to_text(self, origin=None, relativize=True, **kw):
        # Present the address as a quoted <character-string>, escaping
        # non-printable bytes.
        return '"%s"' % dns_rdata._escapify(self.address)
    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Parse a single <character-string> token followed by end-of-line.
        address = tok.get_string()
        tok.get_eol()
        return cls(rdclass, rdtype, address)
    from_text = classmethod(from_text)
    def to_wire(self, file, compress = None, origin = None):
        # Wire form (RFC 1183): one length octet followed by the raw bytes,
        # so the address is limited to 255 characters.
        l = len(self.address)
        assert l < 256
        byte = chr(l)
        file.write(byte)
        file.write(self.address)
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        # The length octet must account for the entire rdata; anything else
        # means the record is malformed.
        l = ord(wire[current])
        current += 1
        rdlen -= 1
        if l != rdlen:
            raise dns_exception.FormError
        address = wire[current : current + l]
        return cls(rdclass, rdtype, address)
    from_wire = classmethod(from_wire)
    def _cmp(self, other):
        # NOTE: Python 2 only -- relies on the builtin cmp().
        return cmp(self.address, other.address)
|
Haabb/pwnfork | pwn/process.py | Python | mit | 1,834 | 0.004362 | import pwn
from basechatter import basechatter
class process(basechatter):
    """Spawn a local subprocess and talk to it over stdin/stdout.

    Wraps subprocess.Popen with the non-blocking read/poll interface
    expected by basechatter.
    """

    def __init__(self, cmd, *args, **kwargs):
        env = kwargs.get('env', {})
        timeout = kwargs.get('timeout', 'default')
        silent = kwargs.get('silent', False)
        basechatter.__init__(self, timeout, silent)
        self.proc = None
        self.stdout = None
        self.start(cmd, args, env)

    def start(self, cmd, args, env):
        """Launch *cmd* (split on whitespace) with *args* and a fresh pipe set."""
        import subprocess, fcntl, os
        if self.connected():
            pwn.log.warning('Program "%s" already started' % cmd)
            return
        if not self.silent:
            pwn.log.waitfor('Starting program "%s"' % cmd)
        self.proc = subprocess.Popen(
            tuple(cmd.split()) + args,
            stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            env = env,
            bufsize = 0)
        fd = self.proc.stdout.fileno()
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        # Make stdout non-blocking so _recv can poll instead of hanging.
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        self.stdout = fd
        if not self.silent:
            pwn.log.succeeded()

    def connected(self):
        return self.proc != None

    def close(self):
        if self.proc:
            self.proc.kill()
            self.proc = None

    def _send(self, dat):
        self.proc.stdin.write(dat)
        self.proc.stdin.flush()

    def _recv(self, numb):
        """Poll the non-blocking stdout for up to self.timeout seconds."""
        import time
        end_time = time.time() + self.timeout
        while True:
            r = ''
            try:
                r = self.proc.stdout.read(numb)
            except IOError as e:
                # errno 11 == EAGAIN: no data available yet; keep polling.
                if e.errno != 11:
                    raise
            if r or time.time() > end_time:
                break
            time.sleep(0.0001)
        return r

    def fileno(self):
        return self.stdout
| |
arne-cl/turian-parser | scripts/treebank-processing/modules/postags.py | Python | gpl-2.0 | 873 | 0.054983 | #
# postags.py
#
# List of function POS tags and content POS tags
#
# $Id: postags.py 1657 2006-06-04 03:03:05Z turian $
#
#######################################################################
# Copyright (c) 2004-2006, New York University. All rights reserved
#######################################################################
# Function POS tags
# All values are a constant 1: the mapping is used as a membership set.
function = dict.fromkeys([
    "AUX", "AUXG", "CC", "DT", "EX", "IN", "MD", "PDT", "POS", "PRP",
    "PRP$", "RP", "TO", "WDT", "WP", "WP$", "WRB", "#", "$", ".", ",",
    ":", "''", "``", "-LRB-", "-RRB-", "-NONE-",
], 1)
# Content POS tags
# All values are a constant 1: the mapping is used as a membership set.
content = {
    "CD": 1,
    "FW": 1,
    "JJ": 1,
    "JJR": 1,
    "JJS": 1,
    "LS": 1,
    "NN": 1,
    "NNS": 1,
    "NNP": 1,
    "NNPS": 1,
    "RB": 1,
    "RBR": 1,
    "RBS": 1,
    "SYM": 1,
    "UH": 1,
    "VB": 1,
    "VBD": 1,
    "VBG": 1,
    "VBN": 1,
    "VBP": 1,
    "VBZ": 1,
    }
|
aspaas/ion | test/functional/reindex.py | Python | mit | 1,525 | 0.003934 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running iond with -reindex and -reindex-chainstate options.
- Start a single node and generate 3 blocks.
- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3.
- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3.
"""
from test_framework.test_framework import IonTestFramework
from test_framework.util import assert_equal
import time
class ReindexTest(IonTestFramework):
    def __init__(self):
        super().__init__()
        # Single fresh node with no pre-mined chain so block counts are exact.
        self.setup_clean_chain = True
        self.num_nodes = 1
    def reindex(self, justchainstate=False):
        """Mine 3 blocks, restart with -reindex[-chainstate], wait for resync."""
        self.nodes[0].generate(3)
        blockcount = self.nodes[0].getblockcount()
        self.stop_nodes()
        extra_args = [["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]]
        self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
        # Poll until the reindex has caught back up to the pre-restart height.
        while self.nodes[0].getblockcount() < blockcount:
            time.sleep(0.1)
        assert_equal(self.nodes[0].getblockcount(), blockcount)
        self.log.info("Success")
    def run_test(self):
        # Exercise both flavors twice to cover back-to-back reindexing.
        self.reindex(False)
        self.reindex(True)
        self.reindex(False)
        self.reindex(True)
if __name__ == '__main__':
    ReindexTest().main()
|
mikoim/japanization | reviews/urls.py | Python | mit | 834 | 0.004796 | from django.conf.urls import include, url
from django.views.decorators.cache import cache_page as cp
from django.views.generic import TemplateView
from rest_framework.routers import DefaultRouter

from .views import ReviewViewSet, ReviewView

# REST endpoints for reviews are served under /api/ via DRF's default router.
router = DefaultRouter()
router.register(r'reviews', ReviewViewSet)

# Cached page views (5 min for frequently-changing lists, 1 h for the manual).
urlpatterns = [
    url(r'^$', cp(60 * 5)(ReviewView.as_view(template_name='reviews/index_list.html')), name='reviews-index'),
    url(r'^api/', include(router.urls), name='reviews-api'),
    url(r'^manual$', cp(60 * 60)(ReviewView.as_view(template_name='reviews/manual.html')), name='reviews-manual'),
    url(r'^sample$', cp(60 * 5)(ReviewView.as_view(template_name='reviews/sample_list.html')), name='reviews-sample'),
    url(r'^edit$', TemplateView.as_view(template_name='reviews/edit.html'), name='reviews-edit'),
]
|
Azure/azure-sdk-for-python | sdk/cognitiveservices/azure-cognitiveservices-knowledge-qnamaker/azure/cognitiveservices/knowledge/qnamaker/models/endpoint_settings_dto_active_learning.py | Python | mit | 946 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .active_learning_settings_dto import ActiveLearningSettingsDTO
class EndpointSettingsDTOActiveLearning(ActiveLearningSettingsDTO):
    """Active Learning settings of the endpoint.
    :param enable: True/False string providing Active Learning
    :type enable: str
    """
    # Maps model attributes to wire-format keys for msrest (de)serialization.
    _attribute_map = {
        'enable': {'key': 'enable', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        # All fields (only 'enable') are handled by the base DTO class.
        super(EndpointSettingsDTOActiveLearning, self).__init__(**kwargs)
|
tox-dev/tox | tests/unit/config/test_config.py | Python | mit | 123,326 | 0.000649 | # coding=utf-8
import os
import re
import sys
from textwrap import dedent
import py
import pytest
from pluggy import PluginManager
from six import PY2
from virtualenv.info import IS_PYPY
import tox
from tox.config import (
CommandParser,
DepOption,
PosargsOption,
SectionReader,
get_homedir,
get_version_info,
getcontextname,
is_section_substitution,
parseconfig,
)
from tox.config.parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
from tox.config.parallel import ENV_VAR_KEY_PUBLIC as PARALLEL_ENV_VAR_KEY_PUBLIC
class TestVenvConfig:
    """Parsing of per-testenv virtualenv settings from tox.ini/setup.cfg."""

    def test_config_parsing_minimal(self, tmpdir, newconfig):
        """A bare [testenv:x] section yields sensible defaults."""
        config = newconfig(
            [],
            """
            [testenv:py1]
            """,
        )
        assert len(config.envconfigs) == 1
        assert config.toxworkdir.realpath() == tmpdir.join(".tox").realpath()
        assert config.envconfigs["py1"].basepython == sys.executable
        assert config.envconfigs["py1"].deps == []
        assert config.envconfigs["py1"].platform == ".*"

    def test_config_parsing_multienv(self, tmpdir, newconfig):
        """Multiple envs get their own envdirs and indexserver-aware deps."""
        config = newconfig(
            [],
            """
            [tox]
            toxworkdir = {}
            indexserver =
                xyz = xyz_repo
            [testenv:py1]
            deps=hello
            [testenv:py2]
            deps=
                world1
                :xyz:http://hello/world
            """.format(
                tmpdir,
            ),
        )
        assert config.toxworkdir == tmpdir
        assert len(config.envconfigs) == 2
        assert config.envconfigs["py1"].envdir == tmpdir.join("py1")
        dep = config.envconfigs["py1"].deps[0]
        assert dep.name == "hello"
        assert dep.indexserver is None
        assert config.envconfigs["py2"].envdir == tmpdir.join("py2")
        dep1, dep2 = config.envconfigs["py2"].deps
        assert dep1.name == "world1"
        assert dep2.name == "http://hello/world"
        assert dep2.indexserver.name == "xyz"
        assert dep2.indexserver.url == "xyz_repo"

    def test_envdir_set_manually(self, tmpdir, newconfig):
        """An explicit relative envdir is resolved against toxworkdir."""
        config = newconfig(
            [],
            """
            [testenv:dev]
            envdir = dev
            """,
        )
        envconfig = config.envconfigs["dev"]
        assert envconfig.envdir == tmpdir.join("dev")

    def test_envdir_set_manually_with_substitutions(self, newconfig):
        """envdir may use {toxworkdir} substitution."""
        config = newconfig(
            [],
            """
            [testenv:dev]
            envdir = {toxworkdir}/foobar
            """,
        )
        envconfig = config.envconfigs["dev"]
        assert envconfig.envdir == config.toxworkdir.join("foobar")

    def test_envdir_set_manually_setup_cfg(self, tmpdir, newconfig):
        """Per-env envdir overrides the [testenv] default in setup.cfg."""
        config = newconfig(
            [],
            """
            [tox:tox]
            envlist = py36,py37
            [testenv]
            envdir = dev
            [testenv:py36]
            envdir = dev36
            """,
            filename="setup.cfg",
        )
        envconfig = config.envconfigs["py36"]
        assert envconfig.envdir == tmpdir.join("dev36")
        envconfig = config.envconfigs["py37"]
        assert envconfig.envdir == tmpdir.join("dev")

    def test_force_dep_version(self, initproj):
        """
        Make sure we can override dependencies configured in tox.ini when using the command line
        option --force-dep.
        """
        initproj(
            "example123-0.5",
            filedefs={
                "tox.ini": """
            [tox]
            [testenv]
            deps=
                dep1==1.0
                dep2>=2.0
                dep3
                dep4==4.0
            """,
            },
        )
        config = parseconfig(
            ["--force-dep=dep1==1.5", "--force-dep=dep2==2.1", "--force-dep=dep3==3.0"],
        )
        assert config.option.force_dep == ["dep1==1.5", "dep2==2.1", "dep3==3.0"]
        expected_deps = ["dep1==1.5", "dep2==2.1", "dep3==3.0", "dep4==4.0"]
        assert expected_deps == [str(x) for x in config.envconfigs["python"].deps]

    def test_force_dep_with_url(self, initproj):
        """--force-dep leaves URL requirements untouched."""
        initproj(
            "example123-0.5",
            filedefs={
                "tox.ini": """
            [tox]
            [testenv]
            deps=
                dep1==1.0
                https://pypi.org/xyz/pkg1.tar.gz
            """,
            },
        )
        config = parseconfig(["--force-dep=dep1==1.5"])
        assert config.option.force_dep == ["dep1==1.5"]
        expected_deps = ["dep1==1.5", "https://pypi.org/xyz/pkg1.tar.gz"]
        assert [str(x) for x in config.envconfigs["python"].deps] == expected_deps

    def test_process_deps(self, newconfig):
        """Comments are stripped and pip options normalized in deps."""
        config = newconfig(
            [],
            """
            [testenv]
            deps =
                -r requirements.txt
                yapf>=0.25.0,<0.27 # pyup: < 0.27 # disable updates
                --index-url https://pypi.org/simple
                pywin32 >=1.0 ; sys_platform == '#my-magic-platform' # so what now
                -fhttps://pypi.org/packages
                --global-option=foo
                -v dep1
                --help dep2
            """,
        )  # note that those last two are invalid
        expected_deps = [
            "-rrequirements.txt",
            "yapf>=0.25.0,<0.27",
            "--index-url=https://pypi.org/simple",
            "pywin32 >=1.0 ; sys_platform == '#my-magic-platform'",
            "-fhttps://pypi.org/packages",
            "--global-option=foo",
            "-v dep1",
            "--help dep2",
        ]
        assert [str(x) for x in config.envconfigs["python"].deps] == expected_deps

    def test_is_same_dep(self):
        """
        Ensure correct parseini._is_same_dep is working with a few samples.
        """
        assert DepOption._is_same_dep("pkg_hello-world3==1.0", "pkg_hello-world3")
        assert DepOption._is_same_dep("pkg_hello-world3==1.0", "pkg_hello-world3>=2.0")
        assert DepOption._is_same_dep("pkg_hello-world3==1.0", "pkg_hello-world3>2.0")
        assert DepOption._is_same_dep("pkg_hello-world3==1.0", "pkg_hello-world3<2.0")
        assert DepOption._is_same_dep("pkg_hello-world3==1.0", "pkg_hello-world3<=2.0")
        assert not DepOption._is_same_dep("pkg_hello-world3==1.0", "otherpkg>=2.0")

    def test_suicide_interrupt_terminate_timeout_set_manually(self, newconfig):
        """Timeouts default per-env and may be overridden individually."""
        config = newconfig(
            [],
            """
            [testenv:dev]
            suicide_timeout = 30.0
            interrupt_timeout = 5.0
            terminate_timeout = 10.0
            [testenv:other]
            """,
        )
        envconfig = config.envconfigs["other"]
        assert 0.0 == envconfig.suicide_timeout
        assert 0.3 == envconfig.interrupt_timeout
        assert 0.2 == envconfig.terminate_timeout
        envconfig = config.envconfigs["dev"]
        assert 30.0 == envconfig.suicide_timeout
        assert 5.0 == envconfig.interrupt_timeout
        assert 10.0 == envconfig.terminate_timeout
class TestConfigPlatform:
    def test_config_parse_platform(self, newconfig):
        """A literal platform value is stored verbatim on the envconfig."""
        config = newconfig(
            [],
            """
            [testenv:py1]
            platform = linux2
            """,
        )
        assert len(config.envconfigs) == 1
        assert config.envconfigs["py1"].platform == "linux2"
    def test_config_parse_platform_rex(self, newconfig, mocksession, monkeypatch):
        """The platform setting is a regex matched against sys.platform."""
        config = newconfig(
            [],
            """
            [testenv:py1]
            platform = a123|b123
            """,
        )
        mocksession.config = config
        assert len(config.envconfigs) == 1
        venv = mocksession.getvenv("py1")
        # The real sys.platform matches neither alternative.
        assert not venv.matching_platform()
        monkeypatch.setattr(sys, "platform", "a123")
        assert venv.matching_platform()
        monkeypatch.setattr(sys, "platform", "b123")
        assert venv.matching_platform()
        monkeypatch.undo()
        assert not venv.matching_platform()
@pytest.mark.parametrize("plat", ["win", "lin", "osx"])
def test_config_parse_platfo |
stevearc/python-pike | pike/nodes/source.py | Python | mit | 1,294 | 0 | """ Nodes that read files. """
from .base import Node
from pike.items import FileMeta
from pike.util import recursive_glob, resource_spec
class SourceNode(Node):
    """
    Base class for source nodes.

    Source nodes are nodes that read files from disk and inject them into a
    graph.
    """
    name = 'source'

    def __init__(self, root):
        super(SourceNode, self).__init__()
        # Resolve e.g. package-relative specs to a concrete directory path.
        self.root = resource_spec(root)

    def process(self):
        """Emit a FileMeta for every file this source provides."""
        return [FileMeta(filename, self.root) for filename in self.files()]

    def files(self):
        """
        Return a list of all filenames for this source node (relative to
        self.root)
        """
        raise NotImplementedError
class GlobNode(SourceNode):
    """
    Source node that streams the files selected by glob patterns.

    Constructor arguments mirror :meth:`~pike.util.recursive_glob`.
    """
    name = 'glob_source'

    def __init__(self, root, patterns, prefix=''):
        super(GlobNode, self).__init__(root)
        self.patterns = patterns
        self.prefix = prefix
        # Give the node a descriptive name reflecting its arguments.
        if prefix:
            self.name = 'glob(%r, %r, %r)' % (root, patterns, prefix)
        else:
            self.name = 'glob(%r, %r)' % (root, patterns)

    def files(self):
        """Return filenames under ``self.root`` that match the patterns."""
        return recursive_glob(self.root, self.patterns, self.prefix)
|
diogocs1/comps | web/addons/pad/res_company.py | Python | apache-2.0 | 423 | 0.007092 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class company_pad(osv.osv):
    """Extends res.company with Etherpad-lite connection settings."""
    _inherit = 'res.company'
    _columns = {
        'pad_server': fields.char('Pad Server', help="Etherpad lite server. Example: beta.primarypad.com"),
        'pad_key': fields.char('Pad Api Key', help="Etherpad lite api key.", groups="base.group_system"),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hdemers/webapp-template | webapp/publish.py | Python | mit | 1,031 | 0 | import gevent
from cloudly.pubsub import RedisWebSocket
from cloudly.tweets import Tweets, StreamManager, keep
from cloudly import logger
from webapp import config
log = logger.init(__name__)
pubsub = RedisWebSocket(config.pubsub_channel)
pubsub.spawn()
running = False
def processor(tweets):
    """Publish each tweet's coordinates to the pubsub channel; return batch size."""
    pubsub.publish(keep(['coordinates'], tweets), "tweets")
    return len(tweets)
def run():
    """Consume the Twitter location stream until stop() requests shutdown.

    Blocks the calling greenlet; tweets with coordinates are forwarded to
    connected websocket clients by `processor`.
    """
    log.info("Starting Twitter stream manager.")
    streamer = StreamManager('locate', processor, is_queuing=False)
    tweets = Tweets()
    streamer.run(tweets.with_coordinates(), stop)
    log.info("Twitter stream manager has stopped.")
def start():
    """Spawn the stream-manager greenlet, ensuring at most one is running."""
    global running
    if not running:
        running = True
        gevent.spawn(run)
def subscribe(websocket):
    """Register a websocket client to receive published tweets."""
    log.info("Subscribed a new websocket client.")
    pubsub.register(websocket)
def stop():
    """Stop the stream manager once no websocket clients remain.

    Returns True when a stop was requested, False otherwise.
    """
    global running
    if len(pubsub.websockets) == 0:
        log.info("Stopping Twitter stream manager.")
        running = False
        return True
    return False
|
nefarioustim/parker | test/test_fileops.py | Python | gpl-3.0 | 1,419 | 0 | # -*- coding: utf-8 -*-
"""Test the file operations."""
import os
import parker.fileops
# Fixture paths and values shared by the tests below.
TEST_FILE_PATH = "/tmp/test/file/path"
TEST_FILE = "/tmp/test.log"
TEST_STRING = "This is a string."
TEST_DICT = {
    "this": "dict"
}
TEST_DICT_LINE = '{"this": "dict"}'
TEST_CHUNK_STRING = 'MAGICUNICORNS'
EXPECTED_CHUNK_PATH = 'MAG/ICU/NIC/ORN/S'
def test_create_dirs_actually_creates_dirs():
    """Test fileops.create_dirs actually creates all dirs in path."""
    parker.fileops.create_dirs(TEST_FILE_PATH)
    assert os.path.isdir(TEST_FILE_PATH)
    # Remove the whole created chain so tests remain independent.
    os.removedirs(TEST_FILE_PATH)
def test_dump_string_to_file_dumps_to_file():
    """Test string is output, and get_line_from_file loads it back in."""
    parker.fileops.dump_string_to_file(TEST_STRING, TEST_FILE)
    # NOTE: generator.next() is Python 2 only; py3 would use next(...).
    line = parker.fileops.get_line_from_file(TEST_FILE).next()
    assert line == TEST_STRING
    os.remove(TEST_FILE)
def test_dump_dict_to_file():
    """Test dict is output, and get_line_from_file loads it back in."""
    parker.fileops.dump_dict_to_file(TEST_DICT, TEST_FILE)
    # The dict should round-trip as a single JSON line.
    line = parker.fileops.get_line_from_file(TEST_FILE).next()
    assert line == TEST_DICT_LINE
    os.remove(TEST_FILE)
def test_get_chunk_path_from_string():
    """Test fileops.get_chunk_path_from_string returns the expected path."""
    # Splits the string into 3-character path segments (see fixtures above).
    path = parker.fileops.get_chunk_path_from_string(
        TEST_CHUNK_STRING
    )
    assert path == EXPECTED_CHUNK_PATH
|
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_executor_check_feed.py | Python | apache-2.0 | 3,210 | 0 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy
import paddle.fluid.core as core
import paddle.fluid as fluid
class TestExecutor(unittest.TestCase):
    """Checks that Executor.run() rejects malformed feed values."""

    def net(self):
        """Build a small linear-regression network.

        Returns:
            (lr, avg_cost): the learning-rate input variable and the mean
            loss variable, so tests can feed ``lr`` and fetch the loss.
        """
        lr = fluid.data(name="lr", shape=[1], dtype='float32')
        x = fluid.data(name="x", shape=[None, 1], dtype='float32')
        y = fluid.data(name="y", shape=[None, 1], dtype='float32')
        y_predict = fluid.layers.fc(input=x, size=1, act=None)
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)
        opt = fluid.optimizer.Adam(learning_rate=lr)
        opt.minimize(avg_cost)
        return lr, avg_cost

    def test_program_check_feed(self):
        """Feeding a plain int for 'lr' must raise ValueError."""
        main_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.scope_guard(scope):
                cpu = fluid.CPUPlace()
                exe = fluid.Executor(cpu)
                lr, cost = self.net()
                exe.run(startup_program)
                train_data = [[1.0], [2.0], [3.0], [4.0]]
                y_true = [[2.0], [4.0], [6.0], [8.0]]  # unused; shows intent
                a = 0  # deliberately invalid feed value for 'lr'
                with self.assertRaises(ValueError):
                    exe.run(feed={'x': train_data,
                                  'lr': a},
                            fetch_list=[lr, cost],
                            return_numpy=False,
                            use_prune=True)

    def test_compiled_program_check_feed(self):
        """Same malformed-feed check, routed through a CompiledProgram."""
        main_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.scope_guard(scope):
                cpu = fluid.CPUPlace()
                exe = fluid.Executor(cpu)
                lr, cost = self.net()
                exe.run(startup_program)
                compiled_prog = fluid.CompiledProgram(
                    main_program).with_data_parallel(loss_name=cost.name)
                train_data = [[1.0], [2.0], [3.0], [4.0]]
                y_true = [[2.0], [4.0], [6.0], [8.0]]  # unused; shows intent
                a = 0  # deliberately invalid feed value for 'lr'
                with self.assertRaises(ValueError):
                    exe.run(compiled_prog,
                            feed={'x': train_data,
                                  'lr': a},
                            fetch_list=[lr, cost],
                            return_numpy=False,
                            use_prune=True)
# Allow the test module to be executed directly as a script.
if __name__ == '__main__':
    unittest.main()
|
danielvdao/facebookMacBot | venv/lib/python2.7/site-packages/sleekxmpp/plugins/base.py | Python | mit | 12,142 | 0 | # -*- encoding: utf-8 -*-
"""
sleekxmpp.plugins.base
~~~~~~~~~~~~~~~~~~~~~~
This module provides XMPP functionality that
is specific to client connections.
Part of SleekXMPP: The Sleek XMPP Library
:copyright: (c) 2012 Nathanael C. Fritz
:license: MIT, see LICENSE for more details
"""
import sys
import copy
import logging
import threading
if sys.version_info >= (3, 0):
unicode = str
log = logging.getLogger(__name__)
#: Associate short string names of plugins with implementations. The
#: plugin names are based on the spec used by the plugin, such as
#: `'xep_0030'` for a plugin that implements XEP-0030.
PLUGIN_REGISTRY = {}
#: In order to do cascading plugin disabling, reverse dependencies
#: must be tracked.
PLUGIN_DEPENDENTS = {}
#: Only allow one thread to manipulate the plugin registry at a time.
REGISTRY_LOCK = threading.RLock()
class PluginNotFound(Exception):
    """Raised when a plugin name has no known implementation."""
def register_plugin(impl, name=None):
    """Add a new plugin implementation to the registry.
    :param class impl: The plugin class.
    The implementation class must provide a :attr:`~BasePlugin.name`
    value that will be used as a short name for enabling and disabling
    the plugin. The name should be based on the specification used by
    the plugin. For example, a plugin implementing XEP-0030 would be
    named `'xep_0030'`.
    """
    if name is None:
        name = impl.name
    with REGISTRY_LOCK:
        PLUGIN_REGISTRY[name] = impl
        # Make sure a reverse-dependency set exists for this plugin, then
        # record this plugin as a dependent of each of its dependencies.
        PLUGIN_DEPENDENTS.setdefault(name, set())
        for dep in impl.dependencies:
            PLUGIN_DEPENDENTS.setdefault(dep, set()).add(name)
def load_plugin(name, module=None):
    """Find and import a plugin module so that it can be registered.
    This function is called to import plugins that have selected for
    enabling, but no matching registered plugin has been found.
    :param str name: The name of the plugin. It is expected that
                     plugins are in packages matching their name,
                     even though the plugin class name does not
                     have to match.
    :param str module: The name of the base module to search
                       for the plugin.
    """
    try:
        if not module:
            # No module given: look in sleekxmpp.plugins first, then
            # fall back to the sleekxmpp.features package.
            try:
                module = 'sleekxmpp.plugins.%s' % name
                __import__(module)
                mod = sys.modules[module]
            except ImportError:
                module = 'sleekxmpp.features.%s' % name
                __import__(module)
                mod = sys.modules[module]
        elif isinstance(module, (str, unicode)):
            # A dotted module path was supplied; import and resolve it.
            __import__(module)
            mod = sys.modules[module]
        else:
            # An already-imported module object was supplied directly.
            mod = module
        # Add older style plugins to the registry.
        if hasattr(mod, name):
            plugin = getattr(mod, name)
            if hasattr(plugin, 'xep') or hasattr(plugin, 'rfc'):
                plugin.name = name
                # Mark the plugin as an older style plugin so
                # we can work around dependency issues.
                plugin.old_style = True
                register_plugin(plugin, name)
    except ImportError:
        log.exception("Unable to load plugin: %s", name)
class PluginManager(object):
def __init__(self, xmpp, config=None):
#: We will track all enabled plugins in a set so that we
#: can enable plugins in batches and pull in dependencies
#: without problems.
self._enabled = set()
#: Maintain references to active plugins.
self._plugins = {}
self._plugin_lock = threading.RLock()
#: Globally set default plugin configuration. This will
#: be used for plugins that are auto-enabled through
#: dependency loading.
self.config = config if config else {}
self.xmpp = xmpp
def register(self, plugin, enable=True):
"""Register a new plugin, and optionally enable it.
:param class plugin: The implementation class of the plugin
to register.
:param bool enable: If ``True``, immediately enable the
plugin after registration.
"""
register_plugin(plugin)
if enable:
self.enable(plugin.name)
def enable(self, name, config=None, enabled=None):
"""Enable a plugin, including any dependencies.
:param string name: The short name of the plugin.
:param dict config: Optional settings dictionary for
configuring plugin behaviour.
"""
top_level = False
if enabled is None:
enabled = set()
with self._plugin_lock:
if name not in self._enabled:
enabled.add(name)
self._enabled.add(name)
if not self.registered(name):
load_plugin(name)
plugin_class = PLUGIN_REGISTRY.get(name, None)
if not plugin_class:
raise PluginNotFound(name)
if config is None:
config = self.config.get(name, None)
plugin = plugin_class(self.xmpp, config)
self._plugins[name] = plugin
for dep in plugin.dependencies:
self.enable(dep, enabled=enabled)
plugin._init()
if top_level:
for name in enabled:
if hasattr(self.plugins[name], 'old_style'):
# Older style plugins require post_init()
# to run just before stream processing begins,
# so we don't call it here.
pass
self.plugins[name].post_init()
def enable_all(self, names=None, config=None):
"""Enable all registered plugins.
:param list names: A list of plugin names to enable. If
none are provided, all registered plugins
will be enabled.
:param dict config: A dictionary mapping plugin names to
configuration dictionaries, as used by
:meth:`~PluginManager.enable`.
"""
names = names if names else PLUGIN_REGISTRY.keys()
if config is None:
config = {}
for name in names:
self.enable(name, config.get(name, {}))
    def enabled(self, name):
        """Check if a plugin has been enabled.
        :param string name: The name of the plugin to check.
        :return: boolean
        """
        # Membership in _enabled (not _plugins) is authoritative: names are
        # added there by enable() before the plugin object is constructed.
        return name in self._enabled
    def registered(self, name):
        """Check if a plugin has been registered.
        :param string name: The name of the plugin to check.
        :return: boolean
        """
        # Registration is tracked in the module-level registry shared by
        # every PluginManager instance.
        return name in PLUGIN_REGISTRY
def disable(self, name, _disabled=None):
"""Disable a plugin, including any dependent upon it.
:param string name: The name of the plugin to disable.
:param set _disabled: Private set used to track the
disabled status of plugins during
the cascading process.
"""
if _disabled is None:
_disabled = s | et()
with self._plugin_lock:
if name not in _disabled and name in self._enabled:
_disabled.add(name)
plugin = self._plugins.get(name, None)
if plugin is None:
raise PluginNotFound(name)
for dep in PLUGIN_DEPENDENTS[name]:
| self.disable(dep, _disabled)
plugin._end()
if name in self._enabled:
self._enabled.remove(name)
del self._plugins[name]
    def __keys__(self):
        """Return the set of enabled plugins."""
        # NOTE(review): '__keys__' is not a Python protocol method, so this
        # is only ever called explicitly; it returns the names of the
        # currently active plugin instances.
        return self._plugins.keys()
def __getitem__(self, name):
"""
Allow plugins to be accessed through the manager as if
it were |
vadim-ex/subcommand | cmd-lib/cmdutil/utils.py | Python | mit | 3,029 | 0.00066 | #!/usr/bin/env python3
import pathlib
import subprocess
import sys
def _exec(command, check):
"""
execute the `command`.
If `check` is True, the execution end if git root not found.
Otherwise `None` is returned.
"""
complete = subprocess.run(command, stdout=subprocess.PIPE, encoding="utf-8")
if complete.returncode == 0:
return complete.stdout[:-1]
elif check:
sys.exit(complete.returncode)
else:
return None
def git_path(check=True):
    """Locate the root directory of the enclosing git checkout.

    If `check` is True, the process exits when no git root is found.
    Otherwise `None` is returned.
    """
    root = _exec("git rev-parse --show-toplevel".split(), check)
    if not root:
        return root
    return pathlib.Path(root)
def git_ref(check=True):
    """Return a name for the current branch or tag.

    If `check` is True, the process exits when git fails.
    Otherwise `None` is returned.
    """
    branch = _exec("git rev-parse --abbrev-ref HEAD".split(), check)
    if branch != "HEAD":
        return branch
    # Detached HEAD: try to describe it; only 'tags/...' refs get a name.
    tag_ref = _exec("git describe --all".split(), check)
    if tag_ref.startswith("tags/"):
        return tag_ref[5:]
    return "HEAD"
def git_sha(check=True):
    """Return the SHA of the current HEAD commit.

    If `check` is True, the process exits when git fails.
    Otherwise `None` is returned.
    """
    sha = _exec("git rev-parse HEAD".split(), check)
    return sha
def git_dirty(check=True):
    """Report whether the working tree has uncommitted changes.

    Returns False when clean and True when dirty. On any other git exit
    status the process exits (``check`` true) or ``None`` is returned.
    """
    command = "git diff-index --quiet HEAD --".split()
    complete = subprocess.run(command, stdout=subprocess.PIPE, encoding="utf-8")
    status = complete.returncode
    if status == 0:
        return False
    if status == 1:
        return True
    if check:
        sys.exit(status)
    return None
def file_location(file_name, check=True):
    """Locate the nearest ancestor directory containing `file_name`.

    Walks upward from the current working directory until a directory
    that directly contains ``file_name`` is found.

    If `check` is True, the process exits when the file is not found.
    Otherwise `None` is returned.
    """
    current = pathlib.Path.cwd()
    while not (current / file_name).is_file():
        if len(current.parts) == 1:
            # Reached the filesystem root without finding the file.
            break
        current = current.parent
    if (current / file_name).is_file():
        return current
    elif check:
        print(f"expected file `{file_name}` not found")
        sys.exit(4)
    else:
        return None
def project_location(file_name, check=True):
    """Locate the nearest ancestor directory whose tree contains `file_name`.

    Starting from the current working directory, walk upward until a
    directory is found that contains ``file_name`` somewhere beneath it.

    If `check` is True, the process exits when the file is not found.
    Otherwise `None` is returned.
    """
    current = pathlib.Path.cwd()
    # glob() returns a lazy generator: it must be materialised before being
    # tested, because a generator object is always truthy (even when it
    # yields nothing) and is exhausted after a single iteration.
    projects = list(current.glob("**/" + file_name))
    while not projects and len(current.parts) > 1:
        current = current.parent
        projects = list(current.glob("**/" + file_name))
    if projects:
        return current
    elif check:
        print(f"expected file `{file_name}` not found")
        sys.exit(4)
    else:
        return None
|
jcsp/manila | manila/db/api.py | Python | apache-2.0 | 34,750 | 0 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
Functions in this module are imported into the manila.db namespace. Call these
functions from manila.db namespace, not the manila.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/manila/manila.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
from oslo_config import cfg
from oslo_db import api as db_api
# Configuration options controlling database backend selection and the
# templates used when generating share / snapshot names.
db_opts = [
    cfg.StrOpt('db_backend',
               default='sqlalchemy',
               help='The backend to use for database.'),
    cfg.BoolOpt('enable_new_services',
                default=True,
                help='Services to be added to the available pool on create.'),
    cfg.StrOpt('share_name_template',
               default='share-%s',
               help='Template string to be used to generate share names.'),
    cfg.StrOpt('share_snapshot_name_template',
               default='share-snapshot-%s',
               help='Template string to be used to generate share snapshot '
                    'names.'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
_BACKEND_MAPPING = {'sqlalchemy': 'manila.db.sqlalchemy.api'}
# IMPL lazily resolves to the concrete backend module named above; every
# function in this module is a thin pass-through to it.
IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
                                lazy=True)
###################
# -- Service operations ------------------------------------------------
# Each function below simply delegates to the active backend (IMPL); see
# the backend module for the concrete behavior.
def service_destroy(context, service_id):
    """Destroy the service or raise if it does not exist."""
    return IMPL.service_destroy(context, service_id)
def service_get(context, service_id):
    """Get a service or raise if it does not exist."""
    return IMPL.service_get(context, service_id)
def service_get_by_host_and_topic(context, host, topic):
    """Get a service by host it's on and topic it listens to."""
    return IMPL.service_get_by_host_and_topic(context, host, topic)
def service_get_all(context, disabled=None):
    """Get all services."""
    return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
    """Get all services for a given topic."""
    return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_by_host(context, host):
    """Get all services for a given host."""
    return IMPL.service_get_all_by_host(context, host)
def service_get_all_share_sorted(context):
    """Get all share services sorted by share count.
    :returns: a list of (Service, share_count) tuples.
    """
    return IMPL.service_get_all_share_sorted(context)
def service_get_by_args(context, host, binary):
    """Get the state of an service by node name and binary."""
    return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
    """Create a service from the values dictionary."""
    return IMPL.service_create(context, values)
def service_update(context, service_id, values):
    """Set the given properties on an service and update it.
    Raises NotFound if service does not exist.
    """
    return IMPL.service_update(context, service_id, values)
####################
# -- Quota, quota-class, quota-usage and reservation operations --------
# Thin pass-throughs to the active backend (IMPL).
def quota_create(context, project_id, resource, limit, user_id=None):
    """Create a quota for the given project and resource."""
    return IMPL.quota_create(context, project_id, resource, limit,
                             user_id=user_id)
def quota_get(context, project_id, resource, user_id=None):
    """Retrieve a quota or raise if it does not exist."""
    return IMPL.quota_get(context, project_id, resource, user_id=user_id)
def quota_get_all_by_project_and_user(context, project_id, user_id):
    """Retrieve all quotas associated with a given project and user."""
    return IMPL.quota_get_all_by_project_and_user(context, project_id, user_id)
def quota_get_all_by_project(context, project_id):
    """Retrieve all quotas associated with a given project."""
    return IMPL.quota_get_all_by_project(context, project_id)
def quota_get_all(context, project_id):
    """Retrieve all user quotas associated with a given project."""
    return IMPL.quota_get_all(context, project_id)
def quota_update(context, project_id, resource, limit, user_id=None):
    """Update a quota or raise if it does not exist."""
    return IMPL.quota_update(context, project_id, resource, limit,
                             user_id=user_id)
###################
def quota_class_create(context, class_name, resource, limit):
    """Create a quota class for the given name and resource."""
    return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
    """Retrieve a quota class or raise if it does not exist."""
    return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_default(context):
    """Retrieve all default quotas."""
    return IMPL.quota_class_get_default(context)
def quota_class_get_all_by_name(context, class_name):
    """Retrieve all quotas associated with a given quota class."""
    return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
    """Update a quota class or raise if it does not exist."""
    return IMPL.quota_class_update(context, class_name, resource, limit)
###################
def quota_usage_get(context, project_id, resource, user_id=None):
    """Retrieve a quota usage or raise if it does not exist."""
    return IMPL.quota_usage_get(context, project_id, resource, user_id=user_id)
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
    """Retrieve all usage associated with a given resource."""
    return IMPL.quota_usage_get_all_by_project_and_user(context,
                                                        project_id, user_id)
def quota_usage_get_all_by_project(context, project_id):
    """Retrieve all usage associated with a given resource."""
    return IMPL.quota_usage_get_all_by_project(context, project_id)
def quota_usage_create(context, project_id, user_id, resource, in_use,
                       reserved=0, until_refresh=None):
    """Create a quota usage."""
    return IMPL.quota_usage_create(context, project_id, user_id, resource,
                                   in_use, reserved, until_refresh)
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
    """Update a quota usage or raise if it does not exist."""
    return IMPL.quota_usage_update(context, project_id, user_id, resource,
                                   **kwargs)
###################
def reservation_create(context, uuid, usage, project_id, user_id, resource,
                       delta, expire):
    """Create a reservation for the given project and resource."""
    return IMPL.reservation_create(context, uuid, usage, project_id,
                                   user_id, resource, delta, expire)
def reservation_get(con |
nprapps/visits | etc/gdocs.py | Python | mit | 3,436 | 0.003492 | #!/usr/bin/env python
from exceptions import KeyError
import os
import requests
class GoogleDoc(object):
    """
    A class for accessing a Google document as an object.
    Includes the bits necessary for accessing the document and auth and such.
    For example:
    doc = {
        "key": "123456abcdef",
        "file_name": "my_google_doc",
        "gid": "2"
    }
    g = GoogleDoc(**doc)
    g.get_auth()
    g.get_document()
    Will download your google doc to data/file_name.format.
    """
    # You can update these values with kwargs.
    # In fact, you better pass a key or else it won't work!
    key = None
    file_format = 'xlsx'
    file_name = 'copy'
    gid = '0'
    # You can change these with kwargs but it's not recommended.
    spreadsheet_url = 'https://spreadsheets.google.com/feeds/download/spreadsheets/Export?key=%(key)s&exportFormat=%(format)s&gid=%(gid)s'
    new_spreadsheet_url = 'https://docs.google.com/spreadsheets/d/%(key)s/export?format=%(format)s&id=%(key)s&gid=%(gid)s'
    auth = None
    email = os.environ.get('APPS_GOOGLE_EMAIL', None)
    password = os.environ.get('APPS_GOOGLE_PASS', None)
    scope = "https://spreadsheets.google.com/feeds/"
    service = "wise"
    session = "1"
    def __init__(self, **kwargs):
        """
        Because sometimes, just sometimes, you need to update the class when you instantiate it.
        In this case, we need, minimally, a document key.
        """
        if kwargs:
            if kwargs.items():
                for key, value in kwargs.items():
                    setattr(self, key, value)
    def get_auth(self):
        """
        Gets an authorization token and adds it to the class.
        """
        data = {}
        if not self.email or not self.password:
            raise KeyError("Error! You're missing some variables. You need to export APPS_GOOGLE_EMAIL and APPS_GOOGLE_PASS.")
        else:
            data['Email'] = self.email
            data['Passwd'] = self.password
            data['scope'] = self.scope
            data['service'] = self.service
            data['session'] = self.session
            r = requests.post("https://www.google.com/accounts/ClientLogin", data=data)
            # NOTE(review): ClientLogin replies with Auth= on the third
            # line; this parsing assumes a str body (Python 2 client).
            self.auth = r.content.split('\n')[2].split('Auth=')[1]
    def get_document(self):
        """
        Uses the authentication token to fetch a google doc.
        """
        # Handle basically all the things that can go wrong.
        if not self.auth:
            raise KeyError("Error! You didn't get an auth token. Something very bad happened. File a bug?")
        elif not self.key:
            raise KeyError("Error! You forgot to pass a key to the class.")
        else:
            headers = {}
            headers['Authorization'] = "GoogleLogin auth=%s" % self.auth
            url_params = { 'key': self.key, 'format': self.file_format, 'gid': self.gid }
            url = self.spreadsheet_url % url_params
            r = requests.get(url, headers=headers)
            if r.status_code != 200:
                # Old-style export URL failed; retry with the new-style one.
                url = self.new_spreadsheet_url % url_params
                r = requests.get(url, headers=headers)
            if r.status_code != 200:
                raise KeyError("Error! Your Google Doc does not exist.")
            with open('data/%s.%s' % (self.file_name, self.file_format), 'wb') as writefile:
                writefile.write(r.content)
|
hezral/Rogu | reference/menubutton.py | Python | gpl-3.0 | 750 | 0.005333 | #!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class MenuButton(Gtk.Window):
    """A window containing a MenuButton whose popup menu lists five items."""

    def __init__(self):
        Gtk.Window.__init__(self)
        self.connect("destroy", Gtk.main_quit)
        button = Gtk.MenuButton("MenuButton")
        self.add(button)
        popup = Gtk.Menu()
        button.set_popup(popup)
        for number in range(1, 6):
            item = Gtk.MenuItem("Item %i" % (number))
            item.connect("activate", self.on_menuitem_activated)
            popup.append(item)
        popup.show_all()

    def on_menuitem_activated(self, menuitem):
        """Report which menu item was activated."""
        print("%s Activated" % (menuitem.get_label()))
# Build the demo window and hand control to the GTK main loop.
window = MenuButton()
window.show_all()
Gtk.main()
sassoftware/rbuild | rbuild/productstore/__init__.py | Python | apache-2.0 | 861 | 0 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy o | f the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language go | verning permissions and
# limitations under the License.
#
"""
Implementations of product stores for different implementations.
- C{abstract}: product store base class
- C{dirstore}: directory-based checkout as created by the C{rbuild init}
command
- C{decorators}: decorators for functions that work with the product store
"""
|
Yegor-Budnikov/cort | cort/core/external_data.py | Python | mit | 4,384 | 0.000228 | """ Read in and access data from external resources such as gender lists."""
import os
import pickle
import cort
from cort.core import singletons
from cort.core import util
__author__ = 'smartschat'
@singletons.Singleton
class GenderData:
    """ Read in and access data from lists with gender information.
    Attributes:
        word_to_gender (dict(str, str)): A mapping from lower-case strings
            to one of four genders: 'MALE', 'FEMALE', 'NEUTRAL' and 'PLURAL'.
    """
    def __init__(self):
        """ Initialize the word-to-gender mapping from gender lists.
        """
        self.word_to_gender = {}
        directory = cort.__path__[0] + "/resources/"
        gender_to_file = [
            ("MALE", "male.list"),
            ("FEMALE", "female.list"),
            ("NEUTRAL", "neutral.list"),
            ("PLURAL", "plural.list"),
        ]
        for gender, list_name in gender_to_file:
            # Use a context manager so each list file is closed instead of
            # leaking the open file handle.
            with open(directory + list_name) as gender_list:
                for word in gender_list.readlines():
                    self.word_to_gender[word.strip()] = gender
    def look_up(self, attributes):
        """ Look up the gender of a mention described by the input attributes.
        Args:
            attributes (dict(str,object)): A dict describing attributes of
                mentions. Must contain "tokens" and "head", which have lists
                of strings as values.
        Returns:
            (str): None or one of the four genders 'MALE', 'FEMALE',
                'NEUTRAL' or 'PLURAL'.
        """
        # whole string
        if " ".join(attributes["tokens"]).lower() in self.word_to_gender:
            return self.word_to_gender[" ".join(attributes["tokens"]).lower()]
        # head
        elif " ".join(attributes["head"]).lower() in self.word_to_gender:
            return self.word_to_gender[" ".join(attributes["head"]).lower()]
        # head token by token
        elif self.__look_up_token_by_token(attributes["head"]):
            return self.__look_up_token_by_token(attributes["head"])
    def __look_up_token_by_token(self, tokens):
        # Only capitalised tokens are considered; returns None implicitly
        # when no token matches.
        for token in tokens:
            if token[0].isupper() and token.lower() in self.word_to_gender:
                return self.word_to_gender[token.lower()]
@singletons.Singleton
class LexicalData:
    """ Read in and access data containing pairs of coreferent mention strings.
    Attributes:
        pairs (set((str, str))): A set of string pairs, which represent strings
            of potentially coreferent mentions.
    """
    def __init__(self):
        """ Initialize the set of pairs from
        package_root/resources/coreferent_pairs.obj.
        """
        directory = cort.__path__[0] + "/resources/"
        # Close the pickle file after loading instead of leaking the handle.
        with open(directory + "coreferent_pairs.obj", "rb") as pairs_file:
            self.pairs = pickle.load(pairs_file)
    def look_up(self, anaphor, antecedent):
        """ Look up strings of the mentions in the pair list.
        Args:
            anaphor (Mention): A mention.
            antecedent (Mention): Another mention, the candidate antecedent
                for anaphor.
        Returns:
            True if the pair of strings corresponding to anaphor of
            antecedent, stripped determiners and possessive s, can be found
            in the list of pairs.
        """
        # whole string
        anaphor_cleaned = " ".join(
            util.clean_via_pos(anaphor.attributes["tokens"],
                               anaphor.attributes["pos"]))
        antecedent_cleaned = " ".join(
            util.clean_via_pos(antecedent.attributes["tokens"],
                               antecedent.attributes["pos"]))
        # The pair list is unordered, so check both orientations.
        return (
            (anaphor_cleaned, antecedent_cleaned) in self.pairs
            or (antecedent_cleaned, anaphor_cleaned) in self.pairs
        )
@singletons.Singleton
class SingletonMentions:
    """ Read in and access data strings of singleton mentions.
    Attributes:
        singletons (set(str)): A set of strings, which represent strings
            of potential singleton mentions.
    """
    def __init__(self):
        """ Initialize the set of singletons from
        package_root/resources/singletons_not_cleaned.obj.
        """
        directory = cort.__path__[0] + "/resources/"
        # Close the pickle file after loading instead of leaking the handle.
        with open(directory + "singletons_not_cleaned.obj", "rb") as data_file:
            self.singletons = pickle.load(data_file)
|
elkingtowa/azove | azove/packet.py | Python | mit | 9,338 | 0 | import logging
import rlp
from utils import big_endian_to_int as idec
from utils import int_to_big_endian4 as ienc4
from utils import int_to_big_endian as ienc
from utils import recursive_int_to_big_endian
import dispatch
import sys
import signals
logger = logging.getLogger(__name__)
def lrlp_decode(data):
"always return a list"
d = rlp.decode(data)
if isinstance(d, str):
d = [d]
return d
def load_packet(packet):
    # Module-level convenience alias for Packeter.load_packet().
    return Packeter.load_packet(packet)
class Packeter(object):
"""
Translates between the network and the local data
https://github.com/ethereum/wiki/wiki/%5BEnglish%5D-Wire-Protocol
stateless!
.. note::
#. Can only be used after the `config` method is called
'''
"""
cmd_map = dict(((0x00, 'Hello'),
(0x01, 'Disconnect'),
(0x02, 'Ping'),
(0x03, 'Pong'),
(0x10, 'GetPeers'),
(0x11, 'Peers'),
(0x12, 'Transactions'),
(0x13, 'Blocks'),
(0x14, 'GetChain'),
(0x15, 'NotInChain'),
(0x16, 'GetTransactions')))
cmd_map_by_name = dict((v, k) for k, v in cmd_map.items())
disconnect_reasons_map = dict((
('Disconnect requested', 0x00),
('TCP sub-system error', 0x01),
('Bad protocol', 0x02),
('Useless peer', 0x03),
('Too many peers', 0x04),
('Already connected', 0x05),
('Wrong genesis block', 0x06),
('Incompatible network protocols', 0x07),
('Client quitting', 0x08)))
disconnect_reasons_map_by_id = \
dict((v, k) for k, v in disconnect_reasons_map.items())
SYNCHRONIZATION_TOKEN = 0x22400891
PROTOCOL_VERSION = 19
# is the node s Unique Identifier and is the 512-bit hash that serves to
# identify the node.
NETWORK_ID = 0
CLIENT_ID = 'Ethereum(py)/0.5.2/%s/Protocol:%d' % (sys.platform,
PROTOCOL_VERSION)
CAPABILITIES = 0x01 + 0x02 + 0x04 # node discovery + transaction relaying
    def __init__(self):
        # Stateless until configure() supplies the runtime configuration.
        pass
    def configure(self, config):
        # Attach runtime configuration; class-level CLIENT_ID is kept as a
        # fallback when the config value is empty.
        self.config = config
        self.CLIENT_ID = self.config.get('network', 'client_id') \
            or self.CLIENT_ID
        self.NODE_ID = self.config.get('network', 'node_id')
    @classmethod
    def load_packet(cls, packet):
        '''
        Though TCP provides a connection-oriented medium, Ethereum nodes
        communicate in terms of packets. These packets are formed as a 4-byte
        synchronisation token (0x22400891), a 4-byte "payload size", to be
        interpreted as a big-endian integer and finally an N-byte
        RLP-serialised data structure, where N is the aforementioned
        "payload size". To be clear, the payload size specifies the number of
        bytes in the packet ''following'' the first 8.
        :return: (success, result), where result should be None when fail,
        and (header, payload_len, cmd, data) when success
        '''
        # 4-byte synchronisation token must match before anything else.
        header = idec(packet[:4])
        if header != cls.SYNCHRONIZATION_TOKEN:
            return False, 'check header failed, skipping message,'\
                'sync token was hex: {0:x}'.format(header)
        # 4-byte big-endian payload length.
        try:
            payload_len = idec(packet[4:8])
        except Exception as e:
            return False, str(e)
        if len(packet) < payload_len + 8:
            return False, 'Packet is broken'
        # RLP-decode the payload; the first item is the command id.
        try:
            payload = lrlp_decode(packet[8:8 + payload_len])
        except Exception as e:
            return False, str(e)
        if (not len(payload)) or (idec(payload[0]) not in cls.cmd_map):
            return False, 'check cmd failed'
        cmd = Packeter.cmd_map.get(idec(payload[0]))
        # Any bytes beyond the declared payload belong to the next packet.
        remain = packet[8 + payload_len:]
        return True, (header, payload_len, cmd, payload[1:], remain)
def load_cmd(self, packet):
success, res = self.load_packet(packet)
if not success:
raise Exception(res)
_, _, cmd, data, remain = res
return cmd, data, remain
    @classmethod
    def dump_packet(cls, data):
        """
        4-byte synchronisation token, (0x22400891),
        a 4-byte "payload size", to be interpreted as a big-endian integer
        an N-byte RLP-serialised data structure
        """
        # RLP-encode the payload (integers converted to big-endian first),
        # then prepend the sync token and the payload length.
        payload = rlp.encode(recursive_int_to_big_endian(data))
        packet = ienc4(cls.SYNCHRONIZATION_TOKEN)
        packet += ienc4(len(payload))
        packet += payload
        return packet
    def dump_Hello(self):
        # inconsistency here!
        # spec says CAPABILITIES, LISTEN_PORT but code reverses
        """
        [0x00, PROTOCOL_VERSION, NETWORK_ID, CLIENT_ID, CAPABILITIES,
        LISTEN_PORT, NODE_ID]
        First packet sent over the connection, and sent once by both sides.
        No other messages may be sent until a Hello is received.
        PROTOCOL_VERSION is one of:
        0x00 for PoC-1;
        0x01 for PoC-2;
        0x07 for PoC-3.
        0x08 sent by Ethereum(++)/v0.3.11/brew/Darwin/unknown
        NETWORK_ID should be 0.
        CLIENT_ID Specifies the client software identity, as a human-readable
        string (e.g. "Ethereum(++)/1.0.0").
        LISTEN_PORT specifies the port that the client is listening on
        (on the interface that the present connection traverses).
        If 0 it indicates the client is not listening.
        CAPABILITIES specifies the capabilities of the client as a set of
        flags; presently three bits are used:
        0x01 for peers discovery,
        0x02 for transaction relaying,
        0x04 for block-chain querying.
        NODE_ID is optional and specifies a 512-bit hash, (potentially to be
        used as public key) that identifies this node.
        """
        # NOTE: listen_port is placed before CAPABILITIES here, matching the
        # wire order this client actually emits (see comment above).
        data = [self.cmd_map_by_name['Hello'],
                self.PROTOCOL_VERSION,
                self.NETWORK_ID,
                self.CLIENT_ID,
                self.config.getint('network', 'listen_port'),
                self.CAPABILITIES,
                self.NODE_ID
                ]
        return self.dump_packet(data)
    def dump_Ping(self):
        # 0x02 per cmd_map.
        data = [self.cmd_map_by_name['Ping']]
        return self.dump_packet(data)
    def dump_Pong(self):
        # 0x03 per cmd_map.
        data = [self.cmd_map_by_name['Pong']]
        return self.dump_packet(data)
    def dump_Disconnect(self, reason=None):
        # 0x01; an optional reason code from disconnect_reasons_map may
        # be appended.
        data = [self.cmd_map_by_name['Disconnect']]
        if reason:
            data.append(self.disconnect_reasons_map[reason])
        return self.dump_packet(data)
    def dump_GetPeers(self):
        # 0x10 per cmd_map.
        data = [self.cmd_map_by_name['GetPeers']]
        return self.dump_packet(data)
def dump_Peers(self, peers):
'''
:param peers: a sequence of (ip, port, pid)
:return: None if no peers
'''
data = [self.cmd_map_by_name['Peers']]
for ip, port, pid in p | eers:
assert ip.count('.') == 3
ip = ''.join(chr(int(x)) for x in ip.split('.'))
data.append([ip, port, pid])
return self.dump_packet(data)
def dump_Transactions(self, transactions):
    """Build a Transactions packet: command id followed by each tx."""
    payload = [self.cmd_map_by_name['Transactions']]
    payload.extend(transactions)
    return self.dump_packet(payload)
def dump_GetTransactions(self):
    """
    Build a GetTransactions packet (command id only, no payload).

    Asks the peer to send the transactions currently in its queue.
    NOTE(review): the previous docstring here described the 0x12
    Transactions payload format and appeared copy-pasted from
    dump_Transactions (and was garbled); confirm the exact request
    semantics against the wire-protocol spec.
    """
    data = [self.cmd_map_by_name['GetTransactions']]
    return self.dump_packet(data)
def dump_Blocks(self, blocks):
    """Build a Blocks packet: each block is serialized then RLP-decoded
    back into nested lists before packing."""
    # FIXME, can we have a method to append rlp encoded data
    blocks_as_lists = [rlp.decode(blk.serialize()) for blk in blocks]
    return self.dump_packet([self.cmd_map_by_name['Blocks']] + blocks_as_lists)
def dump_GetChain(self, parent_hashes=[], count=1):
" |
lkundrak/scraperwiki | web/codewiki/tests/models.py | Python | agpl-3.0 | 322 | 0.006211 | from django.test import TestCase
from codewiki.models import Scraper
from django.contrib.auth.models import User


class Test__unicode__(TestCase):
    """unicode(Scraper) should render as the scraper's short_name."""

    def test_scraper_name(self):
        self.assertEqual(
            'test_scraper',
            unicode(Scraper(title='Test Scraper', short_name='test_scraper'))
        )
Artemkaaas/indy-sdk | docs/how-tos/issue-credential/python/step4.py | Python | apache-2.0 | 2,869 | 0.00488 | # 14.
print_log('\n14. Issuer (Trust Anchor) is creating a Credential Offer for Prover\n')
cred_offer_json = await anoncreds.issuer_create_credential_offer(issuer_wallet_handle,
cred_def_id)
print_log('Credential Offer: ')
pprint.pprint(json.loads(cred_offer_json))
# 15.
print_log('\n15. Prover creates Credential Request for the given credential offer\n')
(cred_req_json, cred_req_metadata_json) = \
await anoncreds.prover_create_credential_req(prover_wallet_handle,
prover_did,
cred_offer_json,
cred_def_json,
prover_link_secret_name)
print_log('Credential Request: ')
pprint.pprint(json.loads(cred_req_json))
# 16.
print_log('\n16. Issuer (Trust Anchor) creates Credential for Credential Request\n')
cred_values_json = json.dumps({
"sex": {"raw": "male", "encoded": "5944657099558967239210949258394887428692050081607692519917050011144233"},
"name": {"raw": "Alex", "encoded": "1139481716457488690172217916278103335"},
"height": {"raw": "175", "encoded": "175"},
"age": {"raw": "28", "encoded": "28"}
})
(cred_json, _, _) = \
await anoncreds.issuer_create_credential(issuer_wallet_handle,
cred_offer_json,
cred_req_json,
cred_values_json, None, None)
print_log('Credential: ')
pprint.pprint(json.loads(cred_json))
# 17.
print_log('\n17. Prover processes and stores received Credential\n')
await anoncreds.prover_store_credential(prover_wallet_handle, None,
cred_req_metadata_json,
cred_json,
cred_def_json, None)
# 18.
print_log('\n18. Closing both wallet_handles and pool\n')
await wallet.close_wallet(issuer_wallet_handle)
await wallet.close_wallet(prover_wallet_handle)
await pool.close_pool_ledger(pool_handle)
# 19.
print_log('\n19. Deleting created wallet_handles | \n')
await wallet.delete_wallet(issuer_wallet_config, issuer_wallet_credentials)
await wallet.delete_wallet(prover_w | allet_config, prover_wallet_credentials)
# 20.
print_log('\n20. Deleting pool ledger config\n')
await pool.delete_pool_ledger_config(pool_name) |
arevaloarboled/Clases_2015 | topics/threads/Examples/python/basic.py | Python | gpl-2.0 | 203 | 0.014778 | import threading
def worker | ():
"""thre | ad worker function"""
print 'Worker'
return
# Spawn five daemon-less threads running worker(); keep references in
# `threads` (they are started but never joined here).
threads = []
for i in range(5):
    t = threading.Thread(target=worker)
    threads.append(t)
    t.start()
def output_gpx(points, output_filename):
    """
    Output a GPX file with latitude and longitude from the points DataFrame.

    `points` must have 'lat' and 'lon' columns; each row becomes one
    <trkpt> element with 8-decimal-place coordinates.
    """
    from xml.dom.minidom import getDOMImplementation

    def append_trkpt(pt, trkseg, doc):
        # One <trkpt lat="..." lon="..."/> per DataFrame row.
        trkpt = doc.createElement('trkpt')
        trkpt.setAttribute('lat', '%.8f' % (pt['lat']))
        trkpt.setAttribute('lon', '%.8f' % (pt['lon']))
        trkseg.appendChild(trkpt)

    # GPX skeleton: <gpx><trk><trkseg>...points...</trkseg></trk></gpx>
    doc = getDOMImplementation().createDocument(None, 'gpx', None)
    trk = doc.createElement('trk')
    doc.documentElement.appendChild(trk)
    trkseg = doc.createElement('trkseg')
    trk.appendChild(trkseg)
    points.apply(append_trkpt, axis=1, trkseg=trkseg, doc=doc)
    with open(output_filename, 'w') as fh:
        doc.writexml(fh, indent=' ')
def main():
    # Load GPS points from the file named on the command line
    # (get_data, distance and smooth are defined elsewhere in this file).
    points = get_data(sys.argv[1])
    print('Unfiltered distance: %0.2f' % (distance(points),))
    # Smooth the track first, then re-measure; the filtered distance
    # should be shorter because GPS noise inflates the raw path length.
    smoothed_points = smooth(points)
    print('Filtered distance: %0.2f' % (distance(smoothed_points),))
    output_gpx(smoothed_points, 'out.gpx')


if __name__ == '__main__':
    main()
stripe/stripe-python | tests/test_integration.py | Python | mit | 9,930 | 0 | from __future__ import absolute_import, division, print_function
import platform
import sys
from threading import Thread, Lock
import json
import warnings
import time
import stripe
import pytest
# These integration tests spin up a real HTTP server; skip the whole
# module on PyPy.
if platform.python_implementation() == "PyPy":
    pytest.skip("skip integration tests with PyPy", allow_module_level=True)

# Import the stdlib HTTP server under its Python 2 or Python 3 name.
if sys.version_info[0] < 3:
    from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
else:
    from http.server import BaseHTTPRequestHandler, HTTPServer
class TestIntegration(object):
@pytest.fixture(autouse=True)
def close_mock_server(self):
yield
if self.mock_server:
| self.mock_server.shutdown()
self.mock_server.server_close()
self.mock_server_thread.join()
    @pytest.fixture(autouse=True)
    def setup_stripe(self):
        # Snapshot the module-level stripe configuration so each test can
        # mutate it freely and have it restored afterwards.
        orig_attrs = {
            "api_base": stripe.api_base,
            "api_key": stripe.api_key,
            "default_http_client": stripe.default_http_client,
            "enable_telemetry": stripe.enable_telemetry,
            "max_network_retries": stripe.max_network_retries,
            "proxy": stripe.proxy,
        }
        stripe.api_base = "http://localhost:12111"  # stripe-mock
        stripe.api_key = "sk_test_123"
        stripe.default_http_client = None
        stripe.enable_telemetry = False
        stripe.max_network_retries = 3
        stripe.proxy = None
        yield
        # Restore the snapshot taken above.
        stripe.api_base = orig_attrs["api_base"]
        stripe.api_key = orig_attrs["api_key"]
        stripe.default_http_client = orig_attrs["default_http_client"]
        stripe.enable_telemetry = orig_attrs["enable_telemetry"]
        stripe.max_network_retries = orig_attrs["max_network_retries"]
        stripe.proxy = orig_attrs["proxy"]
def setup_mock_server(self, handler):
# Configure mock server.
# Passing 0 as the port will cause a random free port to be chosen.
self.mock_server = HTTPServer(("localhost", 0), handler)
_, self.mock_server_port = self.mock_server.server_address
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
self.mock_server_thread = Thread(target=self.mock_server.serve_forever)
self.mock_server_thread.setDaemon(True)
self.mock_server_thread.start()
    def test_hits_api_base(self):
        # Minimal handler: count GET requests and return an empty JSON body.
        class MockServerRequestHandler(BaseHTTPRequestHandler):
            num_requests = 0

            def do_GET(self):
                self.__class__.num_requests += 1
                self.send_response(200)
                self.send_header(
                    "Content-Type", "application/json; charset=utf-8"
                )
                self.end_headers()
                self.wfile.write(json.dumps({}).encode("utf-8"))
                return

        self.setup_mock_server(MockServerRequestHandler)
        # Point the SDK's api_base at the mock server; one API call should
        # produce exactly one request.
        stripe.api_base = "http://localhost:%s" % self.mock_server_port
        stripe.Balance.retrieve()
        assert MockServerRequestHandler.num_requests == 1
    def test_hits_proxy_through_default_http_client(self):
        # Minimal handler: count GET requests and return an empty JSON body.
        class MockServerRequestHandler(BaseHTTPRequestHandler):
            num_requests = 0

            def do_GET(self):
                self.__class__.num_requests += 1
                self.send_response(200)
                self.send_header(
                    "Content-Type", "application/json; charset=utf-8"
                )
                self.end_headers()
                self.wfile.write(json.dumps({}).encode("utf-8"))
                return

        self.setup_mock_server(MockServerRequestHandler)
        # Route traffic through the mock server acting as a proxy.
        stripe.proxy = "http://localhost:%s" % self.mock_server_port
        stripe.Balance.retrieve()
        assert MockServerRequestHandler.num_requests == 1

        # Changing stripe.proxy after a request should only warn — the
        # already-built default client keeps using the original proxy,
        # so the second call still reaches the mock server.
        stripe.proxy = "http://bad-url"
        with warnings.catch_warnings(record=True) as w:
            stripe.Balance.retrieve()
            assert len(w) == 1
            assert "stripe.proxy was updated after sending a request" in str(
                w[0].message
            )
        assert MockServerRequestHandler.num_requests == 2
    def test_hits_proxy_through_custom_client(self):
        # Minimal handler: count GET requests and return an empty JSON body.
        class MockServerRequestHandler(BaseHTTPRequestHandler):
            num_requests = 0

            def do_GET(self):
                self.__class__.num_requests += 1
                self.send_response(200)
                self.send_header(
                    "Content-Type", "application/json; charset=utf-8"
                )
                self.end_headers()
                self.wfile.write(json.dumps({}).encode("utf-8"))
                return

        self.setup_mock_server(MockServerRequestHandler)
        # An explicitly-constructed client with a proxy should route the
        # request through the mock server.
        stripe.default_http_client = (
            stripe.http_client.new_default_http_client(
                proxy="http://localhost:%s" % self.mock_server_port
            )
        )
        stripe.Balance.retrieve()
        assert MockServerRequestHandler.num_requests == 1
def test_passes_client_telemetry_when_enabled(self):
class MockServerRequestHandler(BaseHTTPRequestHandler):
num_requests = 0
def do_GET(self):
try:
self.__class__.num_requests += 1
req_num = self.__class__.num_requests
if req_num == 1:
time.sleep(31 / 1000) # 31 ms
assert not self.headers.get(
"X-Stripe-Client-Telemetry"
)
elif req_num == 2:
assert self.headers.get("X-Stripe-Client-Telemetry")
telemetry = json.loads(
self.headers.get("x-stripe-client-telemetry")
)
assert "last_request_metrics" in telemetry
req_id = telemetry["last_request_metrics"][
"request_id"
]
duration_ms = telemetry["last_request_metrics"][
"request_duration_ms"
]
assert req_id == "req_1"
# The first request took 31 ms, so the client perceived
# latency shouldn't be outside this range.
assert 30 < duration_ms < 300
else:
assert False, (
"Should not have reached request %d" % req_num
)
self.send_response(200)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.send_header("Request-Id", "req_%d" % req_num)
self.end_headers()
self.wfile.write(json.dumps({}).encode("utf-8"))
except AssertionError as ex:
# Throwing assertions on the server side causes a
# connection error to be logged instead of an assertion
# failure. Instead, we return the assertion failure as
# json so it can be logged as a StripeError.
self.send_response(400)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.end_headers()
self.wfile.write(
json.dumps(
{
"error": {
"type": "invalid_request_error",
"message": str(ex),
}
}
).encode("utf-8")
)
self.setup_mock_server(MockServerRequestHandler)
stripe.api_base = "http://localhost:%s" % self.mock_server_port
stripe.enable_telemetry = True
stripe.Balance.retrieve()
stripe.Balance.retrieve()
assert MockServerRequestHandler.num_requests == |
softlayer/softlayer-cinder-driver | slos/test/fixtures/Billing_Item.py | Python | mit | 16 | 0 | c | ancelItem | = {}
|
edespino/gpdb | gpMgmt/bin/gppylib/test/unit/test_unit_gpssh.py | Python | apache-2.0 | 1,292 | 0.002322 | import imp
import os
import io
import sys
from mock import patch
from gp_unittest import GpTestCase
class GpSshTestCase(GpTestCase):

    def setUp(self):
        # because gpssh does not have a .py extension, we have to use imp to
        # import it
        # if we had a gpssh.py, this is equivalent to:
        #   import gpssh
        #   self.subject = gpssh
        gpssh_file = os.path.abspath(os.path.dirname(__file__) + "/../../../gpssh")
        self.subject = imp.load_source('gpssh', gpssh_file)
        # Save/clear argv so each test controls gpssh's command line.
        self.old_sys_argv = sys.argv
        sys.argv = []

    def tearDown(self):
        sys.argv = self.old_sys_argv

    @patch('sys.exit')
    def test_when_run_without_args_prints_help_text(self, sys_exit_mock):
        sys_exit_mock.side_effect = Exception("on purpose")
        # GOOD_MOCK_EXAMPLE of stdout
        with patch('sys.stdout', new=io.BytesIO()) as mock_stdout:
            with self.assertRaisesRegexp(Exception, "on purpose"):
                self.subject.main()
            self.assertIn('gpssh -- ssh access to multiple hosts at once', mock_stdout.getvalue())

    @patch('sys.exit')
    def test_happy_ssh_to_localhost_succeeds(self, sys_mock):
        sys.argv = ['', '-h', 'localhost', 'uptime']
        self.subject.main()
        sys_mock.assert_called_with(0)
|
roopali8/keystone | keystone/tests/unit/test_token_provider.py | Python | apache-2.0 | 29,676 | 0 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_config import cfg
from oslo_utils import timeutils
from keystone.common import dependency
from keystone.common import utils
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit.ksfixtures import database
from keystone import token
from keystone.token.providers import fernet
from keystone.token.providers import pki
from keystone.token.providers import pkiz
from keystone.token.providers import uuid
CONF = cfg.CONF
# Offset equal to the configured token lifetime — presumably used below to
# build expiry timestamps relative to "now" (verify against usage).
FUTURE_DELTA = datetime.timedelta(seconds=CONF.token.expiration)
# Captured once at import time so all sample payloads share the same "now".
CURRENT_DATE = timeutils.utcnow()
SAMPLE_V2_TOKEN = {
"access": {
"trust": {
"id": "abc123",
"trustee_user_id": "123456",
"trustor_user_id": "333333",
"impersonation": False
},
"serviceCatalog": [
{
"endpoints": [
{
"adminURL": "http://localhost:8774/v1.1/01257",
"id": "51934fe63a5b4ac0a32664f64eb462c3",
"internalURL": "http://localhost:8774/v1.1/01257",
"publicURL": "http://localhost:8774/v1.1/01257",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "nova",
"type": "compute"
},
{
"endpoints": [
{
"adminURL": "http://localhost:9292",
"id": "aaa17a539e364297a7845d67c7c7cc4b",
"internalURL": "http://localhost:9292",
"publicURL": "http://localhost:9292",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "glance",
"type": "image"
},
{
"endpoints": [
{
"adminURL": "http://localhost:8776/v1/01257",
"id": "077d82df25304abeac2294004441db5a",
"internalURL": "http://localhost:8776/v1/01257",
"publicURL": "http://localhost:8776/v1/01257",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "volume",
"type": "volume"
},
{
"endpoints": [
{
"adminURL": "http://localhost:8773/services/Admin",
"id": "b06997fd08414903ad458836efaa9067",
"internalURL": "http://localhost: | 8773/services/Cloud",
"publicURL": "http://localhost:8773/services/Cloud",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "ec2",
"type": "ec2"
},
{
"endpoints": [
{
"adminURL": "http://localhost:8080/v1",
"id": "7bd0c643e05a4a2ab40902b2fa0dd4e6" | ,
"internalURL": "http://localhost:8080/v1/AUTH_01257",
"publicURL": "http://localhost:8080/v1/AUTH_01257",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "swift",
"type": "object-store"
},
{
"endpoints": [
{
"adminURL": "http://localhost:35357/v2.0",
"id": "02850c5d1d094887bdc46e81e1e15dc7",
"internalURL": "http://localhost:5000/v2.0",
"publicURL": "http://localhost:5000/v2.0",
"region": "RegionOne"
}
],
"endpoints_links": [],
"name": "keystone",
"type": "identity"
}
],
"token": {
"expires": "2013-05-22T00:02:43.941430Z",
"id": "ce4fc2d36eea4cc9a36e666ac2f1029a",
"issued_at": "2013-05-21T00:02:43.941473Z",
"tenant": {
"enabled": True,
"id": "01257",
"name": "service"
}
},
"user": {
"id": "f19ddbe2c53c46f189fe66d0a7a9c9ce",
"name": "nova",
"roles": [
{
"name": "_member_"
},
{
"name": "admin"
}
],
"roles_links": [],
"username": "nova"
}
}
}
SAMPLE_V3_TOKEN = {
"token": {
"catalog": [
{
"endpoints": [
{
"id": "02850c5d1d094887bdc46e81e1e15dc7",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:35357/v2.0"
},
{
"id": "446e244b75034a9ab4b0811e82d0b7c8",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:5000/v2.0"
},
{
"id": "47fa3d9f499240abb5dfcf2668f168cd",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:5000/v2.0"
}
],
"id": "26d7541715a44a4d9adad96f9872b633",
"type": "identity",
},
{
"endpoints": [
{
"id": "aaa17a539e364297a7845d67c7c7cc4b",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:9292"
},
{
"id": "4fa9620e42394cb1974736dce0856c71",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:9292"
},
{
"id": "9673687f9bc441d88dec37942bfd603b",
"interface": "public",
"region": "RegionOne",
"url": "http://localhost:9292"
}
],
"id": "d27a41843f4e4b0e8cf6dac4082deb0d",
"type": "image",
},
{
"endpoints": [
{
"id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
"interface": "admin",
"region": "RegionOne",
"url": "http://localhost:8080/v1"
},
{
"id": "43bef154594d4ccb8e49014d20624e1d",
"interface": "internal",
"region": "RegionOne",
"url": "http://localhost:8080/v1/AUTH_01257"
},
{
"id": "e63b5f5d7aa3493690189d0ff843b9b3",
"interface": "public",
"region": "RegionOne",
|
FannyCheung/python_Machine-Learning | MapReduce处理日志文件/Reduce.py | Python | gpl-2.0 | 1,172 | 0.064607 | # coding : utf-8
#file: Reduce.py
import os,os.path,re
def Reduce(sourceFolder, targetFile):
    """Merge word counts from every '*_map.txt' file under sourceFolder.

    Each input line looks like '<key> <count>'.  The combined counts are
    appended, sorted by count descending, to
    '<targetFile>_reduce.txt' inside sourceFolder.

    Bug fixed: the parameter was previously misspelled 'sourceFoler'
    while the body read 'sourceFolder', raising NameError on every call.
    """
    tempData = {}  # accumulated counts: key -> total
    # Lazily capture everything before the trailing digits of each line.
    p_re = re.compile(r'(.*?)(\d{1,}$)', re.IGNORECASE)
    for root, dirs, files in os.walk(sourceFolder):
        for fil in files:
            if fil.endswith('_map.txt'):  # only consume mapper output files
                sFile = open(os.path.abspath(os.path.join(root, fil)), 'r')
                dataLine = sFile.readline()
                while dataLine:  # until EOF
                    subdata = p_re.findall(dataLine)
                    if subdata[0][0] in tempData:
                        tempData[subdata[0][0]] += int(subdata[0][1])
                    else:
                        tempData[subdata[0][0]] = int(subdata[0][1])
                    dataLine = sFile.readline()  # read next line
                sFile.close()
    tList = []
    for key, value in sorted(tempData.items(), key=lambda k: k[1], reverse=True):
        tList.append(key + ' ' + str(value) + '\n')
    tFilename = os.path.join(sourceFolder, targetFile + '_reduce.txt')
    tFile = open(tFilename, 'a+')  # append: repeated runs accumulate (original behavior)
    tFile.writelines(tList)
    tFile.close()


if __name__ == '__main__':
    Reduce('access', 'access')
brianmay/spud | spud/tests/b_integration/test_photos.py | Python | gpl-3.0 | 3,320 | 0.000301 | import base64
import os
import pytest
import six
from pytest_bdd import parsers, scenarios, then, when
from spud import media, models
scenarios('photos.feature')
@when('we create a photo called <name> using <filename>')
def step_create_photo(session, name, filename, data_files):
    """POST a new photo, uploading <filename> from the data_files dir."""
    url = "/api/photos/"
    path = os.path.join(data_files, filename)
    data = {
        'title': name,
        'description': 'description',
        'utc_offset': 660,
        'datetime': '2012-12-20 12:34:56',
        'level': 0,
        # sha256 of the file being uploaded (presumably verified server-side).
        'sha256_hash': base64.encodebytes(media.get_media(path).get_sha256_hash()),
    }
    files = {'photo': open(path, 'rb')}
    session.post(url, data=data, files=files)
@when('we update a photo called <name>')
def step_update_photo(session, photos, name):
    """PUT a full replacement payload for the named photo."""
    target = models.photo.objects.get(title=name)
    payload = {
        'title': name,
        'description': 'new description',
        'utc_offset': 660,
        'datetime': '2012-12-20 12:34:56',
        'level': 0,
    }
    session.put("/api/photos/%d/" % target.id, json=payload)
@when('we patch a photo called <name>')
def step_patch_photo(session, photos, name):
    """PATCH only the description of the named photo."""
    target = models.photo.objects.get(title=name)
    session.patch(
        "/api/photos/%d/" % target.id,
        json={'description': 'new description'},
    )
@when('we get a photo called <name>')
def step_get_photo(session, photos, name):
    """GET the detail endpoint of the named photo."""
    target = models.photo.objects.get(title=name)
    session.get("/api/photos/%d/" % target.id)
@when('we delete a photo called <name>')
def step_delete_photo(session, photos, name):
    """DELETE the named photo via its detail endpoint."""
    target = models.photo.objects.get(title=name)
    session.delete("/api/photos/%d/" % target.id)
@when('we list all photos')
def step_list_photos(session, photos):
    """GET the photo collection endpoint."""
    session.get("/api/photos/")
@then(parsers.cfparse(
    'the photo <name> description should be {description}'))
def step_test_photo_description(name, description):
    """Assert the stored photo's description matches, via the ORM."""
    photo = models.photo.objects.get(title=name)
    assert photo.description == description
@then('the photo called <name> should exist')
def step_test_photo_valid(name):
    # The ORM lookup raises DoesNotExist if the photo is absent, which
    # fails the scenario — no explicit assert needed.
    models.photo.objects.get(title=name)
@then('the photo called <name> should not exist')
def step_test_photo_not_exist(name):
    """Assert the ORM lookup for the named photo raises DoesNotExist."""
    with pytest.raises(models.photo.DoesNotExist):
        models.photo.objects.get(title=name)
@then('we should get a valid photo called <name>')
def step_test_r_valid_photo(session, name):
    """Check the last response body looks like a photo with this title."""
    photo = session.obj
    assert photo['title'] == name
    assert isinstance(photo['description'], six.string_types)
    assert isinstance(photo['title'], six.string_types)
@then(parsers.cfparse(
    'we should get a photo with description {description}'))
def step_test_r_photo_description(session, description):
    """Check the last response body carries the expected description."""
    photo = session.obj
    assert photo['description'] == description
@then(parsers.cfparse('we should get {number:d} valid photos'))
def step_test_r_n_results(session, number):
    """Check the last response is a paginated list of `number` photos."""
    data = session.obj
    assert data['count'] == number
    assert len(data['results']) == number
    for photo in data['results']:
        assert isinstance(photo['description'], six.string_types)
        assert isinstance(photo['title'], six.string_types)
|
Branlala/docker-sickbeardfr | sickbeard/cherrypy/lib/httputil.py | Python | mit | 15,480 | 0.004845 | """HTTP library functions."""
# This module contains functions for building an HTTP application
# framework: any one, not just one whose name starts with "Ch". ;) If you
# reference any modules from some popular framework inside *this* module,
# FuManChu will personally hang you up by your thumbs and submit you
# to a public caning.
from binascii import b2a_base64
from BaseHTTPServer import BaseHTTPRequestHandler
# Start from the stdlib's status-code table, then override two entries
# whose default phrasing CherryPy replaces.
response_codes = BaseHTTPRequestHandler.responses.copy()

# From http://www.cherrypy.org/ticket/361
response_codes[500] = ('Internal Server Error',
                       'The server encountered an unexpected condition '
                       'which prevented it from fulfilling the request.')
response_codes[503] = ('Service Unavailable',
                       'The server is currently unable to handle the '
                       'request due to a temporary overloading or '
                       'maintenance of the server.')
import re
import urllib
from rfc822 import formatdate as HTTPDate
def urljoin(*atoms):
    """Return the given path *atoms, joined into a single URL.

    This will correctly join a SCRIPT_NAME and PATH_INFO into the
    original URL, even if either atom is blank.
    """
    joined = "/".join(filter(None, atoms))
    while "//" in joined:
        joined = joined.replace("//", "/")
    # Special-case the final url of "", and return "/" instead.
    return joined or "/"
def protocol_from_http(protocol_str):
    """Return a (major, minor) int tuple from an 'HTTP/x.y' string."""
    major, minor = protocol_str[5], protocol_str[7]
    return int(major), int(minor)
def get_ranges(headervalue, content_length):
    """Return a list of (start, stop) indices from a Range header, or None.

    Each (start, stop) tuple will be composed of two ints, which are suitable
    for use in a slicing operation. That is, the header "Range: bytes=3-6",
    if applied against a Python string, is requesting resource[3:7]. This
    function will return the list [(3, 7)].

    If this function returns an empty list, you should return HTTP 416.
    """
    if not headervalue:
        return None

    result = []
    bytesunit, byteranges = headervalue.split("=", 1)
    for brange in byteranges.split(","):
        start, stop = [x.strip() for x in brange.split("-", 1)]
        if start:
            if not stop:
                stop = content_length - 1
            start, stop = int(start), int(stop)
            if start >= content_length:
                # From rfc 2616 sec 14.16:
                # "If the server receives a request (other than one
                # including an If-Range request-header field) with an
                # unsatisfiable Range request-header field (that is,
                # all of whose byte-range-spec values have a first-byte-pos
                # value greater than the current length of the selected
                # resource), it SHOULD return a response code of 416
                # (Requested range not satisfiable)."
                continue
            if stop < start:
                # From rfc 2616 sec 14.16:
                # "If the server ignores a byte-range-spec because it
                # is syntactically invalid, the server SHOULD treat
                # the request as if the invalid Range header field
                # did not exist. (Normally, this means return a 200
                # response containing the full entity)."
                return None
            result.append((start, stop + 1))
        else:
            if not stop:
                # See rfc quote above.
                return None
            # Negative subscript (last N bytes)
            result.append((content_length - int(stop), content_length))

    return result
class HeaderElement(object):
    """An element (with parameters) from an HTTP header's element list."""

    def __init__(self, value, params=None):
        self.value = value
        self.params = {} if params is None else params

    def __cmp__(self, other):
        # Python 2 rich-comparison hook: order elements by their value.
        return cmp(self.value, other.value)

    def __unicode__(self):
        rendered = u"%s" % self.value
        for k, v in self.params.iteritems():
            rendered += u";%s=%s" % (k, v)
        return rendered

    def __str__(self):
        return str(self.__unicode__())

    @staticmethod
    def parse(elementstr):
        """Transform 'token;key=val' to ('token', {'key': 'val'})."""
        # Split the element into a value and parameters. The 'value' may
        # be of the form, "token=token", but we don't split that here.
        pieces = [p.strip() for p in elementstr.split(";") if p.strip()]
        initial_value = pieces.pop(0).strip() if pieces else ''
        params = {}
        for piece in pieces:
            parts = [p.strip() for p in piece.split("=", 1) if p.strip()]
            key = parts.pop(0)
            params[key] = parts[0] if parts else ""
        return initial_value, params

    @classmethod
    def from_str(cls, elementstr):
        """Construct an instance from a string of the form 'token;key=val'."""
        ival, params = cls.parse(elementstr)
        return cls(ival, params)
q_separator = re.compile(r'; *q *=')
class AcceptElement(HeaderElement):
    """An element (with parameters) from an Accept* header's element list.

    AcceptElement objects are comparable; the more-preferred object will be
    "less than" the less-preferred object. They are also therefore sortable;
    if you sort a list of AcceptElement objects, they will be listed in
    priority order; the most preferred value will be first. Yes, it should
    have been the other way around, but it's too late to fix now.
    """

    def from_str(cls, elementstr):
        qvalue = None
        # The first "q" parameter (if any) separates the initial
        # media-range parameter(s) (if any) from the accept-params.
        atoms = q_separator.split(elementstr, 1)
        media_range = atoms.pop(0).strip()
        if atoms:
            # The qvalue for an Accept header can have extensions. The other
            # headers cannot, but it's easier to parse them as if they did.
            qvalue = HeaderElement.from_str(atoms[0].strip())
        media_type, params = cls.parse(media_range)
        if qvalue is not None:
            # Stored as a HeaderElement, not a float; qvalue below unwraps it.
            params["q"] = qvalue
        return cls(media_type, params)
    from_str = classmethod(from_str)

    def qvalue(self):
        # RFC default: an element with no explicit q parameter has q=1.
        val = self.params.get("q", "1")
        if isinstance(val, HeaderElement):
            val = val.value
        return float(val)
    qvalue = property(qvalue, doc="The qvalue, or priority, of this value.")

    def __cmp__(self, other):
        # Python 2-only comparison hook (cmp was removed in Python 3):
        # order by qvalue, falling back to the string form for ties.
        diff = cmp(self.qvalue, other.qvalue)
        if diff == 0:
            diff = cmp(str(self), str(other))
        return diff
def header_elements(fieldname, fieldvalue):
    """Return a sorted HeaderElement list from a comma-separated header str."""
    if not fieldvalue:
        return []
    # Accept* and TE headers carry qvalues, so they parse as AcceptElement.
    element_cls = (AcceptElement
                   if fieldname.startswith("Accept") or fieldname == 'TE'
                   else HeaderElement)
    result = [element_cls.from_str(chunk) for chunk in fieldvalue.split(",")]
    result.sort()
    result.reverse()
    return result
def decode_TEXT(value):
    """Decode RFC-2047 TEXT (e.g. "=?utf-8?q?f=C3=BCr?=" -> u"f\xfcr")."""
    # NOTE(review): `email.Header` is the Python 2 module path (this file
    # also imports BaseHTTPServer above); Python 3 renamed it `email.header`.
    from email.Header import decode_header
    atoms = decode_header(value)
    decodedvalue = ""
    for atom, charset in atoms:
        # Encoded words carry their charset; decode those to unicode.
        if charset is not None:
            atom = atom.decode(charset)
        decodedvalue += atom
    return decodedvalue
def valid_status(status):
"""Return legal HTTP status Code, Reason-phrase and Message.
The status arg must be an int, or a str that begins with an int.
If status is an int, or a str and no reason-phrase is supplied,
a default reason-phrase will be provided.
"""
if not status:
status = 200
status = str(status)
|
milkmeat/thomas | project euler/q49.py | Python | mit | 1,149 | 0.021758 | import copy
import math
def permutation(s):
    """Return every permutation of the items of list s, joined with +."""
    if len(s) == 1:
        return s
    results = []
    for head in s:
        remainder = copy.deepcopy(s)
        remainder.remove(head)
        results.extend(head + tail for tail in permutation(remainder))
    return results
def isprime(prime):
    """Trial-division primality test: True iff prime is a prime number."""
    if prime < 2:
        return False
    limit = int(math.sqrt(prime)) + 1
    return all(prime % divisor for divisor in range(2, limit))
for d1 in range(1,9+1):
for d2 in range(d1,9+1):
for d3 in range(d2,9+1):
for d4 in range(d3,9+1):
s=set()
| #['4','1','8','7']
#print permutation([str(d1),str(d2),str(d3),str(d4)])
for a in permutation([str(d1),str(d2),str(d3),str(d4)]):
if isprime(int(a)):
s.add(int(a))
p=sorted(list(s))
| for a in range(len(p)):
for b in range(a+1, len(p)):
for c in range(b+1, len(p)):
if p[b]-p[a]==p[c]-p[b]:
print p[a],p[b],p[c]
|
google-research/pisac | pisac/tanh_normal_projection_network.py | Python | apache-2.0 | 5,102 | 0.002548 | # coding=utf-8
# Copyright 2020 The PI-SAC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Project inputs to a tanh-squashed MultivariateNormalDiag distribution."""
import gin
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.distributions import utils as distribution_utils
from tf_agents.networks import network
from tf_agents.networks import utils as network_utils
from tf_agents.specs import distribution_spec
from tf_agents.specs import tensor_spec
@gin.configurable
class TanhNormalProjectionNetwork(network.DistributionNetwork):
"""Generates a tanh-squashed MultivariateNormalDiag distribution."""
def __init__(self,
sample_spec,
activation_fn=None,
kernel_initializer=None,
std_transform=tf.exp,
min_std=None,
max_std=None,
name='TanhNormalProjectionNetwork'):
"""Creates an instance of TanhNormalProjectionNetwork.
Args:
sample_spec: A `tensor_spec.BoundedTensorSpec` detailing the shape and
dtypes of samples pulled from the output distribution.
activation_fn: Activation function to use in dense layer.
kernel_initializer: Initializer to use for the kernels of the conv and
dense layers. If none is provided a default glorot_uniform
std_transform: Transformation function to apply to the stddevs.
min_std: Minimum std.
max_std: Maximum std.
name: A string representing name of the network.
"""
if len(tf.nest.flatten(sample_spec)) != 1:
raise ValueError('Tanh Normal Projection network only supports single'
' spec samples.')
output_spec = self._output_distribution_spec(sample_spec, name)
super(TanhNormalProjectionNetwork, self).__init__(
# We don't need these, but base class requires them.
input_tensor_spec=None,
state_spec=(),
output_spec=output_spec,
name=name)
self._sample_spec = sample_spec
self._std_transform = std_transform
self._min_std = min_std
self._max_std = max_std
if kernel_initializer is None:
kernel_initializer = 'glorot_uniform'
self._projection_layer = tf.keras.layers.Dense(
sample_spec.shape.num_elements() * 2,
activation=activation_fn,
kernel_initializer=kernel_initializer,
name='projection_layer')
def _output_distribution_spec(self, sample_spec, network_name):
input_param_shapes = {
'loc': sample_spec.shape,
'scale_diag': sample_spec.shape
}
input_param_spec = { # pylint: disable=g-complex-comprehension
name: tensor_spec.TensorSpec(
shape=shape,
dtype=sample_spec.dtype,
name=network_name + '_' + name)
for name, shape in input_param_shapes.items()
}
def distribution_builder(*args, **kwargs):
distribution = tfp.distributions.MultivariateNormalDiag(*args, **kwargs)
return distribution_utils.scale_distribution_to_spec(
distribution, sample_spec)
return distribution_spec.DistributionSpec(
distribution_builder, input_param_spec, sample_spec=sample_spec)
  def call(self, inputs, outer_rank, training=False, mask=None):
    """Projects `inputs` to the parameters of the output distribution.

    Args:
      inputs: Tensor whose dtype must equal `self._sample_spec.dtype`.
      outer_rank: Number of leading batch dimensions of `inputs`.
      training: Whether the dense projection layer runs in training mode.
      mask: Must be None; action masking is not implemented.

    Returns:
      A `(distribution, network_state)` tuple where `network_state` is `()`.

    Raises:
      ValueError: If `inputs.dtype` does not match the sample spec dtype.
      NotImplementedError: If `mask` is not None.
    """
    if inputs.dtype != self._sample_spec.dtype:
      raise ValueError('Inputs to TanhNormalProjectionNetwork must match the '
                       'sample_spec.dtype.')
    if mask is not None:
      raise NotImplementedError(
          'TanhNormalProjectionNetwork does not yet implement action masking; '
          'got mask={}'.format(mask))
    # outer_rank is needed because the projection is not done on the raw
    # observations so getting the outer rank is hard as there is no spec to
    # compare to.
    batch_squash = network_utils.BatchSquash(outer_rank)
    inputs = batch_squash.flatten(inputs)
    # A single dense layer emits means and (pre-transform) stddevs side by side.
    means_and_stds = self._projection_layer(inputs, training=training)
    means, stds = tf.split(means_and_stds, num_or_size_splits=2, axis=-1)
    means = tf.reshape(means, [-1] + self._sample_spec.shape.as_list())
    means = tf.cast(means, self._sample_spec.dtype)
    # Order matters: apply the std transform first, then clamp to
    # [min_std, max_std] (each step is optional).
    if self._std_transform is not None:
      stds = self._std_transform(stds)
    if self._min_std is not None:
      stds = tf.maximum(stds, self._min_std)
    if self._max_std is not None:
      stds = tf.minimum(stds, self._max_std)
    stds = tf.cast(stds, self._sample_spec.dtype)
    means = batch_squash.unflatten(means)
    stds = batch_squash.unflatten(stds)
    return self.output_spec.build_distribution(loc=means, scale_diag=stds), ()
|
smarthomeNG/smarthome | lib/model/mqttplugin.py | Python | gpl-3.0 | 11,395 | 0.004651 | #!/usr/bin/env python3
# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab
#########################################################################
# Copyright 2019- Martin Sinn m.sinn@gmx.de
#########################################################################
# This file is part of SmartHomeNG
#
# SmartHomeNG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHomeNG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHomeNG If not, see <http://www.gnu.org/licenses/>.
#########################################################################
import threading
from lib.module import Modules
from lib.model.smartplugin import SmartPlugin
from lib.shtime import Shtime
class MqttPlugin(SmartPlugin):
_item_values = {} # dict of dicts
# Initialization of SmartPlugin class called by super().__init__() from the plugin's __init__() method
def __init__(self):
"""
Initialization Routine for the mqtt extension class to SmartPlugin
"""
SmartPlugin.__init__(self)
# get instance of MQTT module
try:
self.mod_mqtt = Modules.get_instance().get_module('mqtt') # try/except to handle running in a core version that does not support modules
except:
self.mod_mqtt = None
if self.mod_mqtt == None:
self.logger.error("Module 'mqtt' not loaded. The plugin is not starting")
self._init_complete = False
return False
self._subscribed_topics_lock = threading.Lock()
self._subscribed_topics = {} # subscribed topics (a dict of dicts)
self._subscribe_current_number = 0 # current number of the subscription entry
self._subscriptions_started = False
# get broker configuration (for display in web interface)
self.broker_config = self.mod_mqtt.get_broker_config()
return True
def start_subscriptions(self):
"""
Start subscription to all topics
Should be called from the run method of a plugin
"""
if self.mod_mqtt:
with self._subscribed_topics_lock:
for topic in self._subscribed_topics:
# start subscription to all items for this topic
for item_path in self._subscribed_topics[topic]:
self._start_subscription(topic, item_path)
self._subscriptions_started = True
return
def stop_subscriptions(self):
"""
Stop subscription to all topics
Should be called from the stop method of a plugin
"""
if self.mod_mqtt:
with self._subscribed_topics_lock:
for topic in self._subscribed_topics:
# stop subscription to all items for this topic
for item_path in self._subscribed_topics[topic]:
current = str(self._subscribed_topics[topic][item_path]['current'])
self.logger.info("stop(): Unsubscribing from topic {} for item {}".format(topic, item_path))
self.mod_mqtt.unsubscribe_topic(self.get_shortname() + '-' + current, topic)
self._subscriptions_started = False
return
def _start_subscription(self, topic, item_path):
current = str(self._subscribed_topics[topic][item_path]['current'])
qos = self._subscribed_topics[topic][item_path].get('qos', None)
payload_type = self._subscribed_topics[topic][item_path].get('payload_type', None)
callback = self._subscribed_topics[topic][item_path].get('callback', None)
bool_values = self._subscribed_topics[topic][item_path].get('bool_values', None)
self.logger.info("_start_subscription: Subscribing to topic {}, payload_type '{}' for item {} (callback={})".format(topic, payload_type, item_path, callback))
self.mod_mqtt.sub | scribe_topic(self.get_shortname() + '-' + current, topic, callback=callback,
qos=qos, payload_type=payload_type, bool_values=bool_values)
re | turn
def add_subscription(self, topic, payload_type, bool_values=None, item=None, callback=None):
"""
Add mqtt subscription to subscribed_topics list
subscribing is done directly, if subscriptions have been started by self.start_subscriptions()
:param topic: topic to subscribe to
:param payload_type: payload type of the topic (for this subscription to the topic)
:param bool_values: bool values (for this subscription to the topic)
:param item: item that should receive the payload as value. Used by the standard handler (if no callback function is specified)
:param callback: a plugin can provide an own callback function, if special handling of the payload is needed
:return:
"""
with self._subscribed_topics_lock:
# test if topic is new
if not self._subscribed_topics.get(topic, None):
self._subscribed_topics[topic] = {}
# add this item to topic
if item is None:
item_path = '*no_item*'
else:
item_path = item.path()
self._subscribed_topics[topic][item_path] = {}
self._subscribe_current_number += 1
self._subscribed_topics[topic][item_path]['current'] = self._subscribe_current_number
self._subscribed_topics[topic][item_path]['item'] = item
self._subscribed_topics[topic][item_path]['qos'] = None
self._subscribed_topics[topic][item_path]['payload_type'] = payload_type
if callback:
self._subscribed_topics[topic][item_path]['callback'] = callback
else:
self._subscribed_topics[topic][item_path]['callback'] = self._on_mqtt_message
self._subscribed_topics[topic][item_path]['bool_values'] = bool_values
if self._subscriptions_started:
# directly subscribe to added subscription, if subscribtions are started
self._start_subscription(topic, item_path)
return
def publish_topic(self, topic, payload, item=None, qos=None, retain=False, bool_values=None):
"""
Publish a topic to mqtt
:param topic: topic to publish
:param payload: payload to publish
:param item: item (if relevant)
:param qos: qos for this message (optional)
:param retain: retain flag for this message (optional)
:param bool_values: bool values (for publishing this topic, optional)
:return:
"""
self.mod_mqtt.publish_topic(self.get_shortname(), topic, payload, qos, retain, bool_values)
if item is not None:
self.logger.info("publish_topic: Item '{}' -> topic '{}', payload '{}', QoS '{}', retain '{}'".format(item.id(), topic, payload, qos, retain))
# Update dict for periodic updates of the web interface
self._update_item_values(item, payload)
else:
self.logger.info("publish_topic: topic '{}', payload '{}', QoS '{}', retain '{}'".format(topic, payload, qos, retain))
return
# ----------------------------------------------------------------------------------------
# methods to handle the broker connection
# ----------------------------------------------------------------------------------------
_broker_version = '?'
_broker = {}
broker_config = {}
broker_monitoring = False
def get_broker_info(self):
if self.mod_mqtt:
(self._brok |
senechal/ssc0570-Redes-Neurais | run.py | Python | mit | 708 | 0.002825 | """
Usage:
run.py mlp --train=<train> --test=<test> --config=<config>
run.py som --train=<train> --test=<test> --config=<config>
Options | :
--train Path to training data, txt file.
--test Path to test data, txt file.
--config Json configuration for the network.
"""
from redes_neurais.resources.manager import run_mlp, run_som
import docopt
def run():
    """Parse command-line arguments and dispatch to the requested network.

    Uses the module docstring as the docopt usage specification and runs
    either the MLP or the SOM pipeline with the given train/test/config
    paths.
    """
    try:
        args = docopt.docopt(__doc__)
        if args["mlp"]:
            run_mlp(args['--config'], args['--train'], args['--test'])
        if args["som"]:
            run_som(args['--config'], args['--train'], args['--test'])
    except docopt.DocoptExit as e:
        # print(e) works on both Python 2 and 3; the original
        # `print e.message` was a Python-2-only print statement and
        # BaseException.message was removed in Python 3.
        print(e)


if __name__ == "__main__":
    run()
|
tianhuil/isaac-thedataincubator-project | analysis/models.py | Python | apache-2.0 | 3,132 | 0.004151 | import os
import cPickle as pkl
from matplotlib.pyplot import *
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import SGDClassifier, LogisticRegression,\
LinearRegression, Ridge, Lasso
from sklearn.feature_selection import SelectKBest
from sklearn.svm import SVC, LinearSVC, SVR
from sklearn.preprocessing import StandardScaler
from sklearn import base
class ColumnSelectTransformer(base.BaseEstimator, base.TransformerMixin):
    """
    Select a subset of columns from a feature matrix, optionally casting
    the selection to a given dtype.
    """
    def __init__(self, columns=[], astype=None):
        # sklearn convention: store constructor params verbatim
        self.columns = columns
        self.astype = astype

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        selected = X[:, self.columns]
        return selected if self.astype is None else selected.astype(self.astype)
class EstimatorTransformer(base.BaseEstimator, base.TransformerMixin):
    """
    Adapter that exposes an estimator's predictions through the
    transformer interface, so it can be used inside a FeatureUnion.
    """
    def __init__(self, estimator=None):
        self.estimator = estimator

    def fit(self, X, y=None):
        self.estimator.fit(X, y)
        return self

    def transform(self, X):
        # Predictions reshaped to a column vector (n_samples, 1).
        return self.estimator.predict(X).reshape(-1, 1)

    def _get_param_names(self):
        return ['estimator']
def column_locs(df, columns):
    """
    Return the positional index of each named column in *df*.
    """
    return list(map(df.columns.get_loc, columns))
def select_data(df, balance=True):
    """
    Select a subset of the data where the outcomes of all loans is essentially
    known. Optionally balance the data to have as many failed loans as
    successful.

    :param df: DataFrame with boolean 'completed' and 'failed' columns.
    :param balance: if True, truncate each class to the size of the smaller one.
    :return: DataFrame of completed loans (balanced if requested).
    """
    comp = df[df.completed]
    if not balance:
        return comp
    num_use = min(len(comp[comp.failed].index),
                  len(comp[~comp.failed].index))
    keep = (comp[comp.failed][:num_use].index
            .union(comp[~comp.failed][:num_use].index))
    # .loc replaces the .ix indexer, which was removed in pandas >= 1.0;
    # the labels come from comp's own index, so label-based lookup is exact.
    return comp.loc[keep]
def featurize(df, params={}, fit=True,
              columns=['subgrade_code', 'annual_inc', 'total_acc', 'revol_bal'],
              text_trans=Pipeline([
                  ('vect', CountVectorizer()),
                  ('tfidf', TfidfTransformer()),
                  ('kbest', SelectKBest(k=1000))])):
    """
    Convert a data-frame's descriptions to text features, extract a set of
    numeric columns, and return the result as a matrix.
    Also return a vector indicating whether each loan has not yet failed.
    (1 = no failure yet, 0 = loan has failed)

    NOTE(review): `params` and the default `text_trans` Pipeline are mutable
    default arguments. The shared default pipeline appears deliberate: a call
    with fit=True fits it, and later calls with fit=False reuse the fitted
    state — confirm before refactoring.
    """
    # Label vector: True where the loan has NOT failed.
    # NOTE(review): Series.reshape and DataFrame.as_matrix were removed in
    # modern pandas — this code assumes an old pandas version.
    y = ~df.failed.reshape(-1)
    if fit:
        text_trans.set_params(**params)
        text_trans.fit(df.desc.apply(str), y)
    textdat = text_trans.transform(df.desc.apply(str))
    # Numeric features; missing values filled with -1 before casting to float.
    data = df[columns].fillna(-1).as_matrix().astype(float)
    # `concatenate` is in scope via `from matplotlib.pyplot import *` —
    # presumably numpy's concatenate re-exported by pyplot; verify.
    alldata = concatenate((data, textdat.todense()), axis=1)
    return alldata, y
|
pymedusa/Medusa | medusa/session/factory.py | Python | gpl-3.0 | 748 | 0 | """Session class factory methods."""
from __future__ import unicode_literals
import logging
from cachecontrol import CacheControlAdapter
from cachecontrol.cache import DictCache
log = logging.getLogger(__name__)
log.addHandler( | logging.NullHandler())
def add_cache_control(session, cache_control_config):
    """Attach a cache-control adapter to the given session object."""
    adapter = CacheControlAdapter(
        DictCache(),
        cache_etags=cache_control_config.get('cache_etags', True),
        serializer=cache_control_config.get('serializer', None),
        heuristic=cache_control_config.get('heuristic', None),
    )
    # Route both plain and TLS requests through the caching adapter.
    for prefix in ('http://', 'https://'):
        session.mount(prefix, adapter)
    session.cache_controller = adapter.controller
|
Azure/azure-sdk-for-python | sdk/recoveryservices/azure-mgmt-recoveryservices/azure/mgmt/recoveryservices/_configuration.py | Python | mit | 3,262 | 0.003985 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from | ._version import VERSION
if TYPE_CHECKING:
# pyl | int: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class RecoveryServicesClientConfiguration(Configuration):
    """Configuration for RecoveryServicesClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The subscription Id.
    :type subscription_id: str
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        subscription_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Required parameters must be provided.
        for param_name, param_value in (('credential', credential),
                                        ('subscription_id', subscription_id)):
            if param_value is None:
                raise ValueError("Parameter '{}' must not be None.".format(param_name))
        super(RecoveryServicesClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = "2021-03-01"
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-recoveryservices/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Each pipeline policy may be supplied via kwargs; otherwise a
        # default instance is constructed from the remaining kwargs.
        policy_defaults = (
            ('user_agent_policy', policies.UserAgentPolicy),
            ('headers_policy', policies.HeadersPolicy),
            ('proxy_policy', policies.ProxyPolicy),
            ('logging_policy', policies.NetworkTraceLoggingPolicy),
            ('http_logging_policy', ARMHttpLoggingPolicy),
            ('retry_policy', policies.RetryPolicy),
            ('custom_hook_policy', policies.CustomHookPolicy),
            ('redirect_policy', policies.RedirectPolicy),
        )
        for attr_name, default_factory in policy_defaults:
            setattr(self, attr_name, kwargs.get(attr_name) or default_factory(**kwargs))
        self.authentication_policy = kwargs.get('authentication_policy')
        # Build a bearer-token policy from the credential when none was given.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
|
compops/gpo-abc2015 | state/smc_resampling.py | Python | mit | 3,290 | 0.006383 | ##############################################################################
##############################################################################
# Routines for
# Resampling
#
# Copyright (c) 2016 Johan Dahlin
# liu (at) johandahlin.com
#
##############################################################################
##############################################################################
import numpy as np
import scipy.weave as weave
##############################################################################
# Resampling for SMC sampler: Multinomial
##############################################################################
def resampleMultinomial(w, N=0, u=None):
    """
    Multinomial resampling.

    Pure-NumPy replacement for the original scipy.weave implementation
    (scipy.weave is Python-2-only and was removed from SciPy), with
    identical index semantics. Also fixes the `u == None` comparison,
    which raises on array inputs, and accepts a scalar or array-like `u`.

    :param w: particle weights (need not be normalised).
    :param N: number of indices to draw (0 means len(w)).
    :param u: optional uniform draw(s) on [0, 1); drawn randomly if None.
    :return: int array of N ancestor indices.
    """
    w = np.asarray(w, dtype=float)
    H = len(w)
    if N == 0:
        N = H
    if u is None:
        u = np.random.uniform(0.0, 1.0, N)
    else:
        # Accept a scalar or array-like; recycle values to length N.
        u = np.resize(np.asarray(u, dtype=float), N)
    ww = np.cumsum(w) / np.sum(w)
    # First index whose cumulative weight reaches u, capped at H - 1
    # (same as the original C loop: advance while ww[j] < u and j < H-1).
    idx = np.minimum(np.searchsorted(ww, np.ravel(u), side='left'), H - 1)
    return idx.astype(int)
##############################################################################
# Resampling for SMC sampler: Stratified
##############################################################################
def resampleStratified(w, N=0, u=None):
    """
    Stratified resampling.

    Pure-NumPy replacement for the original scipy.weave implementation
    (scipy.weave is Python-2-only and was removed from SciPy), with
    identical index semantics. Also fixes the `u == None` comparison,
    which raises on array inputs, and accepts a scalar or array-like `u`.

    :param w: particle weights (need not be normalised).
    :param N: number of indices to draw (0 means len(w)).
    :param u: optional per-stratum uniform draw(s) on [0, 1); drawn if None.
    :return: int array of N ancestor indices.
    """
    w = np.asarray(w, dtype=float)
    H = len(w)
    if N == 0:
        N = H
    if u is None:
        u = np.random.uniform(0.0, 1.0, N)
    else:
        # Accept a scalar or array-like; recycle values to length N.
        u = np.resize(np.asarray(u, dtype=float), N)
    # One uniform offset per stratum: uu_k = (u_k + k) / N.
    grid = (np.ravel(u) + np.arange(N)) / N
    ww = np.cumsum(w) / np.sum(w)
    # First index whose cumulative weight reaches the point, capped at H - 1.
    idx = np.minimum(np.searchsorted(ww, grid, side='left'), H - 1)
    return idx.astype(int)
##############################################################################
# Resampling for SMC sampler: Systematic
##############################################################################
def resampleSystematic(w, N=0, u=None):
    """
    Systematic resampling.

    Pure-NumPy replacement for the original scipy.weave implementation
    (scipy.weave is Python-2-only and was removed from SciPy), with
    identical index semantics.

    :param w: particle weights (need not be normalised).
    :param N: number of indices to draw (0 means len(w)).
    :param u: optional single uniform offset in [0, 1); drawn if None.
    :return: int array of N ancestor indices.
    """
    w = np.asarray(w, dtype=float)
    H = len(w)
    if N == 0:
        N = H
    if u is None:
        u = float(np.random.uniform())
    else:
        u = float(u)
    # Evenly spaced points sharing one common random offset: uu_k = (u + k) / N.
    grid = (u + np.arange(N)) / N
    ww = np.cumsum(w) / np.sum(w)
    # First index whose cumulative weight reaches the point, capped at H - 1.
    idx = np.minimum(np.searchsorted(ww, grid, side='left'), H - 1)
    return idx.astype(int)
########################################################################
# End of file
########################################################################
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.