repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
palankai/xadrpy | src/ckeditor/urls.py | Python | lgpl-3.0 | 229 | 0 | f | rom django.conf.urls.defaults impor | t patterns, url
urlpatterns = patterns(
'',
url(r'^upload/', 'ckeditor.views.upload', name='ckeditor_upload'),
url(r'^browse/', 'ckeditor.views.browse', name='ckeditor_browse'),
)
|
Puyb/inscriptions_roller | inscriptions/consumers.py | Python | gpl-3.0 | 1,896 | 0.003692 | import logging
import json
import re
import smtplib
from channels.consumer import SyncConsumer
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import EmailMessage
from django.urls import reverse
from .models import Mail
logger = lo | gging.getLogger(__name__)
class MailConsumer(SyncConsumer):
def send_mail(self, message):
try:
logger.info('sending mail %s', message)
message.pop('type')
name = message.pop('n | ame', 'Enduroller')
content_type = message.pop('content_type', 'html')
message_id = message.pop('message_id')
#to = json.loads(message.pop('to'))
body = message.pop('body')
if content_type == 'html':
body = re.sub(
r'(https?://%s[^?"#]+)(\?([^#"]*))?(\#[^"]*)?' % Site.objects.get_current(),
r'\1?\3&message_id=%s\4' % message_id,
body
)
body += '<img src="https://%s/blank.gif?message_id=%s" width=1 height=1 />' % (
Site.objects.get_current(),
message_id,
)
mail = EmailMessage(
from_email='%s <%s>' % (name, settings.DEFAULT_FROM_EMAIL),
#to=to,
body=body,
headers={'Message-ID': message_id},
**message,
)
mail.content_subtype = content_type
try:
mail.send()
except smtplib.SMTPRecipientsRefused as e:
to = message['to']
logger.exception('error sending mail %s' % e.recipients[to[0]][1])
Mail.objects.filter(uid=message_id, destinataires=to).update(error=e.recipients[to[0]][1])
except Exception:
logger.exception('error sending mail')
|
pengzhangdev/slackbot | slackbot/plugins/ibotcloud.py | Python | mit | 6,471 | 0.001545 | import datetime
import hashlib
import copy
import httplib, urllib, urlparse
import collections
__author__ = 'peter.liu@xiaoi.com'
class AskParams:
def __init__(self, platform="", user_id="", url="", response_format="json"):
self.platform = platform
self.user_id = user_id
self.url = url
self.response_format = response_format
def __str__(self):
return "platform:" + self.platform + "\n" + \
"user_id:" + self.user_id + "\n" + \
"url:" + self.url + "\n" + \
"format:" + self.response_format
class AskSession:
def __init__(self, signature, params):
if not isinstance(signature, IBotSignature):
raise TypeError("signature should be IBotSignature")
if not isinstance(params, AskParams):
raise TypeError("params should be AskParams")
self.signature = copy.copy(signature)
self.params = copy.copy(params)
def get_answer(self, question):
http_params = urllib.urlencode({'question': question,
'format': self.params.response_format,
'platform': self.params.platform,
'userId': self.params.user_id})
xauth = self.signature.get_http_header_xauth()
http_headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain",
xauth.keys()[0]: xauth.values()[0]}
url = urlparse.urlparse(self.params.url)
conn = httplib.HTTPConnection(url.netloc)
conn.request("POST", url.path, http_params, http_headers)
response = conn.getresponse()
ret = collections.namedtuple("get_answer_return", "http_status http_body")
ret.http_body = response.read()
ret.http_status = response.status
conn.close()
return ret
class RegParams:
def __init__(self, url=""):
self.aue = ""
self.txe = ""
self.auf = ""
self.url = url
self.setup_for_speex_wb()
def setup_for_speex_wb(self):
self.aue = "speex-wb;7"
self.txe = "utf-8"
self.auf = "audio/L16;rate=16000"
def setup_for_speex_nb(self):
self.aue = "speex-nb;7"
self.txe = "utf-8"
self.auf = "audio/L16;rate=16000"
def __str__(self):
return "aue:" + self.aue + "\n" + \
"txe:" + self.txe + "\n" + \
"auf:" + self.auf + "\n" + \
"url:" + self.url
class RegSession:
def __init__(self, signature, params):
if not isinstance(signature, IBotSignature):
raise TypeError("signature should be IBotSignature")
if not isinstance(params, RegParams):
raise TypeError("params should be RegParams")
self.signature = copy.copy(signature)
self.params = copy.copy(params)
def get_reg_result(self, speex_data):
xauth = self.signature.get_http_header_xauth()
http_headers = {"Content-type": "application/audio",
"Accept": "text/plain",
"X-AUE": self.params.aue,
"X-TXE": self.params.txe,
"X-AUF": self.params.auf,
xauth.keys()[0]: xauth.values()[0]}
url = urlparse.urlparse(self.params.url)
conn = httplib.HTTPConnection(url.netloc)
conn.request("POST", url.path, speex_data, http_headers)
response = conn.getresponse()
ret = collections.namedtuple("get_reg_result_return", "http_status http_body")
ret.http_body = response.read()
ret.http_status = response.status
conn.close()
return ret
class TTSParams(RegParams):
def __init__(self, url=""):
RegParams.__init__(self, url)
class TTSSession:
def __init__(self, signature, params):
if not isinstance(signature, IBotSignature):
raise TypeError("signature should be IBotSignature")
if not isinstance(params, TTSParams):
raise TypeError("params should be TTSParams")
self.signature = copy.copy(signature)
self.params = copy.copy(params)
def get_tts_result(self, tts_string):
xauth = self.signature.get_http_header_xauth()
http_headers = {"Content-type": "text/plain",
"X-AUE": self.params.aue,
"X-TXE": self.params.txe,
"X-AUF": self.params.auf,
xauth.keys()[0]: xauth.values()[0]}
url = urlparse.urlparse(self.params.url)
conn = httplib.HTTPConnection(url.netloc)
conn.request("POST", url.path, tts_string, http_headers)
response = conn.getresponse()
ret = collections.namedtuple("get_tts_result_return", "http_status http_body")
ret.http_body = response.read()
ret.http_status = response.status
conn.close()
return ret
class IBotSignature:
"""
It's about iBotCloud signature stuff
"""
def __init__(self, app_key, app_sec, uri, http_method="POST", realm="xiaoi.com"):
self.app_key = app_key
self.app_sec = app_sec
self.uri = uri
self.http_method = http_method.upper()
self.realm = realm
def get_signature(self):
time_str = str(datetime.datetime.now()) |
nonce = hashlib.sha1(time_str).hexdigest()
HA1 = "{0}:{1}:{2}".format(self.app_key, self.realm, self.app_sec)
HA1 = hashlib.sha1(HA1).hexdigest()
HA2 = "{0}:{1}".format(self.http_method, self.uri)
HA2 = hashlib.sha1(HA2).hexdigest()
signature = "{0}:{1}:{2}".format(HA1, nonce, HA2)
signature = hashlib.sha1(signature).hexdigest()
# print "signature:" + signature
# print "nonce:" + nonce
ret = collections.namedtuple("ge | t_signature_reture", "signature nonce")
ret.signature = signature
ret.nonce = nonce
return ret
def get_http_header_xauth(self):
ret_vals = self.get_signature()
ret = {'X-Auth': "app_key=\"{0}\",nonce=\"{1}\",signature=\"{2}\"".format(self.app_key,
ret_vals.nonce,
ret_vals.signature)}
return ret
|
TeamEOS/external_chromium_org | tools/perf/measurements/no_op.py | Python | bsd-3-clause | 373 | 0.008043 | # Copyright | 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page_measurement
class NoOp(page_measurement.PageMeasurement):
def __init__(self):
super(NoOp, self).__init__('RunNoOp')
def MeasurePage(self, page, tab, results) | :
pass
|
keras-team/keras | keras/engine/compile_utils_test.py | Python | apache-2.0 | 31,261 | 0.002399 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for compile utitilies."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.testing_infra import test_combinations
from keras import losses as losses_mod
from keras import metrics as metrics_mod
from keras.engine import compile_utils
class LossesContainerTest(test_combinations.TestCase):
def test_single_loss(self):
loss_container = compile_utils.LossesContainer('mse')
y_t, y_p = tf.ones((10, 5)), tf.zeros((10, 5))
total_loss = loss_container(y_t, y_p)
self.assertTrue(loss_container._built)
self.assertLen(loss_container._losses, 1)
self.assertIsInstance(total_loss, tf.Tensor)
self.assertEqual(total_loss.numpy(), 1.)
self.assertLen(loss_container.metrics, 1)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 1.)
loss_container.reset_state()
self.assertEqual(loss_metric.result().numpy(), 0.)
def test_loss_list(self):
loss_container = compile_utils.LossesContainer(['mse', 'mae'], [1, 0.5])
y_t = [tf.ones((10, 1)), tf.zeros((10, 1))]
y_p = [tf.ones((10, 1)), tf.ones((10, 1))]
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertEqual(loss_container._output_names, ['output_1', 'output_2'])
self.assertLen(loss_container._losses, 2)
self.assertEqual(total_loss.numpy(), 0.25)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 0.25)
output_1_metric = loss_container.metrics[1]
self.assertEqual(output_1_metric.name, 'output_1_loss')
self.assertEqual(output_1_metric.result().numpy(), 0)
output_2_metric = loss_container.metrics[2]
self.assertEqual(output_2_metric.name, 'output_2_loss')
self.assertEqual(output_2_metric.result().numpy(), 0.5)
loss_container.reset_state()
self.assertEqual(loss_metric.result().numpy(), 0)
self.assertEqual(output_1_metric.result().numpy(), 0)
self.assertEqual(output_2_metric.result().numpy(), 0)
def test_loss_dict(self):
loss_container = compile_utils.LossesContainer(
{
'out1': 'mse',
'out2': 'mae'
}, {
'out1': 1,
'out2': 0.5
})
y_t = {'out1': tf.ones((10, 1)), 'out2': tf.zeros((10, 1))}
y_p = {'out1': tf.ones((10, 1)), 'out2': tf.ones((10, 1))}
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertLen(loss_container._losses, 2)
self.assertIsInstance(total_loss, tf.Tensor)
self.assertEqual(total_loss.numpy(), 0.25)
self.assertLen(loss_container.metrics, 3)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 0.25)
out1_metric = loss_container.metrics[1]
self.assertEqual(out1_metric.name, 'out1_loss')
self.assertEqual(out1_metric.result().numpy(), 0)
out2_metric = loss_container.metrics[2]
self.assertEqual(out2_metric.name, 'out2_loss')
self.assertEqual(out2_metric.result().numpy(), 0.5)
loss_container.reset_state()
self.assertEqual(loss_metric.result().numpy(), 0)
self.assertEqual(out1_metric.result().numpy(), 0)
self.assertEqual(out2_metric.result().numpy(), 0)
def test_loss_partial_dict_with_output_names(self):
loss_container = compile_utils.LossesContainer(
{'out2': 'mae'}, {'out2': 1.}, output_names=['out1', 'out2'])
y_t = [tf.ones((10, 1)), tf.zeros((10, 1))]
y_p = [tf.ones((10, 1)), tf.ones((10, 1))]
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertEqual(total_loss.numpy(), 0.5)
self.assertLen(loss_container.metrics, 2)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 0.5)
out2_metric = loss_container.metrics[1]
self.assertEqual(out2_metric.name, 'out2_loss')
self.assertEqual(out2_metric.result().numpy(), 0.5)
def test_loss_dict_with_nones(self):
loss_container = compile_utils.LossesContainer({
'out1': None,
'out2': 'mae'
})
y_t = {'out1': tf.ones((10, 1)), 'out2': tf.zeros((10, 1))}
y_p = {'out1': tf.ones((10, 1)), 'out2': tf.ones((10, 1))}
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertIsInstance(total_loss, tf.Tensor)
self.assertEqual(total_loss.numpy(), 0.5)
self.assertLen(loss_container.metrics, 2)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 0.5)
out2_metric = loss_container.metrics[1]
self.assertEqual(out2_metric.name, 'out2_loss')
self.assertEqual(out2_metric.result().numpy(), 0.5)
def test_nested_structure(self):
loss_container = compile_utils.LossesContainer(
{
'b': ['mse', None],
'a': 'mae'
}, loss_weights={
'b': [0.5, 0],
'a': 1
})
y_t = {
'b': [tf.ones((10, 1)),
tf.zeros((10, 1))],
'a': tf.zeros((10, 1))
}
y_p = {
'b': [tf.zeros((10, 1)),
tf.zeros((10, 1))],
'a': tf.ones((10, 1))
}
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertIsInstance(total_loss, tf.Tensor)
self.assertEqual(total_loss.numpy(), 0.75)
self.assertLen(loss_container.metrics, 3)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 0.75)
a_metric = loss_container.metrics[1]
self.assertEqual(a_metric.name, 'a_loss')
self.assertEqual(a_metric.result().numpy(), 0.5)
b_1_metric = loss_container.metrics[2]
self.assertEqual(b_1_metric.name, 'b_1_loss')
self.assertEqual(b_1_metric.result().numpy(), 0.5)
def test_broadcast_single_loss(self):
loss_container = compile_utils.LossesContainer('mse')
y_t = [tf.ones((10, 1)), tf.zeros((10, 1))]
y_p = [tf.ones((10, 1)), tf.ones((10, 1))]
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertEqual(total_loss.numpy(), 0.5)
self.assertLen(loss_container.metrics, 3)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 0.5)
output_1_metric = loss_container.metrics[1]
self.assertEqual(output_1_metric.name, 'output_1_loss')
self.assertEqual(output_1_metric.result().numpy(), 0.)
output_2_metric = loss_container.metrics[2]
self.as | sertEqual(output_2_metric.name, 'output_2_loss')
self.assertEqual(output_2_metric.result().numpy(), 0.5)
def test_missing_label_with_no_loss(self):
# It's ok to exclude a | label if that label has no
# losses or metrics associated with it.
loss_container = compile_utils.LossesContainer({
'output1': 'mse',
'output3': 'mae'
})
y_p = {
'output1': tf.convert_to_tensor([[0], [1], [2]]),
'ou |
screepers/screeps-stats | screeps_etl/settings.py | Python | mit | 743 | 0.002692 | import os
from os.path import expanduser
import sys
import yaml
def getSettings():
if not getSettings.settings:
cwd = os.getcwd()
| path = cwd + '/.settings.yaml'
if not os.path.isfile(path):
path = cwd + '/.screeps_settings.yaml'
if not os.path.isfile(path):
path = expanduser('~') + '/.screeps_settings.yaml'
if not os.path.isfile(path):
path = '/vagran | t/.screeps_settings.yaml'
if not os.path.isfile(path):
print 'no settings file found'
sys.exit(-1)
return False
with open(path, 'r') as f:
getSettings.settings = yaml.load(f)
return getSettings.settings
getSettings.settings = False
|
matikbird/matikbird.github.io | portfolio/quay/back_end/payments2/mercadopago/api-mercadopago-master/templates/code-examples-master/mp-checkout/shipping/python/ipn_merchant_order.py | Python | mit | 824 | 0.009709 | # coding: UTF-8
import os, sys
import mercadopago
def index(req, **kwargs):
mp = mercadopago.MP("CLIENT_ID", "CLIENT_SECRET")
topic = kwargs["topic"]
merchant_order_info = None
if topic == "payment"
payment_info = mp.get("/collections/notifications/"+kwargs["id"])
merchant_order_info = mp.get("/merchant_or | ders/"+payment_info["response"]["collection"]["merchant_order_id"])
elif topic == "merchant_order"
merchant_order_info = mp.get("/merchant_orders/"+kwargs["id"])
if merchant_order_info == None
raise ValueError("Error obtaining the merchant_order")
if merchant_order_info["status"] == 200
return {
"payment": merchant_order_info["response"]["payments"],
| "shipment": merchant_order_info["response"]["shipments"]
}
|
harshilasu/LinkurApp | y/google-cloud-sdk/lib/googlecloudapis/apitools/base/py/cli.py | Python | gpl-3.0 | 432 | 0 | """Top-level import for all CLI-related functionality in apitools.
Note that i | mporting this file will ultimately have side-effects, and
may require imports not available in all environments (such as App
Engine). In particular, picking up some readline-related imports can
cause pain.
"""
# pylint:disable=wildcard-import
from googlecloudapis.apitools.base.py.app2 import *
from | googlecloudapis.apitools.base.py.base_cli import *
|
jesuscript/topo-mpi | topo/analysis/vision.py | Python | bsd-3-clause | 11,628 | 0.020296 | """
Vision-specific analysis functions.
$Id: featureresponses.py 7714 2008-01-24 16:42:21Z antolikjan $
"""
__version__='$Revision: 7714 $'
from math import fmod,floor,pi,sin,cos,sqrt
import numpy
from numpy.oldnumeric import Float
from numpy import zeros, array, size, empty, object_
#import scipy
try:
import pylab
except ImportError:
print "Warning: Could not import matplotlib; pylab plots will not work."
import param
import topo
from topo.base.cf import CFSheet
from topo.base.sheetview import SheetView
from topo.misc.filepath import normalize_path
from topo.misc.numbergenerator import UniformRandom
from topo.plotting.plotgroup import create_plotgroup, plotgroups
from topo.command.analysis import measure_sine_pref
max_value = 0
global_index = ()
def _complexity_rec(x,y,index,depth,fm):
"""
Recurrent helper function for complexity()
"""
global max_value
global global_index
if depth<size(fm.features):
for i in range(size(fm.features[depth].values)):
_complexity_rec(x,y,index + (i,),depth+1,fm)
else:
if max_value < fm.full_matrix[index][x][y]:
global_index = index
max_value = fm.full_matrix[index][x][y]
def complexity(full_matrix):
global global_index
global max_value
"""This function expects as an input a object of type FullMatrix which contains
responses of all neurons in a sheet to stimuly with different varying parameter values.
One of these parameters (features) has to be phase. In such case it computes the classic
modulation ratio (see Hawken et al. for definition) for each neuron and returns them as a matrix.
"""
rows,cols = full_matrix.matrix_shape
complexity = zeros(full_matrix.matrix_shape)
complex_matrix = zeros(full_matrix.matrix_shape,object_)
fftmeasure = zeros(full_matrix.matrix_shape,Float)
i = 0
for f in full_matrix.features:
if f.name == "phase":
phase_index = i
break
i=i+1
sum = 0.0
res = 0.0
average = 0.0
for x in range(rows):
for y in range(cols):
complex_matrix[x,y] = []#
max_value=-0.01
global_index = ()
_complexity_rec(x,y,(),0,full_matrix)
#compute the sum of the responses over phases given the found index of highest response
iindex = array(global_index)
sum = 0.0
for i in range(size(full_matrix.features[phase_index].values)):
iindex[phase_index] = i
sum = sum + full_matrix.full_matrix[tuple(iindex.tolist())][x][y]
#average
average = sum / float(size(full_matrix.features[phase_index].values))
res = 0.0
#compute the sum of absolute values of the responses minus average
for i in range(size(full_matrix.features[phase_index].values)):
iindex[phase_index] = i
res = res + abs(full_matrix.full_matrix[tuple(iindex.tolist())][x][y] - average)
complex_matrix[x,y] = complex_matrix[x,y] + [full_matrix.full_matrix[tuple(iindex.tolist())][x][y]]
#this is taking away the DC component
#complex_matrix[x,y] -= numpy.min(complex_matrix[x,y])
if x==15 and y==15:
pylab.figure()
pylab.plot(complex_matrix[x,y])
if x==26 and y==26:
pylab.figure()
pylab.plot(complex_matrix[x,y])
#complexity[x,y] = res / (2*sum)
fft = numpy.fft.fft(complex_matrix[x,y]+complex_matrix[x,y]+complex_matrix[x,y]+complex_matrix[x,y],2048)
first_har = 2048/len(complex_matrix[0,0])
if abs(fft[0]) != 0:
fftmeasure[x,y] = 2 *abs(fft[first_har]) /abs(fft[0])
else:
fftmeasure[x,y] = 0
return fftmeasure
def compute_ACDC_orientation_tuning_curves(full_matrix,curve_label,sheet):
""" This function allows and alternative computation of orientation tuning curve where
for each given orientation the response is computed as a maximum of AC or DC component
across the phases instead of the maximum used as a standard in Topographica"""
# this method assumes that only single frequency has been used
i = 0
for f in full_matrix.features:
if f.name == "phase":
phase_index = i
if f.name == "orientation":
orientation_index = i
if f.name == "frequency":
frequency_index = i
i=i+1
print sheet.curve_dict
if not sheet.curve_dict.has_key("orientationACDC"):
sheet.curve_dict["orientationACDC"]={}
sheet.curve_dict["orientationACDC"][curve_label]={}
rows,cols = full_matrix.matrix_shape
for o in xrange(size(full_matrix.features[orientation_index].values)):
s_w = zeros(full_matrix.matrix_shape)
for x in range(rows):
for y in range(cols):
or_response=[]
for p in xrange(size(full_matrix.features[phase_index].values)):
index = [0,0,0]
index[phase_index] = p
index[orientation_index] = o
index[frequency_index] = 0
or_response.append(full_matrix.full_matrix[tuple(index)][x][y])
fft = numpy.fft.fft(or_response+or_response+or_response+or_response,2048)
first_har = 2048/len(or_response)
s_w[x][y] = numpy.maximum(2 *abs(fft[first_har]),abs(fft[0]))
s = SheetView((s_w,sheet.bounds), sheet.name , sheet.precedence, topo.sim.time(),sheet.row_precedence)
sheet.curve_dict["orientationACDC"][curve_label].update({full_matrix.features[orientation_index].values[o]:s})
def phase_preference_scatter_plot(sheet_name,diameter=0.39):
r = UniformRandom(seed=1023)
preference_map = topo.sim[sheet_name].sheet_views['PhasePreference']
offset_magnitude = 0.03
datax = []
datay = []
(v,bb) = preference_map.view()
for z in zeros(66):
x = (r() - 0.5)*2*diameter
y = (r() - 0.5)*2*diameter
rand = r()
xoff = sin(rand*2*pi)*offset_magnitude
yoff = cos(rand*2*pi)*offset_magnitude
xx = max(min(x+xoff,diameter),-diameter)
yy = max(min(y+yoff,diameter),-diameter)
x = max(min(x,diameter),-diameter)
y = max(min(y,diameter),-diameter)
[xc1,yc1] = topo.sim[sheet_name].sheet2matrixidx(xx,yy)
[xc2,yc2] = topo.sim[sheet_name].sheet2matrixidx(x,y)
if((xc1==xc2) & (yc1==yc2)): continue
datax = datax + [v[xc1,yc1]]
datay = datay + [v[xc2,yc2]]
for i in range(0,len(datax)):
datax[i] = datax[i] * 360
datay[i] = datay[i] * 360
if(datay[i] > datax[i] + 180): datay[i]= datay[i]- 360
if((datax[i] > 180) & (da | tay[i]> 180)): datax[i] = datax[i] - 360; datay[i] = datay[i] - 360
if((datax[i] > 180) & (datay[i] < (datax[i]-180))): datax[i] = datax[i] - 360; #datay[i] = datay[i] - 360
|
f = pylab.figure()
ax = f.add_subplot(111, aspect='equal')
pylab.plot(datax,datay,'ro')
pylab.plot([0,360],[-180,180])
pylab.plot([-180,180],[0,360])
pylab.plot([-180,-180],[360,360])
ax.axis([-180,360,-180,360])
pylab.xticks([-180,0,180,360], [-180,0,180,360])
pylab.yticks([-180,0,180,360], [-180,0,180,360])
pylab.grid()
pylab.savefig(normalize_path(str(topo.sim.timestr()) + sheet_name + "_scatter.png"))
###############################################################################
# JABALERT: Should we move this plot and command to analysis.py or
# pylabplots.py, where all the rest are?
#
# In any case, it requires generalization; it should not be hardcoded
# to any particular map name, and should just do the right thing for
# most networks for which it makes sense. E.g. it already measures
# the ComplexSelectivity for all measured_sheets, but then
# plot_modulation_ratio only |
pari685/AStream | dist/client/dash_client.py | Python | mit | 23,767 | 0.00446 | #!/usr/local/bin/python
"""
Author: Parikshit Juluri
Contact: pjuluri@umkc.edu
Testing:
import dash_client
mpd_file = <MPD_FILE>
dash_client.playback_duration(mpd_file, 'http://198.248.242.16:8005/')
From commandline:
python dash_client.py -m "http://198.248.242.16:8006/media/mpd/x4ukwHdACDw.mpd" -p "all"
python dash_client.py -m "http://127.0.0.1:8000/media/mpd/x4ukwHdACDw.mpd" -p "basic"
"""
from __future__ import division
import read_mpd
import urlparse
import urllib2
import random
import os
import sys
import errno
import timeit
import httplib
from string import ascii_letters, digits
from argparse import ArgumentParser
from multiprocessing import Process, Queue
from collections import defaultdict
from adaptation import basic_dash, basic_dash2, weighted_dash, netflix_dash
from adaptation.adaptation import WeightedMean
import config_dash
import dash_buffer
from configure_log_file import configure_log_file, write_json
import time
try:
WindowsError
except NameError:
from shutil import WindowsError
# Constants
DEFAULT_PLAYBACK = 'BASIC'
DOWNLOAD_CHUNK = 1024
# Globals for arg parser with the default values
# Not sure if this is the correct way ....
MPD = None
LIST = False
PLAYBACK = DEFAULT_PLAYBACK
DOWNLOAD = False
SEGMENT_LIMIT = None
class DashPlayback:
"""
Audio[bandwidth] : {duration, url_list}
Video[bandwidth] : {duration, url_list}
"""
def __init__(self):
self.min_buffer_time = None
self.playback_duration = None
self.audio = dict()
self.video = dict()
def get_mpd(url):
""" Module to download the MPD from the URL and save it to file"""
print url
try:
connection = urllib2.urlopen(url, timeout=10)
except urllib2.HTTPError, error:
config_dash.LOG.error("Unable to dow | nload MPD file HTTP Error: %s" % error.code)
return None
except urllib2.URLError:
error_message = "URLError. Unable to reach Server.Check if Server active"
config_dash.LOG.error(error_message)
print error_message
return None
except IOError, httplib.HTTPException:
| message = "Unable to , file_identifierdownload MPD file HTTP Error."
config_dash.LOG.error(message)
return None
mpd_data = connection.read()
connection.close()
mpd_file = url.split('/')[-1]
mpd_file_handle = open(mpd_file, 'w')
mpd_file_handle.write(mpd_data)
mpd_file_handle.close()
config_dash.LOG.info("Downloaded the MPD file {}".format(mpd_file))
return mpd_file
def get_bandwidth(data, duration):
""" Module to determine the bandwidth for a segment
download"""
return data * 8/duration
def get_domain_name(url):
""" Module to obtain the domain name from the URL
From : http://stackoverflow.com/questions/9626535/get-domain-name-from-url
"""
parsed_uri = urlparse.urlparse(url)
domain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
return domain
def id_generator(id_size=6):
""" Module to create a random string with uppercase
and digits.
"""
return 'TEMP_' + ''.join(random.choice(ascii_letters+digits) for _ in range(id_size))
def download_segment(segment_url, dash_folder):
""" Module to download the segment """
try:
connection = urllib2.urlopen(segment_url)
except urllib2.HTTPError, error:
config_dash.LOG.error("Unable to download DASH Segment {} HTTP Error:{} ".format(segment_url, str(error.code)))
return None
parsed_uri = urlparse.urlparse(segment_url)
segment_path = '{uri.path}'.format(uri=parsed_uri)
while segment_path.startswith('/'):
segment_path = segment_path[1:]
segment_filename = os.path.join(dash_folder, os.path.basename(segment_path))
make_sure_path_exists(os.path.dirname(segment_filename))
segment_file_handle = open(segment_filename, 'wb')
segment_size = 0
while True:
segment_data = connection.read(DOWNLOAD_CHUNK)
segment_size += len(segment_data)
segment_file_handle.write(segment_data)
if len(segment_data) < DOWNLOAD_CHUNK:
break
connection.close()
segment_file_handle.close()
#print "segment size = {}".format(segment_size)
#print "segment filename = {}".format(segment_filename)
return segment_size, segment_filename
def get_media_all(domain, media_info, file_identifier, done_queue):
""" Download the media from the list of URL's in media
"""
bandwidth, media_dict = media_info
media = media_dict[bandwidth]
media_start_time = timeit.default_timer()
for segment in [media.initialization] + media.url_list:
start_time = timeit.default_timer()
segment_url = urlparse.urljoin(domain, segment)
_, segment_file = download_segment(segment_url, file_identifier)
elapsed = timeit.default_timer() - start_time
if segment_file:
done_queue.put((bandwidth, segment_url, elapsed))
media_download_time = timeit.default_timer() - media_start_time
done_queue.put((bandwidth, 'STOP', media_download_time))
return None
def make_sure_path_exists(path):
""" Module to make sure the path exists if not create it
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def print_representations(dp_object):
""" Module to print the representations"""
print "The DASH media has the following video representations/bitrates"
for bandwidth in dp_object.video:
print bandwidth
def start_playback_smart(dp_object, domain, playback_type=None, download=False, video_segment_duration=None):
""" Module that downloads the MPD-FIle and download
all the representations of the Module to download
the MPEG-DASH media.
Example: start_playback_smart(dp_object, domain, "SMART", DOWNLOAD, video_segment_duration)
:param dp_object: The DASH-playback object
:param domain: The domain name of the server (The segment URLS are domain + relative_address)
:param playback_type: The type of playback
1. 'BASIC' - The basic adapataion scheme
2. 'SARA' - Segment Aware Rate Adaptation
3. 'NETFLIX' - Buffer based adaptation used by Netflix
:param download: Set to True if the segments are to be stored locally (Boolean). Default False
:param video_segment_duration: Playback duratoin of each segment
:return:
"""
# Initialize the DASH buffer
dash_player = dash_buffer.DashPlayer(dp_object.playback_duration, video_segment_duration)
dash_player.start()
# A folder to save the segments in
file_identifier = id_generator()
config_dash.LOG.info("The segments are stored in %s" % file_identifier)
dp_list = defaultdict(defaultdict)
# Creating a Dictionary of all that has the URLs for each segment and different bitrates
for bitrate in dp_object.video:
# Getting the URL list for each bitrate
dp_object.video[bitrate] = read_mpd.get_url_list(dp_object.video[bitrate], video_segment_duration,
dp_object.playback_duration, bitrate)
if "$Bandwidth$" in dp_object.video[bitrate].initialization:
dp_object.video[bitrate].initialization = dp_object.video[bitrate].initialization.replace(
"$Bandwidth$", str(bitrate))
media_urls = [dp_object.video[bitrate].initialization] + dp_object.video[bitrate].url_list
#print "media urls"
#print media_urls
for segment_count, segment_url in enumerate(media_urls, dp_object.video[bitrate].start):
# segment_duration = dp_object.video[bitrate].segment_duration
#print "segment url"
#print segment_url
dp_list[segment_count][bitrate] = segment_url
bitrates = dp_object.video.keys()
bitrates.sort()
average_dwn_time = 0
segment_files = []
# For basic adaptation
previous_se |
qspin/qtaste | doc/src/docbkx/scripts/config.py | Python | lgpl-3.0 | 281 | 0.003559 |
AppName = "Qtaste"
GitHubRepositoryUrl = "qspin/q | taste"
ReleaseNotesTemplate = "./qtaste_release_notes_template.xml"
ReleaseNotesOutputFile = "../qtaste_release_notes.xml"
# Version Info (generated by ma | ven)
VersionFile = "../../../../Version.txt"
VersionTag = "qtaste-version"
|
hoelsner/product-database | app/productdb/migrations/0025_auto_20170109_2017.py | Python | mit | 579 | 0.001727 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-09 19:17
from __future__ import unicode_li | terals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('productdb', '0024_auto_20161227_1015'),
]
operations = [
migrations.AlterFie | ld(
model_name='productcheck',
name='input_product_ids',
field=models.CharField(help_text='unordered Product IDs, separated by line breaks or semicolon', max_length=65536, verbose_name='Product ID list'),
),
]
|
daanwierstra/pybrain | pybrain/rl/environments/serverInterface.py | Python | bsd-3-clause | 1,099 | 0.009099 | __author__ = 'Frank Sehnke, sehnke@in.tum.de'
from environment import Environment
class GraphicalEnvironment(Environment):
""" Special type of environment that has graphical output and therefore needs a renderer.
"""
def __init__(self):
self. | renderInterface = None
def setRenderInterface(self, renderer):
""" set the renderer, which is an object of or inherited from class Renderer.
@param renderer: The renderer that should display the Environment
@type renderer: L{Renderer}
@see Renderer
"""
self.renderInterface = renderer
def getRenderInterface(self):
""" returns the current renderer.
@return: the current renderer
| @rtype: L{Renderer}
"""
return self.renderInterface
def hasRenderInterface(self):
""" tells you, if a Renderer has been set previously or not
@return: True if a renderer was set, False otherwise
@rtype: Boolean
"""
return (self.getRenderInterface() != None)
|
allan-simon/tatoSSO | tools/config.py | Python | mit | 1,959 | 0.015314 |
# where the application code will be generated
# relative to the "tools" directory
APP_ROOT = "../app"
# this represent the logical structure of your code
# the script init will use this to generate a skeleton
# of code
ARCHITECTURE = {
'controllers' : {
'Users' : {
| 'description': 'module to centralize user related actions',
'methods' : {
'show_all': {}
},
'forms' : {
'register_new' : {},
'login' : {}
},
'actions_only' : {
'logout': {},
'delete_by_id': {}
}
},
'Tokens' : {
| 'description': 'module to centralize action related to token grated to user on services',
'methods' : {
'check_token': {}
},
'forms': {
'external_login' : {}
},
'actions_only' : {
'kick_user': {}
}
},
'Admins' : {
'description': 'module to centralize administrator actions',
'forms' : {
'reset_password': {}
}
}
},
'models': {
'Tokens' : {},
'Tickets' : {}
},
'models_controllers': [
('Tickets','Users'),
('Tokens','Tokens'),
('Tickets','Tokens'),
]
}
# placeholders, replace the value by those specific to your
# project
REPLACEMENTS = {
'@AUTHOR@' : 'Allan SIMON',
'@EMAIL@': 'allan.simon@supinfo.com',
'@PROJECT_NAME_CODE@' : 'TatoSSO',
'@PROJECT_NAME_HUMAN@': 'TatoSSO',
'@PROJECT_NS@': 'tatosso',
'@MAIN_CLASS@' : 'TatoSSO',
'@MAIN_CLASS_HEADER@' : 'TATO_SSO',
'@DESCRIPTION@' : 'Single Sign On (SSO) system',
'@PROJECT_WEBSITE' : 'https://github.com/allan-simon/tatosso',
'@YEARS@' : '2014',
' ' : ' ' #your prefered indentation, replace the second one
}
|
VirusTotal/content | Packs/MajorBreachesInvestigationandResponse/Scripts/RapidBreachResponseEradicationTasksCountWidget/RapidBreachResponseEradicationTasksCountWidget.py | Python | mit | 978 | 0.003067 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
ORANGE_HTML_STYLE = "'color:#EF9700;font-size:48px;padding: 60px; text-align:center;padding-left: 70px'>"
GREEN_HTML_STYLE = "'color:#1DB846;font-size:48px;padding: 60px; text-align:center;padding-left: 70px'>"
GREY_ | HTML_STYLE = "'color:#404142;font-size:48px;padding: 60px; text-align:center;padding-left: 70px'>"
def main():
incident = demisto.incidents()
query = incident[0].get('CustomFields', {}).get('eradicationtaskcount', 0)
if not query:
html = f"<div style={GREY_HTML_STYLE}{0}</div>"
elif int(query) == 0:
html = f"<div style={ORANGE_HTML_STYLE | }{str(query)}</div>"
else:
html = f"<div style={GREEN_HTML_STYLE}{str(query)}</div>"
demisto.results({
'ContentsFormat': formats['html'],
'Type': entryTypes['note'],
'Contents': html
})
if __name__ in ["__main__", "builtin", "builtins"]:
main()
|
jmesteve/saas3 | openerp/addons/base/ir/__init__.py | Python | agpl-3.0 | 1,426 | 0.000701 | # -*- coding: utf-8 -*-
####################################################################### | #######
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This p | rogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir_model
import ir_sequence
import ir_needaction
import ir_ui_menu
import ir_ui_view
import ir_default
import ir_actions
import ir_attachment
import ir_cron
import ir_filters
import ir_values
import ir_translation
import ir_exports
import ir_rule
import ir_config_parameter
import osv_memory_autovacuum
import ir_mail_server
import ir_fields
import ir_qweb
import ir_http
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tmaiwald/OSIM | OSIM/Modeling/Testbenches/DepletionCharge_TB.py | Python | bsd-2-clause | 1,541 | 0.038287 | import numpy as np
from OSIM.Modeling.Components.NPN_Vertical_Bipolar_Intercompany_Model.VBIC_Charges.VBIC_DepletionCharge import VBIC_DepletionCharge
from OSIM.Modeling.Components.Resistor import Resistor
from OSIM.Modeling.Components.VoltageSource import VoltageSource
from OSIM.Modeling.Components.Capacity import Capacity
from OSIM.Modeling.CircuitSystemEquations import CircuitSystemEquations
from OSIM.Simulation.CircuitAnalysis.CircuitAnalyser import CircuitAnalyser
#QJBE
vbic_cje_mm = 1
vbic_cje = 1
CJx = 9.7E-15*(1*0.25)**0.95*1*(1+(1-1)/np.sqrt(1)) # CJE#9.7E-15*(Nx*0.25)**0.95*vbic_cje*(1+(vbic_cje_mm-1)/np.sqrt(Nx)) # CJE
P = 0.9 # PE
M = 0.105 # ME
AJ = -0.5 # AJE
WBx = 1 # WBE
F = 0.97 # FC
gnd = '0'
sigi | n = '1'
sigout = '2'
ik = '3'
vsource = VoltageSource([gnd,sigin],"V1",0,None,paramdict={'FUNC':'SIN','F':'1e11', 'DC':'0', 'AC':'1', 'P':'180'})
r1 = Resistor([sigin,sigout],"R1",0.00001,None)
c = VBIC_DepletionCharge([sigout, ik],"QJ", CJx, None, paramdict= {'P':P,'M':M,'F':F,'AJ':AJ,'FAK':WBx})
cref = Capacity([sigout, ik],"CR",0.35e-14, None)
r2 = Resistor([ik,gnd],"R2",1000,None)
TBSys = CircuitSystemEquati | ons([vsource,r1,c,r2,cref])
ca = CircuitAnalyser(TBSys)
volts = [x/100 for x in range(-200,200)]
res = np.zeros((2,len(volts)),dtype= np.float64)
#for vidx,v in enumerate(volts):
# vsource.changeMyVoltageInSys(v)
# ca.calcDCOperatingPoint()
# res[0][vidx] = v
# res[1][vidx] = c.getCharge()
#ca.plot_lin([res,["Q"],"Ladung"])
ca.plot_lin(ca.getTrans(0 ,4e-11,1e-14,['CR','QJ']))
|
fcwu/desktop-mirror | lib/common.py | Python | apache-2.0 | 123 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
APPNA | ME = 'desktop-mirror'
DEF | AULT_PORT = 47767
VERSION = 'v0.8-7-g2038d52'
|
GhostshipSoftware/avaloria | contrib/menu_login.py | Python | bsd-3-clause | 13,491 | 0.002224 | """
Menu-driven login system
Contribution - Griatch 2011
This is an alternative login system for Evennia, using the
contrib.menusystem module. As opposed to the default syste | m it doesn't
use emails for authentication and also don't auto-creates a Character
with the same name as the Player (ins | tead assuming some sort of
character-creation to come next).
Install is simple:
To your settings file, add/edit the line:
CMDSET_UNLOGGEDIN = "contrib.menu_login.UnloggedInCmdSet"
That's it. Reload the server and try to log in to see it.
The initial login "graphic" is taken from strings in the module given
by settings.CONNECTION_SCREEN_MODULE. You will want to copy the
template file in game/gamesrc/conf/examples up one level and re-point
the settings file to this custom module. you can then edit the string
in that module (at least comment out the default string that mentions
commands that are not available) and add something more suitable for
the initial splash screen.
"""
import re
import traceback
from django.conf import settings
from ev import managers
from ev import utils, logger, create_player
from ev import Command, CmdSet
from ev import syscmdkeys
from src.server.models import ServerConfig
from contrib.menusystem import MenuNode, MenuTree
CMD_LOGINSTART = syscmdkeys.CMD_LOGINSTART
CMD_NOINPUT = syscmdkeys.CMD_NOINPUT
CMD_NOMATCH = syscmdkeys.CMD_NOMATCH
CONNECTION_SCREEN_MODULE = settings.CONNECTION_SCREEN_MODULE
# Commands run on the unloggedin screen. Note that this is not using
# settings.UNLOGGEDIN_CMDSET but the menu system, which is why some are
# named for the numbers in the menu.
#
# Also note that the menu system will automatically assign all
# commands used in its structure a property "menutree" holding a reference
# back to the menutree. This allows the commands to do direct manipulation
# for example by triggering a conditional jump to another node.
#
# Menu entry 1a - Entering a Username
class CmdBackToStart(Command):
"""
Step back to node0
"""
key = CMD_NOINPUT
locks = "cmd:all()"
def func(self):
"Execute the command"
self.menutree.goto("START")
class CmdUsernameSelect(Command):
"""
Handles the entering of a username and
checks if it exists.
"""
key = CMD_NOMATCH
locks = "cmd:all()"
def func(self):
"Execute the command"
player = managers.players.get_player_from_name(self.args)
if not player:
self.caller.msg("{rThis account name couldn't be found. Did you create it? If you did, make sure you spelled it right (case doesn't matter).{n")
self.menutree.goto("node1a")
else:
# store the player so next step can find it
self.menutree.player = player
self.caller.msg(echo=False)
self.menutree.goto("node1b")
# Menu entry 1b - Entering a Password
class CmdPasswordSelectBack(Command):
"""
Steps back from the Password selection
"""
key = CMD_NOINPUT
locks = "cmd:all()"
def func(self):
"Execute the command"
self.menutree.goto("node1a")
self.caller.msg(echo=True)
class CmdPasswordSelect(Command):
"""
Handles the entering of a password and logs into the game.
"""
key = CMD_NOMATCH
locks = "cmd:all()"
def func(self):
"Execute the command"
self.caller.msg(echo=True)
if not hasattr(self.menutree, "player"):
self.caller.msg("{rSomething went wrong! The player was not remembered from last step!{n")
self.menutree.goto("node1a")
return
player = self.menutree.player
if not player.check_password(self.args):
self.caller.msg("{rIncorrect password.{n")
self.menutree.goto("node1b")
return
# before going on, check eventual bans
bans = ServerConfig.objects.conf("server_bans")
if bans and (any(tup[0]==player.name.lower() for tup in bans)
or
any(tup[2].match(self.caller.address) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "{rYou have been banned and cannot continue from here."
string += "\nIf you feel this ban is in error, please email an admin.{x"
self.caller.msg(string)
self.caller.sessionhandler.disconnect(self.caller, "Good bye! Disconnecting...")
return
# we are ok, log us in.
self.caller.msg("{gWelcome %s! Logging in ...{n" % player.key)
#self.caller.session_login(player)
self.caller.sessionhandler.login(self.caller, player)
# abort menu, do cleanup.
self.menutree.goto("END")
# we are logged in. Look around.
character = player.character
if character:
character.execute_cmd("look")
else:
# we have no character yet; use player's look, if it exists
player.execute_cmd("look")
# Menu entry 2a - Creating a Username
class CmdUsernameCreate(Command):
"""
Handle the creation of a valid username
"""
key = CMD_NOMATCH
locks = "cmd:all()"
def func(self):
"Execute the command"
playername = self.args
# sanity check on the name
if not re.findall('^[\w. @+-]+$', playername) or not (3 <= len(playername) <= 30):
self.caller.msg("\n\r {rAccount name should be between 3 and 30 characters. Letters, spaces, dig\
its and @/./+/-/_ only.{n") # this echoes the restrictions made by django's auth module.
self.menutree.goto("node2a")
return
if managers.players.get_player_from_name(playername):
self.caller.msg("\n\r {rAccount name %s already exists.{n" % playername)
self.menutree.goto("node2a")
return
# store the name for the next step
self.menutree.playername = playername
self.caller.msg(echo=False)
self.menutree.goto("node2b")
# Menu entry 2b - Creating a Password
class CmdPasswordCreateBack(Command):
"Step back from the password creation"
key = CMD_NOINPUT
locks = "cmd:all()"
def func(self):
"Execute the command"
self.caller.msg(echo=True)
self.menutree.goto("node2a")
class CmdPasswordCreate(Command):
"Handle the creation of a password. This also creates the actual Player/User object."
key = CMD_NOMATCH
locks = "cmd:all()"
def func(self):
"Execute the command"
password = self.args
self.caller.msg(echo=False)
if not hasattr(self.menutree, 'playername'):
self.caller.msg("{rSomething went wrong! Playername not remembered from previous step!{n")
self.menutree.goto("node2a")
return
playername = self.menutree.playername
if len(password) < 3:
# too short password
string = "{rYour password must be at least 3 characters or longer."
string += "\n\rFor best security, make it at least 8 characters "
string += "long, avoid making it a real word and mix numbers "
string += "into it.{n"
self.caller.msg(string)
self.menutree.goto("node2b")
return
# everything's ok. Create the new player account. Don't create
# a Character here.
try:
permissions = settings.PERMISSION_PLAYER_DEFAULT
typeclass = settings.BASE_PLAYER_TYPECLASS
new_player = create_player(playername, None, password,
typeclass=typeclass,
permissions=permissions)
if not new_player:
self.msg("There was an error creating the Player. This error was logged. Contact an admin.")
self.menutree.goto("START")
return
utils.init_new_player(new_player)
# join the new player to the public channel
pchanneldef = settings.CHANNEL_PUBLIC
if pchanneldef:
pchannel = managers.channels.get_channel(pchanneldef[0])
|
onyb/mooca | Udacity/UD032_Data_Wrangling_with_MongoDB/Lesson_4/23-Using_$in_Operator/find_cars.py | Python | mit | 1,151 | 0.008688 | #!/usr/bin/env python
""" Your task is to write a query that will return all cars manufactured by "Ford Motor Company"
that are assembled in Germany, United Kingdom, or Japan.
Please modify only 'in_query' function, as only that will be taken into account.
Your code will be run against a MongoDB instance that we have provided.
If you want to run this code locally on your machine,
you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
"""
def get_db() | :
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client.examples
return db
def in_query():
# Write the query
query = {
"manufacturer" : "Ford Motor Company",
"assembly": {"$in": ["Germany", "United Kingdom", "Japan"]}
}
return query
if __name__ == "__main__":
db = get_db()
query = in_query()
autos = db.autos.find(query, {"name":1, "manufacturer":1, "assembly": 1, "_id" | :0})
print "Found autos:", autos.count()
import pprint
for a in autos:
pprint.pprint(a)
|
loafbaker/django_ecommerce2 | orders/migrations/0002_useraddress.py | Python | mit | 1,035 | 0.002899 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-29 17:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('orders', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Us | erAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('billing', | 'Billing'), ('shipping', 'Shipping')], max_length=120)),
('street', models.CharField(max_length=120)),
('city', models.CharField(max_length=120)),
('state', models.CharField(max_length=120)),
('zipcode', models.CharField(max_length=120)),
('user_checkout', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.UserCheckout')),
],
),
]
|
makerbot/conveyor | src/test/python/client.py | Python | agpl-3.0 | 1,877 | 0.001599 | # vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:
# conveyor/src/test/python/client.py
#
# conveyor - Printing dispatch engine for 3D objects and their friends.
# Copyright © 2012 Matthew W. Samsonoff <matthew.samsonoff@makerbot.c | om>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License | as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, print_function, unicode_literals)
import argparse
import sys
import threading
import conveyor.address
import conveyor.connection
def _main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('address', metavar='ADDRESS')
parsedargs = parser.parse_args(argv[1:])
address = conveyor.address.Address.parse(parsedargs.address)
connection = address.connect()
connection.write('hello from client')
def target():
while True:
data = sys.stdin.readline()
if '' == data:
break
else:
connection.write(data)
thread = threading.Thread(target=target)
thread.start()
while True:
data = connection.read()
if '' == data:
break
else:
print('data=%r' % (data,))
return 0
if '__main__' == __name__:
code = _main(sys.argv)
if None is code:
code = 0
sys.exit(code)
|
echanna/EdxNotAFork | lms/lib/comment_client/comment.py | Python | agpl-3.0 | 3,282 | 0.002133 | from .utils import CommentClientRequestError, perform_request
from .thread import Thread, _url_for_flag_abuse_thread, _url_for_unflag_abuse_thread
import models
import settings
class Comment(models.Model):
accessible_fields = [
'id', 'body', 'anonymous', 'anonymous_to_peers', 'course_id',
'endorsed', 'parent_id', 'thread_id', 'username', 'votes', 'user_id',
'closed', 'created_at', 'updated_at', 'depth', 'at_position_list',
'type', 'commentable_id', 'abuse_flaggers'
]
updatable_fields = [
'body', 'anonymous', 'anonymous_to_peers', 'course_id', 'closed',
'user_id', 'endorsed'
]
initializable_fields = updatable_fields
metrics_tag_fields = ['course_id', 'endorsed', 'closed']
base_url = "{prefix}/comments".format(prefix=settings.PREFIX)
type = 'comment'
@property
def thread(self):
return Thread(id=self.thread_id, type='thread')
@classmethod
def url_for_comments(cls, params={}):
if params.get('thread_id'):
return _url_for_thread_comments(params['thread_id'])
else:
return _url_for_comment(params['parent_id'])
@classmethod
def url(cls, action, params={}):
if action in ['post']:
return cls.url_for_comments(params)
else:
return super(Comment, cls).url(action, params)
def flagAbuse(self, user, voteable):
if voteable.type == 'thread':
url = _url_for_flag_abuse_thread(voteable.id)
elif voteable.type == 'comment':
url = _url_for_flag_abuse_comment(voteable.id)
else:
raise CommentClientRequestError("Can only flag/unflag threads or comments")
params = {'user_id': user.id}
request = perform_request(
'put',
url,
params,
metric_tags=self._metric_tags,
metric_action='comment.abuse.flagged'
)
voteable.update_attributes(request)
def unFlagAbuse(self, user, voteable, removeAll):
if voteable.type == 'thread':
url = _url_for_unflag_abuse_thread(voteable.id)
elif voteable.type == 'comment':
url = _url_for_unflag_abuse_comment(voteable.id)
else:
raise CommentClientRequestError("Can flag/unflag for threads or comments")
params = {'user_id': user.id}
if removeAll:
params['all'] = True
request = perform_request(
'put',
url,
params,
metric_tags=self._metric_tags,
metric_action='comment.abuse.unflagged'
)
voteable.update_attributes(request)
def _url_for_thread_comments(thread_id):
return "{prefix}/threads/{thread_id}/comments".format(prefix=settings.PREFIX, thread_id=thread_id)
def _url_for_comment(comment_id):
return "{prefix}/comments/{comment_id}".format(prefix=settings.PREFIX, comment_id=comment_id)
def _url_for_flag_abuse_comment(comment_id):
return "{prefix}/comments/{comment_id}/abuse_flag".format(prefix=settings.PREFIX, comment_id=comment_id)
def _url_for_unflag_abuse_comment(comment_id):
return "{prefix}/comments/{comment_id}/abuse_unflag".format(prefix=settings.PREFIX | , comment_id=c | omment_id)
|
ywcui1990/nupic | src/nupic/algorithms/monitor_mixin/temporal_memory_monitor_mixin.py | Python | agpl-3.0 | 14,320 | 0.007402 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Temporal Memory mixin that enables detailed monitoring of history.
"""
import copy
from collections import defaultdict
from nupic.algorithms.monitor_mixin.metric import Metric
from nupic.algorithms.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from prettytable import PrettyTable
from nupic.algorithms.monitor_mixin.trace import (IndicesTrace, CountsTrace,
BoolsTrace, StringsTrace)
class TemporalMemoryMonitorMixin(MonitorMixinBase):
"""
Mixin for TemporalMemory that stores a detailed history, for inspection and
debugging.
"""
def __init__(self, *args, **kwargs):
super(TemporalMemoryMonitorMixin, self).__init__(*args, **kwargs)
self._mmResetActive = True # First iteration is always a reset
def mmGetTraceActiveColumns(self):
"""
@return (Trace) Trace of active columns
"""
return self._mmTraces["activeColumns"]
def mmGetTracePredictiveCells(self):
"""
@return (Trace) Trace of predictive cells
"""
return sel | f._mmTraces["predictiveCells"]
def mmGetTraceNumSegments(self):
"""
@return (Trace) Trace of # segments
"""
return self._mmTraces["numSegments"]
def mmGetTraceNumSynapses(self):
"""
@return (Trace) Trace of # synapses
"""
return self._mmTraces["numSynapses"]
def mmGetTraceSequenceLabels(self):
"""
@return (Trace) Trace of sequence labels
"""
return self._mmTraces["sequenceLabels"]
|
def mmGetTraceResets(self):
"""
@return (Trace) Trace of resets
"""
return self._mmTraces["resets"]
def mmGetTracePredictedActiveCells(self):
"""
@return (Trace) Trace of predicted => active cells
"""
self._mmComputeTransitionTraces()
return self._mmTraces["predictedActiveCells"]
def mmGetTracePredictedInactiveCells(self):
"""
@return (Trace) Trace of predicted => inactive cells
"""
self._mmComputeTransitionTraces()
return self._mmTraces["predictedInactiveCells"]
def mmGetTracePredictedActiveColumns(self):
"""
@return (Trace) Trace of predicted => active columns
"""
self._mmComputeTransitionTraces()
return self._mmTraces["predictedActiveColumns"]
def mmGetTracePredictedInactiveColumns(self):
"""
@return (Trace) Trace of predicted => inactive columns
"""
self._mmComputeTransitionTraces()
return self._mmTraces["predictedInactiveColumns"]
def mmGetTraceUnpredictedActiveColumns(self):
"""
@return (Trace) Trace of unpredicted => active columns
"""
self._mmComputeTransitionTraces()
return self._mmTraces["unpredictedActiveColumns"]
def mmGetMetricFromTrace(self, trace):
"""
Convenience method to compute a metric over an indices trace, excluding
resets.
@param (IndicesTrace) Trace of indices
@return (Metric) Metric over trace excluding resets
"""
return Metric.createFromTrace(trace.makeCountsTrace(),
excludeResets=self.mmGetTraceResets())
def mmGetMetricSequencesPredictedActiveCellsPerColumn(self):
"""
Metric for number of predicted => active cells per column for each sequence
@return (Metric) metric
"""
self._mmComputeTransitionTraces()
numCellsPerColumn = []
for predictedActiveCells in (
self._mmData["predictedActiveCellsForSequence"].values()):
cellsForColumn = self.mapCellsToColumns(predictedActiveCells)
numCellsPerColumn += [len(x) for x in cellsForColumn.values()]
return Metric(self,
"# predicted => active cells per column for each sequence",
numCellsPerColumn)
def mmGetMetricSequencesPredictedActiveCellsShared(self):
"""
Metric for number of sequences each predicted => active cell appears in
Note: This metric is flawed when it comes to high-order sequences.
@return (Metric) metric
"""
self._mmComputeTransitionTraces()
numSequencesForCell = defaultdict(lambda: 0)
for predictedActiveCells in (
self._mmData["predictedActiveCellsForSequence"].values()):
for cell in predictedActiveCells:
numSequencesForCell[cell] += 1
return Metric(self,
"# sequences each predicted => active cells appears in",
numSequencesForCell.values())
def mmPrettyPrintConnections(self):
"""
Pretty print the connections in the temporal memory.
TODO: Use PrettyTable.
@return (string) Pretty-printed text
"""
text = ""
text += ("Segments: (format => "
"(#) [(source cell=permanence ...), ...]\n")
text += "------------------------------------\n"
columns = range(self.numberOfColumns())
for column in columns:
cells = self.cellsForColumn(column)
for cell in cells:
segmentDict = dict()
for seg in self.connections.segmentsForCell(cell):
synapseList = []
for synapse in self.connections.synapsesForSegment(seg):
synapseData = self.connections.dataForSynapse(synapse)
synapseList.append(
(synapseData.presynapticCell, synapseData.permanence))
synapseList.sort()
synapseStringList = ["{0:3}={1:.2f}".format(sourceCell, permanence) for
sourceCell, permanence in synapseList]
segmentDict[seg] = "({0})".format(" ".join(synapseStringList))
text += ("Column {0:3} / Cell {1:3}:\t({2}) {3}\n".format(
column, cell,
len(segmentDict.values()),
"[{0}]".format(", ".join(segmentDict.values()))))
if column < len(columns) - 1: # not last
text += "\n"
text += "------------------------------------\n"
return text
def mmPrettyPrintSequenceCellRepresentations(self, sortby="Column"):
"""
Pretty print the cell representations for sequences in the history.
@param sortby (string) Column of table to sort by
@return (string) Pretty-printed text
"""
self._mmComputeTransitionTraces()
table = PrettyTable(["Pattern", "Column", "predicted=>active cells"])
for sequenceLabel, predictedActiveCells in (
self._mmData["predictedActiveCellsForSequence"].iteritems()):
cellsForColumn = self.mapCellsToColumns(predictedActiveCells)
for column, cells in cellsForColumn.iteritems():
table.add_row([sequenceLabel, column, list(cells)])
return table.get_string(sortby=sortby).encode("utf-8")
# ==============================
# Helper methods
# ==============================
def _mmComputeTransitionTraces(self):
"""
Computes the transition traces, if necessary.
Transition traces are the following:
predicted => active cells
predicted => inactive cells
predicted => active columns
predicted => inactive columns
unpredicted => active columns
"""
if not self._mmTransitionTracesStale:
return
self._mmData["predictedActiveCellsForSequence"] = defaultdict(set)
self._mmTraces["predictedActiveCells"] = IndicesTrace(self,
"predicted => active cells (correct)")
self._ |
nwaxiomatic/django-wpadmin | wpadmin/menu/utils.py | Python | mit | 4,176 | 0.000718 | """
Menu utilities.
"""
from fnmatch import fnmatch
from django.utils.importlib import import_module
from django.core.urlresolvers import reverse
from wpadmin.utils import (
get_wpadmin_settings, get_admin_site, get_admin_site_name)
def get_menu_cls(menu, admin_site_name='admin'):
"""
menu - menu name ('top' or 'left')
"""
return get_wpadmin_settings(admin_site_name).get('menu', {}).get(menu, None)
def get_menu(menu, admin_site_name='admin'):
"""
menu - menu name ('top' or 'left')
"""
menu_cls = get_menu_cls(menu, admin_site_name)
if menu_cls:
mod, inst = menu_cls.rsplit('.', 1)
mod = import_module(mod)
return getattr(mod, inst)()
return None
# I had to copy (and slightly modify) those utils from django-admin-tools
# to override get_admin_site
def get_avail_models(context):
""" Returns (model, perm,) for all models user can possibly see """
items = []
admin_site = get_admin_site(context)
for model, model_admin in list(admin_site._registry.items()):
perms = model_admin.get_model_perms(context.get('request'))
if True not in list(perms.values()):
continue
items.append((model, perms,))
return items
def filter_models(context, models, exclude):
"""
Returns (model, perm,) for all models that match models/exclude patterns
and are visible by current user.
"""
items = get_avail_models(context)
included = []
full_name = lambda m: '%s.%s' % (m.__module__, m.__name__)
# I believe that that implemented
# O(len(patterns)*len(matched_patterns)*len(all_models))
# algorithm is fine for model lists because they are small and admin
# performance is not a bottleneck. If it is not the case then the code
# should be optimized.
if len(models) == 0:
included = items
else:
for pattern in models:
for item in items:
model, perms = ite | m
if fnmatch(full_name(model), pattern) and item not in included:
included.append(item)
result = included[:]
for pattern in exclude:
for item in included:
model, perms = item
if fnmatch(full_name(model), pattern):
try:
result.remove(item)
except ValueError: # if the item was already removed skip
| pass
return result
class UserTestElementMixin(object):
"""
Mixin which adds a method for checking if current user is allowed to see
something (menu, menu item, etc.).
"""
def is_user_allowed(self, user):
"""
This method can be overwritten to check if current user can see this
element.
"""
return True
class AppListElementMixin(object):
"""
Mixin class for AppList and ModelList MenuItem.
"""
def _visible_models(self, context):
included = self.models[:]
excluded = self.exclude[:]
if excluded and not included:
included = ["*"]
return filter_models(context, included, excluded)
def _get_admin_app_list_url(self, model, context):
"""
Returns the admin change url.
"""
app_label = model._meta.app_label
return reverse('%s:app_list' % get_admin_site_name(context),
args=(app_label,))
def _get_admin_change_url(self, model, context):
"""
Returns the admin change url.
"""
app_label = model._meta.app_label
return reverse('%s:%s_%s_changelist' % (get_admin_site_name(context),
app_label,
model.__name__.lower()))
def _get_admin_add_url(self, model, context):
"""
Returns the admin add url.
"""
app_label = model._meta.app_label
return reverse('%s:%s_%s_add' % (get_admin_site_name(context),
app_label,
model.__name__.lower()))
def is_empty(self):
return len(self.children) == 0
|
xshotD/lolbot | cogs/eval.py | Python | mit | 4,708 | 0.003823 | """The following code is (c) sliceofcode 2017."""
"""Source: https://github.com/sliceofcode/dogbot/blob/master/dog/core/ext/exec.py """
"""
Handy exec (eval, debug) cog. Allows you to run code on the bot during runtime. This cog
is a combination of the exec commands of other bot authors:
Credit:
- Rapptz (Danny)
- https://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py#L31-L75
- b1naryth1ef (B1nzy, Andrei)
- https://github.com/b1naryth1ef/b1nb0t/blob/master/plugins/util.py#L220-L257
Features:
- Strips code markup (code blocks, inline code markup)
- Access to last result with _
- _get and _find instantly available without having to import discord
- Redirects stdout so you can print()
- Sane syntax error reporting
"""
import io
import logging
import textwrap
import traceback
from contextlib import redirect_stdout
import aiohttp
import discord
from discord.ext import commands
from cogs.utils import paste
log = logging.getLogger(__name__)
def strip_code_markup(content: str) -> str:
""" Strips code markup from a string. """
# ```py
# code
# ```
if content.startswith('```') and content.endswith('```'):
# grab the lines in the middle
return '\n'.join(content.split('\n')[1:-1])
# `code`
return content.strip('` \n')
def format_syntax_error(e: SyntaxError) -> str:
""" Formats a SyntaxError. """
if e.text is None:
return '```py\n{0.__class__.__name__}: {0}\n```'.format(e)
# display a nice arrow
return '```py\n{0.text}{1:>{0.offset}}\n{2}: {0}```'.format(e, '^', type(e).__name__)
class Exec:
def __init__(self, bot, *args, **kwargs):
self.bot = bot
self.last_result = None
@commands.command(name='eval', aliases=['exec', 'debug'])
@commands.is_owner()
async def _eval(self, ctx, *, code: str):
""" Executes Python code. """
async def upload(file_name: str):
with open(file_name, 'rb') as fp:
await ctx.send(file=discord.File(fp))
async def send(*args, **kwargs):
await ctx.send(*args, **kwargs)
env = {
'self': self,
'bot': ctx.bot,
'ctx': ctx,
'msg': ctx.message,
'guild': ctx.guild,
'channel': ctx.channel,
'me': ctx.message.author,
# utilities
'_get': discord.utils.get,
'_find': discord.utils.find,
'_upload': upload,
'_send': send,
# last result
'_': self.last_result
}
env.update(globals())
# remove any markup that might be in the message
code = strip_code_markup(code)
# add an implicit return at the end
lines = code.split('\n')
if not lines[-1].startswith('return') and not lines[-1].startswith(' '):
lines[-1] = 'return ' + lines[-1]
code = '\n'.join(lines)
# simulated stdout
stdout = io.StringIO()
# wrap the code in a function, so that we can use await
wrapped_code = 'async def func():\n' + textwrap.indent(code, ' ')
try:
exec(compile(wrapped_code, '<exec>', 'exec'), env)
except SyntaxError as e:
ret | urn await ctx.send(format_syntax_error(e))
func = env['func']
try:
with redirect_s | tdout(stdout):
ret = await func()
except Exception as e:
# something went wrong
stream = stdout.getvalue()
await ctx.send('```py\n{}{}\n```'.format(stream, traceback.format_exc()))
else:
# successful
stream = stdout.getvalue()
try:
await ctx.message.add_reaction('\u2705')
except:
# couldn't add the reaction, ignore
log.warning('Failed to add reaction to eval message, ignoring.')
try:
self.last_result = self.last_result if ret is None else ret
await ctx.send('```py\n{}{}\n```'.format(stream, repr(ret)))
except discord.HTTPException:
# too long
try:
url = await paste(ctx.bot.session, stream + repr(ret))
await ctx.send('Result was too long. ' + url)
except KeyError:
# even hastebin couldn't handle it
await ctx.send('Result was too long, even for Hastebin.')
except aiohttp.ClientError:
await ctx.send('Unable to send the result to Hastebin, it\'s probably down.')
def setup(bot):
bot.add_cog(Exec(bot))
|
kartikshah1/Test | courseware/permissions.py | Python | mit | 7,593 | 0.000922 | """
Handles permissions of the courseware API
Permission Classes:
IsInstructorOrReadOnly
- safe methods allowed for all users. other only for instructor
IsContentDeveloper
- Checks whether he is a ContentDeveloper
IsRegistered
- Checks whether the student is enrolled in the course
"""
from rest_framework import permissions
from courseware.models import CourseHistory
from user_profile.models import CustomUser
from courseware.models import Group, Concept
class IsAdminUserOrReadOnly(permissions.BasePermission):
"""
safe methods allowed for all the users. other only for admin
"""
def has_permission(self, request, view):
"""
Returns whether user has permission for this table or not
"""
if request.method in permissions.SAFE_METHODS:
return True
return request.user.is_superuser
def has_object_permission(self, request, view, obj):
"""
Returns whether user has permission on this object or not
"""
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
return request.user.is_superuser
class IsContentDeveloperOrReadOnly(permissions.BasePermission):
"""
Check if the current mode of operation is valid
and in content developer mode
"""
def has_permission(self, request, view):
if request.method in permissions.SAFE_METHODS:
return True
else:
if request.user.is_authenticated():
customuser = CustomUser.objects.get(user=request.user)
if customuser.is_content_developer:
return True
return False
def has_object_permission(self, request, view, obj):
"""
Returns whether user has permission on this object or not
"""
if request.user.is_authenticated():
# return true if he is the content developer for this textbook
try:
CourseHistory.objects.get(
course=obj,
user=request.user,
is_owner=True
)
except:
if request.method in permissions.SAFE_METHODS:
# return true if he is an instructor
customuser = CustomUser.objects.get(user=request.user)
if customuser.is_instructor:
return True
return False
return True
return False
class IsInstructorOrReadOnly(permissions.BasePermission):
"""
Allows complete permission to instructor and list access to others
"""
def has_permission(self, request, view):
if request.method in permissions.SAFE_METHODS:
return True
else:
if request.user.is_authenticated():
customuser = CustomUser.objects.get(user=request.user)
if customuser.is_instructor:
return True
return False
def has_object_permission(self, request, view, obj):
"""
Returns whether user has permission on this object or not
"""
if request.user.is_authenticated():
if request.method in permissions.SAFE_METHODS:
# return true if registered
try:
CourseHistory.objects.get(
course=obj,
user=request.user,
active='A'
)
except:
return False
return True
# return true if he is the instructor for this offering
try:
CourseHistory.objects.get(
course=obj,
user=request.user,
is_owner=True
)
except:
return False
return True
return False
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Allows complete permission to the owner and list access to others
"""
def has_permission(self, request, view):
if request.method in permissions.SAFE_METHODS:
return True
else:
if request.user.is_authenticated():
customuser = CustomUser.objects.get(user=request.user)
if customuser.is_instructor or customuser.is_content_developer:
return True
return False
def has_object_permission(self, request, view, obj):
"""
Returns whether user has permission on this object or not
"""
if request.user.is_authenticated():
if type(obj) == Group:
obj = obj.course
if type(obj) == Concept:
obj = obj.group.course
if request.method in permissions.SAFE_METHODS:
# return true if registered if it is an offering
if obj.type == 'O':
try:
CourseHistory.objects.get(
course=obj,
user=request.user,
active='A'
)
except:
return False
return True
elif obj.type == 'T':
customuser = CustomUser.objects.get(user=request.user)
| if customuser.is_instructor:
return True
return False
# return true if he is the owner of the course
try:
CourseHistory.objects.get(
course=obj,
user=request.user,
is_owner=T | rue
)
except:
return False
return True
return False
class IsOwner(permissions.BasePermission):
"""
Allows complete permission to the owner and none to others
"""
def has_permission(self, request, view):
if request.user.is_authenticated():
customuser = CustomUser.objects.get(user=request.user)
if customuser.is_instructor or customuser.is_content_developer:
return True
return False
def has_object_permission(self, request, view, obj):
"""
Returns whether user has permission on this object or not
"""
if request.user.is_authenticated():
# return true if he is the owner of the course
try:
CourseHistory.objects.get(
course=obj,
user=request.user,
is_owner=True
)
except:
return False
return True
return False
class IsRegistered(permissions.BasePermission):
"""Checks whether a user is registered in the course"""
def has_permission(self, request, view):
"""
Checks whether person is the course instructor or is a superuser
"""
return request.user.is_authenticated()
def has_object_permission(self, request, view, obj):
try:
if request.user.is_authenticated():
CourseHistory.objects.get(
course=obj,
user=request.user,
active='A')
else:
return False
except:
return False
return True
|
tensorflow/ranking | tensorflow_ranking/python/feature.py | Python | apache-2.0 | 10,055 | 0.004774 | # Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature transformations for ranking library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow as tf
from tensorflow.python.feature_column import feature_column_lib
from tensorflow_ranking.python import utils
def make_identity_transform_fn(context_feature_names):
"""Returns transform fn that split the features.
The make_identity_transform_fn generates a transform_fn which handles only
non-prefixed features. The per-example features need to have shape
[batch_size, input_size, ...] and the context features need to have shape
[batch_size, ...].
Args:
context_feature_names: A list of strings representing the context feature
names.
Returns:
An identity transform function that splits into context and per example
features.
"""
def _transform_fn(features, mode):
"""Splits the features into context and per-example features."""
del mode
context_features = {
name: feature
for name, feature in six.iteritems(features)
if name in context_feature_names
}
per_example_features = {
name: feature
for name, feature in six.iteritems(features)
if name not in context_feature_names
}
return context_features, per_example_features
return _transform_fn
def _is_sequence_column_v2(feature_column):
"""Returns whether the feature column is a V2 Sequence Feature Column."""
if isinstance(feature_column, feature_column_lib.SequenceCategoricalColumn):
return True
if isinstance(feature_column, feature_column_lib.SequenceNumericColumn):
return True
if hasattr(feature_column, "categorical_column") and isinstance(
feature_column.categorical_column,
feature_column_lib.SequenceCategoricalColumn):
return True
return False
def encode_features(features,
feature_columns,
mode=tf.estimator.ModeKeys.TRAIN,
scope=None):
"""Returns dense tensors from features using feature columns.
This function encodes the feature column transformation on the 'raw'
`features`.
Args:
features: (dict) mapping feature names to feature values, possibly obtained
from input_fn.
feature_columns: (list) list of feature columns.
mode: (`estimator.ModeKeys`) Specifies if this is training, evaluation or
inference. See `ModeKeys`.
scope: (str) variable scope for the per column input layers.
Returns:
(dict) A mapping from columns to dense tensors.
"""
# Having scope here for backward compatibility.
del scope
trainable = (mode == tf.estimator.ModeKeys.TRAIN)
cols_to_tensors = {}
# TODO: Ensure only v2 Feature Columns are used.
if hasattr(feature_column_lib, "is_feature_column_v2"
) and feature_column_lib.is_feature_column_v2(feature_columns):
dense_feature_columns = [
col for col in feature_columns if not _is_sequence_column_v2(col)
]
sequence_feature_columns = [
col for col in feature_columns if _is_sequence_column_v2(col)
]
if dense_feature_columns:
dense_layer = tf.compat.v1.keras.layers.DenseFeatures(
feature_columns=dense_feature_columns,
name="encoding_layer",
trainable=trainable)
dense_layer(features, cols_to_output_tensors=cols_to_tensors)
for col in sequence_feature_columns:
sequence_feature_layer = tf.keras.experimental.SequenceFeatures(col)
sequence_input, _ = sequence_feature_layer(features)
cols_to_tensors[col] = sequence_input
else:
tf.compat.v1.feature_column.input_layer(
features=features,
feature_columns=feature_columns,
trainable=trainable,
cols_to_output_tensors=cols_to_tensors)
return cols_to_tensors
def encode_listwise_features(features,
context_feature_columns,
example_feature_columns,
input_size=None,
mode=tf.estimator.ModeKeys.TRAIN,
scope=None):
"""Returns dense tensors from features using feature columns.
Args:
features: (dict) mapping feature names (str) to feature values (`tf.Tensor`
or `tf.SparseTensor`), possibly obtained from input_fn. For context
features, the tensors are 2-D, while for example features the tensors are
3-D.
context_feature_columns: (dict) context feature names to columns.
example_feature_columns: (dict) example feature names to columns.
input_size: (int) [DEPRECATED: Use without this argument.] number of
examples per query. If this is None, input_size is inferred as the size
of second dimension of the Tensor corresponding to one of the example
feature columns.
mode: (`estimator.ModeKeys`) Specifies if this is training, evaluation or
inference. See `ModeKeys`.
scope: (str) variable scope for the per column input layers.
Returns:
context_features: (dict) A mapping from | context feature names to dense
2-D tensors of shape [batch_size, ...].
example_features: (dict) A mapping from ex | ample feature names to dense
3-D tensors of shape [batch_size, input_size, ...].
Raises:
ValueError: If `input size` is not equal to 2nd dimension of example
tensors.
"""
context_features = {}
if context_feature_columns:
context_cols_to_tensors = encode_features(
features, context_feature_columns.values(), mode=mode, scope=scope)
context_features = {
name: context_cols_to_tensors[col]
for name, col in six.iteritems(context_feature_columns)
}
# Compute example_features. Note that the keys in `example_feature_columns`
# dict can be different from the keys in the `features` dict. We only need to
# reshape the per-example tensors in `features`. To obtain the keys for
# per-example features, we use the parsing feature specs.
example_features = {}
if example_feature_columns:
if feature_column_lib.is_feature_column_v2(
example_feature_columns.values()):
example_specs = tf.compat.v2.feature_column.make_parse_example_spec(
example_feature_columns.values())
else:
example_specs = tf.compat.v1.feature_column.make_parse_example_spec(
example_feature_columns.values())
example_name = next(six.iterkeys(example_specs))
batch_size = tf.shape(input=features[example_name])[0]
if input_size is None:
input_size = tf.shape(input=features[example_name])[1]
# Reshape [batch_size, input_size] to [batch * input_size] so that
# features are encoded.
reshaped_features = {}
for name in example_specs:
if name not in features:
tf.compat.v1.logging.warn("Feature {} is not found.".format(name))
continue
try:
reshaped_features[name] = utils.reshape_first_ndims(
features[name], 2, [batch_size * input_size])
except:
raise ValueError(
"2nd dimension of tensor must be equal to input size: {}, "
"but found feature {} with shape {}.".format(
input_size, name, features[name].get_shape()))
example_cols_to_tensors = encode_features(
reshaped_features,
example_feature_columns.values(),
mode=mode,
scope=scope)
example_features = {
name: utils.reshape_first_ndims(example_cols_to_tensors[col], 1,
[batch_size, input_size])
for name, col in |
evelynmitchell/pdq | examples/Linux Magazine/spamcan1.py | Python | mit | 1,994 | 0.004012 | #!/usr/bin/env python
###############################################################################
# Copyright (C) 1994 - 2013, Performance Dynamics Company #
# #
# This software is licensed as described in the file COPYING, which #
# you should have received as part of this distribution. The terms #
# are also available at http://www.perfdynamics.com/Tools/copyright.html. #
# #
# You may opt to use, copy, modify, merge, publish, distribute and/or sell #
# copies of the Software, and permit persons to whom the Software is #
# furnished to do so, under the terms of the COPYING file. #
# | #
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY #
# KIND, either express or implied. #
###############################################################################
# $Id: spamcan1.py,v 1.3 2012/11/13 03:12:04 earl-lang Exp $
# Created by NJG on | Wed, Apr 18, 2007
#
# Queueing model of an email-spam analyzer system comprising a
# battery of SMP servers essentially running in batch mode.
# Each node was a 4-way SMP server.
# The performance metric of interest was the mean queue length.
#
# This simple M/M/4 model gave results that were in surprisingly
# good agreement with monitored queue lengths.
import pdq
# Measured performance parameters
cpusPerServer = 4
emailThruput = 2376 # emails per hour
scannerTime = 6.0 # seconds per email
pdq.Init("Spam Farm Model")
# Timebase is SECONDS ...
nstreams = pdq.CreateOpen("Email", float(emailThruput)/3600)
nnodes = pdq.CreateNode("spamCan", int(cpusPerServer), pdq.MSQ)
pdq.SetDemand("spamCan", "Email", scannerTime)
pdq.Solve(pdq.CANON)
pdq.Report()
|
edx/edx-analytics-dashboard | analytics_dashboard/courses/serializers.py | Python | agpl-3.0 | 531 | 0.001883 | from django.core.serializers.json import DjangoJSONEncoder
from django.utils.encoding import force_text
from django.utils.f | unctional import Promise
class LazyEncoder(DjangoJSONEncoder):
"""
Force the conversion of lazy translations so that they can be serialized to JSON.
via https://docs.djangoproject.com/en/dev/topics/serializatio | n/
"""
# pylint: disable=method-hidden
def default(self, obj):
if isinstance(obj, Promise):
return force_text(obj)
return super().default(obj)
|
dbrgn/django-tabination | tabination/tests/multilevel/tests.py | Python | lgpl-3.0 | 2,204 | 0.001361 | from django.core.exceptions import ImproperlyConfigured
from django.test.client import RequestFactory
from django.utils import unittest
from .views import (ParentTab, EmptyTab, FirstChildTab, SecondChildTab, BrokenChildTab)
class MultilevelTest(unittest.TestCase):
rf = RequestFactory()
def test_parent(self):
"""Tests the parent of a child navigation tab."""
request = self.rf.get('/')
response = FirstChildTab.as_view()(request)
context = response.context_data
# Check list of parent tabs
self.assertIsInstance(context['parent_tabs'], list)
self.assertEqual(len(context['parent_tabs']), 2)
self.assertIsInstance(context['parent_tabs'][0], ParentTab)
self.assertIsInstance(context['parent_tabs'][1], EmptyTab)
# Check important tab IDs
self.assertEqual(context['parent_tab_id'], ParentTab.tab_id)
self.assertEqual(context['current_tab_id'], FirstChildTab.tab_id)
# Check references to current tab
self.assertEqual(context['tabs'][1].group_current_tab.tab_id, FirstChildTab.tab_id)
self.assertEqual(context['parent_tabs'][1].group_current_tab.tab_id, ParentTab.tab_id)
def test_children(self):
"""Tests if a parent tab knows it's children."""
request = self.rf.get('/')
response = ParentTab.as_view()(request)
context = response.context_data
self.assertIsInstance(context['child_tabs'], list)
self.assertEqual(len(context['child_tabs']), 2)
self.assertIsInstance(context['child_tabs'][0], FirstChildTab)
self.assertIsInstance(context['child_tabs'][1], SecondChildTab)
def test_parent_none(self):
"""If tab_parent is not configured it is missing from con | text."""
request = self.rf.get('/')
response = ParentTab.as_view()(request)
with self.assertRaises(KeyError):
res | ponse.context_data['parent_tabs']
def test_parent_not__is_tab(self):
"""Using a TabView as parent which has not _is_tab = True fails."""
request = self.rf.get('/')
with self.assertRaises(ImproperlyConfigured):
BrokenChildTab.as_view()(request)
|
USGSDenverPychron/pychron | pychron/user/tasks/panes.py | Python | apache-2.0 | 1,974 | 0 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pyface.tasks.traits_task_pane import TraitsTaskPane
from traitsui.api import View, TableEditor, UItem, HGroup, Item
# ============= standard library imports ========================
# ============= local library imports ==========================
from traitsui.extras.checkbox_column import CheckboxColumn
from traitsui.table_column import ObjectColumn
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
NewUserView = okcancel_view(
Item("new_user_name", label="Name"),
Item("new_user_email", label="Email"),
title="New User",
)
class UsersPane(TraitsTaskPane):
def traits_view(self):
cols = [
ObjectColumn(name="name"),
ObjectColumn(name="email"),
CheckboxColumn(name="enabled"),
]
v = Vi | ew(
HGroup(
UItem("filter_attribute"),
UItem("filter_str"),
show_border=True,
label="Filter",
),
UItem("users", editor=TableEditor(columns=cols)),
)
return v
# ============= EOF ================================ | =============
|
daggaz/python-pyptables | pyptables/base.py | Python | gpl-2.0 | 806 | 0.001241 | import inspect
class DebugObject(object):
"""Base class f | or most iptables classes.
Allows objects to determine the source line they were created from,
which is used to insert debugging information into the generated output
"""
def __init__(self, *args, **kwargs):
super(DebugObject, self).__init__(*args, **kwargs)
frame = inspect.currentframe().f_back
while frame:
info = inspect.getframeinfo(frame)
if not info[2].startswith('__'):
break
frame = frame.f_b | ack
self.filename, self.lineno, self.function, __, __ = info
def debug_info(self):
"""Returns a string of debug info about the creation of this object"""
return "%s:%s %s" % (self.filename, self.lineno, self.function)
|
sankroh/satchmo | satchmo/feeds/views.py | Python | bsd-3-clause | 1,945 | 0.010797 | import datetime
from django.contrib.auth.decorators import user_passes_test
from django.core import urlresolvers
from django.http import Http404
from django.shortcuts import get_object_or_404, render_to_response
from satchmo.payment.config import credit_choices
from satchmo.product.models import Product, Category
from satchmo.shop.models import Config
@user_passes_test(lambda u: u.is_authenticated() and u.is_staff, login_url='/accounts/login/')
def admin_product_feed(request, category=None, template="feeds/product_feed.csv", mimetype="text/csv"):
"""Admin authenticated feed - same as product feed but for different types of feeds.
"""
return product_feed(request, category=category, template=template, mimetype=mimetype)
def product_feed(request, category=None, template="feeds/googlebase_atom.x | ml", mimetype="application/atom+xml"):
"""Build a feed of all active products.
"""
shop_config = Config.objects.get_current()
if category:
try:
cat = Category.objects.get(slug=category)
products = cat.active_products()
except Category.DoesNotExist:
raise Http404, _("Bad Category: %s" % category)
else:
| cat = None
products = Product.objects.active_by_site()
products = filter(lambda product:"ConfigurableProduct" not in product.get_subtypes(), products)
params = {}
view = 'satchmo_atom_feed'
if category:
params['category'] = category
view = 'satchmo_atom_category_feed'
url = shop_config.base_url + urlresolvers.reverse(view, None, params)
payment_choices = [c[1] for c in credit_choices(None, True)]
return render_to_response(template, {
'products' : products,
'category' : cat,
'url' : url,
'shop' : shop_config,
'payments' : payment_choices,
'date' : datetime.datetime.now(),
},
mimetype=mimetype)
|
igrlas/CentralHub | CHPackage/src/centralhub/server/home_endpoints.py | Python | gpl-2.0 | 1,856 | 0.002694 | # Endpoints for user to control the home.
from datetime import datetime
from flask import Blueprint, jsonify, request
from services import elements_services, home_services
home_api = Blueprint('/home_api', __name__)
elem | ents_services = elements_services.ElementsServices()
home_services = home_services.HomeServices()
@home_api.route('/profiles')
def profiles():
"""Gets all profiles for all elements for user application to display and manipulate e | lements"""
return jsonify(home_services.get_profiles())
@home_api.route('/element', methods=['POST'])
def update_element():
"""Updates single element with all new values received from the user application"""
received_element = request.get_json()
home_services.update_element(received_element)
return 'OK'
@home_api.route('/elements', methods=['POST'])
def update_elements():
"""Updates all elements with all new values received from the user application"""
received_elements = request.get_json()
home_services.update_elements(received_elements)
return 'OK'
@home_api.route('/elementdelete', methods=['POST'])
def delete_element():
"""Deletes a single element with given hid"""
element = request.get_json()
home_services.delete_element(element['hid'])
return 'OK'
@home_api.route('/timerules', methods=['POST'])
def timerules():
"""Adds, Updates or deletes time rule for the given element"""
rules = request.get_json()
if len(rules) == 0:
raise Exception("No elements in the list")
for rule in rules:
if 'id' not in rule:
rule['id'] = None
home_services.save_time_rules(rules)
return 'OK'
@home_api.route('/timerules/<string:hid>')
def get_timerules(hid):
"""Gets list of timerules for given hid"""
timerules= home_services.read_time_rules(hid)
return jsonify(timerules)
|
skylines-project/skylines | tests/model/test_user.py | Python | agpl-3.0 | 1,765 | 0.000567 | # -*- coding: utf-8 -*-
import os
import pytest
from skylines.lib import files
from skylines.lib.types import is_unicode
from skylines.model import User, IGCFile
from tests.data import users, igcs
def test_user_delete_deletes_user(db_session):
john = users.john()
db_session.add(john)
db_session.commit()
john_id = john.id
assert john_id is not None
assert db_session.query(User).get(john_id) is not None
john.delete()
db_session.commit()
assert db_session.query(User).get(john_id) is None
@pytest.mark.usefixtures("files_folder")
def test_user_delete_deletes_owned_igc_files(db_session):
with open(igcs.simple_path, "rb") as f:
filename = files.add_file("simple.igc", f)
assert filename is not None
assert os.path.isfile(files.filename_to_path(filename))
john = users.john()
igc = igcs.simple(owner=john, filename=filename)
db_session.add(igc)
db_session.commit()
assert db_session.query(IGCFile).count() == 1
assert db_session.query(IGCFile).get(igc.id).owner_id == john.id
john.delete()
db_session.commit()
assert db_session.query(IGCFile).count() == 0
assert not os.path.isfile(files.filename_to_path(filename))
def test_repr_is_str(db_session):
john = users.john(last_name=u"Müller")
db_session.add(john)
db_session.commit()
assert isinstance(repr(john), str)
assert repr(john) == "<User: email=johnny@doe.com, display=John Müller>"
def test_hash_password():
hash = | User._hash_password(u"secret123", salt=b"abcdef")
assert (
hash
== "bef57ec7f53a6d40beb640a780a639c83bc29ac8a9816f1fc6c5c6dcd93c4721272b82aa344691fb4037f20617b1d19212042e7e | 6cb39f4ba0dad95d8137104a"
)
assert is_unicode(hash)
|
OpenGenus/cosmos | tree/master/code/string_algorithms/sum_of_numbers_string/sum_of_numbers_string.py | Python | gpl-3.0 | 258 | 0 | # ADDING A | LL NUMBERS IN A STRING.
st = input("Enter a string: ")
a = ""
total = 0
for i in st:
if i.isdigit():
a += i
else:
total += int(a)
a = "0"
print(total + int(a))
# INPUT:
# Enter a string: 567hdon2
# O | UTPUT:
# 569
|
Geonovum/sospilot | src/fiware/client/python/UL20/SimulateCommand.py | Python | gpl-3.0 | 3,074 | 0.016591 | #!/usr/bin/env python
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U
#
# This file is part of FIGWAY software (a set of tools for FIWARE Orion ContextBroker and IDAS2.6).
#
# FIGWAY is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# FIGWAY is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License along with FIGWARE.
# If not, see http://www.gnu.org/licenses/
#
# For those usages not covered by the GNU Affero General Public License please contact with: Carlos Ralli Ucendo [ralli@tid.es]
# Developed by Carlos Ralli Ucendo (@carlosralli), Apr 2015.
import requests, json
import ConfigParser
import io
import sys
CONFIG_FILE = "../config.ini"
NUM_ARG=len(sys.argv)
COMMAND=sys.argv[0]
if NUM_ARG==3:
ENTITY_ID=sys.argv[1]
COMMAND=sys.argv[2]
else:
print 'Usage: '+COMMAND+' [ENTITY_ID] \'[COMMAND]\''
sys.exit(2)
# Load the configuration file
with open(CONFIG_FILE,'r+') as f:
sample_config = f.read()
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.readfp(io.BytesIO(sample_config))
IDAS_HOST=config.get('idas', 'host')
IDAS_ADMIN_PORT=config.get('idas', 'adminport')
IDAS_UL | 20_PORT=config.get('idas', 'ul20port')
FIWARE_SERVICE=config.get('idas', 'fiware-service')
FIWARE_SERVICE_PATH=config.get('idas', 'fiware-service-path')
IDAS_AAA=config.get('idas', 'OAuth')
| if IDAS_AAA == "yes":
TOKEN=config.get('user', 'token')
TOKEN_SHOW=TOKEN[1:5]+"**********************************************************************"+TOKEN[-5:]
else:
TOKEN="NULL"
TOKEN_SHOW="NULL"
HOST_ID=config.get('local', 'host_id')
f.close()
URL = "http://"+IDAS_HOST+":"+IDAS_ADMIN_PORT+'/iot/ngsi/d/updateContext'
PAYLOAD = '{ \
"updateAction": "UPDATE", \
"contextElements": [ \
{ \
"id": "'+ENTITY_ID+'", \
"type": "thing", \
"isPattern": "false", \
"attributes": [ \
{ \
"name": "RawCommand", \
"type": "command", \
"value": "'+COMMAND+'" \
} \
] \
} \
] \
}'
HEADERS = {'content-type': 'application/json' , 'X-Auth-Token' : TOKEN, 'Fiware-Service' : FIWARE_SERVICE, 'Fiware-ServicePath' : FIWARE_SERVICE_PATH }
HEADERS_SHOW = {'content-type': 'application/json' , 'X-Auth-Token' : TOKEN_SHOW, 'Fiware-Service' : FIWARE_SERVICE, 'Fiware-ServicePath' : FIWARE_SERVICE_PATH}
print "* Asking to "+URL
print "* Headers: "+str(HEADERS_SHOW)
print "* Sending PAYLOAD: "
print json.dumps(json.loads(PAYLOAD), indent=4)
print
print "..."
r = requests.post(URL, data=PAYLOAD, headers=HEADERS)
print
print "* Status Code: "+str(r.status_code)
print "* Response: "
print r.text
print
|
puttarajubr/commcare-hq | corehq/apps/app_manager/migrations/0004_add_detail_filter_unreleased_builds.py | Python | bsd-3-clause | 250 | 0 | # encoding: utf-8 |
from south.v2 import DataMigration
from corehq.apps.app_manager.migrations import AppFilterMigrat | ionMixIn
class Migration(AppFilterMigrationMixIn, DataMigration):
def get_app_ids(self):
return self._get_all_app_ids()
|
wanghe4096/WangBlog | src/wangblog/migrations/0005_auto_20160417_0152.py | Python | bsd-2-clause | 802 | 0.001247 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-17 01:52
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wangblog', '0004_auto_20160416_1039'),
]
operations = [
| migrations.RemoveField(
model_name='user',
name='is_staff',
),
migrations.AddField(
model_name='user',
name= | 'is_admin',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='user',
name='date_joined',
field=models.DateTimeField(default=datetime.datetime(2016, 4, 17, 1, 52, 6, 276869), verbose_name='date joined'),
),
]
|
evgeny-boger/rus-elections-stats | plot_results.py | Python | unlicense | 7,993 | 0.017015 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import csv, sys, array
import ROOT
import math
def make_plots(title, input_files, close = True, party_num = 14, party_name = "ruling party"):
ROOT.gStyle.SetOptFit(1)
turnout_arr = array.array('d')
rp_arr = array.array('d')
h2 = ROOT.TH2D("h2", "weighted " + party_name + " % vs turnout % (" + title + ");turnout,%;" + party_name + ",%", 105,0, 104, 105, 0, 104)
h2_rp_vs_home = ROOT.TH2D("h2_rp_vs_home", "weighted " + party_name + " % vs home voters % (" + title + ");home voters,%;" + party_name + ",%", 30,0, 29, 82, 30, 70)
h2_rp_vs_home.Sumw2()
home_hist = ROOT.TH1D("home_hist", "Distribution of electoral commissions by home voters % (" + title + " );home voters, %; Number of electoral commissions (0.5% bins)", 100,0, 49)
home_hist_w = ROOT.TH1D("home_hist_w", "Weighted distribution of electoral commissions by home voters % (" + title + " );home voters, %; Number of electoral commissions (0.5% bins)", 100,0, 49)
rp_valid = ROOT.TH2D("rp_invalid", "weighted " + party_name + " % vs valid % (" + title + ");valid,%;" + party_name + ",%", 80 ,90, 110, 110, 0, 110)
total_rp = 0
total = 0
turnout_hist = ROOT.TH1D("turnout_hist", "Distribution of electoral commissions by turnout (" + title + " );turnout ,%; Number of electoral commissions (0.5% bins)", 220,0, 110)
rp_abs_rate = ROOT.TH1D("rp_abs_rate", "rp_abs_rate (" + title + " );turnout * " + party_name + " %; Number of electoral commissions (0.5% bins)", 220,0, 110)
rp_abs_rate2 = ROOT.TH2D("h2", "weighted " + party_name + " abs % vs turnout % (" + title + ");turnout,%;turnout * " + party_name + ",%", 105,0, 104, 105, 0, 104)
turnout_weighted_hist = ROOT.TH1D("turnout_weighted_hist", "Distribution of electoral commissions by turnout (weighted) (" + title + ");turnout,%;Total electoral commissions size (0.5% | bins)", 220,0, 110)
rp_votes_hist = ROOT.TH1D("rp_votes_hist", "Distribution of electoral commissions by " + party_name + " votes (" + title + ");" + party_name + " votes;Number of electoral commissions (--)", 10001,0, 10000)
rp_votes_lastdigit_h | ist = ROOT.TH1D("rp_votes_lastdigit_hist", "Distribution of electoral commissions by last digit of " + party_name + " votes (" + title + ");last digit of rp votes;Number of electoral commissions (--)", 10,-0.5, 9.5)
rp_weighted_hist = ROOT.TH1D("rp_weighted_hist", "Distribution of electoral commissions by " + party_name + " % (weighted) (" + title + ");" + party_name + " % ;Total electoral commissions size (0.5% bins)", 220, 0 - 1./4, 110 - 1./4)
rp_hist = ROOT.TH1D("rp_hist", "Distribution of electoral commissions by " + party_name + " % (raw) (" + title + ");" + party_name + " % ;Number of electoral commissions (0.5% bins)", 220, 0, 110)
rp_weighted_hist.Sumw2()
for fname in input_files:
reader = csv.reader(open(fname))
for row in reader:
values = map(float, row[4:])
print row[3], values
if not values[0]:
continue
turnout = (values[2] + values[3]) / values[0] * 100.0
#~ if turnout > 90:
#~ continue
total_ballots = values[7] + values[8]
home_rate = 100.0 * values[5] / total_ballots if total_ballots else 0
#~
#~ if home_rate > 7:
#~ continue
if total_ballots:
rp = values[party_num] / (total_ballots)* 100.0 #
total_rp+= values[party_num]
total += total_ballots
valid = values[8] / total_ballots * 100.0
else:
rp = 0.0
h2.Fill(turnout, rp, values[0])
h2_rp_vs_home.Fill(home_rate, rp, values[0])
home_hist.Fill(home_rate)
home_hist_w.Fill(home_rate, values[0])
rp_valid.Fill(valid, rp, values[0])
turnout_hist.Fill(turnout)
turnout_weighted_hist.Fill(turnout,values[0])
if values[party_num] > 00:
rp_weighted_hist.Fill(rp, values[0])
rp_hist.Fill(rp)
rp_votes_hist.Fill(values[party_num])
rp_votes_lastdigit_hist.Fill(int(str(int(values[party_num]))[0]))
rp_abs_rate.Fill(turnout * rp / 100, values[0])
rp_abs_rate2.Fill(turnout, turnout * rp / 100, values[0])
turnout_arr.append(turnout)
rp_arr.append(rp)
#~ gr = ROOT.TGraph(len(turnout_arr), turnout_arr, rp_arr)
#~ gr.Draw("A*")
print total_rp, total
print "Total rp: %2.2f%%"%(100.0 * total_rp/ total)
c0 = ROOT.TCanvas()
c0.cd()
h2.Draw("COL CONT0")
print "Correlation factor: ", h2.GetCorrelationFactor()
#~ raw_input()
#~ sys.exit(0)
h2.FitSlicesY()
c0.SaveAs(title + "_2d.png")
c01 = ROOT.TCanvas()
c01.cd()
h2_1 = ROOT.gDirectory.Get("h2_1")
h2_1.GetXaxis().SetRangeUser(0,100)
h2_1.GetYaxis().SetRangeUser(0,100)
h2_1.SetTitle("Fitted " + party_name + " % mean value vs turnout (" + title + ")")
h2_1.Draw()
c01.SaveAs(title + "_2d_slices.png")
c1 = ROOT.TCanvas()
c1.cd()
gaus_1 = ROOT.TF1("gaus_1","gaus",0, 55)
turnout_hist.Fit("gaus", "", "", 0, 54);
turnout_hist.Draw()
c1.SaveAs(title + "_turnout.png")
#~ TF1 *f1=gROOT->GetFunction("myfunc");
#~ f1->SetParameters(800,1);
c2 = ROOT.TCanvas()
c2.cd()
turnout_weighted_hist.Fit("gaus", "", "");
turnout_weighted_hist.Draw()
c2.SaveAs(title + "_turnout_w.png")
c3 = ROOT.TCanvas()
c3.cd()
rp_weighted_hist.GetXaxis().SetRangeUser(0,105)
rp_weighted_hist.Fit("gaus", "", "");
rp_weighted_hist.Draw()
#~ g1 = ROOT.TF1("g1", "gaus", 0, 90)
#~ g2 = ROOT.TF1("g2", "gaus", 0, 90)
#~ bkg = ROOT.TF1("bkg", "gaus", 0, 90)
#~ rp_weighted_hist.Fit("g1", "", "", 0, 30)
#~ rp_weighted_hist.Fit("g2", "", "", 40, 90)
#~ rp_weighted_hist.Fit("bkg", "", "", 0, 90)
#~
#~ total = ROOT.TF1("total", "gaus(0) + gaus(3)+gaus(6)", 0, 85)
#~ total.SetParameters( g1.GetParameter(0),g1.GetParameter(1),g1.GetParameter(2),
#~ g2.GetParameter(0),g2.GetParameter(1),g2.GetParameter(2),
#~ bkg.GetParameter(0),bkg.GetParameter(1),bkg.GetParameter(2))
#~
#~
#~ rp_weighted_hist.Fit("total", "", "", 0,85)
c3.SaveAs(title + "_rp_w.png")
c4 = ROOT.TCanvas()
c4.cd()
rp_hist.GetXaxis().SetRangeUser(0,105)
rp_hist.Draw()
c4.SaveAs(title + "_rp.png")
#~ c5 = ROOT.TCanvas()
#~ c5.cd()
#~ rp_valid.Draw("COL CONT0")
#~ c5.SaveAs(title + "_valid_2d.png")
#~ c6 = ROOT.TCanvas()
#~ c6.cd()
#~ rp_abs_rate2.Draw()
c7 = ROOT.TCanvas()
c7.cd()
h2_rp_vs_home.Draw("COL CONT0")
h2_rp_vs_home.FitSlicesY()
#~ c0.SaveAs(title + "_2d.png")
#~
c71 = ROOT.TCanvas()
c71.cd()
#~
h2_rp_vs_home_1 = ROOT.gDirectory.Get("h2_rp_vs_home_1")
h2_rp_vs_home_1.GetXaxis().SetRangeUser(0,100)
h2_rp_vs_home_1.GetYaxis().SetRangeUser(45,60)
h2_rp_vs_home_1.SetTitle("Fitted " + party_name + " % mean value vs home voters % (" + title + ")")
h2_rp_vs_home_1.Draw()
#~ c01.SaveAs(title + "_2d_slices.png")
#~
c8 = ROOT.TCanvas()
c8.cd()
home_hist.Draw()
#~ #~
c9 = ROOT.TCanvas()
c9.cd()
home_hist_w.Draw()
if close:
c0.Close()
c01.Close()
c1.Close()
c2.Close()
c3.Close()
c4.Close()
c5.Close()
else:
raw_input()
if __name__ == '__main__':
title = sys.argv[1]
input_files = sys.argv[2:]
#~ make_plots(title, input_files, close = False)
#~ make_plots(title + '_navalny' , input_files, close = False, party_num = 13, party_name = 'navalny')
make_plots(title, input_files, close = False, party_num = 17, party_name = 'vorobyev')
|
strongswan/strongTNC | config/wsgi.py | Python | agpl-3.0 | 1,414 | 0.000707 | """
WSGI config for strongTNC.
This module contains the WSGI application used by Django's development server
and any production | WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or comb | ine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
creditbit/electrum-creditbit | gui/qt/network_dialog.py | Python | gpl-3.0 | 9,292 | 0.007211 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, time, datetime, re, threading
from electrum_creditbit.i18n import _
from electrum_creditbit.util import print_error, print_msg
import os.path, json, ast, traceback
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_creditbit import DEFAULT_SERVERS, DEFAULT_PORTS
from util import *
#protocol_names = ['TCP', 'HTTP', 'SSL', 'HTTPS']
#protocol_letters = 'thsg'
protocol_names = ['TCP', 'SSL']
protocol_letters = 'ts'
class NetworkDialog(QDialog):
def __init__(self, network, config, parent):
QDialog.__init__(self,parent)
self.setModal(1)
self.setWindowTitle(_('Network'))
self.setMinimumSize(375, 20)
self.network = network
self.config = config
self.protocol = None
self.servers = network.get_servers()
host, port, protocol, proxy_config, auto_connect = network.get_parameters()
if not proxy_config:
proxy_config = { "mode":"none", "host":"localhost", "port":"9050"}
if parent:
n = len(network.get_interfaces())
if n:
status = _("Blockchain") + ": " + "%d "%(network.get_local_height()) + _("blocks") + ".\n" + _("Getting block headers from %d nodes.")%n
else:
status = _("Not connected")
if network.is_connected():
status += "\n" + _("Server") + ": %s"%(host)
else:
status += "\n" + _("Disconnected from server")
else:
status = _("Please choose a server.") + "\n" + _("Select 'Cancel' if you are offline.")
vbox = QVBoxLayout()
vbox.setSpacing(30)
hbox = QHBoxLayout()
l = QLabel()
l.setPixmap(QPixmap(":icons/network.png"))
hbox.addStretch(10)
hbox.addWidget(l)
hbox.addWidget(QLabel(status))
hbox.addStretch(50)
msg = _("Electrum sends your wallet addresses to a single server, in order to receive your transaction history.") + "\n\n" \
+ _("In addition, Electrum connects to several nodes in order to download block headers and find out the longest blockchain.") + " " \
+ _("This blockchain is used to verify the transactions sent by the address server.")
hbox.addWidget(HelpButton(msg))
vbox.addLayout(hbox)
# grid layout
grid = QGridLayout()
grid.setSpacing(8)
vbox.addLayout(grid)
# server
self.server_host = QLineEdit()
self.server_host.setFixedWidth(200)
self.server_port = QLineEdit()
self.server_port.setFixedWidth(60)
grid.addWidget(QLabel(_('Server') + ':'), 0, 0)
# use SSL
self.ssl_cb = QCheckBox(_('Use SSL'))
self.ssl_cb.setChecked(auto_connect)
grid.addWidget(self.ssl_cb, 3, 1)
self.ssl_cb.stateChanged.connect(self.change_protocol)
# auto connect
self.autocycle_cb = QCheckBox(_('Auto-connect'))
self.autocycle_cb.setChecked(auto_connect)
grid.addWidget(self.autocycle_cb, 0, 1)
if not self.config.is_modifiable('auto_cycle'): self.autocycle_cb.setEnabled(False)
msg = _("If auto-connect is enabled, Electrum will always use a server that is on the longest blockchain.") + " " \
+ _("If it is disabled, Electrum will warn you if your server is lagging.")
grid.addWidget(HelpButton(msg), 0, 4)
grid.addWidget(self.server_host, 0, 2, 1, 2)
grid.addWidget(self.server_port, 0, 3)
label = _('Active Servers') if network.is_connected() else _('Default Servers')
self.servers_list_widget = QTreeWidget(parent)
self.servers_list_widget.setHeaderLabels( [ label, _('Limit') ] )
self.servers_list_widget.setMaximumHeight(150)
self.servers_list_widget.setColumnWidth(0, 240)
self.change_server(host, protocol)
self.set_protocol(protocol)
self.servers_list_widget.connect(self.servers_list_widget,
SIGNAL('currentItemChanged(QTreeWidgetItem*,QTreeWidgetItem*)'),
lambda x,y: self.server_changed(x))
grid.addWidget(self.servers_list_widget, 1, 1, 1, 3)
def enable_set_server():
if config.is_modifiable('server'):
enabled = not self.autocycle_cb.isChecked()
self.server_host.setEnabled(enabled)
self.server_port.setEnabled(enabled)
self.servers_list_widget.setEnabled(enabled)
else:
for w in [self.autocycle_cb, self.server_host, self.server | _port, self.ssl_cb, self.servers_list_widget]:
w.setEnabled(False)
s | elf.autocycle_cb.clicked.connect(enable_set_server)
enable_set_server()
# proxy setting
self.proxy_mode = QComboBox()
self.proxy_host = QLineEdit()
self.proxy_host.setFixedWidth(200)
self.proxy_port = QLineEdit()
self.proxy_port.setFixedWidth(60)
self.proxy_mode.addItems(['NONE', 'SOCKS4', 'SOCKS5', 'HTTP'])
def check_for_disable(index = False):
if self.config.is_modifiable('proxy'):
if self.proxy_mode.currentText() != 'NONE':
self.proxy_host.setEnabled(True)
self.proxy_port.setEnabled(True)
else:
self.proxy_host.setEnabled(False)
self.proxy_port.setEnabled(False)
else:
for w in [self.proxy_host, self.proxy_port, self.proxy_mode]: w.setEnabled(False)
check_for_disable()
self.proxy_mode.connect(self.proxy_mode, SIGNAL('currentIndexChanged(int)'), check_for_disable)
self.proxy_mode.setCurrentIndex(self.proxy_mode.findText(str(proxy_config.get("mode").upper())))
self.proxy_host.setText(proxy_config.get("host"))
self.proxy_port.setText(proxy_config.get("port"))
grid.addWidget(QLabel(_('Proxy') + ':'), 4, 0)
grid.addWidget(self.proxy_mode, 4, 1)
grid.addWidget(self.proxy_host, 4, 2)
grid.addWidget(self.proxy_port, 4, 3)
# buttons
vbox.addLayout(Buttons(CancelButton(self), OkButton(self)))
self.setLayout(vbox)
def init_servers_list(self):
self.servers_list_widget.clear()
for _host, d in sorted(self.servers.items()):
if d.get(self.protocol):
pruning_level = d.get('pruning','')
self.servers_list_widget.addTopLevelItem(QTreeWidgetItem( [ _host, pruning_level ] ))
def set_protocol(self, protocol):
if protocol != self.protocol:
self.protocol = protocol
self.init_servers_list()
def change_protocol(self, use_ssl):
p = 's' if use_ssl else 't'
host = unicode(self.server_host.text())
pp = self.servers.get(host, DEFAULT_PORTS)
if p not in pp.keys():
p = pp.keys()[0]
port = pp[p]
self.server_host.setText( host )
self.server_port.setText( port )
self.set_protocol(p)
def server_changed(self, x):
if x:
self.change_server(str(x.text(0)), self.protocol)
def change_server(self, host, protocol):
pp = self.servers.get(host, DEFAULT_PORTS)
if protocol and protocol not in protocol_letters:
protocol = None
if protocol:
|
DBrianKimmel/PyHouse | Project/src/Modules/Computer/Web/ipyhouse.py | Python | mit | 113 | 0 | """ |
Created on Jul 11, 2013
@author: briank
"""
# from zope.interface import Interface, Attribute
# | ## END DBK
|
jamlamberti/bogo_probe | tests/classifier/test_surrogate_model.py | Python | gpl-3.0 | 445 | 0 | "" | "Collection of tests for classifier.surrogate_model"""
from classifier import surrogate_model
from learner import svm
def test_surrogate_regression():
"""Test case for running a classifier based Surrogate Model"""
surrogate = svm.SVM()
black_box = svm.SVM()
surrogate_model.main(
black_box,
surrogate,
training_data='data-small',
out_dir=None,
threshold=0.2,
iterat | ions=100)
|
frc2423/2015 | recycle_rush/custom/kwarqs_drive_mech.py | Python | gpl-2.0 | 3,114 | 0.007707 | import wpilib
import hal
from wpilib import RobotDrive
class KwarqsDriveMech(RobotDrive):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.weight_multiplier = 1
def set_multiplier(self, in_multi = None):
if in_multi != None :
| self.weight_multiplier = in_multi
else:
self.weight_multiplier = 1
def mecanumDrive_C | artesian(self, x, y, rotation, gyroAngle):
"""Drive method for Mecanum wheeled robots.
A method for driving with Mecanum wheeled robots. There are 4 wheels
on the robot, arranged so that the front and back wheels are toed in
45 degrees. When looking at the wheels from the top, the roller
axles should form an X across the robot.
This is designed to be directly driven by joystick axes.
:param x: The speed that the robot should drive in the X direction.
[-1.0..1.0]
:param y: The speed that the robot should drive in the Y direction.
This input is inverted to match the forward == -1.0 that
joysticks produce. [-1.0..1.0]
:param rotation: The rate of rotation for the robot that is
completely independent of the translation. [-1.0..1.0]
:param gyroAngle: The current angle reading from the gyro. Use this
to implement field-oriented controls.
"""
if not wpilib.RobotDrive.kMecanumCartesian_Reported:
hal.HALReport(hal.HALUsageReporting.kResourceType_RobotDrive,
self.getNumMotors(),
hal.HALUsageReporting.kRobotDrive_MecanumCartesian)
RobotDrive.kMecanumCartesian_Reported = True
xIn = x
yIn = y
# Negate y for the joystick.
yIn = -yIn
# Compenstate for gyro angle.
xIn, yIn = RobotDrive.rotateVector(xIn, yIn, gyroAngle)
wheelSpeeds = [0]*self.kMaxNumberOfMotors
wheelSpeeds[self.MotorType.kFrontLeft] = xIn + yIn + rotation
wheelSpeeds[self.MotorType.kFrontRight] = -xIn + yIn - rotation
wheelSpeeds[self.MotorType.kRearLeft] = -xIn + yIn + ( rotation * self.weight_multiplier )
wheelSpeeds[self.MotorType.kRearRight] = xIn + yIn - ( rotation * self.weight_multiplier )
RobotDrive.normalize(wheelSpeeds)
self.frontLeftMotor.set(wheelSpeeds[self.MotorType.kFrontLeft] * self.invertedMotors[self.MotorType.kFrontLeft] * self.maxOutput, self.syncGroup)
self.frontRightMotor.set(wheelSpeeds[self.MotorType.kFrontRight] * self.invertedMotors[self.MotorType.kFrontRight] * self.maxOutput, self.syncGroup)
self.rearLeftMotor.set(wheelSpeeds[self.MotorType.kRearLeft] * self.invertedMotors[self.MotorType.kRearLeft] * self.maxOutput, self.syncGroup)
self.rearRightMotor.set(wheelSpeeds[self.MotorType.kRearRight] * self.invertedMotors[self.MotorType.kRearRight] * self.maxOutput, self.syncGroup)
if self.syncGroup != 0:
wpilib.CANJaguar.updateSyncGroup(self.syncGroup)
self.feed()
|
to266/hyperspy | hyperspy/_components/eels_cl_edge.py | Python | gpl-3.0 | 14,359 | 0.00007 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import math
import logging
import numpy as np
from scipy.interpolate import splev
from hyperspy.defaults_parser import preferences
from hyperspy.component import Component
from hyperspy.misc.eels.hartree_slater_gos import HartreeSlaterGOS
from hyperspy.misc.eels.hydrogenic_gos import HydrogenicGOS
from hyperspy.misc.eels.effective_angle import effective_angle
_logger = logging.getLogger(__name__)
class EELSCLEdge(Component):
"""EELS core loss ionisation edge from hydrogenic or tabulated
Hartree-Slater GOS with splines for fine structure fitting.
Hydrogenic GOS are limited to K and L shells.
Currently it only supports Peter Rez's Hartree Slater cross sections
parametrised as distributed by Gatan in their Digital Micrograph (DM)
software. If Digital Micrograph is installed in the system HyperSpy in the
standard location HyperSpy should find the path to the HS GOS folder.
Otherwise, the location of the folder can be defined in HyperSpy
preferences, which can be done through hs.preferences.gui() or the
hs.preferences.EELS.eels_gos_files_path variable.
Parameters
----------
element_subshell : {str, dict}
Usually a string, for example, 'Ti_L3' for the GOS of the titanium L3
subshell. If a dictionary is passed, it is assumed that Hartree Slater
GOS was exported using `GOS.as_dictionary`, and will be reconstructed.
GOS : {'hydrogenic', 'Hartree-Slater', None}
The GOS to use. If None it will use the Hartree-Slater GOS if
they are available, otherwise it will use the hydrogenic GOS.
Attributes
----------
onset_energy : Parameter
The edge onset position
intensity : Parameter
The factor by which the cross section is multiplied, what in
favourable cases is proportional to the number of atoms of
the element. It is a component.Parameter instance.
It is fixed by default.
fine_structure_coeff : Parameter
The coefficients of the spline that fits the fine structure.
Fix this parameter to fix the fine structure. It is a
component.Parameter instance.
effective_angle : Parameter
The effective collection semi-angle. It is automatically
calculated by set_microscope_parameters. It is a
component.Parameter instance. It is fixed by default.
fine_structure_smoothing : float between 0 and 1
Controls the level of smoothing of the fine structure model.
Decreasing the value increases the level of smoothing.
fine_structure_active : bool
Activates/deactivates the fine structure feature. Its
default value can be choosen in the preferences.
"""
_fine_structure_smoothing = \
preferences.EELS.fine_structure_smoothing
def __init__(self, element_subshell, GOS=None):
# Declare the parameters
Component.__init__(self,
['intensity',
'fine_structure_coeff',
'effective_angle',
'onset_energy'])
if isinstance(element_subshell, dict):
self.element = element_subshell['element']
self.subshell = element_subshell['subshell']
else:
self.element, self.subshell = element_subshell.split('_')
self.name = "_".join([self.element, self.subshell])
self.energy_scale = None
self.effective_angle.free = False
self.fine_structure_active = preferences.EELS.fine_structure_active
self.fine_structure_width = preferences.EELS.fine_structure_width
self.fine_structure_coeff.ext_force_positive = False
self.GOS = None
# Set initial actions
if GOS is None:
try:
self.GOS = HartreeSlaterGOS(element_subshell)
GOS = 'Hartree-Slater'
except IOError:
GOS = 'hydrogenic'
_logger.info(
'Hartree-Slater GOS not available. '
'Using hydrogenic GOS')
if self.GOS is None:
if GOS == 'Hartree-Slater':
self.GOS = HartreeSlaterGOS(element_subshell)
elif GOS == 'hydrogenic':
self.GOS = HydrogenicGOS(element_subshell)
else:
raise ValueError(
'gos must be one of: None, \'hydrogenic\''
' or \'Hartree-Slater\'')
self.onset_energy.value = self.GOS.onset_energy
self.onset_energy.free = False
self._position = self.onset_energy
self.free_onset_energy = False
self.intensity.grad = self.grad_intensity
self.intensity.value = 1
self.intensity.bmin = 0.
self.intensity.bmax = None
self._whitelist['GOS'] = ('init', GOS)
if GOS == 'Hartree-Slater':
self._whitelist['element_subshell'] = (
'init',
self.GOS.as_dictionary(True))
elif GOS == 'hydrogenic':
self._whitelist['element_subshell'] = ('init', element_subshell)
self._whitelist['fine_structure_active'] = None
self._whitelist['fine_structure_width'] = None
self._whitelist['fine_structure_smoothing'] = None
self.effective_angle.events.value_changed.connect(
self._integrate_GOS, [])
self.onset_energy.events.value_changed.connect(self._integrate_GOS, [])
self.onset_energy.events.value_changed.connect(
self._calculate_knots, [])
# Automatically fix the fine structure when the fine structure is
# disable.
# In this way we avoid a common source of problems when fitting
# However the fine structure must be *manually* freed when we
# reactivate the fine structure.
def _get_fine_structure_active(self):
return self.__fine_structure_active
def _set_fine_structure_active(self, arg):
if arg is False:
self.fine_structure_coeff.free = False
self.__fine_structure_active = arg
# Force replot
self.intensity.value = self.intensity.value
fine_structure_active = property(_get_fine_structure_active,
_set_fine_structure_active)
def _get_fine_structure_width(self):
return sel | f.__fine_structure_width
def _set_fine_structure_width(self, arg):
self.__fine_structure_width = arg
self._set_fine_structure_coeff()
fine_structure_width = property(_get_fine_structure_width,
_set_fine_structure_width)
# E0
def _get_E0(self):
return self.__E0
def _set_E0(self, arg):
self.__E0 = arg
self._calculate_effective_angle()
E0 = property(_get_E0, _set_E0)
# Collection semi-angle
def | _get_collection_angle(self):
return self.__collection_angle
def _set_collection_angle(self, arg):
self.__collection_angle = arg
self._calculate_effective_angle()
collection_angle = property(_get_collection_angle,
_set_collection_angle)
# Convergence semi-angle
def _get_convergence_angle(self):
return self.__convergence_angle
def _set_convergence_angle(self, arg):
self.__convergence_angle = arg
self._calculate_effective_angle()
convergence_angle = property(_get_convergence_angle,
|
lxylinki/medCC | src/main/resources/output/evalresults2014/calcuImp.py | Python | gpl-3.0 | 1,644 | 0.011557 | #!/usr/bin/python3
# import concurrent.futures
import os
# produce an imp file
def calcuImp(mods, edges, index):
# med results
filedir = './{}_{}/'.format(mods, edges)
filename = 'workflow_{}_{}_{}.txt'.format(mods, edges, index)
filename = os.path.join(filedir,filename)
results = open(filename, 'r')
# output filename
outputFiledir = './avgImpNew/'
impfilename = '{}_{}_{}_Imp.txt'.format(mods, edges, index)
impfilename = os.path.join(outputFiledir, impfilename)
impfile = open(impfilename, 'w')
impfile.write(' CG/HBCS CG/ScaleStar\n')
for line in results:
# skip header line
if (line.split()[0].isdigit()==False):
continue
| items = line.split()
budlevel = int(items[0])
# med measurements
cg | = float(items[1])
hbcs = float(items[2])
ss = float(items[3])
# print percentage
impoverhbcs = (hbcs - cg)*100/hbcs
impoverss = (ss-cg)*100/ss
impline = '%d %.2f %.2f\n' % (budlevel, impoverhbcs, impoverss)
impfile.write(impline)
results.close()
impfile.close()
if __name__=='__main__':
Mods = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50,
55, 60, 65, 70, 75, 80, 85, 90, 95, 100]
Edges = [6, 15, 60, 80, 200, 300, 500, 500, 580, 500,
800, 900, 950, 950, 1000, 1200, 1200, 1600, 1600, 2000]
scales = 20
maxIdx = 50
for i in range (0, maxIdx):
for j in range (0, scales):
m = Mods[j]
e = Edges[j]
calcuImp(m, e, i)
|
janusnic/21v-python | unit_02/calc/1.py | Python | mit | 991 | 0.035318 | # Program make a simple calculator that can add, subtract, multiply and divide using functions
# define functi | ons
def add(x, y): |
"""This function adds two numbers"""
return x + y
def subtract(x, y):
"""This function subtracts two numbers"""
return x - y
def multiply(x, y):
"""This function multiplies two numbers"""
return x * y
def divide(x, y):
"""This function divides two numbers"""
return x / y
# take input from the user
print("Select operation.")
print("1.Add")
print("2.Subtract")
print("3.Multiply")
print("4.Divide")
choice = input("Enter choice(1/2/3/4):")
num1 = int(input("Enter first number: "))
num2 = int(input("Enter second number: "))
if choice == '1':
print(num1,"+",num2,"=", add(num1,num2))
elif choice == '2':
print(num1,"-",num2,"=", subtract(num1,num2))
elif choice == '3':
print(num1,"*",num2,"=", multiply(num1,num2))
elif choice == '4':
print(num1,"/",num2,"=", divide(num1,num2))
else:
print("Invalid input") |
x2Ident/x2Ident_test | mitmproxy/test/pathod/test_language_actions.py | Python | gpl-3.0 | 3,653 | 0.000821 | from six import BytesIO
from pathod.language import actions, parse_pathoc, parse_pathod, serve
def parse_request(s):
return next(parse_pathoc(s))
def test_unique_name():
assert not actions.PauseAt(0, "f").unique_name
assert actions.DisconnectAt(0).unique_name
class TestDisconnects:
def test_parse_pathod(self):
a = next(parse_pathod("400:d0")).actions[0]
assert a.spec() == "d0"
a = next(parse_pathod("400:dr")).actions[0]
assert a.spec() == "dr"
def test_at(self):
e = actions.DisconnectAt.expr()
v = e.parseString("d0")[0]
assert isinstance(v, actions.DisconnectAt)
assert v.offset == 0
v = e.parseString("d100")[0]
asser | t v.offset == 100
e = actions.DisconnectAt.expr()
v = e.parseString("dr")[0]
assert v.offset == "r"
def test_spec(self):
assert actions.DisconnectAt("r").spec() == "dr"
assert actions.DisconnectAt(10).spec() == "d10"
class TestInject:
def test_parse_pathod(self):
a = next(parse_pathod("400:ir,@100")).actions[0]
assert a.offset == "r"
assert a.value.datatype == "byt | es"
assert a.value.usize == 100
a = next(parse_pathod("400:ia,@100")).actions[0]
assert a.offset == "a"
def test_at(self):
e = actions.InjectAt.expr()
v = e.parseString("i0,'foo'")[0]
assert v.value.val == b"foo"
assert v.offset == 0
assert isinstance(v, actions.InjectAt)
v = e.parseString("ir,'foo'")[0]
assert v.offset == "r"
def test_serve(self):
s = BytesIO()
r = next(parse_pathod("400:i0,'foo'"))
assert serve(r, s, {})
def test_spec(self):
e = actions.InjectAt.expr()
v = e.parseString("i0,'foo'")[0]
assert v.spec() == "i0,'foo'"
def test_spec2(self):
e = actions.InjectAt.expr()
v = e.parseString("i0,@100")[0]
v2 = v.freeze({})
v3 = v2.freeze({})
assert v2.value.val == v3.value.val
class TestPauses:
def test_parse_pathod(self):
e = actions.PauseAt.expr()
v = e.parseString("p10,10")[0]
assert v.seconds == 10
assert v.offset == 10
v = e.parseString("p10,f")[0]
assert v.seconds == "f"
v = e.parseString("pr,f")[0]
assert v.offset == "r"
v = e.parseString("pa,f")[0]
assert v.offset == "a"
def test_request(self):
r = next(parse_pathod('400:p10,10'))
assert r.actions[0].spec() == "p10,10"
def test_spec(self):
assert actions.PauseAt("r", 5).spec() == "pr,5"
assert actions.PauseAt(0, 5).spec() == "p0,5"
assert actions.PauseAt(0, "f").spec() == "p0,f"
def test_freeze(self):
l = actions.PauseAt("r", 5)
assert l.freeze({}).spec() == l.spec()
class Test_Action:
def test_cmp(self):
a = actions.DisconnectAt(0)
b = actions.DisconnectAt(1)
c = actions.DisconnectAt(0)
assert a < b
assert a == c
l = sorted([b, a])
assert l[0].offset == 0
def test_resolve(self):
r = parse_request('GET:"/foo"')
e = actions.DisconnectAt("r")
ret = e.resolve({}, r)
assert isinstance(ret.offset, int)
def test_repr(self):
e = actions.DisconnectAt("r")
assert repr(e)
def test_freeze(self):
l = actions.DisconnectAt(5)
assert l.freeze({}).spec() == l.spec()
|
pyro-ppl/numpyro | numpyro/distributions/mixtures.py | Python | apache-2.0 | 8,387 | 0.002146 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import jax
from jax import lax
import jax.numpy as jnp
from numpyro.distributions import Distribution, constraints
from numpyro.distributions.discrete import CategoricalLogits, CategoricalProbs
from numpyro.distributions.util import is_prng_key, validate_sample
class MixtureSameFamily(Distribution):
"""
Marginalized Finite Mixture distribution of vectorized components.
The components being a vectorized distribution implies that all components are from the same family,
represented by a single Distribution object.
:param numpyro.distribution.Distribution mixing_distribution:
The mixing distribution to select the components. Needs to be a categorical.
:param numpyro.distribution.Distribution component_distribution:
Vectorized component distribution.
As an example:
.. doctest::
>>> import jax
>>> import jax.numpy as jnp
>>> import numpyro.distributions as dist
>>> mixing_dist = dist.Categorical(probs=jnp.ones(3) / 3.)
>>> component_dist = dist.Normal(loc=jnp.zeros(3), scale=jnp.ones(3))
>>> mixture = dist.MixtureSameFamily(mixing_dist, component_dist)
>>> mixture.sample(jax.random.PRNGKey(42)).shape
()
"""
def __init__(self, mixing_distribution, component_distribution, validate_args=None):
# Check arguments
if not isinstance(mixing_distribution, (CategoricalLogits, CategoricalProbs)):
raise ValueError(
"The mixing distribution need to be a numpyro.distributions.Categorical. "
f"However, it is of type {type(mixing_distribution)}"
)
mixture_size = mixing_distribution.probs.shape[-1]
if not isinstance(component_distribution, Distribution):
raise ValueError(
"The component distribution need to be a numpyro.distributions.Distribution. "
f"However, it is of type {type(component_distribution)}"
)
assert component_distribution.batch_shape[-1] == mixture_size, (
"Component distribution batch shape last dimension "
f"(size={component_distribution.batch_shape[-1]}) "
"needs to correspond to the mixture_size={mixture_size}!"
)
# Assign checked arguments
self._mixing_distribution = mixing_distribution
self._component_distribution = component_distribution
self._mixture_size = mixture_size
batch_shape = lax.broadcast_shapes(
mixing_distribution.batch_shape,
component_distribution.batch_shape[:-1], # Without probabilities
)
super().__init__(
batch_shape=batch_shape,
event_shape=component_distribution.event_shape,
validate_args=validate_args,
)
@property
def mixture_size(self):
"""
Returns the number of distributions in the mixture
:return: number of mixtures.
:rtype: int
| """
return self._mixture_size
@property
def mixing_distribution(self):
"""
Returns th | e mixing distribution
:return: Categorical distribution
:rtype: Categorical
"""
return self._mixing_distribution
    @property
    def mixture_dim(self):
        # Negative axis index of the mixture-component dimension: just to
        # the left of the event dimensions, counted from the right.
        return -self.event_dim - 1
    @property
    def component_distribution(self):
        """
        Return the vectorized distribution of components being mixed.
        :return: Component distribution
        :rtype: Distribution
        """
        return self._component_distribution
    @constraints.dependent_property
    def support(self):
        # A mixture's support is exactly the shared component support.
        return self.component_distribution.support
    @property
    def is_discrete(self):
        # Discreteness is inherited from the component family.
        return self.component_distribution.is_discrete
    def tree_flatten(self):
        # Flatten both children so jax can treat the mixture as a pytree;
        # the aux data remembers each child's concrete class for rebuilding.
        mixing_flat, mixing_aux = self.mixing_distribution.tree_flatten()
        component_flat, component_aux = self.component_distribution.tree_flatten()
        params = (mixing_flat, component_flat)
        aux_data = (
            (type(self.mixing_distribution), type(self.component_distribution)),
            (mixing_aux, component_aux),
        )
        return params, aux_data
    @classmethod
    def tree_unflatten(cls, aux_data, params):
        # Inverse of tree_flatten: rebuild each child via its own class.
        mixing_params, component_params = params
        child_clss, child_aux = aux_data
        mixing_cls, component_cls = child_clss
        mixing_aux, component_aux = child_aux
        mixing_dist = mixing_cls.tree_unflatten(mixing_aux, mixing_params)
        component_dist = component_cls.tree_unflatten(component_aux, component_params)
        return cls(
            mixing_distribution=mixing_dist, component_distribution=component_dist
        )
    @property
    def mean(self):
        # E[Y] = sum_k p_k * E[Y | k]; probs are broadcast across event dims.
        probs = self.mixing_distribution.probs
        probs = probs.reshape(probs.shape + (1,) * self.event_dim)
        weighted_component_means = probs * self.component_distribution.mean
        return jnp.sum(weighted_component_means, axis=self.mixture_dim)
    @property
    def variance(self):
        probs = self.mixing_distribution.probs
        probs = probs.reshape(probs.shape + (1,) * self.event_dim)
        # E[Var(Y|X)]
        mean_cond_var = jnp.sum(
            probs * self.component_distribution.variance, axis=self.mixture_dim
        )
        # Variance is the expectation of the squared deviation of a random variable from its mean
        sq_deviation = (
            self.component_distribution.mean
            - jnp.expand_dims(self.mean, axis=self.mixture_dim)
        ) ** 2
        # Var(E[Y|X])
        var_cond_mean = jnp.sum(probs * sq_deviation, axis=self.mixture_dim)
        # Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
        return mean_cond_var + var_cond_mean
    def cdf(self, samples):
        """
        The cumulative distribution function of this mixture distribution.
        :param samples: samples from this distribution.
        :return: output of the cumulative distribution function evaluated at `samples`.
        :raises: NotImplementedError if the component distribution does not implement the cdf method.
        """
        # Mixture CDF is the probability-weighted sum of component CDFs.
        cdf_components = self.component_distribution.cdf(
            jnp.expand_dims(samples, axis=self.mixture_dim)
        )
        return jnp.sum(cdf_components * self.mixing_distribution.probs, axis=-1)
    def sample_with_intermediates(self, key, sample_shape=()):
        """
        Same as ``sample`` except that the sampled mixture components are also returned.
        :param jax.random.PRNGKey key: the rng_key key to be used for the distribution.
        :param tuple sample_shape: the sample shape for the distribution.
        :return: Tuple (samples, indices)
        :rtype: tuple
        """
        assert is_prng_key(key)
        # Independent keys: one for component draws, one for the selector.
        key_comp, key_ind = jax.random.split(key)
        # Samples from component distribution will have shape:
        # (*sample_shape, *batch_shape, mixture_size, *event_shape)
        samples = self.component_distribution.expand(
            sample_shape + self.batch_shape + (self.mixture_size,)
        ).sample(key_comp)
        # Sample selection indices from the categorical (shape will be sample_shape)
        indices = self.mixing_distribution.expand(
            sample_shape + self.batch_shape
        ).sample(key_ind)
        n_expand = self.event_dim + 1
        indices_expanded = indices.reshape(indices.shape + (1,) * n_expand)
        # Select samples according to indices samples from categorical
        samples_selected = jnp.take_along_axis(
            samples, indices=indices_expanded, axis=self.mixture_dim
        )
        # Final sample shape (*sample_shape, *batch_shape, *event_shape)
        return jnp.squeeze(samples_selected, axis=self.mixture_dim), indices
    def sample(self, key, sample_shape=()):
        # Draw samples, discarding the per-sample component indices.
        return self.sample_with_intermediates(key=key, sample_shape=sample_shape)[0]
@validate_sample
def log_prob(self, value, intermediates=None):
del intermediates
value = jnp.expand_dims(value, self.mixture_dim)
|
ptsg/AtamaTracker | atamatracker/data.py | Python | mit | 2,081 | 0 | """data structure module
"""
import csv
# setup csv format
csv.register_dialect('result', delimiter='\t', lineterminator='\n')
class Track(object):
    """A single tracked (or manually clicked) point at one moment in time.

    Public properties:
    point -- [Point] Tracked/clicked point position
    label -- [int] Index number
    time -- [float] Clicked time
    is_manual -- [bool] Whether the point was clicked manually
    """

    __slots__ = ['time', 'label', 'point', 'is_manual']

    def __init__(self, point, label, time, is_manual=False):
        self.time = time
        self.label = label
        self.point = point
        self.is_manual = is_manual

    def __str__(self):
        return "Track(({0}, {1}), {2}, {3})".format(
            self.point.x, self.point.y, self.label, self.time)
class History(list):
    """Ordered collection of Track objects with filtered lookup,
    chronological sorting and CSV export.
    """

    def track(self, time, label):
        """Return the first track matching `time` and `label`, or None.

        BUG FIX: the original referenced an undefined name `label`
        (parameter was misspelled `idenfifier`) and called a misspelled
        `self.trasks`, so any call raised NameError/AttributeError.
        """
        matches = self.tracks(time=time, label=label)
        if matches:
            return matches[0]
        return None

    def tracks(self, time=None, label=None):
        """Filter tracks by any combination of `time` and `label`.

        A criterion left as None matches every track.
        """
        matches = []
        for track in self:
            if ((time is None or track.time == time) and
                    (label is None or track.label == label)):
                matches.append(track)
        return matches

    def sort(self, **kwargs):
        """Sort in place; defaults to (time, label) ordering."""
        # override default sort key
        if 'key' not in kwargs:
            kwargs['key'] = lambda p: (p.time, p.label)
        super(History, self).sort(**kwargs)

    def dump(self, file_path):
        """Create a result CSV file at the given path.

        file_path -- [str] Path to result file

        NOTE(review): opening in 'wb' with csv.writer is the Python 2
        idiom; under Python 3 this needs open(file_path, 'w', newline='')
        -- confirm the target interpreter before changing.
        """
        with open(file_path, 'wb') as f:
            writer = csv.writer(f, dialect='result')
            for track in self:
                writer.writerow((
                    '{}'.format(track.time),
                    track.label,
                    track.point.y,
                    track.point.x
                ))
|
oinopion/django | tests/admin_ordering/models.py | Python | bsd-3-clause | 899 | 0 | # -*- coding: utf-8 -*-
from django.contrib import admin
from django.db import models
# Minimal band model used to exercise admin ordering behaviour.
class Band(models.Model):
    name = models.CharField(max_length=100)
    bio = models.TextField()
    rank = models.IntegerField()
    class Meta:
        # Default queryset ordering: alphabetical by name.
        ordering = ('name',)
class Song(models.Model):
    """Song belonging to a band; also tracks which bands cover it."""
    band = models.ForeignKey(Band)
    name = models.CharField(max_length=100)
    duration = models.IntegerField()
    # Repaired garbled extraction token ("Ba | nd" -> Band).
    other_interpreters = models.ManyToManyField(Band, related_name='covers')
    class Meta:
        # Default queryset ordering: alphabetical by name.
        ordering = ('name',)
# Inline relying on Song's default Meta ordering ('name').
class SongInlineDefaultOrdering(admin.StackedInline):
    model = Song
# Inline overriding the default to sort songs by duration.
class SongInlineNewOrdering(admin.StackedInline):
    model = Song
    ordering = ('duration', )
class DynOrderingBandAdmin(admin.ModelAdmin):
    """Admin whose changelist ordering depends on the requesting user."""

    def get_ordering(self, request):
        # Superusers sort by rank, everyone else alphabetically.
        # Repaired garbled extraction token ("is | _superuser").
        if request.user.is_superuser:
            return ['rank']
        else:
            return ['name']
|
LLNL/spack | var/spack/repos/builtin/packages/py-onnx/package.py | Python | lgpl-2.1 | 1,686 | 0.001779 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyOnnx(PythonPackage):
    """Open Neural Network Exchange (ONNX) is an open ecosystem that
    empowers AI developers to choose the right tools as their
    project evolves. ONNX provides an open source format for AI
    models, both deep learning and traditional ML. It defines an
    extensible computation graph model, as well as definitions of
    built-in operators and standard data types. Currently we focus
    on the capabilities needed for inferencing (scoring)."""

    homepage = "https://github.com/onnx/onnx"
    pypi = "Onnx/onnx-1.6.0.tar.gz"

    version('1.6.0', sha256='3b88c3fe521151651a0403c4d131cb2e0311bd28b753ef692020a432a81ce345')
    version('1.5.0', sha256='1a584a4ef62a6db178c257fffb06a9d8e61b41c0a80bfd8bcd8a253d72c4b0b4')

    depends_on('py-setuptools', type='build')
    # Protobuf version limit is due to https://github.com/protocolbuffers/protobuf/pull/8794
    depends_on('protobuf@:3.17')
    depends_on('py-protobuf+cpp@:3.17', type=('build', 'run'))
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-six', type=('build', 'run'))
    depends_on('py-typing@3.6.4:', when='^python@:3.4', type=('build', 'run'))
    depends_on('py-typing-extensions@3.6.2.1:', type=('build', 'run'))
    depends_on('cmake@3.1:', type='build')
    depends_on('py-pytest-runner', type='build')

    # 'python_out' does not recognize dllexport_decl.
    patch('remove_dllexport_decl.patch', when='@:1.6.0')
|
hiteshagrawal/python | udemy/russian-peasant-by-author-testing-fasting.py | Python | gpl-2.0 | 1,509 | 0.047714 | #!/usr/bin/python
## Lecture 6
## Fasting using a cache as empty dict
##: AKA - Mediation and DUplication
##:
##: Inouts -> two numbers
#Output - > the solution to those two numbers
# multiplied together using the Russian Peason
# 192 x 13 = 2496
import time
CACHE = {}  # memoization table: (a, b) -> product


def russian(a, b):
    """Multiply a * b with the Russian peasant algorithm, memoized.

    Results are cached in the module-level CACHE keyed by (a, b), so a
    repeated call with the same operands returns without recomputing.

    BUG FIX: the cache-miss message used a Python-2-only print
    statement; the single-argument call form below runs under both
    Python 2 and 3.
    """
    key = (a, b)
    if key in CACHE:
        z = CACHE[key]
    else:
        print('Not in Cache')
        x = a
        y = b
        z = 0  # accumulator
        while x > 0:
            if x % 2 == 1:
                z = z + y  # x odd: current multiple of y contributes
            y = y << 1  # double y   (shift left)
            x = x >> 1  # halve x    (shift right, floor)
        CACHE[key] = z
    return z
# BUG FIX: Python-2-only print statements converted to the
# single-argument call form, which behaves identically on 2 and 3
# (a stray "|" artifact was also dropped from a comment).
print(russian(192, 13))
# 2496

# 17 in base 2: 10001 = 17        10001
#        >> 1                      << 1
#     1000 = 8                   100010 = 34


def test_russian():
    """Time two identical calls: the second should hit the cache."""
    start_time = time.time()
    print(russian(357, 16))
    print("Russian Algorithm took %f seconds" % (time.time() - start_time))  # %f is a float

    start_time = time.time()
    print(russian(357, 16))
    print("Russian Algorithm took %f seconds" % (time.time() - start_time))  # %f is a float

    assert russian(357, 16) == 5712


test_russian()
#5712
#Russian Algorithm took 0.000015 seconds
#5712
#Russian Algorithm took 0.000011 seconds
#[Finished in 0.1s]
|
YAmikep/django-feedstorage | feedstorage/log.py | Python | bsd-3-clause | 703 | 0.001422 | # Django
from django.core.files.storage import get_storage_class
from django.utils.functional import LazyObject
# Internal
from .settings import (
FILE_STORAGE, FILE_STORAGE_ARGS,
LOGGER_NAME, LOG_FILE, LOG_SIZE, LOGGER_FORMAT, LOG_LEVEL
)
from .ut | ils.loggers import LoggerWithStorage
class DefaultFileStorage(LazyObject):
| def _setup(self):
self._wrapped = get_storage_class(FILE_STORAGE)(**FILE_STORAGE_ARGS)
# Shared lazy storage instance, also used by the default logger below.
default_file_storage = DefaultFileStorage()
# Package-wide logger that persists its records through the file storage.
default_logger = LoggerWithStorage(
    storage=default_file_storage,
    logger_name=LOGGER_NAME,
    level=LOG_LEVEL,
    log_file=LOG_FILE,
    log_size=LOG_SIZE,
    logger_format=LOGGER_FORMAT
)
|
demisto/content | Packs/ILert/Integrations/ILert/ILert_test.py | Python | mit | 1,276 | 0 | import pytest
import demistomock as demisto
# Integration parameters returned by the mocked demisto.params().
PARAMS = {'url': 'https://test.com', 'integrationKey': 'mock_key'}
# Autouse fixture: every test in this module sees the mocked params.
@pytest.fixture(autouse=True)
def set_params(mocker):
    mocker.patch.object(demisto, 'params', return_value=PARAMS)
def test_submit_new_event_command(requests_mock):
    """Submitting a new event POSTs to /events and reports creation."""
    from ILert import submit_new_event_command
    requests_mock.post('https://test.com/events', json={})
    outcome = submit_new_event_command(summary='mock_summary')
    assert outcome == 'Incident has been created'
def test_submit_acknowledge_event_command(requests_mock):
    """Acknowledging an event POSTs to /events and reports it.

    Repaired garbled extraction token ("json={ | }" -> "json={}").
    """
    from ILert import submit_acknowledge_event_command
    kwargs = {
        'summary': 'mock_summary',
        'incident_key': 'mock_key'
    }
    requests_mock.post('https://test.com/events', json={})
    result = submit_acknowledge_event_command(**kwargs)
    assert result == 'Incident has been acknowledged'
def test_submit_resolve_event_command(requests_mock):
    """Resolving an event POSTs to /events and reports the resolution.

    Repaired garbled extraction token ("resu | lt" -> "result").
    """
    from ILert import submit_resolve_event_command
    kwargs = {
        'summary': 'mock_summary',
        'incident_key': 'mock_key'
    }
    requests_mock.post('https://test.com/events', json={})
    result = submit_resolve_event_command(**kwargs)
    assert result == 'Incident has been resolved'
|
marcomanciniunitn/Final-LUS-project | RNN/rnn/lus_rnn_lab/rnn_slu/data/new_data/word-pos-enhanced/enhance.py | Python | gpl-3.0 | 467 | 0.036403 | #Funct | ion used to change all the O concepts of the words into the words themselves
def changeAllO(file, out):
w = open(out, "w")
for line in (open(file).readlines()):
v = line.split("\t")
if(len(v)>1):
if v[1][0:1] == "I" or v[1][0:1] == "B":
w.write(line)
else:
w.write(v[0] + "\t" + "$-"+str(v[0])+"\n")
else:
w.write("\n")
flag = 0
w.close()
# Repaired garbled extraction token ("NLSPARQL.test | .data").
changeAllO("TRAIN.txt", "NLSPARQL.train.data")
changeAllO("TEST.txt", "NLSPARQL.test.data")
whd/data-pipeline | reports/stability-summary/rollup.py | Python | mpl-2.0 | 11,194 | 0.001697 | import psycopg2
import os
import sys
import csv
from datetime import datetime, date, timedelta
from utils import S3CompressedWriter
from summarize import summarize
# How many days "back" do we look for activity?
latency_interval = 10
# Destination S3 bucket for the generated rollup CSVs.
default_bucket = 'telemetry-public-analysis-2'
# Most recent complete day of data (yesterday).
current_cutoff = date.today() - timedelta(days=1)
def date_range(date, days, cutoff):
    """Yield up to `days` consecutive dates starting at `date`, stopping
    early once a generated date would fall after `cutoff`."""
    offset = 0
    while offset < days:
        current = date + timedelta(days=offset)
        if current > cutoff:
            break
        yield current
        offset += 1
def put_counts(cur, date):
    """Aggregate the crash/abort counters reported for subsession date
    `date` and upload the grouped totals as a gzipped CSV to S3.

    Pings for a subsession date keep arriving for several days, so the
    query UNIONs the daily main_summary_* tables from the day before
    `date` through `latency_interval` days later (capped at
    `current_cutoff`).

    cur -- open database cursor
    date -- datetime.date of the subsession day being rolled up
    """
    from_template = '''
SELECT
    buildversion,
    buildid,
    buildarchitecture,
    channel,
    os,
    osversion,
    osservicepackmajor,
    osservicepackminor,
    locale,
    activeexperimentid,
    activeexperimentbranch,
    country,
    reason,
    subsessionlength,
    abortsplugin,
    abortscontent,
    abortsgmplugin,
    crashesdetectedplugin,
    pluginhangs,
    crashesdetectedcontent,
    crashesdetectedgmplugin,
    crashsubmitattemptmain,
    crashsubmitattemptcontent,
    crashsubmitattemptplugin,
    crashsubmitsuccessmain,
    crashsubmitsuccesscontent,
    crashsubmitsuccessplugin
FROM {tablename}
WHERE subsessiondate = %(day)s'''
    final_template = '''
SELECT
    buildversion,
    buildid,
    buildarchitecture,
    channel,
    os,
    osversion,
    osservicepackmajor,
    osservicepackminor,
    locale,
    activeexperimentid,
    activeexperimentbranch,
    country,
    SUM(CASE WHEN reason = 'aborted-session' THEN 1 ELSE 0 END),
    SUM(subsessionlength),
    SUM(abortsplugin),
    SUM(abortscontent),
    SUM(abortsgmplugin),
    SUM(crashesdetectedplugin),
    SUM(pluginhangs),
    SUM(crashesdetectedcontent),
    SUM(crashesdetectedgmplugin),
    SUM(crashsubmitattemptmain),
    SUM(crashsubmitattemptcontent),
    SUM(crashsubmitattemptplugin),
    SUM(crashsubmitsuccessmain),
    SUM(crashsubmitsuccesscontent),
    SUM(crashsubmitsuccessplugin)
FROM ( {unionclause} )
GROUP BY
    buildversion,
    buildid,
    buildarchitecture,
    channel,
    os,
    osversion,
    osservicepackmajor,
    osservicepackminor,
    locale,
    activeexperimentid,
    activeexperimentbranch,
    country'''
    # One per-day SELECT for each daily table, stitched with UNION ALL.
    union_query = ' UNION ALL '.join(
        from_template.format(tablename='main_summary_{}'.format(d.strftime('%Y%m%d')))
        for d in date_range(date - timedelta(days=1), latency_interval + 1, current_cutoff))
    final_query = final_template.format(unionclause=union_query)
    cur.execute(final_query, {'day': date})
    path = 'stability-rollups/{year}/{date}-main.csv.gz'.format(
        year=date.year, date=date.strftime('%Y%m%d'))
    with S3CompressedWriter(default_bucket, path) as fd:
        outcsv = csv.writer(fd)
        outcsv.writerow((
            'buildversion',
            'buildid',
            'buildarchitecture',
            'channel',
            'os',
            'osversion',
            'osservicepackmajor',
            'osservicepackminor',
            'locale',
            'activeexperimentid',
            'activeexperimentbranch',
            'country',
            'abortedsessioncount',
            'subsessionlengths',
            'abortsplugin',
            'abortscontent',
            'abortsgmplugin',
            'crashesdetectedplugin',
            'pluginhangs',
            'crashesdetectedcontent',
            'crashesdetectedgmplugin',
            'crashsubmitattemptmain',
            'crashsubmitattemptcontent',
            'crashsubmitattemptplugin',
            'crashsubmitsuccessmain',
            'crashsubmitsuccesscontent',
            'crashsubmitsuccessplugin'))
        for r in cur:
            outcsv.writerow(r)
def put_actives(cur, date, weekly):
    """Count distinct active clients for `date` (or the week ending at
    `date`) and upload the distribution of "active days" per dimension
    combination as a gzipped CSV to S3.

    cur -- open database cursor
    date -- the activity date (or week-end date) being rolled up
    weekly -- True for a 7-day window ending at `date`, False for a
        single day
    """
    if weekly:
        where_clause = '''subsessiondate <= %(day)s::date AND subsessiondate > %(day)s::date - '1 week'::interval'''
    else:
        where_clause = '''subsessiondate = %(day)s'''
    from_template = '''
SELECT
    subsessiondate,
    clientid,
    buildversion,
    buildarchitecture,
    channel,
    os,
    osversion,
    osservicepackmajor,
    osservicepackminor,
    locale,
    activeexperimentid,
    activeexperimentbranch,
    country
FROM {tablename}
WHERE {whereclause}'''
    final_template = '''
SELECT
    buildversion,
    buildarchitecture,
    channel,
    os,
    osversion,
    osservicepackmajor,
    osservicepackminor,
    locale,
    activeexperimentid,
    activeexperimentbranch,
    country,
    activedays,
    COUNT(*)
FROM (
    SELECT
        buildversion,
        buildarchitecture,
        channel,
        os,
        osversion,
        osservicepackmajor,
        osservicepackminor,
        locale,
        activeexperimentid,
        activeexperimentbranch,
        country,
        DENSE_RANK() OVER (PARTITION BY clientid ORDER BY subsessiondate ASC) AS activedays,
        ROW_NUMBER() OVER (PARTITION BY clientid ORDER BY subsessiondate DESC) AS rownumber
    FROM ( {unionclause} )
)
WHERE rownumber = 1
GROUP BY
    buildversion,
    buildarchitecture,
    channel,
    os,
    osversion,
    osservicepackmajor,
    osservicepackminor,
    locale,
    activeexperimentid,
    activeexperimentbranch,
    country,
    activedays'''
    # Weekly windows need 7 extra days of tables on top of the latency pad.
    if weekly:
        dates = date_range(date - timedelta(days=8), latency_interval + 1 + 7, current_cutoff)
    else:
        dates = date_range(date - timedelta(days=1), latency_interval + 1, current_cutoff)
    union_query = ' UNION ALL '.join(
        from_template.format(tablename='main_summary_{}'.format(d.strftime('%Y%m%d')), whereclause=where_clause)
        for d in dates)
    final_query = final_template.format(unionclause=union_query)
    cur.execute(final_query, {'day': date})
    if weekly:
        segment = 'weekly'
    else:
        segment = 'daily'
    # Repaired garbled extraction tokens below ("dat | e.strftime" and
    # "| 'osversion',").
    path = 'stability-rollups/{year}/{date}-active-{segment}.csv.gz'.format(
        year=date.year, date=date.strftime('%Y%m%d'), segment=segment)
    with S3CompressedWriter(default_bucket, path) as fd:
        outcsv = csv.writer(fd)
        # BUG FIX: the header also listed a 'buildid' column, but the query
        # selects no buildid, so every header after 'buildversion' was
        # shifted one column away from its data. The header now matches
        # the 13 selected columns.
        outcsv.writerow((
            'buildversion',
            'buildarchitecture',
            'channel',
            'os',
            'osversion',
            'osservicepackmajor',
            'osservicepackminor',
            'locale',
            'activeexperimentid',
            'activeexperimentbranch',
            'country',
            'active_days',
            'active_users'))
        for r in cur:
            outcsv.writerow(r)
def put_crashes(cur, date):
from_template = '''
SELECT
buildversion,
buildid,
buildarchitecture,
channel,
os,
osversion,
osservicepackmajor,
osservicepackminor,
locale,
activeexperimentid,
activeexperimentbranch,
country,
hascrashenvironment
FROM {tablename}
WHERE crashdate = %(day)s'''
final_template = '''
SELECT
buildversion,
buildid,
buildarchitecture,
channel,
os,
osversion,
osservicepackmajor,
osservicepackminor,
locale,
activeexperimentid,
activeexperimentbranch,
country,
hascrashenvironment,
COUNT(*)
FROM ( {unionclause} )
GROUP BY
buildversion,
buildid,
buildarchitecture,
channel,
os,
osversion,
osservicepackmajor,
osservicepackminor,
locale,
activeexperimentid,
activeexperimentbranch,
country,
hascrashenvironment'' |
civisanalytics/ansible | lib/ansible/modules/cloud/ovirt/ovirt_disks.py | Python | gpl-3.0 | 20,012 | 0.002848 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_disks
short_description: "Module to manage Virtual Machine and floating disks in oVirt"
version_added: "2.2"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage Virtual Machine and floating disks in oVirt."
options:
id:
description:
- "ID of the disk to manage. Either C(id) or C(name) is required."
name:
description:
- "Name of the disk to manage. Either C(id) or C(name)/C(alias) is required."
aliases: ['alias']
vm_name:
description:
- "Name of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
vm_id:
description:
- "ID of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
state:
description:
- "Should the Virtual Machine disk be present/absent/attached/detached."
choices: ['present', 'absent', 'attached', 'detached']
default: 'present'
image_path:
description:
- "Path to disk image, which should be uploaded."
- "Note that currently we support only compability version 0.10 of the qcow disk."
- "Note that you must have an valid oVirt engine CA in your system trust store
or you must provide it in C(ca_file) parameter."
- "Note that there is no reliable way to achieve idempotency, so
if you want to upload the disk even if the disk with C(id) or C(name) exists,
then please use C(force) I(true). If you will use C(force) I(false), which
is default, then the disk image won't be uploaded."
version_added: "2.3"
size:
description:
- "Size of the disk. Size should be specified using IEC standard units.
For example 10GiB, 1024MiB, etc."
- "Size can be only increased, not decreased."
interface:
description:
- "Driver of the storage interface."
choices: ['virtio', 'ide', 'virtio_scsi']
default: 'virtio'
format:
description:
- Specify format of the disk.
- If (cow) format is used, disk will by created as sparse, so space will be allocated for the volume as needed, also known as I(thin provision).
- If (raw) format is used, disk storage will be allocated right away, also known as I(preallocated).
- Note that this option isn't idempotent as it's not currently possible to change format of the disk via API.
choices: ['raw', 'cow']
storage_domain:
description:
- "Storage domain name where disk should be created. By default storage is chosen by oVirt engine."
storage_domains:
description:
- "Storage domain names where disk should be copied."
- "C(**IMPORTANT**)"
- "There is no reliable way to achieve idempotency, so every time
you specify this parameter the disks are copied, so please handle
your playbook accordingly to not copy the disks all the time. This
is valid only for VM and floating disks, template disks works
as expected."
version_added: "2.3"
force:
description:
- "Please take a look at C(image_path) documentation to see the correct
usage of this parameter."
version_added: "2.3"
profile:
description:
        - "Disk profile name to be attached to disk. By default profile is chosen by oVirt engine."
bootable:
description:
- "I(True) if the disk should be bootable. By default when disk is created it isn't bootable."
shareable:
description:
- "I(True) if the disk should be shareable. By default when disk is created it isn't shareable."
logical_unit:
description:
- "Dictionary which describes LUN to be directly attached to VM:"
        - "C(address) - Address of the storage server. Used by iSCSI."
- "C(port) - Port of the storage server. Used by iSCSI."
- "C(target) - iSCSI target."
- "C(lun_id) - LUN id."
- "C(username) - CHAP Username to be used to access storage server. Used by iSCSI."
- "C(password) - CHAP Password of the user to be used to access storage server. Used by iSCSI."
- "C(storage_type) - Storage type either I(fcp) or I(iscsi)."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create and attach new disk to VM
- ovirt_disks:
name: myvm_disk
vm_name: rhel7
size: 10GiB
format: cow
interface: virtio
# Attach logical unit to VM rhel7
- ovirt_disks:
vm_name: rhel7
logical_unit:
target: iqn.2016-08-09.brq.str-01:omachace
id: 1IET_000d0001
address: 10.34.63.204
interface: virtio
# Detach disk from VM
- ovirt_disks:
state: detached
name: myvm_disk
vm_name: rhel7
size: 10GiB
format: cow
interface: virtio
# Upload local image to disk and attach it to vm:
# Since Ansible 2.3
- ovirt_disks:
name: mydisk
vm_name: myvm
interface: virtio
size: 10GiB
format: cow
image_path: /path/to/mydisk.qcow2
storage_domain: data
'''
RETURN = '''
id:
description: "ID of the managed disk"
returned: "On success if disk is found."
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
disk:
description: "Dictionary of all the disk attributes. Disk attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/disk."
returned: "On success if disk is found and C(vm_id) or C(vm_name) wasn't passed."
disk_attachment:
description: "Dictionary of all the disk attachment attributes. Disk attachment attributes can be found
on your oVirt instance at following url:
https://ovirt.example.com/ovirt-engine/api/model#types/disk_attachment."
returned: "On success if disk is found and C(vm_id) or C(vm_name) was passed and VM was found."
'''
import os
import time
import traceback
import ssl
from httplib import HTTPSConnection
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
convert_to_bytes,
equal,
follow_link,
ovirt_full_argument_spec,
search_by_name,
wait,
)
def _search_by_lun(disks_service, lun_id):
"""
Find disk by LUN ID.
"""
res = [
disk for disk in disks_service.list(search='disk_type=lun') if (
disk.lun_storage.id == lun_id
)
]
return res[0] if res else None
def upload_disk_image(connection, module):
size = os.path.getsize(module.params['image_path'])
transfers_service = connection.system_service().image_tran |
meta-it/misc-addons | project_tags/project.py | Python | lgpl-3.0 | 1,241 | 0.002417 | # -*- coding: utf-8 -*-
#
#
# Project Tags
# Copyright (C) 2013 Sistemas ADHOC
# No email
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields
class Project(models.Model):
    """project.project extended with a many2many tag field.

    Repaired garbled extraction token ("| _inherit") on the inherit line.
    """
    _name = 'project.project'
    _inherits = {}
    _inherit = ['project.project']

    # Free-form tags attached to a project.
    project_tag_ids = fields.Many2many('project_tags.project_tag', 'project_tags___project_tag_ids_rel', 'project_id', 'project_tag_id', string='Tags')

    _defaults = {
    }
    _constraints = [
    ]
Project()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
stnava/ITK | Wrapping/Generators/Python/Tests/SmoothingRecursiveGaussianImageFilter.py | Python | apache-2.0 | 1,076 | 0.001859 | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
#  Example on the use of the SmoothingRecursiveGaussianImageFilter
#
import itk
from sys import argv

itk.auto_progress(2)

reader = itk.ImageFileReader.IUC2.New(FileName=argv[1])
# Renamed from `filter`, which shadowed the builtin of the same name.
# NOTE(review): eval() on a command-line argument executes arbitrary
# code; float(argv[3]) would be safer if only numeric sigmas are used.
smoother = itk.SmoothingRecursiveGaussianImageFilter.New(
    reader,
    Sigma=eval(argv[3]))
itk.imwrite(smoother, argv[2])
LaurentRDC/scikit-ued | skued/time_series/tests/test_fitting.py | Python | gpl-3.0 | 6,251 | 0.00144 | from random import random, seed
import numpy as np
from skued import biexpone | ntial, exponent | ial, with_irf
seed(23)
# NOTE: all parameters below come from the module-level RNG seeded with 23,
# so each run draws the same values; the assertions themselves hold for any
# draw within the commented ranges.
def test_exponential_tzero_limits():
    """Test that the output of ``exponential`` has the correct time-zero"""
    tzero = 10 * (random() - 0.5)  # between -5 and 5
    amp = 5 * random() + 5  # between 5 and 10
    tconst = random() + 0.3  # between 0.3 and 1.3
    t = np.arange(-10, 50, step=0.3)
    I = exponential(t, tzero=tzero, amp=amp, tconst=tconst)
    # Check that all values before time-zero are the amplitude
    assert np.all(np.equal(I[t < tzero], amp))
    assert np.all(np.less(I[t > tzero], amp))
def test_exponential_positivity():
    """Test that the output of ``exponential`` is always positive."""
    tzero = 10 * (random() - 0.5)  # between -5 and 5
    amp = 5 * random() + 5  # between 5 and 10
    tconst = random() + 0.3  # between 0.3 and 1.3
    t = np.arange(-10, 50, step=0.3)
    I = exponential(t, tzero=tzero, amp=amp, tconst=tconst)
    assert np.all(I > 0)
def test_exponential_amplitude():
    """Test that the output of ``exponential`` is at most ``amp``."""
    tzero = 10 * (random() - 0.5)  # between -5 and 5
    amp = 5 * random() + 5  # between 5 and 10
    tconst = random() + 0.3  # between 0.3 and 1.3
    t = np.arange(-10, 50, step=0.3)
    I = exponential(t, tzero=tzero, amp=amp, tconst=tconst)
    assert np.all(np.less_equal(I, amp))
def test_exponential_offset():
    """Test that the output of ``exponential`` is at least ``offset``."""
    tzero = 10 * (random() - 0.5)  # between -5 and 5
    amp = 5 * random() + 5  # between 5 and 10
    tconst = random() + 0.3  # between 0.3 and 1.3
    offset = 15
    t = np.arange(-10, 50, step=0.3)
    I = exponential(t, tzero=tzero, amp=amp, tconst=tconst, offset=offset)
    assert np.all(np.greater_equal(I, offset))
def test_biexponential_tzero_limits():
"""Test that the output of ``biexponential`` has the correct time-zero"""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp1 = 5 * random() + 5 # between 5 and 10
tconst1 = random() + 0.3 # between 0.3 and 1.3
amp2 = 5 * random() + 5 # between 5 and 10
tconst2 = random() + 0.3 # between 0.3 and 1.3
t = np.arange(-10, 50, step=0.3)
I = biexponential(
t,
tzero=tzero,
amp1=amp1,
amp2=amp2,
tconst1=tconst1,
tconst2=tconst2,
)
# Check that all values before time-zero are the amplitude
assert np.all(np.equal(I[t < tzero], amp1 + amp2))
assert np.all(np.less(I[t > tzero], amp1 + amp2))
def test_biexponential_positivity():
"""Test that the output of ``biexponential`` is always positive."""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp1 = 5 * random() + 5 # between 5 and 10
tconst1 = random() + 0.3 # between 0.3 and 1.3
amp2 = 5 * random() + 5 # between 5 and 10
tconst2 = random() + 0.3 # between 0.3 and 1.3
t = np.arange(-10, 50, step=0.3)
I = biexponential(
t,
tzero=tzero,
amp1=amp1,
amp2=amp2,
tconst1=tconst1,
tconst2=tconst2,
)
assert np.all(I > 0)
def test_biexponential_amplitude():
"""Test that the output of ``biexponential`` is at most ``amp1 + amp2``."""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp1 = 5 * random() + 5 # between 5 and 10
tconst1 = random() + 0.3 # between 0.3 and 1.3
amp2 = 5 * random() + 5 # between 5 and 10
tconst2 = random() + 0.3 # between 0.3 and 1.3
t = np.arange(-10, 50, step=0.3)
I = biexponential(
t,
tzero=tzero,
amp1=amp1,
amp2=amp2,
tconst1=tconst1,
tconst2=tconst2,
)
assert np.all(np.less_equal(I, amp1 + amp2))
def test_biexponential_offset():
"""Test that the output of ``biexponential`` is at least ``offset``."""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp1 = 5 * random() + 5 # between 5 and 10
tconst1 = random() + 0.3 # between 0.3 and 1.3
amp2 = 5 * random() + 5 # between 5 and 10
tconst2 = random() + 0.3 # between 0.3 and 1.3
offset = 15
t = np.arange(-10, 50, step=0.3)
I = biexponential(
t,
tzero=tzero,
amp1=amp1,
amp2=amp2,
tconst1=tconst1,
tconst2=tconst2,
offset=offset,
)
assert np.all(np.greater_equal(I, offset))
def test_biexponential_against_exponential():
"""Test that ``biexponential`` reduces to ``exponential`` for appropriate parameters"""
tzero = 10 * (random() - 0.5) # between -5 and 5
amp1 = 5 * random() + 5 # between 5 and 10
tconst1 = random() + 0.3 # between 0.3 and 1.3
amp2 = 5 * random() + 5 # between 5 and 10
tconst2 = random() + 0.3 # between 0.3 and 1.3
t = np.arange(-10, 50, step=0.3)
offset = 2
exp = exponential(t, tzero, amp1, tconst1, offset=offset)
biexp = biexponential(t, tzero, amp1, 0, tconst1, 1, offset=offset)
assert np.allclose(exp, biexp)
def test_with_irf_trivial_constant_spacing():
"""Test with_irf with a trivial IRF, with constant spacing"""
params = (0, 1, 3)
times = np.linspace(-5, 15, 256)
data = exponential(times, *params)
@with_irf(0.00001) # vanishingly small irf
def exponential_with_irf(time, *args, **kwargs):
return exponential(time, *args, **kwargs)
conv = exponential_with_irf(times, *params)
assert np.allclose(data, conv)
def test_with_irf_trivial_nonconstant_spacing():
"""Test with_irf with a trivial IRF, with non-constant spacing"""
# Note that the spacing of the steps is important for this test
# If the array `times` is of even length, then the convolution will result
# in one time-step shift
params = (0, 1, 3)
times = np.concatenate(
(
np.arange(-10, -2, step=1),
np.arange(-2, 2, step=0.04),
np.arange(2, 10, step=1),
)
)
data = exponential(times, *params)
@with_irf(0.00001) # vanishingly small irf
def exponential_with_irf(time, *args, **kwargs):
return exponential(time, *args, **kwargs)
conv = exponential_with_irf(times, *params)
assert np.allclose(data, conv)
|
bjpop/annokey | annokey/get_ncbi_gene_snapshot_xml.py | Python | bsd-3-clause | 22,163 | 0.000406 | #!/bin/env python
'''NCBI Gene Downloading Tool
This program downloads gene data from the ftp server and \
converts data to xml file by using linux.gene2xml tool.
This program checks the last modified time of the database in the server, \
when it tries to download the gene database from the server. \
If the same version of database is in the local directory already, \
this program does not download the database from server. \
If you want to download the database anyway, please use the --force option. \
If you already have the database file that you need to look up and \
want to convert the file, please use the --convertonly option.
'''
import os
import sys
import ftplib
import glob
import re
import subprocess
from argparse import ArgumentParser
# global variables
NCBI_SERVER = 'ftp.ncbi.nlm.nih.gov'
HOMO_SAPIENS = '/gene/DATA/ASN_BINARY/Mammalia/Homo_sapiens.ags.gz'
VERBOSE_LEVEL = 2
class Namespace(object):
'''Class introducing a new namespace'''
pass
def print_verbose(level, message):
'''Print a message if the level is less than the verbose level.'''
if level <= VERBOSE_LEVEL:
print message
if VERBOSE_LEVEL >= 3 and message.startswith(' [Error Log]'):
raise
def flush_verbose(level, message):
'''Flush a message if the level is less than the verbose level.'''
if level <= VERBOSE_LEVEL:
print message,
sys.stdout.flush()
def convert_genetoxml(program, infile, outfile):
'''Convert infile to xml file named outfile using linux.gene2xml.
The path of the linux.gene2xml should be passed as an argument. \
The program should be ether the full path of linux.gene2xml or \
linux.gene2xml if path of linux.gene2xml is in PATH.
'''
# If infile is not ags format,
# linux.gene2xml would complain about it.
extension = infile.rfind('.ags')
if extension == -1:
print_verbose(0, '[Error] '
'The file to be converted is not ags file (%s).' %
infile)
return
class GeneToXmlError(Exception):
'''Exception raised on conversion of gene entry to XML'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
print_verbose(1, 'Started converting %s to %s.' % (infile, outfile))
print_verbose(2, ' >> Converting ...')
try:
ret = subprocess.check_output([program, '-i', infile,
'-o', outfile, '-b', '-c'],
stderr=subprocess.STDOUT)
if ret != '': # error occurs
raise GeneToXmlError(ret)
except (subprocess.CalledProcessError, GeneToXmlError) as exception:
if os.path.exists(outfile):
os.remove(outfile)
outfile = None
print_verbose(0, 'Error occurred while converting data.')
print_verbose(0, ' [Error Log] ' + str(exception))
except KeyboardInterrupt:
if os.path.exists(outfile):
os.remove(outfile)
outfile = None
print_verbose(0, 'User interrupt. Finished program.')
except Exception as exception:
if os.path.exists(outfile):
os.remove(outfile)
outfile = None
if exception.errno == 2: # linux.gene2xml is not in PATH
print_verbose(
0,
'[Error] '
'Could not execute the program linux.gene2xml.'
'\n Perhaps it is not installed in your PATH? '
'\n Please add the program path to PATH or '
'use --gene2xmlpath option.')
else:
print_verbose(0, 'Undefined error occurs while '
'converting data to xml.')
print_verbose(0, ' [Error Log] ' + str(exception.errno))
else:
print_verbose(2, ' >> Converting %s to %s is successful.'
% (infile, outfile))
return outfile
class Ftp(object):
'''Ftp class for representation of ftplib.FTP
This class supports connect(), disconnect() for FTP site, and \
download(), get_last_modified_time(), and get_size() for a file \
on the FTP site.
'''
def __init__(self, ftpSite):
self.site = ftpSite
self.ftp = None
def connect(self):
'''Connect FTP site and login.'''
if self.ftp is not None:
print_verbose(0, '[Error] ftp connection is already established.')
return
try:
print_verbose(1, 'Starts connecting to server (%s).' % self.site)
self.ftp = ftplib.FTP(self.site)
self.ftp.login()
print_verbose(2, ' >> Connecting to server is successful.')
except ftplib.all_errors as exception:
self.disconnect()
print_verbose(0, 'Error occurs while connecting ftp server.')
print_verbose(0, ' [Error Log] ' + str(exception))
except KeyboardInterrupt:
self.disconnect()
print_verbose(0, 'User interrupt. Finished program.')
except Exception as exception:
self.disconnect()
print_verbose(0, 'Undefined error occurs while '
'connecting ftp server.')
print_verbose(0, ' [ | Error Log] ' + str(exception))
return self.ftp
def get_last_modified_time(self, filepath):
'''Get the last modified time of the file on the server.
The server gives the result code and the value.
e.g) '213 20120101051112'
'''
if self.ftp is None:
print_verbose(0, '[Error] ftp server is not connected.')
return
| class MDTMError(Exception):
'''Representation of modification time'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
md_server = None
try:
print_verbose(1, 'Starts getting the last modified time of data.')
modified_time = self.ftp.sendcmd('MDTM ' + filepath)
md_time_result = modified_time.split()
if md_time_result[0] == '213': # 213 means successful
md_server = int(md_time_result[1])
else:
raise MDTMError('Fail to get the last modified time of data '
'from server. [%s]' % modified_time)
print_verbose(2, ' >> Getting the last modified time of data '
'is successful.')
print_verbose(2, ' >> The last modified time of the data '
'at the server is %s' % str(md_server))
except (ftplib.all_errors, MDTMError) as exception:
print_verbose(0, 'Error occurs while connecting ftp server.')
print_verbose(0, ' [Error Log] ' + str(exception))
except KeyboardInterrupt:
print_verbose(0, 'User interrupt. Finished program.')
except Exception as exception:
print_verbose(0, 'Undefined error occurs while '
'connecting the ftp server.')
print_verbose(0, ' [Error Log] ' + str(exception))
return md_server
def get_size(self, filepath):
'''Get the size of the file on the server.'''
if self.ftp is None:
print_verbose(0, '[Error] ftp server is not connected.')
return
data_size = None
try:
self.ftp.sendcmd('TYPE i')
data_size = self.ftp.size(filepath)
data_size = float(data_size)
print_verbose(2, ' >> The file size is %.2fM'
% (data_size/(1024**2)))
except ftplib.all_errors as exception:
print_verbose(0, 'Error occurs while getting file size.')
print_verbose(0, ' [Error Log] ' + str(exception))
except KeyboardInterrupt:
print_verbose(0, 'User interrupt. Finished program.')
except Exception as exception:
print_verbose(0, 'Undefined error occurs while '
|
benkohler/catalyst | catalyst/builder.py | Python | gpl-2.0 | 702 | 0.035613 | import os
cla | ss generic(object):
def __init__(self,myspec):
self.settings=myspec
self.settings.setdefault('CHROOT', 'chroot')
def setarch(self, arch):
"""Set the chroot wrapper to run through `setarch |arch|`
Useful for building x86-on-amd64 and such.
"""
if os.uname()[0] == 'Linux':
self.settings['CHROOT'] = 'setarch %s %s' % (arch, self.settings['CHROOT'])
def mount_safety_check(self):
"""
Make sure that no bind mounts exist in chrootdir (to use before
cleaning the directory, | to make sure we don't wipe the contents of
a bind mount
"""
pass
def mount_all(self):
"""do all bind mounts"""
pass
def umount_all(self):
"""unmount all bind mounts"""
pass
|
ysh329/django-test | db10_admin/blog/admin.py | Python | apache-2.0 | 121 | 0.008264 | from d | jango.contrib import admin
from blog.models import User
admin.site.register(User)
|
# Register your models here.
|
froyobin/horizon | openstack_dashboard/dashboards/admin/volumes/volume_types/qos_specs/tests.py | Python | apache-2.0 | 7,735 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class QosSpecsTests(test.BaseAdminViewTests):
@test.create_stubs({api.cinder: ('qos_spec_get',), })
def test_manage_qos_spec(self):
qos_spec = self.cinder_qos_specs.first()
index_url = reverse(
'horizon:admin:volumes:volume_types:qos_specs:index',
args=[qos_spec.id])
api.cinder.qos_spec_get(IsA(http.HttpRequest),
qos_spec.id)\
.AndReturn(qos_spec)
self.mox.ReplayAll()
res = self.client.get(index_url)
self.assertTemplateUsed(
res, 'admin/volumes/volume_types/qos_specs/index.html')
rows = res.context['table'].get_rows()
specs = self.cinder_qos_specs.first().specs
for row in rows:
key = row.cells['key'].data
self.assertTrue(key in specs)
self.assertEqual(row.cells['value'].data,
specs.get(key))
@test.create_stubs({api.cinder: ('qos_spec_create',)})
def test_create_qos_spec | (self):
formData = {'name': 'qos-spec-1',
'consumer': 'back-end'}
api.cinder.qos_spec_create(IsA(http.HttpRequest),
formData['name'],
{'consumer': formData['consume | r']}).\
AndReturn(self.cinder_qos_specs.first())
self.mox.ReplayAll()
res = self.client.post(
reverse('horizon:admin:volumes:volume_types:create_qos_spec'),
formData)
redirect = reverse('horizon:admin:volumes:volume_types_tab')
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, redirect)
self.assertMessageCount(success=1)
@test.create_stubs({api.cinder: ('volume_type_list_with_qos_associations',
'qos_spec_list',
'qos_spec_delete',)})
def test_delete_qos_spec(self):
qos_spec = self.cinder_qos_specs.first()
formData = {'action': 'qos_specs__delete__%s' % qos_spec.id}
api.cinder.volume_type_list_with_qos_associations(
IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
api.cinder.qos_spec_list(IsA(http.HttpRequest)).\
AndReturn(self.cinder_qos_specs.list())
api.cinder.qos_spec_delete(IsA(http.HttpRequest),
str(qos_spec.id))
self.mox.ReplayAll()
res = self.client.post(
reverse('horizon:admin:volumes:volume_types_tab'),
formData)
redirect = reverse('horizon:admin:volumes:volume_types_tab')
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, redirect)
self.assertMessageCount(success=1)
@test.create_stubs({api.cinder: ('qos_spec_get',
'qos_spec_get_keys',
'qos_spec_set_keys',), })
def test_spec_edit(self):
qos_spec = self.cinder_qos_specs.first()
key = 'minIOPS'
edit_url = reverse('horizon:admin:volumes:volume_types:qos_specs:edit',
args=[qos_spec.id, key])
index_url = reverse(
'horizon:admin:volumes:volume_types:qos_specs:index',
args=[qos_spec.id])
data = {'value': '9999'}
qos_spec.specs[key] = data['value']
api.cinder.qos_spec_get(IsA(http.HttpRequest),
qos_spec.id)\
.AndReturn(qos_spec)
api.cinder.qos_spec_get_keys(IsA(http.HttpRequest),
qos_spec.id, raw=True)\
.AndReturn(qos_spec)
api.cinder.qos_spec_set_keys(IsA(http.HttpRequest),
qos_spec.id,
qos_spec.specs)
self.mox.ReplayAll()
resp = self.client.post(edit_url, data)
self.assertNoFormErrors(resp)
self.assertMessageCount(success=1)
self.assertRedirectsNoFollow(resp, index_url)
@test.create_stubs({api.cinder: ('qos_spec_get',
'qos_spec_set_keys',), })
def test_edit_consumer(self):
qos_spec = self.cinder_qos_specs.first()
# modify consumer to 'front-end'
formData = {'consumer_choice': 'front-end'}
edit_url = reverse(
'horizon:admin:volumes:volume_types:edit_qos_spec_consumer',
args=[qos_spec.id])
api.cinder.qos_spec_get(IsA(http.HttpRequest),
qos_spec.id).AndReturn(qos_spec)
api.cinder.qos_spec_set_keys(IsA(http.HttpRequest),
qos_spec.id,
{'consumer': formData['consumer_choice']})
self.mox.ReplayAll()
resp = self.client.post(edit_url, formData)
redirect = reverse('horizon:admin:volumes:volume_types_tab')
self.assertNoFormErrors(resp)
self.assertMessageCount(success=1)
self.assertRedirectsNoFollow(resp, redirect)
@test.create_stubs({api.cinder: ('qos_spec_list',
'qos_spec_get',
'qos_spec_get_associations',
'volume_type_get',
'qos_spec_associate',), })
def test_associate_qos_spec(self):
volume_type = self.volume_types.first()
volume_types = self.volume_types.list()
qos_spec = self.cinder_qos_specs.first()
qos_specs = self.cinder_qos_specs.list()
# associate qos spec with volume type
formData = {'qos_spec_choice': qos_spec.id}
edit_url = reverse(
'horizon:admin:volumes:volume_types:manage_qos_spec_association',
args=[volume_type.id])
api.cinder.qos_spec_get(IsA(http.HttpRequest),
qos_spec.id).AndReturn(qos_spec)
api.cinder.qos_spec_list(IsA(http.HttpRequest)) \
.AndReturn(qos_specs)
api.cinder.qos_spec_get_associations(IsA(http.HttpRequest),
qos_spec.id) \
.AndReturn(volume_types)
api.cinder.qos_spec_get_associations(IsA(http.HttpRequest),
qos_specs[1].id) \
.AndReturn(volume_types)
api.cinder.volume_type_get(IsA(http.HttpRequest),
str(volume_type.id)) \
.AndReturn(volume_type)
api.cinder.qos_spec_associate(IsA(http.HttpRequest),
qos_spec,
str(volume_type.id))
self.mox.ReplayAll()
resp = self.client.post(edit_url, formData)
redirect = reverse('horizon:admin:volumes:volume_types_tab')
self.assertNoFormErrors(resp)
self.assertMessageCount(success=1)
self.assertRedirectsNoFollow(resp, redirect)
|
netscaler/neutron | neutron/db/db_base_plugin_v2.py | Python | apache-2.0 | 66,754 | 0.000075 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import random
import netaddr
from oslo.config import cfg
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as q_exc
from neutron.db import api as db
from neutron.db import models_v2
from neutron.db import sqlalchemyutils
from neutron import neutron_plugin_base_v2
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
AGENT_OWNER_PREFIX = 'network:'
# Ports with the following 'device_owner' values will not prevent
# network deletion. If delete_network() finds that all ports on a
# network have these owners, it will explicitly delete each port
# and allow network deletion to continue. Similarly, if delete_subnet()
# finds out that all existing IP Allocations are associated with ports
# with these owners, it will allow subnet deletion to proceed with the
# IP allocations being cleaned up by cascade.
AUTO_DELETE_PORT_OWNERS = ['network:dhcp']
class CommonDbMixin(object):
"""Common methods used in core and service plugins."""
# Plugins, mixin classes implementing extension will register
# hooks into the dict below for "augmenting" the "core way" of
# building a query for retrieving objects from a model class.
# To this aim, the register_model_query_hook and unregister_query_hook
# from this class should be invoked
_model_query_hooks = {}
# This dictionary will store methods for extending attributes of
# api resources. Mixins can use this dict for adding their own methods
# TODO(salvatore-orlando): Avoid using class-level variables
_dict_extend_functions = {}
@classmethod
def register_model_query_hook(cls, model, name, query_hook, filter_hook,
result_filters=None):
"""Register a hook to be invoked when a query is executed.
Add the hooks to the _model_query_hooks dict. Models are the keys
of this dict, whereas the value is another dict mapping hook names to
callables performing the hook.
Each hook has a "query" component, used to build the query expression
and a "filter" component, which is used to build the filter expression.
Query hooks take as input the query being built and return a
transformed query expression.
Filter hooks take as input the filter expression being built and return
a transformed filter expression
"""
model_hooks = cls._model_query_hooks.get(model)
if not model_hooks:
# add key to dict
model_hooks = {}
cls._model_query_hooks[model] = model_hooks
model_hooks[name] = {'query': query_hook, 'filter': filter_hook,
'result_filters': result_filters}
def _model_query(self, context, model):
query = context.session.query(model)
# define basic filter condition for model query
# NOTE(jkoelker) non-admin queries are scoped to their tenant_id
# NOTE(salvatore-orlando): unless the model allows for shared objects
query_filter = None
if not context.is_admin and hasattr(model, 'tenant_id'):
if hasattr(model, 'shared'):
query_filter = ((model.tenant_id == context.tenant_id) |
(model.shared == True))
else:
query_filter = (model.tenant_id == context.tenant_id)
# Execute query hooks registered from mixins and plugins
for _name, hooks in self._model_query_hooks.get(model,
{}).iteritems():
query_hook = hooks.get('query')
if isinstance(query_hook, basestring):
query_hook = getattr(self, query_hook, None)
if query_hook:
query = query_hook(context, model, query)
filter_hook = hooks.get('filter')
if isinstance(filter_hook, basestring):
filter_hook = getattr(self, filter_hook, None)
if filter_hook:
query_filter = filter_hook(context, model, query_filter)
# NOTE( | salvatore-orlando): 'if query_filter' will try to evaluate the
# condition, raising an exception
if query_filter is not None:
query = query.filter(query_filter)
return query
def _fields(self, resource, fields):
if fields:
return dict(((key, item) for key, item in resource.items()
if key in fields))
| return resource
def _get_tenant_id_for_create(self, context, resource):
if context.is_admin and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
elif ('tenant_id' in resource and
resource['tenant_id'] != context.tenant_id):
reason = _('Cannot create resource for another tenant')
raise q_exc.AdminRequired(reason=reason)
else:
tenant_id = context.tenant_id
return tenant_id
def _get_by_id(self, context, model, id):
query = self._model_query(context, model)
return query.filter(model.id == id).one()
def _apply_filters_to_query(self, query, model, filters):
if filters:
for key, value in filters.iteritems():
column = getattr(model, key, None)
if column:
query = query.filter(column.in_(value))
for _name, hooks in self._model_query_hooks.get(model,
{}).iteritems():
result_filter = hooks.get('result_filters', None)
if isinstance(result_filter, basestring):
result_filter = getattr(self, result_filter, None)
if result_filter:
query = result_filter(query, filters)
return query
def _apply_dict_extend_functions(self, resource_type,
response, db_object):
for func in self._dict_extend_functions.get(
resource_type, []):
args = (response, db_object)
if isinstance(func, basestring):
func = getattr(self, func, None)
else:
# must call unbound method - use self as 1st argument
args = (self,) + args
if func:
func(*args)
def _get_collection_query(self, context, model, filters=None,
sorts=None, limit=None, marker_obj=None,
page_reverse=False):
collection = self._model_query(context, model)
collection = self._apply_filters_to_query(collection, model, filters)
if limit and page_reverse and sorts:
sorts = [(s[0], not s[1]) for s in sorts]
collection = sqlalchemyutils.paginate_query(collection, model, limit,
sorts,
marker_obj=marker_obj)
return collection
def _get_collection(self, context, model, dict_func, filters=None,
fields=None, sorts=None, limit=None, marker_obj=None,
page_reverse=False):
query = self._ge |
mwhoffman/pybo | pybo/demos/solve.py | Python | bsd-2-clause | 1,238 | 0 | """
Demo which illustrates how to use solve_bayesopt as a simple method for global
optimization. The return values are the sequence of recommendations made by the
algorithm as well as the final model. The point `xbest[-1]` is the final
recommendation, i.e. the expected maximizer.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from ezplot import figure, show
from pybo import solve_bayesopt
__all__ = []
def f(x):
"""
Test function that we will optimize. This is a simple sinusoidal function
whose maximum should be found very quickly.
"""
x = float(x)
return -np.cos(x) - np.sin(3*x)
def main():
"""Run the demo."""
# grab a test function
bounds = [0, 2*np.pi]
x = np.linspace(bounds[0], bounds[1], 500)
# solve the model
xbest, model | , info = solve_bayesopt(f, bounds, niter=30, verbose=True)
# make some predictions
mu, s2 = model.predict(x[:, None])
# plot the final model
ax = figure().gca()
ax.plot_banded(x, mu, 2*np.sqrt(s2))
ax.axvline(xbest)
ax.scatter(info.x.ravel(), info.y)
ax.figur | e.canvas.draw()
show()
if __name__ == '__main__':
main()
|
BlogTANG/blog-a | theme.py | Python | mit | 1,730 | 0.000578 | import os
def apply_theme(theme):
theme_dir = os.path.join('themes', theme)
if not os.path.isdir(theme_dir):
print('There seems no "themes" directory existing.')
return
template_dir = os.path.join(theme_dir, 'templates')
if not os.path.isdir(template_dir):
print('The theme "'
+ theme + '" is invalid, because there is no "templates" subdirectory in this theme.')
return
applied_template_dir = 'templates'
applied_static_dir = 'static'
# make symlinks for template files
print('Applying template files...', end='')
if not os.path.exists(applied_template_dir):
os.mkdir(applied_template_dir)
file_list = os.listdir(template_dir)
for file in file_list:
rel_path = os.path.join('..', template_dir, file)
symlink_path = os.path.join(applied_template_dir, file)
if os.path.lexists(symlink_path):
os.remove(symlink_path)
os.symlink(rel_path, symlink_path, os | .path.isdir(rel_path))
print('OK')
# make symlinks for static files
print('Applying static files...', end='')
static_dir = os.path.join(theme_dir, 'static')
if os.path.isdir(static_dir):
if not os.path.exists(applied_static_dir):
os.mkdir(applied_static_dir)
file_list = os.listdir(static_dir)
for file in file_list:
rel_path = os.path.join('..', static_di | r, file)
symlink_path = os.path.join(applied_static_dir, file)
if os.path.lexists(symlink_path):
os.remove(symlink_path)
os.symlink(rel_path, symlink_path, os.path.isdir(rel_path))
print('OK')
print('Successfully applied theme "' + theme + '"')
|
googleapis/python-dialogflow | samples/generated_samples/dialogflow_generated_dialogflow_v2_entity_types_batch_create_entities_async.py | Python | apache-2.0 | 1,788 | 0.001678 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Licen | se is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the s | pecific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for BatchCreateEntities
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_EntityTypes_BatchCreateEntities_async]
from google.cloud import dialogflow_v2
async def sample_batch_create_entities():
# Create a client
client = dialogflow_v2.EntityTypesAsyncClient()
# Initialize request argument(s)
entities = dialogflow_v2.Entity()
entities.value = "value_value"
entities.synonyms = ['synonyms_value_1', 'synonyms_value_2']
request = dialogflow_v2.BatchCreateEntitiesRequest(
parent="parent_value",
entities=entities,
)
# Make the request
operation = client.batch_create_entities(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END dialogflow_generated_dialogflow_v2_EntityTypes_BatchCreateEntities_async]
|
odeke-em/mp | tests/testCacheConnector.py | Python | mit | 1,439 | 0.006254 | import unittest
import sys
import time
import CacheConnector
class TestInitializer(unittest.TestCase):
def testInitializerWithArgs(self):
cc = CacheConnector.CacheConnector()
cc[9] = 10
self.assertEqual(cc.get(9, 100), 10)
self.assertEqual(cc.pop(9, 'nonsense'), 10)
self.assertEqual(cc.pop(9, 'nonsense'), 'nonsense')
def testCallbackInvokations(self):
cc = CacheConnector.CacheConnector()
key, value = 'NUMA', 0x7485571
cc.put(key, value)
self.assertEqual(cc.get(key, value), value)
# Passing in a Callback returns None
self.assertEqual(cc.put(key, 'lingo', print), None)
self.assertEqual(cc.pop(key, 'angle', lambda *args, **kwargs: 'nil'), None)
__terminationToggle = False
| def dFuncUnchained(*args, **kwargs):
global __terminationToggle
for i in range(10):
sys.stdout.write('#%d \033[92mSleeping.%s\033[00m\r'%(i, ' ' if i&1 else '.'))
time.sleep(1)
__terminationToggle = True
self.assertEqual(cc.put(key, value, dFuncUnchained), None)
| # Expectation here is that dFuncUnchained should keep running
# even after this test func exits, also __terminationToggle since it
# is set to True at the end of dFuncUnchained, the assertion should hold
self.assertEqual(__terminationToggle, False)
|
suutari-ai/shoop | shuup/configuration.py | Python | agpl-3.0 | 4,388 | 0 | # -*- coding: utf-8 -*-
# This file is part of Shuup. |
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
"""
API for Shuup's Dynamic Configuration.
Idea of the Dynamic Configuratio | n is to allow storing configuration
values similarly as :ref:`Django settings <django-settings-module>`
allows, but in a more flexible way: Dynamic Configuration can be changed
with a simple API and there is no need restart the application server
after changing a value.
Dynamic configuration values are permanent. Current implementation
stores the values with `~shuup.core.models.ConfigurationItem` model into
database, but that may change in the future.
Configuration values are get and set by a key string. There is a global
configuration and a shop specific configuration for each shop. Values
in shop specific configuration override the values in global
configuration.
"""
from __future__ import unicode_literals
from shuup.core import cache
from shuup.core.models import ConfigurationItem
def set(shop, key, value):
"""
Set configuration item value for a shop or globally.
If given `shop` is ``None``, the value of given `key` is set
globally for all shops. Otherwise sets a shop specific value which
overrides the global value in configuration of the specified shop.
:param shop: Shop to set value for, or None to set a global value
:type shop: shuup.core.models.Shop|None
:param key: Name of the key to set
:type key: str
:param value: Value to set. Note: Must be JSON serializable.
:type value: Any
"""
ConfigurationItem.objects.update_or_create(
shop=shop, key=key, defaults={"value": value})
if shop:
cache.set(_get_cache_key(shop), None)
else:
cache.bump_version(_SHOP_CONF_NAMESPACE)
def get(shop, key, default=None):
"""
Get configuration value by shop and key.
Global configuration can be accessed with ``shop=None``.
:param shop: Shop to get configuration value for, or None
:type shop: shuup.core.models.Shop|None
:param key: Configuration item key
:type key: str
:param default:
Default value returned if no value is set for given key (globally
or in given shop).
:type default: Any
:return: Configuration value or the default value
:rtype: Any
"""
return _get_configuration(shop).get(key, default)
def _get_configuration(shop):
"""
Get global or shop specific configuration with caching.
:param shop: Shop to get configuration for, or None
:type shop: shuup.core.models.Shop|None
:return: Global or shop specific configuration
:rtype: dict
"""
configuration = cache.get(_get_cache_key(shop))
if configuration is None:
configuration = _cache_shop_configuration(shop)
return configuration
def _cache_shop_configuration(shop):
"""
Cache global or shop specific configuration.
Global configuration (`shop` is ``None``) is read first, then `shop`
based configuration is updated over that.
:param shop: Shop to cache configuration for, or None
:type shop: shuup.core.models.Shop|None
:return: Cached configuration
:rtype: dict
"""
configuration = {}
configuration.update(_get_configuration_from_db(None))
if shop:
configuration.update(_get_configuration_from_db(shop))
cache.set(_get_cache_key(shop), configuration)
return configuration
def _get_configuration_from_db(shop):
"""
Get global or shop specific configuration from database.
:param shop: Shop to fetch configuration for, or None
:type shop: shuup.core.models.Shop|None
:return: Configuration as it was saved in database
:rtype: dict
"""
configuration = {}
for conf_item in ConfigurationItem.objects.filter(shop=shop):
configuration[conf_item.key] = conf_item.value
return configuration
_SHOP_CONF_NAMESPACE = str("shop_config")
def _get_cache_key(shop):
"""
Get global or shop specific cache key.
:param shop: Shop to get cache key for, or None
:type shop: shuup.core.models.Shop|None
:return: Global or shop specific cache key
:rtype: str
"""
return str("%s:%s") % (_SHOP_CONF_NAMESPACE, shop.pk if shop else 0)
|
JavaCardOS/pyResMan | pyResMan/BaseDialogs/pyResManCommandDialogBase_MifareIncrement.py | Python | gpl-2.0 | 4,763 | 0.04325 | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jun 17 2015)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class CommandDialogBase_MifareIncrement
###########################################################################
class CommandDialogBase_MifareIncrement ( wx.Dialog ):
def __init__( self, parent ):
wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"Mifare Increment", pos = wx.DefaultPosition, size = wx.Size( 374,171 ), style = wx.DEFAULT_DIALOG_STYLE )
self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
bSizer45 = wx.BoxSizer( wx.VERTICAL )
bSizer117 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText65 = wx.StaticText( self, wx.ID_ANY, u"Block Number", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText65.Wrap( -1 )
bSizer117.Add( self.m_staticText65, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
_choiceBlockNumberChoices = []
self._choiceBlockNumber = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, _choiceBlockNumberChoices, 0 )
self._choiceBlockNumber.SetSelection( 0 )
bSizer117.Add( self._choiceBlockNumber, 0, wx.ALL, 5 )
bSizer45.Add( bSizer117, 1, wx.EXPAND, 5 )
bSizer146 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText86 = wx.StaticText( self, wx.ID_ANY, u"Operand", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText86.Wrap( -1 )
bSizer146.Add( self.m_staticText86, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self._textctrlValue = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self._textctrlValue.SetMaxLength( 8 )
bSizer146.Add( self._textctrlValue, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer45.Add( bSizer146, 0, wx.EXPAND, 5 )
bSizer45.AddSpacer( ( 0, 20), 1, wx.EXPAND, 5 )
bSizer46 = wx.BoxSizer( wx.HORIZONTAL )
self._statictextCommandName = wx.StaticText( self, wx.ID_ANY, u"Mifare Increment", wx.DefaultPosition, wx.DefaultSize, 0 )
self._statictextCommandName.Wrap( -1 )
bSizer46.Add( self._statictextCommandName, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self._textctrlCommandValue = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self._textctrlCommandValue.SetExtraStyle( wx.WS_EX_VALIDATE_RECURSIVELY )
bSizer46.Add( self._textctrlCommandValue, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer45.Add( bSizer46, 0, wx.EXPAND, 5 )
bSizer47 = wx.BoxSizer( wx.HORIZONTAL )
bSizer47.AddSpacer( ( 0, 0), 1, | wx.EXPAND, 5 )
self._buttonOK = wx.Button( self, wx.ID_ANY, u"OK", wx.DefaultPosition, wx.DefaultSize, 0 )
| self._buttonOK.SetDefault()
bSizer47.Add( self._buttonOK, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self._buttonCancel = wx.Button( self, wx.ID_ANY, u"Cancel", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer47.Add( self._buttonCancel, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer47.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
bSizer45.Add( bSizer47, 0, wx.EXPAND, 5 )
self.SetSizer( bSizer45 )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self._choiceBlockNumber.Bind( wx.EVT_CHOICE, self._choiceBlockNumberOnChoice )
self._textctrlValue.Bind( wx.EVT_TEXT, self._textctrlValueOnText )
self._textctrlCommandValue.Bind( wx.EVT_TEXT, self._textctrlCommandValueOnText )
self._buttonOK.Bind( wx.EVT_BUTTON, self._buttonOKOnButtonClick )
self._buttonCancel.Bind( wx.EVT_BUTTON, self._buttonCancelOnButtonClick )
def __del__( self ):
pass
# Virtual event handlers, overide them in your derived class
def _choiceBlockNumberOnChoice( self, event ):
event.Skip()
def _textctrlValueOnText( self, event ):
event.Skip()
def _textctrlCommandValueOnText( self, event ):
event.Skip()
def _buttonOKOnButtonClick( self, event ):
event.Skip()
def _buttonCancelOnButtonClick( self, event ):
event.Skip()
|
alexskc/Fruitydo | profilepage/admin.py | Python | gpl-3.0 | 369 | 0.00813 | """Add tasks and events to the admin in | terface"""
from django.contrib import admin
from .models import Task, Event
class EventInline(admin.StackedInline):
"""Create an interface for adding events."""
model = Event
extra = 3
class TaskAdmin(a | dmin.ModelAdmin):
"""Add the interface."""
inlines = [EventInline]
admin.site.register(Task, TaskAdmin)
|
areriff/pythonlearncanvas | Python Script Sample/create_dir_if_not_there.py | Python | mit | 615 | 0.009756 | # Script Name : create_dir_if_not_there.py
# Author : Craig Richa | rds
# Created : 09th January 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Checks to see if a directory exists in the users home directory, if not then create it
import os # Import the OS module
home = os.path.expanduser("~") # Set the variable home by expanding the users set home director | y
print
home # Print the location
if not os.path.exists(home + '/testdir'): # Check to see if the directory exists
os.makedirs(home + '/testdir') # If not create the directory, inside their home directory
|
jinyu121/Canteen | CanteenWebsite/templatetags/canteen_website_tags.py | Python | gpl-3.0 | 1,773 | 0.00282 | # -*- coding: utf-8 -*-
from django import template
from CanteenWebsite.models import Category
from CanteenWebsite.utils.functions impor | t setting_get
register = template.Library()
@register.simple_tag
def get_setting(name, default=None):
return setting_get(name, default)
@register.inclusion_tag('CanteenWebsite/inclusions/sidebar_category_list.htm | l', takes_context=True)
def sidebar_category_list(context):
categories = Category.objects.all()
try:
current_category = context['current_category']
except:
current_category = None
return {
'current': current_category,
'categories': categories,
}
@register.inclusion_tag('CanteenWebsite/inclusions/pagination.html')
def show_pagination(page):
pagination = page.paginator
page_range = list()
if pagination.num_pages <= 10:
page_range = pagination.page_range
else:
ON_EACH_SIDE = 2
ON_ENDS = 2
DOT = '...'
if page.number > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(1, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page.number - ON_EACH_SIDE, page.number + 1))
else:
page_range.extend(range(1, page.number + 1))
if page.number < (pagination.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page.number + 1, page.number + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(pagination.num_pages - ON_ENDS, pagination.num_pages + 1))
else:
page_range.extend(range(page.number + 1, pagination.num_pages + 1))
return {
'page': page,
'pages': page_range
}
@register.assignment_tag
def define(val=None):
return val
|
Pursuit92/antlr4 | runtime/Python3/src/antlr4/Recognizer.py | Python | bsd-3-clause | 5,808 | 0.005682 | #
# Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
from antlr4.RuleContext import RuleContext
from antlr4.Token import Token
from antlr4.error.ErrorListener import ProxyErrorListener, ConsoleErrorListener
# need forward delcaration
RecognitionException = None
class Recognizer(object):
tokenTypeMapCache = dict()
ruleIndexMapCache = dict()
def __init__(self):
self._listeners = [ ConsoleErrorListener.INSTANCE ]
self._interp = None
self._stateNumber = -1
def extractVersion(self, version):
pos = version.find(".")
major = version[0:pos]
version = version[pos+1:]
pos = version.find(".")
if pos==-1:
pos = version.find("-")
if pos==-1:
pos = len(version)
minor = version[0:pos]
return major, minor
def checkVersion(self, toolVersion):
runtimeVersion = "4.6.1"
rvmajor, rvminor = self.extractVersion(runtimeVersion)
tvmajor, tvminor = self.extractVersion(toolVersion)
if rvmajor!=tvmajor or rvminor!=tvminor:
print("ANTLR runtime and generated code versions disagree: "+runtimeVersion+"!="+toolVersion)
def addErrorListener(self, listener):
self._listeners.append(listener)
def removeErrorListener(self, listener):
self._listeners.remove(listener)
def removeErrorListeners(self):
self._listeners = []
def getTokenTypeMap(self):
tokenNames = self.getTokenNames()
if tokenNames is None:
from antlr4.error.Errors import UnsupportedOperationException
raise UnsupportedOperationException("The current recognizer does not provide a list of token names.")
result = self.tokenTypeMapCache.get(tokenNames, None)
if result is None:
result = zip( tokenNames, range(0, len(tokenNames)))
result["EOF"] = Token.EOF
self.tokenTypeMapCache[tokenNames] = result
return result
# Get a map from rule names to rule indexes.
#
# <p>Used for XPath and tree pattern compilation.</p>
#
def getRuleIndexMap(self):
ruleNames = self.getRuleNames()
if ruleNames is None:
from antlr4.error.Errors import UnsupportedOperationException
raise UnsupportedOperationException("The current recognizer does not provide a list of rule names.")
result = self.ruleIndexMapCache.get(ruleNames, None)
if result is None:
result = zip( ruleNames, range(0, len(ruleNames)))
self.ruleIndexMapCache[ruleNames] = result
return result
def getTokenType(self, tokenName:str):
ttype = self.getTokenTypeMap().get(tokenName, None)
if ttype is not None:
return ttype
else:
return Token.INVALID_TYPE
# What is the error header, normally line/character position information?#
def getErrorHeader(self, e:RecognitionException):
line = e.getOffendingToken().line
column = e.getOffendingToken().column
return "line "+line+":"+column
# How should a token be displayed in an error message? The default
# is to display just the text, but during development you might
# want to have a lot of information spit out. Override in that case
# to use t.toString() (which, for CommonToken, dumps everything about
# the token). This is better than forcing you to override a method in
# your token objects because you don't have to go modify your lexer
# so that it creates a new Java type.
#
# @deprecated This method is not called by the ANTLR 4 Runtime. Specific
# implementations of {@link ANTLRErrorStrategy} may provide a similar
# feature when necessary. For example, see
# {@link DefaultErrorStrategy#getTokenErrorDisplay}.
#
def getTokenErrorDisplay(self, t:Token):
if t is None:
return "<no token>"
s = t.text
if s is None:
if t.type==Token.EOF:
s = "<EOF>"
else:
s = "<" + str(t.type) + ">"
s = s.replace("\n","\\n")
s = s.replace("\r","\\r")
s = s.replace("\t","\\t")
return "'" + s + "'"
def getErrorListenerDispatch(self):
return ProxyErrorListener(self._listeners)
# subclass needs to override these if there are sempreds or actions
# that the ATN interp needs to execute
def sempred(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
return True
def precpred(self, localctx:RuleContext , precedence:int):
return True
@property
def state(self):
return self._stateNumber
| # Indicate that the recognizer has changed internal state that is
# consistent with the ATN state passed in. This way we always know
# where we are in the ATN as the parser goes along. The rule
# context objects form a stack that lets us see the stack of
# invoking rules. Combine this and we have complete ATN
# configuration information.
@state.setter
def state(self, atnState:int):
self._stateNumber = atnState
del Recogni | tionException
import unittest
class Test(unittest.TestCase):
def testVersion(self):
major, minor = Recognizer().extractVersion("1.2")
self.assertEqual("1", major)
self.assertEqual("2", minor)
major, minor = Recognizer().extractVersion("1.2.3")
self.assertEqual("1", major)
self.assertEqual("2", minor)
major, minor = Recognizer().extractVersion("1.2-snapshot")
self.assertEqual("1", major)
self.assertEqual("2", minor)
|
dvarrazzo/arduino | thermo/client/google_api.py | Python | gpl-3.0 | 1,030 | 0.004854 | #!/usr/bin/env python
"""Read the Google Weather API and emit data in the usual format.
Write a sample every 5 minutes on stdout, log on stderr.
"""
import sys
import time
from ur | llib import quote_plus
from urllib2 i | mport urlopen
from datetime import datetime, timedelta
from xml.etree import cElementTree as ET
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def main():
location = sys.argv[1]
url = ("http://www.google.com/ig/api?weather="
+ quote_plus(location))
while 1:
ts = datetime.now()
try:
et = ET.parse(urlopen(url))
value = float(et.getroot()
.find('weather')
.find('current_conditions')
.find('temp_c')
.attrib['data'])
except Exception, e:
logger.error("error reading: %s", e)
else:
print ts.strftime("%Y-%m-%d %H:%M:%S"), value
time.sleep(5 * 60)
if __name__ == '__main__':
sys.exit(main())
|
kcompher/pygraphistry | docs/source/conf.py | Python | bsd-3-clause | 11,851 | 0.006329 | import sys
import os
sys.path.insert(0, os.path.abspath('../..'))
# -*- coding: utf-8 -*-
#
# PyGraphistry documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 21 19:30:19 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that | not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in anothe | r directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyGraphistry'
copyright = u'2015, Graphistry, Inc.'
author = u'Graphistry, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
import graphistry
# The short X.Y version.
version = graphistry.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyGraphistrydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyGraphistry.tex', u'PyGraphistry Documentation',
u'Graphistry, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this |
weidnerm/solar_data_monitor | SolarSensors_test.py | Python | mit | 3,665 | 0.014734 |
from unittest import TestCase, main, skip
from mock import patch, call, MagicMock
from SolarSensors import SolarSensors
import os
class SolarSensors_test(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def get_default_config(self):
config = [
{
"address": "0x45",
"name": "Panel",
"scale": 2.0
},
{
"address": "0x49",
"name": "Batt 5",
"scale": 1.0
},
{
"address": "0x41",
"name": "Batt 6",
"scale": 1.0
},
{
"address": "0x40",
"name": "Load",
"scale": 2.0
},
{
"address": "0x42",
"name": "Batt 7",
"scale": 1.0
},
{
"address": "0x43",
"name": "Batt 8",
"scale": 1.0
},
{
"address": "0x48",
"name": "Batt 4",
"scale": 1.0
},
{
"address": "0x47",
"name": "Batt 3",
"scale": 1.0
},
{
"address": "0x4a",
"name": "Batt 2",
"scale": 1.0
},
{
"address": "0x46",
"name": "Batt 1",
"scale": 1.0
}
]
return config
def test_get_default_config(self):
config = self.get_default_config()
self.assertEqual( [
{'scale': 2.0, 'name': 'Panel', 'address': '0x45'},
{'scale': 1.0, 'name': 'Batt 5', 'address': '0x49'},
{'scale': 1.0, 'name': 'Batt 6', 'address': '0x41'},
{'scale': 2.0, 'name': 'Load', 'address': '0x40'},
{'scale': 1.0, 'name': 'Batt 7', 'address': '0x42'},
{'scale': 1.0, 'name': 'Batt 8', 'address': '0x43'},
{'scale': 1.0, 'name': 'Batt 4', 'address': '0x48'},
{'scale': 1.0, 'name': 'Batt 3', 'address': '0x47'},
| {'scale': 1.0, 'name': 'Batt 2', 'address': '0x4a'},
{'scale': 1.0, 'name': 'Batt 1', 'address': '0x46'}], c | onfig)
@patch('SolarSensors.INA219')
def test_ctor(self, mockINA):
mySolarSensors = SolarSensors( self.get_default_config() )
self.assertEqual(['Panel', 'Batt 5', 'Batt 6', 'Load', 'Batt 7', 'Batt 8', 'Batt 4', 'Batt 3', 'Batt 2', 'Batt 1']
, mySolarSensors.m_sensorNames)
#self.assertEqual("" , mySolarSensors.m_sensors)
self.assertEqual([2.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] , mySolarSensors.m_scale_factors)
@patch('SolarSensors.INA219')
def test_getData(self, mockINA):
mockINA.return_value.getBusVoltage_V.side_effect = [12.844 ,12.844 ,12.848 ,12.828 ,12.848 ,12.844 ,12.832 ,12.832 ,12.836 ,12.596]
mockINA.return_value.getCurrent_mA.side_effect = [154 ,-17 ,-15 ,196 ,-21 ,-12 ,2 ,6 ,3 ,2346]
mySolarSensors = SolarSensors( self.get_default_config() )
data = mySolarSensors.getData()
mySolarSensors
self.assertEqual({
'names': ['Panel', 'Batt 5', 'Batt 6', 'Load', 'Batt 7', 'Batt 8', 'Batt 4', 'Batt 3', 'Batt 2', 'Batt 1'],
'current': [308, -17, -15, 392, -21, -12, 2, 6, 3, 2346],
'voltage': [12.844, 12.844, 12.848, 12.828, 12.848, 12.844, 12.832, 12.832, 12.836, 12.596]
}, data)
if __name__ == '__main__':
main()
|
frewsxcv/lop.farm | app/landing/tests.py | Python | mpl-2.0 | 544 | 0 | from django.core.urlresolvers impor | t reverse
from django.test import Client, TestCase
class LandingViewTestCase(TestCase):
def test_landing(self):
url = reverse('landing')
res = self.client.get(url)
self.assertEqual(200, res.status_code)
class LandingTestCase(TestCase):
def test_no_catch_all(self):
"""Ensure there are no URL routes that catch-all"""
client = Client()
response = client.get(' | /this-should-not-be-a-valid-endpoint')
self.assertEqual(response.status_code, 404)
|
plecto/motorway | motorway/contrib/recurly_integration/ramps.py | Python | apache-2.0 | 1,594 | 0.000627 | import os
import time
from motorway.messages import Message
from motorway.ramp import Ramp
"""
Requires pip install recurly
"""
class RecurlyRamp(Ramp):
@property
def recurly(self):
import recurly
recurly.SUBDOMAIN = os.e | nviron.get('RECURLY_SUBDOMAIN')
recurly.API_KEY = os.environ.get('RECURLY_API_KEY')
return recurly
class RecurlyInvoiceRamp(RecurlyRamp):
def next(self):
for invoice in self.recurly.Invoice.all():
yield Message(invoice.uuid, {
'uuid': invoice.uuid,
'invoice_number': invoice.invoice_number,
| 'vat_number': invoice.vat_number,
'total_in_cents': invoice.total_in_cents,
'tax_in_cents': invoice.tax_in_cents,
'subtotal_in_cents': invoice.subtotal_in_cents,
'state': invoice.state,
'collection_method': invoice.collection_method,
'currency': invoice.currency,
'account': invoice.account().account_code, # TODO: Parse this from the href in the XML
'created_at': invoice.created_at,
'updated_at': invoice.updated_at,
'closed_at': invoice.closed_at,
})
time.sleep(3600)
class RecurlyAccountRamp(RecurlyRamp):
def next(self):
for account in self.recurly.Account.all():
yield Message(account.account_code, {
'company_name': account.company_name,
'account_code': account.account_code,
})
time.sleep(3600)
|
felix9064/python | Demo/liaoxf/do_generator.py | Python | mit | 2,677 | 0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 生成器表达式----一边循环一边计算
# 列表元素可以在循环的过程中不断推算出后续的元素
# 这样就不必创建完整的list,从而节省大量的空间
from collections import Iterable
import array
# 第一种方法:将列表生成式最外面的[] 改成()
# 列表生成式
list_comp = [x * x for x in range(10)]
# 生成器表达式
list_gene = (x * x for x in range(10))
# 生成器是可迭代对象
print(isinstance(list_gene, Iterable))
# 如果生成器表达式是一个函数调用过程中的唯一参数,那么不需要额外再用括号把它围起来
symbols = '$¢£¥€¤'
t = tuple(ord(symbol) for symbol in symbols)
print(t)
# 如果生成器表达式不是一个函数的唯一参数,则外面的圆括号是必须的
array.array('I', (ord(s) for s in symbols))
# 生成器表达式是逐个产出元素,从来不会一次性产出一个含有6个T恤样式的列表
colors = ['black', 'whi | te']
sizes = ['S', 'M', 'L']
for t_shirts in ('%s %s' % (c, s) for c in colors for s in sizes):
print(t_shirts)
# 用函数循环的方法实现斐波拉契数列
def fibonacci1(num):
n, a, b = 0, 0, 1
while n < num:
prin | t(b, end=' ')
a, b = b, a + b
n = n + 1
print('done')
return 'done'
fibonacci1(20)
# 第二种方法:如果一个函数定义中包含yield关键字,那么这个函数就不再是一个普通函数,而是一个generator(生成器函数)
# 把上面定义的函数改一下就成了一个生成器
def fibonacci2(num):
n, a, b = 0, 0, 1
while n < num:
yield b
a, b = b, a + b
n = n + 1
print('done')
return 'done'
for g in fibonacci2(20):
print(g, end=' ')
# 练习:输出杨辉三角
def triangles(num):
n, list1 = 0, [1]
while n < num:
yield list1
i = len(list1) - 1
while i:
list1[i] = list1[i] + list1[i-1]
i -= 1
list1.append(1)
n = n + 1
# 输出杨辉三角更简洁的写法
def triangles():
list2 = [1]
while True:
yield list2
list2 = [x + y for x, y in zip([0] + list2, list2 + [0])]
x = 0
results = []
for t in triangles():
print(t)
results.append(t)
x = x + 1
if x == 10:
break
if results == [
[1],
[1, 1],
[1, 2, 1],
[1, 3, 3, 1],
[1, 4, 6, 4, 1],
[1, 5, 10, 10, 5, 1],
[1, 6, 15, 20, 15, 6, 1],
[1, 7, 21, 35, 35, 21, 7, 1],
[1, 8, 28, 56, 70, 56, 28, 8, 1],
[1, 9, 36, 84, 126, 126, 84, 36, 9, 1]
]:
print('测试通过!')
else:
print('测试失败!')
|
lcpt/xc | verif/tests/database/test_database_14.py | Python | gpl-3.0 | 4,437 | 0.038323 | # -*- coding: utf-8 -*-
# home made test
import xc_base
import geom
import xc
from model import predefined_spaces
from solution import predefined_solutions
from materials import typical_materials
__author__= "Luis C. Pérez Tato (LCPT)"
__copyright__= "Copyright 2014, LCPT"
__license__= "GPL"
__version__= "3.0"
__email__= "l.pereztato@gmail.com"
E= 30e6 # Young modulus (psi)
l= 20*12 # Bar length in inches
h= 30 # Beam cross-section depth in inches.
A= 50.65 # viga area in square inches.
I= 7892 # Inertia of the beam section in inches to the fourth power.
F= 1000.0 # Force
# Problem type
feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor
nodes= preprocessor.getNodeHandler
modelSpace= predefined_spaces.StructuralMechanics2D(nodes)
nodes.defaultTag= 1 #First node number.
nod= nodes.newNodeXY(0,0)
nod= nodes.newNodeXY(l,0.0)
nod= nodes.newNodeXY(2*l,0.0)
nod= nodes.newNodeXY(3*l,0.0)
# Geometric transformations
lin= modelSpace.newLinearCrdTransf("lin")
# Materials definition
scc= typical_materials.defElasticSection2d(preprocessor, "scc",A,E,I)
# Elements definition
elements= preprocessor.getElementHandler
elements.defaultTransformation= "lin"
elements.defaultMaterial= "scc"
# sintaxis: beam2d_02[<tag>]
elements.defaultTag= 1 #Tag for next element.
beam2d= elements.newElement("ElasticBeam2d",xc.ID([1,2]))
beam2d.h= h
beam2d= elements.newElement("ElasticBeam2d",xc.ID([3,4]))
beam2d.h= h
# Constraints
constraints= preprocessor.getBoundaryCondHandler
#
spc= constraints.newSPConstraint(1,0,0.0) # Node 1
spc= constraints.newSPConstraint(1,1,0.0)
spc= constraints.newSPConstraint(1,2,0.0)
spc= constraints.newSPConstraint(4,0,0.0) # Node 4
spc= constraints.newSPConstraint(4,1,0.0)
spc= constraints.newSPConstraint(4,2,0.0)
setTotal= preprocessor.getSets.getSet("total")
setTotal.killElements() # deactivate the elements
mesh= preprocessor.getDomain.getMesh
mesh.setDeadSRF(0.0)
mesh.freezeDeadNodes("congela") # Constraint inactive nodes.
# Loads definition
loadHandler= preprocessor.getLoadHandler
lPatterns= loadHandler.getLoadPatterns
#Load modulation.
ts= lPatterns.newTimeSeries("constant_ts","ts")
lPatterns.currentTimeSeries= "ts"
#Load case definition
lp0= lPatterns.newLoadPattern("default","0")
lp0.newNodalLoad(2,xc.Vector([F,F,F]))
#We add the load case to domain.
lPatterns.addToDomain("0")
import os
os.system("rm -r -f /tmp/test14.db")
db= feProblem.newDatabase("BerkeleyDB","/tmp/test14.db")
db.save(100)
feProblem.clearAll()
feProblem.setVerbosityLevel(0) #Dont print warning messages
#about pointers to material.
db.restore(100)
feProblem.setVerbosityLevel(1) #Print warnings again
# Solution
analisis= predefined_solutions.simple_static_linear(feProblem)
result= analisis.analyze(1)
nodes.calculateNodalReactions(True,1e-7)
nod1= nodes.getNode(1)
deltax1= nod1.getDisp[0]
deltay1= nod1.getDisp[1]
nod2= nodes.getNode(2)
deltax2= nod2.getDisp[0]
deltay2= nod2.getDisp[1]
R1= nod1.getReaction[0]
R2= nod2.getReaction[0]
setTotal= preprocessor.getSets.getSet("total")
setTotal.aliveElements()
mesh= preprocessor.getDomain.getMesh
mesh.meltAliveNodes("congela") # Reactivate inactive nodes.
# Solution
analisis= predefined_solutions.simple_static_linear(feProblem)
result= analisis.analyze(1)
db.save(105)
feProblem.clearAll()
feProblem.setVerbosityLevel(0) #Dont print warning messages
#about pointers to material.
db.restore(105)
feProblem.setVerbosityLevel(1) #Print warnings again
nodes.calculateNodalReactions(True,1e-7)
nod1= nodes.getNode(1)
deltaxB1= nod1.getDisp[0]
deltayB1= nod1.getDisp[1]
nod2= nodes.getNode(2)
deltaxB2= nod2.getDisp[0]
deltayB2= nod2.getDisp[1]
RB1= nod1.getReaction[0]
nod2= nodes.getNode(2)
RB2= nod2.getReaction[0]
ratio1= (R1)
ratio2= ((R2+F)/F)
ratio3= ((RB1+F)/F)
ratio4= (RB2)
'''
print "R1= ",R1
print "R2= ",R2
print "dx2= ",deltax2
print "dy2= ",deltay2
print "RB1= ",RB1
print "RB2= ",RB2
print "dxB2= ",deltaxB2
print "dyB2= ",deltayB2
print "ratio1= " | ,ratio1
print "ratio2= ",ratio2
print "ratio3= ",ratio3
print "ratio4= ",ratio4
'''
import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if (abs(ratio1)<1e-5) & (abs(ratio2)<1e-5) & (abs(ratio3)<1e-5):
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
os.system( | "rm -rf /tmp/test14.db") # Your garbage you clean it
|
MIT-LCP/mimic-code | mimic-iv/tests/test_sepsis.py | Python | mit | 481 | 0.002079 | import pandas as pd
from pandas.io import gbq
def test_sepsis3_one_row_per_stay_id(dataset, project_id):
"""Verifies one stay_id per row of sepsis-3"""
query = f"""
SELECT
COUNT(*) AS n
FROM
| (
SELECT stay_id FROM {dataset}.sepsis3 GROUP BY 1 HAVING COUNT(*) > 1
) s
"""
df = gbq.read_gbq(query, project_id=project_id, d | ialect="standard")
n = df.loc[0, 'n']
assert n == 0, 'sepsis-3 table has more than one row per stay_id'
|
TomasTomecek/docker-scripts | tests/test_integ_squash.py | Python | mit | 19,462 | 0.001182 | import unittest
import pytest
import mock
import six
import docker
import os
import json
import logging
import sys
import tarfile
import io
from io import BytesIO
import uuid
from docker_scripts.squash import Squash
from docker_scripts.errors import SquashError
if not six.PY3:
import docker_scripts.lib.xtarfile
class TestIntegSquash(unittest.TestCase):
docker = docker.Client(version='1.16')
log = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
class Image(object):
def __init__(self, dockerfile):
self.dockerfile = dockerfile
self.docker = TestIntegSquash.docker
self.name = "integ-%s" % uuid.uuid1()
self.tag = "%s:latest" % self.name
def __enter__(self):
f = BytesIO(self.dockerfile.encode('utf-8'))
for line in self.docker.build(fileobj=f, tag=self.tag, rm=True):
try:
print(json.loads(line)["stream"].strip())
except:
print(line)
self.history = self.docker.history(self.tag)
self.layers = [o['Id'] for o in self.history]
self.metadata = self.docker.inspect_image(self.tag)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not os.getenv('CI'):
self.docker.remove_image(image=self.tag, force=True)
class SquashedImage(object):
def __init__(self, image, number_of_layers, output_path=None, load_output_back=False):
self.image = image
self.number_of_layers = number_of_layers
self.docker = TestIntegSquash.docker
self.log = TestIntegSquash.log
self.tag = "%s:squashed" % self.image.name
self.output_path = output_path
self.load_output_back = load_output_back
def __enter__(self):
from_layer = self.docker.history(
self.image.tag)[self.number_of_layers]['Id']
squash = | Squash(
self.log, self.image.tag, self.docker, tag=self.tag, from_layer=from_layer,
output_path=self.outp | ut_path, load_output_back=self.load_output_back)
self.image_id = squash.run()
if not self.output_path or self.load_output_back:
self.squashed_layer = self._squashed_layer()
self.layers = [o['Id'] for o in self.docker.history(self.tag)]
self.metadata = self.docker.inspect_image(self.tag)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not (os.getenv('CI') or self.output_path):
self.docker.remove_image(image=self.tag, force=True)
def _save_image(self):
image = self.docker.get_image(self.tag)
buf = io.BytesIO()
buf.write(image.data)
buf.seek(0) # Rewind
return buf
def _extract_file(self, name, tar_object):
with tarfile.open(fileobj=tar_object, mode='r') as tar:
member = tar.getmember(name)
return tar.extractfile(member)
def _squashed_layer(self):
image_id = self.docker.inspect_image(self.tag)['Id']
image = self._save_image()
return self._extract_file(image_id + '/layer.tar', image)
def assertFileExists(self, name):
self.squashed_layer.seek(0) # Rewind
with tarfile.open(fileobj=self.squashed_layer, mode='r') as tar:
assert name in tar.getnames(
), "File '%s' was not found in the squashed files: %s" % (name, tar.getnames())
def assertFileDoesNotExist(self, name):
self.squashed_layer.seek(0) # Rewind
with tarfile.open(fileobj=self.squashed_layer, mode='r') as tar:
assert name not in tar.getnames(
), "File '%s' was found in the squashed layer files: %s" % (name, tar.getnames())
def assertFileIsNotHardLink(self, name):
self.squashed_layer.seek(0) # Rewind
with tarfile.open(fileobj=self.squashed_layer, mode='r') as tar:
member = tar.getmember(name)
assert member.islnk(
) == False, "File '%s' should not be a hard link, but it is" % name
class Container(object):
def __init__(self, image):
self.image = image
self.docker = TestIntegSquash.docker
self.log = TestIntegSquash.log
def __enter__(self):
self.container = self.docker.create_container(image=self.image.tag)
data = self.docker.export(self.container)
self.content = six.BytesIO(data.read())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not os.getenv('CI'):
self.docker.remove_container(self.container, force=True)
def assertFileExists(self, name):
self.content.seek(0) # Rewind
with tarfile.open(fileobj=self.content, mode='r') as tar:
assert name in tar.getnames(
), "File %s was not found in the container files: %s" % (name, tar.getnames())
def assertFileDoesNotExist(self, name):
self.content.seek(0) # Rewind
with tarfile.open(fileobj=self.content, mode='r') as tar:
assert name not in tar.getnames(
), "File %s was found in the container files: %s" % (name, tar.getnames())
def test_all_files_should_be_in_squashed_layer(self):
"""
We squash all layers in RUN, all files should be in the resulting squashed layer.
"""
dockerfile = '''
FROM busybox
RUN touch /somefile_layer1
RUN touch /somefile_layer2
RUN touch /somefile_layer3
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 3) as squashed_image:
squashed_image.assertFileDoesNotExist('.wh.somefile_layer1')
squashed_image.assertFileDoesNotExist('.wh.somefile_layer2')
squashed_image.assertFileDoesNotExist('.wh.somefile_layer3')
squashed_image.assertFileExists('somefile_layer1')
squashed_image.assertFileExists('somefile_layer2')
squashed_image.assertFileExists('somefile_layer3')
with self.Container(squashed_image) as container:
container.assertFileExists('somefile_layer1')
container.assertFileExists('somefile_layer2')
container.assertFileExists('somefile_layer3')
# We should have two layers less in the image
self.assertTrue(
len(squashed_image.layers) == len(image.layers) - 2)
def test_only_files_from_squashed_image_should_be_in_squashed_layer(self):
"""
We squash all layers in RUN, all files should be in the resulting squashed layer.
"""
dockerfile = '''
FROM busybox
RUN touch /somefile_layer1
RUN touch /somefile_layer2
RUN touch /somefile_layer3
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 2) as squashed_image:
squashed_image.assertFileDoesNotExist('.wh.somefile_layer2')
squashed_image.assertFileDoesNotExist('.wh.somefile_layer3')
# This file should not be in the squashed layer
squashed_image.assertFileDoesNotExist('somefile_layer1')
# Nor a marker files for it
squashed_image.assertFileDoesNotExist('.wh.somefile_layer1')
squashed_image.assertFileExists('somefile_layer2')
squashed_image.assertFileExists('somefile_layer3')
with self.Container(squashed_image) as container:
# This file should be in the containe |
gkc1000/pyscf | examples/doci/00-simple_doci_casscf.py | Python | apache-2.0 | 361 | 0 | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
A simple example to run DOCI-CASCI and DOCI- | CASSCF calculation.
'''
from pyscf import gto
from pyscf import | doci
mol = gto.M(atom='N 0 0 0; N 0 0 2.', basis='6-31g')
mf = mol.RHF().run()
mc = doci.CASSCF(mf, 18, 14)
mc.verbose = 4
mc.kernel()
mc = doci.CASCI(mf, 18, 14)
mc.kernel()
|
shadowmint/nwidget | lib/cocos2d-0.5.5/cocos/gl_framebuffer_object.py | Python | apache-2.0 | 3,430 | 0.003207 | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""A thin wrapper for OpenGL framebuffer objets. For implementation use only"""
__docformat__ = 'restructuredtext'
import ctypes as ct
from pyglet.gl import *
class FramebufferObject (object):
"""
Wrapper for framebuffer objects. See
http://oss.sgi.com/projects/ | ogl-sample/registry/EXT/framebuffer_object.txt
API is not very OO, should be improved.
"""
def __init__ (self):
"""Create a new framebuffer object"""
id = GLuint(0)
glGenFramebuffersEXT (1, ct.byref(id))
self._id = id.value
def bind (self):
"""Set FBO as current rendering target"""
glBindFramebufferEXT (GL_FRAMEBUFFER_EXT, self._id)
def unbind (self):
"""Set default framebuffer as current rendering target"""
| glBindFramebufferEXT (GL_FRAMEBUFFER_EXT, 0)
def texture2d (self, texture):
"""Map currently bound framebuffer (not necessarily self) to texture"""
glFramebufferTexture2DEXT (
GL_FRAMEBUFFER_EXT,
GL_COLOR_ATTACHMENT0_EXT,
texture.target,
texture.id,
texture.level)
def check_status(self):
"""Check that currently set framebuffer is ready for rendering"""
status = glCheckFramebufferStatusEXT (GL_FRAMEBUFFER_EXT)
if status != GL_FRAMEBUFFER_COMPLETE_EXT:
raise Exception ("Frambuffer not complete: %d" % status)
def __del__(self):
'''Delete the framebuffer from the GPU memory'''
id = GLuint(self._id)
glDeleteFramebuffersEXT(1, ct.byref(id))
|
fxia22/ASM_xf | PythonD/site_python/MMTK/MolecularSurface.py | Python | gpl-2.0 | 6,739 | 0.006529 | #
# Copyright 2000 by Peter McCluskey (pcm@rahul.net).
# You may do anything you want with it, provided this notice is kept intact.
#
# This module is in part a replacement for the MolecularSurface MMTK module
# that uses the NSC code. It has a less restrictive license than the NSC code,
# and is mostly more flexible, but is probably slower and somewhat less
# accurate. It can run (slowly) without any of the C code. It has an
# additional routine surfacePointsAndGradients, which returns area, an
# "up" vector, and surface points for each atom.
# Minor modifications by Konrad Hinsen <hinsen@cnrs-orleans.fr>:
# - Replaced tabs by spaces
# - Added/adapted docstrings to MMTK conventions
# - Removed assignment of methods to class GroupOfAtoms
# - Use vectors in the return value of surfacePointsAndGradients
# - Replaced "math" module by "Numeric"
# - Renamed module _surface to MMTK_surface
"""This module provides functions that calculate molecular surfaces
and volumes.
"""
import surfm
from MMTK.Collection import GroupOfAtoms, Collection
from MMTK import Vector
import Numeric
def surfaceAndVolume(self, probe_radius = 0.):
"""Returns the molecular surface and volume of |object|,
defining the surface at a distance of |probe_radius| from
the van-der-Waals surfaces of the atoms."""
atoms = self.atomList()
smap = surfm.surface_atoms(atoms, probe_radius, ret_fmt = 2)
tot_a = 0
tot_v = 0
for a in atoms:
atom_data = smap[a]
tot_a = tot_a + atom_data[0]
tot_v = tot_v + atom_data[1]
return (tot_a, tot_v)
#GroupOfAtoms.surfaceAndVolume = surfaceAndVolume
def surfaceAtoms(self, probe_radius = 0.):
"""Returns a dictionary that maps the surface atoms to their
exposed surface areas."""
atoms = self.atomList()
smap = surfm.surface_atoms(atoms, probe_radius, ret_fmt = 1)
surface_atoms = {}
for a in atoms:
area = smap[a]
if area > 0.:
# we have a surface atom
surface_atoms[a] = area
return surface_atoms
#GroupOfAtoms.surfaceAtoms = surfaceAtoms
def surfacePointsAndGradients(self, probe_radius = 0., point_density = 258):
"""Returns a dictionary that maps the surface atoms to a tuple
containing three surface-related quantities: the exposed surface
are, a list of points in the exposed surface, and a gradient vector
pointing outward from the surface."""
atoms = self.atomList()
smap = surfm.surface_atoms(atoms, probe_radius, ret_fmt = 4,
point_density = point_density)
surface_data = {}
for a in atoms:
(area, volume, points1, grad) = smap[a]
if area > 0.:
# we have a surface atom
surface_data[a] = (area, map(Vector, points1), Vector(grad))
return surface_data
#GroupOfAtoms.surfacePointsAndGradients = surfacePointsAndGradients
class Contact:
def __init__(self, a1, a2, dist = Non | e):
self.a1 = a1
self.a2 = a2
if dist is None:
self.dist = (a1.position() - a2.position()).length()
else:
| self.dist = dist
def __getitem__(self, index):
return (self.a1, self.a2)[index]
def __cmp__(a, b):
return cmp(a.dist, b.dist)
def __hash__(self):
return (self.a1, self.a2)
def __repr__(self):
return 'Contact(%s, %s)' % (self.a1, self.a2)
__str__ = __repr__
def findContacts(object1, object2, contact_factor = 1.0, cutoff = 0.0):
"""Returns a list of MMTK.MolecularSurface.Contact objects
that describe atomic contacts between |object1| and |object2|.
A contact is defined as a pair of atoms whose distance is less than
|contact_factor|*(r1+r2+|cutoff|) where r1 and r2 are the atomic
van-der-Waals radii."""
max_object1 = len(object1.atomList())
atoms = object1.atomList() + object2.atomList()
tup = surfm.get_atom_data(atoms, 0.0)
max_rad = tup[0] # max vdW_radius
atom_data = tup[1]
nbors = surfm.NeighborList(atoms, contact_factor*(2*max_rad+cutoff),
atom_data)
clist = []
done = {}
for index1 in range(len(atoms)):
for (index2, dist2) in nbors[index1]:
if (index1 < max_object1) != (index2 < max_object1):
if index1 < index2:
a1 = atoms[index1]
a2 = atoms[index2]
else:
a1 = atoms[index2]
a2 = atoms[index1]
dist = Numeric.sqrt(dist2)
if dist >= contact_factor*(a1.vdW_radius + a2.vdW_radius + cutoff):
continue
if not done.has_key((index1, index2)):
clist.append(Contact(a1, a2, dist))
done[(index1, index2)] = 1
return clist
if __name__ == '__main__':
from MMTK.PDB import PDBConfiguration
from MMTK import Units
import sys
target_filename = sys.argv[2]
pdb_conf1 = PDBConfiguration(target_filename)
if sys.argv[1][:2] == '-f':
chains = pdb_conf1.createNucleotideChains()
molecule_names = []
if len(chains) >= 2:
clist = findContacts(chains[0], chains[1])
else:
molecule_names = []
for (key, mol) in pdb_conf1.molecules.items():
for o in mol:
molecule_names.append(o.name)
targets = pdb_conf1.createAll(molecule_names = molecule_names)
if len(molecule_names) > 1:
clist = findContacts(targets[0], targets[1])
else:
atoms = targets.atomList()
mid = len(atoms)/2
clist = findContacts(Collection(atoms[:mid]),
Collection(atoms[mid:]))
print len(clist), 'contacts'
for c in clist[:8]:
print '%-64s %6.2f' % (c, c.dist/Units.Ang)
else:
target = pdb_conf1.createAll()
if sys.argv[1][:2] == '-v':
(a, v) = target.surfaceAndVolume()
print 'surface area %.2f volume %.2f' \
% (a/(Units.Ang**2), v/(Units.Ang**3))
elif sys.argv[1][:2] == '-a':
smap = target.surfaceAtoms(probe_radius = 1.4*Units.Ang)
print len(smap.keys()),'of',len(target.atomList()),'atoms on surface'
elif sys.argv[1][:2] == '-p':
smap = target.surfacePointsAndGradients(probe_radius = 1.4*Units.Ang)
for (a, tup) in smap.items():
print '%-40.40s %6.2f %5d %5.1f %5.1f %5.1f' \
% (a.fullName(), tup[0]/(Units.Ang**2), len(tup[1]),
tup[2][0], tup[2][1], tup[2][2])
|
jkilpatr/browbeat | lib/Shaker.py | Python | apache-2.0 | 22,576 | 0.001772 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import Elastic
import Grafana
import json
import logging
import os
import time
import Tools
import uuid
import WorkloadBase
import yaml
class Shaker(WorkloadBase.WorkloadBase):
def __init__(self, config):
self.logger = logging.getLogger('browbeat.Shaker')
self.config = config
self.tools = Tools.Tools(self.config)
self.grafana = Grafana.Grafana(self.config)
self.elastic = Elastic.Elastic(self.config, self.__class__.__name__.lower())
self.error_count = 0
self.pass_count = 0
self.test_count = 0
self.scenario_count = 0
def shaker_checks(self):
cmd = "source /home/stack/overcloudrc; glance image-list | grep -w shaker-image"
if self.tools.run_cmd(cmd) == "":
self.logger.error("Shaker Image is not built, try again")
exit(1)
else:
self.logger.info("Shaker image is built, continuing")
def get_stats(self):
self.logger.info(
"Current number of Shaker tests executed: {}".format(
self.test_count))
self.logger.info(
"Current number of Shaker tests passed: {}".format(
self.pass_count))
self.logger.info(
"Current number of Shaker tests failed: {}".format(
self.error_count))
def accommodation_to_dict(self, accommodation):
accommodation_dict = {}
for item in accommodation:
if isinstance(item, dict):
accommodation_dict.update(item)
else:
accommodation_dict[item] = True
return accommodation_dict
def accommodation_to_list(self, accommodation):
accommodation_list = []
for key, value in accommodation.iteritems():
if value is True:
accommodation_list.append(key)
else:
temp_dict = {}
temp_dict[key] = value
accommodation_list.append(temp_dict)
return accommodation_list
def final_stats(self, total):
self.logger.info(
"Total Shaker scenarios enabled by user: {}".format(total))
self.logger.info(
"Total number of Shaker tests executed: {}".format(
self.test_count))
self.logger.info(
"Total number of Shaker tests passed: {}".format(self.pass_count))
self.logger.info(
"Total number of Shaker tests failed: {}".format(self.error_count))
def update_tests(self):
self.test_count += 1
def update_pass_tests(self):
self.pass_count += 1
def update_fail_tests(self):
self.error_count += 1
def update_scenarios(self):
self.scenario_count += 1
# Method to process JSON outputted by Shaker, model data in a format that can be consumed
# by ElasticSearch and ship the data to ES
def send_to_elastic(self, outputfile, browbeat_scenario,
shaker_uuid, es_ts, es_list, run, test_name, result_dir):
fname = outputfile
failure = False
# Load output json
try:
with open(fname) as data_file:
data = json.load(data_file)
# If output JSON doesn't exist, ship UUID of failed run to ES
except IOError:
self.logger.error(
"The Shaker output JSON cou | ld not be found, pushing details to Elastic")
record = {'status': "error"}
shaker_stats = {
'timestamp': str(es_ts).replac | e(" ", "T"),
'browbeat_scenario': browbeat_scenario,
'shaker_uuid': str(shaker_uuid),
'record': record,
'browbeat_rerun': run
}
result = self.elastic.combine_metadata(shaker_stats)
index_status = self.elastic.index_result(result, test_name, result_dir, _type='error')
if index_status is False:
return False
else:
return True
# Dictionary to capture common test data
shaker_test_meta = {}
for scenario in data['scenarios'].iterkeys():
# Populating common test data
if 'shaker_test_info' not in shaker_test_meta:
shaker_test_meta['shaker_test_info'] = data[
'scenarios'][scenario]
if "progression" not in shaker_test_meta[
'shaker_test_info']['execution']:
shaker_test_meta['shaker_test_info'][
'execution']['progression'] = "all"
accommodation = self.accommodation_to_dict(data['scenarios'][scenario][
'deployment'].pop('accommodation'))
if 'deployment' not in shaker_test_meta:
shaker_test_meta['deployment'] = {}
shaker_test_meta['deployment']['accommodation'] = {}
if 'single' in accommodation:
shaker_test_meta['deployment'][
'accommodation']['distribution'] = 'single'
elif 'pair' in accommodation:
shaker_test_meta['deployment'][
'accommodation']['distribution'] = 'pair'
if 'single_room' in accommodation:
shaker_test_meta['deployment'][
'accommodation']['placement'] = 'single_room'
elif 'double_room' in accommodation:
shaker_test_meta['deployment'][
'accommodation']['placement'] = 'double_room'
if 'density' in accommodation:
shaker_test_meta['deployment']['accommodation'][
'density'] = accommodation['density']
if 'compute_nodes' in accommodation:
shaker_test_meta['deployment']['accommodation'][
'compute_nodes'] = accommodation['compute_nodes']
shaker_test_meta['deployment']['template'] = data[
'scenarios'][scenario]['deployment']['template']
# Iterating through each record to get result values
for record in data['records'].iterkeys():
if data['records'][record]['status'] == "ok" and data[
'records'][record]['executor'] != "shell":
if 'stdout' in data['records'][record]:
del data['records'][record]['stdout']
metadata = data['records'][record].pop('meta')
samples = data['records'][record].pop('samples')
# Ordered Dictionary to capture result types and metrics
outputs = collections.OrderedDict()
for metric in metadata:
outputs[metric[0]] = metric[1]
# Iterate over each result type for each sample in record and
# get associated value
for key in outputs.iterkeys():
if key == "time":
continue
# Iterate in step lock over each list of samples in the
# samples list wrt timestamp
for sample, es_time in zip(samples, es_list):
elastic_timestamp = str(es_time).replace(" ", "T")
result = {}
shaker_stats = {}
result['value'] = sample[outputs.keys().index(key)]
result['metric'] = outputs[key]
result['result_type'] = key
|
lojaintegrada/pyboleto | docs/conf.py | Python | bsd-3-clause | 8,020 | 0.006862 | # -*- coding: utf-8 -*-
#
# pyboleto documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 5 02:10:50 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
import pyboleto
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage',
'sphinx.ext.ifconfig', 'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyboleto'
copyright = u'2012, Eduardo Cereto Carvalho'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __import__('pyboleto').__version__
# The full version, including alpha/beta/rc tags.
release = __import__('pyboleto').__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'pt_BR'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'collapsiblesidebar': True,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyboletodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX pr | eamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyboleto.tex', u'Documentação pyboleto',
u'Eduardo Cereto Carvalho', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are par | ts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyboleto', u'pyboleto Documentation',
[u'Eduardo Cereto Carvalho'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyboleto', u'pyboleto Documentation',
u'Eduardo Cereto Carvalho', 'pyboleto', u'Biblioteca para geração de boletos bancários',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
gridengine/config-api | uge/objects/access_list_v1_0.py | Python | apache-2.0 | 2,599 | 0.005002 | #!/usr/bin/env python
#
# ___INFO__MARK_BEGIN__
#######################################################################################
# Copyright 2016-2021 Univa Corporation (acquired and owned by Altair Engineering Inc.)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# ___INFO__MARK_END__
#
from .qconf_object import QconfObject
class AccessList(QconfObject):
""" This class encapsulates UGE access list object. """
#: Object version.
VERSION = '1.0'
#: Object name key.
NAME_KEY = 'name'
#: Object keys that must be provided by access list.
USER_PROVIDED_KEYS = ['name']
#: Default values for required data keys.
REQUIRED_DATA_DEFAULTS = {
'type': 'ACL',
'fshare': 0,
'oticket': 0,
'entries': None,
}
INT_KEY_MAP = QconfObject.get_int_key_map(REQUIRED_DATA_DEFAULTS)
FLOAT_KEY_MAP = QconfObject.get_float_key_map(REQUIRED_DATA_DEFAULTS)
DEFAULT_LIST_DELIMITER = ','
LIST_KEY_MAP = {
'entries': ',',
}
def __init__(self, name=None, data=None, metadata=None, json_string=None):
"""
Class constructor.
:param name: Access list name. If provided, it will override access list name from data or JSON string parameters ('name' key). :type name: str
:param data: Access list data. If provided, it will override corresponding data from access list JSON string representation.
:type data: dict
:param metadata: Access list metadata. If provided, it will override corresponding metadata from access list JSON string representation.
:type metadata: dict
:param json_string: Access list JSON string representation.
:type json_string: | str
:raises: **InvalidArgument** - in case metadata is not a dictionary, JSON string is not valid, or it does not contain dictionary representing an AccessList object.
"""
QconfObject.__init__(self, name=name, data=data, metadata=metadata | , json_string=json_string)
|
linhdh/SnakeRepellerOnMbed50 | mbed-os/tools/build_api.py | Python | gpl-2.0 | 54,455 | 0.001653 | """
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import tempfile
import datetime
import uuid
from types import ListType
from shutil import rmtree
from os.path import join, exists, dirname, basename, abspath, normpath, splitext
from os.path import relpath
from os import linesep, remove, makedirs
from time import time
from intelhex import IntelHex
from json import load, dump
from tools.utils import mkdir, run_cmd, run_cmd_ext, NotSupportedException,\
ToolException, InvalidReleaseTargetException, intelhex_offset
from tools.paths import MBED_CMSIS_PATH, MBED_TARGETS_PATH, MBED_LIBRARIES,\
MBED_HEADER, MBED_DRIVERS, MBED_PLATFORM, MBED_HAL, MBED_CONFIG_FILE,\
MBED_LIBRARIES_DRIVERS, MBED_LIBRARIES_PLATFORM, MBED_LIBRARIES_HAL,\
BUILD_DIR
from tools.targets import TARGET_NAMES, TARGET_MAP
from tools.libraries import Library
from tools.toolchains import TOOLCHAIN_CLASSES
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
from tools.config import Config
RELEASE_VERSIONS = ['2', '5']
def prep_report(report, target_name, toolchain_name, id_name):
"""Setup report keys
Positional arguments:
report - the report to fill
target_name - the target being used
toolchain_name - the toolchain being used
id_name - the name of the executable or library being built
"""
if not target_name in report:
report[target_name] = {}
if not toolchain_name in report[target_name]:
report[target_name][toolchain_name] = {}
if not id_name in report[target_name][toolchain_name]:
report[target_name][toolchain_name][id_name] = []
def prep_properties(properties, target_name, toolchain_name, vendor_label):
"""Setup test properties
Positional arguments:
properties - the dict to fill
target_name - the target the test is targeting
toolchain_name - the toolchain that will compile the test
vendor_label - the vendor
"""
if not target_name in properties:
properties[target_name] = {}
if not toolchain_name in properties[target_name]:
properties[target_name][toolchain_name] = {}
properties[target_name][toolchain_name]["target"] = target_name
properties[target_name][toolchain_name]["vendor"] = vendor_label
properties[target_name][toolchain_name]["toolchain"] = toolchain_name
def create_result(target_name, toolchain_name, id_name, description):
"""Create a result dictionary
Positional arguments:
target_name - the target being built for
toolchain_name - the toolchain doing the building
id_name - the name of the executable or library being built
description - a human readable description of what's going on
"""
cur_result = {}
cur_result["target_name"] = target_name
cur_result["toolchain_name"] = toolchain_name
cur_result["id"] = id_name
cur_result["description"] = description
cur_result["elapsed_time"] = 0
cur_result["output"] = ""
return cur_result
def add_result_to_report(report, result):
"""Add a single result | to a report dictionary
Positional arguments:
report - the report to append to
result - the result to append
"""
result["date"] = datetime.datetime.utcnow().isoformat()
result["uuid"] = str(uuid.uuid1())
target = result["target_name"]
toolchain = result["toolchain_name"]
id_name = result['id']
result_wrap = {0: result}
report[target][toolchain][id_name | ].append(result_wrap)
def get_config(src_paths, target, toolchain_name):
"""Get the configuration object for a target-toolchain combination
Positional arguments:
src_paths - paths to scan for the configuration files
target - the device we are building for
toolchain_name - the string that identifies the build tools
"""
# Convert src_paths to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
# Pass all params to the unified prepare_resources()
toolchain = prepare_toolchain(src_paths, None, target, toolchain_name)
# Scan src_path for config files
resources = toolchain.scan_resources(src_paths[0])
for path in src_paths[1:]:
resources.add(toolchain.scan_resources(path))
# Update configuration files until added features creates no changes
prev_features = set()
while True:
# Update the configuration with any .json files found while scanning
toolchain.config.add_config_files(resources.json_files)
# Add features while we find new ones
features = set(toolchain.config.get_features())
if features == prev_features:
break
for feature in features:
if feature in resources.features:
resources += resources.features[feature]
prev_features = features
toolchain.config.validate_config()
if toolchain.config.has_regions:
_ = list(toolchain.config.regions)
cfg, macros = toolchain.config.get_config_data()
features = toolchain.config.get_features()
return cfg, macros, features
def is_official_target(target_name, version):
""" Returns True, None if a target is part of the official release for the
given version. Return False, 'reason' if a target is not part of the
official release for the given version.
Positional arguments:
target_name - Name if the target (ex. 'K64F')
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
result = True
reason = None
target = TARGET_MAP[target_name]
if hasattr(target, 'release_versions') \
and version in target.release_versions:
if version == '2':
# For version 2, either ARM or uARM toolchain support is required
required_toolchains = set(['ARM', 'uARM'])
if not len(required_toolchains.intersection(
set(target.supported_toolchains))) > 0:
result = False
reason = ("Target '%s' must support " % target.name) + \
("one of the folowing toolchains to be included in the") + \
((" mbed 2.0 official release: %s" + linesep) %
", ".join(required_toolchains)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(target.supported_toolchains))
elif version == '5':
# For version 5, ARM, GCC_ARM, and IAR toolchain support is required
required_toolchains = set(['ARM', 'GCC_ARM', 'IAR'])
required_toolchains_sorted = list(required_toolchains)
required_toolchains_sorted.sort()
supported_toolchains = set(target.supported_toolchains)
supported_toolchains_sorted = list(supported_toolchains)
supported_toolchains_sorted.sort()
if not required_toolchains.issubset(supported_toolchains):
result = False
reason = ("Target '%s' must support " % target.name) + \
("ALL of the folowing toolchains to be included in the") + \
((" mbed OS 5.0 official release: %s" + linesep) %
", ".join(required_toolchains_sorted)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(supported_toolchains_sorted))
elif not target.default_lib == 'std':
result = False
reason = ("Target '%s' must set the " % target.n |
davcastroruiz/boston-django-project | webSite/music/models.py | Python | gpl-3.0 | 878 | 0.001139 | from django.db import models
from django.core.urlresolvers import reverse
class Album(models.Model):
artist = models.CharField(max_length=250)
album_title = models.CharField(max_length=500)
genre = models.CharField(max_length=100)
album_logo = models.FileField()
def get_absolute_url(self):
return reverse('music:detail', kwargs={'pk': self.pk})
def __str__(self):
return self.artist + " - " + self.album_title
class Song(models.Model):
album = models.ForeignKey(Album, on_del | ete=models.CASCADE)
song_title = models.CharField(max_length=500)
file_type = models.CharField(max_length=10)
is_favorite = models.BooleanField(default=False)
def get_absolute_url(self):
return reverse('music:detail', kwargs={'pk': self.album_i | d})
def __str__(self):
return self.song_title + "." + self.file_type
|
smartxworks/jira-comment-slack | setup.py | Python | mit | 1,060 | 0 | # -*- coding: utf-8 -*-
# Copyright (c) 2013-2015, SMARTX
import os
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
setup(
name='jira-comment-slack',
version='0.1',
url='https://github.com/smartxworks/jira-comment-slack',
license='MIT',
author='SmartXWorks/Kisung',
description='Send JIRA co | mment update to slack channel',
long_description=__doc__,
py_modules=["jira_comment_slack"],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
| 'requests',
],
entry_points={
'console_scripts': [
'jira-comment-slack-server = jira_comment_slack:main',
],
},
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
assertnotnull/bookmarklets | bookmarklets/bookmarkletsapp/methods.py | Python | mit | 211 | 0.004739 | from .model | s import Vote
def get_review_avg(bid):
votes = Vote.objects.filter(bookmarklet_id=bid)
total = len(votes)
avg = 0.0
for vote i | n votes:
avg += vote.rating
return avg/total |
chrisspen/django-pjm | django_pjm/management/commands/import_pjm_prices.py | Python | mit | 798 | 0.005013 | from optparse import make_option
from django.core.management.base import NoArgsCommand, BaseCommand
from django_pjm import models
class Command(BaseCommand):
help = "Imports PJM locational marginal pricing day-ahead data."
args = ''
option_list = BaseCommand.option_list + (
make_option('--start-year', defau | lt=0),
make_option('--end-year', default=0),
make_option('--only-type', default=None),
make_option('--auto-reprocess-days', default=0),
)
def handle(self, **options):
models.Node.load(
start_year=int(options['start_year']),
end_year=int(options['end_year']),
only_type=options['only_type'].strip(),
auto_ | reprocess_days=int(options['auto_reprocess_days']),
)
|
eBrnd/i3pystatus | i3pystatus/syncthing.py | Python | mit | 4,026 | 0.000248 | import json
import os.path
import requests
from subprocess import call
from urllib.parse import urljoin
import xml.etree.ElementTree as ET
from i3pystatus import IntervalModule
from i3pystatus.core.util import user_open
class Syncthing(IntervalModule):
"""
Check Syncthing's online status and start/stop Syncthing via
click events.
Requires `requests`.
"""
format_up = 'ST up'
color_up = '#00ff00'
format_down = 'ST down'
color_down = '#ff0000'
configfile = '~/.config/syncthing/config.xml'
url = 'auto'
apikey = 'auto'
verify_ssl = True
interval = 10
on_leftclick = 'st_open'
on_rightclick = 'st_toggle_systemd'
settings = (
('format_up', 'Text to show when Syncthing is running'),
('format_down', 'Text to show when Syncthing is not running'),
('color_up', 'Color when Syncthing is running'),
('color_down', 'Color when Syncthing is not running'),
('configfile', 'Path to Syncthing config'),
('url', 'Syncthing GUI URL; "auto" reads from local config'),
('apikey', 'Syncthing APIKEY; "auto" reads from local config'),
('verify_ssl', 'Verify SSL certificate'),
)
def st_get(self, endpoint):
response = requests.get(
urljoin(self.url, endpoint),
verify=self.verify_ssl,
)
return json.loads(response.text)
def st_post(self, endpoint, data=None):
headers = {'X-API-KEY': self.apikey}
requests.post(
urljoin(self.url, endpoint),
data=data,
headers=headers,
)
def read_config(self):
self.configfile = os.path.expanduser(self.configfile)
# Parse config only once!
if self.url == 'auto' or self.apikey == 'auto':
tree = ET.parse(self.configfile)
root = tree.getroot()
if self.url == 'auto':
tls = root.find('./gui').attrib['tls']
address = root.find('./gui/address').text
if tls == 'true':
self.url = 'https://' + address
else:
self.url = 'http://' + address
if self.apikey == 'auto':
self.apikey = root.find('./gui/apike | y').text
def ping(self):
try:
ping_data = self.st_get('/ | rest/system/ping')
if ping_data['ping'] == 'pong':
return True
else:
return False
except requests.exceptions.ConnectionError:
return False
def run(self):
self.read_config()
self.online = True if self.ping() else False
if self.online:
self.output = {
'full_text': self.format_up,
'color': self.color_up
}
else:
self.output = {
'full_text': self.format_down,
'color': self.color_down
}
# Callbacks
def st_open(self):
"""Callback: Open Syncthing web UI"""
user_open(self.url)
def st_restart(self):
"""Callback: Restart Syncthing"""
self.st_post('/rest/system/restart')
def st_stop(self):
"""Callback: Stop Syncthing"""
self.st_post('/rest/system/shutdown')
def st_start_systemd(self):
"""Callback: systemctl --user start syncthing.service"""
call(['systemctl', '--user', 'start', 'syncthing.service'])
def st_restart_systemd(self):
"""Callback: systemctl --user restart syncthing.service"""
call(['systemctl', '--user', 'restart', 'syncthing.service'])
def st_stop_systemd(self):
"""Callback: systemctl --user stop syncthing.service"""
call(['systemctl', '--user', 'stop', 'syncthing.service'])
def st_toggle_systemd(self):
"""Callback: start Syncthing service if offline, or stop it when online"""
if self.online:
self.st_stop_systemd()
else:
self.st_start_systemd()
|
BeTeK/EliteMerchant | src/ui/TabAbstract.py | Python | bsd-3-clause | 1,019 | 0.040236 | import time, Options
class TabAbstract:
def setTabName(self, name):
raise NotImplemented()
def getTabName(self):
raise NotImplemented()
def getType(self):
raise NotImplemented()
def dispose(self):
raise NotImplemented()
def AgeToColor(self,timestamp):
def clamp(minv,maxv,v):
return max(minv | ,min(maxv,v))
age= ( (time.time() - timestamp )/(60.0*60.0*24.0) ) / float(Options.get("Market-valid-days", 7))
colorblind=False # todo: add an option
# colorblind mode
if (colorblind):
print(age)
i=clamp(64,255,255-age*128)
return i,i,i
# if data is older than market validity horizon | , have it show
desat=1
if age>1.0:
desat=0
age*=2
r=255*clamp(0.2*desat,1.0,(age-1)+0.5)
g=255*clamp(0.4*desat,1.0,(1-age)+0.5)
b=255*clamp(0.5*desat,1.0,(abs(age-1)*-1)+1)
return r,g,b |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.