repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
ctools/ctools | test/science_verification.py | Python | gpl-3.0 | 24,640 | 0.002476 | #! /usr/bin/env python
# ==========================================================================
# This script performs the ctools science verification. It creates and
# analyses the pull distributions for a variety of spectral and spatial
# models. Test are generally done in unbinned mode, but also a stacked
# analysis test is included. At the end the script produces a JUnit
# compliant science verification report.
#
# Usage:
# ./science_verification.py
#
# --------------------------------------------------------------------------
#
# Copyright (C) 2015-2021 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import os
import csv
import math
import sys
import gammalib
#import ctools
import cscripts
# ========================== #
# Generate pull distribution #
# ========================== #
def generate_pull_distribution(model, obs='NONE', onsrc='NONE', onrad=0.2, \
trials=100, caldb='prod2', irf='South_50h', \
deadc=0.98, edisp=False, \
ra=83.63, dec=22.01, rad=5.0, \
emin=0.1, emax=100.0, enumbins=0, \
duration=1800.0, \
npix=200, binsz=0.02, \
coordsys='CEL', proj='TAN', \
debug=False, chatter=2):
"""
Generates pull distribution for a given model
Parameters
----------
model : str
Model XML filename (without .xml extension)
obs : str, optional
Input observation definition XML filename
onsrc : str, optional
Name of On source for On/Off analysis
onrad : float, optional
Radius of On region
trials : int, optional
Number of trials
caldb : str, optional
Calibration database
irf : str, optional
Name of instrument response function
deadc : float, optional
Deadtime correction factor
edisp : bool, optional
Use energy dispersion?
ra : float, optional
Right Ascension of pointing (deg)
dec : float, optional
Declination of pointing (deg)
rad : float, optional
Simulation radius (deg)
emin : float, optional
Minimum energy (TeV)
emax : float, optional
Maximum energy (TeV)
enumbins : int, optional
Number of energy bins (0 for unbinned analysis)
duration : float, optional
Observation duration (sec)
npix : int, optional
Number of pixels
binsz : float, optional
Pixel size (deg/pixel)
coordsys : str, optional
Coordinate system (CEL or GAL)
proj : str, optional
Sky projection
debug : bool, optional
Enable debugging?
chatter : int, optional
Chatter level
Returns
-------
outfile : str
Name of pull distribution output file
"""
# Derive parameters
_, tail = os.path.split(model)
inmodel = model + '.xml'
outfile = 'cspull_' + tail + '.fits'
# Setup pull distribution generation
pull = cscripts.cspull()
pull['inobs'] = obs
pull['inmodel'] = inmodel
pull['onsrc'] = onsrc
pull['onrad'] = onrad
pull['outfile'] = outfile
pull['caldb'] = caldb
pull['irf'] = irf
pull['edisp'] = edisp
pull['ra'] = ra
pull['dec'] = dec
pull['rad'] = rad
pull['emin'] = emin
pull['emax'] = emax
pull['tmin'] = 0.0
pull['tmax'] = duration
pull['enumbins'] = enumbins
pull['npix'] = npix
pull['binsz'] = binsz
pull['coordsys'] = coordsys
pull['proj'] = proj
pull['deadc'] = deadc
pull['rad'] = rad
pull['ntrials'] = trials
pull['debug'] = debug
pull['chatter'] = chatter
# Generate pull distributions
pull.execute()
# Return
return outfile
# ========================= #
# Analyse pull distribution #
# ========================= #
def analyse_pull_distribution(filename):
"""
Compute mean and standard deviation of pull distribution
Parameters
----------
filename : str
Pull distribution ASCII file to analyse
Returns
-------
results : dict
Result dictionary
"""
# Initialise column names, means and standard deviations
colnames = []
means = []
stds = []
# Open FITS file
fits = gammalib.GFits(filename)
| # Get pull distribution table
table = fits.table('PULL_DISTRIBUTION')
nrows = table.nrows()
ncolumns = table.ncols()
# Loop over columns
for i in range(ncolumns | ):
# Get table column
column = table[i]
# Get column names and initialise mean and standard deviations
colnames.append(column.name())
# Compute means and standard deciation
mean = 0.0
std = 0.0
samples = 0.0
for row in range(nrows):
mean += float(column[row])
std += float(column[row])*float(column[row])
samples += 1.0
std = math.sqrt(std/samples - mean*mean/(samples*samples))
mean /= samples
# Store mean and standard deviations
means.append(mean)
stds.append(std)
# Setup results
results = {}
for i in range(len(colnames)):
results[colnames[i]] = {'mean': means[i], 'std': stds[i]}
# Return results
return results
# =================================== #
# Test class for science verification #
# =================================== #
class sciver(gammalib.GPythonTestSuite):
"""
Test class for science verification
"""
# Constructor
def __init__(self):
"""
Constructor
"""
# Call base class constructor
gammalib.GPythonTestSuite.__init__(self)
# Initialise results
self.results = None
# Return
return
# Set test functions
def set(self):
"""
Set all test functions
"""
# Set test name
self.name('Science Verification')
# Append background model test
self.append(self.bgd, 'Test background model')
# Append spectral tests
self.append(self.spec_plaw, 'Test power law model')
self.append(self.spec_plaw_edisp, 'Test power law model with energy dispersion')
self.append(self.spec_plaw_stacked, 'Test power law model with stacked analysis')
self.append(self.spec_plaw_onoff, 'Test power law model with On/Off analysis')
self.append(self.spec_plaw2, 'Test power law 2 model')
self.append(self.spec_smoothbplaw, 'Test smoothly broken power law model')
self.append(self.spec_eplaw, 'Test exponentially cut off power law model')
self.append(self.spec_supeplaw, 'Test super exponentially cut off power law model')
self.append(self.spec_logparabola, 'Test log parabola model')
self.append(self.spec_gauss, 'Test Gaussian model')
self.append(self.spec_filefct, 'Test file function model')
self.append(self.spec_nodes, 'Test nodes model')
self.append(self.spec_table, 'Test table model')
self.append(self.spec_exponential, 'Test exponential model')
# Append spatial tests
self.append(self.spat_ptsrc, 'Test point source model')
self.append(self.spat_rdisk, 'Test radial disk model')
self.append(self.spat_rring, 'Test radial rin |
spk/flask-recipes | app/views.py | Python | mit | 2,282 | 0 | from flask import render_template, request, jsonify, Blueprint
from .models import Recipe, Category
from .schemas import RecipeSchema, PaginationSchema
recipes = Blueprint("recipes", __name__)
DEFAULT_PER_PAGE = 10
MAX_PER_PAGE = 1000
@recipes.route('/api/v1/<int:id>')
def api_get_recipe(id):
recipe = Recipe.query.get_or_404(id)
result = RecipeSchema().dump(recipe)
return jsonify(result)
@recipes.route('/api/v1/recipes', defaults={'page': 1})
@recipes.route('/api/v1/recipes/page/<int:page>')
def api_get_recipes(page):
per_page = get_per_page()
pagination = Recipe.query.order_by(
Recipe.created_at.desc()).paginate(
page, per_page)
result = Pa | ginationSchema().dump(pagination)
return jsonify(result)
@recipes.route('/random')
def random():
recipe = Recipe.random().first_or_404()
return render_temp | late('show.html', recipe=recipe)
@recipes.route('/recipes/<id>')
def show(id):
recipe = Recipe.query.get_or_404(id)
return render_template('show.html', recipe=recipe)
@recipes.route('/categories/', defaults={'page': 1})
@recipes.route('/categories/page/<int:page>')
def categories(page):
per_page = get_per_page()
pagination = Category.query.order_by(
Category.created_at.desc()).paginate(
page, per_page)
return render_template('categories.html', pagination=pagination)
@recipes.route('/categories/<title>', defaults={'page': 1})
@recipes.route('/categories/<title>/page/<int:page>')
def recipes_by_category(title, page):
per_page = get_per_page()
pagination = Category.query.filter_by(
title=title).first_or_404().recipes.paginate(
page, per_page)
return render_template('index.html', pagination=pagination)
@recipes.route('/', defaults={'page': 1})
@recipes.route('/page/<int:page>')
def index(page):
per_page = get_per_page()
pagination = Recipe.query.order_by(
Recipe.created_at.desc()).paginate(
page, per_page)
return render_template('index.html', pagination=pagination)
def get_per_page():
per_page = request.args.get('per_page')
if per_page and per_page.isdigit() and int(per_page) <= MAX_PER_PAGE:
per_page = int(per_page)
else:
per_page = DEFAULT_PER_PAGE
return per_page
|
chadmv/plow | lib/python/plow/rndaemon/server.py | Python | apache-2.0 | 2,191 | 0.003651 | #!/usr/bin/env python
import logging
import sys
import os
import signal
import conf
import core
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.pr | otocol.TBinaryProtocol import TBinaryProtocolAcceleratedFactory
from thrift.server import TServer
from rpc import RndNodeApi
logger = logging.getLogger(__name__)
class RndProcessHandler(object):
def runTask(self, rtc):
logger.debug("starting core.ProcessMgr.runProcess(rtc): %s", rtc.taskId)
core.ProcessMgr.runProcess(rtc)
logger.debug("finished core.ProcessMgr.runProcess(rtc): %s", rtc.taskId)
d | ef killRunningTask(self, procId, reason):
core.ProcessMgr.killRunningTask(procId, reason)
def getRunningTasks(self):
logger.debug("starting core.ProcessMgr.getRunningTasks()")
tasks = core.ProcessMgr.getRunningTasks()
logger.debug("finished core.ProcessMgr.getRunningTasks()")
return tasks
def reboot(self, now=False):
core.ProcessMgr.reboot(now)
def pingPong(self, withTasks=False):
ping = core.Profiler.getPing()
ping.isReboot = core.ProcessMgr.isReboot
if withTasks:
ping.tasks = self.getRunningTasks()
return ping
def get_server(api, handler, port, **kwargs):
processor = api.Processor(handler)
socket = TSocket.TServerSocket(port=port)
tfactory = kwargs.get('transport') or TTransport.TFramedTransportFactory()
pfactory = kwargs.get('protocol') or TBinaryProtocolAcceleratedFactory()
server = TServer.TThreadPoolServer(processor, socket, tfactory, pfactory)
server.setNumThreads(8)
return server
def exit_handler(*args):
logger.info("Caught SIGTERM. Shutting down Process Manager...")
core.ProcessMgr.shutdown()
logger.info("Process Manager finished shutting down")
os._exit(0)
signal.signal(signal.SIGTERM, exit_handler)
def start():
logger.info("Staring Render Node Daemon on TCP port %d" % conf.NETWORK_PORT)
server = get_server(RndNodeApi, RndProcessHandler(), conf.NETWORK_PORT)
try:
server.serve()
except KeyboardInterrupt:
exit_handler()
sys.exit(0)
|
sindresf/The-Playground | Python/Machine Learning/LSTM Music Visualizer/LSTM Music Visualizer/graphics_module/initialization.py | Python | mit | 576 | 0.026042 | from graphics_module.objects import *
import numpy as np
def make_pixels_array_basic(amount):
return np.full(10,Pixel(), dtype=np.object)
def make_pixels_ar | ray_config_based(config):
if config.colorscheme == "b&w":
c = Color()
elif config.colorscheme == "light":
c = Color(r=245,g=235,b=234,a=0.85) #"light" or whatever to be slightly colorized dots
if config.aplha == True:
lol = 4 #random influenced aplha
#and so on
def get_color(config):
if not config:#has attribute "lower_limit": I don't know
| lower_limit = 230
|
sio2project/oioioi | oioioi/problems/menu.py | Python | gpl-3.0 | 162 | 0 | from | django.utils.translation impor | t ugettext_lazy as _
from oioioi.base.menu import MenuRegistry
navbar_links_registry = MenuRegistry(_("Navigation Bar Menu"))
|
qedsoftware/commcare-hq | corehq/form_processor/change_publishers.py | Python | bsd-3-clause | 4,539 | 0.001542 | from casexml.apps.case.xform import get_case_ids_from_form
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.producer import producer
from corehq.apps.change_feed import data_sources
from corehq.form_processor.interfaces.dbaccessors import FormAccessors, CaseAccessors
from corehq.form_processor.signals import sql_case_post_save
from pillowtop.feed.interface import ChangeMeta
def republish_all_changes_for_form(domain, form_id):
"""
Publishes all changes for the form and any touched cases/ledgers.
"""
form = FormAccessors(domain=domain).get_form(form_id)
publish_form_saved(form)
for case in _get_cases_from_form(domain, form):
publish_case_saved(case, send_post_save_signal=False)
_publish_ledgers_from_form(domain, form)
def publish_form_saved(form):
producer.send_change(topics.FORM_SQL, change_meta_from_sql_form(form))
def change_meta_from_sql_form(form):
return ChangeMeta(
document_id=form.form_id,
data_source_type=data_sources.FORM_SQL,
data_source_name='form-sql', # todo: this isn't really needed.
document_type=form.doc_type,
document_subtype=form.xmlns,
domain=form.domain,
is_deletion=form.is_deleted,
)
def publish_form_deleted(domain, form_id):
producer.send_change(topics.FORM_SQL, ChangeMeta(
document_id=form_id,
data_source_type=data_sources.FORM_SQL,
data_source_name | ='form-sql',
document_type='XFormInstance-Deleted',
domain=domain,
is_deletion=True,
))
def publish_case_saved(case, send_post_save_signal=True):
"""
Publish the change to kafka and run case post-save signals.
"""
producer.send_change(topics.CASE_SQL, change_meta_from_sql_case(case))
if send_post_save_signal:
sql_ca | se_post_save.send(case.__class__, case=case)
def change_meta_from_sql_case(case):
return ChangeMeta(
document_id=case.case_id,
data_source_type=data_sources.CASE_SQL,
data_source_name='case-sql', # todo: this isn't really needed.
document_type='CommCareCase',
document_subtype=case.type,
domain=case.domain,
is_deletion=case.is_deleted,
)
def publish_case_deleted(domain, case_id):
producer.send_change(topics.CASE_SQL, ChangeMeta(
document_id=case_id,
data_source_type=data_sources.CASE_SQL,
data_source_name='case-sql', # todo: this isn't really needed.
document_type='CommCareCase-Deleted',
domain=domain,
is_deletion=True,
))
def publish_ledger_v2_saved(ledger_value):
producer.send_change(topics.LEDGER, change_meta_from_ledger_v2(ledger_value))
def change_meta_from_ledger_v2(ledger_value):
return ChangeMeta(
document_id=ledger_value.ledger_reference.as_id(),
data_source_type=data_sources.LEDGER_V2,
data_source_name='ledger-v2', # todo: this isn't really needed.
domain=ledger_value.domain,
is_deletion=False,
)
def publish_ledger_v1_saved(stock_state):
producer.send_change(topics.LEDGER, change_meta_from_ledger_v1(stock_state))
def change_meta_from_ledger_v1(stock_state):
return ChangeMeta(
document_id=stock_state.pk,
data_source_type=data_sources.LEDGER_V1,
data_source_name='ledger-v1', # todo: this isn't really needed.
domain=stock_state.domain,
is_deletion=False,
)
def _get_cases_from_form(domain, form):
from corehq.form_processor.parsers.ledgers.form import get_case_ids_from_stock_transactions
case_ids = get_case_ids_from_form(form) | get_case_ids_from_stock_transactions(form)
return CaseAccessors(domain).get_cases(list(case_ids))
def _publish_ledgers_from_form(domain, form):
from corehq.form_processor.parsers.ledgers.form import get_all_stock_report_helpers_from_form
unique_references = {
transaction.ledger_reference
for helper in get_all_stock_report_helpers_from_form(form)
for transaction in helper.transactions
}
for ledger_reference in unique_references:
producer.send_change(topics.LEDGER, _change_meta_from_ledger_reference(domain, ledger_reference))
def _change_meta_from_ledger_reference(domain, ledger_reference):
return ChangeMeta(
document_id=ledger_reference.as_id(),
data_source_type=data_sources.LEDGER_V2,
data_source_name='ledger-v2', # todo: this isn't really needed.
domain=domain,
is_deletion=False,
)
|
okuta/chainer | tests/chainer_tests/functions_tests/loss_tests/test_contrastive.py | Python | mit | 6,422 | 0 | import math
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
f | rom chainer.testing import attr
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float | 16,
'forward_options': {'rtol': 1e-2, 'atol': 1e-2},
'backward_options': {'rtol': 1e-2, 'atol': 1e-3},
'double_backward_options': {'rtol': 3e-1, 'atol': 3e-1}},
{'dtype': numpy.float32,
'forward_options': {'rtol': 1e-2},
'backward_options': {'rtol': 1e-2, 'atol': 1e-3},
'double_backward_options': {'rtol': 1e-2, 'atol': 1e-3}},
{'dtype': numpy.float64,
'forward_options': {'rtol': 1e-2},
'backward_options': {'rtol': 1e-2, 'atol': 1e-3},
'double_backward_options': {'rtol': 1e-2, 'atol': 1e-3}},
],
testing.product({
'batchsize': [5, 10],
'input_dim': [2, 3],
'margin': [1, 2],
'reduce': ['mean', 'no'],
'label_dtype': [numpy.int32, numpy.int64]
})
))
class TestContrastive(unittest.TestCase):
def setUp(self):
x_shape = (self.batchsize, self.input_dim)
retry = 0
while True:
self.x0 = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.x1 = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
d = numpy.sqrt(numpy.sum((self.x0 - self.x1) ** 2, axis=1))
if (d > 3e-2).all() and (numpy.abs(d - self.margin) > 1e-2).all():
break
retry += 1
assert retry <= 10, 'Too many retries to generate inputs'
self.t = numpy.random.randint(
0, 2, (self.batchsize,)).astype(self.label_dtype)
if self.reduce == 'mean':
self.gy = numpy.random.uniform(-1, 1, ()).astype(self.dtype)
else:
self.gy = numpy.random.uniform(
-1, 1, (self.batchsize,)).astype(self.dtype)
self.gx0 = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.gx1 = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
def check_forward(self, x0_data, x1_data, t_data):
x0_val = chainer.Variable(x0_data)
x1_val = chainer.Variable(x1_data)
t_val = chainer.Variable(t_data)
loss = functions.contrastive(
x0_val, x1_val, t_val, self.margin, self.reduce)
self.assertEqual(loss.data.dtype, self.dtype)
if self.reduce == 'mean':
self.assertEqual(loss.data.shape, ())
else:
self.assertEqual(loss.data.shape, (self.batchsize,))
loss_value = cuda.to_cpu(loss.data)
# Compute expected value
loss_expect = numpy.empty((self.batchsize,), self.dtype)
for i in six.moves.range(self.x0.shape[0]):
x0d, x1d, td = self.x0[i], self.x1[i], self.t[i]
d = numpy.sum((x0d - x1d) ** 2)
if td == 1: # similar pair
loss_expect[i] = d
elif td == 0: # dissimilar pair
loss_expect[i] = max(self.margin - math.sqrt(d), 0) ** 2
loss_expect[i] /= 2.
if self.reduce == 'mean':
loss_expect = numpy.sum(loss_expect) / self.t.shape[0]
numpy.testing.assert_allclose(
loss_expect, loss_value, **self.forward_options)
def test_negative_margin(self):
self.margin = -1
self.assertRaises(ValueError, self.check_forward,
self.x0, self.x1, self.t)
self.assertRaises(ValueError, self.check_backward,
self.x0, self.x1, self.t, self.gy)
def test_forward_cpu(self):
self.check_forward(self.x0, self.x1, self.t)
@attr.gpu
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x0), cuda.to_gpu(self.x1),
cuda.to_gpu(self.t))
def check_backward(self, x0_data, x1_data, t_data, gy_data):
def f(x0, x1, t):
return functions.contrastive(x0, x1, t, self.margin, self.reduce)
gradient_check.check_backward(
f, (x0_data, x1_data, t_data), gy_data, dtype='d',
**self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.x0, self.x1, self.t, self.gy)
@attr.gpu
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x0), cuda.to_gpu(self.x1),
cuda.to_gpu(self.t), cuda.to_gpu(self.gy))
def test_backward_zero_dist_cpu(self):
self.check_backward(self.x0, self.x0, self.t, self.gy)
@attr.gpu
def test_backward_zero_dist_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x0), cuda.to_gpu(self.x0),
cuda.to_gpu(self.t), cuda.to_gpu(self.gy))
def check_double_backward(
self, x0_data, x1_data, t_data, gy_data, gx0_data, gx1_data):
def f(x0, x1):
return functions.contrastive(
x0, x1, t_data, self.margin, self.reduce)
gradient_check.check_double_backward(
f, (x0_data, x1_data), gy_data,
(gx0_data, gx1_data),
dtype='f', **self.double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(
self.x0, self.x1, self.t, self.gy, self.gx0, self.gx1)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x0), cuda.to_gpu(self.x1),
cuda.to_gpu(self.t), cuda.to_gpu(self.gy),
cuda.to_gpu(self.gx0), cuda.to_gpu(self.gx1))
class TestContrastiveInvalidReductionOption(unittest.TestCase):
def setUp(self):
self.x0 = numpy.random.uniform(-1, 1, (5, 10)).astype(numpy.float32)
self.x1 = numpy.random.uniform(-1, 1, (5, 10)).astype(numpy.float32)
self.t = numpy.random.randint(0, 2, (5,)).astype(numpy.int32)
def check_invalid_option(self, xp):
x0 = xp.asarray(self.x0)
x1 = xp.asarray(self.x1)
t = xp.asarray(self.t)
with self.assertRaises(ValueError):
functions.contrastive(x0, x1, t, 1, 'invalid_option')
def test_invalid_option_cpu(self):
self.check_invalid_option(numpy)
@attr.gpu
def test_invalid_option_gpu(self):
self.check_invalid_option(cuda.cupy)
testing.run_module(__name__, __file__)
|
lduarte1991/edx-platform | lms/envs/devstack_docker.py | Python | agpl-3.0 | 2,419 | 0.000827 | """ Overrides for Docker-based devstack. """
from .devstack import * # pylint: disable=wildcard-import, unused-wildcard-import
# Docker does not support the syslog socket at /dev/log. Rely on the console.
LOGGING['handlers']['local'] = LOGGING['handlers']['tracking'] = {
'class': 'logging.NullHandler',
}
LOGGING['loggers']['tracking']['handlers'] = ['console']
LMS_BASE = 'edx.devstack.lms:18000'
CMS_BASE = 'edx.devstack.studio:18010'
SITE_NAME = LMS_BASE
LMS_ROOT_URL = 'http://{}'.format(LMS_BASE)
LMS_INTERNAL_ROOT_URL = LMS_ROOT_URL
ECOMMERCE_PUBLIC_URL_ROOT = 'http://localhost:18130'
ECOMMERCE_API_URL = 'http://edx.devstack.ecommerce:18130/api/v2'
COMMENTS_SERVICE_URL = 'http://edx.devstack.forum:4567'
ENTERPRISE_API_URL = '{}/enterprise/api/v1/'.format(LMS_INTERNAL_ROOT_URL)
CREDENTIALS_INTERNAL_SERVICE_URL = 'http://edx.devstack.credentials:18150'
CREDENTIALS_PUBLIC_SERVICE_URL = 'http://localhost:18150'
OAUTH_OIDC_ISSUER = '{}/oauth2'.format(LMS_ROOT_URL)
JWT_AUTH.update({
'JWT_SECRET_KEY': 'lms-secret',
'JWT_ISSUER': OAUTH_OIDC_ISSUER,
'JWT_AUDIENCE': 'lms-key',
})
FEATURES.update({
'AUTOMATIC_AUTH_FOR_TESTING': True,
'ENABLE_COURSEWARE_SEARCH': False,
'ENABLE_COURSE_DISCOVERY': False,
'ENABLE_DASHBOARD_SEARCH': False,
'ENABLE_DISCUSSION_SERVICE': True,
'SHOW_HEADER_LANGUAGE_SELECTOR': True,
'ENABLE_ENTERPRISE_INTEGRATION': False,
})
ENABLE_MKTG_SITE = os.environ.get('ENABLE_MARKETING_SITE', False)
MARKETING_SITE_ROOT = os.environ.get('MARKETING_SITE_ROOT', 'http://localhost:8080')
MKTG_URLS = {
'ABOUT': '/about',
'ACCESSIBILITY': '/accessibility',
'AFFILIATES': '/affiliates',
'BLOG': '/blog',
'CAREERS': '/careers',
'CONTACT': '/contact',
'COURSES': '/course',
'DONATE': '/ | donate',
'ENTERPRISE': '/enterprise',
'FAQ': '/student-faq',
'HONOR': '/edx-terms-service',
'HOW_ | IT_WORKS': '/how-it-works',
'MEDIA_KIT': '/media-kit',
'NEWS': '/news-announcements',
'PRESS': '/press',
'PRIVACY': '/edx-privacy-policy',
'ROOT': MARKETING_SITE_ROOT,
'SCHOOLS': '/schools-partners',
'SITE_MAP': '/sitemap',
'TOS': '/edx-terms-service',
'TOS_AND_HONOR': '/edx-terms-service',
'WHAT_IS_VERIFIED_CERT': '/verified-certificate',
}
CREDENTIALS_SERVICE_USERNAME = 'credentials_worker'
COURSE_CATALOG_API_URL = 'http://edx.devstack.discovery:18381/api/v1/'
|
benzrf/Lispnoria | parthial_ext.py | Python | gpl-3.0 | 3,208 | 0.003117 | import supybot.callbacks as callbacks
import supybot.ircutils as ircutils
import supybot.ircmsgs as ircmsgs
from parthial.vals import LispSymbol, LispList, LispFunc, LispBuiltin
from parthial.errs import LimitationError
from parthial import built_ins
import re
import threading
parseMessage = re.compile('%s: (?P<content>.*)' %
ircutils.nickRe.pattern.lstrip('^').rstrip('$'))
class FakeIrc:
def __init__(self, irc):
self._irc = irc
self._data = ''
self._event = threading.Event()
def _set_data(self, message):
if isinstance(message, ircmsgs.IrcMsg):
if message.command in ('PRIVMSG', 'NOTICE'):
parsed = parseMessage.match(message.args[1])
if parsed is not None:
message = parsed.group('content')
else:
message = message.args[1]
self._set_data(message)
else:
self._irc.queueMsg(message)
return
self._data = message
self._event.set()
error = _set_data
reply = _set_data
queueMsg = _set_data
def __getattr__(self, name):
return getattr(self._irc, name)
def lisp_cmd(self, ctx, args):
for arg, val in enumerate(args):
built_ins.check_type(self, val, LispSymbol, arg + 1)
args = [s.val for s in args]
pl, i, m = ctx.bot_ctx
fakeIrc = FakeIrc(i)
pl.Proxy(fakeIrc, m, args, nested=getattr(i, 'nested', 0) + 1)
fakeIrc._event.wait(10)
res = fakeIrc._data
if len(res) > 1024:
raise LimitationError('symbol result too large')
return ctx.env.new(LispSymbol(res))
class CommandGlobals:
def __init__(self, d, irc):
self.d = d
self.irc = irc
def cmd_exists(self, cmd):
cmd = callbacks.tokenize(cmd)
cmd = list(map(callbacks.canonicalName, cmd))
# v this is awful and I'm sorry v
maxL, cbs = callbacks.NestedCommandsIrcProxy.findCallbacksForArgs(self, cmd)
return maxL == cmd and len(cbs) == 1
def __getitem__(self, k):
try:
return self.d[k]
except KeyError:
if self.cmd_exists(k):
cmd = list(map(LispSymbol, callbacks.tokenize(k)))
def wrapper(self, ctx, args):
return lisp_cmd(self, ctx, cmd + args)
return LispBuiltin(wrapper, k)
else:
raise
def __setitem__(self, *args, **kwargs):
return self.d.__setitem__(*args, **kwargs)
def __delitem__(self, *args, | **kwargs):
return self.d.__delitem__(*args, **kwargs)
def __contai | ns__(self, k):
d_contains = self.d.__contains__(k)
if d_contains:
return d_contains
else:
return self.cmd_exists(k)
underlying = built_ins.default_globals.copy()
underlying['cmd'] = LispBuiltin(lisp_cmd, 'cmd')
@built_ins.built_in(underlying, 'src')
def lisp_src(self, ctx, f):
built_ins.check_type(self, f, LispFunc, 1)
pars = LispList(list(map(LispSymbol, f.pars)))
return LispList([LispSymbol('lambda'), pars, f.body])
def bot_globals(irc):
return CommandGlobals(underlying, irc)
|
dsanders11/easypost-python | tests/conftest.py | Python | mit | 1,864 | 0 | # setup for py.test
import os
import pytest
import easypost
TEST_API_KEY = os.environ["TEST_API_KEY"]
PROD_API_KEY = os.environ["PROD_API_KEY"]
def pytest_sessionstart(session):
# this is for local unit testing with google appengine, otherwise you get a
# 'No api proxy found for service "urlfetch"' response
try:
from google.appengine.ext import testbed
session.appengine_testbed = testbed.Testbed()
session.appengine_testbed.activate()
session.appengine_testbed.init_urlfetch_stub()
except ImportError:
# if the import fails then we're not using the appengine sdk, so just
# keep going without initializing the testbed stub
pass
def pytest_sessionfinish(session, exitstatus):
if hasattr(session, "appengine_testbed"):
session.appengine_testbed.deactivate()
# this fi | xture is auto-loaded by all tests; it | sets up the api key
@pytest.yield_fixture(autouse=True)
def setup_api_key():
default_key = easypost.api_key
easypost.api_key = TEST_API_KEY
yield
easypost.api_key = default_key
# if a test needs to use the prod api key, make it depend on this fixture
@pytest.yield_fixture()
def prod_api_key():
default_key = easypost.api_key
easypost.api_key = PROD_API_KEY
yield
easypost.api_key = default_key
@pytest.fixture
def per_run_unique():
# this used to return a unique value per-run; however, now that we use
# VCR, treat it more like an epoch
return "20200511150500100"
@pytest.fixture(scope="module")
def vcr_config():
return {
# Replace the Authorization request header with "DUMMY" in cassettes
"filter_headers": [
("authorization", "EZTK-NONE"),
("x-client-user-agent", "suppressed"),
("user-agent", "easypost/v2 pythonclient/suppressed"),
],
}
|
dtysky/Gal2Renpy | Gal2Renpy/DefineSyntax/MovieDefine.py | Python | mit | 678 | 0.060472 | #coding:utf-8
#################################
#Copyright(c) 2014 dtysky
#################################
import G2R,os
class MovieDefine(G2R.DefineSyntax):
def Creat(self,Flag,US,FS,DictHash):
DictHash=G2R.DefineSyntax.Creat(self,Flag,US,FS,DictHash)
if DictHash[Flag]==G2R.DHash(US.Args[Flag]):
return DictHash
path=US.Args['pathmode']['ScriptPath']+'define/movie.rpy'
e | lepath=US.Args['pathmode']['MoviePath']
Args=US.Args[Flag]
so=''
for ele in Args:
if Args[ele]=='StopMoive':
| continue
so+='define movie_'+os.path.splitext(Args[ele])[0]+' = '
so+="'"+elepath+Args[ele]+"'\n"
FS.Open(path,'w')
FS.Write(so)
FS.Close()
return DictHash |
weso/CWR-DataApi | tests/grammar/factory/record/test_npa.py | Python | mit | 4,113 | 0.00073 | # -*- coding: utf-8 -*-
import unittest
from pyparsing import ParseException
from tests.utils.grammar import get_record_grammar
"""
CWR Non-Roman Alphabet Agreement Party Name grammar tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestNPAGrammar(unittest.TestCase):
"""
Tests that the NPA grammar decodes correctly formatted strings
"""
def setUp(self):
self.grammar = get_record_grammar('nra_agreement_party')
def test_valid_full(self):
"""
Tests that IPA grammar decodes correctly formatted record prefixes.
This test contains all the optional fields.
"""
record = 'NPA0000123400000023012345678PARTY NAME PARTY WRITER NAME | ES'
result = self.grammar.parseString(record)[0]
self.assertEqual('NPA', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual('012345678', result.ip_n)
self.assertEqual('PARTY NAME', result.ip_name)
self.assertEqual('PARTY WRITER NAME', result.ip_writer_name) |
self.assertEqual('ES', result.language_code)
def test_valid_min(self):
"""
Tests that IPA grammar decodes correctly formatted record prefixes.
This test contains none of the optional fields.
"""
record = 'NPA0000123400000023000000000PARTY NAME PARTY WRITER NAME '
result = self.grammar.parseString(record)[0]
self.assertEqual('NPA', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual('000000000', result.ip_n)
self.assertEqual('PARTY NAME', result.ip_name)
self.assertEqual('PARTY WRITER NAME', result.ip_writer_name)
self.assertEqual(None, result.language_code)
def test_extended_character(self):
"""
Tests that IPA grammar decodes correctly formatted record prefixes.
This test contains none of the optional fields.
"""
record = 'NPA0000123400000023000000000PARTY NAME \xc6\x8f PARTY WRITER NAME \xc6\x8f '
result = self.grammar.parseString(record)[0]
self.assertEqual('NPA', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual('000000000', result.ip_n)
self.assertEqual('PARTY NAME \xc6\x8f', result.ip_name)
self.assertEqual('PARTY WRITER NAME \xc6\x8f', result.ip_writer_name)
self.assertEqual(None, result.language_code)
class TestNPAGrammarException(unittest.TestCase):
def setUp(self):
self.grammar = get_record_grammar('nra_agreement_party')
def test_empty(self):
"""
Tests that a exception is thrown when the the works number is zero.
"""
record = ''
self.assertRaises(ParseException, self.grammar.parseString, record)
def test_invalid(self):
record = 'This is an invalid string'
self.assertRaises(ParseException, self.grammar.parseString, record)
|
fingeronthebutton/RIDE | src/robotide/lib/robot/running/arguments/argumentmapper.py | Python | apache-2.0 | 2,517 | 0.000397 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robotide.lib.robot.errors import DataError
from robotide.lib.robot.utils import DotDict
class ArgumentMapper(object):
def __init__(self, argspec):
self._argspec = argspec
def map(self, positional, named, variables=None, prune_trailing_defaults=False):
template = KeywordCallTemplate(self._argspec, variables)
template.fill_positional(positional)
template.fill_named(named)
if prune_trailing_defaults:
template.prune_trailing_defaults()
template.fill_defaults()
return template.args, template.kwargs
class KeywordCallTemplate(object):
def __init__(self, argspec, variables):
defaults = argspec.defaults
if variables:
defaults = variables.replace_list(defaults)
self._positional = argspec.positional
self._supports_kwargs = bool(argspec.kwargs)
self._supports_named = argspec.supports_named
self.args = [None] * argspec.minargs + [Default(d) for d in defaults]
self.kwargs = DotDict()
def fill_positional(self, positional):
self.args[:len(positional)] = positional
def fi | ll_named(self, named):
for name, value in named.items():
if name in self._positional and self._supports_named:
index = self._positional.index(name)
self.args[index] = value
elif self._supports_kwargs:
self.kwargs[name] = value
else:
raise Dat | aError("Non-existing named argument '%s'." % name)
def prune_trailing_defaults(self):
while self.args and isinstance(self.args[-1], Default):
self.args.pop()
def fill_defaults(self):
self.args = [arg if not isinstance(arg, Default) else arg.value
for arg in self.args]
class Default(object):
def __init__(self, value):
self.value = value
|
cliburn/flow | src/plugins/projections/Princomp/Main.py | Python | gpl-3.0 | 631 | 0.011094 | from plugin import Projections
import pca
class Pca(Project | ions):
name = "Pca"
def Main(self,model):
self.model = model
pca_data = pca.pca(self.model.GetCurrentData()[:])
fields = ['Comp%02d' % c for c in range(1, pca_data.shape[1]+1)]
self.model.updateHDF('PCA', pca_data, fields=fields)
# self.model.NewGroup('PCA')
# data = self.model.hdf5.createArray(self.model.current_group, 'data', pca_data)
# data.setAttr('fields', ['PC%d' % (i+1) for i in range(len(pca_dat | a[0]))])
# self.model.current_array = data
# self.model.update()
|
yuraic/koza4ok | skTMVA/sci_bdt_electron_DecisionTree.py | Python | mit | 1,980 | 0.004545 | from array import array
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import classification_report, roc_auc_score, roc_curve
from sklearn import tree
import cPickle
data = np.load('/Users/musthero/Documents/Yura/Applications/tmva_local/output_electrons_fullsim_v5_VeryTightLH_20per.npz')
# Train on the first 2000, test o | n the rest
X_train, y_train = data['data_training'], data['isprompt_training'].ravel()
X_test, y_test = data['data_testing'][0:1000], data['isprompt_testing'][0:1000].ravel()
# sklearn
dt = DecisionTreeClassifier(max_depth=3,
min_samples_leaf=100)
#min_samples_leaf=0.05*len(X_train))
doFit = False
if doFit:
print "Performing DecisionTree fit..."
dt.fit(X_train, y_train)
import cPickle
with open('electrons | _toTMVA.pkl', 'wb') as fid:
cPickle.dump(dt, fid)
else:
print "Loading DecisionTree..."
# load it again
with open('electrons_toTMVA.pkl', 'rb') as fid:
dt = cPickle.load(fid)
#sk_y_predicted = dt.predict(X_test)
#sk_y_predicted = dt.predict_proba(X_test)[:, 1]
sk_y_predicted = dt.predict_proba(X_test)[:, 1]
predictions = dt.predict(X_test)
print predictions
print y_test
# Draw ROC curve
fpr, tpr, _ = roc_curve(y_test, sk_y_predicted)
plt.figure()
plt.plot(fpr, tpr, label='ROC curve of class')
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.savefig("output_fullsim_v5_electrons_roc_20per_DecisionTree.png", dpi=144)
tree.export_graphviz(dt, out_file='dt_viz.dot')
# Save to file fpr, tpr
#np.savez('output_fullsim_v3_electrons_fpr_tpr_10per.npz',
# fpr=fpr, tpr=tpr) |
ksu-mechatronics-research/deep-visual-odometry | models/hand_crafted/quat_rot_models/vggVO_0/model.py | Python | mit | 4,152 | 0.010116 | from keras.layers import Input
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as | K
from keras.optimizers import Adam
def VGG_16():
input_img = Input(shape=(128, 128, 6), name='input_img')
x = ZeroPadding2D((1,1),input_shape=(128,128,6))(input_img)
x = Convolution2D(64, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((1,1))(x)
| x = Convolution2D(64, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2,2), strides=(2,2))(x)
x = ZeroPadding2D((1,1))(x)
x = Convolution2D(128, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((1,1))(x)
x = Convolution2D(128, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2,2), strides=(2,2))(x)
x = ZeroPadding2D((1,1))(x)
x = Convolution2D(256, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((1,1))(x)
x = Convolution2D(256, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((1,1))(x)
x = Convolution2D(256, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2,2), strides=(2,2))(x)
x = ZeroPadding2D((1,1))(x)
x = Convolution2D(512, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((1,1))(x)
x = Convolution2D(512, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((1,1))(x)
x = Convolution2D(512, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2,2), strides=(2,2))(x)
x = ZeroPadding2D((1,1))(x)
x = Convolution2D(512, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((1,1))(x)
x = Convolution2D(512, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((1,1))(x)
x = Convolution2D(512, 3, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2,2), strides=(2,2))(x)
x = Flatten()(x)
x = Dense(4096, activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Dense(4096, activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Dense(1024, activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
# Delta Translation output
vector_translation = Dense(3, init='normal', activation='linear', name='translation')(x)
# Delta rotation in quaternion form
rotation_proc = Dense(4, init='normal', activation='tanh')(x)
quaternion_rotation = Lambda(normalize_quaternion, name='rotation')(rotation_proc)
model = Model(input=input_img, output=[vector_translation, quaternion_rotation])
return model
def normalize_quaternion(x):
"use tensorflow normalize function on this layer to ensure valid quaternion rotation"
x = K.l2_normalize(x, axis=1)
return x
def train_model(model, Xtr, Ytr, Xte, Yte, save_path=None):
"Note: y should be [[translation],[quat rotation]]"
adam = Adam(lr=0.017, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='mean_squared_error', optimizer=adam, metrics=['mean_absolute_error'])
history = model.fit(Xtr, Ytr, validation_split=0.2, batch_size=32, nb_epoch=10, verbose=1)
score = model.evaluate(Xte, Yte, verbose=1)
if save_path:
model.save(save_path)
return score, history
#if __name__ == "__main__":
# im = cv2.resize(cv2.imread('cat.jpg'), (224, 224)).astype(np.float32)
# im[:,:,0] -= 103.939
# im[:,:,1] -= 116.779
# im[:,:,2] -= 123.68
# im = im.transpose((2,0,1))
# im = np.expand_dims(im, axis=0)
# Test pretrained model
# model = VGG_16('vgg16_weights.h5')
# sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(optimizer=sgd, loss='categorical_crossentropy')
# out = model.predict(im)
#print np.argmax(out)
|
GFZ-Centre-for-Early-Warning/REM_RRVS | scripts/prepareinput.py | Python | bsd-3-clause | 3,970 | 0.011089 | '''
-----------------------------------------------------------------------------
WARNING: OUTDATED SCRIPT but might be of value for some (M.Haas 26.02.16)
Prepare input files for RRVS survey
-----------------------------------------------------------------------------
Created on 24.04.2015
Last modified on 24.04.2015
Author: Marc Wieland
Description: this script produces the gps.js and buildings.js files for a specific application
based on user selection of buildings to be surveyed.
Input: gids of buildings that need to be surveyed
Output: gps.js and buildings.js in /webapp/static/panoimg
TODO: This is a rather static solution with writing to files.
Ask for building_gid string on start of the application and make it available to the map
-> than dynamically populate the geojson variables in map.html with the below queries
-> dont write to file but directly to the variable in the map file (like I did before with php!)
----
'''
import os
import psycopg2
import numpy as np
# Input parameters#################################################################################################
db_connect = "host=localhost dbname=rrvstool_v01 user=postgres password=postgres" # database connection string
building_gid = '1743, 1744, 1745' # string list with the gids of buildings to be surveyed
###################################################################################################################
# connect to database
try:
conn=psycopg2.connect(db_connect)
conn.autocommit = True
except:
print 'not able to conn | ect to database'
cur = conn.cursor()
#create geojson file from buildings selection
#TODO: add a where "gid IN (building_gid)" statement here
cur.execute("DROP TABLE IF EXISTS panoimg.geojson;")
cur.execute("SELECT * INTO panoimg.geojson FROM (" +
"SELECT row_to_json(fc) " +
"FROM (SELECT 'FeatureCollection' AS type, array_to_json(array_agg(f)) AS features " +
"FR | OM (SELECT 'Feature' AS type, ST_AsGeoJSON(lg.the_geom)::json AS geometry, " +
"row_to_json((SELECT l FROM (SELECT gid) AS l)) AS properties " +
"FROM object_res1.ve_resolution1 AS lg) AS f) AS fc " +
") a;")
#TODO: define path relative to application in flask!
cur.execute("COPY panoimg.geojson TO '/webapp/static/panoimg/buildings.js' USING DELIMITERS ';';")
#TODO: add "var buildings = " at the beginning of the json string
#select gps points within radius of 50m (ca 0.0005 degrees) of selected buildings
cur.execute("SELECT a.gid FROM " +
"panoimg.gps a, " +
"(SELECT st_buffer(the_geom, 0.0005) as buffer_geom FROM object_res1.ve_resolution1 " +
"WHERE gid IN (" + building_gid + ")) b " +
"WHERE st_intersects(a.the_geom, b.buffer_geom) GROUP BY a.gid;")
rows = cur.fetchall()
h = np.asarray(rows)
gps_gid = ""
for i in range(len(h)):
gps_gid += str(h[i])[:-2].replace('[\'', '') + ','
gps_gid = gps_gid[:-1]
print gps_gid
#create geojson file from gps points selection
cur.execute("DROP TABLE IF EXISTS panoimg.geojson;")
cur.execute("SELECT * INTO panoimg.geojson FROM (" +
"SELECT row_to_json(fc) " +
"FROM (SELECT 'FeatureCollection' AS type, array_to_json(array_agg(f)) AS features " +
"FROM (SELECT 'Feature' AS type, ST_AsGeoJSON(lg.the_geom)::json AS geometry, " +
"row_to_json((SELECT l FROM (SELECT img_id, azimuth) AS l)) AS properties " +
"FROM panoimg.gps AS lg where gid in (" + gps_gid + ") AS f) AS fc " +
") a;")
#TODO: define path relative to application in flask!
cur.execute("COPY panoimg.geojson TO '/webapp/static/panoimg/gps.js' USING DELIMITERS ';';")
#TODO: add "var gps = " at the beginning of the json string
# Close database connection
cur.close()
conn.close()
|
nischu7/paramiko | tests/test_buffered_pipe.py | Python | lgpl-2.1 | 2,696 | 0.001113 | # Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Some unit tests for BufferedPipe.
"""
import threading
import time
import unittest
from paramiko.buffered_pipe import BufferedPipe, PipeTimeout
from paramiko import pipe
from util import ParamikoTest
def delay_thread(pipe):
pipe.feed('a')
time.sleep(0.5)
pipe.feed('b')
pipe.close()
def close_thread(pipe):
time.sleep(0.2)
pipe.close()
class BufferedPipeTest(ParamikoTest):
def test_1_buffered_pipe(self):
p = BufferedPipe()
self.assert_(not p.read_ready())
p.feed('hello.')
self.assert_(p.read_ready())
data = p.read(6)
self.assertEquals(b'hello.', data)
p.feed('plus/minus')
self.assertEquals(b'plu', p.read(3))
self.assertEquals(b's/m', p.read(3))
self.assertEquals(b'inus', p.read(4))
p.close()
self.assert_(not p.read_ready())
self.assertEquals(b'', p.read(1))
def test_2_delay(self):
p = BufferedPipe()
self.assert_(not p.read_ready())
| threading.Thread(target=delay_thread, args=(p,)).start()
self.assertEquals(b'a', p.read(1, 0.1))
try:
p.read(1, 0.1)
self.assert_(False)
except PipeTimeout:
pass
self.assertEquals(b'b', p.read(1, 1.0))
self.assertEquals(b'', p.read(1))
def test_3_close_while_reading(self):
p = BufferedPipe()
threadin | g.Thread(target=close_thread, args=(p,)).start()
data = p.read(1, 1.0)
self.assertEquals(b'', data)
def test_4_or_pipe(self):
p = pipe.make_pipe()
p1, p2 = pipe.make_or_pipe(p)
self.assertFalse(p._set)
p1.set()
self.assertTrue(p._set)
p2.set()
self.assertTrue(p._set)
p1.clear()
self.assertTrue(p._set)
p2.clear()
self.assertFalse(p._set)
|
SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/plugin.audio.tuneinradio.smallplayer/resources/lib/tunein.py | Python | gpl-2.0 | 37,835 | 0.002669 | #/*
# *
# * TuneIn Radio for XBMC.
# *
# * Copyright (C) 2013 Brian Hornsby
# *
# * This program is free software: you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation, either version 3 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program. If not, see <http://www.gnu.org/licenses/>.
# *
# */
import sys
import os
import simplejson
import urllib
import urllib2
import subprocess
import re
from . import astralradio as astralradio
from . import streamtheworld as streamtheworld
import ConfigParser
import xml.dom.minidom as minidom
BASE_URL = 'opml.radiotime.com/'
class TuneIn:
class TuneInError(Exception):
''' Exception raised when an error or invalid response is received.
'''
def __init__(self, status, fault, faultcode=''):
self.status = status
self.fault = fault
self.faultcode = faultcode
def __str__(self):
return repr(self.status)
return repr(self.fault)
return repr(self.faultcode)
def log_debug(self, msg):
if self._debug is True:
print 'TuneIn Library: DEBUG: %s' % msg
def __init__(self, partnerid, serial=None, locale="en-GB", formats=None, https=True, debug=False):
if https is False:
self._protocol = 'http://'
else:
self._protocol = 'https://'
| self._global_params = []
self._global_params.append({'param': 'partnerId', 'value': partnerid})
if serial is not None:
self._global_params.append({'param': 'serial', 'value': serial})
self._global_params.append({'param': 'render', 'value': 'json'})
self._global_params.append({'param': 'locale', 'value': locale})
if (formats is not None):
| self._global_params.append({'param': 'formats', 'value': formats})
self._debug = debug
self.log_debug('Protocol: %s' % self._protocol)
self.log_debug('Global Params: %s' % self._global_params)
def __add_params_to_url(self, method, fnparams=None, addrender=True, addserial=True):
params = {}
for param in self._global_params:
if (param['param'] == 'render' and addrender is False):
pass
elif (param['param'] == 'serial' and addserial is False):
pass
elif (param['value']):
params[param['param']] = param['value']
for param in fnparams:
if (param['value']):
params[param['param']] = param['value']
url = '%s%s%s?%s' % (
self._protocol, BASE_URL, method, urllib.urlencode(params))
self.log_debug('URL: %s' % url)
return url
def __call_tunein(self, method, params=None):
url = self.__add_params_to_url(method, params)
req = urllib2.Request(url)
f = urllib2.urlopen(req)
result = simplejson.load(f)
f.close()
return result
def __parse_asp(self, url):
streams = []
req = urllib2.Request(url)
f = urllib2.urlopen(req)
html = f.read()
start = html.find('http://player.streamtheworld.com')
if start != -1:
end = html.find('"', start)
if end != -1:
pattern = re.compile('(.*)callsign\=(.*)$')
result = pattern.match(html[start:end])
if (result):
stw = streamtheworld.StreamTheWorld(result.group(2))
stw_url = stw.get_stream_url(result.group(2))
streams.append(stw_url)
f.close()
return streams
def __parse_asx(self, url):
streams = []
req = urllib2.Request(url)
f = urllib2.urlopen(req)
xmlstr = f.read().decode('ascii', 'ignore')
dom = minidom.parseString(xmlstr)
asx = dom.childNodes[0]
for node in asx.childNodes:
if (str(node.localName).lower() == 'entryref' and node.hasAttribute('href')):
streams.append(node.getAttribute('href'))
elif (str(node.localName).lower() == 'entryref' and node.hasAttribute('HREF')):
streams.append(node.getAttribute('HREF'))
elif (str(node.localName).lower() == 'entry'):
for subnode in node.childNodes:
if (str(subnode.localName).lower() == 'ref' and subnode.hasAttribute('href') and not subnode.getAttribute('href') in streams):
streams.append(subnode.getAttribute('href'))
elif (str(subnode.localName).lower() == 'ref' and subnode.hasAttribute('HREF') and not subnode.getAttribute('HREF') in streams):
streams.append(subnode.getAttribute('HREF'))
f.close()
return streams
def __parse_pls(self, url):
streams = []
req = urllib2.Request(url)
f = urllib2.urlopen(req)
config = ConfigParser.RawConfigParser()
config.readfp(f)
numentries = config.getint('playlist', 'NumberOfEntries')
while (numentries > 0):
streams.append(
config.get('playlist', 'File' + str(numentries)))
numentries -= 1
f.close()
return streams
def __result_ok(self, result):
return result['head']['status'] != '200'
def __result_status(self, result):
return int(result['head']['status'])
def __result_fault(self, result):
if ('fault' in result['head']):
return result['head']['fault']
else:
return ''
def __result_fault_code(self, result):
if ('fault_code' in result['head']):
return result['head']['fault_code']
else:
return ''
def is_category_id(self, id):
''' Returns True if argument is a TuneIn category id.
'''
if (not id or len(id) == 0 or id[0] != 'c' or not id[1:].isdigit()):
return False
return True
def is_folder_id(self, id):
''' Returns True if argument is a TuneIn folder id.
'''
if (not id or len(id) == 0 or id[0] != 'f' or not id[1:].isdigit()):
return False
return True
def is_genre_id(self, id):
''' Returns True if argument is a TuneIn genre id.
'''
if (not id or len(id) == 0 or id[0] != 'g' or not id[1:].isdigit()):
return False
return True
def is_artist_id(self, id):
''' Returns True if argument is a TuneIn artist id.
'''
if (not id or len(id) == 0 or id[0] != 'm' or not id[1:].isdigit()):
return False
return True
def is_region_id(self, id):
''' Returns True if argument is a TuneIn region id.
'''
if (not id or len(id) == 0 or id[0] != 'r' or not id[1:].isdigit()):
return False
return True
def is_show_id(self, id):
''' Returns True if argument is a TuneIn show id.
'''
if (not id or len(id) == 0 or id[0] != 'p' or not id[1:].isdigit()):
return False
return True
def is_station_id(self, id):
''' Returns True if argument is a TuneIn station id.
'''
if (not id or len(id) == 0 or id[0] != 's' or not id[1:].isdigit()):
return False
return True
def is_topic_id(self, id):
''' Returns True if argument is a TuneIn topic id.
'''
if (not id or len(id) == 0 or id[0] != 't' or not id[1:].isdigit()):
return False
return True
def is_custom_url_id(self, id):
''' Returns True if argument is a TuneIn custom url id.
'''
if (not id or len(id) == 0 or id[0] != 'u' or not id[1:].isdigit()): |
karpelescoin/karpelescoin | contrib/bitrpc/bitrpc.py | Python | mit | 7,846 | 0.038109 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a KarpelesCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a KarpelesCoin address (optional): ")
| mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getrec | eivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
csakatoku/uamobile | uamobile/scrapers/base.py | Python | mit | 543 | 0.003683 | # -*- coding: utf-8 -*-
import urllib2
from lxml import etree
class Scraper(object):
# subclass must override this property
url = None
def scrape(self):
stream = self.get_stream()
doc = self.get_document(stream)
return | self.do_scrape(doc)
def get_document(self, stream):
doc = etree.parse(stream, etree.HTMLParser(remove_comments=True))
return doc
d | ef get_stream(self):
return urllib2.urlopen(self.url)
def do_scrape(self, doc):
raise NotImplementedError()
|
ianadmu/bolton_bot | bot/emoji_master.py | Python | mit | 3,263 | 0.000306 | import random
import json
import os.path
class Response:
names = ["bolton", "qbot"]
def __init__(self, emoji, responses, added, removed):
self.emoji = emoji
self.responses = responses
self.added = added
self.removed = removed
def get_response(self, message, tokens, user):
has_trigger = False
is_named = False
lower = message.lower()
for phrase in self.phrases:
if phrase in lower:
has_trigger = True
continue
if not has_trigger:
for word in self.words:
for token in tokens:
if word == token:
has_trigger = True
continue
for name in Response.names:
if name in lower:
is_named = True
result = ""
if has_trigger and (not self.named or is_named):
if self.use_hash:
result = self.start + self.hash(message) + self.end
else:
result = self.start + self.random() + self.end
result = result.replace("user_id", "<@" + user + ">")
return result
def hash(self, text):
hashValue = 11
for character in text:
hashValue *= 47
hashValue += ord(character)
return self.responses[hashValue % len(self.responses)]
def random(self):
return random.choice(self.responses)
class Emoji_master:
def __init__(self, msg_writer):
try:
master_file = open(
os.path.join('./resources', 'emoji_event.txt'), 'r'
)
json_events = json.load(master_file)
self.events = []
for event in json_events["Events"]:
use_hash = "Hash" in event and event["Hash"]
named = "Named" in event and event["Named"]
start = ""
end = ""
if "Start" in event:
start = event["Start"]
if "End" in event:
| end = event["End"]
phrases = []
words = []
responses = []
if "Words" in event["Triggers"]:
for w in event["Triggers"]["Words"]:
words.append(w)
if "Phrases" in event["Triggers"]:
for p in event["Triggers"]["Phrases"]:
phrases.append(p)
for r in event["Responses"]:
responses.a | ppend(r)
self.events.append(
Response(
phrases, words, responses, use_hash, named, start, end
)
)
except:
msg_writer.write_error("Error loading JSON file")
self.events = []
def get_response(self, message, user):
combined_responses = ""
tokens = message.lower().split()
for event in self.events:
current_response = event.get_response(message, tokens, user)
if current_response != "":
current_response += '\n'
combined_responses += current_response
return combined_responses
|
dbiesecke/plugin.video.xstream | sites/bundesliga_de.py | Python | gpl-3.0 | 7,292 | 0.006175 | # -*- coding: utf-8 -*-
from resources.lib.parser import cParser
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.gui.guiElement import cGuiElement
from resources.lib.gui.gui import cGui
from resources.lib.util import cUtil
from resources.lib.handler.ParameterHandler import ParameterHandler
SITE_IDENTIFIER = 'bundesliga_de'
SITE_NAME = 'Bundesliga.de'
SITE_ICON = 'bl.png'
URL_MAIN = 'http://www.bundesliga.de'
URL_TV = 'http://www.bundesliga.de/de/service/?action=teaserbox&type=video&language=de&amount=25&category='
URL_GET_STREAM = 'http://btd-flv-lbwww-01.odmedia.net/bundesliga/'
def load():
    """Build the plugin's main menu: one folder entry per category."""
    oGui = cGui()
    aCategories = (('Aktuell', 'new'),
                   ('Spieltag', 'spieltag'),
                   ('Stars', 'stars'),
                   ('Stories', 'stories'),
                   ('Historie', 'historie'),
                   ('Partner', 'partner'),
                   ('Vereine', 'clubs'))
    for sTitle, sPlaylistId in aCategories:
        __createMainMenuItem(oGui, sTitle, sPlaylistId)
    oGui.setEndOfDirectory()
def __createMainMenuItem(oGui, sTitle, sPlaylistId):
    # Add a single main-menu folder that re-enters this site via listVideos
    # with the given playlist/category id.
    oGuiElement = cGuiElement()
    oGuiElement.setSiteName(SITE_IDENTIFIER)
    oGuiElement.setFunction('listVideos')
    oGuiElement.setTitle(sTitle)
    oOutputParameterHandler = ParameterHandler()
    oOutputParameterHandler.setParam('playlistId', sPlaylistId)
    oGui.addFolder(oGuiElement, oOutputParameterHandler)
def listVideos():
    """List videos for the selected main-menu category.

    Reads 'playlistId' (and optionally 'sUrl') from the addon parameters and
    builds one of three listings:
      * 'spieltag': one folder per match day plus a folder for the previous
        season,
      * 'clubs':    one folder per Bundesliga club,
      * otherwise:  the playable videos of the selected teaser box.
    Returns False when the remote service reports no results.
    """
    oGui = cGui()
    params = ParameterHandler()
    if params.exist('playlistId'):
        sPlaylistId = params.getValue('playlistId')
        if not params.exist('sUrl'):
            sUrl = URL_TV + str(sPlaylistId)
        else:
            sUrl = params.getValue('sUrl')
        if sPlaylistId == 'spieltag':
            oParser = cParser()
            if not params.exist('saison'):
                # Determine the currently active season from the main page.
                oRequest = cRequestHandler('http://www.bundesliga.de/de/bundesliga-tv/index.php')
                sHtmlContent = oRequest.request()
                sPattern = 'data-season="([^"]+)" class="active grey-gradient"'
                aResult = oParser.parse(sHtmlContent, sPattern)
                saison = aResult[1][0]
            else:
                saison = params.getValue('saison')
            oRequest = cRequestHandler(sUrl + '&season=' + saison + '&matchday=1')
            sHtmlContent = oRequest.request()
            if sHtmlContent.find('"message":"nothing found"') != -1:
                return False
            # ausgewaehlte Saison: one folder per match day.
            for matchDay in range(1, 35):
                oGuiElement = cGuiElement()
                oGuiElement.setSiteName(SITE_IDENTIFIER)
                oGuiElement.setFunction('listVideos')
                oGuiElement.setTitle('%s Spieltag Saison %s' % (matchDay, saison))
                # BUG FIX: build each match-day URL from the base URL.  The
                # original appended to sUrl itself, so every iteration
                # accumulated all previous '&season=...&matchday=...' pairs.
                sMatchDayUrl = sUrl + '&season=' + saison + '&matchday=' + str(matchDay)
                oOutputParameterHandler = ParameterHandler()
                oOutputParameterHandler.setParam('sUrl', sMatchDayUrl)
                oOutputParameterHandler.setParam('saison', saison)
                oOutputParameterHandler.setParam('matchDay', matchDay)
                oOutputParameterHandler.setParam('playlistId', 'spieltagEinzeln')
                oGui.addFolder(oGuiElement, oOutputParameterHandler)
            # aeltere Saison: folder that re-enters with the previous season.
            oGuiElement = cGuiElement()
            oGuiElement.setSiteName(SITE_IDENTIFIER)
            oGuiElement.setFunction('listVideos')
            lastSaison = str(int(saison) - 1)
            oGuiElement.setTitle('* Saison %s/%s *' % (lastSaison, saison))
            oOutputParameterHandler = ParameterHandler()
            oOutputParameterHandler.setParam('sUrl', sUrl)
            oOutputParameterHandler.setParam('saison', lastSaison)
            oOutputParameterHandler.setParam('playlistId', 'spieltag')
            oGui.addFolder(oGuiElement, oOutputParameterHandler)
        elif sPlaylistId == 'clubs':
            sPattern = '<li data-club="([^"]+)" data-name="([^"]+)".*?src="([^"]+)"'
            oRequest = cRequestHandler('http://www.bundesliga.de/de/bundesliga-tv/index.php')
            sHtmlContent = oRequest.request()
            oParser = cParser()
            aResult = oParser.parse(sHtmlContent, sPattern)
            if aResult[0] == False:
                return False
            for aEntry in aResult[1]:
                oGuiElement = cGuiElement()
                oGuiElement.setSiteName(SITE_IDENTIFIER)
                oGuiElement.setFunction('listVideos')
                oGuiElement.setTitle(aEntry[1])
                # Use the full-size club logo instead of the 27x27 variant.
                sThumbnail = URL_MAIN + str(aEntry[2]).replace('variant27x27.', '')
                oGuiElement.setThumbnail(sThumbnail)
                # BUG FIX: per-club URL built from the base URL (the original
                # appended every club id to the same sUrl string).
                sClubUrl = sUrl + '&club=' + str(aEntry[0])
                oOutputParameterHandler = ParameterHandler()
                oOutputParameterHandler.setParam('sUrl', sClubUrl)
                oOutputParameterHandler.setParam('playlistId', 'clubVideos')
                oGui.addFolder(oGuiElement, oOutputParameterHandler)
        else:
            sPattern = 'btd-teaserbox-entry.*?<a href="([^"]+)".*?<h3 class=.*?>([^<]+)<.*?src="([^"]+).*?class="teaser-text">([^<]+)'
            oRequest = cRequestHandler(sUrl)
            sHtmlContent = oRequest.request()
            # The teaser box is delivered with escaped quotes/slashes.
            sHtmlContent = sHtmlContent.replace('\\"', '"').replace('\\/', '/')
            oParser = cParser()
            aResult = oParser.parse(sHtmlContent, sPattern)
            if aResult[0] == False:
                return False
            for aEntry in aResult[1]:
                sThumbnail = URL_MAIN + str(aEntry[2])
                sVideoUrl = URL_MAIN + str(aEntry[0])
                sTitle = cUtil().unescape(str(aEntry[1]).decode('unicode-escape')).encode('utf-8')
                sDescription = cUtil().unescape(str(aEntry[3]).decode('unicode-escape')).encode('utf-8')
                oGuiElement = cGuiElement()
                oGuiElement.setSiteName(SITE_IDENTIFIER)
                oGuiElement.setFunction('play')
                oGuiElement.setTitle(sTitle)
                oGuiElement.setDescription(sDescription)
                oGuiElement.setThumbnail(sThumbnail)
                oOutputParameterHandler = ParameterHandler()
                oOutputParameterHandler.setParam('sUrl', sVideoUrl)
                oOutputParameterHandler.setParam('sTitle', sTitle)
                oGui.addFolder(oGuiElement, oOutputParameterHandler, bIsFolder=False)
    oGui.setView('movies')
    oGui.setEndOfDirectory()
def play():
    """Resolve the selected teaser page to a playable HD FLV stream URL.

    Reads 'sUrl' (teaser page) and 'sTitle' from the addon parameters,
    scrapes the FLV clip name from the page and returns a dict with
    'streamUrl' and 'resolved'; returns False when resolution fails.
    """
    params = ParameterHandler()
    if (params.exist('sUrl') and params.exist('sTitle')):
        sUrl = params.getValue('sUrl')
        sTitle = params.getValue('sTitle')
        print sUrl
        oRequest = cRequestHandler(sUrl)
        sHtmlContent = oRequest.request()
        # Clip name without extension, matched from e.g. ': "clipname.flv"'
        sPattern = ': "([^\."]+)\.flv"'
        oParser = cParser()
        aResult = oParser.parse(sHtmlContent, sPattern)
        if (aResult[0] == True):
            # First match is the clip name; request the HD variant.
            sStreamUrl = URL_GET_STREAM + str(aResult[1][0])+'_HD.flv?autostart=true'
            result = {}
            result['streamUrl'] = sStreamUrl
            result['resolved'] = True
            return result
    return False
|
xiaoxiangs/devops | pedevops/devops/form.py | Python | mpl-2.0 | 2,621 | 0.046055 | #!/usr/bin/python
#coding: utf-8
from django import forms
from models import deletelogapply
class deletelogform(forms.Form):
    """Form for scheduling log cleanup on a host (host, path, retention)."""
    # Target host (hostname or IP); required.
    log_host = forms.CharField(label=u'日志主机',error_messages={'required':u'日志主机不可为空'},
            widget = forms.TextInput(attrs={'class':'form-control','placeholder':'必填,不可为空(hostname or ip)'}))
    # Directory holding the logs; defaults to /data/logs.
    log_path = forms.CharField(label=u'日志路径',max_length=255,initial='/data/logs',
            widget= forms.TextInput(attrs={'class':'form-control','placeholder':'默认为/data/logs'}))
    # Retention period in days; free text here — per the placeholder,
    # non-numeric input is treated as the default (7) downstream.
    save_date = forms.CharField(label=u'保留天数',initial='7',
            widget = forms.TextInput(attrs={'class':'form-control','placeholder':'保留日志的天数,默认7天(必须为数字,否则按默认值处理)'}))
    # Optional free-form remark.
    mark = forms.CharField(label=u'备注',max_length=255,required=False,
            widget = forms.TextInput(attrs={'class':'form-control'}))
    def __init__(self,*args,**kwargs):
        super(deletelogform,self).__init__(*args,**kwargs)
class dns_updateform(forms.Form):
    """Form for adding/modifying/deleting a DNS record."""
    # Operation to perform on the record.
    operation_id = forms.CharField(label=u'操作类型',
            widget = forms.Select(choices=(('Add',u'添加'),('Modify',u'修改'),('Delete',u'删除'))))
    # Record name; required.
    dns_name = forms.CharField(label=u'DNS名称',error_messages={'required':u'DNS名称不可为空'},
            widget = forms.TextInput(attrs={'placeholder':'必填,不可为空'}))
    # Zone the record lives in.
    domain = forms.CharField(label=u'操作域',
            widget = forms.Select(choices=(('',u'--------'),('d.xiaonei.com','d.xiaonei.com'))))
    # Record type: A or CNAME.
    record_type = forms.CharField(label=u'记录类型',
            widget = forms.Select(choices=(('A','A'),('CNAME','CNAME'))))
    # Record value: an IP for A records, a target name for CNAME records.
    analy_ip = forms.CharField(label=u'IP或CNAME地址',
            widget = forms.TextInput(attrs={'placeholder':'必填,不可为空'}))
    def __init__(self,*args,**kwargs):
        super(dns_updateform,self).__init__(*args,**kwargs)
class proxy_updateform(forms.Form):
    """Form for mapping one or more domains to backend addresses on a proxy."""
    # Domain name(s); multiple entries are comma-separated (per placeholder).
    domain = forms.CharField(label=u'域名',error_messages={'required':u'域名不可为空'},
            widget = forms.TextInput(attrs={'class':'form-control','placeholder':'不可为空,多个请用,隔开'}))
    # Backend address(es); multiple entries are comma-separated.
    mapping_ip = forms.CharField(label=u'后端地址',error_messages={'required':u'后端地址,不可为空'},
            widget = forms.TextInput(attrs={'class':'form-control','placeholder':'不可为空,多个请用,隔开'}))
    # Optional description of the change.
    mark = forms.CharField(label=u'操作说明',max_length=255,required=False,
            widget = forms.TextInput(attrs={'class':'form-control'}))
    def __init__(self,*args,**kwargs):
        super(proxy_updateform,self).__init__(*args,**kwargs)
|
sharifelgamal/runtimes-common | ftl/php/builder.py | Python | apache-2.0 | 2,878 | 0 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package defines the interface for orchestrating image builds."""
import os
from ftl.common import builder
from ftl.common import constants
from ftl.common import ftl_util
from ftl.common import layer_builder as base_builder
from ftl.php import layer_builder as php_builder
class PHP(builder.RuntimeBase):
    """FTL image builder for the PHP runtime.

    Builds the final image as an ordered stack of layers: base image, an
    optional composer dependency layer, the application layer, and an
    optional additional-directory layer.
    """

    def __init__(self, ctx, args):
        # Composer files act as the package descriptors for cache keying.
        super(PHP, self).__init__(
            ctx, constants.PHP_CACHE_NAMESPACE, args,
            [constants.COMPOSER_LOCK, constants.COMPOSER_JSON])

    def Build(self):
        """Assemble all layers in order and store the resulting image."""
        lyr_imgs = []
        lyr_imgs.append(self._base_image)
        # delete any existing files in vendor folder so the dependency layer
        # is rebuilt from a clean state
        if self._args.directory:
            vendor_dir = os.path.join(self._args.directory, 'vendor')
            rm_cmd = ['rm', '-rf', vendor_dir]
            ftl_util.run_command('rm_vendor_dir', rm_cmd)
            os.makedirs(os.path.join(vendor_dir))
        # Only build a dependency layer when a composer descriptor exists.
        if ftl_util.has_pkg_descriptor(self._descriptor_files, self._ctx):
            layer_builder = php_builder.PhaseOneLayerBuilder(
                ctx=self._ctx,
                descriptor_files=self._descriptor_files,
                directory=self._args.directory,
                destination_path=self._args.destination_path,
                cache_key_version=self._args.cache_key_version,
                cache=self._cache)
            layer_builder.BuildLayer()
            lyr_imgs.append(layer_builder.GetImage())
        # Application source layer.
        app = base_builder.AppLayerBuilder(
            directory=self._args.directory,
            destination_path=self._args.destination_path,
            entrypoint=self._args.entrypoint,
            exposed_ports=self._args.exposed_ports)
        app.BuildLayer()
        lyr_imgs.append(app.GetImage())
        # Optional extra directory copied verbatim into the image.
        if self._args.additional_directory:
            additional_directory = base_builder.AppLayerBuilder(
                directory=self._args.additional_directory,
                destination_path=self._args.additional_directory,
                entrypoint=self._args.entrypoint,
                exposed_ports=self._args.exposed_ports)
            additional_directory.BuildLayer()
            lyr_imgs.append(additional_directory.GetImage())
        ftl_image = ftl_util.AppendLayersIntoImage(lyr_imgs)
        self.StoreImage(ftl_image)
|
gorocacher/payload | payload/api/controllers/v1/queue/__init__.py | Python | apache-2.0 | 3,397 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pecan
import wsme
from pecan import rest
from wsme import types as wtypes
from wsmeext import pecan as wsme_pecan
from payload.api.controllers.v1 import base
from payload.api.controllers.v1.queue import caller
from payload.api.controllers.v1.queue import member
from payload.common import exception
# TODO(pabelanger): We should not be access db.sqlalchemy directly.
from payload.db.sqlalchemy import models
from payload.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Queue(base.APIBase):
    """API representation of a queue."""

    # Wire-visible attributes of a queue resource.
    description = wtypes.text
    disabled = bool
    name = wtypes.text
    project_id = wtypes.text
    user_id = wtypes.text
    uuid = wtypes.text

    def __init__(self, **kwargs):
        # Copy the DB model's class attributes onto this API object;
        # anything missing from kwargs defaults to None.
        # NOTE(review): vars() also yields non-column attributes of the
        # model class -- confirm this is intended.
        self.fields = vars(models.Queue)
        for k in self.fields:
            setattr(self, k, kwargs.get(k))
class QueuesController(rest.RestController):
    """REST Controller for queues."""

    # Nested resources: /queues/<uuid>/callers and /queues/<uuid>/members.
    callers = caller.QueueCallersController()
    members = member.QueueMembersController()

    @wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
    def delete(self, uuid):
        """Delete a queue."""
        try:
            pecan.request.db_api.delete_queue(uuid=uuid)
        except exception.QueueNotFound as e:
            # Map the domain exception onto an HTTP error response.
            raise wsme.exc.ClientSideError(e.message, status_code=e.code)

    @wsme_pecan.wsexpose([Queue])
    def get_all(self):
        """Retrieve a list of queues."""
        return pecan.request.db_api.list_queues()

    @wsme_pecan.wsexpose(Queue, unicode)
    def get_one(self, uuid):
        """Retrieve information about the given queue."""
        try:
            result = pecan.request.db_api.get_queue(uuid=uuid)
        except exception.QueueNotFound as e:
            # TODO(pabelanger): See if there is a better way of handling
            # exceptions.
            raise wsme.exc.ClientSideError(e.message, status_code=e.code)
        return result

    @wsme.validate(Queue)
    @wsme_pecan.wsexpose(Queue, body=Queue)
    def post(self, body):
        """Create a new queue."""
        # Owner identity comes from the authenticated request headers.
        user_id = pecan.request.headers.get('X-User-Id')
        project_id = pecan.request.headers.get('X-Tenant-Id')
        d = body.as_dict()
        res = pecan.request.db_api.create_queue(
            name=d['name'], user_id=user_id, project_id=project_id,
            description=d['description'], disabled=d['disabled'])
        return res

    @wsme.validate(Queue)
    @wsme_pecan.wsexpose(Queue, wtypes.text, body=Queue)
    def put(self, uuid, body):
        """Update an existing queue with the non-empty fields of *body*."""
        queue = pecan.request.db_api.get_queue(uuid)
        items = body.as_dict().items()
        # NOTE(review): only truthy values are copied, so a field cannot be
        # cleared (e.g. disabled=False is ignored) -- confirm intended.
        for k, v in [(k, v) for (k, v) in items if v]:
            queue[k] = v
        queue.save()
        return queue
|
olafhauk/mne-python | mne/minimum_norm/inverse.py | Python | bsd-3-clause | 66,037 | 0 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
from copy import deepcopy
from math import sqrt
import numpy as np
from scipy import linalg
from ._eloreta import _compute_eloreta
from ..fixes import _safe_svd
from ..io.base import BaseRaw
from ..io.constants import FIFF
from ..io.open import fiff_open
from ..io.tag import find_tag
from ..io.matrix import (_read_named_matrix, _transpose_named_matrix,
write_named_matrix)
from ..io.proj import (_read_proj, make_projector, _write_proj,
_needs_eeg_average_ref_proj)
from ..io.tree import dir_tree_find
from ..io.write import (write_int, write_float_matrix, start_file,
start_block, end_block, end_file, write_float,
write_coord_trans, write_string)
from ..io.pick import channel_type, pick_info, pick_types, pick_channels
from ..cov import (compute_whitener, _read_cov, _write_cov, Covariance,
prepare_noise_cov)
from ..epochs import BaseEpochs
from ..evoked import EvokedArray, Evoked
from ..forward import (compute_depth_prior, _read_forward_meas_info,
is_fixed_orient, compute_orient_prior,
convert_forward_solution, _select_orient_forward)
from ..forward.forward import write_forward_meas_info, _triage_loose
from ..source_space import (_read_source_spaces_from_tree, _get_src_nn,
find_source_space_hemi, _get_vertno,
_write_source_spaces_to_fid, label_src_vertno_sel)
from ..surface import _normal_orth
from ..transforms import _ensure_trans, transform_surface_to
from ..source_estimate import _make_stc, _get_src_type
from ..utils import (check_fname, logger, verbose, warn, _validate_type,
_check_compensation_grade, _check_option,
_check_depth, _check_src_normal)
INVERSE_METHODS = ('MNE', 'dSPM', 'sLORETA', 'eLORETA')
class InverseOperator(dict):
    """Dictionary subclass holding the contents of an inverse operator."""

    def copy(self):
        """Return a copy of the InverseOperator."""
        duplicate = deepcopy(self)
        return InverseOperator(duplicate)

    def __repr__(self):  # noqa: D105
        """Summarize inverse info instead of printing all."""
        n_meg = len(pick_types(self['info'], meg=True, eeg=False))
        n_eeg = len(pick_types(self['info'], meg=False, eeg=True))
        ori_names = {FIFF.FIFFV_MNE_UNKNOWN_ORI: 'Unknown',
                     FIFF.FIFFV_MNE_FIXED_ORI: 'Fixed',
                     FIFF.FIFFV_MNE_FREE_ORI: 'Free'}
        details = ['MEG channels: %d' % n_meg,
                   'EEG channels: %d' % n_eeg,
                   'Source space: %s with %d sources'
                   % (self['src'].kind, self['nsource']),
                   'Source orientation: %s' % ori_names[self['source_ori']]]
        return '<InverseOperator | ' + ' | '.join(details) + '>'
def _pick_channels_inverse_operator(ch_names, inv):
"""Return data channel indices to be used knowing an inverse operator.
Unlike ``pick_channels``, this respects the order of ch_names.
"""
sel = list()
for name in inv['noise_cov'].ch_names:
try:
sel.append(ch_names.index(name))
except ValueError:
raise ValueError('The inverse operator was computed with '
'channel %s which is not present in '
'the data. You should compute a new inverse '
'operator restricted to the good data '
'channels.' % name)
return sel
@verbose
def read_inverse_operator(fname, verbose=None):
"""Read the inverse operator decomposition from a FIF file.
Parameters
----------
fname : str
The name of the FIF file, which ends with -inv.fif or -inv.fif.gz.
%(verbose)s
Returns
-------
inv : instance of InverseOperator
The inverse operator.
See Also
--------
write_inverse_operator, make_inverse_operator
"""
check_fname(fname, 'inverse operator', ('-inv.fif', '-inv.fif.gz',
'_inv.fif', '_inv.fif.gz'))
#
# Open the file, create directory
#
logger.info('Reading inverse operator decomposition from %s...'
% fname)
f, tree, _ = fiff_open(fname)
with f as fid:
#
# Find all inverse operators
#
invs = dir_tree_find(tree, FIFF.FIFFB_MNE_INVERSE_SOLUTION)
if invs is None or len(invs) < 1:
raise Exception('No inverse solutions in %s' % fname)
invs = invs[0]
#
# Parent MRI data
#
parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
if len(parent_mri) == 0:
raise Exception('No parent MRI information in %s' % fname)
parent_mri = parent_mri[0] # take only first one
logger.info(' Reading inverse operator info...')
#
# Methods and source orientations
#
tag = find_tag(fid, invs, FIFF.FIFF_MNE_INCLUDED_METHODS)
if tag is None:
raise Exception('Modalities not found')
inv = dict()
inv['methods'] = int(tag.data)
tag = find_tag(fid, invs, FIFF.FIFF_MNE_SOURCE_ORIENTATION)
if tag is None:
raise Exception('Source orientation constraints not found')
inv['source_ori'] = int(tag.data)
tag = find_tag(fid, invs, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
if tag is None:
raise Exception('Number of sources not found')
inv['nsource'] = int(tag.data)
inv['nchan'] = 0
#
# C | oordinate frame
#
tag = find_tag(fid, invs, FIFF.FIFF_MNE_COORD_FRAME)
if tag is None:
raise Exc | eption('Coordinate frame tag not found')
inv['coord_frame'] = tag.data
#
# Units
#
tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT)
unit_dict = {FIFF.FIFF_UNIT_AM: 'Am',
FIFF.FIFF_UNIT_AM_M2: 'Am/m^2',
FIFF.FIFF_UNIT_AM_M3: 'Am/m^3'}
inv['units'] = unit_dict.get(int(getattr(tag, 'data', -1)), None)
#
# The actual source orientation vectors
#
tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS)
if tag is None:
raise Exception('Source orientation information not found')
inv['source_nn'] = tag.data
logger.info(' [done]')
#
# The SVD decomposition...
#
logger.info(' Reading inverse operator decomposition...')
tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SING)
if tag is None:
raise Exception('Singular values not found')
inv['sing'] = tag.data
inv['nchan'] = len(inv['sing'])
#
# The eigenleads and eigenfields
#
inv['eigen_leads_weighted'] = False
inv['eigen_leads'] = _read_named_matrix(
fid, invs, FIFF.FIFF_MNE_INVERSE_LEADS, transpose=True)
if inv['eigen_leads'] is None:
inv['eigen_leads_weighted'] = True
inv['eigen_leads'] = _read_named_matrix(
fid, invs, FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED,
transpose=True)
if inv['eigen_leads'] is None:
raise ValueError('Eigen leads not found in inverse operator.')
#
# Having the eigenleads as cols is better for the inverse calcs
#
inv['eigen_fields'] = _read_named_matrix(fid, invs,
FIFF.FIFF_MNE_INVERSE_FIELDS)
logger.info(' [done]')
#
# Read the covariance matrices
#
inv['noise_cov'] = Covariance(
**_read_cov(fid, invs, FIFF.FIFFV_MNE_NOISE_COV, limited=True))
logger.info(' Noise covariance matrix read.')
|
sarielsaz/sarielsaz | test/functional/net.py | Python | mit | 4,213 | 0.001899 | #!/usr/bin/env python3
# Copyright (c) 2017 The Sarielsaz Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/li | censes/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
import time
from test_framework.test_framework import SarielsazTestFramework
from test_framework.util import (
assert_equa | l,
assert_raises_rpc_error,
connect_nodes_bi,
p2p_port,
)
class NetTest(SarielsazTestFramework):
    """Functional tests for the net-related RPC calls (rpc/net.cpp)."""

    def set_test_params(self):
        # Two connected nodes starting from a fresh chain.
        self.setup_clean_chain = True
        self.num_nodes = 2

    def run_test(self):
        self._test_connection_count()
        self._test_getnettotals()
        self._test_getnetworkinginfo()
        self._test_getaddednodeinfo()
        self._test_getpeerinfo()

    def _test_connection_count(self):
        # connect_nodes_bi connects each node to the other
        self.assertEqual = None  # NOTE(review): placeholder? no -- see below
        assert_equal(self.nodes[0].getconnectioncount(), 2)

    def _test_getnettotals(self):
        # check that getnettotals totalbytesrecv and totalbytessent
        # are consistent with getpeerinfo
        peer_info = self.nodes[0].getpeerinfo()
        assert_equal(len(peer_info), 2)
        net_totals = self.nodes[0].getnettotals()
        assert_equal(sum([peer['bytesrecv'] for peer in peer_info]),
                     net_totals['totalbytesrecv'])
        assert_equal(sum([peer['bytessent'] for peer in peer_info]),
                     net_totals['totalbytessent'])
        # test getnettotals and getpeerinfo by doing a ping
        # the bytes sent/received should change
        # note ping and pong are 32 bytes each
        self.nodes[0].ping()
        time.sleep(0.1)
        peer_info_after_ping = self.nodes[0].getpeerinfo()
        net_totals_after_ping = self.nodes[0].getnettotals()
        for before, after in zip(peer_info, peer_info_after_ping):
            assert_equal(before['bytesrecv_per_msg']['pong'] + 32, after['bytesrecv_per_msg']['pong'])
            assert_equal(before['bytessent_per_msg']['ping'] + 32, after['bytessent_per_msg']['ping'])
        assert_equal(net_totals['totalbytesrecv'] + 32*2, net_totals_after_ping['totalbytesrecv'])
        assert_equal(net_totals['totalbytessent'] + 32*2, net_totals_after_ping['totalbytessent'])

    def _test_getnetworkinginfo(self):
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
        # Deactivating the network should eventually drop all connections.
        self.nodes[0].setnetworkactive(False)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
        timeout = 3
        while self.nodes[0].getnetworkinfo()['connections'] != 0:
            # Wait a bit for all sockets to close
            assert timeout > 0, 'not all connections closed in time'
            timeout -= 0.1
            time.sleep(0.1)
        # Reactivate and reconnect for the remaining sub-tests.
        self.nodes[0].setnetworkactive(True)
        connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)

    def _test_getaddednodeinfo(self):
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))
        self.nodes[0].addnode(ip_port, 'add')
        # check that the node has indeed been added
        added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)
        # check that a non-existent node returns an error
        assert_raises_rpc_error(-24, "Node has not been added",
                                self.nodes[0].getaddednodeinfo, '1.1.1.1')

    def _test_getpeerinfo(self):
        peer_info = [x.getpeerinfo() for x in self.nodes]
        # check both sides of bidirectional connection between nodes
        # the address bound to on one side will be the source address for the other node
        assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
        assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
# Allow running this functional test directly as a script.
if __name__ == '__main__':
    NetTest().main()
|
dracidoupe/graveyard | ddcz/migrations/0103_letters_col_rename.py | Python | mit | 800 | 0 | # Generated by Django 2.0.13 on 2021-08-08 15:41
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the Czech-named columns of the ``letters`` model to English."""

    dependencies = [
        ("ddcz", "0102_letters_of_postal_service"),
    ]

    operations = [
        # datum -> date, prijemce -> receiver,
        # odesilatel -> sender, viditelnost -> visibility
        migrations.RenameField(
            model_name="letters",
            old_name="datum",
            new_name="date",
        ),
        migrations.RenameField(
            model_name="letters",
            old_name="prijemce",
            new_name="receiver",
        ),
        migrations.RenameField(
            model_name="letters",
            old_name="odesilatel",
            new_name="sender",
        ),
        migrations.RenameField(
            model_name="letters",
            old_name="viditelnost",
            new_name="visibility",
        ),
    ]
|
mtresch/probfit | doc/pyplots/costfunc/ulh.py | Python | mit | 726 | 0.00551 | from iminuit import Minuit
from probfit import UnbinnedLH, gaussian, Extended
from matplotlib import pyplot as plt
from numpy.random import randn
data = randn(1000)*2 + 1
ulh = UnbinnedLH(gaussian, data | )
m = Minuit(ulh, mean=0., sigma=0.5)
plt.figure(figsize=(8, 6))
plt.subplot(221)
ulh.draw(m)
plt.title('Unextended Before')
m.migrad() # fit
plt.subplot(222)
ulh.draw(m)
plt.title('Unextended After')
#Extended
data = randn(2000)*2 + 1
egauss = Extended(gaussian)
ulh = UnbinnedLH(egauss, data, extended=True, extended_bound=(-10.,10.))
m = Minuit(ulh, mean=0., sigma=0.5, N=1800.)
plt.subplot(223)
ulh.draw(m)
plt.title('Extended Before')
m.migrad() # fit
plt.subplot(224)
ulh | .draw(m)
plt.title('Extended After')
|
KWierso/treeherder | tests/etl/conftest.py | Python | mpl-2.0 | 561 | 0 | import datetime
import pytest |
from tests.test_utils import create_generic_job
| from treeherder.model.models import Push
@pytest.fixture
def perf_push(test_repository):
    # A single push on the test repository to attach performance data to.
    return Push.objects.create(
        repository=test_repository,
        revision='1234abcd',
        author='foo@bar.com',
        time=datetime.datetime.now())
@pytest.fixture
def perf_job(perf_push, failure_classifications, generic_reference_data):
    # A generic job attached to the perf_push fixture's push.
    return create_generic_job('myfunguid', perf_push.repository,
                              perf_push.id, generic_reference_data)
|
hittu123/ruhive | src/basic/events/models.py | Python | mit | 1,838 | 0.000544 | import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.db.models import permalink
from django.contrib.auth.models import User
from tagging.fields import TagField
from src.basic.places import Place
class Event(models.Model):
    """Event model: a happening at a Place or a free-text one-off location."""

    title = models.CharField(max_length=200)
    slug = models.SlugField()
    # Either a structured Place or a free-text location may be supplied.
    place = models.ForeignKey(Place, blank=True, null=True)
    one_off_place = models.CharField(max_length=200, blank=True)
    description = models.TextField(blank=True)
    submitted_by = models.ForeignKey(User, blank=True, null=True)
    tags = TagField()
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = _('event')
        verbose_name_plural = _('events')
        db_table = 'events'

    def __unicode__(self):
        return self.title
class EventTime(models.Model):
    """EventTime model: one (possibly all-day) occurrence of an Event."""

    event = models.ForeignKey(Event, related_name='event_times')
    start = models.DateTimeField()
    end = models.DateTimeField(blank=True, null=True)
    is_all_day = models.BooleanField(default=False)

    class Meta:
        verbose_name = _('event time')
        verbose_name_plural = _('event times')
        db_table = 'event_times'

    @property
    def is_past(self):
        """Whether this occurrence started before the current moment.

        Bug fix: the original called ``datetime.date.now()``, which raises
        AttributeError (``date`` has no ``now``); the intended call is
        ``datetime.datetime.now()``, matching the DateTimeField ``start``.
        """
        return self.start < datetime.datetime.now()

    def __unicode__(self):
        return u'%s' % self.event.title

    @permalink
    def get_absolute_url(self):
        """URL parameters for the dated event-detail view."""
        return ('event_detail', None, {
            'year': self.start.year,
            'month': self.start.strftime('%b').lower(),
            'day': self.start.day,
            'slug': self.event.slug,
            'event_id': self.event.id
        })
vhaupert/mitmproxy | mitmproxy/net/http/request.py | Python | mit | 16,075 | 0.002115 | import time
import urllib.parse
from dataclasses import dataclass
from typing import Dict, Iterable, Optional, Tuple, Union
import mitmproxy.net.http.url
from mitmproxy.coretypes import multidict
from mitmproxy.net.http import cookies, multipart
from mitmproxy.net.http import message
from mitmproxy.net.http.headers import Headers
from mitmproxy.utils.strutils import always_bytes, always_str
@dataclass
class RequestData(message.MessageData):
    # Decoded target host (str) and TCP port.
    host: str
    port: int
    # Raw wire-level request-line / pseudo-header fields, kept as bytes so
    # the original request round-trips exactly.
    method: bytes
    scheme: bytes
    authority: bytes
    path: bytes
class Request(message.Message):
"""
An HTTP request.
"""
data: RequestData
    def __init__(
        self,
        host: str,
        port: int,
        method: bytes,
        scheme: bytes,
        authority: bytes,
        path: bytes,
        http_version: bytes,
        headers: Union[Headers, Tuple[Tuple[bytes, bytes], ...]],
        content: Optional[bytes],
        trailers: Union[None, Headers, Tuple[Tuple[bytes, bytes], ...]],
        timestamp_start: float,
        timestamp_end: Optional[float],
    ):
        """Create a request from its wire-level components.

        str values are accepted where bytes are expected (and bytes for
        ``host``) and converted strictly, to keep older callers working.
        """
        # auto-convert invalid types to retain compatibility with older code.
        if isinstance(host, bytes):
            host = host.decode("idna", "strict")
        if isinstance(method, str):
            method = method.encode("ascii", "strict")
        if isinstance(scheme, str):
            scheme = scheme.encode("ascii", "strict")
        if isinstance(authority, str):
            authority = authority.encode("ascii", "strict")
        if isinstance(path, str):
            path = path.encode("ascii", "strict")
        if isinstance(http_version, str):
            http_version = http_version.encode("ascii", "strict")
        # str content is rejected outright: the caller must pick an encoding.
        if isinstance(content, str):
            raise ValueError(f"Content must be bytes, not {type(content).__name__}")
        if not isinstance(headers, Headers):
            headers = Headers(headers)
        if trailers is not None and not isinstance(trailers, Headers):
            trailers = Headers(trailers)
        self.data = RequestData(
            host=host,
            port=port,
            method=method,
            scheme=scheme,
            authority=authority,
            path=path,
            http_version=http_version,
            headers=headers,
            content=content,
            trailers=trailers,
            timestamp_start=timestamp_start,
            timestamp_end=timestamp_end,
        )
def __repr__(self) -> str:
if self.host and self.port:
hostport = f"{self.host}:{self.port}"
else:
hostport = ""
path = self.path or ""
return f"Request({self.method} {hostport}{path})"
    @classmethod
    def make(
        cls,
        method: str,
        url: str,
        content: Union[bytes, str] = "",
        headers: Union[Headers, Dict[Union[str, bytes], Union[str, bytes]], Iterable[Tuple[bytes, bytes]]] = ()
    ) -> "Request":
        """
        Simplified API for creating request objects.

        Args:
            method: HTTP method, e.g. "GET".
            url: absolute URL the request targets.
            content: request body; a str is assigned via ``text`` (encoded),
                bytes via ``content`` (both update content-length).
            headers: a Headers instance, a mapping, or an iterable of
                (bytes, bytes) pairs.
        """
        # Headers can be list or dict, we differentiate here.
        if isinstance(headers, Headers):
            pass
        elif isinstance(headers, dict):
            headers = Headers(
                (always_bytes(k, "utf-8", "surrogateescape"),
                 always_bytes(v, "utf-8", "surrogateescape"))
                for k, v in headers.items()
            )
        elif isinstance(headers, Iterable):
            headers = Headers(headers)
        else:
            raise TypeError("Expected headers to be an iterable or dict, but is {}.".format(
                type(headers).__name__
            ))
        # Start from an empty request, then fill in url/content via the
        # property setters so derived fields stay consistent.
        req = cls(
            "",
            0,
            method.encode("utf-8", "surrogateescape"),
            b"",
            b"",
            b"",
            b"HTTP/1.1",
            headers,
            b"",
            None,
            time.time(),
            time.time(),
        )
        req.url = url
        # Assign this manually to update the content-length header.
        if isinstance(content, bytes):
            req.content = content
        elif isinstance(content, str):
            req.text = content
        else:
            raise TypeError(f"Expected content to be str or bytes, but is {type(content).__name__}.")
        return req
@property
def first_line_format(self) -> str:
"""
HTTP request form as defined in `RFC7230 <https://tools.ietf.org/html/rfc7230#section-5.3>`_.
origin-form and asterisk-form are subsumed as "relative".
"""
if self.method == "CONNECT":
return "authority"
elif self.authority:
return "absolute"
else:
return "relative"
    @property
    def method(self) -> str:
        """
        HTTP request method, e.g. "GET".

        Always returned upper-cased; stored raw in ``data.method``.
        """
        return self.data.method.decode("utf-8", "surrogateescape").upper()

    @method.setter
    def method(self, val: Union[str, bytes]) -> None:
        self.data.method = always_bytes(val, "utf-8", "surrogateescape")
    @property
    def scheme(self) -> str:
        """
        HTTP request scheme, which should be "http" or "https".
        """
        return self.data.scheme.decode("utf-8", "surrogateescape")

    @scheme.setter
    def scheme(self, val: Union[str, bytes]) -> None:
        self.data.scheme = always_bytes(val, "utf-8", "surrogateescape")
    @property
    def authority(self) -> str:
        """
        HTTP request authority.

        For HTTP/1, this is the authority portion of the request target
        (in either absolute-form or authority-form)
        For HTTP/2, this is the :authority pseudo header.
        """
        try:
            return self.data.authority.decode("idna")
        except UnicodeError:
            # Fall back to a lossless decoding for non-IDNA byte sequences.
            return self.data.authority.decode("utf8", "surrogateescape")

    @authority.setter
    def authority(self, val: Union[str, bytes]) -> None:
        if isinstance(val, str):
            try:
                val = val.encode("idna", "strict")
            except UnicodeError:
                # Mirror of the getter's fallback: keep arbitrary text intact.
                val = val.encode("utf8", "surrogateescape")  # type: ignore
        self.data.authority = val
@property
def host(self) -> str:
"""
Target host. This may be parsed from the raw request
(e.g. from a ``GET http://example.com/ HTTP/1.1`` request line)
or inferred from the proxy mode (e.g. an IP in transparent mode).
Setting the host attribute also updates the host header and authority information, if present.
"""
return self.data.host
@host.setter
def host(self, val: Union[str, bytes]) -> None:
self.data.host = always_str(val, "idna", "strict")
# Update host header
if "Host" in self.data.headers:
self.data.headers["Host"] = val
# Update authority
if self.data.authority:
self.authority = mitmproxy.net.http.url.hostport(self.scheme, self.host, self.port)
@property
def host_header(self) -> Optional[str]:
"""
The request's host/authority header.
This property maps to either ``request.headers["Host"]`` or
``request.authority``, depending on whether it's HTTP/1.x or HTTP/2.0.
"""
if self.is_http2:
return self.authority or self.data.headers.get("Host", None)
else:
return self.data.headers.get("Host", None)
@host_header.setter
def host_header(self, val: Union[None, str, bytes]) -> None:
if val is None:
if self.is_http2:
self.data.authority = b""
self.headers.pop("Host", None)
else:
if self.is_http2:
self.authority = val # type: ignore
if not self.is_http2 or "Host" in self.headers:
# For h2, we only overwrite, but not create, as :authority is the h2 host header.
self.headers["Host"] = val
@property
def port(self) -> int:
"""
Target port
"""
return self.data.port
@port.setter
|
planaspa/Data-Mining | tests/test_graphDb.py | Python | mit | 4,201 | 0 | from src.graphDb import *
db = 'db/test.db'
def test_text_format():
assert text_format("asdkjhaeih") == "asdkjhaeih"
assert text_format("as&dkj>hae<ih") == "as&dkj>hae<ih"
assert text_format("") == ""
def test_creatingGroups():
conn = sqlite3.connect(db)
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
| ", LAT, LONG, FOLLOWERS) "
"VALUES(0, 'test2 test0',0 , 0,-5.6, 6.12, 105)")
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", FOLLOWERS) "
"VALUES(1, 'test2 test0',0 , 0, 5)")
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", FOLLOWERS) "
"VALUES(2, 'test2',0 , | 0, 30)")
c = conn.cursor()
groups1 = creatingGroups(c, 2)
groups2 = creatingGroups(c, 4)
conn.execute("DELETE FROM TWEETS WHERE ID=0")
conn.execute("DELETE FROM TWEETS WHERE ID=1")
conn.execute("DELETE FROM TWEETS WHERE ID=2")
# Closing the connection
conn.close()
assert groups1 == [5, 55, 105]
assert groups2 == [5, 30, 55, 80, 105]
def test_numberOfTweetsPerGroup():
conn = sqlite3.connect(db)
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", LAT, LONG, FOLLOWERS) "
"VALUES(0, 'test2 test0',0 , 0,-5.6, 6.12, 105)")
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", FOLLOWERS) "
"VALUES(1, 'test2 test0',0 , 0, 5)")
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", FOLLOWERS) "
"VALUES(2, 'test2',0 , 0, 30)")
c = conn.cursor()
groups1 = creatingGroups(c, 2)
groups2 = creatingGroups(c, 4)
tweetsPerGroup1 = numberOfTweetsPerGroup(c, groups1)
tweetsPerGroup2 = numberOfTweetsPerGroup(c, groups2)
conn.execute("DELETE FROM TWEETS WHERE ID=0")
conn.execute("DELETE FROM TWEETS WHERE ID=1")
conn.execute("DELETE FROM TWEETS WHERE ID=2")
# Closing the connection
conn.close()
assert tweetsPerGroup1 == [1, 1]
assert tweetsPerGroup2 == [1, 0, 0, 1]
def test_numberOfReTweetsPerGroup():
conn = sqlite3.connect(db)
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", LAT, LONG, FOLLOWERS) "
"VALUES(0, 'test2 test0',5 , 5,-5.6, 6.12, 105)")
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", FOLLOWERS) "
"VALUES(1, 'test2 test0',0 , 0, 5)")
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", FOLLOWERS) "
"VALUES(2, 'test2',3 , 3, 30)")
c = conn.cursor()
groups1 = creatingGroups(c, 2)
groups2 = creatingGroups(c, 4)
rtsPerGroup1 = numberOfReTweetsPerGroup(c, groups1)
rtsPerGroup2 = numberOfReTweetsPerGroup(c, groups2)
conn.execute("DELETE FROM TWEETS WHERE ID=0")
conn.execute("DELETE FROM TWEETS WHERE ID=1")
conn.execute("DELETE FROM TWEETS WHERE ID=2")
# Closing the connection
conn.close()
assert rtsPerGroup1 == [3, 5]
assert rtsPerGroup2 == [3, 0, 0, 5]
def test_numberOfFavsPerGroup():
conn = sqlite3.connect(db)
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", LAT, LONG, FOLLOWERS) "
"VALUES(0, 'test2 test0',5 , 5,-5.6, 6.12, 105)")
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", FOLLOWERS) "
"VALUES(1, 'test2 test0',0 , 0, 5)")
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", FOLLOWERS) "
"VALUES(2, 'test2',3 , 3, 30)")
c = conn.cursor()
groups1 = creatingGroups(c, 2)
groups2 = creatingGroups(c, 4)
favsPerGroup1 = numberOfFavsPerGroup(c, groups1)
favsPerGroup2 = numberOfFavsPerGroup(c, groups2)
conn.execute("DELETE FROM TWEETS WHERE ID=0")
conn.execute("DELETE FROM TWEETS WHERE ID=1")
conn.execute("DELETE FROM TWEETS WHERE ID=2")
# Closing the connection
conn.close()
assert favsPerGroup1 == [3, 5]
assert favsPerGroup2 == [3, 0, 0, 5]
|
kaedroho/wagtail | wagtail/admin/localization.py | Python | bsd-3-clause | 3,698 | 0.000812 | import pytz
from django.conf import settings
from django.utils.dates import MONTHS, WEEKDAYS, WEEKDAYS_ABBR
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
# Wagtail languages with >=90% coverage
# This list is manually maintained
WAGTAILADMIN_PROVIDED_LANGUAGES = [
('ar', gettext_lazy('Arabic')),
('ca', gettext_lazy('Catalan')),
('cs', gettext_lazy('Czech')),
('de', gettext_lazy('German')),
('el', gettext_lazy('Greek')),
('en', gettext_lazy('English')),
('es', gettext_lazy('Spanish')),
('et', gettext_lazy('Estonian')),
('fi', gettext_lazy('Finnish')),
('fr', gettext_lazy('French')),
('gl', gettext_lazy('Galician')),
('hr', gettext_lazy('Croatian')),
('hu', gettext_lazy('Hungarian')),
('id-id', gettext_lazy('Indonesian')),
('is-is', gettext_lazy('Icelandic')),
('it', gettext_lazy('Italian')),
('ja', gettext_lazy('Japanese')),
('ko', gettext_lazy('Korean')),
('lt', gettext_lazy('Lithuanian')),
('mn', gettext_lazy('Mongolian')),
('nb', gettext_lazy('Norwegian Bokmål')),
('nl-nl', gettext_lazy('Netherlands Dutch')),
('fa', gettext_lazy('Persian')),
('pl', gettext_lazy('Polish')),
('pt-br', gettext_lazy('Brazilian Portuguese')),
('pt-pt', gettext_lazy('Portuguese')),
('ro', gettext_lazy('Romanian')),
('ru', gettext_lazy('Russian')),
('sv', gettext_lazy('Swedish')),
('sk-sk', gettext_lazy('Slovak')),
('th', gettext_lazy('Thai')),
('tr', gettext_lazy('Turkish')),
('tr-tr', gettext_lazy('Turkish (Turkey)')),
('uk', gettext_lazy('Ukrainian')),
('zh-hans', gettext_lazy('Chinese (Simplified)')),
('zh-hant', gettext_lazy('Chinese (Traditional)')),
]
# Translatable strings to be made available to JavaScript code
# as the wagtailConfig.STRINGS object
def get_js_translation_strings():
return {
'DELETE': _('Delete'),
'EDIT': _('Edit'),
'PAGE': _('Page'),
'PAGES': _('Pages'),
'LOADING': _('Loading…'),
'NO_RESULTS': _('No results'),
'SERVER_ERROR': _('Server Error'),
'SEE_ALL': _('See all'),
'CLOSE_EXPLORER': _('Close explorer'),
'ALT_TEXT': _('Alt text'),
'DECORATIVE_IMAGE': _('Decorative image'),
'WRITE_HERE': _('Write here…'),
'HORIZONTAL_LINE': _('Horizontal line'),
'LINE_BREAK': _('Line break'),
'UNDO': _('Undo'),
'REDO': _('Redo'),
'RELOAD_PAGE': _('Reload the page'),
'RELOAD_EDITOR': _('Reload saved content'),
'SHOW_LATEST_CONTENT': _('Show latest content'),
'SHOW_ERROR': _('Show error'),
'EDITOR_CRASH': _('The editor just crashed. Content has been reset to the last saved | version.'),
'BROKEN_LINK': _('Broken link'),
'MISSING_DOCUMENT': _('Missing document'),
'CLOSE': _('Close'),
'EDIT_PAGE': _('Edit \'{title}\''),
'VIEW_CHILD_PAGES_OF_PAGE | ': _('View child pages of \'{title}\''),
'PAGE_EXPLORER': _('Page explorer'),
'MONTHS': [str(m) for m in MONTHS.values()],
# Django's WEEKDAYS list begins on Monday, but ours should start on Sunday, so start
# counting from -1 and use modulo 7 to get an array index
'WEEKDAYS': [str(WEEKDAYS[d % 7]) for d in range(-1, 6)],
'WEEKDAYS_SHORT': [str(WEEKDAYS_ABBR[d % 7]) for d in range(-1, 6)],
}
def get_available_admin_languages():
return getattr(settings, 'WAGTAILADMIN_PERMITTED_LANGUAGES', WAGTAILADMIN_PROVIDED_LANGUAGES)
def get_available_admin_time_zones():
return getattr(settings, 'WAGTAIL_USER_TIME_ZONES', pytz.common_timezones)
|
podhmo/kamo | demo/fizzbuzz.py | Python | mit | 224 | 0 | from kamo import Template
template = Template("""
%for x in range(1, N):
%if x % 15 == 0:
"fizzbuzz"
%elif x % 3 == 0:
"fizz"
%elif x % 5 == 0:
"buzz"
%else:
${x | }
%endif
%endfor
""")
print(template.re | nder(N=100))
|
tvtsoft/odoo8 | addons/website_sale_digital/controllers/main.py | Python | agpl-3.0 | 4,318 | 0.002547 | # -*- coding: utf-8 -*-
import base64
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.controllers.main import Website
from openerp.addons.website_portal.controllers.main import website_account
from openerp.addons.website_sale.controllers.main import website_sale
from cStringIO import StringIO
from werkzeug.utils import redirect
class website_sale_digital_confirmation(website_sale):
@http.route([
'/shop/confirmation',
], type='http', auth="public", website=True)
def payment_confirmation(self, **post):
response = super(website_sale_digital_confirmation, self).payment_confirmation(**post)
order_lines = response.qcontext['order'].order_line
digital_content = map(lambda x: x.product_id.digital_content, order_lines)
response.qcontext.update(digital=any(digital_content))
return response
class website_sale_digital(website_account):
orders_page = '/my/orders'
@http.route([
'/my/orders/<int:order>',
], type='http', auth='user', website=True)
def orders_followup(self, order=None, **post):
response = super(website_sale_digital, self).orders_followup(order=order, **post)
order_products_attachments = {}
order = response.qcontext['order']
invoiced_lines = request.env['account.invoice.line'].sudo().search([('invoice_id', 'in', order.invoice_ids.ids), ('invoice_id.state', '=', 'paid')])
purchased_products_attachments = {}
for il in invoiced_lines:
p_obj = il.product_id
# Ignore products that do not have digital content
if not p_obj.product_tmpl_id.digital_content:
continue
# Search for product attachments
A = request.env['ir.attachment']
p_id = p_obj.id
template = p_obj.product_tmpl_id
att = A.search_read(
domain=['|', '&', ('res_model', '=', p_obj._name), ('res_id', '=', p_id), '&', ('res_model', '=', template._name), ('res_id', '=', template.id)],
fields=['name', 'write_date'],
order='write_date desc',
)
# Ignore products with no attachments
if not att:
continue
purchased_products_attachments[p_id] = att
response.qcontext.update({
'digital_attachments': purchased_products_attachments,
})
return response
@http.route([
'/my/download',
], type='http', auth='public')
def download_attachment(self, attachment_id):
# Check if this is a valid attachment id
attachment = request.env['ir.attachment'].sudo().search_read(
[('id', '=', int(attachment_id))],
["name", "datas", "file_type", "res_model", "res_id", "type", "url"]
)
if attachment:
attachment = attachment[0]
else:
return redirect(self.orders_page)
# Check if the user has bought the associated product
res_model = attachment['res_model']
res_id = attachment['res_id']
purchased_products = request.env['account.invoice.line'].get_digital_purchases(request.uid)
if res_model == 'product.product':
if res_id not in purchased_products:
return redirect(self.orders_page)
# Also check for attachments in the product templates
elif res_model == 'product.template':
P = requ | est.env['product.product']
template_ids = map(lambda x: P.browse(x).product_tmpl_id.id, purchased_products)
if res_id not in template_ids:
return redirect(self.orders_page)
else:
return redirect(self.orders_page)
| # The client has bought the product, otherwise it would have been blocked by now
if attachment["type"] == "url":
if attachment["url"]:
return redirect(attachment["url"])
else:
return request.not_found()
elif attachment["datas"]:
data = StringIO(base64.standard_b64decode(attachment["datas"]))
return http.send_file(data, filename=attachment['name'], as_attachment=True)
else:
return request.not_found()
|
andyzsf/django | tests/template_tests/syntax_tests/test_template_tag.py | Python | bsd-3-clause | 2,410 | 0.00083 | from django.template.base import TemplateSyntaxError
from django.template.loader import get_template
from django.test import SimpleTestCase
from .utils import render, setup
class TemplateTagTests(SimpleTestCase):
@setup({'templatetag01': '{% templatetag openblock %}'})
def test_templatetag01(self):
output = render('templatetag01')
self.assertEqual(output, '{%')
@setup({'templatetag02': '{% templatetag closeblock %}'})
def test_templatetag02(self):
output = render('templatetag02')
self.assertEqual(output, '%}')
@setup({'templatetag03': '{% templatetag openvariable %}'})
def test_templatetag03(self):
output = render('templatetag03')
self.assertEqual(output, '{{')
@setup({'templatetag04': '{% templatetag closevariable %}'})
def test_templatetag04(self):
output = render('templatetag04')
self.assertEqual(output, '}}')
@setup({'templatetag05': '{% templatetag %}'})
def test_templatetag05(self):
with self.assertRaises(TemplateSyntaxError):
get_template('templatetag05')
@setup({'templatetag06': '{% templatetag foo %}'})
def test_templatetag06(self):
with self.assertRaises(TemplateSyntaxError):
get_template('templatetag06')
@setup({'templatetag07': '{% templatetag openbrace %}'})
def test_templatetag07(self):
output = render('templatetag07')
self.assertEqual(output, '{')
@setup({'templatetag08': '{% templatetag | closebrace %}'})
def test_templatetag08(self):
output = render('templatetag08')
self.assertEqual(output, '}')
@setup({'templatetag09': '{% templatetag openbrace %}{% templatetag openbrace %}'})
def test_templatetag09(self):
output = render('templatetag09')
self.asse | rtEqual(output, '{{')
@setup({'templatetag10': '{% templatetag closebrace %}{% templatetag closebrace %}'})
def test_templatetag10(self):
output = render('templatetag10')
self.assertEqual(output, '}}')
@setup({'templatetag11': '{% templatetag opencomment %}'})
def test_templatetag11(self):
output = render('templatetag11')
self.assertEqual(output, '{#')
@setup({'templatetag12': '{% templatetag closecomment %}'})
def test_templatetag12(self):
output = render('templatetag12')
self.assertEqual(output, '#}')
|
rvmoura96/projeto-almoxarifado | almoxarifado/migrations/0002_auto_20170929_1929.py | Python | mit | 1,726 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-29 22:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('almoxarifado', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='equipamento',
name='fabricante',
field=models.CharField(default=None, max_length=90),
),
migrations.AlterField(
model_name='equipamento',
name='ativo_imobilizado',
field=models.PositiveIntegerField(default=None),
),
migrations.AlterField(
model_name='equipamento',
name='data_entrega',
| field=models.DateTimeField(default=None),
),
migrations.AlterField(
model_name='equipamento',
name='data_retirada',
field=models | .DateTimeField(default=None),
),
migrations.AlterField(
model_name='equipamento',
name='localizacao',
field=models.CharField(default=None, max_length=150),
),
migrations.AlterField(
model_name='equipamento',
name='observacoes',
field=models.TextField(default=None),
),
migrations.AlterField(
model_name='equipamento',
name='serial_number',
field=models.CharField(default=None, max_length=30),
),
migrations.AlterField(
model_name='equipamento',
name='status',
field=models.CharField(default=None, max_length=12),
),
]
|
sc0w/pluma | tools/generate-plugin.py | Python | gpl-2.0 | 5,946 | 0.005383 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# generate-plugin.py - pluma plugin skeletton generator
# This file is part of pluma
#
# Copyright (C) 2006 - Steve Frécinaux
#
# pluma is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pluma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Pu | blic License
# along with pluma; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fift | h Floor,
# Boston, MA 02110-1301 USA
import re
import os
import sys
import getopt
from datetime import date
import preprocessor
# Default values of command line options
options = {
'language' : 'c',
'description' : 'Type here a short description of your plugin',
'author' : os.getenv('USERNAME'),
'email' : os.getenv('LOGNAME') + '@email.com',
'standalone' : False,
'with-side-pane' : False,
'with-bottom-pane' : False,
'with-menu' : False,
'with-config-dlg' : False
}
USAGE = """Usage:
%s [OPTIONS...] pluginname
""" % os.path.basename(sys.argv[0])
HELP = USAGE + """
generate skeleton source tree for a new pluma plugin.
Options:
--author Set the author name
--email Set the author email
--description Set the description you want for your new plugin
--standalone Is this plugin intended to be distributed as a
standalone package ? (N/A)
--language / -l Set the language (C) [default: %(language)s]
--with-$feature Enable $feature
--without-$feature Disable $feature
--help / -h Show this message and exits
Features:
config-dlg Plugin configuration dialog
menu Plugin menu entries
side-pane Side pane item (N/A)
bottom-pane Bottom pane item (N/A)
""" % options
TEMPLATE_DIR = os.path.join(os.path.dirname(sys.argv[0]), "plugin_template")
# Parsing command line options
try:
opts, args = getopt.getopt(sys.argv[1:],
'l:h',
['language=',
'description=',
'author=',
'email=',
'standalone',
'with-menu' , 'without-menu',
'with-side-pane' , 'without-side-pane',
'with-bottom-pane' , 'without-bottom-pane',
'with-config-dlg' , 'without-config-dlg',
'help'])
except getopt.error, exc:
print >>sys.stderr, '%s: %s' % (sys.argv[0], str(exc))
print >>sys.stderr, USAGE
sys.exit(1)
for opt, arg in opts:
if opt in ('-h', '--help'):
print >>sys.stderr, HELP
sys.exit(0)
elif opt in ('--description', '--author', '--email'):
options[opt[2:]] = arg
elif opt in ('-l', '--language'):
options['language'] = arg.lower()
elif opt == '--standalone':
options['standalone'] = True
elif opt[0:7] == '--with-':
options['with-' + opt[7:]] = True
elif opt[0:10] == '--without-':
options['with-' + opt[10:]] = False
# What's the new plugin name ?
if len(args) < 1:
print >>sys.stderr, USAGE
sys.exit(1)
plugin_name = args[0]
plugin_id = re.sub('[^a-z0-9_]', '', plugin_name.lower().replace(' ', '_'))
plugin_module = plugin_id.replace('_', '-')
directives = {
'PLUGIN_NAME' : plugin_name,
'PLUGIN_MODULE' : plugin_module,
'PLUGIN_ID' : plugin_id,
'AUTHOR_FULLNAME' : options['author'],
'AUTHOR_EMAIL' : options['email'],
'DATE_YEAR' : date.today().year,
'DESCRIPTION' : options['description'],
}
# Files to be generated by the preprocessor, in the form "template : outfile"
output_files = {
'Makefile.am': '%s/Makefile.am' % plugin_module,
'pluma-plugin.desktop.in': '%s/%s.pluma-plugin.desktop.in' % (plugin_module, plugin_module)
}
if options['language'] == 'c':
output_files['pluma-plugin.c'] = '%s/%s-plugin.c' % (plugin_module, plugin_module)
output_files['pluma-plugin.h'] = '%s/%s-plugin.h' % (plugin_module, plugin_module)
else:
print >>sys.stderr, 'Value of --language should be C'
print >>sys.stderr, USAGE
sys.exit(1)
if options['standalone']:
output_files['configure.ac'] = 'configure.ac'
if options['with-side-pane']:
directives['WITH_SIDE_PANE'] = True
if options['with-bottom-pane']:
directives['WITH_BOTTOM_PANE'] = True
if options['with-menu']:
directives['WITH_MENU'] = True
if options['with-config-dlg']:
directives['WITH_CONFIGURE_DIALOG'] = True
# Generate the plugin base
for infile, outfile in output_files.iteritems():
print 'Processing %s\n' \
' into %s...' % (infile, outfile)
infile = os.path.join(TEMPLATE_DIR, infile)
outfile = os.path.join(os.getcwd(), outfile)
if not os.path.isfile(infile):
print >>sys.stderr, 'Input file does not exist : %s.' % os.path.basename(infile)
continue
# Make sure the destination directory exists
if not os.path.isdir(os.path.split(outfile)[0]):
os.makedirs(os.path.split(outfile)[0])
# Variables relative to the generated file
directives['DIRNAME'], directives['FILENAME'] = os.path.split(outfile)
# Generate the file
preprocessor.process(infile, outfile, directives.copy())
print 'Done.'
# ex:ts=4:et:
|
KentaYamada/Siphon | app/config.py | Python | mit | 1,835 | 0 | """
Siphon
config.py
Siphon app config
Author: Kenta Yamada
See configration options
Flask
https://flask.palletsprojects.com/en/1.1.x/config/#builtin-configuration-values
Flask-JWT-extended
https://flask-jwt-extended.readthedocs.io/en/latest/
"""
from os import environ
class BaseConfig:
# flask options
DEBUG = False
ENV = ''
JSON_AS_ASCII = False
JSON_SORT_KEYS = True
TESTING = False
SECRET_KEY = ''
# JWT options
JWT_BLACKLIST_ENABLED = True
JWT_SECRET_KEY = ''
JWT_BACKLIST_TOKEN_CHECKS = ['identity']
JWT_ACCESS_TOKEN_EXPIRES = False
JWT_REFRESH_TOKEN_EXPIRES = False
# database configs
DATABASE = {}
def __str__(self):
return 'app.config.{0}'.format(type(self).__name__)
class ProductionConfig(BaseConfig):
ENV = 'production'
JWT_SECRET_KEY = ''
class StagingConfig(BaseConfig):
ENV = 'production'
JWT_SECRET_KEY = ''
class TestConfig(BaseConfig):
ENV = 'test'
TESTING = True
TEST_ROOT_DIR = './app/tests'
DATABASE = {
'host': 'localhost',
| 'dbname': 'siphon_test',
'user': 'kenta',
'password': 'kenta'
}
JWT_BLACKLIST_ENABLED = False
JWT_SECRET_KEY = 'testing'
class DevelopmentConfig(BaseConfig):
DEBUG = True
ENV = 'development'
DATABASE = {
'host': 'localhost',
'dbname': 'siphon_dev',
'user': 'kenta',
'password': 'kenta'
}
JWT_BLACKLIST_ENABLED = True
JWT_SECRET_KEY = 'development'
d | ef get_config():
configs = {
'production': ProductionConfig(),
'test': TestConfig(),
'development': DevelopmentConfig()}
app_env = environ.get('APP_TYPE')
if app_env not in configs:
raise RuntimeError()
return configs[app_env]
|
eharney/nova | nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py | Python | apache-2.0 | 4,737 | 0 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.contrib import hypervisors
from nova.tests.api.openstack.compute.contrib import test_hypervisors
from nova.tests.api.openstack import fakes
class ExtendedHypervisorsTest(test_hypervisors.HypervisorsTest):
def setUp(self):
super(ExtendedHypervisorsTest, self).setUp()
self.ext_mgr.extensions['os-extended-hypervisors'] = True
self.controller = hypervisors.HypervisorsController(self.ext_mgr)
def test_view_hypervisor_detail_noservers(self):
result = self.controller._view_hypervisor(
test_hypervisors.TEST_HYPERS[0], True)
self.assertEqual(result, dict(
id=1,
hypervisor_hostname="hyper1",
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100,
host_ip='1.1.1.1',
service=dict(id=1, host='compute1')))
def test_detail(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/de | tail',
use_admin_context=True)
result = self.controller.detail(req)
self.assertEqual(result, dict(hypervisors=[
dict(id=1,
service=dict(id=1, host="compute1"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
| memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100,
host_ip='1.1.1.1'),
dict(id=2,
service=dict(id=2, host="compute2"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper2",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100,
host_ip='2.2.2.2')]))
def test_show_withid(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1')
result = self.controller.show(req, '1')
self.assertEqual(result, dict(hypervisor=dict(
id=1,
service=dict(id=1, host="compute1"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100,
host_ip='1.1.1.1')))
|
OregonWalks/qgis_vector_selectbypoint | vector_selectbypoint.py | Python | gpl-3.0 | 4,244 | 0.005184 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
vector_selectbypoint
A QGIS plugin
Select vector features, point and click.
-------------------
begin : 2014-04-07
copyright : (C) 2014 by Brylie Oxley
email : brylie@geolibre.org
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
# Initialize Qt resources from file resources.py
import resources_rc
# Import the code for the dialog
from vector_selectbypointdialog import vector_selectbypointDialog
import os.path
class vector_selectbypoint:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
# Reference map canvas
self.canvas = self.iface.mapCanvas()
# Emit QgsPoint after each click on canvas
self.clickTool = QgsMapToolEmitPoint(self.canvas)
|
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
| locale = QSettings().value("locale/userLocale")[0:2]
localePath = os.path.join(self.plugin_dir, 'i18n', 'vector_selectbypoint_{}.qm'.format(locale))
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
self.dlg = vector_selectbypointDialog()
# Create the GUI
self.canvas.setMapTool( self.clickTool )
def initGui(self):
# Create action that will start plugin configuration
self.action = QAction(
QIcon(":/plugins/vector_selectbypoint/icon.png"),
u"Select by point and click.", self.iface.mainWindow())
# connect the action to the run method
self.action.triggered.connect(self.run)
# Add toolbar button and menu item
self.iface.addToolBarIcon(self.action)
self.iface.addPluginToMenu(u"&Select vector features by point and click.", self.action)
# Signal connections for mouse clicks
result = QObject.connect(self.clickTool, SIGNAL("canvasClicked(const QgsPoint &, Qt::MouseButton)"), self.handleMouseDown)
#QMessageBox.information( self.iface.mainWindow(), "Info", "connect = %s" %str(result) )
def handleMouseDown(self, point, button):
#QMessageBox.information( self.iface.mainWindow(), "Info", "X,Y = %s, %s" % (str(point.x()), str(point.y())) )
self.dlg.clearTextBrowser()
self.dlg.setTextBrowser( str(point.x()) + " , " + str(point.y()) )
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu(u"&Select vector features by point and click.", self.action)
self.iface.removeToolBarIcon(self.action)
# run method that performs all the real work
def run(self):
# Activate click tool
# show the dialog
self.dlg.show()
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
if result == 1:
# do something useful (delete the line containing pass and
# substitute with your code)
pass
|
NicovincX2/Python-3.5 | Géométrie/Fractales/arbre.py | Python | gpl-3.0 | 261 | 0.007663 | # -*- coding: utf-8 -*-
import os
from turtle import*
def T(l):
if l > | 4:
pensize(l / 6)
fd(l)
rt(33)
T(l * .7)
lt(66)
T(l * .7)
rt(33)
bk(l)
seth(90)
goto(0, -99)
T(99)
os. | system("pause")
|
chaosdorf/chaospizza | src/config/settings/base.py | Python | mit | 5,807 | 0.001033 | """
Django settings for web-application project.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import environ
# (chaosdorf-pizza/config/settings/base.py - 3 = chaosdorf-pizza/)
ROOT_DIR = environ.Path(__file__) - 3
# chaosdorf-pizza/chaospizza
APPS_DIR = ROOT_DIR.path('chaospizza')
# Load django configuration from environment variables
env = environ.Env()
# environ.Env.read_env(str(ROOT_DIR.path('.env')))
# DEBUG MODE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.11/ref/settings/#debug
# Disable debug by default to prevent accidental usage
DEBUG = False
# APP CONFIGURATION
# https://docs.djangoproject.com/en/1.11/ref/settings/#installed-apps
# ------------------------------------------------------------------------------
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
'bootstrap3',
]
LOCAL_APPS = [
'chaospizza.menus.apps.MenusConfig',
'chaospizza.orders.apps.OrdersConfig',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# WSGI Configuration
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.11/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
# ------------------------------------------------------------------------------
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = False
USE_L10N = False
USE_TZ = True
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {'default': env.db('DJANGO_DATABASE_URL')}
# EMAIL CONFIGURATION
# ------------------ | ------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.cor | e.mail.backends.smtp.EmailBackend')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[chaospizza]')
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='chaospizza <noreply@pizza.chaosdorf.de>')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# PASSWORD STORAGE SETTINGS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# Password validation
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'},
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
]
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.11/ref/settings/#templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [str(APPS_DIR.path('templates'))],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'chaospizza.orders.context_processors.user_session',
],
},
},
]
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
STATICFILES_DIRS = [str(APPS_DIR.path('static'))]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = env('DJANGO_STATIC_ROOT', default=str(ROOT_DIR('staticfiles')))
STATIC_URL = env('DJANGO_STATIC_URL', default='/static/')
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
|
dcf21/4most-4gp | src/pythonModules/fourgp_pipeline/fourgp_pipeline/pipeline.py | Python | mit | 6,115 | 0.003434 | # -*- coding: utf-8 -*-
"""
The `Pipeline` class represents a pipeline which runs a sequence of tasks for analysing spectra. By defining new
descendents of the PipelineTask class, and appending them to a Pipeline, it is
possible to configure which 4GP classes it uses to perform each task within the
pipeline -- e.g. determining RVs, or continuum normalising spectra.
"""
import logging
from .spectrum_analysis import SpectrumAnalysis
class Pipeline:
    """A spectrum-analysis pipeline: an ordered sequence of PipelineTasks."""

    def __init__(self):
        """Create an empty pipeline with no registered tasks."""
        self.task_names = {}
        self.task_list = []

    def append_task(self, task_name, task_implementation):
        """Append a new task to the end of the actions performed by this pipeline.

        :param task_name: unique string name of the task.
        :param task_implementation: a descendent of PipelineTask implementing it.
        :return: None
        """
        assert task_name not in self.task_names, "Pipeline has multiple tasks with the name <{}>".format(task_name)
        assert isinstance(task_implementation, PipelineTask), \
            "A Pipeline task must be a descendent of the class PipelineTask."
        self.task_names[task_name] = True
        self.task_list.append({'name': task_name, 'implementation': task_implementation})

    def analyse_spectrum(self, input_spectrum, spectrum_identifier):
        """Run every registered task over one spectrum, in order.

        :param input_spectrum: the Spectrum object we are to analyse.
        :param spectrum_identifier: string used to identify this spectrum in logs.
        :return: a SpectrumAnalysis object summarising the analysis.
        """
        logging.info("Working on spectrum <{}>".format(spectrum_identifier))
        # Seed the analysis record with the raw input spectrum.
        analysis = SpectrumAnalysis(input_spectrum=input_spectrum)
        analysis.store_result(task_name="initial_input",
                              output_spectrum=input_spectrum,
                              output_metadata=input_spectrum.metadata)
        # Execute tasks in order; stop as soon as one reports failure.
        for entry in self.task_list:
            if analysis.failure:
                break
            entry['implementation'].run_task(spectrum_analysis=analysis)
        return analysis
class PipelineTask:
    """One step of a Pipeline, exposed through a uniform calling API."""

    def __init__(self, configuration=None):
        """Instantiate a pipeline task.

        :param configuration: optional dict of configuration parameters for this task.
        """
        logging.info("Initialising pipeline task <{}>".format(self.task_name()))
        self.configuration = configuration if configuration is not None else {}

    @staticmethod
    def task_name():
        """Return the name of this task; every concrete task must define one."""
        raise NotImplementedError("Descendents of the class PipelineTask must define a name for themselves")

    def run_task(self, spectrum_analysis):
        """Execute this task as the next step in a spectrum's analysis.

        :param spectrum_analysis: SpectrumAnalysis holding the work done so far.
        :return: None
        """
        name = self.task_name()
        # The most recent intermediate spectrum is the input for this task.
        latest_spectrum = spectrum_analysis.intermediate_results[-1]
        try:
            logging.info("Running task <{task_name}>".format(task_name=name))
            outcome = self.task_implementation(
                input_spectrum=latest_spectrum,
                spectrum_analysis=spectrum_analysis
            )
            # Record this task's output as the next intermediate result.
            spectrum_analysis.store_result(
                task_name=name,
                output_spectrum=outcome['spectrum'],
                output_metadata=outcome['metadata']
            )
        except PipelineFailure:
            spectrum_analysis.report_failure(task_name=self.task_name())

    def task_implementation(self, input_spectrum, spectrum_analysis):
        """Perform the actual work of this task; concrete tasks must override.

        :param input_spectrum: latest intermediate Spectrum produced by the pipeline.
        :param spectrum_analysis: complete analysis of this spectrum done so far.
        :return: None
        """
        raise NotImplementedError("The task implementation must be specified for each descendent of the "
                                  "PipelineTask class")
class PipelineFailure(Exception):
    """Raised by PipelineTasks when a task cannot be completed."""
|
Batterfii/tornado | tornado/platform/twisted.py | Python | apache-2.0 | 21,586 | 0.000278 | # Author: Ovidiu Predescu
# Date: July 2011
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the | License.
"""Bridges between the Twisted reactor and Tornado IOLoop.
This module lets you run applications and libraries written for
Twisted in a Tornado application. It can be used in two modes,
depending on which library's underlying event loop you want to use.
This module ha | s been tested with Twisted versions 11.0.0 and newer.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import functools
import numbers
import socket
import sys
import twisted.internet.abstract
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import PosixReactorBase
from twisted.internet.interfaces import \
IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor
from twisted.python import failure, log
from twisted.internet import error
import twisted.names.cache
import twisted.names.client
import twisted.names.hosts
import twisted.names.resolve
from zope.interface import implementer
from tornado.concurrent import Future
from tornado.escape import utf8
from tornado import gen
import tornado.ioloop
from tornado.log import app_log
from tornado.netutil import Resolver
from tornado.stack_context import NullContext, wrap
from tornado.ioloop import IOLoop
from tornado.util import timedelta_to_seconds
@implementer(IDelayedCall)
class TornadoDelayedCall(object):
    """DelayedCall object for Tornado.

    Implements Twisted's ``IDelayedCall`` interface on top of the Tornado
    IOLoop timeout machinery.
    """
    def __init__(self, reactor, seconds, f, *args, **kw):
        self._reactor = reactor
        self._func = functools.partial(f, *args, **kw)
        # Absolute firing time, measured on the reactor's clock.
        self._time = self._reactor.seconds() + seconds
        self._timeout = self._reactor._io_loop.add_timeout(self._time,
                                                           self._called)
        self._active = True

    def _called(self):
        # Deactivate and deregister *before* invoking the callback, so a
        # callback that reschedules itself does not observe a stale entry.
        self._active = False
        self._reactor._removeDelayedCall(self)
        try:
            self._func()
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit propagate to the IOLoop instead of being swallowed.
            app_log.error("_called caught exception", exc_info=True)

    def getTime(self):
        """Return the absolute time (reactor clock) at which this call fires."""
        return self._time

    def cancel(self):
        """Cancel the pending call and remove it from the reactor."""
        self._active = False
        self._reactor._io_loop.remove_timeout(self._timeout)
        self._reactor._removeDelayedCall(self)

    def delay(self, seconds):
        """Push the firing time back by ``seconds`` relative to its current value."""
        self._reactor._io_loop.remove_timeout(self._timeout)
        self._time += seconds
        self._timeout = self._reactor._io_loop.add_timeout(self._time,
                                                           self._called)

    def reset(self, seconds):
        """Re-schedule the call to fire ``seconds`` from now."""
        self._reactor._io_loop.remove_timeout(self._timeout)
        self._time = self._reactor.seconds() + seconds
        self._timeout = self._reactor._io_loop.add_timeout(self._time,
                                                           self._called)

    def active(self):
        """Return True while the call is still pending."""
        return self._active
@implementer(IReactorTime, IReactorFDSet)
class TornadoReactor(PosixReactorBase):
"""Twisted reactor built on the Tornado IOLoop.
`TornadoReactor` implements the Twisted reactor interface on top of
the Tornado IOLoop. To use it, simply call `install` at the beginning
of the application::
import tornado.platform.twisted
tornado.platform.twisted.install()
from twisted.internet import reactor
When the app is ready to start, call ``IOLoop.current().start()``
instead of ``reactor.run()``.
It is also possible to create a non-global reactor by calling
``tornado.platform.twisted.TornadoReactor(io_loop)``. However, if
the `.IOLoop` and reactor are to be short-lived (such as those used in
unit tests), additional cleanup may be required. Specifically, it is
recommended to call::
reactor.fireSystemEvent('shutdown')
reactor.disconnectAll()
before closing the `.IOLoop`.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, io_loop=None):
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
self._io_loop = io_loop
self._readers = {} # map of reader objects to fd
self._writers = {} # map of writer objects to fd
self._fds = {} # a map of fd to a (reader, writer) tuple
self._delayedCalls = {}
PosixReactorBase.__init__(self)
self.addSystemEventTrigger('during', 'shutdown', self.crash)
# IOLoop.start() bypasses some of the reactor initialization.
# Fire off the necessary events if they weren't already triggered
# by reactor.run().
def start_if_necessary():
if not self._started:
self.fireSystemEvent('startup')
self._io_loop.add_callback(start_if_necessary)
# IReactorTime
def seconds(self):
return self._io_loop.time()
def callLater(self, seconds, f, *args, **kw):
dc = TornadoDelayedCall(self, seconds, f, *args, **kw)
self._delayedCalls[dc] = True
return dc
def getDelayedCalls(self):
return [x for x in self._delayedCalls if x._active]
def _removeDelayedCall(self, dc):
if dc in self._delayedCalls:
del self._delayedCalls[dc]
# IReactorThreads
def callFromThread(self, f, *args, **kw):
assert callable(f), "%s is not callable" % f
with NullContext():
# This NullContext is mainly for an edge case when running
# TwistedIOLoop on top of a TornadoReactor.
# TwistedIOLoop.add_callback uses reactor.callFromThread and
# should not pick up additional StackContexts along the way.
self._io_loop.add_callback(f, *args, **kw)
# We don't need the waker code from the super class, Tornado uses
# its own waker.
def installWaker(self):
pass
def wakeUp(self):
pass
# IReactorFDSet
def _invoke_callback(self, fd, events):
if fd not in self._fds:
return
(reader, writer) = self._fds[fd]
if reader:
err = None
if reader.fileno() == -1:
err = error.ConnectionLost()
elif events & IOLoop.READ:
err = log.callWithLogger(reader, reader.doRead)
if err is None and events & IOLoop.ERROR:
err = error.ConnectionLost()
if err is not None:
self.removeReader(reader)
reader.readConnectionLost(failure.Failure(err))
if writer:
err = None
if writer.fileno() == -1:
err = error.ConnectionLost()
elif events & IOLoop.WRITE:
err = log.callWithLogger(writer, writer.doWrite)
if err is None and events & IOLoop.ERROR:
err = error.ConnectionLost()
if err is not None:
self.removeWriter(writer)
writer.writeConnectionLost(failure.Failure(err))
def addReader(self, reader):
if reader in self._readers:
# Don't add the reader if it's already there
return
fd = reader.fileno()
self._readers[reader] = fd
if fd in self._fds:
(_, writer) = self._fds[fd]
self._fds[fd] = (reader, writer)
if writer:
# We already registered this fd for write events,
# update it for read events as well.
self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
else:
with NullContext():
self._fds[fd] = (reader, None)
self._io_loop.add_handler(fd, self._invoke_callback,
|
cartwheelweb/packaginator | apps/core/tests/__init__.py | Python | mit | 32 | 0.03125 | from core.t | ests.test_ga impo | rt * |
SKIRT/PTS | do/core/makewavemovie.py | Python | agpl-3.0 | 2,769 | 0.005058 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.core.makewavemovie Create a movie that runs through all wavelengths in the SKIRT simulation output.
#
# This script creates a movie for the output of each SKIRT simulation specified through the command line argument
# (see below). The movie combines the SEDs (bottom panel) and the pixel frames (top panel, from left to right)
# for up to three instruments, running through all wavelengths in the simulation. The movie is placed next to the
# original file(s) with a similar name (omitting the instrument name) but a different extension.
#
# The script expects the complete output of a SKIRT simulation to be present (including log file etc.).
# If there are no arguments, the script processes all simulation output sets residing in the current directory.
# If the first | argument contains a slash, the script processes all simulation output sets in the indicated directory.
# If the first argument does not contain a slash, the script processes just the simulation in the current directory
# with the indicated prefix.
#
# By default both axes of the SED plot and the luminosity of | the frames are autoscaled. You can hardcode specific
# ranges in the script.
# -----------------------------------------------------------------
# Import standard modules
import sys
# Import the relevant PTS classes and modules
from pts.core.simulation.simulation import createsimulations
from pts.core.plot.wavemovie import makewavemovie
# -----------------------------------------------------------------
# a value of None means that the axis is autoscaled;
# alternatively specify a range through a tuple with min and max values
xlim = None
ylim = None
#xlim = ( 5e-2, 1e3 )
#ylim = ( 1e-13, 1e-9 )
# the percentile values, in range [0,100], used to clip the luminosity values
# loaded from the fits files; the default values are 30 and 100 respectively
from_percentile = 30
to_percentile = 100
# -----------------------------------------------------------------
# NOTE(review): Python 2 print statements -- this script targets Python 2.
print "Starting makewavemovie..."
# get the command-line argument specifying the simulation(s)
argument = sys.argv[1] if len(sys.argv) > 1 else ""
# construct the list of simulation objects and make the movies
for simulation in createsimulations(argument):
    makewavemovie(simulation, xlim=xlim, ylim=ylim, from_percentile=from_percentile, to_percentile=to_percentile)
print "Finished makewavemovie"
|
tunegoon/asteria | asteria/wsgi.py | Python | mit | 1,136 | 0.00088 | """
WSGI config for asteria project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here | , or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asteria.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication |
# application = HelloWorldApplication(application)
|
ebmdatalab/openprescribing | openprescribing/dmd/build_search_filters.py | Python | mit | 3,765 | 0.002125 | from django.db.models import fields, ForeignKey, ManyToOneRel, OneToOneRel
from .obj_types import clss
from .search_schema import schema as search_schema
def build_search_filters(cls):
    """Return list of dicts of options for a QueryBuilder filter.

    See https://querybuilder.js.org/#filters for details.
    """
    field_names = search_schema[cls.obj_type]["fields"]
    return [_build_search_filter(cls, name) for name in field_names]
def _build_search_filter(cls, field_name):
    """Build the QueryBuilder filter definition for a single schema field.

    The BNF code pseudo-field is special-cased; every other field is
    dispatched to a builder selected by the concrete Django field type.
    """
    if field_name == "bnf_code":
        return _build_search_filter_bnf_code_prefox()
    field = cls._meta.get_field(field_name)
    builders = {
        ForeignKey: _build_search_filter_fk,
        ManyToOneRel: _build_search_filter_rev_fk,
        OneToOneRel: _build_search_filter_rev_fk,
        fields.CharField: _build_search_filter_char,
        fields.DateField: _build_search_filter_date,
        fields.BooleanField: _build_search_filter_boolean,
        fields.DecimalField: _build_search_filter_decimal,
    }
    spec = builders[type(field)](field)
    spec["id"] = field_name
    return spec
def _build_search_filter_bnf_code_prefox():
return {
"id": "bnf_code",
"type": "string",
"label": "BNF code",
"operators": ["begins_with", "not_begins_with"],
"validation": {"min": 4},
}
def _build_search_filter_fk(field):
values = field.related_model.objects.values_list("cd", "descr").order_by("descr")
values = [{r[0]: r[1]} for r in values]
# The type is "string", even though the values are actually integers. This is
# because the QueryBuilder library calls parseInt on any values produced by a filter
# of type "integer" (see call to Utils.changeType in getRuleInputValue). It turns
# out that parseInt cannot actually parse integers larger than
# Number.MAX_SAFE_INTEGER, which is (2 ** 53) - 1, or 9007199254740991, and loses
# precision when it tries. This is a problem, because certain dm+d models have
# identifiers larger than Number.MAX_SAFE_INTEGER. Fortunately, Django is able to
# deal with query parameters for integer fields that are submitted as strings.
return {
"type": "string",
"label": field.help_text,
"input": "select",
"values": values,
"operators": ["equal"],
"plugin": "selectpicker",
"plugin_config": {"liveSearch": True, "liveSearchStyle": "contains"},
}
def _build_search_filter_rev_fk(field):
    """Filter definition for a reverse foreign-key relation.

    Follows the intermediate model to the single "real" foreign key it
    holds -- ignoring links back to dm+d object classes and historical
    "prev" links -- and builds a select filter for that key.
    """
    intermediate_model = field.related_model
    candidates = []
    for f in intermediate_model._meta.get_fields():
        if not isinstance(f, ForeignKey):
            continue
        if f.related_model in clss or "prev" in f.name:
            continue
        candidates.append(f)
    assert len(candidates) == 1
    return _build_search_filter_fk(candidates[0])
def _build_search_filter_char(field):
return {
"type": "string",
"label": field.help_text,
"operators": ["contains"],
"validation": {"min": 3},
}
def _build_search_filter_date(field):
return {
"type": "date",
"label": field.help_text,
"operators": ["equal", "before", "after"],
"plugin": "datepicker",
"plugin_config": {"format": "yyyy-mm-dd"},
}
def _build_search_filter_boolean(field | ):
return {
"type": "boolean",
"label": field.help_text,
"input": "radio",
"values": [{1: "Yes"}, {0: "No"}],
"operators": ["equal"],
}
def _build_search_filter_decimal(field):
return {
"type": "double",
"label": field.help_text,
"operators": ["equal", "less than", "greater than"],
}
|
reinbach/django-machina | machina/apps/forum_permission/checker.py | Python | bsd-3-clause | 6,359 | 0.005032 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.db.models import Q
from machina.conf import settings as machina_settings
from machina.core.db.models import get_model
ForumPermission = get_model('forum_permission', 'ForumPermission')
GroupForumPermission = get_model('forum_permission', 'GroupForumPermission')
UserForumPermission = get_model('forum_permission', 'UserForumPermission')
class ForumPermissionChecker(object):
    """
    The ForumPermissionChecker allows to check forum permissions
    on Forum instances.
    """
    # NOTE(review): written for old Django where is_anonymous / is_authenticated
    # are methods (called below), not properties -- confirm target version.
    def __init__(self, user):
        # User (or anonymous user) whose permissions are being checked.
        self.user = user
        # Cache of computed permission codename sets, keyed by forum id.
        self._forum_perms_cache = {}
    def has_perm(self, perm, forum):
        """
        Checks if the considered user has given permission for the passed forum.
        """
        if not self.user.is_anonymous() and not self.user.is_active:
            # An inactive user cannot have permissions
            return False
        elif self.user and self.user.is_superuser:
            # The superuser have all permissions
            return True
        return perm in self.get_perms(forum)
    def get_perms(self, forum):
        """
        Returns the list of permission codenames of all permissions for the given forum.
        """
        # An inactive user has no permissions
        if not self.user.is_anonymous() and not self.user.is_active:
            return []
        user_model = get_user_model()
        user_groups_related_name = user_model.groups.field.related_query_name()
        # Only compute the permission set once per forum; later calls hit the cache.
        if forum.id not in self._forum_perms_cache:
            if self.user and self.user.is_superuser:
                # The superuser has all the permissions
                perms = list(ForumPermission.objects.values_list('codename', flat=True))
            elif self.user:
                default_auth_forum_perms = \
                    machina_settings.DEFAULT_AUTHENTICATED_USER_FORUM_PERMISSIONS
                # Anonymous users are matched via the dedicated flag row,
                # registered users via their user FK.
                user_kwargs_filter = {'anonymous_user': True} if self.user.is_anonymous() \
                    else {'user': self.user}
                # Fetches the permissions of the considered user for the given forum
                user_perms = UserForumPermission.objects.select_related() \
                    .filter(**user_kwargs_filter) \
                    .filter(Q(forum__isnull=True) | Q(forum=forum))
                # Computes the list of permissions that are granted for all the forums
                globally_granted_user_perms = list(
                    filter(lambda p: p.has_perm and p.forum is None, user_perms))
                globally_granted_user_perms = [
                    p.permission.codename for p in globally_granted_user_perms]
                # Computes the list of permissions that are granted on a per-forum basis
                per_forum_granted_user_perms = list(
                    filter(lambda p: p.has_perm and p.forum is not None, user_perms))
                per_forum_granted_user_perms = [
                    p.permission.codename for p in per_forum_granted_user_perms]
                # Computes the list of permissions that are not granted on a per-forum basis
                per_forum_nongranted_user_perms = list(
                    filter(lambda p: not p.has_perm and p.forum is not None, user_perms))
                per_forum_nongranted_user_perms = [
                    p.permission.codename for p in per_forum_nongranted_user_perms]
                # If the considered user have no global permissions, the permissions defined by
                # the DEFAULT_AUTHENTICATED_USER_FORUM_PERMISSIONS settings are used instead
                if self.user.is_authenticated() and not globally_granted_user_perms:
                    globally_granted_user_perms = default_auth_forum_perms
                # Finally computes the list of permission codenames that are granted to
                # the user for the considered forum: per-forum denials override
                # global grants, then per-forum grants are added back.
                granted_user_perms = [
                    c for c in globally_granted_user_perms if
                    c not in per_forum_nongranted_user_perms] + per_forum_granted_user_perms
                granted_user_perms = set(granted_user_perms)
                perms = granted_user_perms
                # If the user is a registered user, we have to check the permissions
                # of its groups in order to determine the additional permissions he could
                # have
                if not self.user.is_anonymous():
                    group_perms = GroupForumPermission.objects.select_related() \
                        .filter(**{'group__{}'.format(user_groups_related_name): self.user}) \
                        .filter(Q(forum__isnull=True) | Q(forum=forum))
                    globally_granted_group_perms = list(
                        filter(lambda p: p.has_perm and p.forum is None, group_perms))
                    globally_granted_group_perms = [
                        p.permission.codename for p in globally_granted_group_perms]
                    per_forum_granted_group_perms = list(
                        filter(lambda p: p.has_perm and p.forum is not None, group_perms))
                    per_forum_granted_group_perms = [
                        p.permission.codename for p in per_forum_granted_group_perms]
                    per_forum_nongranted_group_perms = list(
                        filter(lambda p: not p.has_perm and p.forum is not None, group_perms))
                    per_forum_nongranted_group_perms = [
                        p.permission.codename for p in per_forum_nongranted_group_perms]
                    granted_group_perms = [
                        c for c in globally_granted_group_perms if
                        c not in per_forum_nongranted_group_perms] + per_forum_granted_group_perms
                    # Per-forum user-level denials also override group-level grants.
                    granted_group_perms = filter(
                        lambda x: x not in per_forum_nongranted_user_perms, granted_group_perms)
                    granted_group_perms = set(granted_group_perms)
                    # Includes the permissions granted for the user' groups in the initial set of
                    # permission codenames
                    perms |= granted_group_perms
            self._forum_perms_cache[forum.id] = perms
        return self._forum_perms_cache[forum.id]
|
cloud9ers/gurumate | environment/share/doc/ipython/examples/lib/gui-tk.py | Python | lgpl-3.0 | 612 | 0.004902 | #!/usr/bin/env python
"""Si | mple Tk example to manually test event loop integration.
This is meant to run tests manually in ipython as:
In [5]: %gui tk
In [6]: %run gui-tk.py
"""
from Tkinter import *
class MyApp:
    """Minimal Tk application: one frame holding a single "Hello" button."""
    def __init__(self, root):
        container = Frame(root)
        container.pack()
        self.button = Button(container, text="Hello", command=self.hello_world)
        self.button.pack(side=LEFT)
    def hello_world(self):
        """Button callback: print a greeting to stdout."""
        print("Hello World!")
# Create the Tk root window and attach the demo application.
root = Tk()
app = MyApp(root)
try:
    # Inside IPython: let IPython's input hook drive the Tk event loop.
    from IPython.lib.inputhook import enable_tk; enable_tk(root)
except ImportError:
    # Plain Python: fall back to Tk's own blocking event loop.
    root.mainloop()
|
meguiraun/mxcube3 | mxcube3/video/streaming.py | Python | gpl-2.0 | 7,123 | 0.001544 | # -*- coding: utf-8 -*-
"""Functions for video streaming."""
import cStringIO
import fcntl
import os
import signal
import struct
import subprocess
import sys
import time
import types
import json
from PIL import Image
import v | 4l2
VIDEO_DEVICE = None
VIDEO_STREAM_PROCESS = None
VIDEO_INITIALIZED = False
VIDEO_SIZE = "-1,-1"
VIDEO_RESTART = False
VIDEO_ORIGINAL_SIZE = 0,0
def open_video_device(path="/dev/video0"):
    """Open a v4l2 loopback device for binary unbuffered writing.

    The opened file object is remembered in the module-level VIDEO_DEVICE
    global and returned.  Raises RuntimeError when the device node does not
    exist (typically because the v4l2loopback kernel module is not loaded).
    """
    global VIDEO_DEVICE
    if not os.path.exists(path):
        msg = "Cannot open video device %s, path do not exist. " % path
        msg += "Make sure that the v4l2loopback kernel module is loaded (modprobe v4l2loopback). "
        msg += "Falling back to MJPEG."
        raise RuntimeError(msg)
    # binary, unbuffered write
    VIDEO_DEVICE = open(path, "wb", 0)
    return VIDEO_DEVICE
def initialize_video_device(pixel_format, width, height, channels):
    """Configure the v4l2 output format on the opened VIDEO_DEVICE.

    Returns True on success; raises RuntimeError if the VIDIOC_S_FMT
    ioctl reports a non-zero result.
    """
    fmt = v4l2.v4l2_format()
    fmt.type = v4l2.V4L2_BUF_TYPE_VIDEO_OUTPUT
    fmt.fmt.pix.pixelformat = pixel_format
    fmt.fmt.pix.width = width
    fmt.fmt.pix.height = height
    fmt.fmt.pix.field = v4l2.V4L2_FIELD_NONE
    # Tightly packed frames: one row is width * channels bytes.
    fmt.fmt.pix.bytesperline = width * channels
    fmt.fmt.pix.sizeimage = width * height * channels
    fmt.fmt.pix.colorspace = v4l2.V4L2_COLORSPACE_SRGB
    result = fcntl.ioctl(VIDEO_DEVICE, v4l2.VIDIOC_S_FMT, fmt)
    if result != 0:
        raise RuntimeError("Could not initialize video device: %d" % result)
    return True
def set_video_size(width=-1, height=-1):
    """Request a new streaming size; applied when the next frame arrives.

    -1 for either dimension means "keep the source dimension".
    """
    global VIDEO_SIZE
    global VIDEO_RESTART
    VIDEO_SIZE = "{},{}".format(width, height)
    VIDEO_RESTART = True
def video_size():
    """Return [width, height, scale] for the current stream.

    Width and height are the string components of VIDEO_SIZE; scale is the
    current width relative to the original capture width.
    NOTE(review): VIDEO_ORIGINAL_SIZE defaults to (0, 0), so this divides
    by zero unless the original size has been set first -- confirm callers.
    """
    width, height = VIDEO_SIZE.split(",")
    scale = float(width) / VIDEO_ORIGINAL_SIZE[0]
    return [width, height, scale]
def new_frame_received(img, width, height, *args, **kwargs):
    """
    Executed when a new image is received, (new frame received callback).

    Normalizes the frame to raw RGB/RGBA bytes, writes it to the v4l2
    loopback device, and (re)spawns the external streaming process when
    needed.

    :param img: JPEG/raw byte string (MxCuBE 3) or a QImage-like object
                (MxCuBE 2.x) — assumption inferred from the isinstance
                branch below; confirm against callers
    :param width: frame width in pixels
    :param height: frame height in pixels
    """
    pixel_format = v4l2.V4L2_PIX_FMT_RGB24
    channels = 3

    global VIDEO_INITIALIZED
    global VIDEO_STREAM_PROCESS
    global VIDEO_RESTART

    # Assume that we are getting a qimage if we are not getting a str,
    # to be able to handle data sent by hardware objects used in MxCuBE 2.x
    if not isinstance(img, str):
        # 4 Channels with alpha
        channels = 4
        pixel_format = v4l2.V4L2_PIX_FMT_RGB32
        rawdata = img.bits().asstring(img.numBytes())
        img = rawdata
    else:
        # Is the image on JPEG format get the RGB data otherwise assume its
        # already RGB and do nothing with the data
        if img.startswith('\xff\xd8\xff\xe0\x00\x10JFIF'):
            # jpeg image (SOI + JFIF APP0 marker)
            strbuf = cStringIO.StringIO(img)
            img = Image.open(strbuf)
            img = img.tobytes()

    if VIDEO_DEVICE:
        # Lazily set the v4l2 output format on the first frame.
        if not VIDEO_INITIALIZED:
            VIDEO_INITIALIZED = \
                initialize_video_device(pixel_format, width, height, channels)

        VIDEO_DEVICE.write(img)

    # A size change was requested: kill the streamer's children so it is
    # restarted below with the new VIDEO_SIZE.
    if VIDEO_RESTART and VIDEO_STREAM_PROCESS:
        os.system('pkill -TERM -P {pid}'.format(pid=VIDEO_STREAM_PROCESS.pid))
        VIDEO_RESTART = False
        VIDEO_STREAM_PROCESS = None

    # start the streaming process if not started or restart if terminated
    if not VIDEO_STREAM_PROCESS or VIDEO_STREAM_PROCESS.poll() is not None:
        sfpath = os.path.join(os.path.dirname(__file__), "streaming_processes.py")
        # Derive the interpreter path from the stdlib location (virtualenv bin).
        python_executable = os.sep.join(os.path.dirname(os.__file__).split(os.sep)[:-2]+["bin", "python"])
        VIDEO_STREAM_PROCESS = subprocess.Popen([python_executable, sfpath, VIDEO_DEVICE.name, VIDEO_SIZE], close_fds=True)
def get_available_sizes(camera):
    """Return the (width, height) tuples the stream can be scaled to.

    :param camera: object exposing getWidth()/getHeight()
    :returns: [(w, h)] plus half and quarter sizes when the MPEG
              streaming process runs; [] if the camera cannot report
              its size
    """
    try:
        w, h = camera.getWidth(), camera.getHeight()

        # Some video decoders have difficulties to decode videos with odd image dimensions
        # (JSMPEG beeing one of them) so we make sure that the size is even
        w = w if w % 2 == 0 else w + 1
        h = h if h % 2 == 0 else h + 1

        # Calculate half the size and quarter of the size if MPEG streaming is used
        # otherwise just return the orignal size.  Floor division keeps the
        # results integers on both Python 2 and Python 3 (plain '/' would
        # yield floats under Python 3).
        if VIDEO_STREAM_PROCESS:
            video_sizes = [(w, h), (w // 2, h // 2), (w // 4, h // 4)]
        else:
            video_sizes = [(w, h)]
    except (ValueError, AttributeError):
        video_sizes = []

    return video_sizes
def set_initial_stream_size(camera, video_device_path):
    """Record the camera's native frame size in the module globals.

    Dimensions are rounded up to even numbers because some decoders
    cannot handle odd-sized frames.  ``video_device_path`` is accepted
    for interface compatibility but not used here.
    """
    global VIDEO_SIZE
    global VIDEO_ORIGINAL_SIZE

    width = camera.getWidth()
    height = camera.getHeight()

    if width % 2:
        width += 1
    if height % 2:
        height += 1

    VIDEO_ORIGINAL_SIZE = width, height
    VIDEO_SIZE = "%s,%s" % VIDEO_ORIGINAL_SIZE
def tango_lima_video_plugin(camera, video_device):
    """
    Configures video frame handling for TangoLimaVideo devices.

    Switches the device to RGB24 acquisition and monkey-patches the
    hardware object with helpers that decode Lima's binary image header.

    :param HardwareObject camera: Object providing frames to encode and stream
    :param str video_device: Video loopback path
    """
    if camera.__class__.__name__ == 'TangoLimaVideo':
        # patch hardware object to set acquisition to the right mode
        # and to get the right frames out of the video device
        if camera.isReady():
            # Toggle live mode so the new video_mode takes effect.
            camera.setLive(False)
            camera.device.video_mode = "RGB24"
            time.sleep(0.1)
            camera.setLive(True)

        def parse_image_data(self, img_data):
            # Big-endian Lima header followed by raw pixel data; only the
            # width/height fields are needed here.
            hfmt = ">IHHqiiHHHH"
            hsize = struct.calcsize(hfmt)
            _, _, img_mode, frame_number, width, height, _, _, _, _ = \
                struct.unpack(hfmt, img_data[1][:hsize])
            raw_data = img_data[1][hsize:]
            return width, height, raw_data

        def do_polling(self, sleep_time):
            # Continuously re-emit the latest frame at the polling rate.
            hfmt = ">IHHqiiHHHH"
            hsize = struct.calcsize(hfmt)

            while True:
                width, height, raw_data = \
                    self.parse_image_data(self.device.video_last_image)
                self.emit("imageReceived", raw_data, width, height, False)
                time.sleep(sleep_time)

        def take_snapshot(self, path, bw=False):
            # Save the latest frame to disk, optionally converted to 1-bit.
            width, height, raw_data = \
                self.parse_image_data(self.device.video_last_image)
            img = Image.frombytes("RGB", (width, height), raw_data)

            if bw:
                img.convert("1")

            img.save(path)

        # Bind the helpers as bound methods on the camera instance.
        camera._do_polling = types.MethodType(do_polling, camera)
        camera.takeSnapshot = types.MethodType(take_snapshot, camera)
        camera.parse_image_data = types.MethodType(parse_image_data, camera)
def init(camera, video_device_path):
    """
    Initialize video loopback device.

    :param HardwareObject camera: Object providing frames to encode and stream
    :param str video_device_path: Video loopback path
    :returns: the opened loopback device file object
    """
    set_initial_stream_size(camera, video_device_path)
    tango_lima_video_plugin(camera, video_device_path)
    video_device = open_video_device(video_device_path)

    # Forward every new camera frame to the loopback device.
    camera.connect("imageReceived", new_frame_received)

    return video_device
|
TE-ToshiakiTanaka/stve | project/col/client.py | Python | mit | 829 | 0.018094 | import websocket
import thread
import time
import cv2
from StringIO import S | tringIO
import base64
from PIL import Image
import numpy as np
import time
def on_message(ws, message):
    """Websocket callback: decode a base64-encoded frame and dump it.

    :param ws: the WebSocketApp instance (unused)
    :param message: base64 payload sent by the minicap server
    """
    img = base64.b64decode(message)
    # Parenthesized single-argument print works identically under
    # Python 2 and Python 3 (the original used py2-only 'print img').
    print(img)
def on_error(ws, error):
    """Websocket callback: report a transport/protocol error.

    The original signature line was corrupted by a stray '|' artifact.
    """
    print(error)
def on_close(ws):
    """Websocket callback: announce closure and tear down OpenCV windows."""
    # Parenthesized print is valid on both Python 2 and Python 3.
    print("### closed ###")
    cv2.destroyAllWindows()
def on_open(ws):
    # Request a 1920x1080 stream for display 0 as soon as the socket opens.
    ws.send('1920x1080/0')
# Open a preview window, connect to the local minicap websocket endpoint
# and decode incoming base64 frames until interrupted.
if __name__ == "__main__":
    cv2.namedWindow("img", cv2.WINDOW_NORMAL)
    websocket.enableTrace(True)
    ws = websocket.WebSocketApp("ws://localhost:9002/minicap",
                                on_message = on_message,
                                on_error = on_error,
                                on_close = on_close,
                                subprotocols=["binary", "base64"])
    ws.on_open = on_open
    ws.run_forever()
|
kupiakos/pybcd | elements.py | Python | mit | 10,086 | 0.00694 |
import struct
from common import *
from objects import ObjectAppType
from bcddevice import BCDDevice
# element types:
# X X ???? XX
# class format subtype
# class:
# 1 = Library
# 2 = Application
# 3 = Device
# format:
# 0 = Unknown
# 1 = Device
# 2 = String
# 3 = Object
# 4 = Object List
# 5 = Integer
# 6 = Boolean
# 7 = IntegerList
# Object-class discriminator stored in the top nibble of an element type
# (see the layout comment above).
ElementClass = enum(Library=0x1,
                    Application=0x2,
                    Device=0x3,
                    Hidden=0x4)

# Value format stored in the "format" field of an element type.
ElementFormat = enum(Unknown=0,
                     Device=1,
                     String=2,
                     Object=3,
                     ObjectList=4,
                     Integer=5,
                     Boolean=6,
                     IntegerList=7)
# based on both my personal findings and on this website:
# http://www.geoffchappell.com/notes/windows/boot/bcd/elements.htm?tx=5
_library = {
0x01: (1, 'device'),
0x02: (2, 'path'),
0x04: (2, 'description'),
0x05: (2, 'locale'),
0x06: (4, 'inherit'),
0x07: (5, 'truncatememory'),
0x08: (4, 'recoverysequence'),
0x09: (6, 'recoveryenabled'),
0x0A: (7, 'badmemorylist'),
0x0B: (6, 'badmemoryaccess'),
0x0C: (5, 'firstmegabytepolicy', enum('UseNone','UseAll','UsePrivate')),
0x0D: (5, 'relocatephysical'),
0x0E: (5, 'avoidlowmemory'),
0x0F: (6, 'traditionalksegmappings'),
0x10: (6, 'bootdebug'),
0x11: (5, 'debugtype', enum('Serial','1394','USB')),
0x12: (5, 'debugaddress'),
0x13: (5, 'debugport'),
0x14: (5, 'baudrate'),
0x15: (5, 'channel'),
0x16: (2, 'targetname'),
0x17: (6, 'noumex'),
0x18: (5, 'debugstart', enum('Active', 'AutoEnable', 'Disable')),
0x19: (2, 'busparams'),
0x20: (6, 'bootems'),
0x22: (5, 'emsport'),
0x23: (5, 'emsbaudrate'),
0x30: (2, 'loadoptions'),
0x31: (6, 'attemptnonbcdstart'),
0x40: (6, 'advancedoptions'),
0x41: (6, 'optionsedit'),
0x42: (5, 'keyringaddress'),
# no alias
0x43: (1, 'bootstatusdatalogdevice'),
# no alias
0x44: (2, 'bootstatusdatalogfile'),
# no alias
0x45: (6, 'bootstatusdatalogappend'),
0x46: (6, 'graphicsmodedisabled'),
0x47: (5, 'configaccesspolicy', enum('Default', 'DisallowMmConfig')),
0x48: (6, 'nointegritychecks'),
0x49: (6, 'testsigning'),
0x4A: (2, 'fontpath'),
# seems to be wrong in the table?
0x4B: (5, 'integrityservices'),
0x50: (6, 'extendedinput'),
0x51: (5, 'initialconsoleinput'),
# not in table
0x60: (6, 'isolatedcontext'),
# not in table
0x65: (5, 'displaymessage', enum('Default','Resume','HyperV', 'Recovery','StartupRepair', 'SystemImageRecovery','CommandPrompt', 'SystemRestore', 'PushButtonReset')),
# not in table
0x77: (7, 'allowedinmemorysettings'),
}
_bootmgr = {
0x01: (4, 'displayorder'),
0x02: (4, 'bootsequence'),
0x03: (3, 'default'),
0x04: (5, 'timeout'),
0x05: (6, 'resume'),
0x06: (3, 'resumeobject'),
0x10: (4, 'toolsdisplayorder'),
0x20: (6, 'displaybootmenu'),
0x21: (6, 'noerrordisplay'),
0x22: (1, 'bcddevice'),
0x23: (2, 'bcdfilepath'),
0x30: (7, 'customactions'),
}
_osloader = {
0x001: (1, 'osdevice'),
0x002: (2, 'systemroot'),
0x003: (3, 'resumeobject'),
0x004: (6, 'stampdisks'),
0x010: (6, 'detecthal'),
0x011: (2, 'kernel'),
0x012: (2, 'hal'),
0x013: (2, 'dbgtransport'),
0x020: (5, 'nx', enum('OptIn', 'OptOut', 'AlwaysOff', 'AlwaysOn')),
0x021: (5, 'pae', enum('Default', 'ForceEnable', 'ForceDisable')),
0x022: (6, 'winpe'),
0x024: (6, 'nocrashautoreboot'),
0x025: (6, 'lastknowngood'),
0x026: (6, 'oslnointegritychecks'),
0x027: (6, 'osltestsigning'),
0x030: (6, 'nolowmem'),
0x031: (5, 'removememory'),
0x032: (5, 'increaseuserva'),
0x033: (5, 'perfmem'),
0x040: (6, 'vga'),
0x041: (6, 'quietboot'),
0x042: (6, 'novesa'),
0x050: (5, 'clustermodeaddressing'),
0x051: (6, 'usephysicaldestination'),
0x052: (5, 'restrictapiccluster'),
0x053: (2, 'evstore'),
0x054: (6, 'uselegacyapicmode'),
0x060: (6, 'onecpu'),
0x061: (5, 'numproc'),
0x062: (6, 'maxproc'),
0x063: (5, 'configflags'),
0x064: (6, 'maxgroup'),
0x065: (6, 'groupaware'),
0x066: (5, 'groupsize'),
0x070: (6, 'usefirmwarepcisettings'),
0x071: (5, 'msi', enum('Default', 'ForceDisable')),
0x072: (5, 'pciexpress', enum('Default', 'ForceDisable')),
0x080: (5, 'safeboot', enum('Minimal', 'Network', 'DsRepair')),
0x081: (6, 'safebootalternateshell'),
0x090: (6, 'bootlog'),
0x091: (6, 'sos'),
0x0A0: (6, 'debug'),
0x0A1: (6, 'halbreakpoint'),
0x0A2: (6, 'useplatformclock'),
0x0B0: (6, 'ems'),
# no alias
0x0C0: (5, 'forcefailure', enum('Load', 'Hive', 'Acpi', 'General')),
0x0C1: (5, 'driverloadfailurepolicy', enum('Fatal', 'UseErrorControl')),
# not in table
0x0C2: (5, 'bootmenupolicy', enum('TODO0', 'Standard', 'TODO2', 'TODO3')),
0x0E0: (5, 'bootstatuspolicy', enum('DisplayAllFailures', 'IgnoreAllFailures', 'IgnoreShutdownFailures', 'IgnoreBootFailures')),
0x0F0: (5, 'hypervisorlaunchtype', enum('Off', 'Auto')),
0x0F1: (2, 'hypervisorpath'),
0x0F2: (6, 'hypervisordebug'),
0x0F3: (5, 'hypervisordebugtype', enum('Serial', '1394')),
0x0F4: (5, 'hypervisordebugport'),
0x0F5: (5, 'hypervisorbaudrate'),
0x0F6: (5, 'hypervisorchannel'),
# not a lot known
0x0F7: (5, 'bootuxpolicy'),
0x0F8: (6, 'hypervisordisableslat'),
0x100: (5, 'tpmbootentropy', enum('Default', 'ForceDisable', 'ForceEnable')),
0x120: (5, 'xsavepolicy'),
0x121: (5, 'xsaveaddfeature0'),
0x122: (5, 'xsaveaddfeature1'),
0x123: (5, 'xsaveaddfeature2'),
0x124: (5, 'xsaveaddfeature3'),
0x125: (5, 'xsaveaddfeature4'),
0x126: (5, 'xsaveaddfeature5'),
0x127: (5, 'xsaveaddfeature6'),
0x128: (5, 'xsaveaddfeature7'),
0x129: (5, 'xsaveremovefeature'),
0x12A: (5, 'xsaveprocessorsmask'),
0x12B: (5, 'xsavedisable'),
}
# Elements understood by the Windows resume loader (winresume).
# Values are (ElementFormat subtype, bcdedit alias[, value enum]).
_resume = {
    0x01: (1, 'filedevice'),
    0x02: (2, 'filepath'),
    0x03: (6, 'customsettings'),
    0x04: (6, 'pae'),
    0x05: (1, 'associatedosdevice'),
    0x06: (6, 'debugoptionenabled'),
    # The 'enum(' call below was corrupted by a stray '|' artifact.
    0x07: (5, 'bootux', enum('Disabled', 'Basic', 'Standard')),
    # not in table
    0x08: (5, 'bootmenupolicy', enum('TODO0', 'Standard', 'TODO2', 'TODO3')),
}
# Elements understood by the Windows memory diagnostic application (memdiag).
_memdiag = {
    0x01: (5, 'passcount'),
    0x02: (5, 'testmix', enum('Basic', 'Extended')),
    0x03: (5, 'failurecount'),
    # 'RandomPattern' was corrupted by a stray '|' artifact in the source.
    0x04: (5, 'testtofail', enum('Stride', 'Mats', 'InverseCoupling', 'RandomPattern', 'Checkerboard')),
    0x05: (6, 'cacheenable'),
}
_ntldr = {
0x01: (2, 'bpbstring'),
}
_startup = {
0x01: (6, 'pxesoftreboot'),
0x02: (2, 'applicationname'),
}
_device = {
0x01: (5, 'ramdiskimageoffset'),
0x02: (5, 'ramdiskftpclientport'),
0x03: (1, 'ramdisksdidevice'),
0x04: (2, 'ramdisksdipath'),
0x05: (5, 'ramdiskimagelength'),
0x06: (6, 'exportascd'),
0x07: (5, 'ramdisktftpblocksize'),
0x08: (5, 'ramdisktftpwindowsize'),
0x09: (6, 'ramdiskmcenabled'),
0x0A: (6, 'ramdiskmctftpfallback'),
}
# All of these are hidden during a bcdedit /enum all command
# I design good software, so I'll show it even if bcdedit doesn't.
_setup = {
0x01: (1, 'devicetype'),
0x02: (2, 'applicationrelativepath'),
0x03: (2, 'ramdiskdevicerelativepath'),
0x04: (6, 'omitosloaderelements'),
0x10: (6, 'recoveryos'),
}
alias_dict = {
# applies to all object types
ElementClass.Library: _library,
# these depend on the application
ElementClass.Application: {
#objectapptype
0: {},
ObjectAppType.FirmwareMgr: _bootmgr,
ObjectAppType.WinBootMgr: _bootmgr,
ObjectAppType.WinBootLdr: _osloader,
ObjectAppType.WinResume: _resume,
ObjectAppType.WinMemTest: _memdiag,
ObjectAppType.Ntldr: _ntldr,
ObjectAppType.Setupldr: _ntldr,
ObjectAppType.BootSect: {},
ObjectAppType.Startup: _startup,
},
# only works for devices
E |
9h37/pompadour-wiki | pompadour_wiki/pompadour_wiki/apps/utils/git_db.py | Python | mit | 10,263 | 0.001754 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext
from django.utils import simplejson as json
from django.conf import settings
from StringIO import StringIO
from gitdb import IStream
from git import *
from git.exc import InvalidGitRepositoryError
from collections import defaultdict
from datetime import datetime
import os
_hook = """#!/bin/sh
cd ..
env -i git reset --hard > /dev/null 2>&1
env -i git update-index > /dev/null 2>&1
"""
def _do_commit(repo, path, content, commit_msg=None):
    """ Store ``content`` as a blob at ``path``, stage it and commit.

    :param repo: git.Repo instance to commit into
    :param path: repository-relative file path
    :param content: unicode text to store (encoded as UTF-8)
    :param commit_msg: optional message; defaults to "Update Wiki: <path>"
    """
    # Create the blob object
    stream = StringIO(content.encode('utf-8'))
    # Seek to the end to measure the stream length, then rewind.
    stream.seek(0, 2)
    streamlen = stream.tell()
    stream.seek(0)

    istream = IStream('blob', streamlen, stream)

    # Add it to the repository object database
    repo.odb.store(istream)

    # Create the corresponding Blob object
    blob = Blob(repo, istream.binsha, Blob.file_mode, path.encode('utf-8'))

    # Add blob to the index
    repo.index.add([IndexEntry.from_blob(blob)])

    if not commit_msg:
        commit_msg = ugettext(u'Update Wiki: {0}').format(path).encode('utf-8')

    repo.index.commit(commit_msg)
class Repository(object):
""" Repository object. """
    @classmethod
    def new(cls, gitdir):
        """ Initialize a repository and create the root commit.

        Idempotent: when ``gitdir`` already exists the existing
        repository is simply opened.
        """
        # Create repository
        if os.path.exists(gitdir.encode('utf-8')):
            return cls(gitdir)

        repo = Repo.init(gitdir.encode('utf-8'))
        # Allow pushes to the checked-out branch; the hook below re-syncs
        # the working tree after each push.
        repo.config_writer().set_value('receive', 'denyCurrentBranch', 'ignore')

        # Create hook to automatically update when we receive commits from clients
        post_receive = os.path.join(gitdir, '.git', 'hooks', 'post-receive')

        with open(post_receive, 'w') as f:
            f.write(_hook)

        os.chmod(post_receive, 0775)

        # Create the initial commit
        _do_commit(repo, u'{0}.md'.format(settings.WIKI_INDEX), '# Home', commit_msg=ugettext(u'Initialize'))

        return cls(gitdir)
    def __init__(self, gitdir):
        """ Initialize repository.

        :param gitdir: filesystem path of the repository's working tree
        """
        self.repo = Repo(gitdir.encode('utf-8'))
        self.gitdir = gitdir
        self.parse()
    @property
    def git(self):
        # Shortcut to the underlying git command wrapper (git.Repo.git).
        return self.repo.git
    @property
    def head(self):
        # Shortcut to the repository HEAD reference.
        return self.repo.head
    def parse(self):
        """ Parse Tree and Blob objects.

        Resets the working tree to HEAD and rebuilds the cached
        entry/blob/tree lists used by the lookup helpers.
        """
        # Do git reset --hard and git update-index
        self.repo.head.reset(index=True, working_tree=True)
        self.repo.git.update_index()

        self.repo_tree = self.repo.tree()
        self.entries = [e for e in self.repo_tree.traverse()]
        self.blobs = [b for b in self.entries if isinstance(b, Blob)]
        self.trees = [self.repo_tree] + [t for t in self.entries if isinstance(t, Tree)]
def exists(self, path):
""" Check if path exists in repository. """
if path == self.repo_tree.path:
return True
for e in self.entries:
if path == e.path:
return True
return False
def is_dir(self, path):
""" Check if path is a directory. """
for t in self.trees:
if path == t.path:
return True
return False
    def get_file_mimetype(self, path):
        """ Get mimetype of file stored in ``path``.

        Directories yield 'inode/directory'; unknown paths fall through
        and yield None.
        """
        if self.is_dir(path):
            return 'inode/directory'

        for blob in self.blobs:
            if blob.path == path:
                return blob.mime_type
    def set_content(self, path, content, commit_msg=None):
        """ Add new content in ``path`` and commit it. """
        _do_commit(self.repo, path, content, commit_msg)

        # Update internal informations
        self.parse()
def put_uploaded_file(self, path, ufile, commit_msg=None):
""" Put an uploaded file to the repository. """
# Re-parse to be sure
self.parse()
# Get absolute path to the file
abspath = os.path.join(self.gitdir, path)
# Make directory for the file
try:
os.makedirs(os.path.dirname(abspath))
except OSError:
pass
# Write the file
with open(abspath, 'wb') as f:
for chunk in ufile.chunks():
f.write(chunk)
# Add it to the repository
import sys
print >>sys.stderr, type(path), path
self.repo.index.add([path.encode('utf-8')])
# And commit
if not commit_msg:
commit_msg = ugettext(u'Upload document: {0}').format(path).encode('utf-8')
self.repo.index.commit(commit_msg)
# Update internal informations
self.parse()
    def get_content(self, path):
        """ Return (data, name, mimetype) for the blob at ``path``.

        Falls through and returns None for unknown paths.
        """
        for blob in self.blobs:
            if blob.path == path:
                return blob.data_stream.read(), blob.name, blob.mime_type
def rm_content(self, path):
""" Remove file located at ``path``. | """
self.repo.index.remove([path.encode('utf-8' | )])
self.repo.index.commit(ugettext(u'Update Wiki: {0} deleted'.format(path)).encode('utf-8'))
self.parse()
def commit(self, message):
""" Create an empty commit """
c = Commit.create_from_tree(self.repo, self.repo.tree(), message, head=True)
    def get_folder_tree(self, path):
        """ Get list of files contained in ``path``.

        :returns: a list of {'path', 'name', 'type'} dicts (files first,
                  then sub-directories), or None when ``path`` is not a tree
        """
        for tree in self.trees:
            if tree.path == path:
                ret = []
                ret = ret + [{'path': b.path, 'name': b.name, 'type': b.mime_type} for b in tree.blobs]
                ret = ret + [{'path': t.path, 'name': t.name, 'type': 'inode/directory'} for t in tree.trees]
                return ret
    def get_file_history(self, path):
        """ Get history for a file: Commit objects, newest first. """
        # Each --pretty=oneline entry starts with the commit sha.
        return [self.repo.commit(line.split(' ', 1)[0]) for line in self.repo.git.log('--pretty=oneline', '--', path.encode('utf-8')).splitlines()]
def get_history(self, limit=None):
""" Get repository's history """
if limit:
return [self.repo.commit(line.split(' ', 1)[0]) for line in self.repo.git.log('--pretty=oneline', '-{0}'.format(limit)).splitlines()]
return [self.repo.commit(line.split(' ', 1)[0]) for line in self.repo.git.log('--pretty=oneline').splitlines()]
    def get_file_diffs(self, path):
        """ Get diffs for a file.

        :returns: {'diffs': [...]} with one entry per commit touching
                  ``path``; empty when the path does not exist
        """
        diffs = {'diffs': []}

        if self.exists(path):
            commits = self.get_file_history(path)

            for c in commits:
                diff = {
                    'msg': c.message,
                    'date': datetime.fromtimestamp(c.authored_date),
                    'author': c.author.name,
                    'sha': c.hexsha,
                    'path': path,
                }

                # Root commits have no parent to diff against.
                if c.parents:
                    diff['parent_sha'] = c.parents[0].hexsha

                diffs['diffs'].append(diff)

        return diffs
    def get_diffs(self, limit=None):
        """ Return repository's diffs.

        :param limit: optional maximum number of commits to include
        :returns: {'diffs': [...]} with one entry per commit
        """
        commits = self.get_history(limit=limit)

        diffs = {'diffs': []}

        for c in commits:
            diff = {
                'msg': c.message,
                'date': datetime.fromtimestamp(c.authored_date),
                'author': c.author.name,
                'sha': c.hexsha
            }

            # Root commits have no parent to diff against.
            if c.parents:
                diff['parent_sha'] = c.parents[0].hexsha

            diffs['diffs'].append(diff)

        return diffs
def get_tree(self):
""" Get full tree of repository as json. """
ret = {'node': {
'name': '/',
'path': '/',
'type': 'tree',
'children': []
}}
# Get all paths from the repository
for e in self.entries:
spath = e.path.split('/')
# We do not want the __media__ in our tree
if spath[0] == '__media__':
continue
node = ret['node']
# Build tree before inserting node
for d in spath[:-1]:
new_node = {'node': {
'name': d,
|
tensorflow/adanet | adanet/core/ensemble_builder_test.py | Python | apache-2.0 | 31,597 | 0.00557 | """Test AdaNet ensemble single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core.ensemble_builder import _EnsembleBuilder
from adanet.core.ensemble_builder import _SubnetworkManager
from adanet.core.summary import Summary
import adanet.core.testing_utils as tu
from adanet.ensemble import Candidate as EnsembleCandidate
from adanet.ensemble import ComplexityRegularizedEnsembler
from adanet.ensemble import MeanEnsemble
from adanet.ensemble import MeanEnsembler
from adanet.ensemble import MixtureWeightType
from adanet.subnetwork import Builder
from adanet.subnetwork import Subnetwork
import tensorflow.compat.v1 as tf_v1
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.training import training as train
from tensorflow.python.training import training_util
# pylint: enable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator.head import binary_class_head
from tensorflow_estimator.python.estimator.head import multi_head as multi_head_lib
class _Builder(Builder):
  """Subnetwork Builder test double whose train ops are injected.

  The build methods also assert the invariants that AdaNet guarantees to
  builders: empty trainable-variables collection, iteration-scoped global
  step, and scoped (fake) summaries.
  """

  def __init__(self,
               subnetwork_train_op_fn,
               mixture_weights_train_op_fn,
               use_logits_last_layer,
               seed=42,
               multi_head=False):
    self._subnetwork_train_op_fn = subnetwork_train_op_fn
    self._mixture_weights_train_op_fn = mixture_weights_train_op_fn
    self._use_logits_last_layer = use_logits_last_layer
    self._seed = seed
    self._multi_head = multi_head

  @property
  def name(self):
    return "test"

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    assert features is not None
    assert training is not None
    assert iteration_step is not None
    assert summary is not None

    # Trainable variables collection should always be empty when
    # build_subnetwork is called.
    assert not tf_compat.v1.get_collection(
        tf_compat.v1.GraphKeys.TRAINABLE_VARIABLES)

    # Subnetworks get iteration steps instead of global steps.
    step_name = "subnetwork_test/iteration_step"
    assert step_name == tf_compat.tensor_name(
        tf_compat.v1.train.get_global_step())
    assert step_name == tf_compat.tensor_name(train.get_global_step())
    assert step_name == tf_compat.tensor_name(training_util.get_global_step())
    assert step_name == tf_compat.tensor_name(tf_v1.train.get_global_step())
    assert step_name == tf_compat.tensor_name(
        tf_compat.v1.train.get_or_create_global_step())
    assert step_name == tf_compat.tensor_name(train.get_or_create_global_step())
    assert step_name == tf_compat.tensor_name(
        training_util.get_or_create_global_step())
    assert step_name == tf_compat.tensor_name(
        tf_v1.train.get_or_create_global_step())

    # Subnetworks get scoped summaries.
    assert "fake_scalar" == tf_compat.v1.summary.scalar("scalar", 1.)
    assert "fake_image" == tf_compat.v1.summary.image("image", 1.)
    assert "fake_histogram" == tf_compat.v1.summary.histogram("histogram", 1.)
    assert "fake_audio" == tf_compat.v1.summary.audio("audio", 1., 1.)
    last_layer = tu.dummy_tensor(shape=(2, 3))

    def logits_fn(logits_dim):
      return tf_compat.v1.layers.dense(
          last_layer,
          units=logits_dim,
          kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
              seed=self._seed))

    if self._multi_head:
      logits = {
          "head1": logits_fn(logits_dimension / 2),
          "head2": logits_fn(logits_dimension / 2)
      }
      last_layer = {"head1": last_layer, "head2": last_layer}
    else:
      logits = logits_fn(logits_dimension)

    return Subnetwork(
        last_layer=logits if self._use_logits_last_layer else last_layer,
        logits=logits,
        complexity=2,
        persisted_tensors={})

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    assert iteration_step is not None
    assert summary is not None
    return self._subnetwork_train_op_fn(loss, var_list)

  def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
                                     iteration_step, summary):
    assert iteration_step is not None
    # This assert line was corrupted by a stray '|' artifact in the source.
    assert summary is not None
    return self._mixture_weights_train_op_fn(loss, var_list)
class _BuilderPrunerAll(_Builder):
  """Removes the previous ensemble completely."""

  def prune_previous_ensemble(self, previous_ensemble):
    del previous_ensemble  # Unused: everything is pruned.
    return []
class _BuilderPrunerLeaveOne(_Builder):
  """Keeps only the first subnetwork of the previous ensemble."""

  def prune_previous_ensemble(self, previous_ensemble):
    return [0] if previous_ensemble else []
class _FakeSummary(Summary):
  """A fake adanet.Summary that returns canned tags instead of writing.

  (The original class docstring's closing quotes were corrupted by a
  stray '|' artifact.)
  """

  def scalar(self, name, tensor, family=None):
    return "fake_scalar"

  def image(self, name, tensor, max_outputs=3, family=None):
    return "fake_image"

  def histogram(self, name, values, family=None):
    return "fake_histogram"

  def audio(self, name, tensor, sample_rate, max_outputs=3, family=None):
    return "fake_audio"

  @contextlib.contextmanager
  def current_scope(self):
    yield
class EnsembleBuilderTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "no_previous_ensemble",
"want_logits": [[.016], [.117]],
"want_loss": 1.338,
"want_adanet_loss": 1.338,
"want_ensemble_trainable_vars": 1,
}, {
"testcase_name": "mean_ensembler",
"want_logits": [[.621], [.979]],
"want_loss": 1.3702,
"want_adanet_loss": 1.3702,
"want_ensemble_trainable_vars": 0,
"ensembler_class": MeanEnsembler,
"want_predictions": {
MeanEnsemble.MEAN_LAST_LAYER: [[-0.2807, -0.1377, -0.6763],
[0.0245, -0.8935, -0.8284]],
}
}, {
"testcase_name": "no_previous_ensemble_prune_all",
"want_logits": [[.016], [.117]],
"want_loss": 1.338,
"want_adanet_loss": 1.338,
"want_ensemble_trainable_vars": 1,
"subnetwork_builder_class": _BuilderPrunerAll
}, {
"testcase_name": "no_previous_ensemble_prune_leave_one",
"want_logits": [[.016], [.117]],
"want_loss": 1.338,
"want_adanet_loss": 1.338,
"want_ensemble_trainable_vars": 1,
"subnetwork_builder_class": _BuilderPrunerLeaveOne
}, {
"testcase_name": "default_mixture_weight_initializer_scalar",
"mixture_weight_initializer": None,
"mixture_weight_type": MixtureWeightType.SCALAR,
"use_logits_last_layer": True,
"want_logits": [[.580], [.914]],
"want_loss": 1.362,
"want_adanet_loss": 1.362,
"want_ensemble_trainable_vars": 1,
}, {
"testcase_name": "default_mixture_weight_initializer_vector",
"mixture_weight_initializer": None,
"mixture_weight_type": MixtureWeightType.VECTOR,
"use_logits_last_layer": True,
"want_logits": [[.580], [.914]] |
z/github-loc | githubloc/config.py | Python | mit | 345 | 0 | import os
import githubloc.util as util
# Per-user configuration lives in ~/.githubloc.ini; a default copy is
# created from the packaged template on first use.
config_file = '.githubloc.ini'
home = os.path.expanduser('~')
config_file_with_path = os.path.join(home, config_file)

# The call name was corrupted by a stray '|' artifact in the source.
util.check_if_not_create(config_file_with_path, 'config/githubloc.ini')
config = util.parse_config(config_file_with_path)

conf = {
    'token': os.path.expanduser(config['token']),
}
|
dinomite/uaParser | uaParser/test/test_user_agent_parser.py | Python | apache-2.0 | 6,889 | 0.001887 | #!/usr/bin/python2.5
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User Agent Parser Unit Tests."""
__author__ = 'slamm@google.com (Stephen Lamm)'
import unittest
from uaParser.lib import user_agent_parser
CHROME_UA_STRING = (
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/530.1 '
'(KHTML, like Gecko) Chrome/2.0.169.1 Safari/530.1')
# Each case is ((family, v1, v2, v3), user_agent_string, extra_kwargs).
TEST_STRINGS = (
    # ((family, v1, v2, v3), user_agent_string)
    #(('', '', '', '').
    # '', {}),
    (('RockMelt', '0', '8', '34'),
     'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.3 '
     '(KHTML, like Gecko) RockMelt/0.8.34.841 Chrome/6.0.472.63 '
     'Safari/534.3,gzip(gfe),gzip(gfe)', {}),
    (('Firefox Beta', '4', '0', 'b4'),
     'Mozilla/5.0 (X11; Linux i686 (x86_64); rv:2.0b4) Gecko/20100818 '
     'Firefox/4.0b4', {}),
    (('Firefox', '3', '6', '12'),
     'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) '
     'Gecko/20101027 Ubuntu/10.04 (lucid) Firefox/3.6.12', {}),
    (('Firefox (Shiretoko)', '3', '5', '1pre'),
     'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.1pre) '
     'Gecko/20090717 Ubuntu/9.04 (jaunty) Shiretoko/3.5.1pre', {}),
    (('Firefox Beta', '4', '0', 'b8pre'),
     'Mozilla/5.0 (X11; Linux x86_64; rv:2.0b8pre) Gecko/20101031 '
     'Firefox-4.0/4.0b8pre', {}),
    (('Konqueror', '4', '3', '1'),
     'Mozilla/5.0 (X11; U; Linux; de-DE) AppleWebKit/527 '
     '(KHTML, like Gecko, Safari/419.3) konqueror/4.3.1,gzip(gfe)', {}),
    (('Other', None, None, None),
     'SomethingWeNeverKnewExisted', {}),
    (('Chrome Frame (Sleipnir 2)', '2', '0', '169'),
     'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; '
     'chromeframe; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR '
     '3.5.30729; Sleipnir 2.8.5),gzip(gfe),gzip(gfe)',
     {'js_user_agent_string': CHROME_UA_STRING}),
    (('Chrome Frame (IE 8)', '2', '0', '169'),
     'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; '
     'chromeframe; SLCC1; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR '
     '3.0.30729),gzip(gfe),gzip(gfe)',
     {'js_user_agent_string': CHROME_UA_STRING}),
    # Chrome Frame installed but not enabled
    (('IE', '8', '0', None),
     'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6; '
     'chromeframe; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR '
     '3.0.04506.648; .NET CLR 3.5.21022; .NET CLR 3.0.4506.2152; .NET CLR '
     '3.5.30729),gzip(gfe),gzip(gfe)',
     {'js_user_agent_string': 'Mozilla/4.0 (compatible; MSIE 8.0; '
      'Windows NT 5.1; Trident/4.0; chromeframe; .NET CLR 2.0.50727; '
      '.NET CLR 1.1.4322; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; '
      '.NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)'}),
    (('IE Platform Preview', '9', '0', '1'),
     'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6; '
     '.NET CLR 2.0.50727; .NET CLR 1.1.4322),gzip(gfe),gzip(gfe)',
     {'js_user_agent_string': 'Mozilla/4.0 (compatible; MSIE 8.0; '
      'Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 1.1.4322)',
      'js_user_agent_family': 'IE Platform Preview',
      'js_user_agent_v1': '9',
      'js_user_agent_v2': '0',
      'js_user_agent_v3': '1'}),
    (('Midori', '0', '2', None),
     'Midori/0.2 (X11; Linux; U; en-us) WebKit/531.2 ,gzip(gfe),gzip(gfe)',
     {}),
    (('MozillaDeveloperPreview', '3', '7', 'a1'),
     'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.3a1) '
     'Gecko/20100208 MozillaDeveloperPreview/3.7a1 '
     '(.NET CLR 3.5.30729),gzip(gfe),gzip(gfe)', {}),
    (('Opera', '10', '53', None),
     'Opera/9.80 (Windows NT 5.1; U; ru) Presto/2.5.24 Version/10.53',
     {}),
    (('Opera Mobile', '10', '00', None),
     'Opera/9.80 (S60; SymbOS; Opera Mobi/275; U; es-ES) '
     'Presto/2.4.13 Version/10.00,gzip(gfe),gzip(gfe)', {}),
    (('Palm webOS', '1', '2', None),
     'Mozilla/5.0 (webOS/1.2; U; en-US) AppleWebKit/525.27.1 '
     '(KHTML, like Gecko) Version/1.0 Safari/525.27.1 '
     'Desktop/1.0,gzip(gfe),gzip(gfe)', {}),
    (('iPad', '3', '2', None),
     'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) '
     'AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B367 '
     'Safari/531.21.10,gzip(gfe),gzip(gfe)', {}),
    (('Dolfin', '2', '0', None),
     'Mozilla/5.0 (SAMSUNG; SAMSUNG-GT-S8500/S8500XXJEE; U; Bada/1.0; nl-nl) '
     'AppleWebKit/533.1 (KHTML, like Gecko) Dolfin/2.0 Mobile WVGA '
     'SMM-MMS/1.2.0 OPN-B', {}),
    (('BOLT', '2', '101', None),
     'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; BOLT/2.101) '
     'AppleWebKit/530 (KHTML, like Gecko) Version/4.0 '
     'Safari/530.17,gzip(gfe),gzip(gfe)', {}),
    # The continuation line below was corrupted by a stray '|' artifact.
    (('Blackberry', '6', '0', '0'),
     'Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en-GB) AppleWebKit/534.1+ '
     '(KHTML, like Gecko) Version/6.0.0.141 Mobile '
     'Safari/534.1+,gzip(gfe),gzip(gfe)', {}),
    )
class ParseTest(unittest.TestCase):
  """Checks Parse() against the expected (family, v1, v2, v3) tuples."""

  # The def line below was corrupted by a stray '|' artifact in the source.
  def testStrings(self):
    for (family, v1, v2, v3), user_agent_string, kwds in TEST_STRINGS:
      self.assertEqual((family, v1, v2, v3),
                       user_agent_parser.Parse(user_agent_string, **kwds))
class GetFiltersTest(unittest.TestCase):
  """Checks GetFilters() only forwards the js_* overrides it recognizes."""

  def testGetFiltersNoMatchesGiveEmptyDict(self):
    user_agent_string = 'foo'
    filters = user_agent_parser.GetFilters(
        user_agent_string, js_user_agent_string=None)
    self.assertEqual({}, filters)

  def testGetFiltersJsUaPassedThrough(self):
    user_agent_string = 'foo'
    filters = user_agent_parser.GetFilters(
        user_agent_string, js_user_agent_string='bar')
    self.assertEqual({'js_user_agent_string': 'bar'}, filters)

  def testGetFiltersJsUserAgentFamilyAndVersions(self):
    user_agent_string = ('Mozilla/4.0 (compatible; MSIE 8.0; '
                         'Windows NT 5.1; Trident/4.0; GTB6; .NET CLR 2.0.50727; '
                         '.NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)')
    filters = user_agent_parser.GetFilters(
        user_agent_string, js_user_agent_string='bar',
        js_user_agent_family='foo')
    self.assertEqual({'js_user_agent_string': 'bar',
                      'js_user_agent_family': 'foo'}, filters)
if __name__ == '__main__':
unittest.main()
|
typefj/django-miniurl | shortener/models.py | Python | mit | 702 | 0 | from django.db import models
from django.contrib.sites.models import Site
# Create your models here.
class Link(models.Model):
    """A shortened link: the target URL plus usage bookkeeping."""
    # NOTE: original class line was corrupted by a stray '|' artifact; restored.

    url = models.URLField(max_length=512)
    # Nullable so existing links survive deletion of their Site row.
    site = models.ForeignKey(Site, on_delete=models.SET_NULL, null=True)
    # How many times this link has been requested.
    request_times = models.PositiveIntegerField(default=0)
    updated = models.DateTimeField(auto_now=True)
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return '{}-{}'.format(self.pk, self.url)
class RateLimit(models.Model):
    """Per-IP request counter used for rate limiting."""
    # NOTE: original `ip` field line was corrupted by a stray '|' artifact;
    # restored.

    ip = models.GenericIPAddressField(unique=True)
    # Window start time -- presumably when counting began; confirm against views.
    start_time = models.DateTimeField()
    count = models.PositiveIntegerField(default=0)

    def __str__(self):
        return self.ip
|
manipopopo/tensorflow | tensorflow/contrib/gan/python/estimator/python/gan_estimator_impl.py | Python | apache-2.0 | 13,797 | 0.005074 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A TFGAN-backed GAN Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import enum
from tensorflow.contrib.framework.python.ops import variables as variable_lib
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python import train as tfgan_train
from tensorflow.contrib.gan.python.eval.python import summaries as tfgan_summaries
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import tf_inspect as inspect
__all__ = [
'GANEstimator',
'SummaryType'
]
class SummaryType(enum.IntEnum):
  """Kinds of summaries a GANEstimator can add (see `_summary_type_map`)."""
  NONE = 0  # no summaries (deliberately absent from _summary_type_map)
  VARIABLES = 1  # tfgan_summaries.add_gan_model_summaries
  IMAGES = 2  # tfgan_summaries.add_gan_model_image_summaries
  IMAGE_COMPARISON = 3  # tfgan_summaries.add_image_comparison_summaries
_summary_type_map = {
SummaryType.VARIABLES: tfgan_summaries.add_gan_model_summaries,
SummaryType.IMAGES: tfgan_summaries.add_gan_model_image_summaries,
SummaryType.IMAGE_COMPARISON: tfgan_summaries.add_image_comparison_summaries, # pylint:disable=line-too-long
}
class GANEstimator(estimator.Estimator):
  """An estimator for Generative Adversarial Networks (GANs).

  This Estimator is backed by TFGAN. The network functions follow the TFGAN API
  except for one exception: if either `generator_fn` or `discriminator_fn` have
  an argument called `mode`, then the tf.Estimator mode is passed in for that
  argument. This helps with operations like batch normalization, which have
  different train and evaluation behavior.

  Example:

  ```python
      import tensorflow as tf
      tfgan = tf.contrib.gan

      # See TFGAN's `train.py` for a description of the generator and
      # discriminator API.
      def generator_fn(generator_inputs):
        ...
        return generated_data

      def discriminator_fn(data, conditioning):
        ...
        return logits

      # Create GAN estimator.
      gan_estimator = tfgan.estimator.GANEstimator(
          model_dir,
          generator_fn=generator_fn,
          discriminator_fn=discriminator_fn,
          generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
          discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
          generator_optimizer=tf.train.AdamOptimizer(0.1, 0.5),
          discriminator_optimizer=tf.train.AdamOptimizer(0.1, 0.5))

      # Train estimator.
      gan_estimator.train(train_input_fn, steps)

      # Evaluate resulting estimator.
      gan_estimator.evaluate(eval_input_fn)

      # Generate samples from generator.
      predictions = np.array([
          x for x in gan_estimator.predict(predict_input_fn)])
  ```
  """

  def __init__(self,
               model_dir=None,
               generator_fn=None,
               discriminator_fn=None,
               generator_loss_fn=None,
               discriminator_loss_fn=None,
               generator_optimizer=None,
               discriminator_optimizer=None,
               get_hooks_fn=None,
               get_eval_metric_ops_fn=None,
               add_summaries=None,
               use_loss_summaries=True,
               config=None):
    """Initializes a GANEstimator instance.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      generator_fn: A python function that takes a Tensor, Tensor list, or
        Tensor dictionary as inputs and returns the outputs of the GAN
        generator. See `TFGAN` for more details and examples. Additionally, if
        it has an argument called `mode`, the Estimator's `mode` will be passed
        in (ex TRAIN, EVAL, PREDICT). This is useful for things like batch
        normalization.
      discriminator_fn: A python function that takes the output of
        `generator_fn` or real data in the GAN setup, and `generator_inputs`.
        Outputs a Tensor in the range [-inf, inf]. See `TFGAN` for more details
        and examples.
      generator_loss_fn: The loss function on the generator. Takes a `GANModel`
        tuple.
      discriminator_loss_fn: The loss function on the discriminator. Takes a
        `GANModel` tuple.
      generator_optimizer: The optimizer for generator updates, or a function
        that takes no arguments and returns an optimizer. This function will
        be called when the default graph is the `GANEstimator`'s graph, so
        utilities like `tf.contrib.framework.get_or_create_global_step` will
        work.
      discriminator_optimizer: Same as `generator_optimizer`, but for the
        discriminator updates.
      get_hooks_fn: A function that takes a `GANTrainOps` tuple and returns a
        list of hooks. These hooks are run on the generator and discriminator
        train ops, and can be used to implement the GAN training scheme.
        Defaults to `train.get_sequential_train_hooks()`.
      get_eval_metric_ops_fn: A function that takes a `GANModel`, and returns a
        dict of metric results keyed by name. The output of this function is
        passed into `tf.estimator.EstimatorSpec` during evaluation.
      add_summaries: `None`, a single `SummaryType`, or a list of `SummaryType`.
      use_loss_summaries: If `True`, add loss summaries. If `False`, does not.
        If `None`, uses defaults.
      config: `RunConfig` object to configure the runtime settings.

    Raises:
      ValueError: If loss functions aren't callable.
      ValueError: If `use_loss_summaries` isn't boolean or `None`.
      TypeError: If `get_hooks_fn` isn't callable or `None`.
    """
    if not callable(generator_loss_fn):
      raise ValueError('generator_loss_fn must be callable.')
    if not callable(discriminator_loss_fn):
      raise ValueError('discriminator_loss_fn must be callable.')
    if use_loss_summaries not in [True, False, None]:
      raise ValueError('use_loss_summaries must be True, False or None.')
    if get_hooks_fn is not None and not callable(get_hooks_fn):
      raise TypeError('get_hooks_fn must be callable.')

    # NOTE(review): `use_loss_summaries` is validated above but never
    # referenced again in this constructor or in `_model_fn` -- presumably
    # consumed elsewhere in the module; confirm before relying on it.
    def _model_fn(features, labels, mode):
      """GANEstimator model function."""
      if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL,
                      model_fn_lib.ModeKeys.PREDICT]:
        raise ValueError('Mode not recognized: %s' % mode)
      real_data = labels  # rename inputs for clarity
      generator_inputs = features  # rename inputs for clarity

      # Make GANModel, which encapsulates the GAN model architectures.
      gan_model = _get_gan_model(
          mode, generator_fn, discriminator_fn, real_data, generator_inputs,
          add_summaries)

      # Make the EstimatorSpec, which incorporates the GANModel, losses, eval
      # metrics, and optimizers (if required).
      return _get_estimator_spec(
          mode, gan_model, generator_loss_fn, discriminator_loss_fn,
          get_eval_metric_ops_fn, generator_optimizer, discriminator_optimizer,
          get_hooks_fn)

    super(GANEstimator, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config)
def _get_gan_model(
mode, generator_fn, discriminator_fn, real_data, generator_inputs,
add_summaries, generator_scope='Generator'):
"""Makes the GANModel tuple, which encapsulates the |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py | Python | apache-2.0 | 12,057 | 0.001161 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
import gradient_checker
from decorator_helper import prog_scope
class TestElementwiseMulDoubleGradCheck(unittest.TestCase):
    """Second-order (double) gradient check for elementwise_mul with
    equal-shape operands."""

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        y = layers.data('y', shape, False, dtype)
        x.persistable = True
        y.persistable = True
        out = layers.elementwise_mul(x, y)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)

    def test_grad(self):
        # Exercise CPU and, when compiled in, CUDA.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)
class TestElementwiseMulBroadcastDoubleGradCheck(unittest.TestCase):
    """Double-grad check for elementwise_mul where y (shape[:-1]) is
    broadcast against x along axis 0."""

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        y = layers.data('y', shape[:-1], False, dtype)
        x.persistable = True
        y.persistable = True
        out = layers.elementwise_mul(x, y, axis=0)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)

    def test_grad(self):
        # Exercise CPU and, when compiled in, CUDA.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)
class TestElementwiseAddDoubleGradCheck(unittest.TestCase):
    """Second-order (double) gradient check for elementwise_add with
    equal-shape operands."""

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        y = layers.data('y', shape, False, dtype)
        x.persistable = True
        y.persistable = True
        out = layers.elementwise_add(x, y)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)

    def test_grad(self):
        # Exercise CPU and, when compiled in, CUDA.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)
class TestElementwiseAddBroadcastDoubleGradCheck(unittest.TestCase):
    """Double-grad check for elementwise_add where y (shape[:-1]) is
    broadcast against x along axis 0."""
    # NOTE: two lines of the original were corrupted by stray '|' paste
    # artifacts; restored to match the sibling broadcast tests.

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        y = layers.data('y', shape[:-1], False, dtype)
        x.persistable = True
        y.persistable = True
        out = layers.elementwise_add(x, y, axis=0)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)

    def test_grad(self):
        # Exercise CPU and, when compiled in, CUDA.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)
class TestElementwiseSubDoubleGradCheck(unittest.TestCase):
    """Second-order (double) gradient check for elementwise_sub with
    equal-shape operands."""

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        y = layers.data('y', shape, False, dtype)
        x.persistable = True
        y.persistable = True
        out = layers.elementwise_sub(x, y)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)

    def test_grad(self):
        # Exercise CPU and, when compiled in, CUDA.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)
class TestElementwiseSubBroadcastDoubleGradCheck(unittest.TestCase):
    """Double-grad check for elementwise_sub where y (shape[:-1]) is
    broadcast against x along axis 0."""

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        y = layers.data('y', shape[:-1], False, dtype)
        x.persistable = True
        y.persistable = True
        out = layers.elementwise_sub(x, y, axis=0)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)

    def test_grad(self):
        # Exercise CPU and, when compiled in, CUDA.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)
class TestElementwiseDivDoubleGradCheck(unittest.TestCase):
    """Second-order (double) gradient check for elementwise_div with
    equal-shape operands."""

    @prog_scope()
    def func(self, place):
        # the shape of input variable should be clearly specified, not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.0001
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        y = layers.data('y', shape, False, dtype)
        x.persistable = True
        y.persistable = True
        # NOTE(review): axis=0 with same-shape inputs looks redundant --
        # confirm it is intentional.
        out = layers.elementwise_div(x, y, axis=0)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        # Keep divisors away from zero so the finite-difference check stays
        # numerically stable.
        y_arr[np.abs(y_arr) < 0.005] = 0.02

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps, atol=1e-3)

    def test_grad(self):
        # Exercise CPU and, when compiled in, CUDA.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)
class TestElementwiseDivBroadcastDoubleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
# the shape of input variable should be clearly specified, not inlcude -1.
shape = [2, 3, 4, 5]
eps = 0.0001
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape[1:-1], False, dtype)
x.persistable = True
y.persistable = True
out = layers.elementwise_div(x, y, axis=1)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape[1:-1]).astype(dtype)
y_arr[np.abs(y_arr) < 0.005] = 0.02
gradient_checker.double_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps, atol=1e-3)
def test_grad(self):
places = [fluid |
asciinema/asciinema | tests/test_helper.py | Python | gpl-3.0 | 415 | 0 | import sys
from codecs import Strea | mReader
from io import StringIO
from typing import Optional, TextIO, Union
stdout: Optional[Union[TextIO, StreamReader]] = None
class Test:
    """Test mixin that redirects stdout into the module-level ``stdout``
    buffer for the duration of each test."""

    def setUp(self) -> None:
        global stdout  # pylint: disable=global-statement
        # Remember the real stream so tearDown can restore it.
        self.real_stdout = sys.stdout
        buffer = StringIO()
        stdout = buffer
        sys.stdout = buffer

    def tearDown(self) -> None:
        # Undo the redirection installed by setUp.
        sys.stdout = self.real_stdout
|
benschmaus/catapult | telemetry/telemetry/testing/fakes/__init__.py | Python | bsd-3-clause | 15,827 | 0.011183 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides fakes for several of Telemetry's internal objects.
These allow code like story_runner and Benchmark to be run and tested
without compiling or starting a browser. Class names prepended with an
underscore are intended to be implementation details, and should not
be subclassed; however, some, like _FakeBrowser, have public APIs that
may need to be called in tests.
"""
from telemetry.internal.backends.chrome_inspector import websocket
from telemetry.internal.browser import browser_options
from telemetry.internal.platform import system_info
from telemetry.page import shared_page_state
from telemetry.util import image_util
from telemetry.testing.internal import fake_gpu_info
from types import ModuleType
# Classes and functions which are intended to be part of the public
# fakes API.
class FakePlatform(object):
  """Stand-in for telemetry's platform object; state is injected via the
  Set* test hooks instead of being probed from a real device."""

  def __init__(self):
    self._network_controller = None
    self._tracing_controller = None
    self._has_battor = False
    self._os_name = 'FakeOS'
    self._device_type_name = 'abc'
    self._is_svelte = False
    self._is_aosp = True

  @property
  def is_host_platform(self):
    raise NotImplementedError

  @property
  def network_controller(self):
    # Created lazily on first access.
    if self._network_controller is None:
      self._network_controller = _FakeNetworkController()
    return  self._network_controller

  @property
  def tracing_controller(self):
    # Created lazily on first access.
    if self._tracing_controller is None:
      self._tracing_controller = _FakeTracingController()
    return self._tracing_controller

  def Initialize(self):
    pass

  def CanMonitorThermalThrottling(self):
    return False

  def IsThermallyThrottled(self):
    return False

  def HasBeenThermallyThrottled(self):
    return False

  def GetArchName(self):
    raise NotImplementedError

  def SetOSName(self, name):
    self._os_name = name

  def GetOSName(self):
    return self._os_name

  def GetOSVersionName(self):
    raise NotImplementedError

  def GetOSVersionDetailString(self):
    raise NotImplementedError

  def StopAllLocalServers(self):
    pass

  def WaitForBatteryTemperature(self, _):
    pass

  def HasBattOrConnected(self):
    return self._has_battor

  def SetBattOrDetected(self, b):
    assert isinstance(b, bool)
    self._has_battor = b

  # TODO(rnephew): Investigate moving from setters to @property.
  def SetDeviceTypeName(self, name):
    self._device_type_name = name

  def GetDeviceTypeName(self):
    return self._device_type_name

  def SetIsSvelte(self, b):
    assert isinstance(b, bool)
    self._is_svelte = b

  def IsSvelte(self):
    # Only meaningful on Android; any other OS name is an error.
    if self._os_name != 'android':
      raise NotImplementedError
    return self._is_svelte

  def SetIsAosp(self, b):
    assert isinstance(b, bool)
    self._is_aosp = b

  def IsAosp(self):
    return self._is_aosp and self._os_name == 'android'
class FakeLinuxPlatform(FakePlatform):
  """FakePlatform preconfigured to look like a desktop Linux host."""

  def __init__(self):
    super(FakeLinuxPlatform, self).__init__()
    # Base64-encoded PNG data; screenshots are only "supported" once this
    # is set by a test.
    self.screenshot_png_data = None
    self.http_server_directories = []
    self.http_server = FakeHTTPServer()

  @property
  def is_host_platform(self):
    return True

  def GetDeviceTypeName(self):
    return 'Desktop'

  def GetArchName(self):
    return 'x86_64'

  def GetOSName(self):
    return 'linux'

  def GetOSVersionName(self):
    return 'trusty'

  def GetOSVersionDetailString(self):
    return ''

  def CanTakeScreenshot(self):
    return bool(self.screenshot_png_data)

  def TakeScreenshot(self, file_path):
    if not self.CanTakeScreenshot():
      raise NotImplementedError
    # Decode the canned base64 PNG and write it where a real platform
    # would save the screenshot.
    img = image_util.FromBase64Png(self.screenshot_png_data)
    image_util.WritePngFile(img, file_path)
    return True

  def SetHTTPServerDirectories(self, paths):
    self.http_server_directories.append(paths)
class FakeHTTPServer(object):
  """Fake local HTTP server: every requested path resolves to one file URL."""

  def UrlOf(self, url):
    del url  # the fake ignores the requested path
    return 'file:///foo'
class FakePossibleBrowser(object):
  """Possible-browser stub whose Create() returns a prepared _FakeBrowser."""

  def __init__(self, execute_on_startup=None,
               execute_after_browser_creation=None):
    self._returned_browser = _FakeBrowser(FakeLinuxPlatform())
    self.browser_type = 'linux'
    self.supports_tab_control = False
    self.is_remote = False
    # Optional callbacks fired inside Create(), before and after the
    # (fake) browser is handed out.
    self.execute_on_startup = execute_on_startup
    self.execute_after_browser_creation = execute_after_browser_creation

  @property
  def returned_browser(self):
    """The browser object that will be returned through later API calls."""
    return self._returned_browser

  def Create(self, finder_options):
    if self.execute_on_startup is not None:
      self.execute_on_startup()
    del finder_options # unused
    if self.execute_after_browser_creation is not None:
      self.execute_after_browser_creation(self._returned_browser)
    return self.returned_browser

  @property
  def platform(self):
    """The platform object from the returned browser.

    To change this or set it up, change the returned browser's
    platform.
    """
    return self.returned_browser.platform

  def IsRemote(self):
    return self.is_remote

  def SetCredentialsPath(self, _):
    pass
class FakeSharedPageState(shared_page_state.SharedPageState):
  """SharedPageState that hands out a FakePossibleBrowser instead of
  discovering a real one."""

  def __init__(self, test, finder_options, story_set):
    super(FakeSharedPageState, self).__init__(test, finder_options, story_set)

  def _GetPossibleBrowser(self, test, finder_options):
    p = FakePossibleBrowser()
    self.ConfigurePossibleBrowser(p)
    return p

  def ConfigurePossibleBrowser(self, possible_browser):
    """Override this to configure the PossibleBrowser.

    Can make changes to the browser's configuration here via e.g.:
       possible_browser.returned_browser.returned_system_info = ...
    """
    pass

  def DidRunStory(self, results):
    # TODO(kbr): add a test which throws an exception from DidRunStory
    # to verify the fix from https://crrev.com/86984d5fc56ce00e7b37ebe .
    super(FakeSharedPageState, self).DidRunStory(results)
class FakeSystemInfo(system_info.SystemInfo):
  """SystemInfo stub; the GPU dict defaults to fake_gpu_info.FAKE_GPU_INFO."""

  def __init__(self, model_name='', gpu_dict=None, command_line=''):
    # `is None` (identity) instead of `== None`: equality can be overridden
    # and is unidiomatic for None checks.
    if gpu_dict is None:
      gpu_dict = fake_gpu_info.FAKE_GPU_INFO
    super(FakeSystemInfo, self).__init__(model_name, gpu_dict, command_line)
class _FakeBrowserFinderOptions(browser_options.BrowserFinderOptions):
  """BrowserFinderOptions carrying a ready-made FakePossibleBrowser."""

  # NOTE(review): the keyword parameters precede *args, so any positional
  # argument a caller passes would bind to execute_on_startup -- confirm all
  # callers use keywords before touching this signature.
  def __init__(self, execute_on_startup=None,
               execute_after_browser_creation=None, *args, **kwargs):
    browser_options.BrowserFinderOptions.__init__(self, *args, **kwargs)
    self.fake_possible_browser = \
      FakePossibleBrowser(
          execute_on_startup=execute_on_startup,
          execute_after_browser_creation=execute_after_browser_creation)
def CreateBrowserFinderOptions(browser_type=None, execute_on_startup=None,
                               execute_after_browser_creation=None):
  """Creates fake browser finder options for discovering a browser.

  Args:
    browser_type: browser type string forwarded to BrowserFinderOptions.
    execute_on_startup: optional zero-arg callback invoked when the fake
      possible browser's Create() runs.
    execute_after_browser_creation: optional one-arg callback invoked with
      the fake browser after Create() prepares it.
  """
  return _FakeBrowserFinderOptions(
      browser_type=browser_type,
      execute_on_startup=execute_on_startup,
      execute_after_browser_creation=execute_after_browser_creation)
# Internal classes. Note that end users may still need to both call
# and mock out methods of these classes, but they should not be
# subclassed.
class _FakeBrowser(object):
def __init__(self, platform):
self._tabs = _FakeTabList(self)
# Fake the creation of the first tab.
self._tabs.New()
self._returned_system_info = FakeSystemInfo()
self._platform = platform
self._browser_type = 'release'
self._is_crashed = False
@property
def platform(self):
return self._platform
@platform.setter
def platform(self, incoming):
"""Allows overriding of the fake browser's platform object."""
assert isinstance(incoming, FakePlatform)
self._platform = incoming
@property
def returned_system_info(self):
"""The object which will be returned from calls to GetSystemInfo."""
return self._returned_system_info
@returned_system_info.setter
def returned_system_info(self, incoming):
"""Allows overriding of the returned SystemInfo object.
Incoming argument must be an instance of FakeSystemInfo. |
angelblue05/Embytest.Kodi | resources/lib/librarysync.py | Python | gpl-2.0 | 55,974 | 0.003001 | # -*- coding: utf-8 -*-
##################################################################################################
import sqlite3
import threading
from datetime import datetime, timedelta, time
import xbmc
import xbmcgui
import xbmcvfs
import api
import utils
import clientinfo
import downloadutils
import itemtypes
import embydb_functions as embydb
import kodidb_functions as kodidb
import read_embyserver as embyserver
import userclient
import videonodes
##################################################################################################
class LibrarySync(threading.Thread):
_shared_state = {}
stop_thread = False
suspend_thread = False
# Track websocketclient updates
addedItems = []
updateItems = []
userdataItems = []
removeItems = []
forceLibraryUpdate = False
refresh_views = False
    def __init__(self):
        # Borg pattern: every LibrarySync instance shares _shared_state, so
        # the websocket-update lists above are common to all threads.
        self.__dict__ = self._shared_state

        self.monitor = xbmc.Monitor()

        self.clientInfo = clientinfo.ClientInfo()
        self.addonName = self.clientInfo.getAddonName()
        self.doUtils = downloadutils.DownloadUtils().downloadUrl
        self.user = userclient.UserClient()
        self.emby = embyserver.Read_EmbyServer()
        self.vnodes = videonodes.VideoNodes()

        threading.Thread.__init__(self)
    def logMsg(self, msg, lvl=1):
        # Tag log lines with "<addon name> <class name>" so their source is
        # identifiable in the Kodi log.
        className = self.__class__.__name__
        utils.logMsg("%s %s" % (self.addonName, className), msg, lvl)
    def progressDialog(self, title, forced=False):
        """Return a background progress dialog, or None when the sync
        indicator is disabled and `forced` is False."""
        dialog = None

        if utils.settings('dbSyncIndicator') == "true" or forced:
            dialog = xbmcgui.DialogProgressBG()
            dialog.create("Emby for Kodi", title)
            self.logMsg("Show progress dialog: %s" % title, 2)

        return dialog
    def startSync(self):
        """Entry point for a sync run; returns True when it completed.

        After the install sync has run once, prefers the incremental
        fastSync (requires the "Emby.Kodi Sync Queue" server plugin) and
        falls back to a manual sync; otherwise runs the initial full sync.
        """
        settings = utils.settings
        # Run at start up - optional to use the server plugin
        if settings('SyncInstallRunDone') == "true":

            # Validate views
            self.refreshViews()
            completed = False
            # Verify if server plugin is installed.
            if settings('serverSync') == "true":
                # Try to use fast start up
                url = "{server}/emby/Plugins?format=json"
                result = self.doUtils(url)

                for plugin in result:
                    if plugin['Name'] == "Emby.Kodi Sync Queue":
                        self.logMsg("Found server plugin.", 2)
                        completed = self.fastSync()

            if not completed:
                # Fast sync failed or server plugin is not found
                completed = ManualSync().sync()
        else:
            # Install sync is not completed
            completed = self.fullSync()

        return completed
def fastSync(self):
log = self.logMsg
doUtils = self.doUtils
lastSync = utils.settings('LastIncrementalSync')
if not lastSync:
lastSync = "2010-01-01T00:00:00Z"
lastSyncTime = utils.convertdate(lastSync)
log("Last sync run: %s" % lastSyncTime, 1)
# get server RetentionDateTime
url = "{server}/emby/Emby.Kodi.SyncQueue/GetServerDateTime?format=json"
result = doUtils(url)
retention_time = "2010-01-01T00:00:00Z"
if result and result.get('RetentionDateTime'):
retention_time = result['RetentionDateTime']
#Try/except equivalent
'''
try:
retention_time = result['RetentionDateTime']
except (TypeError, KeyError):
retention_time = "2010-01-01T00:00:00Z"
'''
retention_time = utils.convertdate(retention_time)
log("RetentionDateTime: %s" % retention_time, 1)
# if last sync before retention time do a full sync
if retention_time > lastSyncTime:
log("Fast sync server retention insufficient, fall back to full sync", 1)
return False
url = "{server}/emby/Emby.Kodi.SyncQueue/{UserId}/GetItems?format=json"
params = {'LastUpdateDT': lastSync}
result = doUtils(url, parameters=params)
try:
processlist = {
'added': result['ItemsAdded'],
'update': result['ItemsUpdated'],
'userdata': result['UserDataChanged'],
'remove': result['ItemsRemoved']
}
except (KeyError, TypeError):
log("Failed to retrieve latest updates using fast sync.", 1)
return False
else:
log("Fast sync changes: %s" % result, 1)
for action in processlist:
self.triage_items(action, processlist[action])
return True
    def saveLastSync(self):
        """Persist the timestamp the next fastSync should start from.

        Prefers the server clock (via the sync-queue plugin), falling back
        to the client clock; subtracts a small overlap so events near the
        boundary are not missed.
        """
        log = self.logMsg
        # Save last sync time
        overlap = 2

        url = "{server}/emby/Emby.Kodi.SyncQueue/GetServerDateTime?format=json"
        result = self.doUtils(url)
        try: # datetime fails when used more than once, TypeError
            server_time = result['ServerDateTime']
            server_time = utils.convertdate(server_time)

        except Exception as e:
            # If the server plugin is not installed or an error happened.
            log("An exception occurred: %s" % e, 1)
            time_now = datetime.utcnow()-timedelta(minutes=overlap)
            lastSync = time_now.strftime('%Y-%m-%dT%H:%M:%SZ')
            log("New sync time: client time -%s min: %s" % (overlap, lastSync), 1)

        else:
            lastSync = (server_time - timedelta(minutes=overlap)).strftime('%Y-%m-%dT%H:%M:%SZ')
            log("New sync time: server time -%s min: %s" % (overlap, lastSync), 1)

        finally:
            utils.settings('LastIncrementalSync', value=lastSync)
def shouldStop(self):
# Checkpoint during the syncing process
if self.monitor.abortRequested():
return True
elif utils.window('emby_shouldStop') == "true":
return True
else: # Keep going
return False
    def dbCommit(self, connection):
        """Commit `connection`, but only once Kodi's own DB scan is idle.

        Waits while the `emby_kodiScan` window property is set; if an abort
        is requested while waiting, gives up without committing.
        """
        log = self.logMsg
        window = utils.window
        # Central commit, verifies if Kodi database update is running
        kodidb_scan = window('emby_kodiScan') == "true"

        while kodidb_scan:

            log("Kodi scan is running. Waiting...", 1)
            kodidb_scan = window('emby_kodiScan') == "true"

            if self.shouldStop():
                log("Commit unsuccessful. Sync terminated.", 1)
                break

            if self.monitor.waitForAbort(1):
                # Abort was requested while waiting. We should exit
                log("Commit unsuccessful.", 1)
                break
        # NB: while/else -- the commit runs only when the wait loop finished
        # without hitting a `break` above.
        else:
            connection.commit()
            log("Commit successful.", 1)
def fullSync(self, manualrun=False, repair=False, forceddialog=False):
log = sel | f.logMsg
window = utils.window
settings = utils.setting | s
# Only run once when first setting up. Can be run manually.
emby = self.emby
music_enabled = utils.settings('enableMusic') == "true"
xbmc.executebuiltin('InhibitIdleShutdown(true)')
screensaver = utils.getScreensaver()
utils.setScreensaver(value="")
window('emby_dbScan', value="true")
# Add sources
utils.sourcesXML()
embyconn = utils.kodiSQL('emby')
embycursor = embyconn.cursor()
# Create the tables for the emby database
# emby, view, version
embycursor.execute(
"""CREATE TABLE IF NOT EXISTS emby(
emby_id TEXT UNIQUE, media_folder TEXT, emby_type TEXT, media_type TEXT, kodi_id INTEGER,
kodi_fileid INTEGER, kodi_pathid INTEGER, parent_id INTEGER, checksum INTEGER)""")
embycursor.execute(
"""CREATE TABLE IF NOT EXISTS view(
view_id TEXT UNIQUE, view_name TEXT, media_type TEXT, kodi_tagid INTEGER)""")
embycursor.execute("CREATE TABLE IF NOT EXISTS version(idVersion TEXT)")
embyconn.co |
mxrrow/zaicoin | src/deps/boost/tools/build/v2/test/symlink.py | Python | mit | 845 | 0.001183 | #!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2003 Vladimir Prus
# Distributed u | nder the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test the 'symlink' rule.
import os
import BoostBuild
if os.name != 'posix':
print "The symlink tests c | an be run on posix only."
import sys
sys.exit(1)
t = BoostBuild.Tester()
t.write("jamroot.jam", "import gcc ;")
t.write("jamfile.jam", """
exe hello : hello.cpp ;
symlink hello_release : hello/<variant>release ;
symlink hello_debug : hello/<variant>debug ;
symlink links/hello_release : hello/<variant>release ;
""")
t.write("hello.cpp", """
int main() {}
""")
t.run_build_system()
t.expect_addition([
'hello_debug.exe',
'hello_release.exe',
'links/hello_release.exe'])
t.cleanup()
|
wilsonssun/baseball-gamethread | app.py | Python | bsd-3-clause | 9,712 | 0.008237 | import functools
import os
import re
from collections import namedtuple
from datetime import datetime, time, timedelta
from flask import Flask, request, render_template, jsonify
from raven.contrib.flask.utils import get_data_from_request
from dateutil.parser import parse as parse_datetime
import requests
from pyquery import PyQuery
import pytz
app = Flask(__name__)
Division = namedtuple("Division", ["name", "teams"])
Team = namedtuple("Team", ["name", "shortcode", "subreddit", "espn_url"])
DIVISIONS = [
Division("NL West", [
Team(
"Arizona Diamondbacks", "ARI", "azdiamondbacks",
"http://espn.go.com/mlb/team/_/name/ari/arizona-diamondbacks",
),
Team(
"Colorado Rockies", "COL", "ColoradoRockies",
"http://espn.go.com/mlb/team/_/name/col/colorado-rockies",
),
Team(
"Los Angeles Dodgers", "LAD", "Dodgers",
"http://espn.go.com/mlb/team/_/name/lad/los-angeles-dodgers",
),
Team(
"San Diego Padres", "SD", "Padres",
"http://espn.go.com/mlb/team/_/name/sd/san-diego-padres",
),
Team(
"San Francisco Giants", "SFG", "SFGiants",
"http://espn.go.com/mlb/team/_/name/sf/san-francisco-giants"
),
]),
Division("NL Central", [
Team(
"Chicago Cubs", "CHC", "Cubs",
"http://espn.go.com/mlb/team/_/name/chc/chicago-cubs"
),
Team(
"Cincinnati Reds", "CIN", "Reds",
"http://espn.go.com/mlb/team/_/name/cin/cincinnati-reds"
),
Team(
"Houston Astros", "HOU", "Astros",
"http://espn.go.com/mlb/team/_/name/hou/houston-astros"
),
Team(
"Milwaukee Brewers", "MIL", "Brewers",
"http://espn.go.com/mlb/team/_/name/mil/milwaukee-brewers"
),
Team(
"Pittsburgh Pirates", "PIT", "Buccos",
"http://espn.go.com/mlb/team/_/name/pit/pittsburgh-pirates"
),
Team(
"St. Louis Cardinals", "STL", "Cardinals",
"http://espn.go.com/mlb/team/_/name/stl/st-louis-cardinals"
),
]),
Division("NL East", [
Team(
"Atlanta Braves", "ATL", "Braves",
"http://espn.go.com/mlb/team/_/name/atl/atlanta-braves"
),
Team(
"Miami Marlins", "MIA", "letsgofish",
"http://espn.go.com/mlb/team/_/name/mia/miami-marlins"
),
Team(
"New York Mets", "NYM", "NewYorkMets",
"http://espn.go.com/mlb/team/_/name/nym/new-york-mets"
),
Team(
"Philadelphia Phillies", "PHI", "Phillies",
"http://espn.go.com/mlb/team/_/name/phi/philadelphia-phillies"
),
Team(
"Washington Nationals", "WSH", "Nationals",
"http://espn.go.com/mlb/team/_/name/wsh/washington-nationals"
),
]),
Division("AL West", [
Team(
"Los Angeles Angels", "LAA", "AngelsBaseball",
"http://espn.go.com/mlb/team/_/name/laa/los-angeles-angels"
),
Team(
"Oakland Athletics", "OAK", "oaklandathletics",
"http://espn.go.com/mlb/team/_/name/oak/oakland-athletics"
),
Team(
"Seattle Mariners", "SEA", "Mariners",
"http://espn.go.com/mlb/team/_/name/sea/seattle-mariners"
),
Team(
"Texas Rangers", "TEX", "TexasRangers",
"http://espn.go.com/mlb/team/_/name/tex/texas-rangers"
),
]),
Division("AL Central", [
Team(
"Chicago White Sox", "CHW", "WhiteSox",
"http://espn.go.com/mlb/team/_/name/chw/chicago-white-sox"
),
Team(
"Cleveland Indians", "CLE", "WahoosTipi",
"http://espn.go.com/mlb/team/_/name/cle/cleveland-indians"
),
Team(
"Detroit Tigers", "DET", "MotorCityKitties",
"http://espn.go.com/mlb/team/_/name/det/detroit-tigers"
),
Team(
"Kansas City Royals", "KC", "KCRoyals",
"http://espn.go.com/mlb/team/_/name/kc/kansas-city-royals"
),
Team(
"Minnesota Twins", "MIN", "MinnesotaTwins",
"http://espn.go.com/mlb/team/_/name/min/minnesota-twins"
),
]),
Division("AL East", [
Team(
"Baltimore Orioles", "BAL", "Orioles",
"http://espn.go.com/mlb/team/_/name/bal/baltimore-orioles"
),
Team(
"Boston Red Sox", "BOS", "RedSox",
"http://espn.go.com/mlb/team/_/name/bos/boston-red-sox"
),
Team(
"New York Yankees", "NYY", "Yankees",
"http://espn.go.com/mlb/team/_/name/nyy/new-york-yankees"
),
Team(
"Tampa Bay Rays", "TB", "TampaBayRays",
"http://espn.go.com/mlb/team/_/name/tb/tampa-bay-rays"
),
Team(
"Toronto Blue Jays", "TOR", "TorontoBlueJays",
"http://espn.go.com/mlb/team/_/name/tor/toronto-blue-jays"
),
]),
]
# Teams whose MLB.com gameday code differs from the ESPN-style shortcode used
# in DIVISIONS. Teams not listed here use the same code on both sites
# (callers lowercase the value before building a gameday URL).
MLB_SHORTCODE_MAP = {
    "LAD": "LAN",
    "SD" : "SDN",
    "NYM": "NYN",
    "SFG": "SFN",
    "KC" : "KCA",
    "CHC": "CHN",
    "STL": "SLN",
    "CHW": "CHA",
    "TB" : "TBA",
    "NYY": "NYA",
    "WSH": "WAS",
    "LAA": "ANA",
}
def get_team(shortcode):
    """Return the Team whose shortcode matches *shortcode*.

    Searches every division in DIVISIONS; raises LookupError when no
    team carries the given shortcode.
    """
    candidates = (
        team
        for division in DIVISIONS
        for team in division.teams
        if team.shortcode == shortcode
    )
    try:
        return next(candidates)
    except StopIteration:
        raise LookupError
@app.route("/")
def home():
    """Render the landing page listing every division and its teams."""
    return render_template("home.html", divisions=DIVISIONS)
# MLB.com gameday/player URL templates. {away}/{home} take lowercased
# MLB shortcodes (see MLB_SHORTCODE_MAP); {pid} is an MLB player id.
MLB_URL = "http://mlb.mlb.com/mlb/gameday/index.jsp?gid={year}_{month}_{day}_{away}mlb_{home}mlb_1&mode=preview"
PROB_URL = "http://mlb.mlb.com/mlb/gameday/index.jsp?gid={year}_{month}_{day}_{away}mlb_{home}mlb_1&mode=probables"
PLAYER_URL = "http://mlb.mlb.com/team/player.jsp?player_id={pid}"
def error(msg):
    """Return the JSON error payload ({"error": msg}) used by the front-end."""
    return jsonify(error=msg)
def handle_errors(func):
    """Decorator for view functions: report unexpected exceptions to Sentry
    and degrade to a generic JSON error response instead of a 500 page.

    NOTE(review): `sentry` is not defined anywhere in this chunk -- presumably
    a module-level Raven client configured elsewhere; confirm. When it is
    None the exception is re-raised (useful in development).
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            if sentry is None:
                raise
            # Capture with full request context so the report is actionable.
            sentry.client.capture('Exception',
                data=get_data_from_request(request),
                extra={
                    'app': app,
                },
            )
            return error("Uh oh. Something went wrong on our end. We've "
                         "dispatched trained monkeys to investigate.")
    return inner
# Matches a "(wins-losses)" record such as "(10-5)".
MLB_RECORD_RE = re.compile(r"\((?P<wins>\d+)-(?P<losses>\d+)\)")


def find_espn_record(team):
    """Scrape *team*'s win-loss record from its ESPN team page.

    Returns [wins, losses] as strings. Raises requests.HTTPError on a bad
    response. NOTE(review): depends on ESPN's "#sub-branding .sub-title"
    markup containing "W-L, ..." -- fragile against site redesigns.
    """
    r = requests.get(team.espn_url)
    r.raise_for_status()
    page = PyQuery(r.text)
    text = page("#sub-branding").find(".sub-title").text()
    # Text looks like "10-5, 2nd in NL West"; keep only the record part.
    record = text.split(",", 1)[0]
    return record.split("-")
@app.route("/generate/", methods=["POST"])
@handle_errors
def generate():
try:
away = get_team(request.form["away"])
home = get_team(request.form["home"])
except LookupError:
return error("Please select a team.")
today = pytz.timezone("US/Eastern").fromutc(datetime.utcnow()).date()
mlb_away_shortcode = MLB_SHORTCODE_MAP.get(away.shortcode, away.shortcode).lower()
mlb_home_shortcode = MLB_SHORTCODE_MAP.get(home.shortcode, home.shortcode).lower()
mlb_url = MLB_URL.format(
year=today.year,
month=str(today.month).zfill(2),
day=str(today.day).zfill(2),
away=mlb_away_shortcode,
home= | mlb_home_shortcode,
)
r = requests.get(mlb_url)
if r.status_code == 500:
return error("These teams don't seem to be playing each other tonight.")
r.rais | e_for_status()
info = re.search('(?<=<li id="preview-header-info">)[a-zA-Z0-9 /.,:]+', r.text).group(0)
prob_url = PROB_URL.format(
year=today.year,
month=str(today.month).zfill(2),
day=str(today.day).zfill(2),
away=mlb_away_shortcode,
home=mlb_home_shortcode,
)
r = requests.get(prob_url)
pids = re.findall('(?<=<a href="/team/player.jsp\?player_id=)[0-9]+', r.text)
pps = re.findall('(? |
1995parham/yepc | yepc/core/to_c.py | Python | gpl-3.0 | 5,989 | 0.002004 | from ..domain.symtable import SymbolTable
class YEPCToC:
    """Translate YEPC quadruples into the text of a standalone C program.

    Walks the symbol table to emit global variable/struct declarations,
    then lowers every quadruple into one labeled statement inside main().
    The generated program uses a runtime stack ("stack.h") for saving and
    restoring scope environments.
    """

    def __init__(self, quadruples, symtable):
        # Intermediate-code quadruples to lower (op, arg_one, arg_two, result).
        self.quadruples = quadruples
        # Root symbol table produced by the front end.
        self.symtable = symtable
        # Maps scope name -> list of (qualified name, C type) pairs pushed
        # for that scope, so restore_env can pop them in reverse order.
        self.env = {}

    def store_env(self, symbol_table):
        # Push the environment from the symbol table
        # (emits one stack_push per variable of the scope and its nested
        # 'scope' children; records what was pushed in self.env).
        self.env[symbol_table.name] = []
        code = "/* Store the environment of %s */\n" % symbol_table.name
        q = []
        q.append(symbol_table)
        while bool(q):
            t = q.pop()
            for symbol in t.symbols:
                if isinstance(t.symbols[symbol], str):
                    # Plain variable: value of the dict is its C type name.
                    qn = t.get_symbol_name(symbol)
                    code += '\tstack_push(yepc_stack, &%s, sizeof(%s));\n' % (qn, t.symbols[symbol])
                    self.env[symbol_table.name].append((qn, t.symbols[symbol]))
                elif isinstance(t.symbols[symbol], SymbolTable):
                    s = t.symbols[symbol]
                    if s.type == 'scope':
                        q.append(s)
        return code

    def restore_env(self, symbol_table, return_storage):
        # Pop the environment from the symbol table
        # (reverse order of store_env; the slot named *return_storage* is
        # discarded instead of restored so a returned value survives).
        code = "/* Restore the environment of %s */\n" % symbol_table.name
        for (name, type) in reversed(self.env[symbol_table.name]):
            if name == return_storage:
                code += '\tstack_pop(yepc_stack, NULL, 0);\n'
            else:
                code += '\tstack_pop(yepc_stack, &%s, sizeof(%s));\n' % (name, type)
        del self.env[symbol_table.name]
        return code

    def to_c(self):
        """Generate and return the complete C translation unit as a string."""
        c_code = ""
        # Objects array for allocating and deallocating
        objects = []
        # Includes :)
        c_code += "#include <stdio.h>\n"
        c_code += "#include <stdlib.h>\n"
        c_code += "#include <setjmp.h>\n"
        c_code += '\n'
        c_code += '#include "stack.h"\n'
        c_code += '\n'
        # Variable declaration based on BFS
        q = []
        q.append(self.symtable)
        while bool(q):
            t = q.pop()
            for symbol in t.symbols:
                if isinstance(t.symbols[symbol], str):
                    qn = t.get_symbol_name(symbol)
                    if 'struct' in t.symbols[symbol]:
                        # Struct-typed variables are pointers; strip the
                        # trailing '*' to get the base type for malloc.
                        objects.append((qn, t.symbols[symbol][:-1]))
                    if symbol in t.meta and 'size' in t.meta[symbol]:
                        # We see an array
                        c_code += '%s %s[%d];\n' % (t.symbols[symbol][:-1], qn, t.meta[symbol]['size'])
                    else:
                        c_code += '%s %s;\n' % (t.symbols[symbol], qn)
                elif isinstance(t.symbols[symbol], SymbolTable):
                    s = t.symbols[symbol]
                    if s.type == 'record':
                        # Record tables become C struct definitions.
                        c_code += '%s {\n' % s.name[:-1]
                        for (name, type) in s.symbols.items():
                            if name in s.meta and 'size' in s.meta[name]:
                                # We see an array
                                c_code += '\t%s %s[%d];\n' % (type[:-1], name[1:], s.meta[name]['size'])
                            else:
                                c_code += "\t%s %s;\n" % (type, name[1:])
                        c_code += '};\n'
                    else:
                        q.append(s)
        c_code += '\n'
        # Main !
        c_code += "int main(){\n"
        # Stack initiation
        c_code += "\tstruct stack *yepc_stack;\n"
        c_code += "\n"
        c_code += "\tyepc_stack = stack_create();\n"
        # Object creation
        c_code += "\n\t/* Object initiation */\n"
        for (name, type) in objects:
            c_code += "\t%s = malloc(sizeof(%s));\n" % (name, type)
        c_code += "\n"
        # QuadRuples to code: every quadruple becomes one labeled statement
        # so 'goto' quadruples can target any instruction index.
        for i, entry in enumerate(self.quadruples):
            op = entry.op
            arg1 = entry.arg_one
            arg2 = entry.arg_two
            result = entry.result
            line = ''
            if op == 'if':
                line += "if (" + str(arg1) + ")"
            elif op == 'goto':
                line += "goto " + self.make_label(arg1) + ";"
            elif op == '+':
                line += str(result) + " = " + str(arg1) + " + " + str(arg2) + ";"
            elif op == '-':
                line += str(result) + " = " + str(arg1) + " - " + str(arg2) + ";"
            elif op == '*':
                line += str(result) + " = " + str(arg1) + " * " + str(arg2) + ";"
            elif op == '/':
                line += str(result) + " = " + str(arg1) + " / " + str(arg2) + ";"
            elif op == '%':
                line += str(result) + " = " + str(arg1) + " % " + str(arg2) + ";"
            elif op == "=":
                line += str(result) + " = " + str(arg1) + ";"
            elif op == "rand":
                line += str(result) + " = rand();"
            elif op == "push":
                line += "stack_push(yepc_stack, &%s, sizeof(%s));" % (arg1, arg2)
            elif op == "pop":
                if result != '':
                    line += "stack_pop(yepc_stack, &%s, sizeof(%s));" % (result, arg1)
                else:
                    # Pop-and-discard.
                    line += "stack_pop(yepc_stack, NULL, 0);"
            elif op == "seek":
                line += "stack_seek(yepc_stack, %d, &%s, sizeof(%s));" % (arg1, result, arg2)
            elif op == "return":
                line += "return 0;"
            elif op == "setjmp":
                line += "%s = setjmp(%s);" % (result, arg1)
            elif op == "longjmp":
                line += "longjmp(%s, %s);" % (arg1, arg2)
            elif op == "store_env":
                line += self.store_env(arg1)
            elif op == "restore_env":
                line += self.restore_env(arg1, arg2)
            c_code += "\t%s: %s\n" % (self.make_label(i), line)
        c_code += "}"
        # NOTE(review): "ouput.c" is a typo in the status message, kept as-is.
        print("ouput.c generated")
        return c_code

    def make_label(self, index):
        """Return the C label name for quadruple *index* (e.g. "L3")."""
        return "L" + str(index)
|
apache/incubator-airflow | airflow/migrations/versions/4446e08588_dagrun_start_end.py | Python | apache-2.0 | 1,372 | 0.001458 | #
# Licensed to the Apache Software | Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with th | e License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""dagrun start end
Revision ID: 4446e08588
Revises: 561833c1c74b
Create Date: 2015-12-10 11:26:18.439223
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '4446e08588'
down_revision = '561833c1c74b'
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable start_date/end_date timestamp columns to dag_run."""
    op.add_column('dag_run', sa.Column('end_date', sa.DateTime(), nullable=True))
    op.add_column('dag_run', sa.Column('start_date', sa.DateTime(), nullable=True))
def downgrade():
    """Drop the start_date/end_date columns added by upgrade()."""
    op.drop_column('dag_run', 'start_date')
    op.drop_column('dag_run', 'end_date')
|
praekelt/jmbo-superhero | superhero/models.py | Python | bsd-3-clause | 269 | 0 | from django.utils.translation import ugettext as _
from django.db import models
from jmbo.models import ModelBase
class Superhero(ModelBase):
    """Jmbo content item representing a superhero."""

    # editable=False keeps the field out of model forms; presumably the name
    # is set programmatically -- confirm against the code that creates these.
    name = models.CharField(max_length=256, editable=False)

    class Meta:
        # Override Django's default "Superheros" pluralization.
        verbose_name_plural = _("Superheroes")
|
nisavid/spruce-settings | spruce/settings/_conf.py | Python | lgpl-3.0 | 3,013 | 0.000996 | """Conf format
The conf format is registered by default. It reads and writes settings
using :mod:`ConfigParser` at locations that are similar to typical Unix
configuration files---that is, in :file:`.conf` files specific to each
component scope under :file:`/etc/{organization}` for system-wide
settings and under :file:`~/.{organization}` for user-specific settings.
"""
__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"
import ConfigParser as _configparser
# TODO: (Python 3)
#import configparser as _configparser
import os as _os
from . import _core
from . import _exc
def _read_settings(file_, keys):
settings = {}
parser = _configparser.RawConfigParser(allow_no_value=True)
parser.read(file_)
if keys == ['']:
for section in parser.sections():
for subkey, value in parser.items(section):
settings[section + '/' + subkey] = value
else:
for key in keys:
section, _, subkey = key.rpartition('/')
if not section:
section = _configparser.DEFAULTSECT
if parser.has_section(section) \
and parser.has_option(section, subkey):
try:
settings[key] = parser.get(section, subkey)
except _configparser.Error as exc:
raise _exc.MalformedSettingsLocation(message=str(exc))
else:
settings[key] = None
return settings
def _write_settings(file_, settings):
if settings:
# FIXME
raise NotImplementedError('writing \'conf\' settings is not yet'
' implemented')
_core.Settings.register_format('conf', extension='.conf',
read_func=_read_settings,
write_func=_write_settings)
_homedir = _os.path.expanduser('~')
_paths = {('system', 'organ | ization'):
_os.path.join(_os.path.sep, 'etc', '{organization}',
'{organization}{extension}'),
('system', 'application'):
_os.path.join(_os.path.sep, 'etc', '{organization}',
'{application}{extension} | '),
('system', 'subsystem'):
_os.path.join(_os.path.sep, 'etc', '{organization}',
'{application}', '{subsystem}{extension}'),
('user', 'organization'):
_os.path.join(_homedir, '.{organization}',
'{organization}{extension}'),
('user', 'application'):
_os.path.join(_homedir, '.{organization}',
'{application}{extension}'),
('user', 'subsystem'):
_os.path.join(_homedir, '.{organization}', '{application}',
'{subsystem}{extension}'),
}
for (_base_scope, _component_scope), _path in _paths.iteritems():
_core.Settings.set_path('conf', _base_scope, _component_scope, _path)
|
nimasmi/wagtail | wagtail/images/api/v2/views.py | Python | bsd-3-clause | 747 | 0.002677 | from wagtail.api.v2.filters import FieldsFilter, OrderingFilter, SearchFilter
from wagtail.api.v2.views import BaseAPIViewSet
from ... import get_image_model
from .serializers import ImageSerializer
class ImagesAPIViewSet(BaseAPIViewSet):
    """Wagtail API v2 endpoint exposing the configured image model."""

    base_serializer_class = ImageSerializer
    filter_backends = [FieldsFilter, OrderingFilter, SearchFilter]

    # Extend the base endpoint's field sets with image-specific fields.
    body_fields = BaseAPIViewSet.body_fields + ['title', 'width', 'height']
    meta_fields = BaseAPIViewSet.meta_fields + ['tags', 'download_url']
    listing_default_fields = BaseAPIViewSet.listing_default_fields + ['title', 'tags', 'download_url']
    nested_default_fields = BaseAPIViewSet.nested_default_fields + ['title', 'download_url']

    name = 'images'
    # Resolved at import time via the swappable image-model setting.
    model = get_image_model()
|
neomacedo/ScriptsUteis | Python/checksum_comparator.py | Python | gpl-3.0 | 1,563 | 0.003839 | """
Helcio Macedo
Checksum Verifier v1.0
https://github.com/neomacedo/ScriptsUteis
-----------------------------------------------------------
Script used to compare if local file its the same as remote.
"""
import hashlib
import urllib2
import optparse
# Remote address to file
remote_url = 'https://raw.githubusercontent.com/neomacedo/Area51/master/arquivo_teste.txt'
# Local address to the file
local_url = '../../GitHub/Area51/arquivo_teste.txt'
# Method who will return md5 Checksum [Local]
def get_local_md5_sum(url):
    """Return the MD5 hex digest of the local file at *url* (a filesystem path).

    Prints a diagnostic and returns None when the file cannot be read.
    """
    try:
        # Bug fix: the original ignored its argument and always hashed the
        # module-level `local_url`; hash the path we were actually given.
        # `with` also closes the file handle the original leaked.
        with open(url, 'rb') as handle:
            return hashlib.md5(handle.read()).hexdigest()
    except Exception as ex:
        # Fixed copy-paste "remote" in the message; `ex.message` is
        # Python 2 only, so use str(ex) instead.
        print('Failed to get local file checksum! \n Exception: ' + str(ex))
# Method who will return md5 Checksum [Remote]
# Method who will return md5 Checksum [Remote]
def get_remote_md5_sum(url):
    """Return the MD5 hex digest of the remote file, or None on failure.

    NOTE(review): the *url* parameter is ignored -- the URL actually fetched
    comes from the --url/-u command-line option, defaulting to remote_url.
    Python 2 code: urllib2 and `ex.message` do not exist on Python 3.
    """
    try:
        # Parse options: let the command line override the remote URL.
        opt = optparse.OptionParser()
        opt.add_option('--url', '-u', default=remote_url)
        options, args = opt.parse_args()
        remote = urllib2.urlopen(options.url)
        md5hash = hashlib.md5()
        # Whole-body read: fine for small files, memory-hungry for large ones.
        data = remote.read()
        md5hash.update(data)
        return md5hash.hexdigest()
    except Exception as ex:
        print 'Failed to get remote file checksum! \n Exception: ' + str(ex.message)
# Main Method
if __name__ == '__main__':
    # Print both digests, then compare. NOTE(review): each checksum is
    # computed twice (once for printing, once for comparing), so the remote
    # file is downloaded twice.
    print 'MD5 Local: ' + get_local_md5_sum(local_url)
    print 'MD5 Remote: ' + get_remote_md5_sum(remote_url)
    if get_local_md5_sum(local_url) == get_remote_md5_sum(remote_url):
        print 'Local file its the same as remote file'
|
dschien/energy-aggregator | ep/tests/test_celery.py | Python | mit | 3,630 | 0.003581 | import json
from decimal import Decimal
from unittest import skip
# from unittest.mock import patch
import unittest.mock as mock
from celery import current_app
from django.conf import settings
from django.test import TestCase
from ep.models import Site, ScheduleDeviceParameterGroup, DeviceParameter, StateChangeEvent
from ep.tasks import send_msg, scheduled_device_state_change
from ep.tests.static_factories import SiteFactory
from ep_secure_importer.controllers.secure_client import secure_site_name, SecureClient
from django.test import TestCase, modify_settings, override_settings
from prefect.tasks import import_from_site
__author__ = 'schien'
# Point the 'test-server' entry at the test Secure server host for the
# duration of every test in this class.
@override_settings(SECURE_SERVERS={'test-server': {'HOST': settings.TEST_SECURE_SERVER_HOST + ':8080',
                                                   'WSHOST': settings.TEST_SECURE_SERVER_HOST + ':5678',
                                                   'USER': "guest", 'PASSWORD': 'guest'}})
class TaskTest(TestCase):
    """Integration tests for the celery tasks around device state changes."""

    @classmethod
    def setUpTestData(cls):
        # Run celery tasks synchronously in-process so tests need no broker.
        settings.CELERY_ALWAYS_EAGER = True
        current_app.conf.CELERY_ALWAYS_EAGER = True

    # @skip('we can only run this on a fully deployed stack')
    def test_messaging(self):
        # send_msg should accept a JSON payload and return a truthy result.
        self.assertTrue(send_msg.delay(json.dumps({'test': 1})))

    @skip('we can only run this on a fully deployed stack')
    def test_import(self):
        Site(name='goldney').save()
        self.assertTrue(import_from_site.delay('goldney'))

    def test_schedule(self):
        # A scheduled state change must call through to the Secure client
        # with the requested target value in the DPDO payload.
        inst = SecureClient('test-server')
        with mock.patch.object(inst, 'update_device_data',
                               wraps=inst.update_device_data) as update_device_data:
            # with patch('ep_secure_importer.controllers.secure_client.update_device_data') as update_device_data:
            update_device_data.return_value = ("server response", 200)
            SiteFactory.create(name=secure_site_name)
            first = DeviceParameter.objects.all().first()
            s = ScheduleDeviceParameterGroup()
            s.save()
            s.device_parameters.add(first)
            current_value = first.measurements.latest()['value']
            target_value = current_value + 1
            scheduled_device_state_change.delay(device_group_id=s.id, target_value=target_value)
            self.assertTrue(update_device_data.called)
            # Inspect the JSON body handed to the device for the new value.
            call_args = json.loads(update_device_data.call_args[0][0])
            cv = call_args['DeviceData']['DPDO'][0]['CV']
            self.assertTrue(target_value == Decimal(str(cv)))

    def test_trigger_source_heuristic(self):
        # NOTE(review): no assertion here -- this only checks the call chain
        # runs without raising (memcached is mocked out via pylibmc.Client).
        with mock.patch('pylibmc.Client') as mc:
            SiteFactory.create(name="test")
            dp = DeviceParameter.objects.all().first()
            SecureClient.store_device_state_change_request(dp.id, 1, StateChangeEvent.SCHEDULE)
            source = SecureClient.get_trigger_source(dp, 1)
# @patch('ep.signals.device_parameter_state_change.send_robust')
# @httpretty.activate
# def test_detect_setpoint_change(self, mock):
# result = device_parameter_state_change.send_robust(sender=self.__class__, device_parameter_id=123,
# type='test setting', previous='previous', new='new value',
# trigger='test trigger')
# print(result)
# self.assertTrue(mock.called)
# self.assertTrue(mock.call_count == 1)
# self.assertTrue(mock.call_args_list[0][1]['trigger'] == 'test trigger')
|
taschini/morepath | morepath/tests/test_compat.py | Python | bsd-3-clause | 799 | 0 | from morepath import compat
def test_text_type():
    # compat.text_type is the unicode string type (unicode on Py2, str on
    # Py3): a text literal matches, raw bytes never do.
    assert isinstance(u'foo', compat.text_type)
    assert not isinstance(b'foo', compat.text_type)
def test_string_types():
    # string_types accepts native and unicode strings on both majors;
    # bytes only count as a string on Python 2.
    assert isinstance('foo', compat.string_types)
    assert isinstance(u'foo', compat.string_types)
    if compat.PY3:
        assert not isinstance(b'foo', compat.string_types)
    else:
        assert isinstance(b'foo', compat.string_types)
def test_bytes_():
    # bytes_ must encode text (non-ASCII included) and pass bytes through
    # unchanged.
    text = u'Z\N{latin small letter u with diaeresis}rich'
    code = compat.bytes_(text)
    assert isinstance(code, bytes)
    assert code == compat.bytes_(code)
def test_withclass():
    # with_metaclass must apply the metaclass while keeping object as the
    # only real base class.
    class Meta(type):
        pass

    class Class(compat.with_metaclass(Meta)):
        pass

    assert type(Class) == Meta
    assert Class.__bases__ == (object,)
|
SMxJrz/Elasticd | test/__init__.py | Python | apache-2.0 | 566 | 0.0053 | import unittest
from elasticd.plugins import BasePlugin
from elasticd.plugins import ResourceLo | cator
from elasticd.plugins import Driver
from elasticd.plugins import Datastore
from elasticd.plugin_manager import PluginManager
import os
import ConfigParser
def get_test_plugin_manager():
    """Build a PluginManager from the repository's conf/settings.cfg."""
    # Resolve the config path relative to this test module so tests work
    # from any working directory.
    config_file = os.path.dirname(os.path.realpath(__file__)) + '/../conf/settings.cfg'
    config_file = os.path.realpath(config_file)
    config = ConfigParser.ConfigParser()
    config.read(config_file)
    _plugin_manager = PluginManager(config)
    return _plugin_manager
xpybuild/xpybuild | tests/correctness/framework/DepGraph/run.py | Python | apache-2.0 | 306 | 0.026144 | from pys | ys.constants import *
from xpybuild.xpybuild_basetest import XpybuildBaseTest
class PySysTest(XpybuildBaseTest):
    """Check that xpybuild --depgraph emits the expected dependency graph."""

    def execute(self):
        self.xpybuild(args=['--depgraph', 'depgraph-output.dot'])

    def validate(self):
        # The generated graph must match the checked-in reference exactly.
        self.assertDiff(file1='depgraph-output.dot', file2='ref-depgraph-output.dot')
|
ezequielpereira/Time-Line | libs/wx/tools/Editra/src/eclib/errdlg.py | Python | gpl-3.0 | 11,900 | 0.002353 | ###############################################################################
# Name: errdlg.py #
# Purpose: Error Reporter Dialog #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2009 Co | dy Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
Editra Control Library: Error Reporter Dialog
Dialog for displaying exc | eptions and reporting errors to application maintainer.
This dialog is intended as a base class and should be subclassed to fit the
applications needs.
This dialog should be initiated inside of a sys.excepthook handler.
Example:
sys.excepthook = ExceptHook
...
def ExceptionHook(exctype, value, trace):
# Format the traceback
ftrace = ErrorDialog.FormatTrace(exctype, value, trace)
# Ensure that error gets raised to console as well
print ftrace
# If abort has been set and we get here again do a more forcefull shutdown
if ErrorDialog.ABORT:
os._exit(1)
# Prevent multiple reporter dialogs from opening at once
if not ErrorDialog.REPORTER_ACTIVE and not ErrorDialog.ABORT:
dlg = ErrorDialog(ftrace)
dlg.ShowModal()
dlg.Destroy()
@summary: Error Reporter Dialog
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: errdlg.py 62466 2009-10-21 23:35:41Z CJP $"
__revision__ = "$Revision: 62466 $"
__all__ = [# Classes
'ErrorDialog', 'ErrorReporter',
# Functions
'TimeStamp']
#----------------------------------------------------------------------------#
# Dependencies
import os
import sys
import platform
import time
import traceback
import wx
#----------------------------------------------------------------------------#
# Globals
#_ = wx.GetTranslation
from gettext import gettext as _
#----------------------------------------------------------------------------#
class ErrorReporter(object):
    """Crash/Error Reporter Service
    @summary: Stores all errors caught during the current session.
    @note: singleton class
    """
    # Shared singleton instance (set on first construction).
    instance = None
    # True until the first __init__ has run; flipped per-instance after that.
    _first = True

    def __init__(self):
        """Initialize the reporter
        @note: The ErrorReporter is a singleton.
        """
        # Ensure init only happens once
        if self._first:
            object.__init__(self)
            self._first = False
            self._sessionerr = list()
        else:
            pass

    def __new__(cls, *args, **kargs):
        """Maintain only a single instance of this object
        @return: instance of this class
        """
        # NOTE(review): forwarding *args/**kargs to object.__new__ is a
        # Python 2 idiom; Python 3 raises TypeError when extra args are
        # passed -- confirm target interpreter before reuse.
        if not cls.instance:
            cls.instance = object.__new__(cls, *args, **kargs)
        return cls.instance

    def AddMessage(self, msg):
        """Adds a message to the reporters list of session errors
        @param msg: The Error Message to save
        """
        # Deduplicate: identical messages are stored only once.
        if msg not in self._sessionerr:
            self._sessionerr.append(msg)

    def GetErrorStack(self):
        """Returns all the errors caught during this session
        @return: formatted log message of errors
        """
        return (os.linesep * 2).join(self._sessionerr)

    def GetLastError(self):
        """Gets the last error from the current session
        @return: Error Message String
        """
        # Returns None implicitly when no errors have been recorded.
        if len(self._sessionerr):
            return self._sessionerr[-1]
#-----------------------------------------------------------------------------#
class ErrorDialog(wx.Dialog):
"""Dialog for showing errors and and notifying Editra.org should the
user choose so.
"""
ID_SEND = wx.NewId()
ABORT = False
REPORTER_ACTIVE = False
def __init__(self, parent, id=wx.ID_ANY, title=u'',
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER,
name="ErrorReporterDlg", message=u''):
"""Initialize the dialog
@param message: Error message to display
"""
ErrorDialog.REPORTER_ACTIVE = True
wx.Dialog.__init__(self, parent, id, title, pos, size, style, name)
# Give message to ErrorReporter
ErrorReporter().AddMessage(message)
# Attributes
self.err_msg = os.linesep.join((self.GetEnvironmentInfo(),
"#---- Traceback Info ----#",
ErrorReporter().GetErrorStack(),
"#---- End Traceback Info ----#"))
# Layout
self._panel = ErrorPanel(self, self.err_msg)
self._DoLayout()
self.SetMinSize(wx.Size(450, 300))
# Event Handlers
self.Bind(wx.EVT_BUTTON, self.OnButton)
self.Bind(wx.EVT_CLOSE, self.OnClose)
# Auto show at end of init
self.CenterOnParent()
def _DoLayout(self):
"""Layout the dialog and prepare it to be shown
@note: Do not call this method in your code
"""
msizer = wx.BoxSizer(wx.VERTICAL)
msizer.Add(self._panel, 1, wx.EXPAND)
self.SetSizer(msizer)
self.SetInitialSize()
#---- Override in Subclass ----#
def Abort(self):
"""Called to abort the application
@note: needs to be overidden in sublcasses
"""
raise NotImplementedError("Abort must be implemented!")
def GetEnvironmentInfo(self):
"""Get the enviromental info / Header of error report
@return: string
"""
info = list()
info.append("#---- Notes ----#")
info.append("Please provide additional information about the crash here")
info.extend(["", ""])
info.append("#---- System Information ----#")
info.append(self.GetProgramName())
info.append("Operating System: %s" % wx.GetOsDescription())
if sys.platform == 'darwin':
info.append("Mac OSX: %s" % platform.mac_ver()[0])
info.append("Python Version: %s" % sys.version)
info.append("wxPython Version: %s" % wx.version())
info.append("wxPython Info: (%s)" % ", ".join(wx.PlatformInfo))
info.append("Python Encoding: Default=%s File=%s" % \
(sys.getdefaultencoding(), sys.getfilesystemencoding()))
info.append("wxPython Encoding: %s" % wx.GetDefaultPyEncoding())
info.append("System Architecture: %s %s" % (platform.architecture()[0], \
platform.machine()))
info.append("Byte order: %s" % sys.byteorder)
info.append("Frozen: %s" % str(getattr(sys, 'frozen', 'False')))
info.append("#---- End System Information ----#")
info.append("")
return os.linesep.join(info)
def GetProgramName(self):
"""Get the program name/version info to include in error report
@return: string
"""
return wx.GetApp().GetAppName()
def Send(self):
"""Called to send error report
@note: needs to be overridden in subclasses
"""
raise NotImplementedError("Send must be implemented!")
#---- End Required overrides ----#
@staticmethod
def FormatTrace(exctype, value, trace):
"""Format the traceback
@return: string
"""
exc = traceback.format_exception(exctype, value, trace)
exc.insert(0, u"*** %s ***%s" % (TimeStamp(), os.linesep))
ftrace = u"".join(exc)
return ftrace
def SetDescriptionLabel(self, label):
"""Set the dialogs main description text
@param label: string
"""
self._panel.SetDescriptionText(label)
def ShowAbortButton(self, show=True):
"""Show/Hide the Abort button
@keyword show: bool
"""
btn = self._panel.FindWindowById(wx.ID_ABORT)
if btn is not None:
btn.Show(show)
self._panel.Layout()
def ShowSendButton(self, show=True):
|
doctaphred/phredutils | zmqrpc.py | Python | gpl-3.0 | 1,764 | 0 | import traceback
from datetime import datetime
from itertools import count
import zmq
def serve(procs, port=None, addr='tcp://*', context=None, debug=False):
    """Make some procedures available for remote calls via ØMQ.

    Binds a REP socket (random port when *port* is None) and loops forever:
    each request is a JSON array [name, arg1, ...] dispatched to
    procs[name](*args); the reply is {"result": ...} on success or
    {"error": "ExcType: message"} on failure. Never returns.
    """
    if context is None:
        context = zmq.Context.instance()
    with context.socket(zmq.REP) as socket:
        if port is None:
            port = socket.bind_to_random_port(addr)
        else:
            socket.bind('{}:{}'.format(addr, port))
        print('Serving at {}:{}'.format(addr, port))
        print('sending and receiving JSON')
        for i in count(1):
            idle = datetime.now()
            print('{}: waiting for request #{}...'.format(idle, i))
            # Block until a request arrives; the poll result itself is
            # unused (and `message` is overwritten in the except branch).
            message = socket.poll()
            start = datetime.now()
            print('{}: received request #{} after {}'
                  .format(start, i, start - idle))
            try:
                request = socket.recv_json()
                name, *args = request
                result = procs[name](*args)
                reply = {'result': result}
                print(reply)
                socket.send_json(reply)
            except Exception as exc:
                if debug:
                    traceback.print_exc()
                # Always answer the REP socket, even on failure, so the
                # request/reply state machine stays in sync.
                message = '{}: {}'.format(exc.__class__.__name__, exc)
                reply = {'error': message}
                print(reply)
                socket.send_json(reply)
            end = datetime.now()
            print('{}: replied to #{} after {}'
                  .format(end, i, end - start))
if __name__ == '__main__':
    # Demo: a dict-backed key/value server speaking GET/SET/DEL,
    # bound to Redis's customary port.
    data = {}
    procs = {
        'GET': data.__getitem__,
        'SET': data.__setitem__,
        'DEL': data.__delitem__,
    }
    serve(procs, 6379)  # Look Ma, Redis!
|
arokem/PyEMMA | pyemma/_ext/sklearn/parameter_search.py | Python | bsd-2-clause | 3,395 | 0.000589 | """
--------------------------------------------------------------------------------------------
Extracted from skikit-learn to ensure basic compatibility
without creating an explicit dependency.
For the original code see
http://scikit-learn.org/
and
https://github.com/scikit-learn
--------------------------------------------------------------------------------------------
Parameter estimation tools
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
try:  # Mapping moved to collections.abc (removed from collections in 3.10)
    from collections.abc import Mapping
except ImportError:  # Python 2
    from collections import Mapping
from functools import partial, reduce
from itertools import product

import operator
class ParameterGrid(object):
    """Grid of parameters with a discrete number of values for each.

    Can be used to iterate over parameter value combinations with the
    Python built-in function iter.

    Parameters
    ----------
    param_grid : dict of string to sequence, or sequence of such
        The parameter grid to explore, as a dictionary mapping estimator
        parameters to sequences of allowed values.

        An empty dict signifies default parameters.

        A sequence of dicts signifies a sequence of grids to search, and is
        useful to avoid exploring parameter combinations that make no sense
        or have no effect. See the examples below.

    Examples
    --------
    >>> param_grid = {'a': [1, 2], 'b': [True, False]}
    >>> list(ParameterGrid(param_grid)) == (
    ...    [{'a': 1, 'b': True}, {'a': 1, 'b': False},
    ...     {'a': 2, 'b': True}, {'a': 2, 'b': False}])
    True

    >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
    >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
    ...                               {'kernel': 'rbf', 'gamma': 1},
    ...                               {'kernel': 'rbf', 'gamma': 10}]
    True
    """

    def __init__(self, param_grid):
        if isinstance(param_grid, Mapping):
            # wrap dictionary in a singleton list to support either dict
            # or list of dicts
            param_grid = [param_grid]
        self.param_grid = param_grid

    def __iter__(self):
        """Iterate over the points in the grid.

        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(p.items())
            if not items:
                # An empty dict means "default parameters": one point.
                yield {}
            else:
                keys, values = zip(*items)
                for v in product(*values):
                    params = dict(zip(keys, v))
                    yield params

    def __len__(self):
        """Number of points on the grid."""
        # Bug fix: `reduce` was used as a builtin, which only exists on
        # Python 2; it is now imported from functools (works on 2.6+ and 3).
        # Also renamed the local from `product`, which shadowed
        # itertools.product imported above.
        prod = partial(reduce, operator.mul)
        return sum(prod(len(v) for v in p.values()) if p else 1
                   for p in self.param_grid)
mm22dl/MeinKPS | logger.py | Python | gpl-3.0 | 3,955 | 0.005057 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Title: logger
Author: David Leclerc
Version: 0.1
Date: 13.04.2018
License: GNU General Public License, Version 3
(http://www.gnu.org/licenses/gpl.html)
Overview: This is a script that generates a logging instance.
Notes: ...
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# LIBRARIES
import datetime
# USER LIBRARIES
import lib
import path
# CONSTANTS
# Logging severities ordered from least to most severe; a Logger only
# records messages whose severity is at or above its configured level.
LEVELS = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
DEFAULT_LEVEL = "INFO"


# CLASSES
class Logger(object):
    """Simple file logger that appends timestamped entries to a daily report.

    Each message is formatted with the current time, the logger name and the
    severity, then appended to a report file inside a per-day directory and
    optionally echoed to the terminal.
    """

    def __init__(self, name, report = "loop.log", level = DEFAULT_LEVEL):
        """Store logger name, report file name and the severity threshold."""
        # Store logger name
        self.name = name
        # Numeric severity threshold (index into LEVELS)
        self.level = LEVELS.index(level)
        # Define logging format: [time] [name] [level] --- message
        self.fmt = "[{:%H:%M:%S.%f}] [{:>16}] [{:>8}] --- {}"
        # Report file name (appended under the dated report directory)
        self.report = report

    def log(self, level, msg, show = True):
        """Write ``msg`` at ``level`` if it passes the severity threshold.

        The entry is appended to today's report file; when ``show`` is true
        it is also printed to the terminal.
        """
        # Does level allow logging?
        if LEVELS.index(level) >= self.level:
            # Get current time
            now = datetime.datetime.now()
            # Format message
            msg = self.fmt.format(now, self.name, level, msg)
            # Define log directory (one per day) and make sure it exists
            directory = path.Path(path.REPORTS.path + lib.formatDate(now))
            directory.touch()
            # Append the entry to the report file
            with open(directory.path + self.report, "a") as f:
                f.write(msg + "\n")
            # Print to terminal
            if show:
                # was: ``print msg`` -- Python 2-only statement syntax; the
                # parenthesized form behaves identically on Python 2 and 3
                print(msg)

    def debug(self, msg):
        """Log ``msg`` with DEBUG severity."""
        self.log("DEBUG", msg)

    def info(self, msg):
        """Log ``msg`` with INFO severity."""
        self.log("INFO", msg)

    def warning(self, msg):
        """Log ``msg`` with WARNING severity."""
        self.log("WARNING", msg)

    def error(self, msg):
        """Log ``msg`` with ERROR severity."""
        self.log("ERROR", msg)

    def critical(self, msg):
        """Log ``msg`` with CRITICAL severity."""
        self.log("CRITICAL", msg)
def main():
    """Smoke test: instantiate a Logger and write a single INFO entry."""
    # Instantiate a logger named after this module
    logger = Logger(__name__)
    # Write a test entry (appends to today's report file and echoes it)
    logger.info("Test")


# Run this when script is called from terminal
if __name__ == "__main__":
    main()
thingsboard/thingsboard-gateway | thingsboard_gateway/gateway/grpc_service/tb_grpc_manager.py | Python | apache-2.0 | 11,720 | 0.00384 | # Copyright 2022. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
from threading import Thread
from time import sleep
import grpc
from simplejson import dumps
from thingsboard_gateway.gateway.constant_enums import DownlinkMessageType, Status
from thingsboard_gateway.gateway.grpc_service.grpc_downlink_converter import GrpcDownlinkConverter
from thingsboard_gateway.gateway.grpc_service.grpc_uplink_converter import GrpcUplinkConverter
from thingsboard_gateway.gateway.grpc_service.tb_grpc_server import TBGRPCServer
from thingsboard_gateway.gateway.proto.messages_pb2 import *
from thingsboard_gateway.gateway.proto.messages_pb2_grpc import add_TBGatewayProtoServiceServicer_to_server
log = logging.getLogger('grpc')
DEFAULT_STATISTICS_DICT = {"MessagesReceived": 0, "MessagesSent": 0}
class TBGRPCServerManager(Thread):
    def __init__(self, gateway, config):
        """Create the GRPC manager thread and start it immediately.

        gateway -- the owning TB gateway object, used to forward converted data
        config  -- GRPC section of the gateway configuration; must contain
                   'serverPort'
        """
        super().__init__()
        self.daemon = True
        self.__gateway = gateway
        self.setName("TB GRPC manager thread")
        # Populated once the aio server is created in serve()
        self.__aio_server: grpc.aio.Server = None
        # Callbacks injected later by the gateway (see their setters elsewhere
        # in this class) -- None until wired up.
        self.__register_connector = None
        self.__unregister_connector = None
        self.__send_data_to_storage = None
        self._stopped = False
        self.__config = config
        self.__grpc_port = config['serverPort']
        # connector name -> session id, for downlink routing
        self.__connectors_sessions = {}
        self.__grpc_server = TBGRPCServer(self.incoming_messages_cb)
        self.__uplink_converter = GrpcUplinkConverter()
        self.__downlink_converter = GrpcDownlinkConverter()
        # session id -> {"config": ..., "name": ..., "statistics": ...}
        self.sessions = {}
        # Thread starts at construction time; run() launches the GRPC server
        self.start()
    def run(self):
        """Thread body: run the GRPC server event loop until stopped."""
        log.info("GRPC server started.")
        # Blocks inside asyncio.run until the server coroutine completes;
        # self.serve is presumably defined further down in this class.
        asyncio.run(self.serve(self.__config), debug=True)
        # Keep the thread alive (polling) until an external stop request.
        while not self._stopped:
            sleep(.01)
    def incoming_messages_cb(self, session_id, msg: FromConnectorMessage):
        """Dispatch one uplink GRPC message from a connector session.

        Registration/unregistration requests are handled first; all other
        message types are only processed for sessions that have completed
        registration (i.e. have a 'name' recorded).  When a downlink reply
        is produced it is written back through the GRPC server.
        """
        log.debug("Connected client with identifier: %s", session_id)
        # if session_id not in self.sessions:
        #     self.sessions[session_id] = {"context": context}
        # else:
        #     log.debug("Existing client context is: %s", self.sessions[session_id])
        #     self.sessions[session_id]["context"] = context
        log.debug("[GRPC] incoming message: %s", msg)
        try:
            # None means "could not convert"; True means "handled, no reply";
            # a FromServiceMessage instance is an actual downlink reply.
            outgoing_message = None
            downlink_converter_config = {"message_type": [DownlinkMessageType.Response], "additional_message": msg}
            if msg.HasField("registerConnectorMsg"):
                self.__register_connector(session_id, msg.registerConnectorMsg.connectorKey)
                outgoing_message = True
            elif msg.HasField("unregisterConnectorMsg"):
                self.__unregister_connector(session_id, msg.unregisterConnectorMsg.connectorKey)
                outgoing_message = True
            elif self.sessions.get(session_id) is not None and self.sessions[session_id].get('name') is not None:
                # Session is registered -- process data messages.
                if msg.HasField("response"):
                    # An empty response is a plain ack, nothing to reply.
                    if msg.response.ByteSize() == 0:
                        outgoing_message = True
                if msg.HasField("gatewayTelemetryMsg"):
                    data = self.__convert_with_uplink_converter(msg.gatewayTelemetryMsg)
                    result_status = self.__gateway.send_to_storage(self.sessions[session_id]['name'], data)
                    outgoing_message = True
                    self.__increase_incoming_statistic(session_id)
                if msg.HasField("gatewayAttributesMsg"):
                    data = self.__convert_with_uplink_converter(msg.gatewayAttributesMsg)
                    result_status = self.__gateway.send_to_storage(self.sessions[session_id]['name'], data)
                    outgoing_message = True
                    self.__increase_incoming_statistic(session_id)
                if msg.HasField("gatewayClaimMsg"):
                    data = self.__convert_with_uplink_converter(msg.gatewayClaimMsg)
                    result_status = self.__gateway.send_to_storage(self.sessions[session_id]['name'], data)
                    # Claim requests get an explicit status reply.
                    outgoing_message = self.__downlink_converter.convert(downlink_converter_config, result_status)
                    self.__increase_incoming_statistic(session_id)
                if msg.HasField("connectMsg"):
                    data = self.__convert_with_uplink_converter(msg.connectMsg)
                    data['name'] = self.sessions[session_id]['name']
                    result_status = self.__gateway.add_device_async(data)
                    outgoing_message = self.__downlink_converter.convert(downlink_converter_config, result_status)
                    self.__increase_incoming_statistic(session_id)
                if msg.HasField("disconnectMsg"):
                    data = self.__convert_with_uplink_converter(msg.disconnectMsg)
                    data['name'] = self.sessions[session_id]['name']
                    result_status = self.__gateway.del_device_async(data)
                    outgoing_message = self.__downlink_converter.convert(downlink_converter_config, result_status)
                    self.__increase_incoming_statistic(session_id)
                if msg.HasField("gatewayRpcResponseMsg"):
                    data = self.__convert_with_uplink_converter(msg.gatewayRpcResponseMsg)
                    result_status = self.__gateway.send_rpc_reply(device=data['deviceName'], req_id=data['id'], content=data['data'])
                    outgoing_message = True
                    self.__increase_incoming_statistic(session_id)
                if msg.HasField("gatewayAttributeRequestMsg"):
                    # Attribute requests are not implemented yet; reply NOT_FOUND.
                    outgoing_message = self.__downlink_converter.convert(downlink_converter_config, Status.NOT_FOUND)
                    pass
            else:
                # Unregistered session sent a data message -- reject it.
                outgoing_message = self.__downlink_converter.convert(downlink_converter_config, Status.FAILURE)
            if outgoing_message is None:
                log.debug("Cannot convert outgoing message!")
            elif isinstance(outgoing_message, FromServiceMessage):
                self.__grpc_server.write(session_id, outgoing_message)
        except ValueError as e:
            # NOTE(review): ``e`` is passed as a lazy %-argument but the
            # format string has no placeholder -- the exception text is
            # silently dropped by the logging module.
            log.error("Received unknown GRPC message!", e)
def write(self, connector_name, msg: FromServiceMessage, session_id=None):
log.debug("[GRPC] outgoing message: %s", msg)
if session_id is None:
session_id = self.__connectors_sessions.get(connector_name)
if session_id is not None:
self.__grpc_server.write(session_id, msg)
self.__increase_outgoing_statistic(session_id)
else:
log.warning("Cannot write to connector with name %s, session is not found. Client is not registered!", connector_name)
def registration_finished(self, registration_result: Status, session_id, connector_configuration):
additional_message = FromConnectorMessage()
additional_message.registerConnectorMsg.MergeFrom(RegisterConnectorMsg())
if registration_result == Status.SUCCESS:
connector_name = connector_configuration['name']
self.sessions[session_id] = {"config": connector_configuration, "name": connector_name, "statistics": DEFAULT_STATISTICS_DICT}
self.__connectors_sessions[connector_name] = session_id
msg = self.__grpc_server.get_response("SUCCESS", additional_message)
configuration_msg = ConnectorConfigurationMsg()
configuration_msg.connectorName = connector_name
configuration_msg.configuration = dumps(connector_configuration['config'])
msg.connectorConfigurationMsg.MergeFrom(configura |
funbaker/astropy | astropy/modeling/optimizers.py | Python | bsd-3-clause | 7,182 | 0.000139 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Optimization algorithms used in `~astropy.modeling.fitting`.
"""
import warnings
i | mport abc
import numpy as np
from ..utils.exceptions import AstropyUserWarning
__all__ = ["Optimization", "SLSQP", "Simplex"]
# Maximum number of iterations
DEFAULT_MAXITER = 100

# Step for the forward difference approximation of the Jacobian
DEFAULT_EPS = np.sqrt(np.finfo(float).eps)

# Default requested accuracy
DEFAULT_ACC = 1e-07

DEFAULT_BOUNDS = (-10 ** 12, 10 ** 12)


class Optimization(metaclass=abc.ABCMeta):
    """
    Base class for optimizers.

    Parameters
    ----------
    opt_method : callable
        Implements the optimization method.

    Notes
    -----
    The base Optimizer supports no constraints; each concrete optimizer
    declares the constraints it handles via ``supported_constraints``.
    """

    supported_constraints = []

    def __init__(self, opt_method):
        # The attribute names below are part of the subclass contract:
        # concrete optimizers read ``_opt_method``, ``_maxiter``, ``_eps``
        # and ``_acc`` directly, so they must not be renamed.
        self._opt_method = opt_method
        self._maxiter = DEFAULT_MAXITER
        self._eps = DEFAULT_EPS
        self._acc = DEFAULT_ACC

    @property
    def maxiter(self):
        """Maximum number of iterations."""
        return self._maxiter

    @maxiter.setter
    def maxiter(self, val):
        self._maxiter = val

    @property
    def eps(self):
        """Step for the forward difference approximation of the Jacobian."""
        return self._eps

    @eps.setter
    def eps(self, val):
        self._eps = val

    @property
    def acc(self):
        """Requested accuracy."""
        return self._acc

    @acc.setter
    def acc(self, val):
        self._acc = val

    @property
    def opt_method(self):
        """The wrapped solver callable."""
        return self._opt_method

    def __repr__(self):
        return "{0}()".format(type(self).__name__)

    @abc.abstractmethod
    def __call__(self):
        raise NotImplementedError("Subclasses should implement this method")
class SLSQP(Optimization):
    """
    Sequential Least Squares Programming optimization algorithm.

    The algorithm is described in [1]_. It supports tied and fixed
    parameters, as well as bounded constraints. Uses
    `scipy.optimize.fmin_slsqp`.

    References
    ----------
    .. [1] http://www.netlib.org/toms/733
    """

    supported_constraints = ['bounds', 'eqcons', 'ineqcons', 'fixed', 'tied']

    def __init__(self):
        # Import lazily so scipy is only required when this optimizer is used.
        from scipy.optimize import fmin_slsqp
        super().__init__(fmin_slsqp)
        # Result metadata from the most recent solve.
        self.fit_info = {
            'final_func_val': None,
            'numiter': None,
            'exit_mode': None,
            'message': None
        }

    def __call__(self, objfunc, initval, fargs, **kwargs):
        """
        Run the solver.

        Parameters
        ----------
        objfunc : callable
            objection function
        initval : iterable
            initial guess for the parameter values
        fargs : tuple
            other arguments to be passed to the statistic function
        kwargs : dict
            other keyword arguments to be passed to the solver
        """
        # fmin_slsqp calls its iteration cap 'iter'; map from 'maxiter'.
        kwargs['iter'] = kwargs.pop('maxiter', self._maxiter)
        if 'epsilon' not in kwargs:
            kwargs['epsilon'] = self._eps
        if 'acc' not in kwargs:
            kwargs['acc'] = self._acc
        # Get the verbosity level
        disp = kwargs.pop('verblevel', None)
        # set the values of constraints to match the requirements of fmin_slsqp;
        # by convention fargs[0] is the model being fitted.
        model = fargs[0]
        pars = [getattr(model, name) for name in model.param_names]
        # Fixed and tied parameters are not free, so they carry no bounds.
        bounds = [par.bounds for par in pars if not (par.fixed or par.tied)]
        bounds = np.asarray(bounds)
        # Replace open-ended (None) bounds with large finite defaults,
        # as fmin_slsqp requires numeric bounds on every free parameter.
        for i in bounds:
            if i[0] is None:
                i[0] = DEFAULT_BOUNDS[0]
            if i[1] is None:
                i[1] = DEFAULT_BOUNDS[1]
        # older versions of scipy require this array to be float
        bounds = np.asarray(bounds, dtype=float)
        eqcons = np.array(model.eqcons)
        ineqcons = np.array(model.ineqcons)
        fitparams, final_func_val, numiter, exit_mode, mess = self.opt_method(
            objfunc, initval, args=fargs, full_output=True, disp=disp,
            bounds=bounds, eqcons=eqcons, ieqcons=ineqcons,
            **kwargs)
        self.fit_info['final_func_val'] = final_func_val
        self.fit_info['numiter'] = numiter
        self.fit_info['exit_mode'] = exit_mode
        self.fit_info['message'] = mess
        # exit_mode 0 is success; anything else is reported to the caller.
        if exit_mode != 0:
            warnings.warn("The fit may be unsuccessful; check "
                          "fit_info['message'] for more information.",
                          AstropyUserWarning)
        return fitparams, self.fit_info
class Simplex(Optimization):
    """
    Nelder-Mead (downhill simplex) optimization.

    A derivative-free minimizer [1]_ wrapping `scipy.optimize.fmin` --
    it only evaluates the objective function, never its gradient.

    References
    ----------
    .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
           minimization", The Computer Journal, 7, pp. 308-313
    """

    supported_constraints = ['bounds', 'fixed', 'tied']

    def __init__(self):
        # Import lazily so scipy is only required when this optimizer is used.
        from scipy.optimize import fmin
        super().__init__(fmin)
        # Result metadata from the most recent solve.
        self.fit_info = {key: None for key in
                         ('final_func_val', 'numiter', 'exit_mode',
                          'num_function_calls')}

    def __call__(self, objfunc, initval, fargs, **kwargs):
        """
        Run the solver.

        Parameters
        ----------
        objfunc : callable
            objection function
        initval : iterable
            initial guess for the parameter values
        fargs : tuple
            other arguments to be passed to the statistic function
        kwargs : dict
            other keyword arguments to be passed to the solver
        """
        # Fall back to the configured iteration cap unless the caller set one.
        kwargs.setdefault('maxiter', self._maxiter)
        # 'acc' and 'xtol' are both accepted as the tolerance; when both are
        # supplied 'xtol' wins (same precedence as before this rewrite).
        if 'acc' in kwargs:
            self._acc = kwargs.pop('acc')
        if 'xtol' in kwargs:
            self._acc = kwargs.pop('xtol')
        # Get the verbosity level
        disp = kwargs.pop('verblevel', None)
        solution = self.opt_method(
            objfunc, initval, args=fargs, xtol=self._acc, disp=disp,
            full_output=True, **kwargs)
        fitparams, final_func_val, numiter, funcalls, exit_mode = solution
        self.fit_info.update(final_func_val=final_func_val,
                             numiter=numiter,
                             exit_mode=exit_mode,
                             num_function_calls=funcalls)
        # exit_mode 1: evaluation budget exhausted; 2: iteration cap reached.
        if exit_mode == 1:
            warnings.warn("The fit may be unsuccessful; "
                          "Maximum number of function evaluations reached.",
                          AstropyUserWarning)
        if exit_mode == 2:
            warnings.warn("The fit may be unsuccessful; "
                          "Maximum number of iterations reached.",
                          AstropyUserWarning)
        return fitparams, self.fit_info
|
diegocortassa/TACTIC | src/pyasm/checkin/repo.py | Python | epl-1.0 | 6,768 | 0.008717 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["BaseRepo", "TacticRepo"]
import os, sys, re
from pyasm.common import Environment, System
from pyasm.biz import File
from pyasm.search import FileUndo
from .checkin import CheckinException
class BaseRepo(object):
    '''Abstract base for repository back ends.

    Concrete repositories override ``handle_system_commands`` to move the
    checked-in files into their final storage location.
    '''

    def has_file_codes(self):
        # Repositories track files through registered file codes by default.
        return True

    def handle_system_commands(self, snapshot, files, file_objects, mode, md5s, source_paths=[], commit=False):
        # Base implementation is a no-op; subclasses perform the actual
        # file-system or remote-storage work.
        pass
class TacticRepo(BaseRepo):
    '''Repository back end that stores checked-in files in the TACTIC
    managed asset directory.'''

    def handle_system_commands(self, snapshot, files, file_objects, mode, md5s, source_paths=[], file_sizes=[], commit=False):
        '''Move the tmp files into the appropriate repository directory.

        mode -- one of 'local' (no-op here), 'inplace' (register only),
                'preallocate', 'move', 'copy' or 'create'; controls whether
                the file is moved, copied or merely registered.
        '''
        # if mode is local then nothing happens here
        if mode == 'local':
            return
        # Normalize commit to a strict boolean ('false' string counts as False).
        if commit in ['false', False]:
            commit = False
        else:
            commit = True
        # inplace mode does not move the file. It just registers the file
        # object
        if mode == 'inplace':
            for i, file in enumerate(files):
                file_object = file_objects[i]
                to_name = file_object.get_full_file_name()
                to_path = file
                # This is handled in create_file_types
                #file_type = snapshot.get_type_by_file_name(to_name)
                #file_object.set_value('type', file_type)
                if not os.path.isdir(to_path):
                    # Prefer a caller-supplied checksum; fall back to hashing.
                    md5_checksum = None
                    if md5s:
                        md5_checksum = md5s[i]
                    if not md5_checksum:
                        md5_checksum = File.get_md5(to_path)
                    if md5_checksum:
                        file_object.set_value("md5", md5_checksum)
                    if commit:
                        file_object.commit(triggers="none")
            return
        for i, file in enumerate(files):
            file_object = file_objects[i]
            to_name = file_object.get_full_file_name()
            file_type = snapshot.get_type_by_file_name(to_name)
            if mode == 'preallocate':
                # Preallocated paths are already final.
                to_path = file
            else:
                lib_dir = snapshot.get_lib_dir(file_type=file_type, file_object=file_object)
                # it should have been created in postprocess_snapshot
                System().makedirs(lib_dir)
                to_path = "%s/%s" % (lib_dir, to_name )
            # first make sure that the to path does not exist, if so, just skip
            if os.path.exists(to_path) and mode not in ['inplace','preallocate']:
                raise CheckinException('This path [%s] already exists'%to_path)
            # add the file
            try:
                # inplace undo used to not touch the file,
                # now it will be moved to cache on undo
                io_action = True
                if mode in ['preallocate']:
                    io_action = False
                if mode == 'move':
                    FileUndo.move( source_paths[i], to_path )
                #elif mode == 'copy': # was free_copy
                    #FileUndo.create( source_paths[i], to_path, io_action=io_action )
                    # make it look like the files was created in the repository
                else: # mode ='create'
                    md5 = file_object.get_value("md5")
                    st_size = file_object.get_value("st_size")
                    rel_dir = file_object.get_value("relative_dir")
                    if mode == 'copy':
                        io_action = 'copy'
                        src_path = source_paths[i]
                    else:
                        src_path = files[i]
                    file_name = to_name
                    rel_path = "%s/%s" % (rel_dir, file_name)
                    FileUndo.create( src_path, to_path, io_action=io_action, extra={ "md5": md5, "st_size": st_size, "rel_path": rel_path } )
            except IOError as e:
                raise CheckinException('IO Error occurred. %s' %e.__str__())
            # check to see that the file exists.
            if not os.path.exists( to_path ):
                if mode in ["inplace", "preallocate"]:
                    raise CheckinException("File not found in repo at [%s]" % to_path )
                else:
                    raise CheckinException("Failed move [%s] to [%s]" % \
                        (files[i], to_path) )
            file_object.set_value('type', file_type)
            # Record the checksum unless explicitly told to skip ("ignore").
            if md5s != "ignore" and not os.path.isdir(to_path):
                md5_checksum = None
                if md5s:
                    md5_checksum = md5s[i]
                if not md5_checksum:
                    md5_checksum = File.get_md5(to_path)
                if md5_checksum:
                    file_object.set_value("md5", md5_checksum)
                if commit:
                    file_object.commit(triggers="none")
__all__.append("S3Repo")
class S3Repo(BaseRepo):
'''This uploads the files to s3 directly'''
def handle_system_commands(self, snapshot, files, file_objects, mode, md5s, source_paths=[], file_sizes=[], commit=False):
try:
import boto3
from botocore.exceptions import ClientError
except:
raise("Python [boto3] module not installed")
session = boto3.Session()
s3_client = session.client('s3')
s3_resource = session.client('s3')
sobject = snapshot.get_parent()
from pyasm.security import Site
site = Site.get_site()
project_code = sobject.get_project_code()
#!!! TEST
bucket = "tactic01"
for i, file in enumerate(files):
file_object = file_objects[i]
to_name = file_object.get_full_file_name()
file_type = snapshot.get_type_by_file_name(to_name)
web_dir = snapshot.get_relative_dir(file_type=file_type, file_object=file_object)
object_name = "%s/%s/%s" % (site, web_dir, to_name)
print("objct: ", object_name)
# push these files to s3
try:
s3_client.upload_file(source_paths[i], bucket, object_name)
except ClientError as e:
raise
|
jucimarjr/IPC_2017-1 | lista04/lista04_lista02_questao11.py | Python | apache-2.0 | 1,164 | 0.013974 | #----------------------------------------------------------------------------------
# Introdução a Programação de Computadores - IPC
# Universidade do Estado do Amazonas - UEA
#
# Adham Lucas da Silva Oliveira 1715310059
# Alexandre Marques Uchôa 1715310028
# André Luís Laborda Neves 1515070006
# Carlos Eduardo Tapudima de Oliveira 1715310030
# Diego Reis Figueira 1515070169
#
#Faça um programa que calcule e mostre o volume de uma esfera sendo fornecido o valor de seu raio (R).
#A fórmula para calcular o volume é: (4/3) * pi * R3. Considere (atribua) para pi o valor 3.14159.
#
#Entrada
# O arquivo de entrada contém um valor de ponto flutuante (dupla precisão), correspondente ao raio da esfera.
#
#Saída
# A saída deverá ser uma mensagem "VOLUME" conforme o exemplo | fornecido abaixo,
#com um espaço antes e um espaço depois da igualdade.
#O valor deverá ser apresentado com 3 casas após o ponto.
#----------------------------------------------------------------------------------
# Value of pi fixed by the problem statement.
PI = 3.14159


def compute_sphere_volume(radius):
    """Return the volume of a sphere with the given radius.

    Uses (4/3) * pi * r**3 with pi = 3.14159 as required by the exercise.
    """
    # The original computed 4 / (3 * pi * r**3) -- the reciprocal of part of
    # the intended expression -- which is simply the wrong formula.
    return (4.0 / 3.0) * PI * radius ** 3


def main():
    """Read the radius from stdin and print the formatted volume."""
    radius = float(input())
    # The problem statement asks for the label "VOLUME" with a space on each
    # side of the '=' and three decimal places.
    print('VOLUME = %.3f' % compute_sphere_volume(radius))


if __name__ == "__main__":
    main()
|
melviso/phycpp | beatle/activity/models/ui/dlg/cc/IsClassMethods.py | Python | gpl-2.0 | 2,970 | 0.001684 | """Subclass of IsClassMethods, which is generated by wxFormBuilder."""
from beatle import model
from beatle.lib import wxx
from beatle.activity.models.ui import ui as ui
from beatle.app.utils import cached_type
# Implementing IsClassMethods
class IsClassMethods(ui.IsClassMethods):
"""
This dialog allows to add/remove is_class methods.
"""
@wxx.SetInfo(__doc__)
def __init__(self, parent, container):
"""Dialog initialization"""
super(IsClassMethods, self).__init__(parent)
# container es la clase base
self.container = container
# create a map of feasible casts
self._classes = []
for k in container._deriv:
self.visit(k)
# get current methods
self._is_class_methods = container(model.cc.IsClassMethod)
# create map from names to implementations
self._is_class_method_names = dict([(x._name, x) for x in self._is_class_methods])
# create map from feasible is_class to current impl
self._map = {}
for k in self._classes:
name = k.scoped
name = "is_" + name.replace('::', '_')
if name in self._is_class_method_names:
self._map[name] = (k, self._is_class_method_names[name])
else:
self._map[name] = (k, None)
# do a label insertion remembering state
pos = 0
for k in self._map:
v = self._map[k]
self.m_checkList2.Insert(k, pos, v)
if v[1]:
self.m_checkList2.Check(pos)
pos = pos + 1
def visit(self, k):
"""Add inheritance branch"""
for l in k._deriv:
self.visit(l)
self._classes.append(k)
def get_kwargs(self):
"""Returns kwargs dictionary suitable for objects creation"""
kwargs_list = []
tbool = cached_type(self.container.project, 'bool')
for item in range(0, self.m_checkList2.GetCount()):
v = self.m_checkList2.GetClientData(item)
c = self.m_checkList2.IsChecked(item)
if (c and v[1]) or (not c and not v[1]):
continue
if c:
kwargs = {}
derivative = v[0]
kwargs['parent'] = self.container
kwargs['name'] = 'is_' + derivative.scoped.replace('::', '_')
kwargs['type'] = model.cc.typeinst(
type=tbool, const=True)
kwargs['constmethod'] = True
| kwargs['note'] = 'This method checks if the instance is specialized as {0}'.format(derivative.GetFullLabel())
kwargs['declare'] = True
kwargs['implement'] = True
kwargs['content'] = '\treturn ( dynamic_cast<const {0}*>(this) != nullptr );'.format(derivative.scoped)
kwargs_list.append(kwargs)
else:
| v[1].Delete()
return kwargs_list
|
Gebesa-Dev/Addons-gebesa | stock_warehouse_analytic_id/__openerp__.py | Python | agpl-3.0 | 763 | 0 | # -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Account Analytic Wareho | use",
"summary": "Add analytic in stock_warehouse",
"version": "9.0.1.0.0",
"category": "Accounting",
"website": "https://odoo-community.org/",
"author": "<Deysy Mascorro>, Odoo Community Association (OCA)",
"license": "AGPL-3",
"application": Fa | lse,
"installable": True,
"external_dependencies": {
"python": [],
"bin": [],
},
"depends": [
"base",
"account",
"stock"
],
"data": [
"views/stock_warehouse_view.xml",
"views/stock_location_view.xml",
],
"demo": [
],
"qweb": [
]
}
|
jkandasa/integration_tests | cfme/middleware/provider/hawkular.py | Python | gpl-2.0 | 7,860 | 0.001781 | import re
from widgetastic_patternfly import Input, BootstrapSelect
from wrapanapi.hawkular import Hawkular
from cfme.common import TopologyMixin
from cfme.common.provider import DefaultEndpoint, DefaultEndpointForm
from cfme.utils.appliance import Navigatable
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.varmeth import variable
from . import MiddlewareProvider
from . import _get_providers_page, _db_select_query
from . import download, MiddlewareBase
class HawkularEndpoint(DefaultEndpoint):
    """Default endpoint of a Hawkular middleware provider."""

    @property
    def view_value_mapping(self):
        """Map endpoint attributes onto the Add Provider form fields."""
        fields = {}
        for attr in ('security_protocol', 'hostname', 'api_port'):
            fields[attr] = getattr(self, attr)
        return fields
class HawkularEndpointForm(DefaultEndpointForm):
    # Extra widgets of the Hawkular endpoint tab on the Add Provider form:
    # the TLS mode selector and the Hawkular REST API port field.
    security_protocol = BootstrapSelect('default_security_protocol')
    api_port = Input('default_api_port')
class HawkularProvider(MiddlewareBase, TopologyMixin, MiddlewareProvider):
    """
    HawkularProvider class holds provider data. Used to perform actions on
    the hawkular provider page.

    Args:
        name: Name of the provider
        endpoints: one or several provider endpoints like DefaultEndpoint. It
            should be either a dict in the format
            {endpoint.name: endpoint, ..., endpoint_n.name: endpoint_n},
            a list of endpoints, or a single endpoint.
        hostname: Hostname/IP of the provider
        port: http/https port of hawkular provider
        credentials: see Credential inner class.
        key: The CFME key of the provider in the yaml.
        db_id: database row id of provider

    Usage:
        myprov = HawkularProvider(name='foo',
                            endpoints=endpoint,
                            hostname='localhost',
                            port=8080,
                            credentials=Provider.Credential(principal='admin',
                                                            secret='foobar'))
        myprov.create()
        myprov.num_deployment(method="ui")
    """
    # Statistics compared between the UI and the database by validation tests.
    STATS_TO_MATCH = MiddlewareProvider.STATS_TO_MATCH +\
        ['num_server', 'num_domain', 'num_deployment', 'num_datasource', 'num_messaging']
    property_tuples = MiddlewareProvider.property_tuples +\
        [('name', 'Name'), ('hostname', 'Host Name'), ('port', 'Port'), ('provider_type', 'Type')]
    type_name = "hawkular"
    mgmt_class = Hawkular
    db_types = ["Hawkular::MiddlewareManager"]
    endpoints_form = HawkularEndpointForm

    def __init__(self, name=None, endpoints=None, hostname=None, port=None,
                 credentials=None, key=None,
                 appliance=None, sec_protocol=None, **kwargs):
        """Store the provider attributes; see the class docstring for args."""
        Navigatable.__init__(self, appliance=appliance)
        self.name = name
        self.hostname = hostname
        self.port = port
        self.provider_type = 'Hawkular'
        if not credentials:
            credentials = {}
        self.credentials = credentials
        self.key = key
        # Default to an unencrypted connection when not specified.
        self.sec_protocol = sec_protocol if sec_protocol else 'Non-SSL'
        self.db_id = kwargs['db_id'] if 'db_id' in kwargs else None
        self.endpoints = self._prepare_endpoints(endpoints)

    @property
    def view_value_mapping(self):
        """Maps values to view attrs"""
        return {
            'name': self.name,
            'prov_type': 'Hawkular'
        }

    # The @variable decorator exposes each stat via method="db"/"ui" variants.
    @variable(alias='db')
    def num_deployment(self):
        """Count of middleware deployments (database variant)."""
        return self._num_db_generic('middleware_deployments')

    @num_deployment.variant('ui')
    def num_deployment_ui(self, reload_data=True):
        """Count of middleware deployments read from the details page."""
        self.load_details(refresh=reload_data)
        return int(self.get_detail("Relationships", "Middleware Deployments"))

    @variable(alias='db')
    def num_server(self):
        """Count of middleware servers (database variant)."""
        return self._num_db_generic('middleware_servers')

    @num_server.variant('ui')
    def num_server_ui(self, reload_data=True):
        """Count of middleware servers read from the details page."""
        self.load_details(refresh=reload_data)
        return int(self.get_detail("Relationships", "Middleware Servers"))

    @variable(alias='db')
    def num_server_group(self):
        """Count of server groups across this provider's domains (db only)."""
        res = self.appliance.db.client.engine.execute(
            "SELECT count(*) "
            "FROM ext_management_systems, middleware_domains, middleware_server_groups "
            "WHERE middleware_domains.ems_id=ext_management_systems.id "
            "AND middleware_domains.id=middleware_server_groups.domain_id "
            "AND ext_management_systems.name='{0}'".format(self.name))
        return int(res.first()[0])

    @variable(alias='db')
    def num_datasource(self):
        """Count of middleware datasources (database variant)."""
        return self._num_db_generic('middleware_datasources')

    @num_datasource.variant('ui')
    def num_datasource_ui(self, reload_data=True):
        """Count of middleware datasources read from the details page."""
        self.load_details(refresh=reload_data)
        return int(self.get_detail("Relationships", "Middleware Datasources"))

    @variable(alias='db')
    def num_domain(self):
        """Count of middleware domains (database variant)."""
        return self._num_db_generic('middleware_domains')

    @num_domain.variant('ui')
    def num_domain_ui(self, reload_data=True):
        """Count of middleware domains read from the details page."""
        self.load_details(refresh=reload_data)
        return int(self.get_detail("Relationships", "Middleware Domains"))

    @variable(alias='db')
    def num_messaging(self):
        """Count of middleware messagings (database variant)."""
        return self._num_db_generic('middleware_messagings')

    @num_messaging.variant('ui')
    def num_messaging_ui(self, reload_data=True):
        """Count of middleware messagings read from the details page."""
        self.load_details(refresh=reload_data)
        return int(self.get_detail("Relationships", "Middleware Messagings"))

    @variable(alias='ui')
    def is_refreshed(self, reload_data=True):
        """True if the UI reports a successful refresh within the last hour."""
        self.load_details(refresh=reload_data)
        if re.match('Success.*Minute.*Ago', self.get_detail("Status", "Last Refresh")):
            return True
        else:
            return False

    @is_refreshed.variant('db')
    def is_refreshed_db(self):
        """True if the provider row was updated after it was created."""
        ems = self.appliance.db.client['ext_management_systems']
        dates = self.appliance.db.client.session.query(ems.created_on,
            ems.updated_on).filter(ems.name == self.name).first()
        return dates.updated_on > dates.created_on

    @variable(alias='ui')
    def is_valid(self, reload_data=True):
        """True if the UI shows a valid authentication status."""
        self.load_details(refresh=reload_data)
        if re.match('Valid.*Ok', self.get_detail("Status", "Authentication status")):
            return True
        else:
            return False

    @classmethod
    def download(cls, extension):
        """Download the provider list in the given format (csv/txt/pdf)."""
        view = _get_providers_page()
        download(view, extension)

    def load_details(self, refresh=False):
        """Navigate to Details and load `db_id` if not set"""
        view = navigate_to(self, 'Details')
        if not self.db_id or refresh:
            tmp_provider = _db_select_query(
                name=self.name, type='ManageIQ::Providers::Hawkular::MiddlewareManager').first()
            self.db_id = tmp_provider.id
        if refresh:
            view.browser.selenium.refresh()
            view.flush_widget_cache()
        return view

    def load_topology_page(self):
        """Navigate to the topology view of this provider."""
        return navigate_to(self, 'TopologyFromDetails')

    def recheck_auth_status(self):
        """Trigger the 'Re-check Authentication Status' toolbar action."""
        view = self.load_details(refresh=True)
        view.toolbar.authentication.item_select("Re-check Authentication Status")

    @staticmethod
    def from_config(prov_config, prov_key, appliance=None):
        """Build a HawkularProvider from a yaml provider configuration dict."""
        credentials_key = prov_config['credentials']
        credentials = HawkularProvider.process_credential_yaml_key(credentials_key)
        endpoints = {}
        endpoints[HawkularEndpoint.name] = HawkularEndpoint(
            **prov_config['endpoints'][HawkularEndpoint.name])
        return HawkularProvider(
            name=prov_config['name'],
            endpoints=endpoints,
            key=prov_key,
            hostname=prov_config['hostname'],
            sec_protocol=prov_config.get('sec_protocol'),
            port=prov_config['port'],
            credentials={'default': credentials},
            appliance=appliance)
|
Neitsch/ASE4156 | authentication/migrations/0002_profile_has_bank_linked.py | Python | apache-2.0 | 462 | 0 | # -*- coding: utf-8 -*-
| # Generated by Django 1.11.5 on 2017-09-21 00:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('authentication', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='has_bank_linked',
| field=models.NullBooleanField(default=False),
),
]
|
graphql-python/graphql-core | src/graphql/utilities/ast_to_dict.py | Python | mit | 1,596 | 0.000627 | from typing import Any, Collection, Dict, List, Optional, overload
from ..language import Node, OperationType
from ..pyutils import is_iterable
__all__ = ["ast_to_dict"]
@overload
def ast_to_dict(
node: Node, locations: bool = False, cache: Optional[Dict[Node, Any]] = None
) -> Dict:
...
@overload
def ast_to_dict(
node: Collection[Node],
locations: bool = False,
cache: Optional[Dict[Node, Any]] = None,
) -> List[Node]:
...
@overload
def ast_to_dict(
node: OperationType,
locations: bool = False,
cache: Optional[Dict[Node, Any]] = None,
) -> str:
...
def ast_to_dict(
node: Any, locations: bool = False, cache: Optional[Dict[Node, Any]] = None
) -> Any:
"""Convert a language AST to a nested Python dictionary.
Set `location` to True in order to get the locations as well.
"""
"""Convert a node to a nested Python dictionary."""
if isinstance(node, Node):
if cache is None:
cache = {}
| elif node in cache:
return cache[node]
cache[node] = res = {}
res.update(
{
key: ast_to_dict(getattr(node, key), locations, cache)
for key in ("kind",) + n | ode.keys[1:]
}
)
if locations:
loc = node.loc
if loc:
res["loc"] = dict(start=loc.start, end=loc.end)
return res
if is_iterable(node):
return [ast_to_dict(sub_node, locations, cache) for sub_node in node]
if isinstance(node, OperationType):
return node.value
return node
|
weso/CWR-DataApi | tests/parser/dictionary/decoder/record/test_work_origin.py | Python | mit | 1,959 | 0 | # -*- coding: utf-8 -*-
import unittest
from cwr.parser.decoder.dictionary import | WorkOriginDictionaryDeco | der
from cwr.other import VISAN
"""
Dictionary to Message decoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestWorkOriginDictionaryDecoder(unittest.TestCase):
    """Checks that decoding a work-origin dictionary maps every field onto the record."""

    def setUp(self):
        self._decoder = WorkOriginDictionaryDecoder()

    def test_encoded(self):
        """A fully-populated dictionary decodes with all fields preserved."""
        data = {
            'record_type': 'ORN',
            'transaction_sequence_n': 3,
            'record_sequence_n': 15,
            'intended_purpose': 'PURPOSE',
            'production_title': 'TITLE',
            'cd_identifier': 'ID134',
            'cut_number': 5,
            'library': 'LIB467',
            'bltvr': 'BLTVR',
            'visan': 1234567123456789121231,
            'production_n': 'PROD145',
            'episode_title': 'EPISODE',
            'episode_n': 'EP145',
            'year_production': 1994,
            'audio_visual_key': 'KEY',
        }
        record = self._decoder.decode(data)
        self.assertEqual('ORN', record.record_type)
        self.assertEqual(3, record.transaction_sequence_n)
        self.assertEqual(15, record.record_sequence_n)
        self.assertEqual('PURPOSE', record.intended_purpose)
        self.assertEqual('TITLE', record.production_title)
        self.assertEqual('ID134', record.cd_identifier)
        self.assertEqual(5, record.cut_number)
        self.assertEqual('LIB467', record.library)
        self.assertEqual('BLTVR', record.bltvr)
        self.assertEqual(1234567123456789121231, record.visan)
        self.assertEqual('PROD145', record.production_n)
        self.assertEqual('EPISODE', record.episode_title)
        self.assertEqual('EP145', record.episode_n)
        self.assertEqual(1994, record.year_production)
        self.assertEqual('KEY', record.audio_visual_key)
|
atodorov/anaconda | pyanaconda/ui/gui/spokes/datetime_spoke.py | Python | gpl-2.0 | 42,013 | 0.002428 | # Datetime configuration spoke class
#
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
import gi
gi.require_version("Gdk", "3.0")
gi.require_version("Gtk", "3.0")
gi.require_version("TimezoneMap", "1.0")
from gi.repository import Gdk, Gtk, TimezoneMap
from pyanaconda.ui.communication import hubQ
from pyanaconda.ui.common import FirstbootSpokeMixIn
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.categories.localization import LocalizationCategory
from pyanaconda.ui.gui.utils import gtk_call_once, override_cell_property
from pyanaconda.ui.gui.utils import blockedHandler
from pyanaconda.ui.gui.helpers import GUIDialogInputCheckHandler
from pyanaconda.ui.helpers import InputCheck
from pyanaconda.core import util, constants
from pyanaconda.core.configuration.anaconda import conf
from pyanaconda import isys
from pyanaconda import network
from pyanaconda import ntp
from pyanaconda import flags
from pyanaconda.modules.common.constants.services import TIMEZONE, NETWORK
from pyanaconda.threading import threadMgr, AnacondaThread
from pyanaconda.core.i18n import _, CN_
from pyanaconda.core.async_utils import async_action_wait, async_action_nowait
from pyanaconda.timezone import NTP_SERVICE, get_all_regions_and_timezones, get_timezone, is_valid_timezone
from pyanaconda.localization import get_xlated_timezone, resolve_date_format
from pyanaconda.core.timer import Timer
import datetime
import re
import threading
import time
import locale as locale_mod
import functools
__all__ = ["DatetimeSpoke"]
SERVER_HOSTNAME = 0
SERVER_POOL = 1
SERVER_WORKING = 2
SERVER_USE = 3
DEFAULT_TZ = "America/New_York"
SPLIT_NUMBER_SUFFIX_RE = re.compile(r'([^0-9]*)([-+])([0-9]+)')
def _compare_regions(reg_xlated1, reg_xlated2):
"""Compare two pairs of regions and their translations."""
reg1, xlated1 = reg_xlated1
reg2, xlated2 = reg_xlated2
# sort the Etc timezones to the end
if reg1 == "Etc" and reg2 == "Etc":
return 0
elif reg1 == "Etc":
return 1
elif reg2 == "Etc":
return -1
else:
# otherwise compare the translated names
return locale_mod.strcoll(xlated1, xlated2)
def _compare_cities(city_xlated1, city_xlated2):
    """Compare two (city, translated name) pairs for sorting.

    "Cities" such as GMT+5 end with a signed number; when both names carry
    such a suffix and share a prefix, they are ordered numerically by the
    signed suffix. Otherwise locale collation on the relevant strings wins.
    """
    name1 = city_xlated1[1]
    name2 = city_xlated2[1]
    parts1 = SPLIT_NUMBER_SUFFIX_RE.match(name1)
    parts2 = SPLIT_NUMBER_SUFFIX_RE.match(name2)
    if parts1 is None and parts2 is None:
        # neither name has a +-N suffix; plain locale comparison
        return locale_mod.strcoll(name1, name2)
    if parts1 is None:
        # only the second has a suffix; compare against its prefix
        prefix2, _sign2, _num2 = parts2.groups()
        return locale_mod.strcoll(name1, prefix2)
    if parts2 is None:
        # only the first has a suffix; compare its prefix against the name
        prefix1, _sign1, _num1 = parts1.groups()
        return locale_mod.strcoll(prefix1, name2)
    # both carry a +-N suffix
    prefix1, sign1, num1 = parts1.groups()
    prefix2, sign2, num2 = parts2.groups()
    if prefix1 != prefix2:
        return locale_mod.strcoll(prefix1, prefix2)
    # same prefix: order by the signed numeric suffix
    value1 = int(sign1 + num1)
    value2 = int(sign2 + num2)
    return (value1 > value2) - (value1 < value2)
def _new_date_field_box(store):
"""
Creates new date field box (a combobox and a label in a horizontal box) for
a given store.
"""
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
suffix_label = Gtk.Label()
renderer = Gtk.CellRendererText()
combo = Gtk.ComboBox(model=store)
combo.pack_start(renderer, False)
# idx is column 0, string we want to show is 1
combo.add_attribute(renderer, "text", 1)
box.pack_start(combo, False, False, 0)
box.pack_start(suffix_label, Fa | lse, False, 0)
return (box, combo, suffix_label)
class NTPconfigDialog(GUIObject, GUIDialogInputCheckHandler):
builderObjects = ["ntpConfigDialog", "addImage", "serversStore"]
mainWidgetName = "ntpConfigDialog"
uiFile = "spokes/datetime_spoke.glade"
    def __init__(self, data, timezone_module):
        """Set up the dialog: wire the add-button input check and epoch tracking."""
        GUIObject.__init__(self, data)
        # GUIDialogInputCheckHandler is used to manipulate the sensitivity of
        # the add button, and to check for valid input in on_entry_activated.
        add_button = self.builder.get_object("addButton")
        GUIDialogInputCheckHandler.__init__(self, add_button)
        # epoch is increased when serversStore is repopulated
        self._epoch = 0
        self._epoch_lock = threading.Lock()
        self._timezone_module = timezone_module
@property
def working_server(self):
for row in self._serversStore:
if row[SERVER_WORKING] == constants.NTP_SERVER_OK and row[SERVER_USE]:
#server is checked and working
return row[SERVER_HOSTNAME]
return None
@property
def pools_servers(self):
pools = list()
servers = list()
for used_row in (row for row in self._serversStore if row[SERVER_USE]):
if used_row[SERVER_POOL]:
pools.append(used_row[SERVER_HOSTNAME])
else:
servers.append(used_row[SERVER_HOSTNAME])
return (pools, servers)
def _render_working(self, column, renderer, model, itr, user_data=None):
value = model[itr][SERVER_WORKING]
if value == constants.NTP_SERVER_QUERY:
return "dialog-question"
elif value == constants.NTP_SERVER_OK:
return "emblem-default"
else:
return "dialog-error"
    def initialize(self):
        """One-time dialog setup: sizing, icon rendering, widget refs and validation."""
        self.window.set_size_request(500, 400)
        workingColumn = self.builder.get_object("workingColumn")
        workingRenderer = self.builder.get_object("workingRenderer")
        # the status icon is computed per-row by _render_working rather than
        # being stored in the model
        override_cell_property(workingColumn, workingRenderer, "icon-name",
                               self._render_working)
        self._serverEntry = self.builder.get_object("serverEntry")
        self._serversStore = self.builder.get_object("serversStore")
        self._addButton = self.builder.get_object("addButton")
        self._poolCheckButton = self.builder.get_object("poolCheckButton")
        # Validate the server entry box
        self._serverCheck = self.add_check(self._serverEntry, self._validateServer)
        self._serverCheck.update_check_status()
        self._initialize_store_from_config()
def _initialize_store_from_config(self):
self._serversStore.clear()
kickstart_ntp_servers = self._timezone_module.NTPServers
if kickstart_ntp_servers:
pools, servers = ntp.internal_to_pools_and_servers(kickstart_ntp_servers)
else:
try:
pools, servers = ntp.get_servers_from_config()
except ntp.NTPconfigError:
log.warning("Failed to load N |
ethifus/commentjson | commentjson/__init__.py | Python | mit | 163 | 0 | from commentjson import dump
from commentjson import dumps
from commentjson import JSONLibraryE | xception
from co | mmentjson import load
from commentjson import loads
|
trabucayre/gnuradio | gr-analog/python/analog/fm_emph.py | Python | gpl-3.0 | 9,632 | 0.006956 | #
# Copyright 2005,2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, filter
import math
import cmath
class fm_deemph(gr.hier_block2):
r"""
FM Deemphasis IIR filter
Args:
fs: sampling frequency in Hz (float)
tau: Time constant in seconds (75us in US, 50us in EUR) (float)
An analog deemphasis filter:
R
o------/\/\/\/---+----o
|
= C
|
---
Has this transfer function:
1 1
---- ---
RC tau
H(s) = ---------- = ----------
1 1
s + ---- s + ---
RC tau
And has its -3 dB response, due to the pole, at
|H(j w_c)|^2 = 1/2 => s = j w_c = j (1/(RC))
Historically, this corner frequency of analog audio deemphasis filters
been specified by the RC time constant used, called tau.
So w_c = 1/tau.
FWIW, for standard tau values, some standard analog components would be:
tau = 75 us = (50K)(1.5 nF) = (50 ohms)(1.5 uF)
tau = 50 us = (50K)(1.0 nF) = (50 ohms)(1.0 uF)
In specifying tau for this digital deemphasis filter, tau specifies
the *digital* corner frequency, w_c, desired.
The digital deemphasis filter design below, uses the
"bilinear transformation" method of designing digital filters:
1. Convert digital specifications into the analog domain, by prewarping
digital frequency specifications into analog frequencies.
w_a = (2/T)tan(wT/2)
2. Use an analog filter design technique to design the filter.
3. Use the bilinear transformation to convert the analog filter design to a
digital filter design.
H(z) = H(s)|
s = (2/T)(1-z^-1)/(1+z^-1)
w_ca 1 1 - (-1) z^-1
H(z) = ---- * ----------- * -----------------------
2 fs -w_ca -w_ca
1 - ----- 1 + -----
2 fs 2 fs
1 - ----------- z^-1
-w_ca
1 - -----
2 fs
We use this design technique, because it is an easy way to obtain a filter
design with the -6 dB/octave roll-off required of the deemphasis filter.
Jackson, Leland B., _Digital_Filters_and_Signal_Processing_Second_Edition_,
Kluwer Academic Publishers, 1989, pp 201-212
Orfanidis, Sophocles J., _Introduction_to_Signal_Processing_, Prentice Hall,
1996, pp 573-583
"""
def __init__(self, fs, tau=75e-6):
gr.hier_block2.__init__(self, "fm_deemph",
gr.io_signature(1, 1, gr.sizeof_float), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
# Digital corner frequency
w_c = 1.0 / tau
# Prewarped analog corner frequency
w_ca = 2.0 * fs * math.tan(w_c / (2.0 * fs))
# Resulting digital pole, zero, and gain term from the bilinear
# transformation of H(s) = w_ca / (s + w_ca) to
# H(z) = b0 (1 - z1 z^-1)/(1 - p1 z^-1)
k = -w_ca / (2.0 * fs)
z1 = -1.0
p1 = (1.0 + k) / (1.0 - k)
b0 = -k / (1.0 - k)
btaps = [ b0 * 1.0, b0 * -z1 ]
ataps = [ 1.0, -p1 ]
# Since H(s = 0) = 1.0, then H(z = 1) = 1.0 and has 0 dB gain at DC
if 0:
print("btaps =", btaps)
print("ataps =", ataps)
global plot1
plot1 = gru.gnuplot_freqz(gru.freqz(btaps, ataps), fs, True)
deemph = filter.iir_filter_ffd(btaps, ataps, False)
sel | f.connect(self, deemph, self)
class fm_preemph(gr.hier_block2):
r"""
FM Preemphasis IIR filter.
Args:
fs: sampling frequency in Hz (float)
tau: Time constant in seconds (75us in US, 50us in EUR) (float)
fh: High frequency at which to flatten out (< 0 means default of 0.925*fs/2.0) (float)
An analog pree | mphasis filter, that flattens out again at the high end:
C
+-----||------+
| |
o------+ +-----+--------o
| R1 | |
+----/\/\/\/--+ \
/
\ R2
/
\
|
o--------------------------+--------o
(This fine ASCII rendition is based on Figure 5-15
in "Digital and Analog Communication Systems", Leon W. Couch II)
Has this transfer function:
1
s + ---
R1C
H(s) = ------------------
1 R1
s + --- (1 + --)
R1C R2
It has a corner due to the numerator, where the rise starts, at
|Hn(j w_cl)|^2 = 2*|Hn(0)|^2 => s = j w_cl = j (1/(R1C))
It has a corner due to the denominator, where it levels off again, at
|Hn(j w_ch)|^2 = 1/2*|Hd(0)|^2 => s = j w_ch = j (1/(R1C) * (1 + R1/R2))
Historically, the corner frequency of analog audio preemphasis filters
been specified by the R1C time constant used, called tau.
So
w_cl = 1/tau = 1/R1C; f_cl = 1/(2*pi*tau) = 1/(2*pi*R1*C)
w_ch = 1/tau2 = (1+R1/R2)/R1C; f_ch = 1/(2*pi*tau2) = (1+R1/R2)/(2*pi*R1*C)
and note f_ch = f_cl * (1 + R1/R2).
For broadcast FM audio, tau is 75us in the United States and 50us in Europe.
f_ch should be higher than our digital audio bandwidth.
The Bode plot looks like this:
/----------------
/
/ <-- slope = 20dB/decade
/
-------------/
f_cl f_ch
In specifying tau for this digital preemphasis filter, tau specifies
the *digital* corner frequency, w_cl, desired.
The digital preemphasis filter design below, uses the
"bilinear transformation" method of designing digital filters:
1. Convert digital specifications into the analog domain, by prewarping
digital frequency specifications into analog frequencies.
w_a = (2/T)tan(wT/2)
2. Use an analog filter design technique to design the filter.
3. Use the bilinear transformation to convert the analog filter design to a
digital filter design.
H(z) = H(s)|
s = (2/T)(1-z^-1)/(1+z^-1)
-w_cla
1 + ------
2 fs
1 - ------------ z^-1
-w_cla -w_cla
1 - ------ 1 - ------
2 fs 2 fs
H(z) = ------------ * -----------------------
-w_cha -w_cha
1 - ------ 1 + ------
2 fs 2 fs
1 - ------------ z^-1
-w_cha
1 - ------
2 fs
We use this design technique, because it is an easy way to obtain a filter
design with the 6 dB/octave rise required of the premphasis filter.
Jackson, Leland B., _Digital_Filters_and_Signal_Processing_Second_Edition_,
Kluwer Academic Publishers, 1989, pp 201-212
Orfanidis, Sophocles J., _Introduction_to_Signal_Processing_, Prentice Hall,
1996, pp 573-583
"""
def __init__(self, fs, tau=75e-6, fh=-1.0):
gr.hier_block2.__init__(self, "fm_preemph",
gr.io_signature(1, 1, gr.sizeof_float), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
# Set fh to something sensible, if needed.
# N.B. fh == fs/2.0 or fh == 0.0 results in a pole on the unit circle
# at z = -1.0 or z = 1.0 respectively. That makes the filter unstable
# and useless.
if fh <= 0.0 or |
appsembler/symposion-openshift-quickstart | setup.py | Python | mit | 618 | 0.02589 | import os
from setuptools import setup, find_packages
from pip.req import parse_requirements
#REQUIREMENTS_FILE = os.path.join( os.path.dirname(__file__), 'requirements.openshift.txt')
PROJECT_NAME = '<your-project-name>'
AUTHOR_NAME = '<your-name>'
AUTHOR_EMAIL = '<your-email-address>'
PROJECT_URL = ''
DESCRIPTION = '<your-project-descripti | on>'
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
setup(name=PROJECT_NAME,
version='1.0', |
author=AUTHOR_NAME,
author_email=AUTHOR_EMAIL,
url=PROJECT_URL,
packages=find_packages(),
include_package_data=True,
description=DESCRIPTION,
)
|
hsoft/pdfmasher | qtlib/tree_model.py | Python | gpl-3.0 | 6,253 | 0.008316 | # Created By: Virgil Dupras
# Created On: 2009-09-14
# Copyright 2013 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
import logging
from PyQt4.QtCore import QAbstractItemModel, QModelIndex
class NodeContainer:
    """Caches node wrappers built around a list of reference objects.

    Subclasses supply the references (_getChildren) and the wrapping
    (_createNode); `subnodes` lazily builds and caches the wrapped list,
    reusing previously created nodes via the ref->node mapping.
    """
    def __init__(self):
        self._subnodes = None
        self._ref2node = {}

    #--- Protected
    def _createNode(self, ref, row):
        """Return a TreeNode instance wrapping `ref` at position `row` (abstract)."""
        raise NotImplementedError()

    def _getChildren(self):
        """Return the list of ref instances, not TreeNode instances (abstract)."""
        raise NotImplementedError()

    #--- Public
    def invalidate(self):
        """Drop the cached subnode list without resetting the ref->node mapping."""
        self._subnodes = None

    #--- Properties
    @property
    def subnodes(self):
        if self._subnodes is None:
            nodes = []
            for row, ref in enumerate(self._getChildren()):
                if ref in self._ref2node:
                    # reuse the existing wrapper, refreshing its position
                    node = self._ref2node[ref]
                    node.row = row
                else:
                    node = self._createNode(ref, row)
                    self._ref2node[ref] = node
                nodes.append(node)
            self._subnodes = nodes
        return self._subnodes
class TreeNode(NodeContainer):
    """A NodeContainer tied to a Qt model, with a parent link and row position."""
    def __init__(self, model, parent, row):
        NodeContainer.__init__(self)
        self.model = model    # the TreeModel this node belongs to
        self.parent = parent  # parent TreeNode, or None for top-level nodes
        self.row = row        # position of this node among its parent's subnodes
    @property
    def index(self):
        # QModelIndex pointing at this node (column 0)
        return self.model.createIndex(self.row, 0, self)
class RefNode(TreeNode):
    """Node pointing to a reference node.
    Use this if your Qt model wraps around a tree model that has iterable
    nodes: iterating `ref` yields its children.
    """
    def __init__(self, model, parent, ref, row):
        TreeNode.__init__(self, model, parent, row)
        self.ref = ref  # the wrapped reference object
    def _createNode(self, ref, row):
        # children are wrapped in RefNodes as well
        return RefNode(self.model, self, ref, row)
    def _getChildren(self):
        return list(self.ref)
# We use a specific TreeNode subclass to easily spot dummy nodes, especially in exception tracebacks.
class DummyNode(TreeNode):
    """Placeholder node type, easy to spot in exception tracebacks."""
    pass
class TreeModel(QAbstractItemModel, NodeContainer):
    def __init__(self):
        """Initialize both the Qt model base and the node-container state."""
        QAbstractItemModel.__init__(self)
        NodeContainer.__init__(self)
        self._dummyNodes = set() # references to dummy nodes have to be kept to avoid a segfault
#--- Private
    def _createDummyNode(self, parent, row):
        """Return a placeholder node for transiently-invalid index queries.

        Override in subclasses that need a dummy of a specific node type.
        """
        # In some cases (drag & drop row removal, to be precise), there's a temporary discrepancy
        # between a node's subnodes and what the model think it has. This leads to invalid indexes
        # being queried. Rather than going through complicated row removal crap, it's simpler to
        # just have rows with empty data replacing removed rows for the millisecond that the drag &
        # drop lasts. Override this to return a node of the correct type.
        return DummyNode(self, parent, row)
def _lastIndex(self):
"""Index of the very last item in the tree.
"""
currentIndex = QModelIndex()
rowCount = self.rowCount(currentIndex)
while rowCount > 0:
currentIndex = self.index(rowCount-1, 0, currentIndex)
rowCount = self.rowCount(currentIndex)
return currentIndex
#--- Overrides
def index(self, row, column, parent):
if not self.subnodes:
return QModelIndex()
node = parent.internalPointer() if parent.isValid() else self
try:
return self.createIndex(row, column, node.subnodes[row])
except IndexError:
logging.debug("Wrong tree index called (%r, %r, %r). | Returning DummyNode",
row, column, node)
parentNode = parent.internalPointer() if parent.isValid() else None
dummy = self._createDummyNode(parentNode, row)
self._dummyNodes.add(dummy)
return self.createIndex(r | ow, column, dummy)
def parent(self, index):
if not index.isValid():
return QModelIndex()
node = index.internalPointer()
if node.parent is None:
return QModelIndex()
else:
return self.createIndex(node.parent.row, 0, node.parent)
    def reset(self):
        """Fully reset the model: drop all cached nodes and notify attached views."""
        self.invalidate()
        self._ref2node = {}
        self._dummyNodes = set()
        QAbstractItemModel.reset(self)
def rowCount(self, parent=QModelIndex()):
node = parent.internalPointer() if parent.isValid() else self
return len(node.subnodes)
#--- Public
def findIndex(self, rowPath):
"""Returns the QModelIndex at `rowPath`
`rowPath` is a sequence of node rows. For example, [1, 2, 1] is the 2nd child of the
3rd child of the 2nd child of the root.
"""
result = QModelIndex()
for row in rowPath:
result = self.index(row, 0, result)
return result
@staticmethod
def pathForIndex(index):
reversedPath = []
while index.isValid():
reversedPath.append(index.row())
index = index.parent()
return list(reversed(reversedPath))
    def refreshData(self):
        """Updates the data on all nodes, but without having to perform a full reset.
        A full reset on a tree makes us lose selection and expansion states. When all we want to do
        is to refresh the data on the nodes without adding or removing a node, a call on
        dataChanged() is better. But of course, Qt makes our life complicated by asking us topLeft
        and bottomRight indexes. This is a convenience method refreshing the whole tree.
        """
        columnCount = self.columnCount()
        topLeft = self.index(0, 0, QModelIndex())
        bottomLeft = self._lastIndex()
        bottomRight = self.sibling(bottomLeft.row(), columnCount-1, bottomLeft)
        self.dataChanged.emit(topLeft, bottomRight)
|
StevenMaude/sale_GOGgles | GOGgles.py | Python | gpl-3.0 | 2,999 | 0.000333 | #!/usr/bin/env python
# encoding: utf-8
# Copyright 2013 Steven Maude
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import time
import sys
import subprocess
import requests
import lxml.html
GO | G_URL = 'http://www.gog.com'
def get_front_page():
"""
Returns content of gog.com front page.
"""
r = requests.get(GOG_URL)
return | r.content
def get_sale_game_title(content):
    """Extract the title of the game currently on sale from the front-page HTML."""
    tree = lxml.html.fromstring(content)
    titles = tree.xpath("//div[@class='game__info']/a[@class='game__title']/text()")
    return titles[0].strip()
def check_title_wanted(current_title, games):
    """Return True if `current_title` is one of the wanted `games`, else False.

    Entries in `games` use underscores in place of spaces (e.g.
    'System_Shock_2'), matching how titles are passed on the command line.

    Fixes the original implicit-None fall-through: a non-match now returns
    an explicit False (both are falsy, so callers are unaffected).
    """
    return current_title.replace(' ', '_') in games
def main():
    """
    Warning: quick 30 minute hack.
    Usage: sale_GOGgles.py
    With no args, continually prints current game on offer otherwise
    looks for a match to games in args
    e.g. sale_GOGgles.py System_Shock_2 Tomb_Raider_1+2+3
    Games must match exactly; get the actual title from the game banner on the
    page, and replace spaces with underscores
    e.g. for http://www.gog.com/game/rollercoaster_tycoon_3
    you'd use sale_GOGgles.py Rollercoaster_Tycoon_3_Platinum!
    """
    # NOTE(review): sys.argv[1:] never raises IndexError, so this except
    # branch is dead code; with no args `games` becomes [] (not None) and
    # nothing ever matches — confirm intended behaviour.
    try:
        games = sys.argv[1:]
    except IndexError:
        print "No games specified"
        print "Going to just print current game instead"
        games = None
    previous_title = None
    # poll the front page forever, reporting the on-sale title every 30s
    while True:
        content = get_front_page()
        current_title = get_sale_game_title(content)
        print datetime.datetime.now().time(), current_title
        # only announce when the sale item changes to a wanted title
        if current_title != previous_title and \
           check_title_wanted(current_title, games):
            print "Game wanted!"
            # here, we could launch a browser pointing to GOG
            # uncomment
            # Ubuntu launch browser
            # subprocess.call(['xdg-open', GOG_URL])
            # For Windows, something like this might work (not tested):
            # subprocess.call['start', '"www.gog.com"']
        previous_title = current_title
        time.sleep(30)
if __name__ == '__main__':
main()
|
tgquintela/pySpatialTools | pySpatialTools/tests/test_sampling.py | Python | mit | 2,210 | 0.005882 |
"""
Testing sampling
----------------
testing functions which helps in spatial sampling
"""
#import networkx as nx
#from scipy.sparse import coo_matrix
#from pySpatialTools.utils.artificial_data import\
# generate_random_relations_cutoffs
import numpy as np
from pySpatialTools.Sampling.sampling_from_space import *
from pySpatialTools.Sampling.sampling_from_points import *
from pySpatialTools.Sampling.auxiliary_functions import *
from pySpatialTools.Discretization.Discretization_2d import GridSpatialDisc
def test():
# ## Generate sp_relations
# sp_relations = generate_random_relations_cutoffs(100)
# connected = nx.connected_components(sp_relati | ons.relations)
###########################################################################
###########################################################################
############################## Test sampling ##############################
###########################################################################
## Parameters
n | , n_e = 100, 1000
ngx, ngy = 100, 100
limits = np.array([[0.1, -0.1], [0.5, 0.6]])
disc = GridSpatialDisc((ngx, ngy), xlim=(0, 1), ylim=(0, 1))
p_cats = np.random.randint(0, 10, n_e)
locs = np.random.random((n_e, 2))
region_weighs = np.random.random(ngx*ngy)
point_weighs = np.random.random(n_e)
#### Functions to test
######################
### Sampling core
weighted_sampling_with_repetition(p_cats, n)
weighted_sampling_with_repetition(p_cats, n, point_weighs)
weighted_sampling_without_repetition(p_cats, n)
weighted_sampling_without_repetition(p_cats, n, point_weighs)
weighted_nonfixed_sampling_without_repetition(p_cats, n)
weighted_nonfixed_sampling_without_repetition(p_cats, n, point_weighs)
### Sampling from region
uniform_points_sampling(limits, n)
weighted_region_space_sampling(disc, n, point_weighs)
## Sampling from points
weighted_point_sampling(locs, n)
weighted_point_sampling(locs, n, point_weighs)
uniform_points_points_sampling(limits, locs, n)
weighted_region_points_sampling(disc, locs, n)
weighted_region_points_sampling(disc, locs, n, region_weighs)
|
guanxi55nba/db-improvement | pylib/cqlshlib/helptopics.py | Python | apache-2.0 | 30,976 | 0.000839 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cql3handling import simple_cql_types
class CQLHelpTopics(object):
def get_help_topics(self):
return [ t[5:] for t in dir(self) if t.startswith('help_') ]
    def print_help_topic(self, topic):
        """Invoke the help_<topic> printer method for `topic` (case-insensitive)."""
        getattr(self, 'help_' + topic.lower())()
    def help_types(self):
        """Print the CQL types recognized by cqlsh, with pointers to format topics."""
        print "\n CQL types recognized by this version of cqlsh:\n"
        for t in simple_cql_types:
            print ' ' + t
        print """
        For information on the various recognizable input formats for these
        types, or on controlling the formatting of cqlsh query output, see
        one of the following topics:
        HELP TIMESTAMP_INPUT
        HELP BLOB_INPUT
        HELP UUID_INPUT
        HELP BOOLEAN_INPUT
        HELP TEXT_OUTPUT
        HELP TIMESTAMP_OUTPUT
        """
def help_timestamp_input(self):
print """
Timestamp input
CQL supports any of the following ISO 8601 formats for timestamp
specification:
yyyy-mm-dd HH:mm
| yyyy-mm-dd HH:mm:ss
yyyy-mm-dd HH:mmZ
yyyy-mm-dd HH:mm:ssZ
yyyy-mm-dd'T'HH:mm
yyyy-mm-dd'T'HH:mmZ
yyyy-mm-dd'T'HH:mm:ss
yyyy-mm-dd'T'HH:mm:ssZ
yyyy-mm-dd
yyyy-mm-ddZ
The Z in these formats refers to an RFC-822 4-digit time zone,
expressing the time zone's difference fr | om UTC. For example, a
timestamp in Pacific Standard Time might be given thus:
2012-01-20 16:14:12-0800
If no time zone is supplied, the current time zone for the Cassandra
server node will be used.
"""
def help_blob_input(self):
print """
Blob input
CQL blob data must be specified in a string literal as hexidecimal
data. Example: to store the ASCII values for the characters in the
string "CQL", use '43514c'.
"""
def help_uuid_input(self):
print """
UUID input
UUIDs may be specified in CQL using 32 hexidecimal characters,
split up using dashes in the standard UUID format:
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
"""
def help_boolean_input(self):
print """
Boolean input
CQL accepts the strings 'true' and 'false' (case insensitive)
as input for boolean types.
"""
def help_timestamp_output(self):
print """
Timestamp output
Cqlsh will display timestamps in the following format by default:
yyyy-mm-dd HH:mm:ssZ
which is a format acceptable as CQL timestamp input as well.
The output format can be changed by setting 'time_format' property
in the [ui] section of .cqlshrc file.
"""
def help_text_output(self):
    """Describe how cqlsh renders text/ascii values (escapes, unicode, color)."""
    # print(...) call form is valid and identical on Python 2 and 3.
    print("""
        Textual output

        When control characters, or other characters which can't be encoded
        in your current locale, are found in values of 'text' or 'ascii'
        types, it will be shown as a backslash escape. If color is enabled,
        any such backslash escapes will be shown in a different color from
        the surrounding text.

        Unicode code points in your data will be output intact, if the
        encoding for your locale is capable of decoding them. If you prefer
        that non-ascii characters be shown with Python-style "\\uABCD"
        escape sequences, invoke cqlsh with an ASCII locale (for example,
        by setting the $LANG environment variable to "C").
        """)
# 'ascii' values are rendered exactly like 'text' values, so the ASCII
# help topic reuses the same handler.
help_ascii_output = help_text_output
def help_create_index(self):
    """Document the CREATE INDEX statement syntax and semantics."""
    # print(...) call form is valid and identical on Python 2 and 3.
    print("""
        CREATE INDEX [<indexname>] ON <cfname> ( <colname> );

        A CREATE INDEX statement is used to create a new, automatic secondary
        index on the given CQL table, for the named column. A name for the
        index itself can be specified before the ON keyword, if desired. A
        single column name must be specified inside the parentheses. It is not
        necessary for the column to exist on any current rows (Cassandra is
        schema-optional), but the column must already have a type (specified
        during the CREATE TABLE, or added afterwards with ALTER TABLE).
        """)
def help_drop(self):
    """List the DROP statement variants and their dedicated help topics."""
    # print(...) call form is valid and identical on Python 2 and 3.
    print("""
        There are different variants of DROP. For more information, see
        one of the following:

          HELP DROP_KEYSPACE;
          HELP DROP_TABLE;
          HELP DROP_INDEX;
        """)
def help_drop_keyspace(self):
    """Document the DROP KEYSPACE statement syntax and semantics."""
    # print(...) call form is valid and identical on Python 2 and 3.
    print("""
        DROP KEYSPACE <keyspacename>;

        A DROP KEYSPACE statement results in the immediate, irreversible
        removal of a keyspace, including all column families in it, and all
        data contained in those column families.
        """)
def help_drop_table(self):
    """Document the DROP TABLE statement syntax and semantics."""
    # print(...) call form is valid and identical on Python 2 and 3.
    print("""
        DROP TABLE <tablename>;

        A DROP TABLE statement results in the immediate, irreversible
        removal of a CQL table and the underlying column family, including all
        data contained in it.
        """)
# DROP COLUMNFAMILY is the legacy spelling of DROP TABLE; same help text.
help_drop_columnfamily = help_drop_table
def help_drop_index(self):
    """Document the DROP INDEX statement syntax and semantics."""
    # print(...) call form is valid and identical on Python 2 and 3.
    print("""
        DROP INDEX <indexname>;

        A DROP INDEX statement is used to drop an existing secondary index.
        """)
def help_truncate(self):
    """Document the TRUNCATE statement syntax and semantics."""
    # print(...) call form is valid and identical on Python 2 and 3.
    print("""
        TRUNCATE <tablename>;

        TRUNCATE accepts a single argument for the table name, and permanently
        removes all data from it.
        """)
def help_create(self):
    """List the CREATE statement variants and their dedicated help topics."""
    # print(...) call form is valid and identical on Python 2 and 3.
    print("""
        There are different variants of CREATE. For more information, see
        one of the following:

          HELP CREATE_KEYSPACE;
          HELP CREATE_TABLE;
          HELP CREATE_INDEX;
        """)
def help_use(self):
    """Document the USE statement syntax and semantics."""
    # print(...) call form is valid and identical on Python 2 and 3.
    print("""
        USE <keyspacename>;

        Tells cqlsh and the connected Cassandra instance that you will be
        working in the given keyspace. All subsequent operations on tables
        or indexes will be in the context of this keyspace, unless otherwise
        specified, until another USE command is issued or the connection
        terminates.

        As always, when a keyspace name does not work as a normal identifier or
        number, it can be quoted using single quotes (CQL 2) or double quotes
        (CQL 3).
        """)
def help_create_table(self):
    """Document the CREATE TABLE statement syntax and key requirements."""
    # print(...) call form is valid and identical on Python 2 and 3.
    print("""
        CREATE TABLE <cfname> ( <colname> <type> PRIMARY KEY [,
                                <colname> <type> [, ...]] )
               [WITH <optionname> = <val> [AND <optionname> = <val> [...]]];

        CREATE TABLE statements create a new CQL table under the current
        keyspace. Valid table names are strings of alphanumeric characters and
        underscores, which begin with a letter.

        Each table requires a primary key, which will correspond to the
        underlying columnfamily key and key validator. It's important to
        note that the key type you use must be compatible with the partitioner
        in use. For example, OrderPreservingPartitioner and
        CollatingOrderPreservingPartitioner both require UTF-8 keys.

        In cql3 mode, a table can have multiple columns composing the primary
        key (see HELP COMPOUND_PRIMARY_KEYS).

        For more information, see one of the following:

          HELP CREATE_TABLE_TYPES;
          HELP CREATE_TABLE_OPTIONS;
        """)
help_create |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.