| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 | stringlengths 0-8.16k | stringlengths 3-512 | stringlengths 0-8.17k |
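Each row holds one fill-in-the-middle sample: the `prefix`, `middle`, and `suffix` columns are consecutive slices of the same source file. As a minimal sketch of how such a record can be reassembled (the field values below are hypothetical placeholders, not taken from any row in this preview):

```python
# Hypothetical record following the schema above; values are invented for illustration.
record = {
    "repo_name": "example/repo",
    "path": "pkg/module.py",
    "language": "Python",
    "license": "mit",
    "size": 42,
    "score": 0.0,
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n",
}

# The original file text is the three string columns concatenated in order.
full_source = record["prefix"] + record["middle"] + record["suffix"]
print(full_source)
```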

mzdaniel/django-selenium-test-runner | tests/manage.py | Python | bsd-3-clause | 665 | 0.003008
#!/usr/bin/env python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
# Allow Django commands to run even without package installation.
import sys
sys.path = ['..'] + sys.path
execute_manager(settings)

edx-solutions/edx-platform | lms/djangoapps/courseware/tests/test_word_cloud.py | Python | agpl-3.0 | 8,875 | 0.000903
# -*- coding: utf-8 -*-
"""Word cloud integration tests using mongo modulestore."""
import json
from operator import itemgetter
from xmodule.x_module import STUDENT_VIEW
from .helpers import BaseTestXmodule
class TestWordCloud(BaseTestXmodule):
"""Integration test for word cloud xmodule."""
CATEGORY = "word_cloud"
def _get_resource_url(self, item):
"""
Creates a resource URL for a given asset that is compatible with this old XModule testing stuff.
"""
display_name = self.item_descriptor.display_name.replace(' ', '_')
return "resource/i4x://{}/{}/word_cloud/{}/{}".format(
self.course.id.org, self.course.id.course, display_name, item
)
def _get_users_state(self):
"""Return current state for each user:
{username: json_state}
"""
# check word cloud response for every user
users_state = {}
for user in self.users:
response = self.clients[user.username].post(self.get_url('get_state'))
users_state[user.username] = json.loads(response.content.decode('utf-8'))
return users_state
def _post_words(self, words):
"""Post `words` and return current state for each user:
{username: json_state}
"""
users_state = {}
for user in self.users:
response = self.clients[user.username].post(
self.get_url('submit'),
{'student_words[]': words},
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
users_state[user.username] = json.loads(response.content.decode('utf-8'))
return users_state
def _check_response(self, response_contents, correct_jsons):
"""Utility function that compares correct and real responses."""
for username, content in response_contents.items():
# Used in debugger for comparing objects.
# self.maxDiff = None
# We should compare top_words manually,
# because they are unsorted.
keys_to_compare = set(content.keys()).difference(set(['top_words']))
self.assertDictEqual(
{k: content[k] for k in keys_to_compare},
{k: correct_jsons[username][k] for k in keys_to_compare})
# comparing top_words:
top_words_content = sorted(
content['top_words'],
key=itemgetter('text')
)
top_words_correct = sorted(
correct_jsons[username]['top_words'],
key=itemgetter('text')
)
self.assertListEqual(top_words_content, top_words_correct)
def test_initial_state(self):
"""Inital state of word cloud is correct. Those state that
is sended from server to frontend, when students load word
cloud page.
"""
users_state = self._get_users_state()
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state.items()
])),
'success')
# correct initial data:
correct_initial_data = {
u'status': u'success',
u'student_words': {},
u'total_count': 0,
u'submitted': False,
u'top_words': {},
u'display_student_percents': False
}
for _, response_content in users_state.items():
self.assertEqual(response_content, correct_initial_data)
def test_post_words(self):
"""Students can submit data succesfully.
Word cloud data properly updates after students submit.
"""
input_words = [
"small",
"BIG",
" Spaced ",
" few words",
]
correct_words = [
u"small",
u"big",
u"spaced",
u"few words",
]
users_state = self._post_words(input_words)
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state.items()
])),
'success')
correct_state = {}
for index, user in enumerate(self.users):
correct_state[user.username] = {
u'status': u'success',
u'submitted': True,
u'display_student_percents': True,
u'student_words': {word: 1 + index for word in correct_words},
u'total_count': len(input_words) * (1 + index),
u'top_words': [
{
u'text': word, u'percent': 100 / len(input_words),
u'size': (1 + index)
}
for word in correct_words
]
}
self._check_response(users_state, correct_state)
def test_collective_users_submits(self):
"""Test word cloud data flow per single and collective users submits.
Make sures that:
1. Inital state of word cloud is correct. Those state that
is sended from server to frontend, when students load word
cloud page.
2. Students can submit data succesfully.
3. Next submits produce "already voted" error. Next submits for user
are not allowed by user interface, but techically it possible, and
word_cloud should properly react.
4. State of word cloud after #3 is still as after #2.
"""
# 1.
users_state = self._get_users_state()
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state.items()
])),
'success')
# 2.
# Incremental state per user.
users_state_after_post = self._post_words(['word1', 'word2'])
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state_after_post.items()
])),
'success')
# Final state after all posts.
users_state_before_fail = self._get_users_state()
# 3.
users_state_after_post = self._post_words(
['word1', 'word2', 'word3'])
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state_after_post.items()
])),
'fail')
# 4.
current_users_state = self._get_users_state()
self._check_response(users_state_before_fail, current_users_state)
def test_unicode(self):
input_words = [u" this is unicode Юникод"]
correct_words = [u"this is unicode юникод"]
users_state = self._post_words(input_words)
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state.items()
])),
'success')
for user in self.users:
self.assertListEqual(
list(users_state[user.username]['student_words'].keys()),
correct_words)
def test_handle_ajax_incorrect_dispatch(self):
responses = {
user.username: self.clients[user.username].post(
self.get_url('whatever'),
{},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
for user in self.users
}
status_codes = {response.status_code for response in responses.values()}
self.assertEqual(status_codes.pop(), 200)
for user in self.users:
self.assertDictEqual(
json.loads(responses[user.username].content.decode('utf-8')),
{
'status': 'fail',
'error': 'Unknown Command!'
}
)
def test_word_cloud_constructor(self):
"""
Make sure that all parameters extracted corre

wannabeCitizen/quantifiedSelf | app/user_auth.py | Python | mit | 6,825 | 0.000586
from tornado import gen
from tornado import web
from tornado import ioloop
import uuid
import os
import pickle
from lib.database.users import user_insert
from lib.database.users import get_user
from lib.database.users import get_user_from_email
from lib.database.reservations import create_ticket_reservation
from lib.database.reservations import confirm_ticket_reservation
from lib.database.reservations import change_reservation_showtime
from lib.database.reservations import get_reservations_for_showtime
from lib.database.reservations import get_reservation_for_user
from lib.database.promotion_keys import pop_promotion_key
from lib.database.showtimes import get_showtime
from lib.email_sender import send_reminder
from lib.database.reservations import get_reservations
from lib.basehandler import secured
from lib.email_sender import send_confirmation
from lib.basehandler import BaseHandler
@secured
class UserReminder(BaseHandler):
_ioloop = ioloop.IOLoop().instance()
@web.asynchronous
@gen.coroutine
def get(self):
send_res = []
reservations = yield get_reservations()
past_email = pickle.load(open(
'./data/emails.pkl', 'rb'))
for reservation in reservations:
show_id = reservation['showtime_id']
show_meta = yield get_showtime(show_id)
date_str = show_meta['date_str']
user_id = reservation['user_id']
user = yield get_user(user_id)
name = user['name']
email = user['email']
if email in past_email:
continue
try:
yield send_reminder(email, name, date_str)
send_res.append(email)
except Exception as e:
print("Exception while sending out emails: {0}".format(e))
os.makedirs("./data/", exist_ok=True)
yield gen.sleep(10)
send_res.extend(past_email)
with open('./data/emails.pkl', 'wb+') as fd:
pickle.dump(send_res, fd)
return self.api_response({'reminder_status': "all sent!"})
class UserAuth(BaseHandler):
_ioloop = ioloop.IOLoop().instance()
@web.asynchronous
@gen.coroutine
def post(self):
name = self.get_argument("name", None)
email = self.get_argument("email", None)
showtime_id = self.get_argument("showtime_id", None)
promo_code = self.get_argument('promotion_key', None)
# Validate user and email entries
if name is None or email is None:
return self.error(
403,
"Must provide valid username and email address to continue"
)
# Validate the show time
if showtime_id is None:
return self.error(400, "Must provide 'showtime_id' to proceed.")
showtime = yield get_showtime(showtime_id)
if showtime is None:
return self.error(404, "Could not find the selected showtime.")
if not (yield self.canBookTicketForShowtime(showtime, promo_code)):
return self.error(400, "This showtime is sold out.")
# Grab or create a user
user = yield get_user_from_email(email)
if user is not None:
user_id = user['id']
self.set_secure_cookie("user_id", user_id)
# check for any previous confirmed booking
reservation = yield get_reservation_for_user(user_id)
if reservation is not None and\
reservation['confirmation_code'] != "":
return self.error(
403,
"Sorry, you already have a ticket for the show."
)
else:
user_id = yield user_insert(name, email, showtime_id)
self.set_secure_cookie("user_id", user_id)
# Create a reservation: note that all previous unconfirmed reservations
# will be lost
yield create_ticket_reservation(showtime["id"], user_id)
@gen.coroutine
def put(self):
ticket_type = self.get_argument("type", "normal")
showtime_id = self.get_argument("showtime_id", None)
user_id = self.get_secure_cookie("user_id", None)
if user_id is None:
return self.error(403, "Must include the user cookie")
# Now grab the reservation
reservation = yield get_reservation_for_user(user_id)
if reservation is None:
return self.error(403, "There is no reservation for this account.")
confirmation_code = str(uuid.uuid1())
if ticket_type == "shitty":
# Confirm a shitty ticket_type
if reservation['showtime_id'] == showtime_id:
yield confirm_ticket_reservation(
reservation['id'], confirmation_code, True)
else:
yield self.change_showtime(showtime_id, reservation,
confirmation_code)
else:
# TODO: check the access_tokens, make sure we have enough.
yield confirm_ticket_reservation(
reservation['id'], confirmation_code, False)
user = yield get_user(user_id)
yield send_confirmation(user['email'], user['name'], confirmation_code)
self.clear_cookie('user_id')
self.api_response({'confirmation_code': confirmation_code})
@gen.coroutine
def change_showtime(self, showtime_id, reservation, confirmation_code):
showtime = yield get_showtime(showtime_id)
if showtime is None:
return self.error(404, "Showtime not found!")
if not (yield self.isShowTimeAvailable(showtime, True)):
return self.error(400, "Ticket is not available any more.")
yield change_reservation_showtime(
reservation['id'], showtime_id)
yield confirm_ticket_reservation(
reservation['id'], confirmation_code, True)
@gen.coroutine
def isShowTimeAvailable(self, showtime, is_shitty=False):
allReservations = yield get_reservations_for_showtime(showtime["id"])
if is_shitty:
fieldName = "max_shitty_booking"
good = list(filter(lambda x: x['is_shitty'] == True, allReservations))
else:
fieldName = "max_normal_booking"
good = list(filter(lambda x: x['is_shitty'] == False, allReservations))
return len(good) < showtime[fieldName]
@gen.coroutine
def canBookTicketForShowtime(self, showtime, promo_code):
if (yield self.isShowTimeAvailable(showtime)):
return True
if promo_code is None:
return False
promotion_key = yield pop_promotion_key(promo_code)
return promotion_key is not None and\
promotion_key['showtime_id'] == showtime['id']

0xporky/mgnemu-python | mgnemu/routes.py | Python | mit | 1,677 | 0
# -*- coding: utf-8 -*-
from os import urandom
from flask import Flask
from flask import request
from flask_httpauth import HTTPDigestAuth
from mgnemu.controllers.check_tape import CheckTape
app = Flask(__name__)
app.config['SECRET_KEY'] = str(urandom(24))
auth = HTTPDigestAuth()
@auth.get_password
def get_pw(username):
# TODO: we need add clear auth
return '1'
@auth.generate_nonce
def generate_nonce():
# TODO: we need add clear auth
return str(urandom(8))
@auth.generate_opaque
def generate_opaque():
# TODO: we need add clear auth
return str(urandom(8))
@auth.verify_nonce
def verify_nonce(nonce):
# TODO: we need add clear auth
return True
@auth.verify_opaque
def verify_opaque(opaque):
# TODO: we need add clear auth
return True
@app.route('/cgi/status', methods=['GET'])
def get_status():
return ' { "Status" : "ok" }'
@app.route('/cgi/chk', methods=['GET'])
@auth.login_required
def get_check_tape():
ct = CheckTape(app.config['ap'])
return ct.check_tape
@app.route('/cgi/rep/pay', methods=['GET'])
@auth.login_required
def get_pay_sums():
ct = CheckTape(app.config['ap'])
return ct.payments
@app.route('/cgi/chk', methods=['POST'])
@auth.login_required
def add_check():
result = request.get_json(force=True)
ct = CheckTape(app.config['ap'])
return ct.add_check(result)
@app.route('/cgi/proc/printreport', methods=['GET'])
@auth.login_required
def print_orders():
ct = CheckTape(app.config['ap'])
keys = list(request.args.keys())
if '10' in keys:
return ct.payments
if '0' in keys:
ct.print_z_order()
return ' { "Status" : "ok" }'

renardchien/Software-Development-on-Linux--Open-Source-Course- | Labs/Lab 4/Student Files/Hello.py | Python | lgpl-3.0 | 766 | 0.007833
#!/usr/bin/env python
#
# Hello World
#
# Copyright 2012 Cody Van De Mark
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
#
print("He
|
llo World")

paulocsanz/algebra-linear | scripts/runge_kutta_2a.py | Python | agpl-3.0 | 431 | 0.011601
#!/usr/bin/env python3
from math import log, exp
def RungeKutta2aEDO (x0, t0, tf, h, dX):
xold = x0
told = t0
ret = []
while (told <= tf):
ret += [(told, xold)]
k1 = dX(xold, told)
k2 = dX(xold + h*k1, told+h)
xold = xold + h/2 * (k1+k2)
told = round(told + h, 3)
return ret
if __name__ == "__main__":
dX = lambda x, t: t + x
RungeKutta2aEDO(0, 0, 1, 0.1, dX)

munhyunsu/Hobby | TestResponse/testresponse.py | Python | gpl-3.0 | 878 | 0.001139
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import urllib.request
import time
from bs4 import BeautifulSoup
def main():
url = 'http://www.pokemonstore.co.kr/shop/main/index.php'
print('For exit, press ctrl + c')
while(True):
try:
with urllib.request.urlopen(url) as f:
if f.code == 200:
html = f.read()
soup = BeautifulSoup(html, 'html5lib')
if soup.title is not None:
print(f.code, 'Success', soup.title.text, '\a')
else:
print(f.code, 'Connected but not html')
except urllib.error.HTTPError:
print('May be 404. sleep')
except KeyboardInterrupt:
print('pressed ctrl + c. exit')
break
time.sleep(60)
if __name__ == '__main__':
main()

CTSRD-SOAAP/chromium-42.0.2311.135 | native_client/buildbot/buildbot_lib.py | Python | bsd-3-clause | 21,434 | 0.014276
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os.path
import shutil
import subprocess
import sys
import time
import traceback
ARCH_MAP = {
'32': {
'gyp_arch': 'ia32',
'scons_platform': 'x86-32',
},
'64': {
'gyp_arch': 'x64',
'scons_platform': 'x86-64',
},
'arm': {
'gyp_arch': 'arm',
'scons_platform': 'arm',
},
'mips32': {
'gyp_arch': 'mips32',
'scons_platform': 'mips32',
},
}
def RunningOnBuildbot():
return os.environ.get('BUILDBOT_SLAVE_TYPE') is not None
def GetHostPlatform():
sys_platform = sys.platform.lower()
if sys_platform.startswith('linux'):
return 'linux'
elif sys_platform in ('win', 'win32', 'windows', 'cygwin'):
return 'win'
elif sys_platform in ('darwin', 'mac'):
return 'mac'
else:
raise Exception('Can not determine the platform!')
def SetDefaultContextAttributes(context):
"""
Set default values for the attributes needed by the SCons function, so that
SCons can be run without needing ParseStandardCommandLine
"""
platform = GetHostPlatform()
context['platform'] = platform
context['mode'] = 'opt'
context['default_scons_mode'] = ['opt-host', 'nacl']
context['default_scons_platform'] = ('x86-64' if platform == 'win'
else 'x86-32')
context['android'] = False
context['clang'] = False
context['asan'] = False
context['pnacl'] = False
context['use_glibc'] = False
context['use_breakpad_tools'] = False
context['max_jobs'] = 8
context['scons_args'] = []
# Windows-specific environment manipulation
def SetupWindowsEnvironment(context):
# Poke around looking for MSVC. We should do something more principled in
# the future.
# The name of Program Files can differ, depending on the bittage of Windows.
program_files = r'c:\Program Files (x86)'
if not os.path.exists(program_files):
program_files = r'c:\Program Files'
if not os.path.exists(program_files):
raise Exception('Cannot find the Program Files directory!')
# The location of MSVC can differ depending on the version.
msvc_locs = [
('Microsoft Visual Studio 12.0', 'VS120COMNTOOLS', '2013'),
('Microsoft Visual Studio 10.0', 'VS100COMNTOOLS', '2010'),
('Microsoft Visual Studio 9.0', 'VS90COMNTOOLS', '2008'),
('Microsoft Visual Studio 8.0', 'VS80COMNTOOLS', '2005'),
]
for dirname, comntools_var, gyp_msvs_version in msvc_locs:
msvc = os.path.join(program_files, dirname)
context.SetEnv('GYP_MSVS_VERSION', gyp_msvs_version)
if os.path.exists(msvc):
break
else:
# The break statement did not execute.
raise Exception('Cannot find MSVC!')
# Put MSVC in the path.
vc = os.path.join(msvc, 'VC')
comntools = os.path.join(msvc, 'Common7', 'Tools')
perf = os.path.join(msvc, 'Team Tools', 'Performance Tools')
context.SetEnv('PATH', os.pathsep.join([
context.GetEnv('PATH'),
vc,
comntools,
perf]))
# SCons needs this variable to find vsvars.bat.
# The end slash is needed because the batch files expect it.
context.SetEnv(comntools_var, comntools + '\\')
# This environment variable will cause SCons to print debug info while it searches
# for MSVC.
context.SetEnv('SCONS_MSCOMMON_DEBUG', '-')
# Needed for finding devenv.
context['msvc'] = msvc
SetupGyp(context, [])
def SetupGyp(context, extra_vars=[]):
context.SetEnv('GYP_GENERATORS', 'ninja')
if RunningOnBuildbot():
goma_opts = [
'use_goma=1',
'gomadir=/b/build/goma',
]
else:
goma_opts = []
context.SetEnv('GYP_DEFINES', ' '.join(
context['gyp_vars'] + goma_opts + extra_vars))
def SetupLinuxEnvironment(context):
SetupGyp(context, ['target_arch='+context['gyp_arch']])
def SetupMacEnvironment(context):
SetupGyp(context, ['target_arch='+context['gyp_arch']])
def SetupAndroidEnvironment(context):
SetupGyp(context, ['OS=android', 'target_arch='+context['gyp_arch']])
context.SetEnv('GYP_GENERATORS', 'ninja')
context.SetEnv('GYP_CROSSCOMPILE', '1')
def ParseStandardCommandLine(context):
"""
The standard buildbot scripts require 3 arguments to run. The first
argument (dbg/opt) controls if the build is a debug or a release build. The
second argument (32/64) controls the machine architecture being targeted.
The third argument (newlib/glibc) controls which c library we're using for
the nexes. Different buildbots may have different sets of arguments.
"""
parser = optparse.OptionParser()
parser.add_option('-n', '--dry-run', dest='dry_run', default=False,
action='store_true', help='Do not execute any commands.')
parser.add_option('--inside-toolchain', dest='inside_toolchain',
default=bool(os.environ.get('INSIDE_TOOLCHAIN')),
action='store_true', help='Inside toolchain build.')
parser.add_option('--android', dest='android', default=False,
action='store_true', help='Build for Android.')
parser.add_option('--clang', dest='clang', default=False,
action='store_true', help='Build trusted code with Clang.')
parser.add_option('--coverage', dest='coverage', default=False,
action='store_true',
help='Build and test for code coverage.')
parser.add_option('--validator', dest='validator', default=False,
action='store_true',
help='Only run validator regression test')
parser.add_option('--asan', dest='asan', default=False,
action='store_true', help='Build trusted code with ASan.')
parser.add_option('--scons-args', dest='scons_args', default =[],
action='append', help='Extra scons arguments.')
parser.add_option('--step-suffix', metavar='SUFFIX', default='',
help='Append SUFFIX to buildbot step names.')
parser.add_option('--no-gyp', dest='no_gyp', default=False,
action='store_true', help='Do not run the gyp build')
parser.add_option('--no-goma', dest='no_goma', default=False,
action='store_true', help='Do not run with goma')
parser.add_option('--use-breakpad-tools', dest='use_breakpad_tools',
default=False, action='store_true',
help='Use breakpad tools for testing')
parser.add_option('--skip-build', dest='skip_build', default=False,
action='store_true',
help='Skip building steps in buildbot_pnacl')
parser.add_option('--skip-run', dest='skip_run', default=False,
action='store_true',
help='Skip test-running steps in buildbot_pnacl')
options, args = parser.parse_args()
if len(args) != 3:
parser.error('Expected 3 arguments: mode arch toolchain')
# script + 3 args == 4
mode, arch, toolchain = args
if mode not in ('dbg', 'opt', 'coverage'):
parser.error('Invalid mode %r' % mode)
if arch not in ARCH_MAP:
parser.error('Invalid arch %r' % arch)
if toolchain not in ('newlib', 'glibc', 'pnacl', 'nacl_clang'):
parser.error('Invalid toolchain %r' % toolchain)
# TODO(ncbray) allow a command-line override
platform = GetHostPlatform()
context['platform'] = platform
context['mode'] = mode
context['arch'] = arch
context['android'] = options.android
# ASan is Clang, so set the flag to simplify other checks.
context['clang'] = options.clang or options.asan
context['validator'] = options.validator
context['asan'] = options.asan
# TODO(ncbray) turn derived values into methods.
context['gyp_mode'] = {
'opt': 'Release',
'dbg': 'Debug',
'coverage': 'Debug'}[mode]
context['gn_is_debug'] = {
'opt': 'false',
'dbg': 'true',
'coverage': 'true'}[mode]
context['gyp_arch'] = ARCH_MAP[arch]['gyp_arch']
context['gyp_vars'] = []
if context['clang']:
context['gyp_vars'].append('clang=1')

quickresolve/accel.ai | flask-aws/lib/python2.7/site-packages/ebcli/containers/multicontainer.py | Python | mit | 2,306 | 0.001735
from . import commands
from . import compose
from . import dockerrun
from ..core import fileoperations
from ..objects.exceptions import CommandError
class MultiContainer(object):
"""
Immutable class used to run Multi-containers.
"""
PROJ_NAME = 'elasticbeanstalk'
def __init__(self, fs_handler, soln_stk, opt_env, allow_insecure_ssl=False):
"""
Constructor for MultiContainer.
:param fs_handler: MultiContainerFSHandler: manages file operations
:param soln_stk: SolutionStack: the solution stack
:param opt_env: EnvvarCollector: Optional env (--envvars) variables to add and remove
:param allow_insecure_ssl: bool: allow insecure connection to docker registry
"""
self.fs_handler = fs_handler
self.pathconfig = fs_handler.pathconfig
self.soln_stk = soln_stk
self.opt_env = opt_env
self.allow_insecure_ssl = allow_insecure_ssl
def start(self):
self._containerize()
self._remove()
self._up()
def validate(self):
dockerrun.validate_dockerrun_v2(self.fs_handler.dockerrun)
def is_running(self):
return any(commands.is_running(cid) for cid in self.list_services())
def list_services(self):
compose_path = self.pathconfig.compose_path()
compose_dict = fileoperations._get_yaml_dict(compose_path)
services = compose.iter_services(compose_dict)
# This is the way docker-compose names the containers/services
return ['{}_{}_1'.format(self.PROJ_NAME, s) for s in services]
def _containerize(self):
opt_env = self.opt_env
setenv_env = self.fs_handler.get_setenv_env()
# merged_env contains env. variables from "eb local --envvars x=y ..." and
# "
|
eb local setenv a=b ..." but not ones in Dockerrun.aws.json
merged_env = setenv_env.merge(opt_env)
self.fs_handler.make_docker_compose(merged_env)
def _up(self):
commands.up(compose_path=self.pathconfig.compose_path(),
allow_insecure_ssl=self.allow_insecure_ssl)
def _remove(self):
for service in self.list_services():
try:
commands.rm_container(service, force=True)
except CommandError:
pass

Nth-iteration-labs/streamingbandit | app/defaults/E-Greedy/get_action.py | Python | mit | 312 | 0.009615
e = .1
mean_list = base.List(self.get_theta(key="treatment"), base.Mean, ["control", "treatment"])
if np.random.binomial(1,e) == 1:
self.action["treatment"] = mean_list.random()
self.action["propensity"] = 0.1*0.5
else:
self.action["treatment"] = mean_list.max()
self.action["propensity"] = (1-e)

UltrasoundSam/TekDPO2000 | TekScope.py | Python | gpl-3.0 | 7,366 | 0.007738
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# TekScope.py
#
# Copyright 2016 Samuel Hill <samuel.hill@warwick.ac.uk>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import visa
import time
import numpy as np
class QueryException(Exception):
'''
Custom exception for showing that query to scope formatted
incorrectly
'''
pass
class DPOScope(object):
'''
Control class for Tektronix DPO2014 oscilloscopes, allowing for
computer control of the scope (setting parameters, etc) and for
reading data from scope.
'''
def __init__(self, scopeIdent=None):
'''
Initiates connections with scope, given a scope identification
string, such as "USB0::1689::883::C000489::0::INSTR" obtained
from visa.ResourceManager.list_resources().
If no scopeIdent is given, it just takes the first instrument
in the resources list
'''
rm = visa.ResourceManager('@py')
if not scopeIdent:
scopeIdent = rm.list_resources()[0]
self.visa = rm.open_resource(scopeIdent)
self.visa.read_termination = '\n'
# Get Make and model
make, model = self.get_param('*IDN?').split(',')[:2]
self.make = str(make)
self.model = str(model)
# Set scope running
self.set_param('ACQuire:STAte RUN')
def __repr__(self):
desc = ('This is a {0} {1} oscilloscope'.format(self.make, self.model))
return desc
def close(self):
'''
Closes object and shuts down connection to scope
'''
self.visa.close()
def open(self):
'''
Opens connection to scope
'''
try:
self.visa.open()
except Exception:
print('Scope already open!')
def set_param(self, message):
'''
Sends message to scope to change parameter
'''
try:
self.visa.write(message)
except AttributeError:
print('Can only send strings as a command')
def get_param(self, message):
'''
Queries scope for parameter value and returns it as a string.
For some reason, scope can only handle one read request before
it needs to be closed and then opened - quirk of DPO2000 series,
I think
'''
try:
# Check to see if valid query request
if not message.endswith('?'):
raise QueryException('Query must finish with ?')
# Send query, and reset scope
ans = self.visa.query(message)
self.visa.close()
self.visa.open()
return ans
except AttributeError:
print('Can only send strings as a command')
def get_data(self, channel):
'''
Gets data from oscilloscope for a given channel (CH1, CH2, etc)
and returns it as a (time, data) tuple.
'''
# Annoyingly, cannot get data in average mode (see self.average)
if self.get_param('ACQuire:MODe?') == 'AVE':
self.set_param('ACQuire:MODe SAMple')
time.sleep(1.)
self.set_param("SELect:{0} 1".format(channel))
# Select Data source and define format (RIBinary - big endian signed 'short' type)
self.set_param('DATa:SOUrce {0};:ENCdg RIBinary'.format(channel))
self.set_param('WFMOutpre:BYT_Nr 2')
# Find out length of signal
rcdlen = int(self.get_param('WFMOutpre:RECOrdlength?'))
# Requesting all the data out
self.set_param('DATa:STARt 1')
self.set_param('DATa:STOP {0}'.format(rcdlen))
self.set_param('DATa:WIDth 2')
# Process all metadata information
self.info = self.preamble()
# Now getting data from scope (C&O to avoid timeout errors)
data = self.visa.query_binary_values('CURVe?', container=np.array, is_big_endian=True, datatype='h')
self.close()
self.open()
# Reconstructing time information
t = self.info['XOffset'] + np.arange(0, self.info['Record_Len'])*self.info['XIncr']
return (t, data * self.info['YMult'])
def preamble(self):
'''
Processes all scope metainformation/preamble so all setting values
are known.
'''
# Read in the preamble and turn each value into a list
Pre = self.get_param('WFMOutpre?').split(';')
# Headers for each value
hdr = ['Byte_Nr', 'Bit_Nr', 'Encoding', 'Bin_Fmt', 'Byte_Ord',
'Params', 'Points_Requested', 'Point_Fmt', 'XUnit',
'XIncr', 'XOffset', 'CHoff', 'YUnit', 'YMult',
'YOffset', 'YZero', 'Composition', 'Record_Len', 'FilterFreq']
metainfo = dict(zip(hdr, Pre))
# Some values are better expressed as floats or integers
intvals = ['Byte_Nr', 'Bit_Nr', 'Record_Len', 'Points_Requested']
floatvals = ['YOffset', 'YZero', 'YMult', 'XIncr', 'XOffset']
for key in intvals:
metainfo[key] = int(metainfo[key])
for key in floatvals:
metainfo[key] = float(metainfo[key])
return metainfo
def average(self, channel, averages=4):
'''
Annoyingly, it doesn't seem like we can get data from the scope
if in averaging mode - so have to implement it ourselves. Use
read the data `averages` number of times, and then
average it. Consequently, it is very slow!
For more info on this, see
https://forum.tek.com/viewtopic.php?t=136577#p274677
Returns (time, data) tuple
'''
# Round averages up to next power of two
averages = int(2**np.ceil(np.log2(averages)))
# Use self.get_data once to get first data
t, buff = self.get_data(channel)
# Create numpy array with right shape to hold all data
values = np.zeros([averages, len(t)])
values[0] = buff
# Now loop and repeat measurements (not using self.get_data to minimise # queries
for i in xrange(averages-1):
buff = self.visa.query_binary_values('CURVe?', container=np.array, is_big_endian=True, datatype='h')
values[i+1] = buff * self.info['YMult']
self.close()
self.open()
return (t, values.mean(axis=0))
def reset(self):
'''
Resets scope to default settings
'''
self.set_param('*RST')

davidcandal/gr-tfg | examples/testNWK.py | Python | gpl-3.0 | 4,815 | 0.0081
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: TFG
# Author: David Candal
# Description: SDR ZigBee
# Generated: Mon Sep 19 21:01:18 2016
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
import os
import sys
sys.path.append(os.environ.get('GRC_HIER_PATH', os.path.expanduser('~/.grc_gnuradio')))
from PyQt4 import Qt
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from ieee802_15_4_oqpsk_phy import ieee802_15_4_oqpsk_phy # grc-generated hier_block
from optparse import OptionParser
import foo
import pmt
import tfg
class testNWK(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "TFG")
Qt.QWidget.__init__(self)
self.setWindowTitle("TFG")
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "testNWK")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Blocks
##################################################
self.tfg_test_nwk_0 = tfg.test_nwk()
self.tfg_NWK_stack_0 = tfg.NWK_stack(True, True)
self.tfg_MAC_stack_0 = tfg.MAC_stack(False, False)
self.ieee802_15_4_oqpsk_phy_0 = ieee802_15_4_oqpsk_phy()
self.foo_wireshark_connector_0 = foo.wireshark_connector(195, False)
self.foo_packet_pad_0 = foo.packet_pad(False, True, 0.1, 2000, 2000)
self.blocks_message_strobe_0 = blocks.message_strobe(pmt.intern("A"), 1000)
self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_char*1, '/tmp/sensor.pcap', False)
self.blocks_file_sink_0.set_unbuffered(True)
##################################################
# Connections
##################################################
self.msg_connect((self.blocks_message_strobe_0, 'strobe'), (self.tfg_test_nwk_0, 'entrada'))
self.msg_connect((self.ieee802_15_4_oqpsk_phy_0, 'rxout'), (self.foo_wireshark_connector_0, 'in'))
self.msg_connect((self.ieee802_15_4_oqpsk_phy_0, 'rxout'), (self.tfg_MAC_stack_0, 'in(PHY)'))
self.msg_connect((self.tfg_MAC_stack_0, 'out(PHY)'), (self.foo_wireshark_connector_0, 'in'))
self.msg_connect((self.tfg_MAC_stack_0, 'out(PHY)'), (self.ieee802_15_4_oqpsk_phy_0, 'txin'))
self.msg_connect((self.tfg_MAC_stack_0, 'out(NWK)'), (self.tfg_NWK_stack_0, 'in(MAC)'))
self.msg_connect((self.tfg_NWK_stack_0, 'out(MAC)'), (self.tfg_MAC_stack_0, 'in(NWK)'))
self.msg_connect((self.tfg_test_nwk_0, 'salida'), (self.tfg_NWK_stack_0, 'in(APS)'))
self.connect((self.foo_packet_pad_0, 0), (self.ieee802_15_4_oqpsk_phy_0, 0))
self.connect((self.foo_wireshark_connector_0, 0), (self.blocks_file_sink_0, 0))
self.connect((self.ieee802_15_4_oqpsk_phy_0, 0), (self.foo_packet_pad_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "testNWK")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def main(top_block_cls=testNWK, options=None):
if gr.enable_realtime_scheduling() != gr.RT_OK:
print "Error: failed to enable real-time scheduling."
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()

RevansChen/online-judge | Codewars/8kyu/geometry-basics-circle-area-in-2d/Python/solution1.py | Python | mit | 100 | 0.02
# Python - 3.6.0
circle_area = lambda circle: round(circle.radius ** 2 * __import__('math').pi, 6)

google/google-ctf | third_party/edk2/BaseTools/Source/Python/UPT/Parser/InfSourceSectionParser.py | Python | apache-2.0 | 5,413 | 0.003141
## @file
# This file contains the parser for [Sources] sections in an INF file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
InfSourceSectionParser
'''
##
# Import Modules
#
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Object.Parser.InfCommonObject import InfLineCommentObject
from Parser.InfParserMisc import InfParserSectionRoot
class InfSourceSectionParser(InfParserSectionRoot):
## InfSourceParser
#
#
def InfSourceParser(self, SectionString, InfSectionObject, FileName):
SectionMacros = {}
ValueList = []
SourceList = []
StillCommentFalg = False
HeaderComments = []
LineComment = None
SectionContent = ''
for Line in SectionString:
SrcLineContent = Line[0]
SrcLineNo = Line[1]
if SrcLineContent.strip() == '':
continue
#
# Found Header Comments
#
if SrcLineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
#
# Last line is comments, and this line go on.
#
if StillCommentFalg:
HeaderComments.append(Line)
SectionContent += SrcLineContent + DT.END_OF_LINE
continue
#
# First time encounter comment
#
else:
#
# Clear original data
#
HeaderComments = []
HeaderComments.append(Line)
StillCommentFalg = True
SectionContent += SrcLineContent + DT.END_OF_LINE
continue
else:
StillCommentFalg = False
if len(HeaderComments) >= 1:
LineComment = InfLineCommentObject()
LineCommentContent = ''
for Item in HeaderComments:
LineCommentContent += Item[0] + DT.END_OF_LINE
LineComment.SetHeaderComments(LineCommentContent)
#
# Find Tail comment.
#
if SrcLineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
TailComments = SrcLineContent[SrcLineContent.find(DT.TAB_COMMENT_SPLIT):]
SrcLineContent = SrcLineContent[:SrcLineContent.find(DT.TAB_COMMENT_SPLIT)]
if LineComment is None:
LineComment = InfLineCommentObject()
LineComment.SetTailComments(TailComments)
#
# Find Macro
#
Name, Value = MacroParser((SrcLineContent, SrcLineNo),
FileName,
DT.MODEL_EFI_SOURCE_FILE,
self.FileLocalMacros)
if Name is not None:
SectionMacros[Name] = Value
LineComment = None
HeaderComments = []
continue
#
# Replace with Local section Macro and [Defines] section Macro.
#
SrcLineContent = InfExpandMacro(SrcLineContent,
(FileName, SrcLineContent, SrcLineNo),
self.FileLocalMacros,
SectionMacros)
TokenList = GetSplitValueList(SrcLineContent, DT.TAB_VALUE_SPLIT, 4)
ValueList[0:len(TokenList)] = TokenList
#
# Store section content string after MACRO replaced.
#
SectionContent += SrcLineContent + DT.END_OF_LINE
SourceList.append((ValueList, LineComment,
(SrcLineContent, SrcLineNo, FileName)))
ValueList = []
LineComment = None
TailComments = ''
HeaderComments = []
continue
#
# Current section archs
#
ArchList = []
for Item in self.LastSectionHeaderContent:
if Item[1] not in ArchList:
ArchList.append(Item[1])
InfSectionObject.SetSupArchList(Item[1])
InfSectionObject.SetAllContent(SectionContent)
if not InfSectionObject.SetSources(SourceList, Arch = ArchList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % ("[Sources]"),
File=FileName,
Line=Item[3])

rahulg/eulerswift | setup.py | Python | mit | 1,487 | 0.002017
import sys
import EulerPy
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def readme():
with open('README.rst') as f:
return f.read()
def requirements():
install_requires = []
with open('requirements.txt') as f:
for line in f:
install_requires.append(line.strip())
# Terminal colors for Windows
if 'win32' in str(sys.platform).lower():
install_requires.append('colorama>=0.2.4')
return install_requires
setup(
name='EulerPy',
version=EulerPy.__version__,
description=EulerPy.__doc__.strip(),
long_description=readme(),
url='https://github.com/iKevinY/EulerPy',
author=EulerPy.__author__,
author_email='me@kevinyap.ca',
license=EulerPy.__license__,
packages=['EulerPy'],
entry_points={'console_scripts': ['euler = EulerPy.__main__:main']},
install_requires=requirements(),
classifiers=[
"License :: OSI Approved :: MIT License",
"Topic :: Utilities",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
],
keywords=['EulerPy', 'euler', 'project-euler', 'projecteuler'],
include_package_data=True,
zip_safe=False,
)

plotly/plotly.py | packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/_showexponent.py | Python | mit | 546 | 0
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="showexponent",
parent_name="scatter3d.marker.colorbar",
**kwargs
):
super(ShowexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)

iotaledger/iota.lib.py | iota/api_async.py | Python | mit | 57,097 | 0.000841
from typing import Dict, Iterable, Optional
from iota import AdapterSpec, Address, BundleHash, ProposedTransaction, Tag, \
TransactionHash, TransactionTrytes, TryteString, TrytesCompatible
from iota.adapter import BaseAdapter, resolve_adapter
from iota.commands import CustomCommand, core, extended
from iota.crypto.addresses import AddressGenerator
from iota.crypto.types import Seed
__all__ = [
'AsyncIota',
'AsyncStrictIota',
]
class AsyncStrictIota:
"""
Asynchronous API to send HTTP requests for communicating with an IOTA node.
This implementation only exposes the "core" API methods. For a more
feature-complete implementation, use :py:class:`AsyncIota` instead.
References:
- https://docs.iota.org/docs/node-software/0.1/iri/references/api-reference
:param AdapterSpec adapter:
URI string or BaseAdapter instance.
:param Optional[bool] devnet:
Whether to use devnet settings for this instance.
On the devnet, minimum weight magnitude is set to 9, on mainnet
it is 1 by default.
:param Optional[bool] local_pow:
Whether to perform proof-of-work locally by redirecting all calls
to :py:meth:`attach_to_tangle` to
`ccurl pow interface <https://pypi.org/project/PyOTA-PoW/>`_.
See :ref:`README:Optional Local Pow` for more info and
:ref:`find out<pow-label>` how to use it.
"""
def __init__(
self,
adapter: AdapterSpec,
devnet: bool = False,
local_pow: bool = False
) -> None:
"""
:param AdapterSpec adapter:
URI string or BaseAdapter instance.
:param bool devnet:
Whether to use devnet settings for this instance.
On the devnet, minimum weight magnitude is set to 9, on mainnet
it is 1 by default.
:param Optional[bool] local_pow:
Whether to perform proof-of-work locally by redirecting all calls
to :py:meth:`attach_to_tangle` to
`ccurl pow interface <https://pypi.org/project/PyOTA-PoW/>`_.
See :ref:`README:Optional Local Pow` for more info and
:ref:`find out<pow-label>` how to use it.
"""
super().__init__()
if not isinstance(adapter, BaseAdapter):
adapter = resolve_adapter(adapter)
self.adapter: BaseAdapter = adapter
# Note that the `local_pow` parameter is passed to adapter,
# the api class has no notion about it. The reason being,
# that this parameter is used in `AttachToTangeCommand` calls,
# that is called from various api calls (`attach_to_tangle`,
# `send_trytes` or `send_transfer`). Inside `AttachToTangeCommand`,
# we no longer have access to the attributes of the API class, therefore
# `local_pow` needs to be associated with the adapter.
# Logically, `local_pow` will decide if the api call does pow
# via pyota-pow extension, or sends the request to a node.
# But technically, the parameter belongs to the adapter.
self.adapter.set_local_pow(local_pow)
self.devnet = devnet
def create_command(self, command: str) -> CustomCommand:
"""
Creates a pre-configured CustomCommand instance.
This method is useful for invoking undocumented or experimental
methods, or if you just want to troll your node for awhile.
:param str command:
The name of the command to create.
"""
return CustomCommand(self.adapter, command)
def set_local_pow(self, local_pow: bool) -> None:
"""
Sets the :py:attr:`local_pow` attribute of the adapter of the api
instance. If it is ``True``, :py:meth:`~Iota.attach_to_tangle` command calls
external interface to perform proof of work, instead of sending the
request to a node.
By default, :py:attr:`local_pow` is set to ``False``.
This particular method is needed if one wants to change
local_pow behavior dynamically.
:param bool local_pow:
Whether to perform pow locally.
:returns: None
"""
self.adapter.set_local_pow(local_pow)
@property
def default_min_weight_magnitude(self) -> int:
"""
Returns the default ``min_weight_magnitude`` value to use for
API requests.
"""
return 9 if self.devnet else 14
async def add_neighbors(self, uris: Iterable[str]) -> dict:
"""
Add one or more neighbors to the node. Lasts until the node is
restarted.
:param Iterable[str] uris:
Use format ``<protocol>://<ip address>:<port>``.
Example: ``add_neighbors(['udp://example.com:14265'])``
.. note::
These URIs are for node-to-node communication (e.g.,
weird things will happen if you specify a node's HTTP
API URI here).
:return:
``dict`` with the following structure::
{
'addedNeighbors': int,
Total number of added neighbors.
'duration': int,
Number of milliseconds it took to complete the request.
}
References:
- https://docs.iota.org/docs/node-software/0.1/iri/references/api-reference#addneighbors
"""
return await core.AddNeighborsCommand(self.adapter)(uris=uris)
async def attach_to_tangle(
self,
trunk_transaction: TransactionHash,
branch_transaction: TransactionHash,
trytes: Iterable[TryteString],
min_weight_magnitude: Optional[int] = None,
) -> dict:
"""
Attaches the specified transactions (trytes) to the Tangle by
doing Proof of Work. You need to supply branchTransaction as
well as trunkTransaction (basically the tips which you're going
to validate and reference with this transaction) - both of which
you'll get through the :py:meth:`get_transactions_to_approve` API call.
The returned value is a different set of tryte values which you
can input into :py:meth:`broadcast_transactions` and
:py:meth:`store_transactions`.
:param TransactionHash trunk_transaction:
Trunk transaction hash.
:param TransactionHash branch_transaction:
Branch transaction hash.
:param Iterable[TransactionTrytes] trytes:
List of transaction trytes in the bundle to be attached.
:param Optional[int] min_weight_magnitude:
Minimum weight magnitude to be used for attaching trytes.
14 by default on mainnet, 9 on devnet/devnet.
:return:
``dict`` with the following structure::
{
'trytes': List[TransactionTrytes],
Transaction trytes that include a valid nonce field.
}
References:
- https://docs.iota.org/docs/node-software/0.1/iri/references/api-reference#attachtotangle
"""
if min_weight_magnitude is None:
min_weight_magnitude = self.default_min_weight_magnitude
return await core.AttachToTangleCommand(self.adapter)(
trunkTransaction=trunk_transaction,
branchTransaction=branch_transaction,
minWeightMagnitude=min_weight_magnitude,
trytes=trytes,
)
async def broadcast_transactions(self, trytes: Iterable[TryteString]) -> dict:
"""
Broadcast a list of transactions to all neighbors.
The input trytes for this call are provided by
:py:meth:`attach_to_tangle`.
:param Iterable[TransactionTrytes] trytes:
List of transaction trytes to be broadcast.
:return:
``dict`` with the following structure::
{
'duration': int,
Number of milliseconds it took to complete the request.
}
References:

edx/course-discovery | course_discovery/apps/course_metadata/migrations/0091_auto_20180727_1844.py | Python | agpl-3.0 | 1,487 | 0.00269
# Generated by Django 1.11.11 on 2018-07-27 18:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0090_degree_curriculum_reset'),
]
operations = [
migrations.AddField(
model_name='degree',
name='campus_image_desktop',
field=models.ImageField(blank=True, help_text='Provide a campus image to display on desktop displays', null=True, upload_to='media/degree_marketing/campus_images/'),
),
migrations.AddField(
model_name='degree',
name='campus_image_mobile',
field=models.ImageField(blank=True, help_text='Provide a campus image to display on mobile displays', null=True, upload_to='media/degree_marketing/campus_images/'),
),
migrations.AddField(
model_name='degree',
name='campus_image_tablet',
field=models.ImageField(blank=True, help_text='Provide a campus image to display on tablet displays', null=True, upload_to='media/degree_marketing/campus_images/'),
),
migrations.AddField(
model_name='degree',
name='overall_ranking',
field=models.CharField(blank=True, help_text='Overall program ranking (e.g. "#1 in the U.S.")', max_length=255),
),
migrations.AlterModelOptions(
name='degree',
options={'verbose_name_plural': 'Degrees'},
),
]

timothycrosley/thedom | thedom/social.py | Python | gpl-2.0 | 14,474 | 0.009811
"""
Social.py
Contains elements that enable connecting with external social sites.
Copyright (C) 2015 Timothy Edmund Crosley
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import hashlib
import urllib
from . import ClientSide, Factory, Layout
from .Base import Node, TextNode
from .Buttons import Link
from .Display import Image
Factory = Factory.Factory("Social")
class Social(Node):
__slots__ = ('account')
properties = Node.properties.copy()
properties['account'] = {'action':'classAttribute'}
def _create(self, name=None, id=None, parent=None, html="", *kargs, **kwargs):
Node._create(self, None, None, parent, *kargs, **kwargs)
self.account = ""
class TwitterBadge(Social):
"""
Displays a clickable twitter badge.
"""
def toHTML(self, formatted=False, *args, **kwargs):
"""
Returns the twitter badge as defined by the api directly
"""
return ("""<a href="https://twitter.com/%(account)s" class="twitter-follow-button" """ + \
"""data-show-count="false">Follow @%(account)s</a><script>!function(d,s,id){""" + \
"""var js,fjs=d.getElementsByTagName(s)[0];if(!d.getElementById(id)){js=d.createElement""" + \
"""(s);js.id=id;js.src="//platform.twitter.com/widgets.js";fjs.parentNode.insertBefore(""" + \
"""js,fjs);}}(document,"script","twitter-wjs");</script>""") % {'account':self.account}
Factory.addProduct(TwitterBadge)
class TwitterAPI(Node):
__slots__ = ()
def _create(self, name=None, id=None, parent=None, html="", *kargs, **kwargs):
Node._create(self, name, id, parent, *kargs, **kwargs)
self.addScript('window.twttr = (function (d,s,id) {'
'var t, js, fjs = d.getElementsByTagName(s)[0];'
'if (d.getElementById(id)) return; js=d.createElement(s); js.id=id;'
'js.src="https://platform.twitter.com/widgets.js"; fjs.parentNode.insertBefore(js, fjs);'
'return window.twttr || (t = { _e: [], ready: function(f){ t._e.push(f) } });'
'}(document, "script", "twitter-wjs"));')
Factory.addProduct(TwitterAPI)
class Tweet(Link):
__slots__ = ()
properties = Link.properties.copy()
properties['hideCount'] = {'action':'hideCount', 'type':'bool', 'info':"Don't show the number of re-tweets"}
properties['largeButton'] = {'action':'useLargeButton', 'type':'bool', 'info':'User larger tweet button size'}
properties['url'] = {'action':'attribute', 'name':'data-url', 'info':'Set the url the tweet will link to'}
properties['hashtag'] = {'action':'attribute', 'name':'data-hashtags', 'info':'Associated a hashtag to the tweet'}
properties['via'] = {'action':'attribute', 'name':'data-via', 'info':'Associated with another twitter account'}
properties['message'] = {'action':'attribute', 'name':'data-text', 'info':'The tweet message text'}
def _create(self, name=None, id=None, parent=None, *kargs, **kwargs):
Link._create(self, name, id, parent, *kargs, **kwargs)
self.setText("Tweet")
self.addClass("twitter-share-button")
self.setDestination("https://twitter.com/share")
def hideCount(self, hide=True):
if hide:
self.attributes['data-count'] = 'none'
else:
self.attributes.pop('data-count', None)
def useLargeButton(self, use=True):
if use:
self.attributes['data-size'] = 'large'
else:
self.attributes.pop('data-size', None)
def toHTML(self, formatted=False, *args, **kwargs):
"""
Adds the twitter script to the tweet button
"""
html = Link.toHTML(self, formatted, *args, **kwargs)
return html
Factory.addProduct(Tweet)
class GooglePlusAPI(Node):
__slots__ = ()
def _create(self, name=None, id=None, parent=None, html="", *kargs, **kwargs):
Node._create(self, name, id, parent, *kargs, **kwargs)
self.addScript("window.___gcfg = {lang:'en-US','parsetags':'explicit'};"
"(function() {var po = document.createElement('script');"
"po.type = 'text/javascript'; po.async = true;"
"po.src = 'https://apis.google.com/js/plusone.js';"
"var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(po, s);"
"})();")
Factory.addProduct(GooglePlusAPI)
class GooglePlusShare(Layout.Box):
__slots__ = ()
properties = Layout.Box.properties.copy()
properties['size'] = {'action':'attribute', 'name':'data-height', 'type':'int',
'info':"The Size of the of the button, 2 is large"}
properties['url'] = {'action':'attribute', 'name':'data-href', 'info':"The url the google plus button points to"}
def _create(self, name=None, id=None, parent=None, html="", *kargs, **kwargs):
Node._create(self, name, id, parent, *kargs, **kwargs)
self.addClass("g-plus")
self.attributes['data-action'] = "share"
self.attributes['data-annotation'] = "none"
Factory.addProduct(GooglePlusShare)
class GooglePlusBadge(Social):
"""
Displays a clickable google plus badge.
"""
__slots__ = ('link', )
def _create(self, name=None, id=None, parent=None, html="", *kargs, **kwargs):
Social._create(self, None, None, parent, *kargs, **kwargs)
self.link = self.add(Link())
self.link.attributes['rel'] = "publisher"
self.link.addClass("WGooglePlusBadge")
self.link += Image(src="https://ssl.gstatic.com/images/icons/gplus-32.png", alt="Google+")
def _render(self):
self.link.setDestination("https://plus.google.com/%s?prsrc=3" % self.account)
Factory.addProduct(GooglePlusBadge)
class FacebookLike(Social):
"""
Adds a facebook like link to your site
"""
def toHTML(self, formatted=False, *args, **kwargs):
return ("""<div class="fb-like" data-href="https://www.facebook.com/%s" data-send="false""" + \
"""data-layout="button_count" data-width="300" data-show-faces="false"></div>""") % self.account
Factory.addProduct(FacebookLike)
class FacebookAPI(Layout.Box):
"""
Adds facebook api support to your site and optionally calls the init method on it - only add once.
"""
__slots__ = ('loginURL', 'logoutURL', 'appId', 'init')
properties = Node.properties.copy()
properties['appId'] = {'action':'classAttribute'}
properties['init'] = {'action':'classAttribute', 'type':'bool'}
properties['loginURL'] = {'action':'classAttribute'}
properties['logoutURL'] = {'action':'classAttribute'}
class ClientSide(Layout.Box.ClientSide):
def feed(self, name, caption, description, link, picture=None, redirect=None, callback=None):
"""
Posts defined data to the users news feed.
"""
arguments = {'method':'feed', 'name':name, 'caption':caption, 'link':link}
if picture:
arguments['picture'] = picture
if redirect:
arguments['redirect_url'] = redirect
if callback:
return ClientSide.call("FB.ui", arguments, callback)
            if description:
                arguments['description'] = description
            return ClientSide.call("FB.ui", arguments)
|
leedoowon/MTraceCheck
|
src_main/codegen_common.py
|
Python
|
apache-2.0
| 10,228
| 0.003911
|
#!/usr/bin/python
##########################################################################
#
# MTraceCheck
# Copyright 2017 The Regents of the University of Michigan
# Doowon Lee and Valeria Bertacco
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
#
# This file should be called from codegen.py
#
####################################################################
# Data section
####################################################################
def generate_data_section(dataName, memLocs, strideType):
assert(memLocs <= 0x10000)
#dataArray = []
#for i in range(memLocs):
# data = [i & 0xFF, (i >> 8) & 0xFF, 0xFF, 0xFF]
# dataArray += data
## Data contents will be initialized in test manager, so just create a placeholder
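    # strideType selects the spacing between shared memory locations in the data
    # section: 0 packs consecutive 32-bit words, 1 leaves one word per 16-byte
    # stride, 2 leaves one word per 64-byte stride (matching the pointer
    # increments emitted by generate_test_manager below).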
if (strideType == 0):
dataArray = [0xFF for i in range(memLocs * 4 * 1)]
elif (strideType == 1):
dataArray = [0xFF for i in range(memLocs * 4 * 4)]
elif (strideType == 2):
dataArray = [0xFF for i in range(memLocs * 4 * 16)]
else:
assert(False)
dataFP = open(dataName, "w")
dataFP.write(bytearray(dataArray))
dataFP.close()
####################################################################
# BSS section (section to be written by test threads)
####################################################################
def generate_bss_section(bssName, bssSize):
#bssArray = []
#for i in range(bssSize):
# bssArray += [0x00]
#bssFP = open(bssName, "w")
#bssFP.write(bytearray(bssArray))
#bssFP.close()
# Faster code
bssFP = open(bssName, "wb")
bssFP.seek(bssSize-1)
bssFP.write("\0")
bssFP.close()
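# Illustrative check of the seek-then-write trick above (added example; the file
# name is an assumed placeholder, not part of the original source):
#   generate_bss_section("scratch.bss", 4096)
#   assert os.path.getsize("scratch.bss") == 4096   # single byte written at offset 4095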
####################################################################
# Test manager CPP file
####################################################################
def generate_test_manager(cppName, headerName, threadList, bssBase, bssSizePerThread, signatureSize, regBitWidth, numExecutions, strideType):
# See an example of cpp file at exp/160815_test_manager/test_manager.cpp
# (This example is possibly outdated)
wordTypeString = "uint%d_t" % regBitWidth
cppString = ""
cppString += "#include <stdio.h>\n"
cppString += "#include <stdlib.h>\n"
cppString += "#include <stdint.h>\n"
cppString += "#include <pthread.h>\n"
cppString += "#include <map>\n"
cppString += "#include <vector>\n"
cppString += "#include \"%s\"\n" % headerName
for thread in threadList:
cppString += "extern \"C\" void* thread%d_routine(void*);\n" % thread
cppString += "volatile int thread_spawn_lock = 0;\n"
cppString += "#ifdef EXEC_SYNC\n"
cppString += "volatile int thread_exec_barrier0 = 0;\n"
cppString += "volatile int thread_exec_barrier1 = 0;\n"
cppString += "volatile int thread_exec_barrier_ptr = 0;\n"
cppString += "#endif\n"
cppString += "int main()\n"
cppString += "{\n"
cppString += " int pthread_return;\n"
cppString += " int numThreads = %d;\n" % len(threadList)
cppString += " // Test BSS section initialization\n"
cppString += " %s *bss_address = (%s *) TEST_BSS_SECTION;\n" % (wordTypeString, wordTypeString)
cppString += " for (int i = 0; i < numThreads * TEST_BSS_SIZE_PER_THREAD; i += sizeof(%s)) {\n" % (wordTypeString)
cppString += " *(bss_address++) = 0;\n"
cppString += " }\n"
cppString += " // Test data section initialization\n"
cppString += " uint32_t *data_address= (uint32_t *) TEST_DATA_SECTION;\n"
cppString += " for (int i = 0; i < NUM_SHARED_DATA; i++) {\n"
cppString += " *data_address = (uint32_t) (0xFFFF0000 | i);\n"
if (strideType == 0):
cppString += " data_address++; // strideType = 0\n"
elif (strideType == 1):
cppString += " data_address+=4; // strideType = 1\n"
elif (strideType == 2):
cppString += " data_address+=16; // strideType = 2\n"
else:
assert(False)
cppString += " }\n"
cppString += " pthread_t* threads = (pthread_t *) malloc(sizeof(pthread_t) * numThreads);\n"
for threadIndex in range(len(threadList)):
cppString += " pthread_return = pthread_create(&threads[%d], NULL, thread%d_routine, NULL);\n" % (threadIndex, threadList[threadIndex])
cppString += " for (int t = 0; t < numThreads; t++)\n"
cppString += " pthread_return = pthread_join(threads[t], NULL);\n"
cppString += " std::map<std::vector<%s>, int> signatureMap;\n" % (wordTypeString)
cppString += " std::vector<%s> resultVector;\n" % (wordTypeString)
cppString += " %s *signature = (%s *) TEST_BSS_SECTION;\n" % (wordTypeString, wordTypeString)
cppString += " for (int i = 0; i < EXECUTION_COUNT; i++) {\n"
cppString += " resultVector.clear();\n"
#cppString += "#ifndef NO_PRINT\n"
cppString += "#if 0\n"
cppString += " printf(\"%8d:\", i);\n"
cppString += "#endif\n"
cppString += " for (int t = 0; t < numThreads; t++) {\n"
cppString += " for (int w = 0; w < SIGNATURE_SIZE_IN_WORD; w++) {\n"
# NOTE: SIGNATURE WORD REORDERING
#cppString += " for (int w = SIGNATURE_SIZE_IN_WORD - 1; w >= 0; w--) {\n"
#cppString += " for (int t = 0; t < numThreads; t++) {\n"
    cppString += "            %s address = (%s) signature + t * TEST_BSS_SIZE_PER_THREAD + w * sizeof(%s);\n" % (wordTypeString, wordTypeString, wordTypeString)
    cppString += "            %s result = (%s)*(%s*)address;\n" % (wordTypeString, wordTypeString, wordTypeString)
cppString += " resultVector.push_back(result);\n"
#cppString += "#ifndef NO_PRINT\n"
cppString += "#if 0\n"
cppString += " printf(\" 0x%%0%dlx\", result);\n" % (regBitWidth / 8 * 2)
#cppString += " printf(\" 0x%%lx 0x%%0%dlx\", address, result);\n" % signatureSize
cppString += "#endif\n"
cppString += " }\n"
cppString += " }\n"
cppString += " if (signatureMap.find(resultVector) == signatureMap.end())\n"
cppString += " signatureMap[resultVector] = 1;\n"
cppString += " else\n"
cppString += " signatureMap[resultVector]++;\n"
#cppString += "#ifndef NO_PRINT\n"
cppString += "#if 0\n"
cppString += " printf(\"\\n\");\n"
cppString += "#endif\n"
cppString += " signature += SIGNATURE_SIZE_IN_WORD;\n"
cppString += " }\n"
cppString += "#ifndef NO_PRINT\n"
cppString += " for (std::map<std::vector<%s>, int>::iterator it = signatureMap.begin(); it != signatureMap.end(); it++) {\n" % (wordTypeString)
cppString += " for (int i = 0; i < (it->first).size(); i++)\n"
cppString += " printf(\" 0x%%0%dlx\", (it->first)[i]);\n" % (regBitWidth / 8 * 2)
cppString += " printf(\": %d\\n\", it->second);\n"
cppString += " }\n"
cppString += "#endif\n"
cppString += " printf(\"Number of unique results %lu out of %d\\n\", signatureMap.size(), EXECUTION_COUNT);\n"
cppString += " fflush(stdout);\n"
cppString += " return 0;\n"
cppString += "}\n"
cppFP = open(cppName, "w")
cppFP.write(cppString)
cppFP.close()
def manager_common(headerName, dataName, dataBase, memLocs, bssName, bssBase, bssSizePerThread, cppName, threadList, signatureSize, regBitWidth, numExecutions, platform, strideType, verbosity):
if (platform == "linuxpthread"):
# Data section and BSS section
        generate_data_section(dataName, memLocs, strideType)
|
dunkhong/grr
|
grr/server/grr_response_server/flows/general/checks_test.py
|
Python
|
apache-2.0
| 4,415
| 0.004304
|
#!/usr/bin/env python
"""Test the collector flows."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from absl import app
from future.utils import iterkeys
from grr_response_core import config
from grr_response_core.lib.parsers import config_file
from grr_response_core.lib.parsers import linux_file_parser
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_server.check_lib import checks
from grr_response_server.check_lib import checks_test_lib
from grr_response_server.flows.general import checks as flow_checks
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import parser_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
# pylint: mode=test
class TestCheckFlows(flow_test_lib.FlowTestsBaseclass,
checks_test_lib.HostCheckTest):
checks_loaded = False
def setUp(self):
super(TestCheckFlows, self).setUp()
self.client_id = self.SetupClient(0)
# Only load the checks once.
if self.checks_loaded is False:
self.checks_loaded = self.LoadChecks()
if not self.checks_loaded:
raise RuntimeError("No checks to test.")
self.client_mock = action_mocks.FileFinderClientMock()
def SetupLinuxUser(self):
user = rdf_client.User(username="user1", homedir="/home/user1")
return self.SetupClient(0, system="Linux", users=[user], os_version="12.04")
def SetupWindowsUser(self):
return self.SetupClient(0, system="Windows", os_version="6.2")
def RunFlow(self, client_id):
with vfs_test_lib.FakeTestDataVFSOverrider():
session_id = flow_test_lib.TestFlowHelper(
flow_checks.CheckRunner.__name__,
client_mock=self.client_mock,
client_id=client_id,
token=self.token)
results = flow_test_lib.GetFlowResults(client_id, session_id)
return session_id, {r.check_id: r for r in results}
def LoadChecks(self):
"""Load the checks, returning the names of the checks that were loaded."""
checks.CheckRegistry.Clear()
check_configs = ("sshd.yaml", "sw.yaml", "unix_login.yaml")
cfg_dir = os.path.join(config.CONFIG["Test.data_dir"], "checks")
chk_files = [os.path.join(cfg_dir, f) for f in check_configs]
checks.LoadChecksFromFiles(chk_files)
return list(iterkeys(checks.CheckRegistry.checks))
def testSelectArtifactsForChecks(self):
client_id = self.SetupLinuxUser()
session_id, _ = self.RunFlow(client_id)
state = flow_test_lib.GetFlowState(self.client_id, session_id)
self.assertIn("DebianPackagesStatus", state.artifacts_wanted)
self.assertIn("SshdConfigFile", state.artifacts_wanted)
client_id = self.SetupWindowsUser()
session_id, _ = self.RunFlow(client_id)
state = flow_test_lib.GetFlowState(self.client_id, session_id)
self.assertIn("WMIInstalledSoftware", state.artifacts_wanted)
def testCheckFlowSelectsChecks(self):
"""Confirm the flow runs checks for a target machine."""
client_id = self.SetupLinuxUser()
_, results = self.RunFlow(client_id)
expected = ["SHADOW-HASH", "SSHD-CHECK", "SSHD-PERMS", "SW-CHECK"]
self.assertRanChecks(expected, results)
  @parser_test_lib.WithParser("Sshd", config_file.SshdConfigParser)
  @parser_test_lib.WithParser("Pswd", linux_file_parser.LinuxSystemPasswdParser)
  def testChecksProcessResultContext(self):
"""Test the flow returns parser results."""
client_id = self.SetupLinuxUser()
_, results = self.RunFlow(client_id)
# Detected by result_context: PARSER
exp = "Found: Sshd allows protocol 1."
self.assertCheckDetectedAnom("SSHD-CHECK", results, exp)
# Detected by result_context: RAW
exp = "Found: The filesystem supports stat."
found = ["/etc/ssh/sshd_config"]
self.assertCheckDetectedAnom("SSHD-PERMS", results, exp, found)
# Detected by result_context: ANOMALY
exp = "Found: Unix system account anomalies."
found = [
"Accounts with invalid gid.", "Mismatched passwd and shadow files."
]
self.assertCheckDetectedAnom("ODD-PASSWD", results, exp, found)
# No findings.
self.assertCheckUndetected("SHADOW-HASH", results)
self.assertCheckUndetected("SW-CHECK", results)
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
cvdlab/lar-running-demo
|
py/computation/old/step_calcchains_serial_tobinary_filter.py
|
Python
|
mit
| 8,263
| 0.049498
|
# -*- coding: utf-8 -*-
from lar import *
from scipy import *
import json
import scipy
import numpy as np
import time as tm
import gc
from pngstack2array3d import *
import struct
import getopt, sys
import traceback
#
import matplotlib.pyplot as plt
# ------------------------------------------------------------
# Logging & Timer
# ------------------------------------------------------------
logging_level = 0;
# 0 = no_logging
# 1 = few details
# 2 = many details
# 3 = many many details
def log(n, l):
if __name__=="__main__" and n <= logging_level:
for s in l:
print "Log:", s;
timer = 1;
timer_last = tm.time()
def timer_start(s):
global timer_last;
if __name__=="__main__" and timer == 1:
log(3, ["Timer start:" + s]);
timer_last = tm.time();
def timer_stop():
global timer_last;
if __name__=="__main__" and timer == 1:
log(3, ["Timer stop :" + str(tm.time() - timer_last)]);
# ------------------------------------------------------------
# Configuration parameters
# ------------------------------------------------------------
PNG_EXTENSION = ".png"
BIN_EXTENSION = ".bin"
# ------------------------------------------------------------
# Utility toolbox
# ------------------------------------------------------------
def countFilesInADir(directory):
return len(os.walk(directory).next()[2])
def isArrayEmpty(arr):
return all(e == 0 for e in arr)
# ------------------------------------------------------------
def writeOffsetToFile(file, offsetCurr):
file.write( struct.pack('>I', offsetCurr[0]) )
file.write( struct.pack('>I', offsetCurr[1]) )
file.write( struct.pack('>I', offsetCurr[2]) )
# ------------------------------------------------------------
def computeChains(imageHeight,imageWidth,imageDepth, imageDx,imageDy,imageDz, Nx,Ny,Nz, calculateout,bordo3, colors,pixelCalc,centroidsCalc, colorIdx,INPUT_DIR,DIR_O):
beginImageStack = 0
endImage = beginImageStack
MAX_CHAINS = colors
count = 0
fileName = "selettori-"
if (calculateout == True):
fileName = "output-"
saveTheColors = centroidsCalc
saveTheColors = sorted(saveTheColors.reshape(1,colors)[0])
# print str(imageHeight) + '-' + str(imageWidth) + '-' + str(imageDepth)
# print str(imageDx) + '-' + str(imageDy) + '-' + str(imageDz)
# print str(Nx) + '-' + str(Ny) + '-' + str(Nz)
with open(DIR_O+'/'+fileName+str(saveTheColors[colorIdx])+BIN_EXTENSION, "wb") as newFile:
for zBlock in xrange(imageDepth/imageDz):
startImage = endImage
endImage = startImage + imageDz
xEnd, yEnd = 0,0
theImage,colors,theColors = pngstack2array3d(INPUT_DIR, startImage, endImage, colors, pixelCalc, centroidsCalc)
theColors = theColors.reshape(1,colors)
if (sorted(theColors[0]) != saveTheColors):
log(1, [ "Error: colors have changed"] )
sys.exit(2)
for xBlock in xrange(imageHeight/imageDx):
for yBlock in xrange(imageWidth/imageDy):
xStart, yStart = xBlock * imageDx, yBlock * imageDy
xEnd, yEnd = xStart+imageDx, yStart+imageDy
image = theImage[:, xStart:xEnd, yStart:yEnd]
nz,nx,ny = image.shape
count += 1
# Compute a quotient complex of chains with constant field
# ------------------------------------------------------------
chains3D_old = [];
chains3D = None
if (calculateout != True):
chains3D = np.zeros(nx*ny*nz,dtype=int32);
zStart = startImage - beginImageStack;
def addr(x,y,z): return x + (nx) * (y + (ny) * (z))
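                    # e.g. (illustrative) with nx = ny = 64: addr(1, 2, 3) == 1 + 64 * (2 + 64 * 3) == 12417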
hasSomeOne = False
if (calculateout == True):
for x in xrange(nx):
for y in xrange(ny):
for z in xrange(nz):
if (image[z,x,y] == saveTheColors[colorIdx]):
chains3D_old.append(addr(x,y,z))
else:
for x in xrange(nx):
for y in xrange(ny):
for z in xrange(nz):
if (image[z,x,y] == saveTheColors[colorIdx]):
hasSomeOne = True
chains3D[addr(x,y,z)] = 1
# Compute the boundary complex of the quotient cell
# ------------------------------------------------------------
objectBoundaryChain = None
if (calculateout == True) and (len(chains3D_old) > 0):
objectBoundaryChain = larBoundaryChain(bordo3,chains3D_old)
# Save
if (calculateout == True):
if (objectBoundaryChain != None):
writeOffsetToFile( newFile, np.array([zStart,xStart,yStart], dtype=int32) )
newFile.write( bytearray( np.array(objectBoundaryChain.toarray().astype('b').flatten()) ) )
else:
if (hasSomeOne != False):
writeOffsetToFile( newFile, np.array([zStart,xStart,yStart], dtype=int32) )
newFile.write( bytearray( np.array(chains3D, dtype=np.dtype('b')) ) )
def runComputation(imageDx,imageDy,imageDz, colors,coloridx,calculateout, V,FV, INPUT_DIR,BEST_IMAGE,BORDER_FILE,DIR_O):
bordo3 = None
if (calculateout == True):
with open(BORDER_FILE, "r") as file:
bordo3_json = json.load(file)
ROWCOUNT = bordo3_json['ROWCOUNT']
COLCOUNT = bordo3_json['COLCOUNT']
ROW = np.asarray(bordo3_json['ROW'], dtype=np.int32)
COL = np.asarray(bordo3_json['COL'], dtype=np.int32)
DATA = np.asarray(bordo3_json['DATA'], dtype=np.int8)
bordo3 = csr_matrix((DATA,COL,ROW),shape=(ROWCOUNT,COLCOUNT));
imageHeight,imageWidth = getImageData(INPUT_DIR+str(BEST_IMAGE)+PNG_EXTENSION)
imageDepth = countFilesInADir(INPUT_DIR)
Nx,Ny,Nz = imageHeight/imageDx, imageWidth/imageDx, imageDepth/imageDz
try:
pixelCalc, centroidsCalc = centroidcalc(INPUT_DIR, BEST_IMAGE, colors)
computeChains(imageHeight,imageWidth,imageDepth, imageDx,imageDy,imageDz, Nx,Ny,Nz, calculateout,bordo3, colors,pixelCalc,centroidsCalc, coloridx,INPUT_DIR,DIR_O)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
log(1, [ "Error: " + ''.join('!! ' + line for line in lines) ]) # Log it or whatever here
sys.exit(2)
def main(argv):
    ARGS_STRING = 'Args: -r -b <borderfile> -x <borderX> -y <borderY> -z <borderZ> -i <inputdirectory> -c <colors> -d <coloridx> -o <outputdir> -q <bestimage>'
try:
opts, args = getopt.getopt(argv,"rb:x:y:z:i:c:d:o:q:")
except getopt.GetoptError:
print ARGS_STRING
sys.exit(2)
nx = ny = nz = imageDx = imageDy = imageDz = 64
colors = 2
coloridx = 0
mandatory = 6
calculateout = False
#Files
BORDER_FILE = 'bordo3.json'
BEST_IMAGE = ''
DIR_IN = ''
DIR_O = ''
for opt, arg in opts:
if opt == '-x':
nx = ny = nz = imageDx = imageDy = imageDz = int(arg)
mandatory = mandatory - 1
elif opt == '-y':
ny = nz = imageDy = imageDz = int(arg)
elif opt == '-z':
nz = imageDz = int(arg)
elif opt == '-r':
calculateout = True
elif opt == '-i':
DIR_IN = arg + '/'
mandatory = mandatory - 1
elif opt == '-b':
BORDER_FILE = arg
mandatory = mandatory - 1
elif opt == '-o':
mandatory = mandatory - 1
DIR_O = arg
elif opt == '-c':
mandatory = mandatory - 1
colors = int(arg)
elif opt == '-d':
mandatory = mandatory - 1
coloridx = int(arg)
elif opt == '-q':
BEST_IMAGE = int(arg)
if mandatory != 0:
        print 'Not all arguments were given'
print ARGS_STRING
sys.exit(2)
if (coloridx >= colors):
        print 'Not all arguments were given (coloridx >= colors)'
print ARGS_STRING
sys.exit(2)
def ind(x,y,z): return x + (nx+1) * (y + (ny+1) * (z))
def invertIndex(nx,ny,nz):
nx,ny,nz = nx+1,ny+1,nz+1
def invertIndex0(offset):
a0, b0 = offset / nx, offset % nx
a1, b1 = a0 / ny, a0 % ny
a2, b2 = a1 / nz, a1 % nz
return b0,b1,b2
return invertIndex0
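    # Illustrative round trip (assumed sizes, not in the original source): with
    # nx = ny = nz = 2, ind(1, 0, 1) == 1 + 3 * (0 + 3 * 1) == 10 and
    # invertIndex(2, 2, 2)(10) == (1, 0, 1).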
chunksize = nx * ny + nx * nz + ny * nz + 3 * nx * ny * nz
V = [[x,y,z] for z in range(nz+1) for y in range(ny+1) for x in range(nx+1) ]
v2coords = invertIndex(nx,ny,nz)
FV = []
for h in range(len(V)):
x,y,z = v2coords(h)
if (x < nx) and (y < ny): FV.append([h,ind(x+1,y,z),ind(x,y+1,z),ind(x+1,y+1,z)])
if (x < nx) and (z < nz): FV.append([h,ind(x+1,y,z),ind(x,y,z+1),ind(x+1,y,z+1)])
if (y < ny) and (z < nz): FV.append([h,ind(x,y+1,z),ind(x,y,z+1),ind(x,y+1,z+1)])
    runComputation(imageDx, imageDy, imageDz, colors, coloridx, calculateout, V, FV, DIR_IN, BEST_IMAGE, BORDER_FILE, DIR_O)
|
olbat/distem
|
test/experimental_testing/exps/latency.py
|
Python
|
gpl-3.0
| 1,573
| 0.003814
|
#!/usr/bin/env python
# this program is used to test latency
# don't test RTT bigger than 3 secs - it will break
# we make sure that nothing breaks if there is a packet missing
# this can rarely happen
import select
import socket
import time
import sys
import struct
def pong():
# easy, receive and send back
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('0.0.0.0', 1234))
while True:
c, addr = s.recvfrom(1)
s.sendto(c, (addr[0], 1235))
if c == 'x':
break
print 'Finished'
return 0
def ping(addr, n):
# send and wait for it back
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('0.0.0.0', 1235))
succ = 0
errs = 0
while succ != n and errs < 3: # at most 3 lost packets
time.sleep(0.02) # wait a bit
start = time.time()
s.sendto('r', (addr, 1234))
h, _, _ = select.select([s], [], [], 3) # wait 3 seconds
end = time.time()
if h == []: # lost packet
# print '# lost packet'
errs += 1
continue
s.recv(1) # eat the response
succ += 1
        print '%.8f' % (end - start)
for x in xrange(10):
# send many packets to be (almost) sure the other end is done
s.sendto('x', (addr, 1234))
return errs >= 3
if __name__ == '__main__':
if 'ping' in sys.argv:
ret = ping(sys.argv[2], int(sys.argv[3]))
elif 'pong' in sys.argv:
ret = pong()
else:
print 'ping or pong?'
ret = 1
    sys.exit(ret)
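# Illustrative usage (added note; host names and sample count are assumed examples):
#   on the receiver:  python latency.py pong
#   on the sender:    python latency.py ping <receiver-address> 100
# The sender prints one round-trip time in seconds per successful sample.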
|
genehallman/node-berkeleydb
|
deps/db-18.1.40/dist/winmsi/genWix.py
|
Python
|
mit
| 10,795
| 0.036035
|
#
#
# genWix.py is used to generate a WiX .wxs format file that
# can be compiled by the candle.exe WiX compiler.
#
# Usage: python genWix.py <output_file>
#
# The current directory is expected to be the top of a tree
# of built programs, libraries, documentation and files.
#
# The list of directories traversed is at the bottom of this script,
# in "main." Extra directories that do not exist are fine and will
# be ignored. That makes the script a bit more general-purpose.
#
# "Excluded" directories/files are listed below in the GenWix class
# constructor in the excludes variable. These will *not* be included
# in packaging.
#
# The output file is expected to be post-processed using XQuery Update
# to add ComponentGroup elements for the various WiX Feature elements.
#
# The generated output for each directory traversed will look like:
# <Directory Id="dir_dirname_N" Name="dirname">
# <Component DiskId="1" Guid="..." Id="some_id" KeyPath="yes">...
# <File Id="..." Name="..." Source="pathtofile"/>
# <File.../>
# </Component>
# </Directory>
#
# Subdirectories are new elements under each top-level Directory element
#
# NOTE: at this time each top-level directory is its own Component. This
# mechanism does NOT generate multiple Components in a single Directory.
# That should be done as an enhancement to allow, for example, the "bin"
# directory to contain files that are part of multiple Components such
# as "runtime" "java" "sql" etc.
# WiX will do this but this script plus the generateGroups.xq XQuery script
# cannot (yet). Doing that will be a bit of work as well as creating
# additional lists of files that indicate their respective Components.
#
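# Illustrative invocation (assumed tree layout, summarising the usage notes above):
#   cd <top-of-built-tree>
#   python genWix.py all.wxs
#   ... post-process all.wxs with the XQuery Update step described above ...
#   candle.exe all.wxs          # compile the (post-processed) WiX source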
import sys
import os
class GenWix:
def __init__(self, sourcePfx, outfile, dbg):
self.debugOn = dbg
self.componentId = 0
self.indentLevel = 0
self.indentIncr = 2
self.shortId = 0
self.fragName="all"
self.refDirectory = "INSTALLDIR"
self.compPrefix = ""
self.dirPrefix = "dir"
self.sourcePrefix = os.path.normpath(sourcePfx)
# use excludes to exclude paths, e.g. add files to the array:
# ...os.path.normpath("dbxml/test"), os.path.normpath("a/b/c")...
self.excludes = []
self.groups = ["group_csharp", "group_cxx", "group_devo", "group_doc", "group_examples", "group_java", "group_runtime"]
self.groupfiles = ["group.csharp", "group.cxx", "group.devo", "group.doc", "group.examples", "group.java", "group.runtime"]
self.groupcontent = ["","","","","","","","",""]
self.outputFile = outfile
self.out = open(self.outputFile, "ab")
self.out.truncate(0)
self.initGroupFiles()
def __del__(self):
self.out.close()
def initGroupFiles(self):
idx = 0
for file in self.groupfiles:
f = open(file, 'r')
self.groupcontent[idx] = os.path.normpath(f.read())
f.close()
idx = idx + 1
def checkExclude(self, fname):
for ex in self.excludes:
if fname.find(ex) != -1:
return True
return False
# NOTE: this will count leading/trailing '/'
def count(self, path):
return len(path.split("/"))
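    # e.g. (illustrative): count("a/b") == 2, while count("/a/b/") == 4 because the
    # leading and trailing '/' each contribute an empty component.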
def nextId(self):
self.componentId = self.componentId + 1
def printComponentId(self, fragname):
return self.makeId("%s_%s_%d"%(self.compPrefix,fragname,self.componentId))
def printDirectoryId(self,dirname):
return self.makeId("%s_%s_%d"%(self.dirPrefix,dirname,self.componentId))
def indent(self, arg):
if arg == "-" and self.indentLevel != 0:
self.indentLevel = self.indentLevel - self.indentIncr
i = 0
while i != self.indentLevel:
self.out.write(" ")
i = i+1
if arg == "+":
self.indentLevel = self.indentLevel + self.indentIncr
def echo(self, arg, indentArg):
self.indent(indentArg)
#sys.stdout.write(arg+"\n")
self.out.write(arg+"\n")
def generateGuid(self):
if sys.version_info[1] < 5:
return "REPLACE_WITH_GUID"
else:
import uuid
return uuid.uuid1()
# used by makeShortName
def cleanName(self, name):
for c in ("-","%","@","!"):
name = name.replace(c,"")
return name
    def makeId(self, id):
tid = id.replace("-","_")
if len(tid) > 70:
#print "chopping string %s"%tid
tid = tid[len(tid)-70:len(tid)]
# id can't start with a number...
i = 0
while 1:
try:
int(tid[i])
except:
break
i = i+1
return tid[i:len(tid)]
return tid
    # turn names into Windows 8.3 names.
# A semi-unique "ID" is inserted, using 3 bytes of hex,
# which gives us a total of 4096 "unique" IDs. If
# that number is exceeded in one class instance, a bad
# name is returned, which will eventually cause a
# recognizable failure. Names look like: ABCD~NNN.EXT
# E.g. NAMEISLONG.EXTLONG => NAME~123.EXT
#
def makeShortName(self, longName):
name = longName.upper()
try:
index = name.find(".")
except ValueError:
index = -1
if index == -1:
if len(name) <= 8:
return longName
after = ""
else:
if index <= 8 and (len(name) - index) <= 4:
return longName
after = "." + name[index+1:index+4]
after = self.cleanName(after)
self.shortId = self.shortId + 1
if self.shortId >= 4096: # check for overflow of ID space
return "too_many_ids.bad" # will cause a failure...
hid = hex(self.shortId)
name = self.cleanName(name) # remove stray chars
        # first 4 chars + ~ + Id + . + extension
return name[0:4]+"~"+str(hid)[2:5]+after
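    # Illustrative mappings (assume a fresh GenWix instance, so the first hex id is 0x1):
    #   makeShortName("NAMEISLONG.EXTLONG") -> "NAME~1.EXT"
    #   makeShortName("short.txt")          -> "short.txt"   (already fits 8.3, returned unchanged)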
def makeFullPath(self, fname, root):
return os.path.join(self.sourcePrefix,os.path.join(root,fname))
def makeNames(self, fname):
return "Name=\'%s\'"%fname
#shortName = self.makeShortName(fname)
#if shortName != fname:
# longName="LongName=\'%s\'"%fname
#else:
# longName=""
#return "Name=\'%s\' %s"%(shortName,longName)
def generateFile(self, fname, root, dirId):
# allow exclusion of individual files
if self.checkExclude(os.path.join(root,fname)):
self.debug("excluding %s\n"%os.path.join(root,fname))
return
idname = self.makeId("%s_%s"%(dirId,fname))
elem ="<File Id=\'%s\' %s Source=\'%s\' />"%(idname,self.makeNames(fname),self.makeFullPath(fname, root))
self.echo(elem,"")
def startDirectory(self, dir, parent):
# use parent dirname as part of name for more uniqueness
self.debug("Starting dir %s"%dir)
self.nextId()
dirId = self.printDirectoryId(dir)
elem ="<Directory Id=\'%s\' %s>"%(dirId,self.makeNames(dir))
self.echo(elem,"+")
return dirId
def endDirectory(self, dir):
self.debug("Ending dir %s"%dir)
self.echo("</Directory>","-")
def startComponent(self, dir, group):
self.debug("Starting Component for dir %s, group %s"%(dir,group))
# Use the group name in the component id so it can be used later
celem ="<Component Id=\'%s\' DiskId='1' KeyPath='yes' Guid=\'%s\'>"%(self.printComponentId(group),self.generateGuid())
self.echo(celem,"+")
def endComponent(self, dir, group):
self.debug("Ending Component for dir %s, group %s"%(dir,group))
self.echo("</Component>","-")
def generatePreamble(self):
# leave off the XML decl and Wix default namespace -- candle.exe
# doesn't seem to care and it makes updating simpler
self.echo("<Wix>","+")
self.echo("<Fragment>","+")
self.echo("<DirectoryRef Id='%s'>"%self.refDirectory,"+")
def generateClose(self):
self.echo("</DirectoryRef>","-")
self.echo("</Fragment>","-")
self.echo("</Wix>","-")
def debug(self, msg):
if self.debugOn:
sys.stdout.write(msg+"\n")
def generateDir(self, dir, path):
fullPath = os.path.join(path,dir)
if self.checkExclude(fullPath):
self.debug("excluding %s\n"%fullPath)
return
# ignore top-level directories that are missing, or other
# errors (e.g. regular file)
try:
files = os.listdir(fullPath)
except:
return
# check for empty dir (this won't detect directories that contain
# only empty directories -- just don't do that...)
if len(files) == 0:
self.debug("skipping empty dir %s"%dir)
|
chrisdjscott/Atoman
|
atoman/filtering/filterer.py
|
Python
|
mit
| 18,810
| 0.005848
|
"""
The filterer object.
@author: Chris Scott
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import time
import logging
import numpy as np
import six
from six.moves import zip
from .filters import _filtering as filtering_c
from ..system.atoms import elements
from . import voronoi
from .filters import base
from . import filters
from . import atomStructure
from ..rendering import _rendering
class Filterer(object):
"""
Filterer class.
Applies the selected filters in order.
"""
# known atom structure types
knownStructures = atomStructure.knownStructures
# all available filters
defaultFilters = [
"Species",
"Point defects",
"Crop box",
"Cluster",
"Displacement",
"Charge",
"Crop sphere",
"Slice",
"Coordination number",
"Voronoi neighbours",
"Voronoi volume",
"Bond order",
"Atom ID",
"ACNA",
"Slip",
"Bubbles",
]
defaultFilters.sort()
# filters that are compatible with the 'Point defects' filter
defectCompatibleFilters = [
"Crop box",
"Slice",
]
def __init__(self, voronoiOptions):
self.logger = logging.getLogger(__name__)
self.voronoiOptions = voronoiOptions
self._driftCompensation = False
self.reset()
def toggleDriftCompensation(self, driftCompensation):
"""Toggle the drift setting."""
self._driftCompensation = driftCompensation
def reset(self):
"""
Reset to initial state.
"""
self.inputState = None
self.refState = None
self.currentFilters = []
self.currentSettings = []
self.visibleAtoms = np.asarray([], dtype=np.int32)
self.interstitials = np.asarray([], dtype=np.int32)
self.vacancies = np.asarray([], dtype=np.int32)
self.antisites = np.asarray([], dtype=np.int32)
self.onAntisites = np.asarray([], dtype=np.int32)
self.splitInterstitials = np.asarray([], dtype=np.int32)
self.visibleSpecieCount = np.asarray([], dtype=np.int32)
self.vacancySpecieCount = np.asarray([], dtype=np.int32)
self.interstitialSpecieCount = np.asarray([], dtype=np.int32)
self.antisiteSpecieCount = np.asarray([], dtype=np.int32)
self.splitIntSpecieCount = np.asarray([], dtype=np.int32)
self.driftVector = np.zeros(3, np.float64)
self.clusterList = []
self.bubbleList = []
self.structureCounterDicts = {}
self.voronoiAtoms = voronoi.VoronoiAtomsCalculator(self.voronoiOptions)
self.voronoiDefects = voronoi.VoronoiDefectsCalculator(self.voronoiOptions)
self.scalarsDict = {}
self.latticeScalarsDict = {}
self.vectorsDict = {}
self.defectFilterSelected = False
self.bubblesFilterSelected = False
self.spaghettiAtoms = np.asarray([], dtype=np.int32)
def runFilters(self, currentFilters, currentSettings, inputState, refState, sequencer=False):
"""
Run the filters.
"""
# time
runFiltersTime = time.time()
# reset the filterer
self.reset()
# validate the list of filters
defectFilterSelected = False
bubblesFilterSelected = False
for filterName in currentFilters:
if filterName not in self.defaultFilters and not filterName.startswith("Scalar:"):
# TODO: check the scalar exists too
raise ValueError("Unrecognised filter passed to Filterer: '%s'" % filterName)
# check if the defect filter in the list
if filterName == "Point defects":
defectFilterSelected = True
elif filterName == "Bubbles":
bubblesFilterSelected = True
self.logger.debug("Defect filter selected: %s", defectFilterSelected)
self.defectFilterSelected = defectFilterSelected
self.bubblesFilterSelected = bubblesFilterSelected
# store refs to inputs
self.inputState = inputState
self.refState = refState
self.currentFilters = currentFilters
self.currentSettings = currentSettings
# set up visible atoms or defect arrays
if not defectFilterSelected:
self.logger.debug("Setting all atoms visible initially")
self.visibleAtoms = np.arange(inputState.NAtoms, dtype=np.int32)
self.logger.info("%d visible atoms", len(self.visibleAtoms))
# set Lattice scalars
self.logger.debug("Adding initial scalars from inputState")
for scalarsName, scalars in six.iteritems(inputState.scalarsDict):
self.logger.debug(" Adding '%s' scalars", scalarsName)
self.latticeScalarsDict[scalarsName] = copy.deepcopy(scalars)
# set initial vectors
self.logger.debug("Adding initial vectors from inputState")
for vectorsName, vectors in six.iteritems(inputState.vectorsDict):
self.logger.debug(" Adding '%s' vectors", vectorsName)
self.vectorsDict[vectorsName] = vectors
else:
# initialise defect arrays
self.interstitials = np.empty(inputState.NAtoms, dtype=np.int32)
self.vacancies = np.empty(refState.NAtoms, dtype=np.int32)
self.antisites = np.empty(refState.NAtoms, dtype=np.int32)
self.onAntisites = np.empty(refState.NAtoms, dtype=np.int32)
self.splitInterstitials = np.empty(3 * refState.NAtoms, dtype=np.int32)
# drift compensation
if self._driftCompensation:
filtering_c.calculate_drift_vector(inputState.NAtoms, inputState.pos, refState.pos,
refState.cellDims, inputState.PBC, self.driftVector)
self.logger.info("Calculated drift vector: (%f, %f, %f)" % tuple(self.driftVector))
# run filters
applyFiltersTime = time.time()
for filterName, filterSettings in zip(currentFilters, currentSettings):
# determine the name of filter module to be loaded
if filterName.startswith("Scalar: "):
moduleName = "genericScalarFilter"
filterObjectName = "GenericScalarFilter"
else:
words = str(filterName).title().split()
                filterObjectName = "%sFilter" % "".join(words)
moduleName = filterObjectName[:1].lower() + filterObjectName[1:]
self.logger.debug("Loading filter module: '%s'", moduleName)
self.logger.debug("Creating filter object: '%s'", filterObjectName)
# get module
            filterModule = getattr(filters, moduleName)
# load dialog
filterObject = getattr(filterModule, filterObjectName, None)
if filterObject is None:
self.logger.error("Could not locate filter object for: '%s'", filterName)
else:
self.logger.info("Running filter: '%s'", filterName)
# filter
filterObject = filterObject(filterName)
# construct filter input object
filterInput = base.FilterInput()
filterInput.visibleAtoms = self.visibleAtoms
filterInput.inputState = inputState
filterInput.refState = refState
filterInput.voronoiOptions = self.voronoiOptions
filterInput.bondDict = elements.bondDict
filterInput.NScalars, filterInput.fullScalars = self.makeFullScalarsArray()
filterInput.NVectors, filterInput.fullVectors = self.makeFullVectorsArray()
filterInput.voronoiAtoms = self.voronoiAtoms
filterInput.voronoiDefects = self.voronoiDefects
                filterInput.driftCompensation = self._driftCompensation
|
grantstephens/pyluno
|
setup.py
|
Python
|
mit
| 1,485
| 0
|
from setuptools import setup, find_packages
with open('pyluno/meta.py') as f:
exec(f.read())
setup(
name='pyluno',
version=__version__,
packages=find_packages(exclude=['tests']),
description='A Luno API for Python',
author='Cayle Sharrock/Grant Stephens',
author_email='grant@stephens.co.za',
scripts=['demo.py'],
install_requires=[
'futures>=3.0.3',
'nose>=1.3.7',
'requests>=2.8.1',
'pandas>=0.17.0',
],
license='MIT',
    url='https://github.com/grantstephens/pyluno',
download_url='https://github.com/grantstephens/pyluno/tarball/%s'
% (__version__, ),
keywords='Luno Bitcoin exchange API',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Office/Business :: Financial',
'Topic :: Utilities',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
extras_require={
'test': ['requests-mock>=0.7.0', 'nose'],
}
)
|
HarmonyEnterpriseSolutions/harmony-platform
|
src/gnue/common/datasources/GLoginHandler.py
|
Python
|
gpl-2.0
| 9,003
| 0.026325
|
# GNU Enterprise Common Library - Base Login Handler
#
# Copyright 2000-2007 Free Software Foundation
#
# This file is part of GNU Enterprise.
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# $Id: GLoginHandler.py 9222 2007-01-08 13:02:49Z johannes $
"""
Classes for login handlers.
"""
__all__ = ['UserCanceledLogin', 'LoginHandler', 'BasicLoginHandler']
import getpass
from gnue.common.apps import errors
# =============================================================================
# Exceptions
# =============================================================================
class UserCanceledLogin (errors.UserError):
"""
User cancelled login request (by pressing <Esc>, hitting the <Abort> button
etc.).
"""
def __init__ (self):
msg = u_("User canceled the login request.")
errors.UserError.__init__ (self, msg)
# =============================================================================
# Base class for all login handler
# =============================================================================
class LoginHandler:
"""
Abstract base class for all login handlers.
A login handler is an object that asks the user for login data. Different
user interfaces (e.g. gtk2, curses, qt3...) implement different login
handlers.
"""
# ---------------------------------------------------------------------------
	# Get login information (deprecated)
# ---------------------------------------------------------------------------
def getLogin (self, requiredFields, errortext = None):
"""
		DEPRECATED: get information for the given fields and return a dictionary
@param requiredFields: sequence of [connection name, description, sequence
of fields (name, label, is password)]
@param errortext: message of the last error occured
@raises UserCanceledLogin: if the user canceled the login request
"""
pass
# ---------------------------------------------------------------------------
# Called when the app no longer needs the login handler
# ---------------------------------------------------------------------------
def destroyLoginDialog (self):
"""
		DEPRECATED
"""
pass
# ---------------------------------------------------------------------------
# Ask for all fields given by the field definitions
# ---------------------------------------------------------------------------
def askLogin (self, title, fielddefs, defaultData, lastError = None):
"""
Ask for login information as specified by the given field definitions using
the given default data.
@param title: title for the login dialog
@param fielddefs: sequence of field definitions for InputDialogs
@param defaultData: dictionary with default values
@param lastError: last error message or None
@raises UserCanceledLogin: if the user canceled the login request
@return: dictionary of all keys/values the user has entered.
"""
fields = []
for (label, name, ftype, default, master, elements) in fielddefs:
default = defaultData.get (name, default)
if not ftype in ['label', 'warning', 'image']:
label = "%s:" % label
fields.append ((label, name, ftype, default, master, elements))
if lastError:
errorField = (lastError, None, 'warning', None, None, [])
added = False
for (ix, field) in enumerate (fields):
if not field [2] in ['label', 'warning', 'image']:
fields.insert (ix, errorField)
added = True
break
if not added:
fields.append (errorField)
result = self._askLogin_ (title, fields)
if result is None:
raise UserCanceledLogin
return result
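	# Illustrative call (assumed field definitions, not part of the original source):
	#   handler.askLogin(u"Connection: demo",
	#                    [(u"User Name", "_username", "string", None, None, []),
	#                     (u"Password", "_password", "password", None, None, [])],
	#                    {"_username": "gnue"})
	#   returns e.g. {"_username": ..., "_password": ...} or raises UserCanceledLogin.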
# ---------------------------------------------------------------------------
# Do the dirty work for askLogin
# ---------------------------------------------------------------------------
def _askLogin_ (self, title, fields):
"""
Descendants override this method to do all the dirty work for askLogin ().
This class converts the given field definition sequence into an old style
format as required by getLogin () and finally calls getLogin. This process
will fade out as soon as getLogin is obsolete.
"""
# flatten the blown-up sequence till all support the new style definitions
data = []
labels = []
error = None
for (label, name, ftype, default, master, elements) in fields:
if ftype in ['image']:
continue
elif ftype == 'label':
labels.append (label)
elif ftype == 'warning':
error = label
else:
data.append ((name, label, ftype == 'password'))
try:
name = len (labels) and labels [0] or ''
desc = len (labels) > 1 and labels [1] or ''
result = self.getLogin ([name, desc, data], error)
finally:
self.destroyLoginDialog ()
return result
# =============================================================================
# Class implementing a basic login handler using raw_input and getpass
# =============================================================================
class BasicLoginHandler (LoginHandler):
"""
Class implementing a basic login handler using raw_input () and getpass ()
as input methods.
"""
# ---------------------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------------------
def __init__ (self, useDefaults = False, silent = False):
"""
@param useDefaults: if True, does not ask for a field if it has a default
other than None, but uses that default directly
@param silent: if True, no output occurs and it implicitly sets useDefaults
to True.
"""
self.__silent = silent
self.__useDefaults = silent and True or useDefaults
# ---------------------------------------------------------------------------
	# Ask for all fields requested
# ---------------------------------------------------------------------------
def _askLogin_ (self, title, fields):
result = {}
if not self.__silent:
print "*" * 60
print o(title)
print
try:
for (label, name, ftype, default, master, elements) in fields:
if ftype in ['label', 'warning']:
if not self.__silent:
print " %s" % o(label)
elif ftype in ['string', 'password']:
if self.__useDefaults and default is not None:
result [name] = default
else:
if ftype == 'password':
value = getpass.getpass (" %s " % o(label))
else:
# raw_input print's it's argument to stderr, so we have to print
# the label manually here since stderr might be redirected
print " %s" % o(label),
value = raw_input ()
result [name] = [value, default][value is None]
elif ftype in ['dropdown']:
# TODO: sort all fields according to master-detail dependencies and
# then validate the input values using the 'allowedValues' dicts
if self.__useDefaults and default is not None:
result [name] = default
else:
print " %s" % o(label),
result [name] = raw_input ()
except KeyboardInterrupt:
raise UserCanceledLogin
return result
# =============================================================================
# Class implementing a 'silent' login handler
# =============================================================================
class SilentLoginHandler (LoginHandler):
"""
Implementation of a login handler that gets all data preset via parameter and
doesn't communicate with the user at all.
"""
# ---------------------------------------------------------------------------
# Create a new instance
# -----------------
|
caglar10ur/anvio
|
setup.py
|
Python
|
gpl-3.0
| 2,549
| 0.016869
|
import os
import sys
import glob
try:
import numpy
except ImportError:
print "You need to have numpy installed on your system to run setup.py. Sorry!"
sys.exit()
try:
from Cython.Distutils import build_ext
except ImportError:
print "You need to have Cython installed on your system to run setup.py. Sorry!"
sys.exit()
from setuptools import setup, find_packages, Extension
if os.environ.get('USER','') == 'vagrant':
del os.link
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
include_dirs_for_concoct = [numpy.get_include(), '/opt/local/include/']
setup(
name = "anvio",
    version = open('VERSION').read().strip(),
scripts = [script for script in glob.glob('bin/*') if not script.endswith('-OBSOLETE')],
include_package_data = True,
packages = find_packages(),
install_requires = ['bottle>=0.12.7', 'pysam>=0.8.3', 'hcluster>=0.2.0', 'ete2>=2.2', 'scipy', 'scikit-learn>=0.15', 'django>=1.7', 'cython>=0.21a1'],
cmdclass = {'build_ext': build_ext},
ext_modules = [
        Extension('anvio.columnprofile', sources = ['./anvio/extensions/columnprofile.c']),
Extension("anvio.vbgmm", sources=["./anvio/extensions/concoct/vbgmm.pyx", "./anvio/extensions/concoct/c_vbgmm_fit.c"],
libraries =['gsl', 'gslcblas'], include_dirs=include_dirs_for_concoct),
],
author = "anvi'o Authors",
author_email = "a.murat.eren@gmail.com",
description = "An interactive analysis and visualization platform for 'omics data. See https://merenlab.org/projects/anvio for more information",
license = "GPLv3+",
keywords = "metagenomics metatranscriptomics microbiology shotgun genomics MBL pipeline sequencing bam visualization SNP",
url = "https://meren.github.io/projects/anvio/",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Programming Language :: JavaScript',
'Programming Language :: C',
'Topic :: Scientific/Engineering',
],
)
|
pchmieli/h2o-3
|
h2o-py/tests/testdir_algos/glrm/pyunit_DEPRECATED_arrests_missingGLRM.py
|
Python
|
apache-2.0
| 3,136
| 0.009885
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import numpy as np
def glrm_arrests_miss():
missing_ratios = np.arange(0.1, 1, 0.1).tolist()
print "Importing USArrests.csv data and saving for validation..."
arrests_full = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))
arrests_full.describe()
totobs = arrests_full.nrow * arrests_full.ncol
train_err = [0]*len(missing_ratios)
valid_err = [0]*len(missing_ratios)
for i in range(len(missing_ratios)):
ratio = missing_ratios[i]
print "Importing USArrests.csv and inserting {0}% missing entries".format(100*ratio)
arrests_miss = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))
        arrests_miss = arrests_miss.insert_missing_values(fraction=ratio)
arrests_miss.describe()
print "H2O GLRM with {0}% missing entries".format(100*ratio)
arrests_glrm = h2o.glrm(x=arrests_miss, validation_frame=arrests_full, k=4, ignore_const_cols=False, loss="Quadratic", regularization_x="None", regularization_y="None", init="PlusPlus", max_iterations=10, min_step_size=1e-6)
arrests_glrm.show()
# Check imputed data and error metrics
glrm_obj = arrests_glrm._model_json['output']['objective']
train_numerr = arrests_glrm._model_json['output']['training_metrics']._metric_json['numerr']
train_caterr = arrests_glrm._model_json['output']['training_metrics']._metric_json['caterr']
valid_numerr = arrests_glrm._model_json['output']['validation_metrics']._metric_json['numerr']
valid_caterr = arrests_glrm._model_json['output']['validation_metrics']._metric_json['caterr']
assert abs(train_numerr - glrm_obj) < 1e-3, "Numeric error on training data was " + str(train_numerr) + " but should equal final objective " + str(glrm_obj)
assert train_caterr == 0, "Categorical error on training data was " + str(train_caterr) + " but should be zero"
assert valid_caterr == 0, "Categorical error on validation data was " + str(valid_caterr) + " but should be zero"
train_numcnt = arrests_glrm._model_json['output']['training_metrics']._metric_json['numcnt']
valid_numcnt = arrests_glrm._model_json['output']['validation_metrics']._metric_json['numcnt']
assert valid_numcnt > train_numcnt, "Number of non-missing numerical entries in training data should be less than validation data"
assert valid_numcnt == totobs, "Number of non-missing numerical entries in validation data was " + str(valid_numcnt) + " but should be " + str(totobs)
train_err[i] = train_numerr
valid_err[i] = valid_numerr
h2o.remove(arrests_glrm._model_json['output']['representation_name'])
for i in range(len(missing_ratios)):
print "Missing ratio: {0}% --> Training error: {1}\tValidation error: {2}".format(missing_ratios[i]*100, train_err[i], valid_err[i])
if __name__ == "__main__":
pyunit_utils.standalone_test(glrm_arrests_miss)
else:
glrm_arrests_miss()
|
wldcordeiro/servo
|
tests/wpt/web-platform-tests/tools/manifest/tests/test_sourcefile.py
|
Python
|
mpl-2.0
| 5,974
| 0.000167
|
from ..sourcefile import SourceFile
def create(filename, contents=b""):
assert isinstance(contents, bytes)
return SourceFile("/", filename, "/", contents=contents)
def items(s):
return [
(item.item_type, item.url)
for item in s.manifest_items()
]
def test_name_is_non_test():
non_tests = [
".gitignore",
".travis.yml",
"MANIFEST.json",
"tools/test.html",
"resources/test.html",
"common/test.html",
"conformance-checkers/test.html",
]
for rel_path in non_tests:
s = create(rel_path)
assert s.name_is_non_test
assert not s.content_is_testharness
assert items(s) == []
def test_name_is_manual():
manual_tests = [
"html/test-manual.html",
"html/test-manual.xhtml",
]
for rel_path in manual_tests:
s = create(rel_path)
assert not s.name_is_non_test
assert s.name_is_manual
assert not s.content_is_testharness
assert items(s) == [("manual", "/" + rel_path)]
def test_worker():
s = create("html/test.worker.js")
assert not s.name_is_non_test
assert not s.name_is_manual
assert not s.name_is_multi_global
assert s.name_is_worker
assert not s.name_is_reference
assert not s.content_is_testharness
assert items(s) == [("testharness", "/html/test.worker")]
def test_multi_global():
s = create("html/test.any.js")
assert not s.name_is_non_test
assert not s.name_is_manual
assert s.name_is_multi_global
assert not s.name_is_worker
assert not s.name_is_reference
assert not s.content_is_testharness
assert items(s) == [
("testharness", "/html/test.any.html"),
("testharness", "/html/test.any.worker"),
]
def test_testharness():
content = b"<script src=/resources/testharness.js></script>"
for ext in ["htm", "html"]:
filename = "html/test." + ext
s = create(filename, content)
assert not s.name_is_non_test
assert not s.name_is_manual
assert not s.name_is_multi_global
assert not s.name_is_worker
assert not s.name_is_reference
assert s.content_is_testharness
assert items(s) == [("testharness", "/" + filename)]
def test_relative_testharness():
content = b"<script src=../resources/testharness.js></script>"
for ext in ["htm", "html"]:
filename = "html/test." + ext
s = create(filename, content)
assert not s.name_is_non_test
assert not s.name_is_manual
assert not s.name_is_multi_global
assert not s.name_is_worker
assert not s.name_is_reference
assert not s.content_is_testharness
assert items(s) == []
def test_testharness_xhtml():
content = b"""
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
</head>
<body/>
</html>
"""
for ext in ["xhtml", "xht", "xml"]:
filename = "html/test." + ext
s = create(filename, content)
assert not s.name_is_non_test
assert not s.name_is_manual
assert not s.name_is_multi_global
assert not s.name_is_worker
assert not s.name_is_reference
assert s.content_is_testharness
assert items(s) == [("testharness", "/" + filename)]
def test_relative_testharness_xhtml():
content = b"""
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
</head>
<body/>
</html>
"""
for ext in ["xhtml", "xht", "xml"]:
filename = "html/test." + ext
s = create(filename, content)
assert not s.name_is_non_test
assert not s.name_is_manual
        assert not s.name_is_multi_global
        assert not s.name_is_worker
assert not s.name_is_reference
assert not s.content_is_testharness
assert items(s) == []
def test_testharness_svg():
content = b"""\
<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg"
xmlns:h="http://www.w3.org/1999/xhtml"
version="1.1"
width="100%" height="100%" viewBox="0 0 400 400">
<title>Null test</title>
<h:script src="/resources/testharness.js"/>
<h:script src="/resources/testharnessreport.js"/>
</svg>
"""
filename = "html/test.svg"
s = create(filename, content)
assert not s.name_is_non_test
assert not s.name_is_manual
assert not s.name_is_multi_global
assert not s.name_is_worker
assert not s.name_is_reference
assert s.root
assert s.content_is_testharness
assert items(s) == [("testharness", "/" + filename)]
def test_relative_testharness_svg():
content = b"""\
<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg"
xmlns:h="http://www.w3.org/1999/xhtml"
version="1.1"
width="100%" height="100%" viewBox="0 0 400 400">
<title>Null test</title>
<h:script src="../resources/testharness.js"/>
<h:script src="../resources/testharnessreport.js"/>
</svg>
"""
filename = "html/test.svg"
s = create(filename, content)
assert not s.name_is_non_test
assert not s.name_is_manual
assert not s.name_is_multi_global
assert not s.name_is_worker
assert not s.name_is_reference
assert s.root
assert not s.content_is_testharness
assert items(s) == []
def test_testharness_ext():
content = b"<script src=/resources/testharness.js></script>"
for filename in ["test", "test.test"]:
s = create("html/" + filename, content)
assert not s.name_is_non_test
assert not s.name_is_manual
assert not s.name_is_multi_global
assert not s.name_is_worker
assert not s.name_is_reference
assert not s.root
assert not s.content_is_testharness
assert items(s) == []
|
nozuono/calibre-webserver
|
setup/installer/windows/freeze.py
|
Python
|
gpl-3.0
| 32,114
| 0.004266
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os, shutil, glob, py_compile, subprocess, re, zipfile, time, textwrap
from setup import (Command, modules, functions, basenames, __version__,
__appname__)
from setup.build_environment import msvc, MT, RC, is64bit
from setup.installer.windows.wix import WixMixIn
ICU_DIR = os.environ.get('ICU_DIR', r'Q:\icu')
OPENSSL_DIR = os.environ.get('OPENSSL_DIR', r'Q:\openssl')
QT_DIR = os.environ.get('QT_DIR', 'Q:\\Qt\\current')
QT_DLLS = ['Core', 'Gui', 'Network', 'Svg', 'WebKit', 'Xml', 'XmlPatterns']
SW = r'C:\cygwin\home\kovid\sw'
IMAGEMAGICK = os.path.join(SW, 'build',
'ImageMagick-*\\VisualMagick\\bin')
CRT = r'C:\Microsoft.VC90.CRT'
LZMA = r'Q:\easylzma\build\easylzma-0.0.8'
VERSION = re.sub('[a-z]\d+', '', __version__)
WINVER = VERSION+'.0'
machine = 'X64' if is64bit else 'X86'
DESCRIPTIONS = {
'calibre' : 'The main calibre program',
'ebook-viewer' : 'Viewer for all e-book formats',
'ebook-edit' : 'Edit e-books',
'lrfviewer' : 'Viewer for LRF files',
'ebook-convert': 'Command line interface to the conversion/news download system',
'ebook-meta' : 'Command line interface for manipulating e-book metadata',
'calibredb' : 'Command line interface to the calibre database',
'calibre-launcher' : 'Utility functions common to all executables',
'calibre-debug' : 'Command line interface for calibre debugging/development',
'calibre-customize' : 'Command line interface to calibre plugin system',
'pdfmanipulate' : 'Command line tool to manipulate PDF files',
'calibre-server': 'Standalone calibre content server',
'calibre-parallel': 'calibre worker process',
'calibre-smtp' : 'Command line interface for sending books via email',
'calibre-eject' : 'Helper program for ejecting connected reader devices',
}
def walk(dir):
''' A nice interface to os.walk '''
for record in os.walk(dir):
for f in record[-1]:
yield os.path.join(record[0], f)
class Win32Freeze(Command, WixMixIn):
description = 'Freeze windows calibre installation'
def add_options(self, parser):
parser.add_option('--no-ice', default=False, action='store_true',
help='Disable ICE checks when building MSI (needed when running'
' from cygwin sshd)')
parser.add_option('--msi-compression', '--compress', default='high',
help='Compression when generating installer. Set to none to disable')
parser.add_option('--keep-site', default=False, action='store_true',
help='Keep human readable site.py')
parser.add_option('--verbose', default=0, action="count",
help="Be more verbose")
def run(self, opts):
self.SW = SW
self.portable_uncompressed_size = 0
self.opts = opts
self.src_root = self.d(self.SRC)
self.base = self.j(self.d(self.SRC), 'build', 'winfrozen')
self.rc_template = self.j(self.d(self.a(__file__)), 'template.rc')
self.py_ver = ''.join(map(str, sys.version_info[:2]))
self.lib_dir = self.j(self.base, 'Lib')
self.pylib = self.j(self.base, 'pylib.zip')
self.dll_dir = self.j(self.base, 'DLLs')
self.plugins_dir = os.path.join(self.base, 'plugins2')
self.portable_base = self.j(self.d(self.base), 'Calibre Portable')
self.obj_dir = self.j(self.src_root, 'build', 'launcher')
self.initbase()
self.build_launchers()
self.build_eject()
self.add_plugins()
self.freeze()
self.embed_manifests()
self.install_site_py()
self.archive_lib_dir()
self.remove_CRT_from_manifests()
self.create_installer()
if not is64bit:
self.build_portable()
self.build_portable_installer()
self.sign_installers()
def remove_CRT_from_manifests(self):
'''
The dependency on the CRT is removed from the manifests of all DLLs.
This allows the CRT loaded by the .exe files to be used instead.
'''
search_pat = re.compile(r'(?is)<dependency>.*Microsoft\.VC\d+\.CRT')
repl_pat = re.compile(
r'(?is)<dependency>.*?Microsoft\.VC\d+\.CRT.*?</dependency>')
for dll in (glob.glob(self.j(self.dll_dir, '*.dll')) +
glob.glob(self.j(self.plugins_dir, '*.pyd'))):
bn = self.b(dll)
with open(dll, 'rb') as f:
raw = f.read()
match = search_pat.search(raw)
if match is None:
continue
self.info('Removing CRT dependency from manifest of: %s'%bn)
# Blank out the bytes corresponding to the dependency specification
nraw = repl_pat.sub(lambda m: b' '*len(m.group()), raw)
if len(nraw) != len(raw) or nraw == raw:
raise Exception('Something went wrong with %s'%bn)
with open(dll, 'wb') as f:
f.write(nraw)
def initbase(self):
if self.e(self.base):
shutil.rmtree(self.base)
os.makedirs(self.base)
def add_plugins(self):
self.info('Adding plugins...')
tgt = self.plugins_dir
if os.path.exists(tgt):
shutil.rmtree(tgt)
os.mkdir(tgt)
base = self.j(self.SRC, 'calibre', 'plugins')
for f in glob.glob(self.j(base, '*.pyd')):
            # We don't want the manifests as the manifest in the exe will be
# used instead
shutil.copy2(f, tgt)
def fix_pyd_bootstraps_in(self, folder):
        for dirpath, dirnames, filenames in os.walk(folder):
for f in filenames:
name, ext = os.path.splitext(f)
bpy = self.j(dirpath, name + '.py')
if ext == '.pyd' and os.path.exists(bpy):
with open(bpy, 'rb') as f:
raw = f.read().strip()
if (not raw.startswith('def __bootstrap__') or not
raw.endswith('__bootstrap__()')):
raise Exception('The file %r has non'
' bootstrap code'%self.j(dirpath, f))
for ext in ('.py', '.pyc', '.pyo'):
x = self.j(dirpath, name+ext)
if os.path.exists(x):
os.remove(x)
def freeze(self):
shutil.copy2(self.j(self.src_root, 'LICENSE'), self.base)
self.info('Adding CRT')
shutil.copytree(CRT, self.j(self.base, os.path.basename(CRT)))
self.info('Adding resources...')
tgt = self.j(self.base, 'resources')
if os.path.exists(tgt):
shutil.rmtree(tgt)
shutil.copytree(self.j(self.src_root, 'resources'), tgt)
self.info('Adding Qt and python...')
shutil.copytree(r'C:\Python%s\DLLs'%self.py_ver, self.dll_dir,
ignore=shutil.ignore_patterns('msvc*.dll', 'Microsoft.*'))
for x in glob.glob(self.j(OPENSSL_DIR, 'bin', '*.dll')):
shutil.copy2(x, self.dll_dir)
for x in glob.glob(self.j(ICU_DIR, 'source', 'lib', '*.dll')):
shutil.copy2(x, self.dll_dir)
for x in QT_DLLS:
x += '4.dll'
if not x.startswith('phonon'):
x = 'Qt'+x
shutil.copy2(os.path.join(QT_DIR, 'bin', x), self.dll_dir)
shutil.copy2(r'C:\windows\system32\python%s.dll'%self.py_ver,
self.dll_dir)
for x in os.walk(r'C:\Python%s\Lib'%self.py_ver):
for f in x[-1]:
if f.lower().endswith('.dll'):
f = self.j(x[0], f)
shutil.copy2(f, self.dll_dir)
shutil.copy2(
r'C:\Python%(v)s\Lib\site-packages\pywin32_system32\pywintypes%(v)s.dll'
% dict(v=self.py_ver),
|
jricardo27/travelhelper
|
travelhelper/apps/lonelyplanet/models/sight.py
|
Python
|
bsd-3-clause
| 8,871
| 0.000789
|
"""
Lonely Planet Sight Model
"""
from __future__ import absolute_import, print_function
import re
from bs4 import BeautifulSoup
from django.db import models
from django.utils.translation import ugettext as _
from core.models.sight import THSight
from core.utils import urllib2
from .abstract import LonelyPlanetAbstractModel
from .parser import get_text
from .place import LPPlace
coords_regex = u'\/place\/{coord},{coord}\/'.format(
coord=u'(-?[0-9\.]*)',
)
currency_regex = u'({currencies})?({price}){exclude_years}{hours}'.format(
currencies=u'\u20AC|Dh',
price=u'[\d\.]+',
exclude_years=u'(?!-?\d?yr)',
hours=u'(?!am|pm|hr|\xBD)',
)
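# Editor's note: illustrative sketch added by the editor, not part of the
# original module. The composed currency_regex above appears intended to pick
# up numeric prices, optionally prefixed by a supported currency symbol, while
# skipping numbers that belong to durations or opening hours. The sample string
# below is an assumption made only for this example.
def _currency_regex_example():
    # Expected to pick up the price but not the "9am-5pm" opening hours.
    return re.findall(currency_regex, u'adult/child \u20AC12.50, open 9am-5pm')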
class LPSight(LonelyPlanetAbstractModel):
"""
Represents a Lonely Planet page with information about a City Sight
"""
class Meta(object):
verbose_name = _('Lonely Planet Sight')
ordering = ['name']
lpplace = models.ForeignKey(
LPPlace,
blank=False,
null=False,
related_name='children',
help_text=_('Lonely Planet Place where this sight is located'),
)
nearby_places = models.ManyToManyField(
LPPlace,
blank=True,
related_name='nearby_sights',
help_text=_('Lonely Planet Places nearby this sight'),
)
thsight = models.OneToOneField(
THSight,
blank=True,
null=True,
related_name='lonelyplanet_object',
help_text=_('Travel Helper Sight where this content is parsed'),
)
# Name of the field that refers to the related Travel Helper object
th_related_field = 'thsight'
th_related_model = THSight
@classmethod
def html_extractor(cls):
"""
Return a string with the arguments necessary to split a sight
list from html
"""
return '.stack__content > .grid-wrapper--10 .card__mask'
@classmethod
def html_nearby_extractor(cls):
"""
Return a string with the arguments necessary to extract all the
nearby sights
"""
return '.nearby-cards__content .card__mask'
@classmethod
def update_sight(cls, sight, overwrite=False, **kwargs):
"""
Update some properties of the sight
"""
if not sight:
return
verbose = kwargs.get('verbose', 0)
        if sight.update_html_source(overwrite=overwrite, verbose=verbose):
sight.save()
cls._log(
_(u'Saved sight: {name}'),
name=sight.name,
verbose=verbose,
)
@classmethod
def build_from_url(cls, url, parent=None, recursive=True, **kwargs):
"""
Given an url, extract and build a LPSight
"""
verbose = kwargs.get('verbose', 0)
new_sight = None
try:
new_sight = LPSight.objects.get(url=url)
cls._log(
_(u'Sight[{id}]: {name} already in database'),
id=new_sight.id,
name=new_sight,
verbose=verbose,
)
cls.update_sight(
new_sight,
recursive=recursive,
**kwargs
)
except LPSight.DoesNotExist:
try:
new_sight = cls.build_from_html(
BeautifulSoup(urllib2.urlopen(url), 'lxml'),
parent,
recursive,
url=url,
**kwargs
)
except urllib2.URLError:
cls._log(
_(u'Error accessing {url} creating LPSight'),
url=url,
verbose=verbose,
)
return new_sight
@classmethod
def build_from_html(cls, soup_html, parent=None, recursive=True, **kwargs):
"""
Given a soup html object, extract and build a LPSight
"""
if isinstance(soup_html, basestring):
soup_html = BeautifulSoup(soup_html, 'lxml')
sight_url = kwargs['url']
verbose = kwargs.get('verbose', 0)
new_sight = None
try:
new_sight = LPSight.objects.get(url=sight_url)
cls._log(
_(u'Sight[{id}]: {name} already in database'),
name=new_sight,
verbose=verbose,
)
except LPSight.DoesNotExist:
try:
heading = soup_html.find(class_='copy--h1')
# Create a new sight
new_sight = LPSight(
name=heading.text.strip(),
url=sight_url,
lpplace=parent,
)
cls._log(
_(u'Created sight[{id}]: {name}'),
id=new_sight.id,
name=new_sight.name,
verbose=verbose,
)
except AttributeError as ex:
cls._log(
_(u'Error processing sight {url}, is it a valid page?'
u'\n{exception}'),
exception=ex,
url=sight_url,
verbose=verbose,
)
cls.update_sight(
new_sight,
recursive=recursive,
**kwargs
)
return new_sight
@property
def parent(self):
"""
Return the parent of this object
"""
return self.lpplace
def update_children_list_html(self, overwrite=False, **kwargs):
"""
        As Sights don't have places, ignore that field
"""
return False
def _parse_sight_info(self, soup):
"""
Parse the information contained in a Lonely Planet Sight
attributes = (
'Prices',
'Opening hours',
'More information',
'Address',
'Getting there',
)
"""
if isinstance(soup, basestring):
soup = BeautifulSoup(soup, 'lxml')
dd_list = soup.find_all('dd')
info = {}
for elem in dd_list:
key = get_text(elem.find_previous('dt'))
value = get_text(elem)
if key in info:
info[key] = u'{0}\n{1}'.format(info[key], value)
else:
info[key] = value
info['description'] = get_text(
soup.find_all('div', class_='ttd__section--description')[0]
)
# Extract coordinates [It's probable that they aren't there]
try:
maps_url = soup.select('div .poi-map__container')[0]
# Invert the coordinates, for some reason Lonely Planet has them
            # as (longitude, latitude) instead of (latitude, longitude)
info['geopoint'] = 'POINT({longitude} {latitude})'.format(
longitude=maps_url.get('data-longitude'),
latitude=maps_url.get('data-latitude'),
)
print(info['geopoint'])
except IndexError:
pass
try:
images = soup.find_all(
'div',
class_='tab__content'
)[0].find_all('img')
prefix = 'http://'
img_url = images[0].get('src')
if img_url[:len(prefix)] != prefix:
try:
img_url = images[0].get('src').split(prefix)[1]
img_url = u'{0}{1}'.format(prefix, img_url)
except IndexError:
pass
if 'maps.googleapis.com' not in img_url:
info['image'] = img_url
except IndexError:
pass
return info
def parse_price(self, price_string):
"""
Return the result of applying a regex over the string
"""
return re.findall(currency_regex, price_string)
def parse_info(self, **kwargs):
"""
Parse the information contained in this object and populate a
dictionary that can be used to create/update a Travel Helper Sight
"""
data = super(LPSight, self).parse_info(**kwargs)
info = self._parse_sight_
|
nteract/papermill
|
papermill/tests/test_execute.py
|
Python
|
bsd-3-clause
| 16,273
| 0.003318
|
import os
import io
import shutil
import tempfile
import unittest
from functools import partial
from pathlib import Path
from nbformat import validate
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from .. import engines
from ..log import logger
from ..iorw import load_notebook_node
from ..utils import chdir
from ..execute import execute_notebook
from ..exceptions import PapermillExecutionError
from . import get_notebook_path, kernel_name
execute_notebook = partial(execute_notebook, kernel_name=kernel_name)
class TestNotebookHelpers(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.notebook_name = 'simple_execute.ipynb'
self.notebook_path = get_notebook_path(self.notebook_name)
self.nb_test_executed_fname = os.path.join(
self.test_dir, 'output_{}'.format(self.notebook_name)
)
def tearDown(self):
shutil.rmtree(self.test_dir)
@patch(engines.__name__ + '.PapermillNotebookClient')
def test_start_timeout(self, preproc_mock):
execute_notebook(self.notebook_path, self.nb_test_executed_fname, start_timeout=123)
args, kwargs = preproc_mock.call_args
expected = [
('timeout', None),
('startup_timeout', 123),
('kernel_name', kernel_name),
('log', logger),
]
actual = set([(key, kwargs[key]) for key in kwargs])
self.assertTrue(
set(expected).issubset(actual),
msg='Expected arguments {} are not a subset of actual {}'.format(expected, actual),
)
@patch(engines.__name__ + '.PapermillNotebookClient')
def test_default_start_timeout(self, preproc_mock):
execute_notebook(self.notebook_path, self.nb_test_executed_fname)
args, kwargs = preproc_mock.call_args
expected = [
('timeout', None),
('startup_timeout', 60),
('kernel_name', kernel_name),
('log', logger),
]
actual = set([(key, kwargs[key]) for key in kwargs])
self.assertTrue(
set(expected).issubset(actual),
msg='Expected arguments {} are not a subset of actual {}'.format(expected, actual),
)
def test_cell_insertion(self):
execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'msg': 'Hello'})
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[1].get('source').split('\n'), ['# Parameters', 'msg = "Hello"', '']
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'msg': 'Hello'})
def test_no_tags(self):
notebook_name = 'no_parameters.ipynb'
        nb_test_executed_fname = os.path.join(self.test_dir, 'output_{}'.format(notebook_name))
execute_notebook(get_notebook_path(notebook_name), nb_test_executed_fname, {'msg': 'Hello'})
test_nb = load_notebook_node(nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[0].get('source').split('\n'), ['# Parameters', 'msg = "Hello"', '']
)
        self.assertEqual(test_nb.metadata.papermill.parameters, {'msg': 'Hello'})
def test_quoted_params(self):
execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'msg': '"Hello"'})
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[1].get('source').split('\n'), ['# Parameters', r'msg = "\"Hello\""', '']
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'msg': '"Hello"'})
def test_backslash_params(self):
execute_notebook(
self.notebook_path, self.nb_test_executed_fname, {'foo': r'do\ not\ crash'}
)
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[1].get('source').split('\n'),
['# Parameters', r'foo = "do\\ not\\ crash"', ''],
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'foo': r'do\ not\ crash'})
def test_backslash_quote_params(self):
execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'foo': r'bar=\"baz\"'})
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[1].get('source').split('\n'),
['# Parameters', r'foo = "bar=\\\"baz\\\""', ''],
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'foo': r'bar=\"baz\"'})
def test_double_backslash_quote_params(self):
execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'foo': r'\\"bar\\"'})
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[1].get('source').split('\n'),
['# Parameters', r'foo = "\\\\\"bar\\\\\""', ''],
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'foo': r'\\"bar\\"'})
def test_prepare_only(self):
for example in ['broken1.ipynb', 'keyboard_interrupt.ipynb']:
path = get_notebook_path(example)
result_path = os.path.join(self.test_dir, example)
# Should not raise as we don't execute the notebook at all
execute_notebook(path, result_path, {'foo': r'do\ not\ crash'}, prepare_only=True)
nb = load_notebook_node(result_path)
self.assertEqual(nb.cells[0].cell_type, "code")
self.assertEqual(
nb.cells[0].get('source').split('\n'),
['# Parameters', r'foo = "do\\ not\\ crash"', ''],
)
class TestBrokenNotebook1(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test(self):
path = get_notebook_path('broken1.ipynb')
# check that the notebook has two existing marker cells, so that this test is sure to be
        # validating the removal logic (the markers are simulating an error in the first code cell
# that has since been fixed)
original_nb = load_notebook_node(path)
self.assertEqual(original_nb.cells[0].metadata["tags"], ["papermill-error-cell-tag"])
self.assertIn("In [1]", original_nb.cells[0].source)
self.assertEqual(original_nb.cells[2].metadata["tags"], ["papermill-error-cell-tag"])
result_path = os.path.join(self.test_dir, 'broken1.ipynb')
with self.assertRaises(PapermillExecutionError):
execute_notebook(path, result_path)
nb = load_notebook_node(result_path)
self.assertEqual(nb.cells[0].cell_type, "markdown")
self.assertRegex(
nb.cells[0].source, r'^<span .*<a href="#papermill-error-cell".*In \[2\].*</span>$'
)
self.assertEqual(nb.cells[0].metadata["tags"], ["papermill-error-cell-tag"])
self.assertEqual(nb.cells[1].cell_type, "markdown")
self.assertEqual(nb.cells[2].execution_count, 1)
self.assertEqual(nb.cells[3].cell_type, "markdown")
self.assertEqual(nb.cells[4].cell_type, "markdown")
self.assertEqual(nb.cells[5].cell_type, "markdown")
self.assertRegex(nb.cells[5].source, '<span id="papermill-error-cell" .*</span>')
self.assertEqual(nb.cells[5].metadata["tags"], ["papermill-error-cell-tag"])
self.assertEqual(nb.cells[6].execution_count, 2)
self.assertEqual(nb.cells[6].outputs[0].output_type, 'error')
self.assertEqual(nb.cells[7].execution_count, None)
# double check the removal (the new cells above should be the only two tagged ones)
self.assertEqual(
sum("papermill-error-cell-tag" in cell.metadata.get("tags", []) for cell in nb.cells), 2
)
class TestBrokenNotebook2(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test(self):
path = get_notebook_path('broken2.ipynb')
result_path = os.path.join(self.test_dir, 'broken2.ipynb')
|
goal/uwsgi
|
plugins/emperor_zeromq/uwsgiplugin.py
|
Python
|
gpl-2.0
| 98
| 0
|
NAME = 'emperor_zeromq'
CFLAGS = []
LDFLAGS = []
LIBS = ['-lzmq']
GCC_LIST = ['emperor_zeromq']
|
matthew-brett/scipy
|
scipy/integrate/_ivp/tests/test_rk.py
|
Python
|
bsd-3-clause
| 1,326
| 0
|
import pytest
from numpy.testing import assert_allclose, assert_
import numpy as np
from scipy.integrate import RK23, RK45, DOP853
from scipy.integrate._ivp import dop853_coefficients
@pytest.mark.parametrize("solver", [RK23, RK45, DOP853])
def test_coefficient_properties(solver):
assert_allclose(np.sum(solver.B), 1, rtol=1e-15)
assert_allclose(np.sum(solver.A, axis=1), solver.C, rtol=1e-14)
def test_coefficient_properties_dop853():
assert_allclose(np.sum(dop853_coefficients.B), 1, rtol=1e-15)
assert_allclose(np.sum(dop853_coefficients.A, axis=1),
dop853_coefficients.C,
rtol=1e-14)
@pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
def test_error_estimation(solver_class):
step = 0.2
solver = solver_class(lambda t, y: y, 0, [1], 1, first_step=step)
solver.step()
error_estimate = solver._estimate_error(solver.K, step)
error = solver.y - np.exp([step])
assert_(np.abs(error) < np.abs(error_estimate))
@pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
def test_error_estimation_complex(solver_class):
h = 0.2
solver = solver_class(lambda t, y: 1j * y, 0, [1j], 1, first_step=h)
solver.step()
err_norm = solver._estimate_error_norm(solver.K, h, scale=[1])
assert np.isrealobj(err_norm)
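# Editor's note: illustrative addition by the editor, not part of the original
# scipy test file. The coefficient tests above check the standard Runge-Kutta
# consistency conditions: the weights B sum to 1 and each row sum of A equals
# the corresponding node C. The same check written out for the classic RK4
# Butcher tableau (the tableau is quoted from textbooks, not taken from scipy):
def _rk4_tableau_is_consistent():
    A = np.array([[0.0, 0.0, 0.0, 0.0],
                  [0.5, 0.0, 0.0, 0.0],
                  [0.0, 0.5, 0.0, 0.0],
                  [0.0, 0.0, 1.0, 0.0]])
    B = np.array([1 / 6, 1 / 3, 1 / 3, 1 / 6])
    C = np.array([0.0, 0.5, 0.5, 1.0])
    # Weights sum to one and row sums of A reproduce the nodes.
    return np.isclose(B.sum(), 1.0) and np.allclose(A.sum(axis=1), C)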
|
smokeyfeet/smokeyfeet-registration
|
src/smokeyfeet/registration/migrations/0001_initial.py
|
Python
|
mit
| 4,391
| 0.004555
|
# Generated by Django 3.1 on 2020-08-13 19:23
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='LunchType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('sort_order', models.PositiveIntegerField()),
('unit_price', models.DecimalField(decimal_places=2, default=0, max_digits=12)),
],
options={
'ordering': ['sort_order'],
},
),
        migrations.CreateModel(
name='PassType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('party', 'Party Pass'), ('full', 'Full Pass')], max_length=32)),
('name', models.CharField(max_length=64)),
('active', models.BooleanField(default=False)),
('sort_order', models.PositiveIntegerField()),
('quantity_in_stock', models.PositiveIntegerField(default=0)),
('unit_price', models.DecimalField(decimal_places=2, default=0, max_digits=12)),
('data', models.JSONField(blank=True)),
],
options={
'ordering': ['sort_order'],
},
),
migrations.CreateModel(
name='Registration',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=64)),
('last_name', models.CharField(max_length=64)),
('email', models.EmailField(max_length=254, unique=True)),
('dance_role', models.CharField(choices=[('leader', 'Leader'), ('follower', 'Follower')], default='leader', max_length=32)),
('residing_country', django_countries.fields.CountryField(max_length=2)),
('workshop_partner_name', models.CharField(blank=True, max_length=128)),
('workshop_partner_email', models.EmailField(blank=True, max_length=254)),
('crew_remarks', models.TextField(blank=True, max_length=4096)),
('total_price', models.DecimalField(decimal_places=2, default=0, max_digits=12)),
('audition_url', models.URLField(blank=True)),
('accepted_at', models.DateTimeField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('lunch', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='registration.lunchtype')),
('pass_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='registration.passtype')),
],
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mollie_payment_id', models.CharField(blank=True, max_length=64, null=True, unique=True)),
('amount', models.DecimalField(decimal_places=2, default=0, max_digits=12)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('registration', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='registration.registration')),
],
),
migrations.CreateModel(
name='Interaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, max_length=4096)),
('created_at', models.DateTimeField(auto_now_add=True)),
('registration', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='registration.registration')),
],
),
]
|
frankrousseau/weboob
|
weboob/tools/log.py
|
Python
|
agpl-3.0
| 2,262
| 0
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
from collections import defaultdict
from logging import addLevelName, Formatter, getLogger as _getLogger
__all__ = ['getLogger', 'createColoredFormatter', 'settings']
RESET_SEQ = "\033[0m"
COLOR_SEQ = "%s%%s" + RESET_SEQ
COLORS = {
'DEBUG': COLOR_SEQ % "\033[0;36m",
'INFO': COLOR_SEQ % "\033[32m",
'WARNING': COLOR_SEQ % "\033[1;33m",
'ERROR': COLOR_SEQ % "\033[1;31m",
'CRITICAL': COLOR_SEQ % ("\033[1;33m\033[1;41m"),
'DEBUG_FILTERS': COLOR_SEQ % "\033[0;35m",
}
DEBUG_FILTERS = 8
addLevelName(DEBUG_FILTERS, 'DEBUG_FILTERS')
# Global settings for logger.
settings = defaultdict(lambda: None)
def getLogger(name, parent=None):
if parent:
name = parent.name + '.' + name
logger = _getLogger(name)
logger.settings = settings
return logger
class ColoredFormatter(Formatter):
"""
Class written by airmind:
http://stackoverflow.com/questions/384076/how-can-i-make-the-python-logging-output-to-be-colored
"""
def format(self, record):
levelname = record.levelname
        msg = Formatter.format(self, record)
if levelname in COLORS:
msg = COLORS[levelname] % msg
return msg
def createColoredFormatter(stream, format):
if (sys.platform != 'win32') and stream.isatty():
return ColoredFormatter(format)
else:
return Formatter(format)
if __name__ == '__main__':
for levelname, cs in COLORS.items():
print(cs % levelname, end=' ')
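# Editor's note: illustrative usage sketch appended by the editor, not part of
# the original weboob module. It shows one way to wire getLogger() together
# with createColoredFormatter(); the handler, format string and logger name
# below are assumptions made only for this example.
def _example_colored_logger(stream=None):
    import logging
    if stream is None:
        stream = sys.stdout
    handler = logging.StreamHandler(stream)
    # Colors are only applied when the stream is a terminal (see createColoredFormatter).
    handler.setFormatter(createColoredFormatter(stream, '%(levelname)s %(message)s'))
    log = getLogger('example')
    log.addHandler(handler)
    log.setLevel(DEBUG_FILTERS)
    return log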
|
jaeilepp/eggie
|
mne/io/kit/kit.py
|
Python
|
bsd-2-clause
| 28,437
| 0
|
"""Conversion tool from SQD to FIF
RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py
"""
# Author: Teon Brooks <teon@nyu.edu>
#
# License: BSD (3-clause)
import os
from os import SEEK_CUR
from struct import unpack
import time
import numpy as np
from scipy import linalg
from ..pick import pick_types
from ...coreg import (read_elp, fit_matched_points, _decimate_points,
get_ras_to_neuromag_trans)
from ...utils import verbose, logger
from ...transforms import apply_trans, als_ras_trans, als_ras_trans_mm
from ..base import _BaseRaw
from ..constants import FIFF
from ..meas_info import Info
from ..tag import _loc_to_trans
from .constants import KIT, KIT_NY, KIT_AD
from .coreg import read_hsp, read_mrk
from ...externals.six import string_types
class RawKIT(_BaseRaw):
"""Raw object from KIT SQD file adapted from bti/raw.py
Parameters
----------
input_fname : str
Path to the sqd file.
mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape = (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape = (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
        10,000 points are in the head shape, they are automatically decimated.
stim : list of int | '<' | '>'
Channel-value correspondence when converting KIT trigger channels to a
Neuromag-style stim channel. For '<', the largest values are assigned
to the first channel (default). For '>', the largest values are
assigned to the last channel. Can also be specified as a list of
trigger channel indexes.
slope : '+' | '-'
How to interpret values on KIT trigger channels when synthesizing a
Neuromag-style stim channel. With '+', a positive slope (low-to-high)
is interpreted as an event. With '-', a negative slope (high-to-low)
is interpreted as an event.
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',
slope='-', stimthresh=1, preload=False, verbose=None):
logger.info('Extracting SQD Parameters from %s...' % input_fname)
input_fname = os.path.abspath(input_fname)
self._sqd_params = get_sqd_params(input_fname)
self._sqd_params['stimthresh'] = stimthresh
self._sqd_params['fname'] = input_fname
logger.info('Creating Raw.info structure...')
# Raw attributes
self.verbose = verbose
self.preload = False
self._projector = None
self.first_samp = 0
self.last_samp = self._sqd_params['nsamples'] - 1
self.comp = None # no compensation for KIT
self.proj = False
# Create raw.info dict for raw fif object with SQD data
self.info = Info()
self.info['meas_id'] = None
self.info['file_id'] = None
        self.info['meas_date'] = int(time.time())
self.info['projs'] = []
self.info['comps'] = []
self.info['lowpass'] = self._sqd_params['lowpass']
self.info['highpass'] = self._sqd_params['highpass']
self.info['sfreq'] = float(self._sqd_params['sfreq'])
# meg channels plus synthetic channel
self.info['nchan'] = self._sqd_params['nchan'] + 1
self.info['bads'] = []
        self.info['acq_pars'], self.info['acq_stim'] = None, None
self.info['filename'] = None
self.info['ctf_head_t'] = None
self.info['dev_ctf_t'] = []
self._filenames = []
self.info['dig'] = None
self.info['dev_head_t'] = None
if isinstance(mrk, list):
mrk = [read_mrk(marker) if isinstance(marker, string_types)
else marker for marker in mrk]
mrk = np.mean(mrk, axis=0)
if (mrk is not None and elp is not None and hsp is not None):
self._set_dig_kit(mrk, elp, hsp)
elif (mrk is not None or elp is not None or hsp is not None):
err = ("mrk, elp and hsp need to be provided as a group (all or "
"none)")
raise ValueError(err)
# Creates a list of dicts of meg channels for raw.info
logger.info('Setting channel info structure...')
ch_names = {}
ch_names['MEG'] = ['MEG %03d' % ch for ch
in range(1, self._sqd_params['n_sens'] + 1)]
ch_names['MISC'] = ['MISC %03d' % ch for ch
in range(1, self._sqd_params['nmiscchan'] + 1)]
ch_names['STIM'] = ['STI 014']
locs = self._sqd_params['sensor_locs']
chan_locs = apply_trans(als_ras_trans, locs[:, :3])
chan_angles = locs[:, 3:]
self.info['chs'] = []
for idx, ch_info in enumerate(zip(ch_names['MEG'], chan_locs,
chan_angles), 1):
ch_name, ch_loc, ch_angles = ch_info
chan_info = {}
chan_info['cal'] = KIT.CALIB_FACTOR
chan_info['logno'] = idx
chan_info['scanno'] = idx
chan_info['range'] = KIT.RANGE
chan_info['unit_mul'] = KIT.UNIT_MUL
chan_info['ch_name'] = ch_name
chan_info['unit'] = FIFF.FIFF_UNIT_T
chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
if idx <= self._sqd_params['nmegchan']:
chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_GRAD
chan_info['kind'] = FIFF.FIFFV_MEG_CH
else:
chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_REF_MAG
chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
chan_info['eeg_loc'] = None
            # create three orthogonal vectors
# ch_angles[0]: theta, ch_angles[1]: phi
ch_angles = np.radians(ch_angles)
x = np.sin(ch_angles[0]) * np.cos(ch_angles[1])
y = np.sin(ch_angles[0]) * np.sin(ch_angles[1])
z = np.cos(ch_angles[0])
vec_z = np.array([x, y, z])
length = linalg.norm(vec_z)
vec_z /= length
vec_x = np.zeros(vec_z.size, dtype=np.float)
if vec_z[1] < vec_z[2]:
if vec_z[0] < vec_z[1]:
vec_x[0] = 1.0
else:
vec_x[1] = 1.0
elif vec_z[0] < vec_z[2]:
vec_x[0] = 1.0
else:
vec_x[2] = 1.0
vec_x -= np.sum(vec_x * vec_z) * vec_z
length = linalg.norm(vec_x)
vec_x /= length
vec_y = np.cross(vec_z, vec_x)
# transform to Neuromag like coordinate space
vecs = np.vstack((vec_x, vec_y, vec_z))
vecs = apply_trans(als_ras_trans, vecs)
chan_info['loc'] = np.vstack((ch_loc, vecs)).ravel()
chan_info['coil_trans'] = _loc_to_trans(chan_info['loc'])
self.info['chs'].append(chan_info)
# label trigger and misc channels
for idy, ch_name in enumerate(ch_names['MISC'] + ch_names['STIM'],
self._sqd_params['n_sens']):
chan_info = {}
chan_info['cal'] = KIT.CALIB_FACTOR
chan_info['logno'] = idy
chan_info['scanno'] = idy
chan_info['range'] = 1.0
chan_info['unit'] = FIFF.F
|
rhennigan/code
|
python/spaceshipTrajectory.py
|
Python
|
gpl-2.0
| 1,346
| 0.013373
|
# PROBLEM 3
#
# Modify the below functions acceleration and
# ship_trajectory to plot the trajectory of a
# spacecraft with the given initial position
# and velocity. Use the Forward Euler Method
# to accomplish this.
#from udacityplots import *
import math
import numpy
import matplotlib
h = 1.0 # s
earth_mass = 5.97e24 # kg
gravitational_constant = 6.67e-11 # N m2 / kg2
def acceleration(spaceship_position):
distance = numpy.linalg.norm(spaceship_position)
direction = - spaceship_position / distance
    acc = gravitational_constant * earth_mass / distance ** 2 * direction
return acc
def ship_trajectory():
num_steps = 13000
    x = numpy.zeros([num_steps + 1, 2]) # m
v = numpy.zeros([num_steps + 1, 2]) # m / s
x[0, 0] = 15e6
x[0, 1] = 1e6
v[0, 0] = 2e3
v[0, 1] = 4e3
for step in range(num_steps):
x[step + 1] = x[step] + h * v[step]
v[step + 1] = v[step] + h * acceleration(x[step])
return x, v
x, v = ship_trajectory()
#@show_plot
def plot_me():
matplotlib.pyplot.plot(x[:, 0], x[:, 1])
matplotlib.pyplot.scatter(0, 0)
matplotlib.pyplot.axis('equal')
axes = matplotlib.pyplot.gca()
axes.set_xlabel('Longitudinal position in m')
axes.set_ylabel('Lateral position in m')
plot_me()
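# Editor's note: illustrative addition by the editor, not part of the original
# exercise file. A single Forward Euler step written out explicitly for the
# initial state used above, with the same update rule as ship_trajectory():
#     x_{n+1} = x_n + h * v_n
#     v_{n+1} = v_n + h * a(x_n)
def _single_euler_step():
    x0 = numpy.array([15e6, 1e6])  # m
    v0 = numpy.array([2e3, 4e3])   # m / s
    x1 = x0 + h * v0
    v1 = v0 + h * acceleration(x0)
    return x1, v1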
|
simone-campagna/invoice
|
tests/unittests/test_db_types.py
|
Python
|
apache-2.0
| 11,568
| 0.005014
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
__all__ = [
'TestInvoiceProgram',
]
import os
import datetime
import unittest
from invoice.database.db_types import Str, StrList, StrTuple, \
Int, IntList, IntTuple, \
Float, FloatList, FloatTuple, \
Date, DateList, DateTuple, \
DateTime, DateTimeList, DateTimeTuple, \
Path, PathList, PathTuple, \
Bool, BoolList, BoolTuple, \
OptionType, BaseSequence
class TestStr(unittest.TestCase):
def test_db_from(self):
self.assertIs(Str.db_from(None), None)
self.assertEqual(Str.db_from("alpha"), "alpha")
def test_db_to(self):
self.assertIs(Str.db_to(None), None)
self.assertEqual(Str.db_to("alpha"), "alpha")
class TestStrList(unittest.TestCase):
def test_db_from(self):
self.assertIs(StrList.db_from(None), None)
self.assertEqual(StrList.db_from("alpha, beta, 10.3, gamma "), ["alpha", "beta", "10.3", "gamma"])
def test_db_to(self):
self.assertIs(StrList.db_to(None), None)
self.assertEqual(StrList.db_to(["alpha", "beta", "10.3", "gamma"]), "alpha,beta,10.3,gamma")
class TestStrTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(StrTuple.db_from(None), None)
self.assertEqual(StrTuple.db_from("alpha, beta, 10.3, gamma "), ("alpha", "beta", "10.3", "gamma"))
def test_db_to(self):
self.assertIs(StrTuple.db_to(None), None)
self.assertEqual(StrTuple.db_to(("alpha", "beta", "10.3", "gamma")), "alpha,beta,10.3,gamma")
class TestInt(unittest.TestCase):
def test_db_from(self):
self.assertIs(Int.db_from(None), None)
self.assertEqual(Int.db_from("10"), 10)
def test_db_to(self):
self.assertIs(Int.db_to(None), None)
self.assertEqual(Int.db_to(10), "10")
class TestIntList(unittest.TestCase):
def test_db_from(self):
self.assertIs(IntList.db_from(None), None)
self.assertEqual(IntList.db_from("10, 20"), [10, 20])
def test_db_to(self):
self.assertIs(IntList.db_to(None), None)
self.assertEqual(IntList.db_to([10, 20]), "10,20")
class TestIntTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(IntTuple.db_from(None), None)
self.assertEqual(IntTuple.db_from("10, 20"), (10, 20))
def test_db_to(self):
self.assertIs(IntTuple.db_to(None), None)
self.assertEqual(IntTuple.db_to((10, 20)), "10,20")
class TestFloat(unittest.TestCase):
def test_db_from(self):
self.assertIs(Float.db_from(None), None)
self.assertEqual(Float.db_from("10.5"), 10.5)
def test_db_to(self):
self.assertIs(Float.db_to(None), None)
self.assertEqual(Float.db_to(10.5), "10.5")
class TestFloatList(unittest.TestCase):
def test_db_from(self):
self.assertIs(FloatList.db_from(None), None)
self.assertEqual(FloatList.db_from("10.5,23.32"), [10.5, 23.32])
def test_db_to(self):
self.assertIs(FloatList.db_to(None), None)
self.assertEqual(FloatList.db_to([10.5, 23.32]), "10.5,23.32")
class TestFloatTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(FloatTuple.db_from(None), None)
self.assertEqual(FloatTuple.db_from("10.5,23.32"), (10.5, 23.32))
def test_db_to(self):
self.assertIs(FloatTuple.db_to(None), None)
self.assertEqual(FloatTuple.db_to((10.5, 23.32)), "10.5,23.32")
class TestDate(unittest.TestCase):
def test_db_from(self):
self.assertIs(Date.db_from(None), None)
self.assertEqual(Date.db_from("2015-01-04"), datetime.date(2015, 1, 4))
def test_db_to(self):
self.assertIs(Date.db_to(None), None)
self.assertEqual(Date.db_to(datetime.date(2015, 1, 4)), "2015-01-04")
class TestDateList(unittest.TestCase):
def test_db_from(self):
self.assertIs(DateList.db_from(None), None)
self.assertEqual(DateList.db_from(" 2015-01-04 , 2014-04-05 "), [datetime.date(2015, 1, 4), datetime.date(2014, 4, 5)])
def test_db_to(self):
self.assertIs(DateList.db_to(None), None)
self.assertEqual(DateList.db_to([datetime.date(2015, 1, 4), datetime.date(2014, 4, 5)]), "2015-01-04,2014-04-05")
class TestDateTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(DateTuple.db_from(None), None)
self.assertEqual(DateTuple.db_from(" 2015-01-04 , 2014-04-05 "), (datetime.date(2015, 1, 4), datetime.date(2014, 4, 5)))
def test_db_to(self):
self.assertIs(DateTuple.db_to(None), None)
self.assertEqual(DateTuple.db_to((datetime.date(2015, 1, 4), datetime.date(2014, 4, 5))), "2015-01-04,2014-04-05")
class TestDateTime(unittest.TestCase):
def test_db_from(self):
self.assertIs(DateTime.db_from(None), None)
self.assertEqual(DateTime.db_from("2015-01-04 13:34:45"), datetime.datetime(2015, 1, 4, 13, 34, 45))
def test_db_to(self):
self.assertIs(DateTime.db_to(None), None)
self.assertEqual(DateTime.db_to(datetime.datetime(2015, 1, 4, 13, 34, 45)), "2015-01-04 13:34:45")
class TestDateTimeList(unittest.TestCase):
def test_db_from(self):
        self.assertIs(DateTimeList.db_from(None), None)
self.assertEqual(DateTimeList.db_from("2015-01-04 13:34:45,2014-04-05 02:22:01"), [datetime.datetime(2015, 1, 4, 13, 34, 45), datetime.datetime(2014, 4, 5, 2, 22, 1)])
def test_db_to(self):
self.assertIs(DateTimeList.db_to(None), None)
        self.assertEqual(DateTimeList.db_to([datetime.datetime(2015, 1, 4, 13, 34, 45), datetime.datetime(2014, 4, 5, 2, 22, 1)]), "2015-01-04 13:34:45,2014-04-05 02:22:01")
class TestDateTimeTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(DateTimeTuple.db_from(None), None)
self.assertEqual(DateTimeTuple.db_from("2015-01-04 13:34:45,2014-04-05 02:22:01"), (datetime.datetime(2015, 1, 4, 13, 34, 45), datetime.datetime(2014, 4, 5, 2, 22, 1)))
def test_db_to(self):
self.assertIs(DateTimeTuple.db_to(None), None)
self.assertEqual(DateTimeTuple.db_to((datetime.datetime(2015, 1, 4, 13, 34, 45), datetime.datetime(2014, 4, 5, 2, 22, 1))), "2015-01-04 13:34:45,2014-04-05 02:22:01")
class TestPath(unittest.TestCase):
def test_db_from(self):
self.assertIs(Path.db_from(None), None)
f = lambda x: os.path.normpath(os.path.abspath(x))
self.assertEqual(Path.db_from("{}".format(f("alpha"))), f("alpha"))
def test_db_to(self):
self.assertIs(Path.db_to(None), None)
f = lambda x: os.path.normpath(os.path.abspath(x))
self.assertEqual(Path.db_to("alpha"), f("alpha"))
class TestPathList(unittest.TestCase):
def test_db_from(self):
self.assertIs(PathList.db_from(None), None)
f = lambda x: os.path.normpath(os.path.abspath(x))
self.assertEqual(PathList.db_from("{},/b/c,{}".format(f("alpha"), f("d/e"))), [f("alpha"), "/b/c", f("d/e")])
def test_db_to(self):
self.assertIs(PathList.db_to(None), None)
f = lambda x: os.path.normpath(os.path.abspath(x))
self.assertEqual(PathList.db_to(["alpha", "/b/c", "d/e"]), "{},/b/c,{}".format(f("alpha"), f("d/e")))
class TestPathTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(PathTuple.db_from(None), None)
f = lambda x: os.path.nor
|
scottclowe/python-continuous-integration
|
.github/workflows/system_info.py
|
Python
|
mit
| 575
| 0
|
"""
Print out some handy system info.
"""
import os
import platform
import sys
print("Build system information")
print()
print("sys.version\t\t", sys.version.split("\n"))
print("os.name\t\t\t", os.name)
print("sys.platform\t\t", sys.platform)
print("platform.system()\t", platform.system())
print("platform.machine()\t", platform.machine())
print("platform.platform()\t", platform.platform())
print("platform.version()\t", platform.version())
print("platform.uname()\t", pla
|
tform.uname())
if sys.platform == "darwin":
print("platform.mac_ver()\t", platform.mac_ver())
|
BriData/DBus
|
dbus-mongo-extractor/tests/test_rollbacks.py
|
Python
|
apache-2.0
| 12,580
| 0.000636
|
# Copyright 2013-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Mongo Connector's behavior when its source MongoDB system is
experiencing a rollback.
"""
import os
import sys
import time
from pymongo.read_preferences import ReadPreference
from pymongo.write_concern import WriteConcern
sys.path[0:0] = [""]
from mongo_connector.doc_managers.doc_manager_simulator import DocManager
from mongo_connector.locking_dict import LockingDict
from mongo_connector.namespace_config import NamespaceConfig
from mongo_connector.oplog_manager import OplogThread
from mongo_connector.test_utils import (assert_soon,
close_client,
ReplicaSet,
STRESS_COUNT)
from mongo_connector.util import retry_until_ok
from tests import unittest
class TestRollbacks(unittest.TestCase):
def tearDown(self):
close_client(self.primary_conn)
close_client(self.secondary_conn)
try:
self.opman.join()
except RuntimeError:
# OplogThread may not have been started
pass
self.repl_set.stop()
def setUp(self):
# Create a new oplog progress file
try:
os.unlink("oplog.timestamp")
except OSError:
pass
open("oplog.timestamp", "w").close()
# Start a replica set
self.repl_set = ReplicaSet().start()
# Connection to the replica set as a whole
self.main_conn = self.repl_set.client()
# Connection to the primary specifically
self.primary_conn = self.repl_set.primary.client()
# Connection to the secondary specifically
self.secondary_conn = self.repl_set.secondary.client(
read_preference=ReadPreference.SECONDARY_PREFERRED)
# Wipe any test data
self.main_conn.drop_database("test")
# Oplog thread
doc_manager = DocManager()
oplog_progress = LockingDict()
self.opman = OplogThread(
primary_client=self.main_conn,
doc_managers=(doc_manager,),
oplog_progress_dict=oplog_progress,
namespace_config=NamespaceConfig(namespace_set=["test.mc"]),
)
def test_single_target(self):
"""Test with a single replication target"""
self.opman.start()
# Insert first document with primary up
self.main_conn["test"]["mc"].insert_one({"i": 0})
self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)
# Make sure the insert is replicated
secondary = self.secondary_conn
assert_soon(lambda: secondary["test"]["mc"].count() == 1,
"first write didn't replicate to secondary")
# Kill the primary
self.repl_set.primary.stop(destroy=False)
# Wait for the secondary to be promoted
assert_soon(lambda: secondary["admin"].command("isMaster")["ismaster"])
# Insert another document. This will be rolled back later
retry_until_ok(self.main_conn["test"]["mc"].insert_one, {"i": 1})
self.assertEqual(secondary["test"]["mc"].count(), 2)
# Wait for replication to doc manager
assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 2,
"not all writes we
|
re replicated to doc manager")
# Kill the new primary
self.repl_set.secondary.stop(destroy=False)
# Start both servers back up
self.repl_set.primary.start()
        primary_admin = self.primary_conn["admin"]
assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
"restarted primary never resumed primary status")
self.repl_set.secondary.start()
assert_soon(lambda: retry_until_ok(secondary.admin.command,
'replSetGetStatus')['myState'] == 2,
"restarted secondary never resumed secondary status")
assert_soon(lambda:
retry_until_ok(self.main_conn.test.mc.find().count) > 0,
"documents not found after primary/secondary restarted")
# Only first document should exist in MongoDB
self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)
# Same case should hold for the doc manager
doc_manager = self.opman.doc_managers[0]
assert_soon(lambda: len(doc_manager._search()) == 1,
'documents never rolled back in doc manager.')
self.assertEqual(doc_manager._search()[0]["i"], 0)
# cleanup
self.opman.join()
def test_many_targets(self):
"""Test with several replication targets"""
# OplogThread has multiple doc managers
doc_managers = [DocManager(), DocManager(), DocManager()]
self.opman.doc_managers = doc_managers
self.opman.start()
# Insert a document into each namespace
self.main_conn["test"]["mc"].insert_one({"i": 0})
self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
# Make sure the insert is replicated
secondary = self.secondary_conn
assert_soon(lambda: secondary["test"]["mc"].count() == 1,
"first write didn't replicate to secondary")
# Kill the primary
self.repl_set.primary.stop(destroy=False)
# Wait for the secondary to be promoted
assert_soon(lambda: secondary.admin.command("isMaster")['ismaster'],
'secondary was never promoted')
# Insert more documents. This will be rolled back later
# Some of these documents will be manually removed from
# certain doc managers, to emulate the effect of certain
# target systems being ahead/behind others
secondary_ids = []
for i in range(1, 10):
secondary_ids.append(
retry_until_ok(self.main_conn["test"]["mc"].insert_one,
{"i": i}).inserted_id)
self.assertEqual(self.secondary_conn["test"]["mc"].count(), 10)
# Wait for replication to the doc managers
def docmans_done():
for dm in self.opman.doc_managers:
if len(dm._search()) != 10:
return False
return True
assert_soon(docmans_done,
"not all writes were replicated to doc managers")
# Remove some documents from the doc managers to simulate
# uneven replication
ts = self.opman.doc_managers[0].get_last_doc()['_ts']
for id in secondary_ids[8:]:
self.opman.doc_managers[1].remove(id, 'test.mc', ts)
for id in secondary_ids[2:]:
self.opman.doc_managers[2].remove(id, 'test.mc', ts)
# Kill the new primary
self.repl_set.secondary.stop(destroy=False)
# Start both servers back up
self.repl_set.primary.start()
primary_admin = self.primary_conn["admin"]
assert_soon(lambda: primary_admin.command("isMaster")['ismaster'],
'restarted primary never resumed primary status')
self.repl_set.secondary.start()
assert_soon(lambda: retry_until_ok(secondary.admin.command,
'replSetGetStatus')['myState'] == 2,
"restarted secondary never resumed secondary status")
assert_soon(lambda:
retry_until_ok(self.primary_conn.test.mc.find().count) > 0,
"documents not found after primary/secondary restarted")
|
mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam
|
PyFoam/Basics/CustomPlotInfo.py
|
Python
|
gpl-2.0
| 5,311
| 0.024666
|
# ICE Revision: $Id$
"""Information about custom plots"""
from PyFoam.Basics.TimeLineCollection import TimeLineCollection
from PyFoam.Basics.FoamFileGenerator import makeString
from PyFoam.RunDictionary.ParsedParameterFile import FoamStringParser,PyFoamParserError
from PyFoam.Error import error
from PyFoam.ThirdParty.six import iteritems
from os import path
def cleanString(data):
if type(data)==str:
if len(data)>0:
if data[0]=='"' and data[-1]=='"':
data=data[1:-1]
elif data in ["true","on","yes"]:
data=True
elif data in ["false","off","no"]:
data=False
return data
def encloseString(data):
if type(data)!=str:
return data
if data.find(' ')<0:
return data
else:
return '"'+data+'"'
class CustomPlotInfo(object):
"""Information about a custom plot"""
nr=1
def __init__(self,raw=None,name=None,enabled=True):
"""@param raw: The raw data. Either a string for the two legacy-formats or a
dictionary for the new format
@param name: Name of the expression (only to be used for the new format)
        @param enabled: Should this plot be actually used?"""
self.nr=CustomPlotInfo.nr
CustomPlotInfo.nr+=1
# Setting sensible default values
self.name="Custom%02d" % self.nr
self.theTitle="Custom %d" % self.nr
if name:
self.name+="_"+name
self.id=name
self.theTitle += " - "+name
else:
self.id=self.name
self.expr=None
self.titles=[]
self.accumulation="first"
self.start=None
self.end=None
self.persist=False
self.raisit=False
self.with_="lines"
self.type="regular";
self.master=None
self.progress=None
self.enabled=enabled
self.xlabel="Time [s]"
self.ylabel=None
self.gnuplotCommands=[]
# Legacy format
if raw==None:
self.expr=""
elif type(raw)==str:
if raw[0]=='{':
data=eval(raw)
self.expr=data["expr"]
if "name" in data:
self.name+="_"+data["name"]
self.name=self.name.replace(" ","_").replace(path.sep,"Slash")
self.theTitle+=" - "+data["name"]
if "titles" in data:
self.titles=data["titles"]
for o in ["alternateAxis","logscale","with","ylabel","y2label"]:
if o=="with":
use="with_"
else:
use=o
if o in data:
self.set(use,data[o])
if "accumulation" in data:
self.accumulation=data["accumulation"]
else:
self.expr=raw
# New format
else:
for k in raw:
data=raw[k]
if type(data)==str:
data=cleanString(data)
elif type(data)==list:
data=[cleanString(d) for d in data]
if k=="with":
k="with_"
self.set(k,data)
# Sanity check the data
if self.accumulation not in TimeLineCollection.possibleAccumulations:
error("Accumulation",self.accumulation,"not in the possible values",TimeLineCollection.possibleAccumulations)
if self.expr==None:
error("No expression set by data",raw)
def set(self,key,value):
setattr(self,key,value)
def __str__(self):
return makeString({self.id:self.getDict(wrapStrings=True)})
def getDict(self,wrapStrings=False):
result={}
for d in dir(self):
if (type(getattr(self,d)) in [str,bool,int,list,dict,float]) and d.find("__")<0:
if d=="id" or d=="nr":
pass
else:
key=d.replace("_","")
val=getattr(self,d)
if wrapStrings:
if type(val)==str:
val=encloseString(val)
elif type(val)==list:
val=[encloseString(v) for v in val]
result[key]=val
return result
def readCustomPlotInfo(rawData,useName=None):
"""Determines which of the three possible formats for custom-plotting is used
and returns a list of CustomPlotInfo-objects
@param rawData: a string that contains the raw data"""
info=[]
try:
data=FoamStringParser(rawData,
duplicateCheck=True,
duplicateFail=True)
for k,d in iteritems(data.data):
info.append(CustomPlotInfo(d,name=k))
except PyFoamParserError:
for i,l in enumerate(rawData.split('\n')):
if len(l)>0:
name=useName
if i>0 and name!=None:
name+=("_%d" % i)
info.append(CustomPlotInfo(l,name=name))
return info
def resetCustomCounter():
"""Reset the counter. Use with care"""
CustomPlotInfo.nr=1
# Should work with Python3 and Python2
|
aspaas/ion
|
test/functional/signrawtransactions.py
|
Python
|
mit
| 7,930
| 0.003153
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction RPC."""
from test_framework.test_framework import IonTestFramework
from test_framework.util import *
class SignRawTransactionsTest(IonTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def successful_signing_test(self):
"""Create and sign a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA']
inputs = [
# Valid pay-to-pubkey scripts
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
{'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0,
'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'},
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
# 1) The transaction has a complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], True)
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
        # Check that signrawtransaction doesn't blow up on garbage merge attempts
dummyTxInconsistent = self.nodes[0].createrawtransaction([inputs[0]], outputs)
rawTxUnsigned = self.nodes[0].signrawtransaction(rawTx + dummyTxInconsistent, inputs)
assert 'complete' in rawTxUnsigned
assert_equal(rawTxUnsigned['complete'], False)
# Check that signrawtransaction properly merges unsigned and signed txn, even with garbage in the middle
rawTxSigned2 = self.nodes[0].signrawtransaction(rawTxUnsigned["hex"] + dummyTxInconsistent + rawTxSigned["hex"], inputs)
assert 'complete' in rawTxSigned2
assert_equal(rawTxSigned2['complete'], True)
assert 'errors' not in rawTxSigned2
def script_verification_error_test(self):
"""Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
# Make sure decoderawtransaction is at least marginally sane
decodedRawTx = self.nodes[0].decoderawtransaction(rawTx)
for i, inp in enumerate(inputs):
assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"])
assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])
# Make sure decoderawtransaction throws if there is extra data
assert_raises(JSONRPCException, self.nodes[0].decoderawtransaction, rawTx + "00")
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
# 3) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
assert not rawTxSigned['errors'][0]['witness']
# Now test signing failure for transaction with input witnesses
p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"
rawTxSigned = self.nodes[0].signrawtransaction(p2wpkh_raw_tx)
# 7) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 8) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 9) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# Non-empty witness checked here
assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"])
assert not rawTxSigned['errors'][0]['witness']
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
|
HybridF5/tempest_debug
|
tempest/api/identity/admin/v3/test_services.py
|
Python
|
apache-2.0
| 4,083
| 0
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
class ServicesTestJSON(base.BaseIdentityV3AdminTest):
    def _del_service(self, service_id):
# Used for deleting the services created in this class
self.services_client.delete_service(service_id)
# Checking whether service is deleted successfully
        self.assertRaises(lib_exc.NotFound, self.services_client.show_service,
service_id)
@test.attr(type='smoke')
@test.idempotent_id('5193aad5-bcb7-411d-85b0-b3b61b96ef06')
def test_create_update_get_service(self):
# Creating a Service
name = data_utils.rand_name('service')
serv_type = data_utils.rand_name('type')
desc = data_utils.rand_name('description')
create_service = self.services_client.create_service(
type=serv_type, name=name, description=desc)['service']
self.addCleanup(self._del_service, create_service['id'])
self.assertIsNotNone(create_service['id'])
# Verifying response body of create service
expected_data = {'name': name, 'type': serv_type, 'description': desc}
self.assertDictContainsSubset(expected_data, create_service)
# Update description
s_id = create_service['id']
resp1_desc = create_service['description']
s_desc2 = data_utils.rand_name('desc2')
update_service = self.services_client.update_service(
s_id, description=s_desc2)['service']
resp2_desc = update_service['description']
self.assertNotEqual(resp1_desc, resp2_desc)
# Get service
fetched_service = self.services_client.show_service(s_id)['service']
resp3_desc = fetched_service['description']
self.assertEqual(resp2_desc, resp3_desc)
self.assertDictContainsSubset(update_service, fetched_service)
@test.idempotent_id('d1dcb1a1-2b6b-4da8-bbb8-5532ef6e8269')
def test_create_service_without_description(self):
# Create a service only with name and type
name = data_utils.rand_name('service')
serv_type = data_utils.rand_name('type')
service = self.services_client.create_service(
type=serv_type, name=name)['service']
self.addCleanup(self.services_client.delete_service, service['id'])
self.assertIn('id', service)
expected_data = {'name': name, 'type': serv_type}
self.assertDictContainsSubset(expected_data, service)
@test.idempotent_id('e55908e8-360e-439e-8719-c3230a3e179e')
def test_list_services(self):
# Create, List, Verify and Delete Services
service_ids = list()
for _ in range(3):
name = data_utils.rand_name('service')
serv_type = data_utils.rand_name('type')
create_service = self.services_client.create_service(
type=serv_type, name=name)['service']
self.addCleanup(self.services_client.delete_service,
create_service['id'])
service_ids.append(create_service['id'])
# List and Verify Services
services = self.services_client.list_services()['services']
fetched_ids = [service['id'] for service in services]
found = [s for s in fetched_ids if s in service_ids]
self.assertEqual(len(found), len(service_ids))
|
mwiebe/dynd-python
|
dynd/nd/test/test_ctypes_interop.py
|
Python
|
bsd-2-clause
| 2,910
| 0.002405
|
import sys
import unittest
from dynd import nd, ndt
import ctypes
# ToDo: Reenable this with a Cython interface.
#
#class TestCTypesDTypeInterop(unittest.TestCase):
# def test_type_from_ctype_typeobject(self):
# self.assertEqual(ndt.int8, ndt.type(ctypes.c_int8))
# self.assertEqual(ndt.int16, ndt.type(ctypes.c_int16))
# self.assertEqual(ndt.int32, ndt.type(ctypes.c_int32))
# self.assertEqual(ndt.int64, ndt.type(ctypes.c_int64))
# self.assertEqual(ndt.uint8, ndt.type(ctypes.c_uint8))
# self.assertEqual(ndt.uint16, ndt.type(ctypes.c_uint16))
# self.assertEqual(ndt.uint32, ndt.type(ctypes.c_uint32))
# self.assertEqual(ndt.uint64, ndt.type(ctypes.c_uint64))
# self.assertEqual(ndt.uint32, ndt.type(ctypes.c_uint32))
# self.assertEqual(ndt.uint64, ndt.type(ctypes.c_uint64))
# self.assertEqual(ndt.float32, ndt.type(ctypes.c_float))
# self.assertEqual(ndt.float64, ndt.type(ctypes.c_double))
#
# def test_type_from_annotated_ctype_typeobject(self):
# self.assertEqual(ndt.bool, ndt.type(ndt.ctypes.c_dynd_bool))
# self.assertEqual(ndt.complex_float32, ndt.type(ndt.ctypes.c_complex_float32))
# self.assertEqual(ndt.complex_float64, ndt.type(ndt.ctypes.c_complex_float64))
# self.assertEqual(ndt.complex_float32, ndt.type(ndt.ctypes.c_complex64))
# self.assertEqual(ndt.complex_float64, ndt.type(ndt.ctypes.c_complex128))
#
# def test_type_from_ctype_struct(self):
# class POINT(ctypes.Structure):
# _fields_ = [('x', ctypes.c_int32), ('y', ctypes.c_int32)]
# self.assertEqual(ndt.make_struct(
# [ndt.int32, ndt.int32],['x', 'y']),
# ndt.type(POINT))
# class DATA(ctypes.Structure):
# _fields_ = [
# ('pos', POINT),
# ('flags', ctypes.c_int8),
# ('size', ctypes.c_float),
# ('vel', POINT)
# ]
# self.assertEqual(ndt.make_struct([POINT, ndt.int8, ndt.float32, POINT],
# ['pos', 'flags', 'size', 'vel']),
# ndt.type(DATA))
#
# def test_type_from_ctypes_carray(self):
# self.assertEqual(ndt.make_fixed_dim(10, ndt.int32),
# ndt.type(ctypes.c_int32 * 10))
# self.assertEqual(ndt.make_fixed_dim((10, 3), ndt.int32),
# ndt.type((ctypes.c_int32 * 3) * 10))
# self.assertEqual(ndt.make_fixed_dim((10, 3, 4), ndt.int32),
# ndt.type(((ctypes.c_int32 * 4) * 3) * 10))
#
# class POINT(ctypes.Structure):
# _fields_ = [('x', ctypes.c_int32), ('y', ctypes.c_int32)]
# self.assertEqual(ndt.make_fixed_dim(10, ndt.type(POINT)),
# ndt.type(POINT * 10))
#
#if __name__ == '__main__':
# unittest.main(verbosity=2)
|
ramramps/mkdocs
|
mkdocs/relative_path_ext.py
|
Python
|
bsd-2-clause
| 4,804
| 0
|
"""
# Relative Path Markdown Extension
During the MkDocs build we rewrite URLs that link to local
Markdown or media files. Using the following pages configuration
we can look at how the output is changed.
pages:
- ['index.md']
- ['tutorial/install.md']
- ['tutorial/intro.md']
## Markdown URLs
When linking from `install.md` to `intro.md` the link would
simply be `[intro](intro.md)`. However, when we build
`install.md` we place it in a directory to create nicer URLs.
This means that the path to `intro.md` becomes `../intro/`
## Media URLs
To make it easier to work with media files and store them all
under one directory we re-write those to all be based on the
root. So, with the following markdown to add an image.

The output would depend on the location of the Markdown file it
was added too.
Source file | Generated Path | Image Path |
------------------- | ----------------- | ---------------------------- |
index.md | / | ./img/initial-layout.png |
tutorial/install.md | tutorial/install/ | ../img/initial-layout.png |
tutorial/intro.md | tutorial/intro/ | ../../img/initial-layout.png |
"""
from __future__ import unicode_literals
import logging
import os
from markdown.extensions import Extension
from markdown.treeprocessors import Treeprocessor
from markdown.util import AMP_SUBSTITUTE
from mkdocs import utils
from mkdocs.exceptions import MarkdownNotFound
log = logging.getLogger(__name__)
def _iter(node):
# TODO: Remove when dropping Python 2.6. Replace this
    # function call with node.iter()
return [node] + node.findall('.//*')
def path_to_url(url, nav, strict):
scheme, netloc, path, params, query, fragment = (
utils.urlparse(url))
if scheme or netloc or not path or AMP_SUBSTITUTE in url:
# Ignore URLs unless they are a relative link to a markdown file.
        # AMP_SUBSTITUTE is used internally by Markdown only for email, which is
# not a relative link. As urlparse errors on them, skip explicitly
return url
if nav and not utils.is_markdown_file(path):
path = utils.create_relative_media_url(nav, path)
elif nav:
# If the site navigation has been provided, then validate
# the internal hyperlink, making sure the target actually exists.
target_file = nav.file_context.make_absolute(path)
if target_file.startswith(os.path.sep):
target_file = target_file[1:]
if target_file not in nav.source_files:
source_file = nav.file_context.current_file
msg = (
'The page "%s" contained a hyperlink to "%s" which '
'is not listed in the "pages" configuration.'
) % (source_file, target_file)
# In strict mode raise an error at this point.
if strict:
raise MarkdownNotFound(msg)
# Otherwise, when strict mode isn't enabled, log a warning
# to the user and leave the URL as it is.
log.warning(msg)
return url
path = utils.get_url_path(target_file, nav.use_directory_urls)
path = nav.url_context.make_relative(path)
else:
path = utils.get_url_path(path).lstrip('/')
# Convert the .md hyperlink to a relative hyperlink to the HTML page.
fragments = (scheme, netloc, path, params, query, fragment)
url = utils.urlunparse(fragments)
return url
class RelativePathTreeprocessor(Treeprocessor):
def __init__(self, site_navigation, strict):
self.site_navigation = site_navigation
self.strict = strict
def run(self, root):
"""Update urls on anchors and images to make them relative
Iterates through the full document tree looking for specific
tags and then makes them relative based on the site navigation
"""
for element in _iter(root):
if element.tag == 'a':
key = 'href'
elif element.tag == 'img':
key = 'src'
else:
continue
url = element.get(key)
new_url = path_to_url(url, self.site_navigation, self.strict)
element.set(key, new_url)
return root
class RelativePathExtension(Extension):
"""
The Extension class is what we pass to markdown, it then
registers the Treeprocessor.
"""
def __init__(self, site_navigation, strict):
self.site_navigation = site_navigation
self.strict = strict
def extendMarkdown(self, md, md_globals):
relpath = RelativePathTreeprocessor(self.site_navigation, self.strict)
md.treeprocessors.add("relpath", relpath, "_end")
|
jhamrick/nbgrader
|
nbgrader/alembic/versions/50a4d84c131a_add_kernelspecs.py
|
Python
|
bsd-3-clause
| 506
| 0
|
"""add kernelspecs
Revision ID: 50a4d84c131a
Revises: b6d005d67074
Create Date: 2017-06-01 16:48:02.243764
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '50a4d84c131a'
down_revision = 'b6d005d67074'
branch_labels = None
depends_on = None
def upgrade():
    op.add_column('notebook', sa.Column(
        'kernelspec', sa.String(1024), nullable=False,
        server_default='{}'))
def downgrade():
op.drop_column('notebook', 'kernelspec')
|
benoitc/tproxy
|
tproxy/util.py
|
Python
|
mit
| 3,968
| 0.007056
|
# -*- coding: utf-8 -
#
# This file is part of tproxy released under the MIT license.
# See the NOTICE for more information.
try:
import ctypes
except MemoryError:
# selinux execmem denial
# https://bugzilla.redhat.com/show_bug.cgi?id=488396
ctypes = None
except ImportError:
    # Python on Solaris compiled with Sun Studio doesn't have ctypes
ctypes = None
import fcntl
import os
import random
import resource
import socket
import sys
# add support for gevent 1.0
from gevent import version_info
if version_info[0] > 0:
from gevent.os import fork
else:
from gevent.hub import fork
try:
from setproctitle import setproctitle
def _setproctitle(title):
setproctitle("tproxy: %s" % title)
except ImportError:
def _setproctitle(title):
return
MAXFD = 1024
if (hasattr(os, "devnull")):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
def is_ipv6(addr):
try:
socket.inet_pton(socket.AF_INET6, addr)
except socket.error: # not a valid address
return False
return True
def parse_address(netloc, default_port=5000):
if isinstance(netloc, tuple):
return netloc
# get host
if '[' in netloc and ']' in netloc:
host = netloc.split(']')[0][1:].lower()
elif ':' in netloc:
host = netloc.split(':')[0].lower()
elif netloc == "":
host = "0.0.0.0"
else:
host = netloc.lower()
#get port
netloc = netloc.split(']')[-1]
if ":" in netloc:
port = netloc.split(':', 1)[1]
if not port.isdigit():
raise RuntimeError("%r is not a valid port number." % port)
port = int(port)
else:
port = default_port
return (host, port)
def set_owner_process(uid,gid):
""" set user and group of workers processes """
if gid:
try:
os.setgid(gid)
except OverflowError:
if not ctypes:
raise
# versions of python < 2.6.2 don't manage unsigned int for
# groups like on osx or fedora
os.setgid(-ctypes.c_int(-gid).value)
if uid:
os.setuid(uid)
def chown(path, uid, gid):
try:
os.chown(path, uid, gid)
except OverflowError:
if not ctypes:
raise
os.chown(path, uid, -ctypes.c_int(-gid).value)
def get_maxfd():
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = MAXFD
return maxfd
def close_on_exec(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def set_non_blocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def daemonize(close=False):
"""\
Standard daemonization of a process.
http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16
"""
if not 'TPROXY_FD' in os.environ:
try:
if fork():
os._exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %s\n" % str(e))
sys.exit(1)
os.setsid()
try:
if fork():
os._exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %s\n" % str(e))
sys.exit(1)
os.umask(0)
if close:
maxfd = get_maxfd()
# Iterate through and close all file descriptors.
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError:
# ERROR, fd wasn't open to begin with (ignored)
pass
os.open(REDIRECT_TO, os.O_RDWR)
os.dup2(0, 1)
os.dup2(0, 2)
def seed():
try:
random.seed(os.urandom(64))
except NotImplementedError:
random.seed(random.random())
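# Editor's usage sketch (not part of tproxy): a quick self-check of the address
# parsing rules implemented by parse_address() above.
if __name__ == '__main__':
    assert parse_address("127.0.0.1:8080") == ("127.0.0.1", 8080)
    assert parse_address("[::1]:9000") == ("::1", 9000)
    assert parse_address("", default_port=5000) == ("0.0.0.0", 5000)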
|
paninetworks/neutron
|
neutron/db/ipam_non_pluggable_backend.py
|
Python
|
apache-2.0
| 22,516
| 0.000133
|
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_db import exception as db_exc
from oslo_log import log as logging
from sqlalchemy import and_
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.db import ipam_backend_mixin
from neutron.db import models_v2
from neutron.ipam import requests as ipam_req
from neutron.ipam import subnet_alloc
LOG = logging.getLogger(__name__)
class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
@staticmethod
def _generate_ip(context, subnets):
try:
return IpamNonPluggableBackend._try_generate_ip(context, subnets)
except n_exc.IpAddressGenerationFailure:
            IpamNonPluggableBackend._rebuild_availability_ranges(context,
subnets)
return IpamNonPluggableBackend._try_generate_ip(context, subnets)
@staticmethod
def _try_generate_ip(context, subnets):
"""Generate an IP address.
The IP address will be generated from one of the subnets defined on
the network.
"""
range_qry = context.session.query(
models_v2.IPAvailabilityRange).join(
models_v2.IPAllocationPool).with_lockmode('update')
for subnet in subnets:
ip_range = range_qry.filter_by(subnet_id=subnet['id']).first()
if not ip_range:
LOG.debug("All IPs from subnet %(subnet_id)s (%(cidr)s) "
"allocated",
{'subnet_id': subnet['id'],
'cidr': subnet['cidr']})
continue
ip_address = ip_range['first_ip']
if ip_range['first_ip'] == ip_range['last_ip']:
# No more free indices on subnet => delete
LOG.debug("No more free IP's in slice. Deleting "
"allocation pool.")
context.session.delete(ip_range)
else:
# increment the first free
new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
ip_range['first_ip'] = new_first_ip
LOG.debug("Allocated IP - %(ip_address)s from %(first_ip)s "
"to %(last_ip)s",
{'ip_address': ip_address,
'first_ip': ip_address,
'last_ip': ip_range['last_ip']})
return {'ip_address': ip_address,
'subnet_id': subnet['id']}
raise n_exc.IpAddressGenerationFailure(net_id=subnets[0]['network_id'])
@staticmethod
def _rebuild_availability_ranges(context, subnets):
"""Rebuild availability ranges.
This method is called only when there's no more IP available or by
_update_subnet_allocation_pools. Calling
_update_subnet_allocation_pools before calling this function deletes
the IPAllocationPools associated with the subnet that is updating,
which will result in deleting the IPAvailabilityRange too.
"""
ip_qry = context.session.query(
models_v2.IPAllocation).with_lockmode('update')
# PostgreSQL does not support select...for update with an outer join.
# No join is needed here.
pool_qry = context.session.query(
models_v2.IPAllocationPool).options(
orm.noload('available_ranges')).with_lockmode('update')
for subnet in sorted(subnets):
LOG.debug("Rebuilding availability ranges for subnet %s",
subnet)
# Create a set of all currently allocated addresses
ip_qry_results = ip_qry.filter_by(subnet_id=subnet['id'])
allocations = netaddr.IPSet([netaddr.IPAddress(i['ip_address'])
for i in ip_qry_results])
for pool in pool_qry.filter_by(subnet_id=subnet['id']):
# Create a set of all addresses in the pool
poolset = netaddr.IPSet(netaddr.IPRange(pool['first_ip'],
pool['last_ip']))
# Use set difference to find free addresses in the pool
available = poolset - allocations
# Generator compacts an ip set into contiguous ranges
def ipset_to_ranges(ipset):
first, last = None, None
for cidr in ipset.iter_cidrs():
if last and last + 1 != cidr.first:
yield netaddr.IPRange(first, last)
first = None
first, last = first if first else cidr.first, cidr.last
if first:
yield netaddr.IPRange(first, last)
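                # Editor's note (illustrative, not part of neutron): e.g. an IPSet
                # of {10.0.0.2, 10.0.0.3, 10.0.0.10} compacts to the two ranges
                # 10.0.0.2-10.0.0.3 and 10.0.0.10-10.0.0.10.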
# Write the ranges to the db
for ip_range in ipset_to_ranges(available):
available_range = models_v2.IPAvailabilityRange(
allocation_pool_id=pool['id'],
first_ip=str(netaddr.IPAddress(ip_range.first)),
last_ip=str(netaddr.IPAddress(ip_range.last)))
context.session.add(available_range)
@staticmethod
def _allocate_specific_ip(context, subnet_id, ip_address):
"""Allocate a specific IP address on the subnet."""
ip = int(netaddr.IPAddress(ip_address))
range_qry = context.session.query(
models_v2.IPAvailabilityRange).join(
models_v2.IPAllocationPool).with_lockmode('update')
results = range_qry.filter_by(subnet_id=subnet_id)
for ip_range in results:
first = int(netaddr.IPAddress(ip_range['first_ip']))
last = int(netaddr.IPAddress(ip_range['last_ip']))
if first <= ip <= last:
if first == last:
context.session.delete(ip_range)
return
elif first == ip:
new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
ip_range['first_ip'] = new_first_ip
return
elif last == ip:
new_last_ip = str(netaddr.IPAddress(ip_address) - 1)
ip_range['last_ip'] = new_last_ip
return
else:
# Adjust the original range to end before ip_address
old_last_ip = ip_range['last_ip']
new_last_ip = str(netaddr.IPAddress(ip_address) - 1)
ip_range['last_ip'] = new_last_ip
# Create a new second range for after ip_address
new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
new_ip_range = models_v2.IPAvailabilityRange(
allocation_pool_id=ip_range['allocation_pool_id'],
first_ip=new_first_ip,
last_ip=old_last_ip)
context.session.add(new_ip_range)
return
@staticmethod
def _check_unique_ip(context, network_id, subnet_id, ip_address):
"""Validate that the IP address on the subnet is not in use."""
ip_qry = context.session.query(models_v2.IPAllocation)
try:
ip_qry.filter_by(network_id=network_id,
subnet_id=subnet_id,
|
KoehlerSB747/sd-tools
|
src/main/python/util/StatsAccumulator.py
|
Python
|
apache-2.0
| 5,869
| 0.002897
|
#
# Copyright 2008-2015 Semantic Discovery, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import math
from threading import Lock
class StatsAccumulator:
'''
A low-memory helper class to collect statistical samples and provide
summary statistics.
'''
def __init__(self, label='', other=None):
self._modlock = Lock()
self.clear(label)
if not other == None:
if not label == '':
self._label = other._label
self._n = other._n
self._minimum = other._minimum
self._maximum = other._maximum
self._sum = other._sum
self._sos = other._sos
@property
def label(self):
return self._label
@label.setter
def label(self, val):
self._label = val
@property
def n(self):
return self._n
@property
def minimum(self):
return self._minimum
@property
def maximum(self):
return self._maximum
@property
def sum(self):
return self._sum
@property
def sumOfSquares(self):
return self._sos
@property
def mean(self):
return self.getMean()
@property
def standardDeviation(self):
        return self.getStandardDeviation()
@property
def variance(self):
return self.getVariance()
def clear(self, label=''):
self._modlock.acquire()
try:
self._label = label
self._n = 0
self._minimum = 0.0
self._maximum = 0.0
self._sum = 0.0
self._sos = 0.0
finally:
self._modlock.release()
    def initialize(self, label='', n=0, minimum=0, maximum=0, mean=0, stddev=0, summaryInfo=None):
'''
Initialize with the given values, preferring existing values from the dictionary.
'''
if summaryInfo is not None:
if 'label' in summaryInfo:
label = summaryInfo['label']
if 'n' in summaryInfo:
n = summaryInfo['n']
if 'minimum' in summaryInfo:
minimum = summaryInfo['minimum']
if 'maximum' in summaryInfo:
maximum = summaryInfo['maximum']
if 'mean' in summaryInfo:
mean = summaryInfo['mean']
if 'stddev' in summaryInfo:
stddev = summaryInfo['stddev']
self._modlock.acquire()
try:
self._label = label
self._n = n
self._minimum = minimum
self._maximum = maximum
self._sum = mean * n
self._sos = 0 if n == 0 else stddev * stddev * (n - 1.0) + self._sum * self._sum / n
finally:
self._modlock.release()
def summaryInfo(self):
'''
Get a dictionary containing a summary of this instance's information.
'''
result = {
'label': self.label,
'n': self.n,
'minimum': self.minimum,
'maximum': self.maximum,
'mean': self.mean,
'stddev': self.standardDeviation
}
return result
def __str__(self):
return json.dumps(self.summaryInfo(), sort_keys=True)
def add(self, *values):
for value in values:
self._doAdd(value)
def _doAdd(self, value):
self._modlock.acquire()
try:
if self._n == 0:
self._minimum = value
self._maximum = value
else:
if value < self._minimum:
self._minimum = value
if value > self._maximum:
self._maximum = value
self._n += 1
self._sos += (value * value)
self._sum += value
finally:
self._modlock.release()
def getMean(self):
return 0 if self._n == 0 else self._sum / self._n
def getStandardDeviation(self):
return 0 if self._n < 2 else math.sqrt(self.variance)
def getVariance(self):
return 0 if self._n < 2 else (1.0 / (self._n - 1.0)) * (self._sos - (1.0 / self._n) * self._sum * self._sum)
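    # Editor's note: getVariance() applies the identity
    # var = (sum(x^2) - (sum(x))^2 / n) / (n - 1),
    # i.e. the unbiased sample variance computed from the running sums kept by add().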
@staticmethod
def combine(label, *statsAccumulators):
'''
Create a new statsAccumulator as if it had accumulated all data from
the given list of stats accumulators.
'''
result = StatsAccumulator(label)
for stats in statsAccumulators:
result.incorporate(stats)
return result
def incorporate(self, other):
'''
Incorporate the other statsAccumulator's data into this as if this had
accumulated the other's along with its own.
'''
if other is None:
return
self._modlock.acquire()
try:
if self._n == 0:
self._minimum = other._minimum
self._maximum = other._maximum
else:
if other._minimum < self._minimum:
self._minimum = other._minimum
if other._maximum > self._maximum:
self._maximum = other._maximum
self._n += other._n
self._sos += other._sos
self._sum += other._sum
finally:
self._modlock.release()
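# Editor's usage sketch (not part of the original module): exercising the API above.
if __name__ == '__main__':
    acc = StatsAccumulator('latency_ms')
    acc.add(12.0, 15.5, 9.2, 20.1)
    other = StatsAccumulator('latency_ms')
    other.add(11.0, 14.0)
    combined = StatsAccumulator.combine('latency_ms', acc, other)
    print(combined)  # JSON summary with n, minimum, maximum, mean and stddev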
|
ahankinson/pybagit
|
pybagit/multichecksum.py
|
Python
|
mit
| 4,668
| 0.003856
|
#!/usr/bin/env python
__author__ = "Andrew Hankinson (andrew.hankinson@mail.mcgill.ca)"
__version__ = "1.5"
__date__ = "2011"
__copyright__ = "Creative Commons Attribution"
__license__ = """The MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
import multiprocessing
from optparse import OptionParser
import os
import sys
import hashlib
import codecs
import re
from pybagit.exceptions import *
# declare a default hashalgorithm
HASHALG = 'sha1'
ENCODING = "utf-8"
def write_manifest(datadir, encoding, update=False):
bag_root = os.path.split(os.path.abspath(datadir))[0]
manifest_file = os.path.join(bag_root, "manifest-{0}.txt".format(HASHALG))
checksums = dict()
files_to_checksum = set(dirwalk(datadir))
if update and os.path.isfile(manifest_file):
for line in codecs.open(manifest_file, 'rb', encoding):
checksum, file_ = line.strip().split(' ', 1)
full_file = os.path.join(bag_root, file_)
if full_file in files_to_checksum:
files_to_checksum.remove(full_file)
checksums[os.path.join(bag_root, file_)] = checksum
p = multiprocessing.Pool(processes=multiprocessing.cpu_count())
result = p.map_async(csumfile, files_to_checksum)
checksums.update((k, v) for v, k in result.get())
p.close()
p.join()
mfile = codecs.open(manifest_file, 'wb', encoding)
for file_, checksum in sorted(checksums.iteritems()):
rp = os.path.relpath(file_, bag_root)
fl = ensure_unix_pathname(rp)
mfile.write(u"{0} {1}\n".format(checksum, fl))
mfile.close()
def dirwalk(datadir):
datafiles = []
for dirpath, dirnames, filenames in os.walk(u"{0}".format(datadir)):
for fn in filenames:
datafiles.append(os.path.join(dirpath, fn))
return datafiles
def csumfile(filename):
""" Based on
http://abstracthack.wordpress.com/2007/10/19/calculating-md5-checksum/
"""
hashalg = getattr(hashlib, HASHALG)() # == 'hashlib.md5' or 'hashlib.sha1'
blocksize = 0x10000
def __upd(m, data):
m.update(data)
return m
fd = open(filename, 'rb')
try:
contents = iter(lambda: fd.read(blocksize), "")
m = reduce(__upd, contents, hashalg)
finally:
fd.close()
return (m.hexdigest(), filename)
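# Editor's illustrative equivalent (not part of pybagit): the reduce() above just
# feeds the file to the hash in fixed-size blocks; the same thing spelled out as a
# plain loop looks like this.
def _csumfile_loop_sketch(filename):
    m = getattr(hashlib, HASHALG)()
    with open(filename, 'rb') as fd:
        for block in iter(lambda: fd.read(0x10000), b''):
            m.update(block)
    return (m.hexdigest(), filename)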
def ensure_unix_pathname(pathname):
# it's only windows we have to worry about
if sys.platform != "win32":
return pathname
replace = re.compile(r"\\", re.UNICODE)
fnm = re.sub(replace, "/", pathname)
return fnm
if __name__ == "__main__":
parser = OptionParser()
usage = "%prog [options] arg1 arg2"
parser.add_option("-a", "--algorithm", action="store", help="checksum algorithm to use (sha1|md5)")
parser.add_option("-c", "--encoding", action="store", help="File encoding to write manifest")
parser.add_option("-u", "--update", action="store_true", help="Only update new/removed files")
(options, args) = parser.parse_args()
if options.algorithm:
if not options.algorithm in ('md5', 'sha1'):
raise BagCheckSumNotValid('You must specify either "md5" or "sha1" as the checksum algorithm')
HASHALG = options.algorithm
if options.encoding:
ENCODING = options.encoding
if len(args) < 1:
parser.error("You must specify a data directory")
write_manifest(args[0], ENCODING, update=options.update)
|
jetyang2005/elastalert
|
elastalert/alerts.py
|
Python
|
apache-2.0
| 55,913
| 0.002755
|
# -*- coding: utf-8 -*-
import copy
import datetime
import json
import logging
import subprocess
import sys
import warnings
from email.mime.text import MIMEText
from email.utils import formatdate
from smtplib import SMTP
from smtplib import SMTP_SSL
from smtplib import SMTPAuthenticationError
from smtplib import SMTPException
from socket import error
import boto.sns as sns
import requests
import stomp
from exotel import Exotel
from jira.client import JIRA
from jira.exceptions import JIRAError
from requests.exceptions import RequestException
from staticconf.loader import yaml_loader
from texttable import Texttable
from twilio import TwilioRestException
from twilio.rest import TwilioRestClient
from util import EAException
from util import elastalert_logger
from util import lookup_es_key
from util import pretty_ts
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
return json.JSONEncoder.default(self, obj)
class BasicMatchString(object):
""" Creates a string containing fields in match for the given rule. """
def __init__(self, rule, match):
self.rule = rule
self.match = match
def _ensure_new_line(self):
while self.text[-2:] != '\n\n':
self.text += '\n'
def _add_custom_alert_text(self):
missing = '<MISSING VALUE>'
alert_text = unicode(self.rule.get('alert_text', ''))
if 'alert_text_args' in self.rule:
alert_text_args = self.rule.get('alert_text_args')
alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args]
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
            # as an es result key, since it would have been matched in the lookup_es_key call above
for i in xrange(len(alert_text_values)):
if alert_text_values[i] is None:
alert_value = self.rule.get(alert_text_args[i])
if alert_value:
                        alert_text_values[i] = alert_value
alert_text_values = [missing if val is None else val for val in alert_text_values]
alert_text = alert_text.format(*alert_text_values)
elif 'alert_text_kw' in self.rule:
kw = {}
for name, kw_name in self.rule.get('alert_text_kw').items():
val = lookup_es_key(self.match, name)
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
if val is None:
val = self.rule.get(name)
kw[kw_name] = missing if val is None else val
alert_text = alert_text.format(**kw)
self.text += alert_text
def _add_rule_text(self):
self.text += self.rule['type'].get_match_str(self.match)
def _add_top_counts(self):
for key, counts in self.match.items():
if key.startswith('top_events_'):
self.text += '%s:\n' % (key[11:])
top_events = counts.items()
if not top_events:
self.text += 'No events found.\n'
else:
top_events.sort(key=lambda x: x[1], reverse=True)
for term, count in top_events:
self.text += '%s: %s\n' % (term, count)
self.text += '\n'
def _add_match_items(self):
match_items = self.match.items()
match_items.sort(key=lambda x: x[0])
for key, value in match_items:
if key.startswith('top_events_'):
continue
value_str = unicode(value)
if type(value) in [list, dict]:
try:
value_str = self._pretty_print_as_json(value)
except TypeError:
# Non serializable object, fallback to str
pass
self.text += '%s: %s\n' % (key, value_str)
def _pretty_print_as_json(self, blob):
try:
return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False)
except UnicodeDecodeError:
# This blob contains non-unicode, so lets pretend it's Latin-1 to show something
return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False)
def __str__(self):
self.text = ''
if 'alert_text' not in self.rule:
self.text += self.rule['name'] + '\n\n'
self._add_custom_alert_text()
self._ensure_new_line()
if self.rule.get('alert_text_type') != 'alert_text_only':
self._add_rule_text()
self._ensure_new_line()
if self.rule.get('top_count_keys'):
self._add_top_counts()
if self.rule.get('alert_text_type') != 'exclude_fields':
self._add_match_items()
return self.text
class JiraFormattedMatchString(BasicMatchString):
def _add_match_items(self):
match_items = dict([(x, y) for x, y in self.match.items() if not x.startswith('top_events_')])
json_blob = self._pretty_print_as_json(match_items)
preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob)
self.text += preformatted_text
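# Editor's illustrative sketch (not part of elastalert): the "$property$" reference
# syntax handled by Alerter.resolve_rule_reference below, reduced to a standalone
# helper with a made-up example rule.
def _resolve_reference_sketch(value, rule):
    text = unicode(value)
    if text.startswith('$') and text.endswith('$') and text[1:-1] in rule:
        return rule[text[1:-1]]
    return value
# _resolve_reference_sketch('$index$', {'index': 'logstash-*'}) == 'logstash-*'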
class Alerter(object):
""" Base class for types of alerts.
:param rule: The rule configuration.
"""
required_options = frozenset([])
def __init__(self, rule):
elastalert_logger.info("Starting up method:---alerts.__init__---")
self.rule = rule
# pipeline object is created by ElastAlerter.send_alert()
# and attached to each alerters used by a rule before calling alert()
self.pipeline = None
self.resolve_rule_references(self.rule)
def resolve_rule_references(self, root):
# Support referencing other top-level rule properties to avoid redundant copy/paste
if type(root) == list:
# Make a copy since we may be modifying the contents of the structure we're walking
for i, item in enumerate(copy.copy(root)):
if type(item) == dict or type(item) == list:
self.resolve_rule_references(root[i])
else:
root[i] = self.resolve_rule_reference(item)
elif type(root) == dict:
# Make a copy since we may be modifying the contents of the structure we're walking
for key, value in root.copy().iteritems():
if type(value) == dict or type(value) == list:
self.resolve_rule_references(root[key])
else:
root[key] = self.resolve_rule_reference(value)
def resolve_rule_reference(self, value):
strValue = unicode(value)
if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule:
if type(value) == int:
return int(self.rule[strValue[1:-1]])
else:
return self.rule[strValue[1:-1]]
else:
return value
def alert(self, match):
""" Send an alert. Match is a dictionary of information about the alert.
:param match: A dictionary of relevant information to the alert.
"""
raise NotImplementedError()
def get_info(self):
""" Returns a dictionary of data related to this alert. At minimum, this should contain
a field type corresponding to the type of Alerter. """
return {'type': 'Unknown'}
def create_title(self, matches):
""" Creates custom alert title to be used, e.g. as an e-mail subject or JIRA issue summary.
:param matches: A list of dictionaries of relevant information to the alert.
"""
if 'alert_subject' in self.rule:
return self.create_cu
|
nicholasserra/sentry
|
tests/sentry/api/endpoints/test_group_notes.py
|
Python
|
bsd-3-clause
| 1,576
| 0
|
from __future__ import absolute_import
from sentry.models import Activity
from sentry.testutils import APITestCase
class GroupNoteTest(APITestCase):
def test_simple(self):
group = self.group
activity = Activity.objects.create(
group=group,
            project=group.project,
type=Activity.NOTE,
user=self.user,
data={'text': 'hello world'},
)
self.login_as(user=self.user)
url = '/api/0/issues/{}/comments/'.format(group.id)
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]['id'] == str(activity.id)
class GroupNoteCreateTest(APITestCase):
def test_simple(self):
group = self.group
self.login_as(user=self.user)
url = '/api/0/issues/{}/comments/'.format(group.id)
response = self.client.post(url, format='json')
assert response.status_code == 400
response = self.client.post(url, format='json', data={
'text': 'hello world',
})
assert response.status_code == 201, response.content
activity = Activity.objects.get(id=response.data['id'])
assert activity.user == self.user
assert activity.group == group
assert activity.data == {'text': 'hello world'}
response = self.client.post(url, format='json', data={
'text': 'hello world',
})
assert response.status_code == 400, response.content
|
nhamplify/aminator
|
aminator/plugins/blockdevice/base.py
|
Python
|
apache-2.0
| 1,648
| 0.00182
|
# -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
aminator.plugins.blockdevice.base
=================================
Base class(es) for block device manager plugins
"""
import abc
import logging
from aminator.plugins.base import BasePlugin
__all__ = ('BaseBlockDevicePlugin',)
log = logging.getLogger(__name__)
class BaseBlockDevicePlugin(BasePlugin):
"""
BlockDevicePlugins are context managers and as such, need to implement the context manager protocol
"""
__metaclass__ = abc.ABCMeta
_entry_point = 'aminator.plugins.blockdevice'
@abc.abstractmethod
def __enter__(self):
return self
@abc.abstractmethod
def __exit__(self, typ, val, trc):
if typ: log.exception("Exception: {0}: {1}".format(typ.__name__,val))
return False
def __call__(self, cloud):
"""
By default, BlockDevicePlugins are called using
with blockdeviceplugin(cloud) as device:
pass
Override if need be
"""
self.cloud = cloud
return self
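# Editor's illustrative sketch (not part of aminator): a minimal concrete plugin
# showing the context-manager contract described above. The device path is a
# placeholder, and a real plugin would also satisfy whatever BasePlugin requires
# before it could be instantiated.
class _NullBlockDevicePlugin(BaseBlockDevicePlugin):
    def __enter__(self):
        self.dev = '/dev/xvdf'  # hypothetical device node
        return self.dev
    def __exit__(self, typ, val, trc):
        return super(_NullBlockDevicePlugin, self).__exit__(typ, val, trc)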
|
khwilson/PynamoDB
|
pynamodb/tests/test_table_connection.py
|
Python
|
mit
| 16,242
| 0.001478
|
"""
Test suite for the table class
"""
import six
from pynamodb.compat import CompatTestCase as TestCase
from pynamodb.connection import TableConnection
from pynamodb.constants import DEFAULT_REGION
from pynamodb.tests.data import DESCRIBE_TABLE_DATA, GET_ITEM_DATA
from pynamodb.tests.response import HttpOK
if six.PY3:
from unittest.mock import patch
else:
from mock import patch
PATCH_METHOD = 'pynamodb.connection.Connection._make_api_call'
class ConnectionTestCase(TestCase):
"""
Tests for the base connection class
"""
def setUp(self):
self.test_table_name = 'ci-table'
self.region = DEFAULT_REGION
def test_create_connection(self):
"""
TableConnection()
"""
conn = TableConnection(self.test_table_name)
self.assertIsNotNone(conn)
def test_create_table(self):
"""
TableConnection.create_table
"""
conn = TableConnection(self.test_table_name)
kwargs = {
'read_capacity_units': 1,
'write_capacity_units': 1,
}
self.assertRaises(ValueError, conn.create_table, **kwargs)
kwargs['attribute_definitions'] = [
{
'attribute_name': 'key1',
'attribute_type': 'S'
},
{
'attribute_name': 'key2',
'attribute_type': 'S'
}
]
self.assertRaises(ValueError, conn.create_table, **kwargs)
kwargs['key_schema'] = [
{
'attribute_name': 'key1',
'key_type': 'hash'
},
{
'attribute_name': 'key2',
'key_type': 'range'
}
]
params = {
'TableName': 'ci-table',
'ProvisionedThroughput': {
'WriteCapacityUnits': 1,
'ReadCapacityUnits': 1
},
'AttributeDefinitions': [
{
'AttributeType': 'S',
'AttributeName': 'key1'
},
{
'AttributeType': 'S',
'AttributeName': 'key2'
}
],
'KeySchema': [
{
'KeyType': 'HASH',
'AttributeName': 'key1'
},
{
'KeyType': 'RANGE',
'AttributeName': 'key2'
}
]
}
with patch(PATCH_METHOD) as req:
req.return_value = {}
conn.create_table(
**kwargs
)
kwargs = req.call_args[0][1]
self.assertEqual(kwargs, params)
def test_delete_table(self):
"""
TableConnection.delete_table
"""
params = {'TableName': 'ci-table'}
with patch(PATCH_METHOD) as req:
req.return_value = HttpOK(), None
conn = TableConnection(self.test_table_name)
conn.delete_table()
kwargs = req.call_args[0][1]
self.assertEqual(kwargs, params)
def test_update_table(self):
"""
TableConnection.update_table
"""
with patch(PATCH_METHOD) as req:
req.return_value = HttpOK(), None
conn = TableConnection(self.test_table_name)
params = {
'ProvisionedThroughput': {
'WriteCapacityUnits': 2,
'ReadCapacityUnits': 2
},
'TableName': self.test_table_name
}
conn.update_table(
read_capacity_units=2,
write_capacity_units=2
)
self.assertEqual(req.call_args[0][1], params)
with patch(PATCH_METHOD) as req:
req.return_value = HttpOK(), None
conn = TableConnection(self.test_table_name)
global_secondary_index_updates = [
{
"index_name": "foo-index",
"read_capacity_units": 2,
"write_capacity_units": 2
}
]
params = {
'TableName': self.test_table_name,
'ProvisionedThroughput': {
'ReadCapacityUnits': 2,
'WriteCapacityUnits': 2,
},
'GlobalSecondaryIndexUpdates': [
{
'Update': {
'IndexName': 'foo-index',
'ProvisionedThroughput': {
'ReadCapacityUnits': 2,
'WriteCapacityUnits': 2,
}
}
}
]
}
conn.update_table(
read_capacity_units=2,
write_capacity_units=2,
global_secondary_index_updates=global_secondary_index_updates
)
self.assertEqual(req.call_args[0][1], params)
def test_describe_table(self):
"""
TableConnection.describe_table
"""
with patch(PATCH_METHOD) as req:
req.return_value = DESCRIBE_TABLE_DATA
conn = TableConnection(self.test_table_name)
conn.describe_table()
self.assertEqual(conn.table_name, self.test_table_name)
self.assertEqual(req.call_args[0][1], {'TableName': 'ci-table'})
def test_delete_item(self):
"""
TableConnection.delete_item
"""
conn = TableConnection(self.test_table_name)
with patch(PATCH_METHOD) as req:
req.return_value = DESCRIBE_TABLE_DATA
conn.describe_table()
with patch(PATCH_METHOD) as req:
req.return_value = {}
conn.delete_item(
"Amazon DynamoDB",
"How do I update multiple items?")
params = {
'ReturnConsumedCapacity': 'TOTAL',
'Key': {
'ForumName': {
'S': 'Amazon DynamoDB'
},
'Subject': {
'S': 'How do I update multiple items?'
}
},
'TableName': self.test_table_name
}
self.assertEqual(req.call_args[0][1], params)
def test_update_item(self):
"""
TableConnection.delete_item
"""
conn = TableConnection(self.test_table_name)
with patch(PATCH_METHOD) as req:
req.return_value = DESCRIBE_TABLE_DATA
conn.describe_table()
attr_updates = {
'Subject': {
'Value': 'foo-subject',
'Action': 'PUT'
},
}
with patch(PATCH_METHOD) as req:
req.return_value = HttpOK(), {}
conn.update_item(
'foo-key',
attribute_updates=attr_updates,
range_key='foo-range-key',
)
params = {
'Key': {
'ForumName': {
'S': 'foo-key'
},
'Subject': {
'S': 'foo-range-key'
}
},
'AttributeUpdates': {
'Subject': {
'Value': {
'S': 'foo-subject'
},
'Action': 'PUT'
}
},
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'ci-table'
}
self.assertEqual(req.call_args[0][1], params)
def test_get_item(self):
"""
TableConnection.get_item
"""
conn = TableConnection(self.test_table_name)
with patch(PATCH_METHOD) as req:
req.return_value = DESCRIBE_TABLE_DATA
conn.describe_table()
with patch(PATCH_METHOD) as req:
req.return_value = GET_ITEM_DATA
|
acx2015/ConfigArgParse
|
tests/test_configargparse.py
|
Python
|
mit
| 34,286
| 0.011287
|
import argparse
import configargparse
import functools
import inspect
import logging
import sys
import tempfile
import types
import unittest
# enable logging to simplify debugging
logger = logging.getLogger()
logger.level = logging.DEBUG
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
def replace_error_method(arg_parser):
"""Swap out arg_parser's error(..) method so that instead of calling
sys.exit(..) it just raises an error.
"""
def error_method(self, message):
raise argparse.ArgumentError(None, message)
def exit_method(self, status, message):
self._exit_method_called = True
arg_parser._exit_method_called = False
arg_parser.error = types.MethodType(error_method, arg_parser)
arg_parser.exit = types.MethodType(exit_method, arg_parser)
return arg_parser
class TestCase(unittest.case.TestCase):
def initParser(self, *args, **kwargs):
p = configargparse.ArgParser(*args, **kwargs)
self.parser = replace_error_method(p)
self.add_arg = self.parser.add_argument
self.parse = self.parser.parse_args
self.parse_known = self.parser.parse_known_args
self.format_values = self.parser.format_values
self.format_help = self.parser.format_help
if not hasattr(self, "assertRegex"):
self.assertRegex = self.assertRegexpMatches
if not hasattr(self, "assertRaisesRegex"):
self.assertRaisesRegex = self.assertRaisesRegexp
self.assertParseArgsRaises = functools.partial(self.assertRaisesRegex,
argparse.ArgumentError, callable_obj = self.parse)
return self.parser
class TestBasicUseCases(TestCase):
def setUp(self):
self.initParser(args_for_setting_config_path=[])
def testBasicCase1(self):
## Test command line and config file values
self.add_arg("filenames", nargs="+", help="positional arg")
self.add_arg("-x", "--arg-x", action="store_true")
self.add_arg("-y", "--arg-y", dest="y1", type=int, required=True)
self.add_arg("--arg-z", action="append", type=float, required=True)
# make sure required args are enforced
self.assertParseArgsRaises("too few arg"
if sys.version_info < (3,3) else
"the following arguments are required", args="")
self.assertParseArgsRaises("argument -y/--arg-y is required"
if sys.version_info < (3,3) else
"the following arguments are required: -y/--arg-y",
args="-x --arg-z 11 file1.txt")
self.assertParseArgsRaises("argument --arg-z is required"
if sys.version_info < (3,3) else
"the following arguments are required: --arg-z",
args="file1.txt file2.txt file3.txt -x -y 1")
# check values after setting args on command line
ns = self.parse(args="file1.txt --arg-x -y 3 --arg-z 10",
config_file_contents="")
self.assertListEqual(ns.filenames, ["file1.txt"])
self.assertEqual(ns.arg_x, True)
self.assertEqual(ns.y1, 3)
self.assertEqual(ns.arg_z, [10])
self.assertRegex(self.format_values(),
'Command Line Args: file1.txt --arg-x -y 3 --arg-z 10')
# check values after setting args in config file
ns = self.parse(args="file1.txt file2.txt", config_file_contents="""
# set all required args in config file
arg-x = True
arg-y = 10
arg-z = 30
arg-z = 40
""")
self.assertListEqual(ns.filenames, ["file1.txt", "file2.txt"])
self.assertEqual(ns.arg_x, True)
self.assertEqual(ns.y1, 10)
self.assertEqual(ns.arg_z, [40])
self.assertRegex(self.format_values(),
'Command Line Args: \s+ file1.txt file2.txt\n'
'Config File \(method arg\):\n'
' arg-x: \s+ True\n'
' arg-y: \s+ 10\n'
            ' arg-z: \s+ 40\n')
# check values after setting args in both command line and config file
ns = self.parse(args="file1.txt file2.txt --arg-x -y 3 --arg-z 100 ",
config_file_contents="""arg-y = 31.5
arg-z = 30
""")
self.format_help()
self.format_values()
self.assertListEqual(ns.filenames, ["file1.txt", "file2.txt"])
self.assertEqual(ns.arg_x, True)
self.assertEqual(ns.y1, 3)
self.assertEqual(ns.arg_z, [100])
self.assertRegex(self.format_values(),
"Command Line Args: file1.txt file2.txt --arg-x -y 3 --arg-z 100")
def testBasicCase2(self, use_groups=False):
## Test command line, config file and env var values
default_config_file = tempfile.NamedTemporaryFile(mode="w", delete=True)
default_config_file.flush()
p = self.initParser(default_config_files=['/etc/settings.ini',
'/home/jeff/.user_settings', default_config_file.name])
p.add_arg('vcf', nargs='+', help='Variant file(s)')
if not use_groups:
self.add_arg('--genome', help='Path to genome file', required=True)
self.add_arg('-v', dest='verbose', action='store_true')
self.add_arg('-g', '--my-cfg-file', required=True,
is_config_file=True)
self.add_arg('-d', '--dbsnp', env_var='DBSNP_PATH')
self.add_arg('-f', '--format',
choices=["BED", "MAF", "VCF", "WIG", "R"],
dest="fmt", metavar="FRMT", env_var="OUTPUT_FORMAT",
default="BED")
else:
g = p.add_argument_group(title="g1")
g.add_arg('--genome', help='Path to genome file', required=True)
g.add_arg('-v', dest='verbose', action='store_true')
g.add_arg('-g', '--my-cfg-file', required=True,
is_config_file=True)
g = p.add_argument_group(title="g2")
g.add_arg('-d', '--dbsnp', env_var='DBSNP_PATH')
g.add_arg('-f', '--format',
choices=["BED", "MAF", "VCF", "WIG", "R"],
dest="fmt", metavar="FRMT", env_var="OUTPUT_FORMAT",
default="BED")
# make sure required args are enforced
self.assertParseArgsRaises("too few arg"
if sys.version_info < (3,3) else
"the following arguments are required: vcf, -g/--my-cfg-file",
args="--genome hg19")
self.assertParseArgsRaises("not found: file.txt", args="-g file.txt")
# check values after setting args on command line
config_file2 = tempfile.NamedTemporaryFile(mode="w", delete=True)
config_file2.flush()
ns = self.parse(args="--genome hg19 -g %s bla.vcf " % config_file2.name)
self.assertEqual(ns.genome, "hg19")
self.assertEqual(ns.verbose, False)
self.assertEqual(ns.dbsnp, None)
self.assertEqual(ns.fmt, "BED")
self.assertListEqual(ns.vcf, ["bla.vcf"])
self.assertRegex(self.format_values(),
'Command Line Args: --genome hg19 -g [^\s]+ bla.vcf\n'
'Defaults:\n'
' --format: \s+ BED\n')
# check precedence: args > env > config > default using the --format arg
default_config_file.write("--format MAF")
default_config_file.flush()
ns = self.parse(args="--genome hg19 -g %s f.vcf " % config_file2.name)
self.assertEqual(ns.fmt, "MAF")
self.assertRegex(self.format_values(),
'Command Line Args: --genome hg19 -g [^\s]+ f.vcf\n'
'Config File \([^\s]+\):\n'
' --format: \s+ MAF\n')
config_file2.write("--format VCF")
config_file2.flush()
ns = self.parse(args="--genome hg19 -g %s f.vcf " % config_file2.name)
self.assertEqual(ns.fmt, "VCF")
self.assertRegex(self.format_values(),
'Command Line Args: --genome hg19 -g [^\s]+ f.vcf\n'
'Config File \([
|
looker/sentry
|
src/sentry/south_migrations/0348_fix_project_key_rate_limit_window_unit.py
|
Python
|
bsd-3-clause
| 83,050
| 0.007851
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
db.commit_transaction()
try:
self._forwards(orm)
except Exception:
db.start_transaction()
raise
db.start_transaction()
def _forwards(self, orm):
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
ProjectKey = orm['sentry.ProjectKey']
queryset = ProjectKey.objects.filter(rate_limit_window__isnull=False)
for key in RangeQuerySetWrapperWithProgressBar(queryset):
ProjectKey.objects.filter(pk=key.pk).update(
rate_limit_window=key.rate_limit_window * 60)
def backwards(self, orm):
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
ProjectKey = orm['sentry.ProjectKey']
queryset = ProjectKey.objects.filter(rate_limit_window__isnull=False)
for key in RangeQuerySetWrapperWithProgressBar(queryset):
ProjectKey.objects.filter(pk=key.pk).update(
rate_limit_window=key.rate_limit_window / 60)
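    # Editor's note (illustrative, not part of the migration): forwards() appears to
    # rescale rate_limit_window from minutes to seconds (* 60) and backwards() undoes
    # it (/ 60); both walk the same non-null queryset with the progress-bar wrapper
    # so large tables can be updated row by row outside a single transaction.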
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
        'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'edca03fca6594a0bbb3bf8d1de291c64b3ec21abb7ed464d84a3e0e1b87a33ce'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'a91de422beac427ab54ad0a3cab4610cb37a7022173c452fb87e776457612a40'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Immortal Cow'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'69669803fe884840a38d78ab081f2c9b'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 8, 31, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 9, 30, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'2e3f93d366f84c32a20bbaf2536bffad7ff9761de5944afaa0be34690b056b4e'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'5526f80c353b4489bce4e550f7d5d2cb724347c2f4184506bebe2bc3536ff27f'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'se
|
charlie-barnes/dipper-stda
|
pdf.py
|
Python
|
gpl-2.0
| 8,846
| 0.010513
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
### 2008-2015 Charlie Barnes.
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; either version 2 of the License, or
### (at your option) any later version.
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
def repeat_to_length(string_to_expand, length):
|
return (string_to_expand * ((length/len(string_to_expand))+1))[:length]
try:
from fpdf import FPDF
except ImportError:
from p
|
yfpdf import FPDF
class PDF(FPDF):
def __init__(self, orientation,unit,format):
FPDF.__init__(self, orientation=orientation,unit=unit,format=format)
self.toc = []
self.numbering = False
self.num_page_num = 0
self.toc_page_break_count = 1
self.set_left_margin(10)
self.set_right_margin(10)
self.do_header = False
self.type = None
self.toc_length = 0
self.doing_the_list = False
self.vcs = []
self.toc_page_num = 2
self.dataset = None
self.orientation = orientation
self.orientation_changes = [0]
def p_add_page(self):
#if(self.numbering):
self.add_page()
self.num_page_num = self.num_page_num + 1
def num_page_no(self):
return self.num_page_num
def startPageNums(self):
self.numbering = True
def stopPageNums(self):
self.numbering = False
def TOC_Entry(self, txt, level=0):
self.toc.append({'t':txt, 'l':level, 'p':str(self.num_page_no()+self.toc_length)})
def insertTOC(self, location=1, labelSize=20, entrySize=10, tocfont='Helvetica', label='Table of Contents'):
#make toc at end
self.stopPageNums()
self.section = 'Contents'
self.p_add_page()
tocstart = self.page
self.set_font('Helvetica', '', 20)
self.multi_cell(0, 20, 'Contents', 0, 'J', False)
used_pages = []
link_abscissa = {}
for t in self.toc:
#Offset
level = t['l']
if level > 0:
self.cell(level*8)
weight = ''
if level == 0:
weight = 'B'
txxt = t['t']
self.set_font(tocfont, weight, entrySize)
strsize = self.get_string_width(txxt)
self.cell(strsize+2, self.font_size+2, txxt, 0, 0, '', False)
#store the TOC links & position for later use
if self.page_no() not in link_abscissa.keys():
link_abscissa[self.page_no()] = []
link_abscissa[self.page_no()].append([int(t['p']), self.y])
#Filling dots
self.set_font(tocfont, '', entrySize)
PageCellSize = self.get_string_width(t['p'])+2
w = self.w-self.l_margin-self.r_margin-PageCellSize-(level*8)-(strsize+2)
nb = w/self.get_string_width('.')
dots = repeat_to_length('.', int(nb))
self.cell(w, self.font_size+2, dots, 0, 0, 'R')
#Page number of the toc entry
self.cell(PageCellSize, self.font_size+2, str(int(t['p'])), 0, 1, 'R')
if self.toc_page_break_count%2 != 0:
self.section = ''
self.toc_page_break_count = self.toc_page_break_count + 1
self.p_add_page()
#Grab it and move to selected location
n = self.page
ntoc = n - tocstart + 1
last = []
#store toc pages
i = tocstart
while i <= n:
last.append(self.pages[i])
i = i + 1
#move pages
i = tocstart
while i >= (location-1):
self.pages[i+ntoc] = self.pages[i]
i = i - 1
#Put toc pages at insert point
i = 0
while i < ntoc:
self.pages[location + i] = last[i]
#loop through all the TOC links for this page and add them
try:
for linkdata in link_abscissa[tocstart+i]:
self.page = location + i
link = self.add_link()
self.set_link(link, y=0, page=linkdata[0])
self.link(x=self.l_margin, y=linkdata[1], w=self.w-self.r_margin, h=self.font_size+2, link=link)
except KeyError:
pass
i = i + 1
self.page = n
def header(self):
if self.do_header:
self.set_font('Helvetica', '', 8)
self.set_text_color(0, 0, 0)
self.set_line_width(0.1)
            if (self.section != 'Contents' and self.page_no()%2 == 0) or (self.section == 'Contents' and self.toc_page_break_count%2 == 0):
self.cell(0, 5, self.section, 'B', 0, 'L', 0) # even page header
self.cell(0, 5, self.title.replace('\n', ' - '), 'B', 1, 'R', 0) # even page header
            elif (self.section != 'Contents' and self.page_no()%2 == 1) or (self.section == 'Contents' and self.toc_page_break_count%2 == 1):
self.cell(0, 5, self.section, 'B', 1, 'R', 0) #odd page header
if self.type == 'list' and self.doing_the_list == True:
col_width = 12.7#((self.w - self.l_margin - self.r_margin)/2)/7.5
#vc headings
self.set_font('Helvetica', '', 10)
self.set_line_width(0.0)
self.set_y(20)
self.set_x(self.w-(7+col_width+(((col_width*3)+(col_width/4))*len(self.vcs))))
self.cell(col_width, 5, '', '0', 0, 'C', 0)
for vc in sorted(self.vcs):
if vc == None:
vc_head_text = ''
else:
vc_head_text = ''.join(['VC',vc])
self.cell((col_width*3), 5, vc_head_text, '0', 0, 'C', 0)
self.cell(col_width/4, 5, '', '0', 0, 'C', 0)
self.ln()
self.set_x(self.w-(7+col_width+(((col_width*3)+(col_width/4))*len(self.vcs))))
self.set_font('Helvetica', '', 8)
self.cell(col_width, 5, '', '0', 0, 'C', 0)
for vc in sorted(self.vcs):
#colum headings
self.cell(col_width, 5, ' '.join([self.dataset.config.get('List', 'distribution_unit'), 'sqs']), '0', 0, 'C', 0)
self.cell(col_width, 5, 'Records', '0', 0, 'C', 0)
self.cell(col_width, 5, 'Last in', '0', 0, 'C', 0)
self.cell(col_width/4, 5, '', '0', 0, 'C', 0)
self.y0 = self.get_y()
if self.section == 'Contributors' or self.section == 'Contents':
self.set_y(self.y0 + 20)
def footer(self):
self.set_y(-20)
self.set_font('Helvetica','',8)
#only show page numbers in the main body
#if self.num_page_no() >= 4 and self.section != 'Contents' and self.section != 'Index' and self.section != 'Contributors' and self.section != 'References' and self.section != 'Introduction' and self.section != '':
if self.num_page_no() >= 5 and self.section != 'Contents' and self.section != '' and self.section != 'Index' and self.section != 'Contributors' and self.section != 'References' and self.section != 'Introduction':
self.cell(0, 10, str(self.num_page_no()+self.toc_length), '', 0, 'C')
def setcol(self, col):
self.col = col
x = 10 + (col*100)
self.set_left_margin(x)
self.set_x(x)
def accept_page_break(self):
if s
|
xively/node-red-nodes
|
hardware/sensehat/sensehat.py
|
Python
|
apache-2.0
| 6,966
| 0.024978
|
#! /usr/bin/python
#
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Commands:
# C[R,G,B] - clear to colour (or off if no RGB provided)
# R[rot] - rotate by rot (0,90,180,270)
# P[x,y,R,G,B]+ - set individual pixel(s) to a colour
# T[R,G,B[,R,G,B][,S]:]Message - scroll a message (nb: if message contains ':' it must be prefixed with ':')
# if message is a single char, uses show_letter instead
# F[H|V] - flip horizontal|vertical
# X[0|1] - high frequency reporting (accel/gyro/orientation/compass) off|on
# Y[0|1] - low frequency reporting (temperature/humidity/pressure) off|on
# D[0|1] - Set light level low|high
#
# Outputs:
# Xaccel.x,y,z,gyro.x,y,z,orientation.roll,pitch,yaw,compass
# Ytemperature,humidity,pressure
# K[U|L|R|D|E][0|1|2] - joystick event: direction,state
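#
# Example command strings (added for illustration only, not part of the original
# protocol notes), as they would arrive on stdin given the handlers below:
#   C255,0,0           - clear the LED matrix to red
#   P0,0,0,255,0       - set pixel (0,0) to green
#   T0,255,0,0.2:Hi    - scroll "Hi" in green at scroll speed 0.2
#   X1                 - enable high frequency sensor reporting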
import io
import os
import sys
import glob
import time
import errno
import ctypes
import select
import struct
import inspect
import threading
from sense_hat import SenseHat
EVENT_FORMAT = 'llHHI'
EVENT_SIZE = struct.calcsize(EVENT_FORMAT)
EVENT_NAMES = {103:'U',105:'L',106:'R',108:'D',28:'E'}
def get_stick():
for evdev in glob.glob('/sys/class/input/event*'):
try:
with io.open(os.path.join(evdev, 'device', 'name'), 'r') as f:
if f.read().strip() == 'Raspberry Pi Sense HAT Joystick':
return os.path.join('/dev', 'input', os.path.basename(evdev))
except IOError as e:
sys.exit(1)
sys.exit(1)
stick_file = io.open(get_stick(),'rb')
SH = SenseHat()
SH.set_rotation(0)
SH.clear()
files = [sys.stdin,stick_file]
last_hf_time = time.time()
last_lf_time = time.time()
hf_interval = 0.09 # Approx 10/s
lf_interval = 1
hf_enabled = False
lf_enabled = False
scroll = None
class ScrollThread(threading.Thread):
def __init__(self,fcol,bcol,speed,message):
threading.Thread.__init__(self)
self.fcol = fcol
self.bcol = bcol
self.message = message
self.speed = speed
def run(self):
global SH
old_rotation = SH.rotation
try:
SH.show_message(self.message,text_colour=self.fcol,back_colour=self.bcol,scroll_speed=self.speed)
except:
try:
SH.set_rotation(old_rotation,False)
SH.clear(self.bcol);
except:
pass
def interrupt(self):
if not self.isAlive():
raise threading.ThreadError()
for thread_id, thread_object in threading._active.items():
if thread_object == self:
r = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id,ctypes.py_object(StandardError))
if r == 1:
pass
else:
if r > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
raise SystemError()
return
def process_command(data):
global hf_enabled, lf_enabled,scroll
if data[0] == "X":
if data[1] == '0':
hf_enabled = False
else:
hf_enabled = True
elif data[0] == "Y":
if data[1] == '0':
lf_enabled = False
else:
lf_enabled = True
elif data[0] == "D":
if data[1] == '0':
SH.low_light = True
else:
SH.low_light = False
else:
if threading.activeCount() == 2:
scroll.interrupt()
while scroll.isAlive():
time.sleep(0.01)
try:
scroll.interrupt()
except:
pass
if data[0] == "R":
SH.set_rotation(float(data[1:]))
elif data[0] == "C":
data = data[1:].strip()
if len(data) > 0:
s = data.split(",")
col = (int(s[0]),int(s[1]),int(s[2]))
else:
col = (0,0,0)
SH.
|
clear(col)
elif data[0] ==
|
"P":
data = data[1:].strip()
s = data.split(',')
for p in range(0,len(s),5):
SH.set_pixel(int(s[p]),int(s[p+1]),int(s[p+2]),int(s[p+3]),int(s[p+4]))
elif data[0] == "T":
data = data[1:]
tcol = (255,255,255)
bcol = (0,0,0)
speed = 0.1
s = data.split(':',1)
if len(s) == 2:
data = s[1]
if len(s[0]) > 0:
c = s[0].split(",")
if len(c) == 1:
speed = float(c[0])
elif len(c) == 3:
tcol = (int(c[0]),int(c[1]),int(c[2]))
if len(c) == 4:
tcol = (int(c[0]),int(c[1]),int(c[2]))
speed = float(c[3])
elif len(c) == 6:
tcol = (int(c[0]),int(c[1]),int(c[2]))
bcol = (int(c[3]),int(c[4]),int(c[5]))
elif len(c) == 7:
tcol = (int(c[0]),int(c[1]),int(c[2]))
bcol = (int(c[3]),int(c[4]),int(c[5]))
speed = float(c[6])
if len(data) > 1:
scroll = ScrollThread(tcol,bcol,speed,data);
scroll.start()
else:
SH.show_letter(data,text_colour=tcol,back_colour=bcol)
elif data[0] == "F":
if data[1] == "H":
SH.flip_h()
elif data[1] == "V":
SH.flip_v()
def idle_work():
global last_hf_time, last_lf_time
now = time.time()
if hf_enabled and (now-last_hf_time > hf_interval):
orientation = SH.get_orientation()
compass = SH.get_compass()
gyro = SH.get_gyroscope_raw()
accel = SH.get_accelerometer_raw()
print("X%0.4f,%0.4f,%0.4f,%0.4f,%0.4f,%0.4f,%0.4f,%0.4f,%0.4f,%0.0f"%(accel['x'],accel['y'],accel['z'],gyro['x'],gyro['y'],gyro['z'],orientation['roll'],orientation['pitch'],orientation['yaw'],compass))
last_hf_time = now
if lf_enabled and (now-last_lf_time > lf_interval):
temperature = SH.get_temperature();
humidity = SH.get_humidity();
pressure = SH.get_pressure();
print("Y%0.2f,%0.2f,%0.2f"%(temperature,humidity,pressure))
last_lf_time = now
def process_joystick():
event = stick_file.read(EVENT_SIZE)
(tv_sec, tv_usec, type, code, value) = struct.unpack(EVENT_FORMAT, event)
if type == 0x01:
print ("K%s%s"%(EVENT_NAMES[code],value))
def main_loop():
# while still waiting for input on at least one file
try:
while files:
ready = select.select(files, [], [], 0.01)[0]
if not ready:
idle_work()
else:
for file in ready:
if file == sys.stdin:
line = file.readline()
if not line: # EOF, remove file from input list
sys.exit(0)
elif line.rstrip(): # optional: skipping empty lines
process_command(line)
else:
process_joystick()
except:
sys.exit(0)
try:
main_loop()
except KeyboardInterrupt:
pass
|
sadig/DC2
|
components/dc2-lib/dc2/lib/exceptions/authentication.py
|
Python
|
gpl-2.0
| 948
| 0
|
# -*- coding: utf-8 -*-
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <sh@sourcecode.de>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MER
|
CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License fo
|
r more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
class KerberosError(Exception):
pass
class KerberosTicketExpired(KerberosError):
pass
|
reidmcy/pressScrapers
|
ubcScraper.py
|
Python
|
gpl-2.0
| 2,909
| 0.008594
|
import requests
from bs4 import BeautifulSoup
import sys
import os
import pandas
import re
targetURL = "http://www.ubcpress.ca/search/subject_list.asp?SubjID=45"
bookLinks = "http://www.ubcpress.ca/search/"
outputDir = "UBC_Output"
def main():
r = requests.get(targetURL)
soup = BeautifulSoup(r.content, "html.parser")
# make a list
book_urls = []
# get titles and links
for link in soup.find_all("a"):
if "title_book.asp" in link.get("href"):
book_urls.append(bookLinks + link.get("href"))
if not os.path.isdir(outputDir):
os.mkdir(outputDir)
os.chdir(outputDir)
booksDict = {
"title" : [],
"authors" : [],
"summary" : [],
"subjects" : [],
"authorBio" : [],
"date" : [],
"ISBN" : [],
}
print("Found {} urls".format(len(book_urls)))
for i, url in enumerate(book_urls):
print("On url index {}".format(i))
r = requests.get(url)
soup = BeautifulSoup(r.content, "html.parser")
print("Getting: {}".format(url))
title = soup.find("span", {"class" : "booktitle"}).text
print("Found: '{}'".format(title))
print("Writing '{}/{}.html'".format
|
(outputDir, title))
with open("{}.html".format(title.replace('/','')), 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
booksDict['title'].append(title)
booksDict['authors'
|
].append([a.text.strip() for a in soup.find_all("a", {"href" : "#author"})])
mainBodyText = soup.find("td", {"width" : "545", "colspan":"3"}).find("span" , {"class" : "regtext"})
regex = re.match(r"""(.*)About the Author\(s\)(.*)Table of Contents""", mainBodyText.text, flags = re.DOTALL)
if regex is None:
regex = re.match(r"""(.*)About the Author\(s\)(.*)""", mainBodyText.text, flags = re.DOTALL)
booksDict['summary'].append(regex.group(1).strip())
booksDict["authorBio"].append(regex.group(2).strip().split('\n '))
booksDict["authorBio"][-1] = [s.strip() for s in booksDict["authorBio"][-1]]
subjectsLst = []
for sub in mainBodyText.find_all("a"):
try:
if "subject_list.asp?SubjID=" in sub.get("href"):
subjectsLst.append(sub.text)
except TypeError:
pass
booksDict["subjects"].append(subjectsLst)
newstext = soup.find("span", {"class" : "newstext"}).text
regex = re.search(r"Release Date: (.*)(ISBN: \d*)", newstext)
try:
booksDict['date'].append(regex.group(1))
booksDict['ISBN'].append(regex.group(2))
except AttributeError:
booksDict['date'].append(None)
booksDict['ISBN'].append(None)
os.chdir('..')
pandas.DataFrame(booksDict).to_csv("UBCscrape.csv")
if __name__ == "__main__":
main()
|
adiq/MultitestApp
|
multitest/tests.py
|
Python
|
mit
| 3,600
| 0.003611
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from multitest.models import Test, Question, Answer
class MultitestViewsTests(TestCase):
def setUp(self):
self.user = User.objects.create(username='user', is_active=True, is_staff=False, is_superuser=False)
self.user.set_password('user')
self.user.save()
self.c = Client()
self.c.login(username='user', password='user')
self.stest = Test.objects.create(title='Test, test')
self.squestion = Question.objects.create(question='Is that true?', test=self.stest)
self.sanswer = Answer.objects.create(answer='Yes', is_correct=True, question=self.squestion)
self.sanswer2 = Answer.objects.create(answer='No', is_correct=False, question=self.squestion)
def test_views_guest_access(self):
guest = Clien
|
t()
response = guest.get(reverse('index'))
self.assertTemplateUsed(response, 'multitest/index.html')
response = guest.get(reverse('login'))
self.assertTemplateUsed(response, 'multitest/login.html')
response = guest.get(reverse('register'))
self.assertTemplateUsed(response, 'multitest/register.html')
def test_only_users_access
|
(self):
guest = Client()
response = guest.get(reverse('test', kwargs={'test_id': self.stest.id}))
self.assertTemplateNotUsed(response, 'multitest/test.html')
def test_list_all_tests(self):
response = self.c.get(reverse('index'))
self.failUnlessEqual(response.status_code, 200)
self.assertContains(response, self.stest.title)
def test_display_test(self):
response = self.c.get(reverse('test', kwargs={'test_id': self.stest.id}))
self.failUnlessEqual(response.status_code, 200)
self.assertContains(response, self.squestion.question)
self.assertContains(response, self.sanswer.answer)
self.assertContains(response, self.sanswer2.answer)
def test_display_correct_result(self):
response = self.c.post(reverse('test', kwargs={'test_id': self.stest.id}), {
'question'+str(self.squestion.id)+'[]': self.sanswer.id,
})
self.assertEqual(response.context['points'], 1)
self.assertEqual(response.context['max_points'], 1)
response = self.c.post(reverse('test', kwargs={'test_id': self.stest.id}), {
'question'+str(self.squestion.id)+'[]': self.sanswer2.id,
})
self.assertEqual(response.context['points'], 0)
self.assertEqual(response.context['max_points'], 1)
response = self.c.post(reverse('test', kwargs={'test_id': self.stest.id}), {
'question'+str(self.squestion.id)+'[]': (self.sanswer2.id, self.sanswer.id),
})
self.assertEqual(response.context['points'], 0)
self.assertEqual(response.context['max_points'], 1)
def test_user_login(self):
guest = Client()
response = guest.post(reverse('login'), {'login': 'user', 'password': 'user'})
        self.assertEqual(response.status_code, 302)
response = guest.get(reverse('index'))
self.assertContains(response, 'Wyloguj')
def test_user_register(self):
users = User.objects.count()
guest = Client()
response = guest.post(reverse('register'),
{'login': 'test2', 'email': 'test2@wp.pl', 'password': 'test2', 'verify': 'warszawa'})
        self.assertEqual(response.status_code, 302)
self.assertEqual(User.objects.count(), users+1)
|
RonnyPfannschmidt/pluggy
|
src/pluggy/_callers.py
|
Python
|
mit
| 2,097
| 0.000477
|
"""
Call loop machinery
"""
import sys
from ._result import HookCallError, _Result, _raise_wrapfail
def _multicall
|
(hook_name, hook_impls, caller_kwargs, firstresult):
"""Execute a call into multiple python functions/methods and return the
result(s).
``caller_kwargs`` comes from _HookCaller.__call__().
"""
__tracebackhide__ = True
results = []
excinfo = None
try: # run impl and wrapper setup function
|
s in a loop
teardowns = []
try:
for hook_impl in reversed(hook_impls):
try:
args = [caller_kwargs[argname] for argname in hook_impl.argnames]
except KeyError:
for argname in hook_impl.argnames:
if argname not in caller_kwargs:
raise HookCallError(
f"hook call must provide argument {argname!r}"
)
if hook_impl.hookwrapper:
try:
gen = hook_impl.function(*args)
next(gen) # first yield
teardowns.append(gen)
except StopIteration:
_raise_wrapfail(gen, "did not yield")
else:
res = hook_impl.function(*args)
if res is not None:
results.append(res)
if firstresult: # halt further impl calls
break
except BaseException:
excinfo = sys.exc_info()
finally:
if firstresult: # first result hooks return a single value
outcome = _Result(results[0] if results else None, excinfo)
else:
outcome = _Result(results, excinfo)
# run all wrapper post-yield blocks
for gen in reversed(teardowns):
try:
gen.send(outcome)
_raise_wrapfail(gen, "has second yield")
except StopIteration:
pass
return outcome.get_result()
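# Note (added for clarity, not part of the original module): with firstresult=True
# the loop above stops at the first implementation returning a non-None value and
# _Result wraps that single value; otherwise every non-None result is collected
# into a list. Hookwrapper generators then receive the outcome via gen.send(outcome).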
|
mindpin/mindpin_oppia
|
core/domain/rights_manager_test.py
|
Python
|
apache-2.0
| 11,577
| 0
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# ht
|
tp://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS"
|
BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classes and methods relating to user rights."""
__author__ = 'Sean Lip'
from core.domain import config_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
import feconf
import test_utils
import unittest
class ExplorationRightsTests(test_utils.GenericTestBase):
"""Test that rights for actions on explorations work as expected."""
def setUp(self):
super(ExplorationRightsTests, self).setUp()
self.register_editor('a@example.com', 'A')
self.register_editor('b@example.com', 'B')
self.register_editor('c@example.com', 'C')
self.register_editor('d@example.com', 'D')
self.register_editor('e@example.com', 'E')
self.register_editor('admin@example.com', 'adm')
self.user_id_a = self.get_user_id_from_email('a@example.com')
self.user_id_b = self.get_user_id_from_email('b@example.com')
self.user_id_c = self.get_user_id_from_email('c@example.com')
self.user_id_d = self.get_user_id_from_email('d@example.com')
self.user_id_e = self.get_user_id_from_email('e@example.com')
self.user_id_admin = self.get_user_id_from_email('admin@example.com')
config_services.set_property(
feconf.ADMIN_COMMITTER_ID, 'admin_emails', ['admin@example.com'])
self.EXP_ID = 'exp_id'
def test_splash_page_demo_exploration(self):
config_services.set_property(
feconf.ADMIN_COMMITTER_ID, 'splash_page_exploration_id', '1')
exp_services.load_demo('1')
self.assertTrue(rights_manager.Actor(self.user_id_a).can_view('1'))
self.assertTrue(rights_manager.Actor(self.user_id_a).can_edit('1'))
self.assertFalse(rights_manager.Actor(self.user_id_a).can_delete('1'))
self.assertTrue(rights_manager.Actor(self.user_id_admin).can_view('1'))
self.assertTrue(rights_manager.Actor(self.user_id_admin).can_edit('1'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_delete('1'))
def test_non_splash_page_demo_exploration(self):
# Note: there is no difference between permissions for demo
# explorations, whether or not they are on the splash page.
exp_services.load_demo('3')
self.assertTrue(rights_manager.Actor(self.user_id_a).can_view('3'))
self.assertTrue(rights_manager.Actor(self.user_id_a).can_edit('3'))
self.assertFalse(rights_manager.Actor(self.user_id_a).can_delete('3'))
self.assertTrue(rights_manager.Actor(self.user_id_admin).can_view('3'))
self.assertTrue(rights_manager.Actor(self.user_id_admin).can_edit('3'))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_delete('3'))
def test_ownership(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, 'A title', 'A category')
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role(
self.user_id_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
self.assertTrue(
rights_manager.Actor(self.user_id_a).is_owner(self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).is_owner(self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_admin).is_owner(self.EXP_ID))
def test_newly_created_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, 'A title', 'A category')
exp_services.save_new_exploration(self.user_id_a, exp)
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_view(self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_edit(self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_a).can_delete(self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_admin).can_view(self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_admin).can_edit(self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_admin).can_delete(self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(self.EXP_ID))
def test_inviting_collaborator(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, 'A title', 'A category')
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role(
self.user_id_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_edit(self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(self.EXP_ID))
def test_inviting_playtester(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, 'A title', 'A category')
exp_services.save_new_exploration(self.user_id_a, exp)
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(self.EXP_ID))
rights_manager.assign_role(
self.user_id_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_VIEWER)
self.assertTrue(
rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_edit(self.EXP_ID))
self.assertFalse(
rights_manager.Actor(self.user_id_b).can_delete(self.EXP_ID))
def test_setting_rights(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, 'A title', 'A category')
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role(
self.user_id_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_VIEWER)
with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
rights_manager.assign_role(
self.user_id_b, self.EXP_ID, self.user_id_c,
rights_manager.ROLE_VIEWER)
rights_manager.assign_role(
self.user_id_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
rights_manager.assign_role(
self.user_id_b, self.EXP_ID, self.user_id_c,
rights_manager.ROLE_VIEWER)
rights_manager.assign_role(
self.user_id_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_OWNER)
rights_manager.assign_role(
self.user_id_b, self.EXP_ID, self.user_id_c,
rights_manager.ROLE_OWNER)
rights_manager.assign_role(
self.user_id_b, self.EXP_ID, self.user_id_d,
rights_manager.ROLE_EDITOR)
rights_manager.assign_role(
self.user_id_b, self.EXP_ID, self.user_id_e,
|
backmari/moose
|
python/chigger/graphs/Line.py
|
Python
|
lgpl-2.1
| 7,589
| 0.002108
|
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import vtk
import mooseutils
from .. import base
class Line(base.ChiggerObject):
"""
Wrapper for vtk line/point object.
"""
@staticmethod
def getOptions():
opt = base.ChiggerObject.getOptions()
opt.add('x', [], "The x-axis data.")
opt.add('y', [], "The y-axis data.")
opt.add('label', "The plot label (name appearing in legend).", vtype=str)
opt.add('style', '-', "The line style.", allow=['none', '-'])
opt.add('color', "The color of the line to plot.", vtype=list)
opt.add('width', "The width of the line in Points.", vtype=int)
opt.add('corner', 'left-bottom', "The axis corner to place the line.",
allow=['left-bottom', 'right-bottom', 'right-top', 'left-top'])
opt.add('marker', 'none', "Set the marker type.",
allow=['none', 'cross', 'plus', 'square', 'circle', 'diamond'])
opt.add('append', True, "Append new data to the existing data.")
opt.add('tracer', False, "Places both x and y tracing lines, (see 'xtracer' and "
"'ytracer').")
opt.add('xtracer', None, "Place a tracing line that follows the leading x-value (overrides "
"'tracer' option).", vtype=bool)
opt.add('ytracer', None, "Place a tracing line that follows the leading y-value (overrides "
"'tracer' option).", vtype=bool)
return opt
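    # Illustrative usage (not from the original file): construct a line and hand
    # it to a chigger graph object, which calls initialize()/update() on it.
    #   line = Line(x_data=[0, 1, 2], y_data=[0, 1, 4], label='y = x^2')
    #   (the graph wiring itself is assumed here; see the Graph.py references below)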
def __init__(self, x_data=None, y_data=None, **kwargs):
super(Line, self).__init__(**kwargs)
# Storage for vtk line/point object
self._vtkplot = None
# Build the vtkTable that stores the data
x =
|
vtk.vtkFloatArray()
x.SetName('x-data')
y = vtk.vtkFloatArray()
y.SetName('y-data')
self._vtktable = vtk.vtkTable()
self._vtktable.AddColumn(x)
self._vtktable.AddColumn(y)
# Storage for tracing lines
self._xtracer = None
self._ytracer = None
# Set x,y data
if x_data:
self.setOption('x', x_data)
if y_data:
self.setOption(
|
'y', y_data)
def setOptions(self, *args, **kwargs):
"""
Update line objects settings.
"""
super(Line, self).setOptions(*args, **kwargs)
tracer = self.getOption('tracer')
if tracer and not self.isOptionValid('xtracer'):
self.setOption('xtracer', True)
if tracer and not self.isOptionValid('ytracer'):
self.setOption('ytracer', True)
def initialize(self):
"""
Called prior to inserting the vtkPlotLine/Points object into the chart.
see Graph::Update
"""
super(Line, self).initialize()
# Create the vtk line or points object
style = self.getOption('style')
if style == '-' and not isinstance(self._vtkplot, vtk.vtkPlotLine):
self._vtkplot = vtk.vtkPlotLine()
self._vtkplot.SetInputData(self._vtktable, 0, 1)
        elif style == 'none' and not isinstance(self._vtkplot, vtk.vtkPlotPoints):
self._vtkplot = vtk.vtkPlotPoints()
self._vtkplot.SetInputData(self._vtktable, 0, 1)
# Create tracer lines(s)
if self.getOption('xtracer'):
if self._xtracer is None:
self._xtracer = Line(append=False, width=0.1, color=self.getOption('color'))
self._xtracer.update()
if self.getOption('ytracer'):
if self._ytracer is None:
self._ytracer = Line(append=False, width=0.1, color=self.getOption('color'))
self._ytracer.update()
def getVTKPlot(self):
"""
Return the vtkPlot object for this line.
"""
return self._vtkplot
def update(self, **kwargs):
"""
        Update the line object because the data or settings changed.
Users should not need to call this method, it should be called automatically. To update
the data use the 'UpdateData' method.
"""
super(Line, self).update(**kwargs)
# Extract x,y data
if not self.getOption('append'):
self._vtktable.SetNumberOfRows(0)
# Get the x,y data and reset to None so that data doesn't append over and over
x = self.getOption('x')
y = self.getOption('y')
self.setOptions(x=None, y=None)
if (x and y) and (len(x) == len(y)):
for i in range(len(x)): #pylint: disable=consider-using-enumerate
array = vtk.vtkVariantArray()
array.SetNumberOfTuples(2)
array.SetValue(0, x[i])
array.SetValue(1, y[i])
self._vtktable.InsertNextRow(array)
self._vtktable.Modified()
elif (x and y) and (len(x) != len(y)):
mooseutils.MooseException("Supplied x and y data must be same length.")
# Apply the line/point settings
if self.isOptionValid('color'):
self._vtkplot.SetColor(*self.getOption('color'))
if self.isOptionValid('width'):
self._vtkplot.SetWidth(self.getOption('width'))
if self.isOptionValid('label'):
self._vtkplot.SetLabel(self.getOption('label'))
vtk_marker = getattr(vtk.vtkPlotLine, self.getOption('marker').upper())
self._vtkplot.SetMarkerStyle(vtk_marker)
# Label
if not self.isOptionValid('label'):
self._vtkplot.LegendVisibilityOff()
else:
self._vtkplot.LegendVisibilityOn()
# Handle single point data
if self._vtktable.GetNumberOfRows() == 1:
self._vtktable.InsertNextRow(self._vtktable.GetRow(0))
# Tracers
if self._xtracer:
ax = self._vtkplot.GetYAxis()
rmin = ax.GetMinimum()
rmax = ax.GetMaximum()
value = self._vtktable.GetValue(self._vtktable.GetNumberOfRows()-1, 0)
self._xtracer.update(x=[value, value], y=[rmin, rmax])
if self._ytracer:
ax = self._vtkplot.GetXAxis()
rmin = ax.GetMinimum()
rmax = ax.GetMaximum()
value = self._vtktable.GetValue(self._vtktable.GetNumberOfRows()-1, 1)
self._ytracer.update(x=[rmin, rmax], y=[value, value])
def getVTKPlotObjects(self):
"""
Return the vtkPlotLine/vtkPlotPoints object.
see Graph.py
"""
objects = [self._vtkplot]
if self._xtracer:
objects.append(self._xtracer.getVTKPlot())
if self._ytracer:
objects.append(self._ytracer.getVTKPlot())
return objects
|
google/myelin-acorn-electron-hardware
|
third_party/nanopb/generator/nanopb_generator.py
|
Python
|
apache-2.0
| 70,423
| 0.003664
|
#!/usr/bin/env python
from __future__ import unicode_literals
'''Generate header file for nanopb from a ProtoBuf FileDescriptorSet.'''
nanopb_version = "nanopb-0.3.9.2"
import sys
import re
import codecs
from functools import reduce
try:
# Add some dummy imports to keep packaging tools happy.
import google, distutils.util # bbfreeze seems to need these
import pkg_resources # pyinstaller / protobuf 2.5 seem to need these
except:
# Don't care, we will error out later if it is actually important.
pass
try:
import google.protobuf.text_format as text_format
import google.protobuf.descriptor_pb2 as descriptor
except:
sys.stderr.write('''
*************************************************************
*** Could not import the Google protobuf Python libraries ***
*** Try installing package 'python-protobuf' or similar. ***
*************************************************************
''' + '\n')
raise
try:
import proto.nanopb_pb2 as nanopb_pb2
import proto.plugin_pb2 as plugin_pb2
except TypeError:
sys.stderr.write('''
****************************************************************************
*** Got TypeError when importing the protocol definitions for generator. ***
*** This usually means that the protoc in your path doesn't match the ***
*** Python protobuf library version. ***
*** ***
*** Please check the output of the following commands: ***
*** which protoc ***
*** protoc --version ***
*** python -c 'import google.protobuf; print(google.protobuf.__file__)' ***
*** If you are not able to find the python protobuf version using the ***
*** above command, use this command. ***
*** pip freeze | grep -i protobuf ***
****************************************************************************
''' + '\n')
raise
except:
sys.stderr.write('''
********************************************************************
*** Failed to import the protocol definitions for generator. ***
*** You have to run 'make' in the nanopb/generator/proto folder. ***
********************************************************************
''' + '\n')
raise
# ---------------------------------------------------------------------------
# Generation of single fields
# ---------------------------------------------------------------------------
import time
import os.path
# Values are tuple (c type, pb type, encoded size, int_size_allowed)
FieldD = descriptor.FieldDescriptorProto
datatypes = {
FieldD.TYPE_BOOL: ('bool', 'BOOL', 1, False),
FieldD.TYPE_DOUBLE: ('double', 'DOUBLE', 8, False),
FieldD.TYPE_FIXED32: ('uint32_t', 'FIXED32', 4, False),
FieldD.TYPE_FIXED64: ('uint64_t', 'FIXED64', 8, False),
FieldD.TYPE_FLOAT: ('float', 'FLOAT', 4, False),
FieldD.TYPE_INT32: ('int32_t', 'INT32', 10, True),
FieldD.TYPE_INT64: ('int64_t', 'INT64', 10, True),
FieldD.TYPE_SFIXED32: ('int32_t', 'SFIXED32', 4, False),
FieldD.TYPE_SFIXED64: ('int64_t', 'SFIXED64', 8, False),
FieldD.TYPE_SINT32: ('int32_t', 'SINT32', 5, True),
FieldD.TYPE_SINT64: ('int64_t', 'SINT64', 10, True),
FieldD.TYPE_UINT32: ('uint32_t', 'UINT32', 5, True),
FieldD.TYPE_UINT64: ('uint64_t', 'UINT64', 10, True)
}
# Integer size overrides (from .proto settings)
intsizes = {
nanopb_pb2.IS_8: 'int8_t',
nanopb_pb2.IS_16: 'int16_t',
nanopb_pb2.IS_32: 'int32_t',
nanopb_pb2.IS_64: 'int64_t',
}
# String types (for python 2 / python 3 compatibility)
try:
strtypes = (unicode, str)
except NameError:
strtypes = (str, )
class Names:
'''Keeps a set of nested names and formats them to C identifier.'''
def __init__(self, parts = ()):
if isinstance(parts, Names):
parts = parts.parts
elif isinstance(parts, strtypes):
parts = (parts,)
self.parts = tuple(parts)
def __str__(self):
return '_'.join(self.parts)
def __add__(self, other):
if isinstance(other, strtypes):
return Names(self.parts + (other,))
elif isinstance(other, Names):
return Names(self.parts + other.parts)
elif isinstance(other, tuple):
return Names(self.parts + other)
else:
raise ValueError("Name parts should be of type str")
def __eq__(self, other):
return isinstance(other, Names) and self.parts == other.parts
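# Illustrative example (not part of the original generator):
#   str(Names(('Outer', 'Inner')) + 'field') == 'Outer_Inner_field'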
def names_from_type_name(type
|
_name):
'''Parse Names() from FieldDescriptorProto type_name'''
if type_name[0] != '.':
raise NotImplementedError("Lookup of non-absolute type names is not supported")
return Names(type_name[1:].split('.'))
def varint_max_size(max_value):
'''Returns the maximum number of bytes a varint can take when encoded.'''
if max_value < 0:
max_value = 2**64 - max_value
for i in range(1, 11):
|
if (max_value >> (i * 7)) == 0:
return i
raise ValueError("Value too large for varint: " + str(max_value))
assert varint_max_size(-1) == 10
assert varint_max_size(0) == 1
assert varint_max_size(127) == 1
assert varint_max_size(128) == 2
class EncodedSize:
'''Class used to represent the encoded size of a field or a message.
Consists of a combination of symbolic sizes and integer sizes.'''
def __init__(self, value = 0, symbols = []):
if isinstance(value, EncodedSize):
self.value = value.value
self.symbols = value.symbols
elif isinstance(value, strtypes + (Names,)):
self.symbols = [str(value)]
self.value = 0
else:
self.value = value
self.symbols = symbols
def __add__(self, other):
if isinstance(other, int):
return EncodedSize(self.value + other, self.symbols)
elif isinstance(other, strtypes + (Names,)):
return EncodedSize(self.value, self.symbols + [str(other)])
elif isinstance(other, EncodedSize):
return EncodedSize(self.value + other.value, self.symbols + other.symbols)
else:
raise ValueError("Cannot add size: " + repr(other))
def __mul__(self, other):
if isinstance(other, int):
return EncodedSize(self.value * other, [str(other) + '*' + s for s in self.symbols])
else:
raise ValueError("Cannot multiply size: " + repr(other))
def __str__(self):
if not self.symbols:
return str(self.value)
else:
return '(' + str(self.value) + ' + ' + ' + '.join(self.symbols) + ')'
def upperlimit(self):
if not self.symbols:
return self.value
else:
return 2**32 - 1
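# Illustrative example (not part of the original generator) of how EncodedSize
# mixes integer and symbolic sizes:
#   str(EncodedSize(4) + 'MyMessage_size')  -> '(4 + MyMessage_size)'
#   (EncodedSize(2) * 3).value              -> 6
#   EncodedSize(5).upperlimit()             -> 5   (2**32 - 1 once symbols appear)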
class Enum:
def __init__(self, names, desc, enum_options):
'''desc is EnumDescriptorProto'''
self.options = enum_options
self.names = names
        # by definition, `names` includes this enum's name
base_name = Names(names.parts[:-1])
if enum_options.long_names:
self.values = [(names + x.name, x.number) for x in desc.value]
else:
self.values = [(base_name + x.name, x.number) for x in desc.value]
self.value_longnames = [self.names + x.name for x in desc.value]
self.packed = enum_options.packed_enum
def has_negative(self):
for n, v in self.values:
if v < 0:
return True
return False
def encoded_size(self):
return max([varint_max_size(v) for n,v in self.values])
def __str__(self):
result
|
lunzhy/PyShanbay
|
gui/__init__.py
|
Python
|
mit
| 71
| 0.014085
|
#! /usr/bin/env python3
# -*- coding: utf-8
|
-*-
__author__ =
|
'Lunzhy'
|
MSC19950601/TextRank4ZH
|
textrank4zh/TextRank4Keyword.py
|
Python
|
mit
| 7,411
| 0.013252
|
#-*- encoding:utf-8 -*-
'''
Created on Nov 30, 2014
@author: letian
'''
import networkx as nx
from Segmentation import Segmentation
import numpy as np
class TextRank4Keyword(object):
def __init__(self, stop_words_file = None, delimiters = '?!;?!。;…\n'):
'''
        `stop_words_file`: defaults to None, in which case the internal stop-word list is empty; it may be set to a file path (string), and stop words will then be loaded from that file.
        `delimiters`: defaults to `'?!;?!。;…\n'`; used to split the text into sentences.
        self.words_no_filter: two-level list obtained by word-segmenting every sentence in sentences.
        self.words_no_stop_words: two-level list obtained by removing the stop words from words_no_filter.
        self.words_all_filters: two-level list obtained by keeping only the words of the allowed parts of speech from words_no_stop_words.
'''
self.text = ''
self.keywords = []
self.seg = Segmentation(stop_words_file=stop_words_file, delimiters=delimiters)
        self.words_no_filter = None  # 2-dimensional list (list of lists)
self.words_no_stop_words = None
self.words_all_filters = None
self.word_index = {}
self.index_word = {}
self.graph = None
def train(self, text, window = 2, lower = False, speech_tag_filter=True,
vertex_source = 'all_filters',
edge_source = 'no_stop_words'):
'''
        `text`: the text to analyse, as a string.
        `window`: window size (int) used to build edges between words. Defaults to 2.
        `lower`: whether to convert the text to lower case. Defaults to False.
        `speech_tag_filter`: if True, the internal part-of-speech list is used to filter words when building words_all_filters.
                             If False, words_all_filters is identical to words_no_stop_words.
        `vertex_source`: which of words_no_filter, words_no_stop_words, words_all_filters is used to build the vertices of the PageRank graph.
                         Defaults to `'all_filters'`; allowed values are `'no_filter', 'no_stop_words', 'all_filters'`. Keywords are also taken from `vertex_source`.
        `edge_source`: which of words_no_filter, words_no_stop_words, words_all_filters is used to build the edges between vertices of the PageRank graph.
                       Defaults to `'no_stop_words'`; allowed values are `'no_filter', 'no_stop_words', 'all_filters'`. Edge construction also depends on the `window` parameter.
'''
self.text = text
self.word_index = {}
self.index_word = {}
self.keywords = []
self.graph = None
(_, self.words_no_filter, self.words_no_stop_words, self.words_all_filters) = self.seg.segment(text=text,
lower=lower,
speech_tag_filter=speech_tag_filter)
if vertex_source == 'no_filter':
vertex_source = self.words_no_filter
elif vertex_source == 'no_stop_words':
vertex_source = self.words_no_stop_words
else:
vertex_source = self.words_all_filters
if edge_source == 'no_filter':
edge_source = self.words_no_filter
        elif edge_source == 'all_filters':
edge_source = self.words_all_filters
else:
edge_source = self.words_no_stop_words
index = 0
for words in vertex_source:
for word in words:
if not self.word_index.has_key(word):
self.word_index[word] = index
self.index_word[index] = word
index += 1
        words_number = index  # number of words
self.graph = np.zeros((words_number, words_number))
for word_list in edge_source:
|
for w1, w2 in self.combine(word_list, window):
if not sel
|
f.word_index.has_key(w1):
continue
if not self.word_index.has_key(w2):
continue
index1 = self.word_index[w1]
index2 = self.word_index[w2]
self.graph[index1][index2] = 1.0
self.graph[index2][index1] = 1.0
# for x in xrange(words_number):
# row_sum = np.sum(self.graph[x, :])
# if row_sum > 0:
# self.graph[x, :] = self.graph[x, :] / row_sum
nx_graph = nx.from_numpy_matrix(self.graph)
scores = nx.pagerank(nx_graph) # this is a dict
sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)
for index, _ in sorted_scores:
self.keywords.append(self.index_word[index])
def combine(self, word_list, window = 2):
'''
        Build word combinations within the given window; they are used to create the edges between words. Implemented as a generator.
        word_list: a list of words.
        window: the window size.
'''
window = int(window)
if window < 2: window = 2
for x in xrange(1, window):
if x >= len(word_list):
break
word_list2 = word_list[x:]
res = zip(word_list, word_list2)
for r in res:
yield r
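    # Illustrative example (not from the original file): with window=2,
    # combine(['a', 'b', 'c']) yields ('a', 'b') and ('b', 'c'); these pairs
    # become the undirected edges of the word graph built in train().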
def get_keywords(self, num = 6, word_min_len = 1):
'''
        Get the `num` most important keywords whose length is at least `word_min_len`.
        Returns a list of keywords.
'''
result = []
count = 0
for word in self.keywords:
if count >= num:
break
if len(word) >= word_min_len:
result.append(word)
count += 1
return result
def get_keyphrases(self, keywords_num = 12, min_occur_num = 2):
'''
        Get key phrases.
        Build candidate phrases from the top `keywords_num` keywords; a phrase is kept only if it occurs at least `min_occur_num` times in the original text.
        Returns a list of key phrases.
'''
keywords_set = set(self.get_keywords(num=keywords_num, word_min_len = 1))
keyphrases = set()
one = []
for sentence_list in self.words_no_filter:
for word in sentence_list:
# print '/'.join(one)
# print word
if word in keywords_set:
one.append(word)
else:
if len(one)>1:
keyphrases.add(''.join(one))
one = []
continue
one = []
return [phrase for phrase in keyphrases
if self.text.count(phrase) >= min_occur_num]
if __name__ == '__main__':
import codecs
text = codecs.open('../text/02.txt', 'r', 'utf-8').read()
# text = "坏人"
tr4w = TextRank4Keyword(stop_words_file='../stopword.data')
tr4w.train(text=text, speech_tag_filter=True, lower=True, window=2)
for word in tr4w.get_keywords(10, word_min_len=2):
print word
print '---'
for phrase in tr4w.get_keyphrases(keywords_num=20, min_occur_num= 2):
print phrase
|
Caoimhinmg/PmagPy
|
data_files/LearningPython/ConvertStations.py
|
Python
|
bsd-3-clause
| 408
| 0.031863
|
#!/usr/bin/env python
from __future__ import print_function
import UTM # imports the UTM module
Ellipsoid=23-1 # UTMs code for WGS-84
StationNFO=open('station.list').readlines()
for line in StationNFO:
nfo=line.strip('\n').split()
lat=float(nfo[0])
lon=float(nfo[1])
StaName= nfo[3]
|
Zone,Easting, Northing=UTM.LLtoUTM(Ellipso
|
id,lon,lat)
print(StaName, ': ', Easting, Northing, Zone)
|
Peddle/hue
|
desktop/libs/notebook/src/notebook/connectors/base.py
|
Python
|
apache-2.0
| 5,042
| 0.012297
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.utils.translation import ugettext as _
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_unicode
from notebook.conf import get_interpreters
LOG = logging.getLogger(__name__)
class SessionExpired(Exception):
pass
class QueryExpired(Exception):
pass
class AuthenticationRequired(Exception):
pass
class QueryError(Exception):
def __init__(self, message, handle=None):
self.message = message or _('No error message, please check the logs.')
self.handle = handle
self.extra = {}
def __unicode__(self):
return smart_unicode(self.message)
class Notebook(object):
def __init__(self, document=None):
self.document = None
if document is not None:
self.data = document.data
sel
|
f.document = document
else:
self.data = json.dumps
|
({
'name': 'My Notebook',
'description': '',
'type': 'notebook',
'snippets': [],
})
def get_json(self):
_data = self.get_data()
return json.dumps(_data)
def get_data(self):
_data = json.loads(self.data)
if self.document is not None:
_data['id'] = self.document.id
_data['is_history'] = self.document.is_history
return _data
def get_str(self):
return '\n\n'.join([snippet['statement_raw'] for snippet in self.get_data()['snippets']])
def get_api(request, snippet):
from notebook.connectors.hiveserver2 import HS2Api
from notebook.connectors.jdbc import JdbcApi
from notebook.connectors.rdbms import RdbmsApi
from notebook.connectors.pig_batch import PigApi
from notebook.connectors.solr import SolrApi
from notebook.connectors.spark_shell import SparkApi
from notebook.connectors.spark_batch import SparkBatchApi
from notebook.connectors.text import TextApi
interpreter = [interpreter for interpreter in get_interpreters(request.user) if interpreter['type'] == snippet['type']]
if not interpreter:
raise PopupException(_('Snippet type %(type)s is not configured in hue.ini') % snippet)
interpreter = interpreter[0]
interface = interpreter['interface']
if interface == 'hiveserver2':
return HS2Api(user=request.user, request=request)
elif interface == 'livy':
return SparkApi(request.user)
elif interface == 'livy-batch':
return SparkBatchApi(request.user)
elif interface == 'text' or interface == 'markdown':
return TextApi(request.user)
elif interface == 'rdbms':
return RdbmsApi(request.user, interpreter=snippet['type'])
elif interface == 'jdbc':
return JdbcApi(request.user, interpreter=interpreter)
elif interface == 'solr':
return SolrApi(request.user, interpreter=interpreter)
elif interface == 'pig':
return PigApi(user=request.user, request=request)
else:
raise PopupException(_('Notebook connector interface not recognized: %s') % interface)
def _get_snippet_session(notebook, snippet):
session = [session for session in notebook['sessions'] if session['type'] == snippet['type']]
if not session:
raise SessionExpired()
else:
return session[0]
# Base API
class Api(object):
def __init__(self, user, interpreter=None, request=None):
self.user = user
self.interpreter = interpreter
self.request = request
def create_session(self, lang, properties=None):
return {
'type': lang,
'id': None,
      'properties': properties if properties is not None else []
}
def close_session(self, session):
pass
def fetch_result(self, notebook, snippet, rows, start_over):
pass
def download(self, notebook, snippet, format):
pass
def get_log(self, notebook, snippet, startFrom=None, size=None):
return 'No logs'
def autocomplete(self, snippet, database=None, table=None, column=None, nested=None):
return {}
def progress(self, snippet, logs=None):
return 50
def get_jobs(self, notebook, snippet, logs):
return []
def export_data_as_hdfs_file(self, snippet, target_file, overwrite): raise NotImplementedError()
def export_data_as_table(self, notebook, snippet, destination): raise NotImplementedError()
def export_large_data_to_hdfs(self, notebook, snippet, destination): raise NotImplementedError()
|
HewlettPackard/oneview-ansible
|
library/oneview_server_hardware.py
|
Python
|
apache-2.0
| 13,959
| 0.002866
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'metadata_version':
|
'1.1'}
DOCUMENTATION = '''
---
module: oneview_server_hardware
short_description: Manage OneView Server Hardware resources.
description:
- "Provides an interface to manage Server Hardware resources."
version_added: "2.3"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 5.4.0"
author: "Gustavo Hennig (@GustavoHennig)"
options:
state:
description:
- Indicates
|
the desired state for the Server Hardware resource.
C(present) will ensure data properties are compliant with OneView.
C(absent) will remove the resource from OneView, if it exists.
C(power_state_set) will set the power state of the Server Hardware.
C(refresh_state_set) will set the refresh state of the Server Hardware.
C(ilo_firmware_version_updated) will update the iLO firmware version of the Server Hardware.
C(ilo_state_reset) will reset the iLO state.
C(uid_state_on) will set on the UID state, if necessary.
C(uid_state_off) will set off the UID state, if necessary.
C(enable_maintenance_mode) will set true to the maintenance mode, if necessary.
C(disable_maintenance_mode) will set false to the maintenance mode, if necessary.
C(environmental_configuration_set) will set the environmental configuration of the Server Hardware.
C(multiple_servers_added) will add multiple rack-mount servers.
choices: ['present', 'absent', 'power_state_set', 'refresh_state_set', 'ilo_firmware_version_updated',
'ilo_state_reset','uid_state_on', 'uid_state_off', 'enable_maintenance_mode', 'disable_maintenance_mode',
'environmental_configuration_set', 'multiple_servers_added']
required: true
data:
description:
- List with Server Hardware properties and its associated states.
required: true
extends_documentation_fragment:
- oneview
- oneview.validateetag
'''
EXAMPLES = '''
- name: Add a Server Hardware
oneview_server_hardware:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: present
data:
hostname : "172.18.6.15"
username : "username"
password : "password"
force : false
licensingIntent: "OneView"
configurationState: "Managed"
delegate_to: localhost
- name: Ensure that the Server Hardware is present and is inserted in the desired scopes
oneview_server_hardware:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: present
data:
name : "172.18.6.15"
scopeUris:
- '/rest/scopes/00SC123456'
- '/rest/scopes/01SC123456'
delegate_to: localhost
- name: Add multiple rack-mount servers
oneview_server_hardware:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: multiple_servers_added
data:
mpHostsAndRanges :
- '172.18.6.15'
username : 'username'
password : 'password'
initialScopeUris:
- "/rest/scopes/01SC123456"
licensingIntent: "OneView"
configurationState: "Managed"
delegate_to: localhost
- name: Power Off the server hardware
oneview_server_hardware:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: power_state_set
data:
name : "172.18.6.15"
powerStateData:
powerState: "Off"
powerControl: "MomentaryPress"
delegate_to: localhost
- name: Refresh the server hardware
oneview_server_hardware:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: refresh_state_set
data:
name : "172.18.6.15"
refreshStateData:
refreshState : "RefreshPending"
delegate_to: localhost
- name: Update the Server Hardware iLO firmware version
oneview_server_hardware:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: ilo_firmware_version_updated
data:
name : "172.18.6.15"
delegate_to: localhost
- name: Set the calibrated max power of a server hardware
oneview_server_hardware:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: environmental_configuration_set
data:
name : "172.18.6.15"
environmentalConfigurationData:
calibratedMaxPower: 2500
delegate_to: localhost
- name: Remove the server hardware by its IP
oneview_server_hardware:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: absent
data:
name : "172.18.6.15"
delegate_to: localhost
- name: Set the server UID state off
oneview_server_hardware:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: uid_state_off
data:
name : '0000A66102, bay 12'
delegate_to: localhost
- name: Enable Server Maintenance Mode
oneview_server_hardware:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: enable_maintenance_mode
data:
name : '0000A66102, bay 12'
delegate_to: localhost
- name: Disable Server Maintenance Mode
oneview_server_hardware:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: disable_maintenance_mode
data:
name : '0000A66102, bay 12'
delegate_to: localhost
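# Editor's illustration, not part of the original module documentation:
# 'ilo_state_reset' has no example above; this sketch assumes it only needs the
# server's name, following the pattern of the other name-based states.
- name: Reset the Server Hardware iLO state (illustrative)
oneview_server_hardware:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: ilo_state_reset
data:
name : "172.18.6.15"
delegate_to: localhost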
'''
RETURN = '''
server_hardware:
description: Has the OneView facts about the Server Hardware.
returned: On states 'present', 'power_state_set', 'refresh_state_set', and 'ilo_firmware_version_updated'.
Can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule, OneViewModuleResourceNotFound, OneViewModuleValueError
class ServerHardwareModule(OneViewModule):
MSG_ADDED = 'Server Hardware added successfully.'
MSG_ALREADY_PRESENT = 'Server Hardware is already present.'
MSG_POWER_STATE_UPDATED = 'Server Hardware power state changed successfully.'
MSG_REFRESH_STATE_UPDATED = 'Server Hardware refresh state changed successfully.'
MSG_ILO_FIRMWARE_VERSION_UPDATED = 'Server Hardware iLO firmware version updated successfully.'
MSG_ENV_CONFIG_UPDATED = 'Server Hardware calibrated max power updated successfully.'
MSG_SERVER_HARDWARE_NOT_FOUND = 'The provided Server Hardware was not found.'
MSG_UID_STATE_CHANGED = 'Server Hardware UID state changed successfully.'
MSG_MAINTENANCE_MODE_CHANGED = 'Server Hardware Maintenance Mode changed successfully.'
MSG_ILO_STATE_RESET = 'Server Hardware iLO state changed successfully.'
MSG_NOTHING_TO_DO = 'Nothing to do.'
MSG_DELETED = 'Server Hardware deleted successfully.'
MSG_ALREADY_ABSENT = 'Server Hardware is already absent.'
MSG_MANDATORY_FIELD_MISSING = "Mandatory field was not informed: {0}"
MSG_MULTIPLE_RACK_MOUNT_SERVERS_ADDED = "Servers added successfully."
patch_success_message = dict(
ilo_state_reset=MSG_ILO_STATE_RESET,
|
hlin117/statsmodels
|
statsmodels/tsa/statespace/tests/test_tools.py
|
Python
|
bsd-3-clause
| 4,268
| 0.011949
|
"""
Tests for tools
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
from statsmodels.tsa.statespace import tools
# from .results import results_sarimax
from numpy.testing import (
assert_equal, assert_array_equal, assert_almost_equal, assert_raises
)
class TestCompanionMatrix(object):
cases = [
(2, np.array([[0,1],[0,0]])),
([1,-1,-2], np.array([[1,1],[2,0]])),
([1,-1,-2,-3], np.array([[1,1,0],[2,0,1],[3,0,0]]))
]
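    # Editor's note: the second case reads as the lag polynomial 1 - L - 2*L^2,
    # i.e. coefficients (1, 2), for which the companion form used by these tests
    # is [[1, 1], [2, 0]]; the third case extends the same pattern to order 3.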
def test_cases(self):
for polynomial, result in self.cases:
assert_equal(tools.companion_matrix(polynomial), result)
class TestDiff(object):
x = np.arange(10)
cases = [
# diff = 1
([1,2,3], 1, None, 1, [1, 1]),
# diff = 2
(x, 2, None, 1, [0]*8),
# diff = 1, seasonal_diff=1, k_seasons=4
(x, 1, 1, 4, [0]*5),
(x**2, 1, 1, 4, [8]*5),
(x**3, 1, 1, 4, [60, 84, 108, 132, 156]),
# diff = 1, seasonal_diff=2, k_seasons=2
(x, 1, 2, 2, [0]*5),
(x**2, 1, 2, 2, [0]*5),
(x**3, 1, 2, 2, [24]*5),
(x**4, 1, 2, 2, [240, 336, 432, 528, 624]),
]
def test_cases(self):
# Basic cases
for series, diff, seasonal_diff, k_seasons, result in self.cases:
# Test numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Series
series = pd.Series(series)
# Rewrite to test as n-dimensional array
series = np.c_[series, series]
result = np.c_[result, result]
# Test Numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Dataframe
series = pd.DataFrame(series)
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
class TestIsInvertible(object):
cases = [
([1, -0.5], True),
([1, 1-1e-9], True),
([1, 1], False),
([1, 0.9,0.1], True),
(np.array([1,0.9,0.1]), True),
(pd.Series([1,0.9,0.1]), True)
]
def test_cases(self):
for polynomial, invertible in self.cases:
assert_equal(tools.is_invertible(polynomial), invertible)
class TestConstrainStationaryUnivariate(object):
cases = [
(np.array([2.]), -2./((1+2.**2)**0.5))
]
def test_cases(self):
for unconstrained, constrained in self.cases:
result = tools.constrain_stationary_univariate(unconstrained)
assert_equal(result, constrained)
class TestValidateMatrixShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,2), 5, 2, None),
('TEST', (5,2), 5, 2, 10),
('TEST', (5,2,10), 5, 2, 10),
]
invalid = [
('TEST', (5,), 5, None, None),
('TEST', (5,1,1,1), 5, 1, None),
('TEST', (5,2), 10, 2, None),
('TEST', (5,2), 5, 1, None),
('TEST', (5,2,10), 5, 2, None),
('TEST', (5,2,10), 5, 2, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_matrix_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises(
ValueError, tools.validate_matrix_shape, *args
)
class TestValidateVectorShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,), 5, None),
('TEST', (5,), 5, 10),
('TEST', (5,10), 5, 10),
]
invalid = [
('TEST', (5,2,10), 5, 10),
('TEST', (5,), 10, None),
('TEST', (5,10), 5, None),
('TEST', (5,10), 5, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_vector_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises(
ValueError, tools.validate_vector_shape, *args
)
|
eviljeff/olympia
|
src/olympia/amo/admin.py
|
Python
|
bsd-3-clause
| 5,736
| 0
|
import functools
from django.contrib import admin
from django.contrib.admin.options import operator
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from .models import FakeEmail
class CommaSearchInAdminMixin:
def get_search_id_field(self, request):
"""
Return the field to use when all search terms are numeric.
Default is to return pk, but in some cases it'll make more sense to
return a foreign key.
"""
return 'pk'
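    # Editor's sketch (not in the original code): a concrete admin can point the
    # all-numeric bulk search at a related field instead of pk. The class and
    # field names below are hypothetical.
    #
    #     class HypotheticalLogAdmin(CommaSearchInAdminMixin, admin.ModelAdmin):
    #         def get_search_id_field(self, request):
    #             return 'user__id'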
def lookup_needs_distinct(self, opts, lookup_path):
"""
Return True if 'distinct()' should be used to query the given lookup
path. Used by get_search_results() as a replacement of the version used
by django, which doesn't consider our translation fields as needing
distinct (but they do).
"""
rval = admin.utils.lookup_needs_distinct(opts, lookup_path)
lookup_fields = lookup_path.split(LOOKUP_SEP)
# Not pretty but looking up the actual field would require truly
# resolving the field name, walking to any relations we find up until
# the last one, that would be a lot of work for a simple edge case.
if any(field_name in lookup_fields for field_name in
('localized_string', 'localized_string_clean')):
rval = True
return rval
def get_search_results(self, request, queryset, search_term):
"""
Return a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
Originally copied from Django's, but with the following differences:
- The operator joining the query parts is dynamic: if the search term
        contains a comma and no space, then the comma is used as the separator
instead, and the query parts are joined by OR, not AND, allowing
admins to search by a list of ids, emails or usernames and find all
objects in that list.
        - If the search terms are all numeric and there is more than one, then
we also restrict the fields we search to the one returned by
get_search_id_field(request) using a __in ORM lookup directly.
"""
# Apply keyword searches.
        def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__icontains" % field_name[1:]
# Use field_name if it includes a lookup.
opts = queryset.model._meta
lookup_fields = field_name.split(models.constants.LOOKUP_SEP)
# Go through the fields, following all relations.
prev_field = None
for path_part in lookup_fields:
if path_part == 'pk':
path_part = opts.pk.name
try:
field = opts.get_field(path_part)
except FieldDoesNotExist:
# Use valid query lookups.
if prev_field and prev_field.get_lookup(path_part):
return field_name
else:
prev_field = field
if hasattr(field, 'get_path_info'):
# Update opts to follow the relation.
opts = field.get_path_info()[-1].to_opts
# Otherwise, use the field with icontains.
return "%s__icontains" % field_name
use_distinct = False
search_fields = self.get_search_fields(request)
filters = []
joining_operator = operator.and_
if not (search_fields and search_term):
# return early if we have nothing special to do
return queryset, use_distinct
if ' ' not in search_term and ',' in search_term:
separator = ','
joining_operator = operator.or_
else:
separator = None
search_terms = search_term.split(separator)
all_numeric = all(term.isnumeric() for term in search_terms)
if all_numeric and len(search_terms) > 1:
# if we have multiple numbers assume we're doing a bulk id search
orm_lookup = '%s__in' % self.get_search_id_field(request)
queryset = queryset.filter(**{orm_lookup: search_terms})
else:
orm_lookups = [
construct_search(str(search_field))
for search_field in search_fields]
for bit in search_terms:
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
q_for_this_term = models.Q(
functools.reduce(operator.or_, or_queries))
filters.append(q_for_this_term)
use_distinct |= any(
# Use our own lookup_needs_distinct(), not django's.
self.lookup_needs_distinct(self.opts, search_spec)
for search_spec in orm_lookups)
if filters:
queryset = queryset.filter(
functools.reduce(joining_operator, filters))
return queryset, use_distinct
@admin.register(FakeEmail)
class FakeEmailAdmin(admin.ModelAdmin):
list_display = (
'created',
'message',
)
actions = ['delete_selected']
view_on_site = False
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
|
rmcgurrin/PyQLab
|
instruments/Digitizers.py
|
Python
|
apache-2.0
| 6,043
| 0.025484
|
"""
For now just Alazar cards but should also support Acqiris.
"""
from Instrument import Instrument
from atom.api import Atom, Str, Int, Float, Bool, Enum, List, Dict, Coerced
import itertools, ast
import enaml
from enaml.qt.qt_application import QtApplication
class Digitizer(Instrument):
pass
class AlazarATS9870(Instrument):
address = Str('1').tag(desc='Location of the card') #For now we only have one
acquireMode = Enum('digitizer', 'averager').tag(desc='Whether the card averages on-board or returns single-shot data')
clockType = Enum('ref')
delay = Float(0.0).tag(desc='Delay from trigger')
samplingRate = Float(100000000).tag(desc='Sampling rate in Hz')
verticalScale = Float(1.0).tag(desc='Peak voltage (V)')
verticalOffset = Float(0.0).tag(desc='Vertical offset (V)')
verticalCoupling = Enum('AC','DC').tag(desc='AC/DC coupling')
bandwidth = Enum('20MHz', 'Full').tag(desc='Input bandwidth filter')
triggerLevel = Float(0.0).tag(desc='Trigger level (mV)')
triggerSource = Enum('A','B','Ext').tag(desc='Trigger source')
    triggerCoupling = Enum('AC','DC').tag(desc='Trigger coupling')
triggerSlope = Enum('rising','falling').tag(desc='Trigger slope')
recordLength = Int(1024).tag(desc='Number of samples in each record')
nbrSegments = Int(1).tag(desc='Number of segments in memory')
nbrWaveforms = Int(1).tag(desc='Number of times each segment is repeated')
nbrRoundRobins = Int(1).tag(desc='Number of times entire memory is looped')
    def json_encode(self, matlabCompatible=False):
if matlabCompatible:
"For the Matlab experiment manager we seperately nest averager, horizontal, vertical settings"
jsonDict = {}
jsonDict['address'] = self.address
jsonDict['deviceName'] = 'AlazarATS9870'
jsonDict['horizontal'] = {'delayTime':self.delay, 'samplingRate':self.samplingRate}
jsonDict['vertical'] = {k:getattr(self,k) for k in ['verticalScale', 'verticalOffset', 'verticalCoupling', 'bandwidth']}
jsonDict['trigger'] = {k:getattr(self,k) for k in ['triggerLevel', 'triggerSource', 'triggerCoupling', 'triggerSlope']}
jsonDict['averager'] = {k:getattr(self,k) for k in ['recordLength', 'nbrSegments', 'nbrWaveforms', 'nbrRoundRobins']}
#Add the other necessities
jsonDict['acquireMode'] = self.acquireMode
jsonDict['clockType'] = self.clockType
else:
jsonDict = super(AlazarATS9870, self).json_encode(matlabCompatible)
return jsonDict
class X6VirtualChannel(Atom):
label = Str()
enableDemodStream = Bool(True).tag(desc='Enable demodulated data stream')
enableDemodResultStream = Bool(True).tag(desc='Enable demod result data stream')
enableRawResultStream = Bool(True).tag(desc='Enable raw result data stream')
IFfreq = Float(10e6).tag(desc='IF Frequency')
demodKernel = Str().tag(desc='Integration kernel vector for demod stream')
demodKernelBias = Str("").tag(desc="Kernel bias for integrated demod stream")
rawKernel = Str().tag(desc='Integration kernel vector for raw stream')
rawKernelBias = Str("").tag(desc="Kernel bias for integrated raw stream")
threshold = Float(0.0).tag(desc='Qubit state decision threshold')
thresholdInvert = Bool(False).tag(desc="Invert thresholder output")
def json_encode(self, matlabCompatible=False):
jsonDict = self.__getstate__()
if matlabCompatible:
import numpy as np
import base64
try:
jsonDict['demodKernel'] = base64.b64encode(eval(self.demodKernel))
except:
jsonDict['demodKernel'] = []
try:
jsonDict['demodKernelBias'] = base64.b64encode(np.array(eval(self.demodKernelBias), dtype=np.complex128))
except:
jsonDict['demodKernelBias'] = []
try:
jsonDict['rawKernel'] = base64.b64encode(eval(self.rawKernel))
except:
jsonDict['rawKernel'] = []
try:
jsonDict['rawKernelBias'] = base64.b64encode(np.array(eval(self.rawKernelBias), dtype=np.complex128))
except:
jsonDict['rawKernelBias'] = []
return jsonDict
class X6(Instrument):
recordLength = Int(1024).tag(desc='Number of samples in each record')
nbrSegments = Int(1).tag(desc='Number of segments in memory')
nbrWaveforms = Int(1).tag(desc='Number of times each segment is repeated')
nbrRoundRobins = Int(1).tag(desc='Number of times entire memory is looped')
enableRawStreams = Bool(False).tag(desc='Enable capture of raw data from ADCs')
# channels = Dict(None, X6VirtualChannel)
channels = Coerced(dict)
digitizerMode = Enum('digitizer', 'averager').tag(desc='Whether the card averages on-board or returns single-shot data')
reference = Enum('external', 'internal').tag(desc='Clock source for 10MHz reference to clock generation tree')
def __init__(self, **traits):
super(X6, self).__init__(**traits)
if not self.channels:
for a, b in itertools.product(range(1,3), range(1,3)):
label = str((a,b))
key = "s{0}{1}".format(a, b)
self.channels[key] = X6VirtualChannel(label=label)
def json_encode(self, matlabCompatible=False):
jsonDict = super(X6, self).json_encode(matlabCompatible)
if matlabCompatible:
# For the Matlab experiment manager we nest averager settings
map(lambda x: jsonDict.pop(x), ['recordLength', 'nbrSegments', 'nbrWaveforms', 'nbrRoundRobins'])
jsonDict['averager'] = {k:getattr(self,k) for k in ['recordLength', 'nbrSegments', 'nbrWaveforms', 'nbrRoundRobins']}
return jsonDict
def update_from_jsondict(self, params):
for chName, chParams in params['channels'].items():
# if this is still a raw dictionary convert to object
if isinstance(chParams, dict):
chParams.pop('x__class__', None)
chParams.pop('x__module__', None)
chParams = X6VirtualChannel(**chParams)
for paramName in chParams.__getstate__().keys():
setattr(self.channels[chName], paramName, getattr(chParams, paramName))
params.pop('channels')
super(X6, self).update_from_jsondict(params)
if __name__ == "__main__":
from Digitizers import X6
digitizer = X6(label='scope')
with enaml.imports():
from DigitizersViews import TestX6Window
app = QtApplication()
view = TestX6Window(instr=digitizer)
view.show()
app.start()
|
ezequielpereira/Time-Line
|
libs64/wx/webkit.py
|
Python
|
gpl-3.0
| 11,969
| 0.009608
|
# This file was created automatically by SWIG 1.3.29.
# Don't modify this file, modify the SWIG interface instead.
"""
wx.webkit.WebKitCtrl for Mac OSX.
"""
import _webkit
import new
new_instancemethod = new.instancemethod
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'PySwigObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import _core
wx = _core
__docfilter__ = wx.__DocFilter(globals())
class WebKitCtrl(_core.Control):
"""Proxy of C++ WebKitCtrl class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self, Window parent, int winID=-1, String strURL=EmptyString,
            Point pos=DefaultPosition, Size size=DefaultSize,
long style=0, Validator validator=DefaultValidator,
String name=WebKitNameStr) -> WebKitCtrl
"""
_webkit.WebKitCtrl_swiginit(self,_webkit.new_WebKitCtrl(*args, **kwargs))
self._setOORInfo(self)
def Create(*args, **kwargs):
"""
Create(self, Window parent, int winID=-1, String strURL=EmptyString,
Point pos=DefaultPosition, Size size=DefaultSize,
            long style=0, Validator validator=DefaultValidator,
String name=WebKitNameStr) -> bool
"""
return _webkit.WebKitCtrl_Create(*args, **kwargs)
def LoadURL(*args, **kwargs):
"""LoadURL(self, String url)"""
return _webkit.WebKitCtrl_LoadURL(*args, **kwargs)
def CanGoBack(*args, **kwargs):
"""CanGoBack(self) -> bool"""
return _webkit.WebKitCtrl_CanGoBack(*args, **kwargs)
def CanGoForward(*args, **kwargs):
"""CanGoForward(self) -> bool"""
return _webkit.WebKitCtrl_CanGoForward(*args, **kwargs)
def GoBack(*args, **kwargs):
"""GoBack(self) -> bool"""
return _webkit.WebKitCtrl_GoBack(*args, **kwargs)
def GoForward(*args, **kwargs):
"""GoForward(self) -> bool"""
return _webkit.WebKitCtrl_GoForward(*args, **kwargs)
def Reload(*args, **kwargs):
"""Reload(self)"""
return _webkit.WebKitCtrl_Reload(*args, **kwargs)
def Stop(*args, **kwargs):
"""Stop(self)"""
return _webkit.WebKitCtrl_Stop(*args, **kwargs)
def CanGetPageSource(*args, **kwargs):
"""CanGetPageSource(self) -> bool"""
return _webkit.WebKitCtrl_CanGetPageSource(*args, **kwargs)
def GetPageSource(*args, **kwargs):
"""GetPageSource(self) -> String"""
return _webkit.WebKitCtrl_GetPageSource(*args, **kwargs)
def SetPageSource(*args, **kwargs):
"""SetPageSource(self, String source, String baseUrl=EmptyString)"""
return _webkit.WebKitCtrl_SetPageSource(*args, **kwargs)
def GetPageURL(*args, **kwargs):
"""GetPageURL(self) -> String"""
return _webkit.WebKitCtrl_GetPageURL(*args, **kwargs)
def GetPageTitle(*args, **kwargs):
"""GetPageTitle(self) -> String"""
return _webkit.WebKitCtrl_GetPageTitle(*args, **kwargs)
def GetSelection(*args, **kwargs):
"""GetSelection(self) -> String"""
return _webkit.WebKitCtrl_GetSelection(*args, **kwargs)
def CanIncreaseTextSize(*args, **kwargs):
"""CanIncreaseTextSize(self) -> bool"""
return _webkit.WebKitCtrl_CanIncreaseTextSize(*args, **kwargs)
def IncreaseTextSize(*args, **kwargs):
"""IncreaseTextSize(self)"""
return _webkit.WebKitCtrl_IncreaseTextSize(*args, **kwargs)
def CanDecreaseTextSize(*args, **kwargs):
"""CanDecreaseTextSize(self) -> bool"""
return _webkit.WebKitCtrl_CanDecreaseTextSize(*args, **kwargs)
def DecreaseTextSize(*args, **kwargs):
"""DecreaseTextSize(self)"""
return _webkit.WebKitCtrl_DecreaseTextSize(*args, **kwargs)
def Print(*args, **kwargs):
"""Print(self, bool showPrompt=False)"""
return _webkit.WebKitCtrl_Print(*args, **kwargs)
def MakeEditable(*args, **kwargs):
"""MakeEditable(self, bool enable=True)"""
return _webkit.WebKitCtrl_MakeEditable(*args, **kwargs)
def IsEditable(*args, **kwargs):
"""IsEditable(self) -> bool"""
return _webkit.WebKitCtrl_IsEditable(*args, **kwargs)
def RunScript(*args, **kwargs):
"""RunScript(self, String javascript) -> String"""
return _webkit.WebKitCtrl_RunScript(*args, **kwargs)
def SetScrollPos(*args, **kwargs):
"""SetScrollPos(self, int pos)"""
return _webkit.WebKitCtrl_SetScrollPos(*args, **kwargs)
def GetScrollPos(*args, **kwargs):
"""GetScrollPos(self) -> int"""
return _webkit.WebKitCtrl_GetScrollPos(*args, **kwargs)
PageSource = property(GetPageSource,SetPageSource,doc="See `GetPageSource` and `SetPageSource`")
PageTitle = property(GetPageTitle,doc="See `GetPageTitle`")
PageURL = property(GetPageURL,doc="See `GetPageURL`")
ScrollPos = property(GetScrollPos,SetScrollPos,doc="See `GetScrollPos and SetScrollPos`")
Selection = property(GetSelection,doc="See `GetSelection`")
_webkit.WebKitCtrl_swigregister(WebKitCtrl)
cvar = _webkit.cvar
WebKitNameStr = cvar.WebKitNameStr
def PreWebKitCtrl(*args, **kwargs):
"""PreWebKitCtrl() -> WebKitCtrl"""
val = _webkit.new_PreWebKitCtrl(*args, **kwargs)
return val
WEBKIT_STATE_START = _webkit.WEBKIT_STATE_START
WEBKIT_STATE_NEGOTIATING = _webkit.WEBKIT_STATE_NEGOTIATING
WEBKIT_STATE_REDIRECTING = _webkit.WEBKIT_STATE_REDIRECTING
WEBKIT_STATE_TRANSFERRING = _webkit.WEBKIT_STATE_TRANSFERRING
WEBKIT_STATE_STOP = _webkit.WEBKIT_STATE_STOP
WEBKIT_STATE_FAILED = _webkit.WEBKIT_STATE_FAILED
WEBKIT_NAV_LINK_CLICKED = _webkit.WEBKIT_NAV_LINK_CLICKED
WEBKIT_NAV_BACK_NEXT = _webkit.WEBKIT_NAV_BACK_NEXT
WEBKIT_NAV_FORM_SUBMITTED = _webkit.WEBKIT_NAV_FORM_SUBMITTED
WEBKIT_NAV_RELOAD = _webkit.WEBKIT_NAV_RELOAD
WEBKIT_NAV_FORM_RESUBMITTED = _webkit.WEBKIT_NAV_FORM_RESUBMITTED
WEBKIT_NAV_OTHER = _webkit.WEBKIT_NAV_OTHER
wxEVT_WEBKIT_STATE_CHANGED = _webkit.wxEVT_WEBKIT_STATE_CHANGED
wxEVT_WEBKIT_BEFORE_LOAD = _webkit.wxEVT_WEBKIT_BEFORE_LOAD
wxEVT_WEBKIT_NEW_WINDOW = _webkit.wxEVT_WEBKIT_NEW_WINDOW
class WebKitBeforeLoadEvent(_core.CommandEvent):
"""Proxy of C++ WebKitBeforeLoadEvent class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def IsCancelled(*args, **kwargs):
"""IsCancelled(self) -> bool"""
return _webkit.WebKitBeforeLoadEvent_IsCancelled(*args, **kwargs)
def Cancel(*args, **kwargs):
"""Cancel(self, bool cancel=True)"""
return _webkit.WebKitBeforeLoadEvent_Cancel(*args, **kwargs)
def GetURL(
|
phith0n/mooder
|
archives/migrations/0004_postimage.py
|
Python
|
lgpl-3.0
| 1,192
| 0.004288
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-04 19:14
from __future__ import unicode_literals
import archives.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('archives', '0003_attachment'),
]
operations = [
migrations.CreateModel(
name='PostImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.ImageField(blank=True, upload_to='images/%Y/%m/%d', validators=[archives.models.check_image_extension], verbose_name='图片')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('last_modify_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
('uploaded_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='上传者')),
],
),
]
|
eltonkevani/tempest_el_env
|
tempest/api/image/v2/test_images_tags_negative.py
|
Python
|
apache-2.0
| 1,762
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.test import attr
class ImagesTagsNegativeTest(base.BaseV2ImageTest):
@attr(type=['negative', 'gate'])
def test_update_tags_for_non_existing_image(self):
# Update tag with non existing image.
tag = data_utils.rand_name('tag-')
non_exist_image = str(uuid.uuid4())
self.assertRaises(exceptions.NotFound, self.client.add_image_tag,
non_exist_image, tag)
@attr(type=['negative', 'gate'])
def test_delete_non_existing_tag(self):
# Delete non existing tag.
resp, body = self.create_image(container_format='bare',
disk_format='raw',
is_public=True,
)
image_id = body['id']
tag = data_utils.rand_name('non-exist-tag-')
self.addCleanup(self.client.delete_image, image_id)
self.assertRaises(exceptions.NotFound, self.client.delete_image_tag,
image_id, tag)
|
geocryology/HorizonPy
|
Examples/Example_1/Example_1_v2.py
|
Python
|
gpl-3.0
| 948
| 0.004219
|
################################################################
#                                                             ##
# Example 1: Converting ArcGIS solar radiation graphics to ##
# horizon coordinate points ##
################################################################
####
# 0. Import packages
####
from horizonpy import arcsky
from os import path
#####
# 1. Set path to Example 1 directory /Horizonpy/Examples/Example_1
#####
EXDIR = r"E:\Users\Nick\Documents\src\HorizonPy\Examples\Example 1"
in_file = path.join(EXDIR, "ArcGIS_Skymap.tif")
out_file = path.join(EXDIR, "horizon_pts.txt")
#####
# 2. Converting raster image to coordinate points for horizon
#####
# create ArcSky object
AS = arcsky.ArcSky()
# Set the classified pixel value of the sky
AS.setSkyClassValue(200)
# Open raster file
AS.open_new_file(in_file)
# convert to points
AS.write_horizon_file(out_file)
|
szarroug3/X-Ray_Calibre_Plugin
|
lib/utilities.py
|
Python
|
gpl-3.0
| 6,752
| 0.003258
|
# utilities.py
'''General utility functions used throughout plugin'''
import re
import os
import time
import socket
from httplib import HTTPException
from calibre.library import current_library_path
from calibre_plugins.xray_creator.lib.exceptions import PageDoesNotExist
HONORIFICS = 'mr mrs ms esq prof dr fr rev pr atty adv hon pres gov sen ofc pvt cpl sgt maj capt cmdr lt col gen'
HONORIFICS = HONORIFICS.split()
HONORIFICS.extend([x + '.' for x in HONORIFICS])
HONORIFICS += 'miss master sir madam lord dame lady esquire professor doctor father mother brother sister'.split()
HONORIFICS += 'reverend pastor elder rabbi sheikh attorney advocate honorable president governor senator'.split()
HONORIFICS += 'officer private corporal sargent major captain commander lieutenant colonel general'.split()
RELIGIOUS_HONORIFICS = 'fr br sr rev pr'
RELIGIOUS_HONORIFICS = RELIGIOUS_HONORIFICS.split()
RELIGIOUS_HONORIFICS.extend([x + '.' for x in RELIGIOUS_HONORIFICS])
RELIGIOUS_HONORIFICS += 'father mother brother sister reverend pastor elder rabbi sheikh'.split()
DOUBLE_HONORIFICS = 'lord'
# We want all the honorifics to be in the general honorifics list so when we're
# checking if a word is an honorific, we only need to search in one list
HONORIFICS += RELIGIOUS_HONORIFICS
HONORIFICS += DOUBLE_HONORIFICS
COMMON_WORDS = 'the of de'.split()
HEADERS = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/html",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0"}
BOOK_ID_PAT = re.compile(r'\/show\/([\d]+)')
AMAZON_ASIN_PAT = re.compile(r'data\-asin=\"([a-zA-z0-9]+)\"')
GOODREADS_ASIN_PAT = re.compile(r'"asin":"(.+?)"')
GOODREADS_URL_PAT = re.compile(r'href="(\/book\/show\/.+?)"')
LIBRARY = current_library_path().replace('/', os.sep)
def open_url(connection, url, return_redirect_url=False):
'''Tries to open url and return page's html'''
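    # Editor's usage sketch (hypothetical values): the connection is expected to be
    # an httplib.HTTPConnection already pointed at the right host, e.g.
    #
    #     conn = HTTPConnection('www.goodreads.com')
    #     html = open_url(conn, 'https://www.goodreads.com/book/show/12345')
    #
    # Full Goodreads URLs are trimmed to their path ('/book/show/12345') before the
    # request is made, and 301/302 responses are followed recursively.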
if 'goodreads.com' in url:
url = url[url.find('goodreads.com') + len('goodreads.com'):]
try:
connection.request('GET', url, headers=HEADERS)
response = connection.getresponse()
if response.status == 301 or response.status == 302:
if return_redirect_url:
return response.msg['location']
response = open_url(connection, response.msg['location'])
else:
response = response.read()
except (HTTPException, socket.error):
time.sleep(1)
connection.close()
connection.connect()
connection.request('GET', url, headers=HEADERS)
response = connection.getresponse()
if response.status == 301 or response.status == 302:
if return_redirect_url:
return response.msg['location']
response = open_url(connection, response.msg['location'])
else:
response = response.read()
if 'Page Not Found' in response:
raise PageDoesNotExist('Page not found.')
return response
def auto_expand_aliases(characters):
'''Goes through each character and expands them using fullname_to_possible_aliases without adding duplicates'''
actual_aliases = {}
duplicates = [alias.lower() for aliases in characters.values() for alias in aliases]
for entity_id, aliases in characters.items():
# get all expansions for original name and aliases retrieved from goodreads
expanded_aliases = []
for alias in aliases:
new_aliases = fullname_to_possible_aliases(alias.lower())
expanded_aliases += [new_alias for new_alias in new_aliases if new_alias not in expanded_aliases]
for alias in expanded_aliases:
# if this alias has already been flagged as a duplicate or is a common word, skip it
if alias in duplicates or alias in COMMON_WORDS:
continue
# check if this alias is a duplicate but isn't in the duplicates list
if alias in actual_aliases:
duplicates.append(alias)
actual_aliases.pop(alias)
continue
            # at this point, the alias is new -- add it to the dict with the alias as the key and the entity id as the value
actual_aliases[alias] = entity_id
return actual_aliases
def fullname_to_possible_aliases(fullname):
'''
Given a full name ("{Title} ChristianName {Middle Names} {Surname}"), return a list of possible aliases
ie. Title Surname, ChristianName Surname, Title ChristianName, {the full name}
The returned aliases are in the order they should match
'''
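    # Worked example (editor's note, traced from the logic below, assuming the
    # lower-cased input that auto_expand_aliases passes in):
    #   fullname_to_possible_aliases('captain john smith')
    #   -> ['captain smith', 'john', 'smith', 'john smith']
    # 'captain' is consumed as the title, 'smith' as the surname and 'john' as the
    # christian name, so the title is paired with the surname rather than the
    # christian name.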
aliases = []
parts = fullname.split()
title = None
if parts[0].lower() in HONORIFICS:
title_list = []
while parts and parts[0].lower() in HONORIFICS:
title_list.append(parts.pop(0))
title = ' '.join(title_list)
if len(parts) >= 2:
# Assume: {Title} Firstname {Middlenames} Lastname
        # Already added the full form; also add Title Lastname and, for some titles, Title Firstname
surname = parts.pop() # This will cover double barrel surnames, we split on whitespace only
christian_name = parts.pop(0)
if title:
# Religious Honorifics usually only use {Title} {ChristianName}
# ie. John Doe could be Father John but usually not Father Doe
if title in RELIGIOUS_HONORIFICS:
aliases.append("%s %s" % (title, christian_name))
# Some titles work as both {Title} {ChristianName} and {Title} {Lastname}
# ie. John Doe could be Lord John or Lord Doe
elif title in DOUBLE_HONORIFICS:
aliases.append("%s %s" % (title, christian_name))
aliases.append("%s %s" % (title, surname))
# Everything else usually goes {Title} {Lastname}
# ie. John Doe could be Captain Doe but usually not Captain John
else:
aliases.append("%s %s" % (title, surname))
            # Don't want the formats {ChristianName}, {Surname} and {ChristianName} {Lastname} in special cases
# i.e. The Lord Ruler should never have "The Ruler", "Lord" or "Ruler" as aliases
            # Same for John the Great
if christian_name not in COMMON_WORDS and (len(parts) == 0 or parts[0] not in COMMON_WORDS):
aliases.append(christian_name)
aliases.append(surname)
aliases.append("%s %s" % (christian_name, surname))
elif title:
# Odd, but got Title Name (eg. Lord Buttsworth), so see if we can alias
if len(parts) > 0:
aliases.append(parts[0])
else:
# We've got no title, so just a single word name. No alias needed
pass
return aliases
|
Kismuz/btgym
|
btgym/research/encoder_test/aac.py
|
Python
|
lgpl-3.0
| 30,478
| 0.00233
|
import tensorflow as tf
import numpy as np
import time
import datetime
from btgym.algorithms import BaseAAC
from btgym.algorithms.math_utils import cat_entropy
# from btgym.algorithms.runner.synchro import BaseSynchroRunner
from btgym.research.encoder_test.runner import RegressionRunner
# class EncoderClassifier(BaseAAC):
# """
# `Fake AAC` class meant to test policy state encoder ability to predict price movement
# as an isolated classification/regression problem.
# """
#
# def __init__(
# self,
# runner_config=None,
# trial_source_target_cycle=(1, 0),
# num_episodes_per_trial=1, # one-shot adaptation
# test_slowdown_steps=0,
# episode_sample_params=(1.0, 1.0),
# trial_sample_params=(1.0, 1.0),
# aac_lambda=0,
# class_lambda=1.0,
# class_use_rnn=True,
# _aux_render_modes=('action_prob', 'value_fn', 'lstm_1_h', 'lstm_2_h'),
# _use_target_policy=False,
# name='EncoderClassifier',
# **kwargs
# ):
# try:
# if runner_config is None:
# self.runner_config = {
# 'class_ref': BaseSynchroRunner,
# 'kwargs': {
# 'data_sample_config': {'mode': 0},
# 'test_conditions': {
# 'state': {
# 'metadata': {
# 'trial_type': 1, # only test episode from target dom. considered test one
# 'type': 1
# }
# }
# },
# 'slowdown_steps': test_slowdown_steps,
# 'name': '',
# },
# }
# else:
# self.runner_config = runner_config
#
# # Trials sampling control:
# self.num_source_trials = trial_source_target_cycle[0]
# self.num_target_trials = trial_source_target_cycle[-1]
# self.num_episodes_per_trial = num_episodes_per_trial
#
# self.aac_lambda = aac_lambda
# self.class_lambda = class_lambda
# self.class_use_rnn = class_use_rnn
#
# self.test_slowdown_steps = test_slowdown_steps
#
# self.episode_sample_params = episode_sample_params
# self.trial_sample_params = trial_sample_params
#
# self.global_timestamp = 0
#
# self.current_source_trial = 0
# self.current_target_trial = 0
# self.current_trial_mode = 0 # source
# self.current_episode = 1
#
# super(EncoderClassifier, self).__init__(
# runner_config=self.runner_config,
# aux_render_modes=_aux_render_modes,
# name=name,
# **kwargs
# )
# except:
# msg = '{}.__init()__ exception occurred'.format(name) + \
# '\n\nPress `Ctrl-C` or jupyter:[Kernel]->[Interrupt] for clean exit.\n'
# self.log.exception(msg)
# raise RuntimeError(msg)
#
# def _make_loss(self, pi, pi_prime, name='base', verbose=True, **kwargs):
# """
# Defines policy state encoder classification loss, placeholders and summaries.
#
# Args:
# pi: policy network obj.
# pi_prime: optional policy network obj.
# name: str, name scope
# verbose: summary level
#
# Returns:
# tensor holding estimated loss graph
# list of related summaries
# """
# with tf.name_scope(name):
# # On-policy AAC loss definition:
# pi.on_pi_act_target = tf.placeholder(
# tf.float32, [None, self.ref_env.action_space.n], name="on_policy_action_pl"
# )
# pi.on_pi_adv_target = tf.placeholder(tf.float32, [None], name="on_policy_advantage_pl")
# pi.on_pi_r_target = tf.placeholder(tf.float32, [None], name="on_policy_return_pl")
#
# clip_epsilon = tf.cast(self.clip_epsilon * self.learn_rate_decayed / self.opt_learn_rate, tf.float32)
#
# on_pi_loss, on_pi_summaries = self.on_policy_loss(
# act_target=pi.on_pi_act_target,
# adv_target=pi.on_pi_adv_target,
# r_target=pi.on_pi_r_target,
# pi_logits=pi.on_logits,
# pi_vf=pi.on_vf,
# pi_prime_logits=pi_prime.on_logits,
# entropy_beta=self.model_beta,
# epsilon=clip_epsilon,
# name='on_policy',
# verbose=verbose
# )
#
# # Classification loss for price movements prediction:
#
# # oracle_labels = tf.one_hot(tf.argmax(pi.expert_actions, axis=-1), depth=4)
#
# if self.class_use_rnn:
# class_logits = pi.on_logits
#
# else:
# class_logits = pi.on_simple_logits
#
#
# # class_loss = tf.reduce_mean(
# # tf.nn.softmax_cross_entropy_with_logits_v2(
# # labels=pi.expert_actions,#oracle_labels,
# # logits=class_logits,
# # )
# # )
#
# class_loss = tf.losses.mean_squared_error(
# labels=pi.expert_actions[..., 1:3],
# predictions=tf.nn.softmax(class_logits)[..., 1:3],
# )
# entropy = tf.reduce_mean(cat_entropy(class_logits))
#
#             # self.accuracy = tf.metrics.accuracy(
# # labels=tf.argmax(pi.expert_actions, axis=-1),
# # predictions=tf.argmax(class_logits, axis=-1)
# # )
#
# self.accuracy = tf.metrics.accuracy(
# labels=tf.argmax(pi.expert_actions[..., 1:3], axis=-1),
# predictions=tf.argmax(class_logits[..., 1:3], axis=-1)
# )
#
#             model_summaries = [
# tf.summary.scalar('class_loss', class_loss),
# tf.summary.scalar('class_accuracy', self.accuracy[0])
# ]
# # Accumulate total loss:
# loss = float(self.class_lambda) * class_loss + float(self.aac_lambda) * on_pi_loss\
# - float(self.model_beta) * entropy
#
# model_summaries += on_pi_summaries
#
# return loss, model_summaries
#
# def _make_train_op(self, pi, pi_prime, pi_global):
# """
# Defines training op graph and supplementary sync operations.
#
# Args:
# pi: policy network obj.
# pi_prime: optional policy network obj.
# pi_global: shared policy network obj. hosted by parameter server
#
# Returns:
# tensor holding training op graph;
# """
#
# # Each worker gets a different set of adam optimizer parameters:
# self.optimizer = tf.train.AdamOptimizer(self.train_learn_rate, epsilon=1e-5)
#
# # Clipped gradients:
# self.grads, _ = tf.clip_by_global_norm(
# tf.gradients(self.loss, pi.var_list),
# 40.0
# )
# self.grads_global_norm = tf.global_norm(self.grads)
# # Copy weights from the parameter server to the local model
# self.sync = self.sync_pi = tf.group(
# *[v1.assign(v2) for v1, v2 in zip(pi.var_list, pi_global.var_list)]
# )
# if self.use_target_policy:
# # Copy weights from new policy model to target one:
# self.sync_pi_prime = tf.group(
# *[v1.assign(v2) for v1, v2 in zip(pi_prime.var_list, pi.var_list)]
# )
# grads_and_vars = list(zip(self.grads, pi_global.var_list))
#
# # Set global_step increment equal to observation space batch size:
# obs_space_keys = list(pi.on_state_in.keys())
#
# assert 'external' in obs_space_keys, \
#
|
mahyarap/httpclient
|
tests/test_httpclient.py
|
Python
|
gpl-3.0
| 1,500
| 0.002
|
#!/usr/bin/env python3
import unittest
import argparse
from httpclient.httpclient import HttpRequest
class HttpRequstTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_parse_url(self):
host, port, resource = HttpRequest._parse_url('127.0.0.1')
self.assertEqual(host, '127.0.0.1')
self.assertIs(port, None)
self.assertEqual(resource, '/')
host, port, resource = HttpRequest._parse_url('http://localhost')
self.assertEqual(host, 'localhost')
self.assertIs(port, None)
self.assertEqual(resource, '/')
host, port, resource = HttpRequest._parse_url('http://localhost/foo/bar')
self.assertEqual(host, 'localhost')
self.assertIs(port, None)
self.assertEqual(resource, '/foo/bar')
host, port, resource = HttpRequest._parse_url('http://localhost:80/foo/bar')
self.assertEqual(host, 'localhost')
        self.assertEqual(port, 80)
self.assertEqual(resource, '/foo/bar')
def test_send_http_request_options(self):
request = HttpRequest('http://localhost', method='OPTIONS')
response = request.send()
self.assertEqual(response.status, 200)
def test_send_http_request_get(self):
request = HttpRequest('http://localhost')
response = request.send()
self.assertEqual(response.status, 200)
self.assertTrue(response.body)
if __name__ == '__main__':
unittest.main()
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/heatmap/colorbar/_showexponent.py
|
Python
|
mit
| 518
| 0.001931
|
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showexponent", parent_name="heatmap.colorbar", **kwargs
):
super(ShowexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
            values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
|
igemsoftware2017/USTC-Software-2017
|
biohub/core/plugins/serializers.py
|
Python
|
gpl-3.0
| 391
| 0
|
from rest_framework import serializers
class PluginSerializer(serializers.Serializer):
name = serializers.CharField(read_only=True)
author = serializers.CharField(read_only=True)
title = serializers.CharField(read_only=True)
description = serializers.CharField(read_only=True)
js_url = serializers.CharField(read_only=True)
class Meta:
        fields = '__all__'
|
HappyFaceGoettingen/HappyFaceCore
|
modules/dCacheInfoPool.py
|
Python
|
apache-2.0
| 16,649
| 0.007209
|
# -*- coding: utf-8 -*-
#
# Copyright 2012 Institut für Experimentelle Kernphysik - Karlsruher Institut für Technologie
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hf, lxml, logging, datetime
import parser
from sqlalchemy import *
from lxml import etree
from string import strip
class dCacheInfoPool(hf.module.ModuleBase):
config_keys = {
'global_critical_ratio': ('ratio determines module status: (sum of free space)/(sum of total space)', '0.1'),
'local_critical_ratio': ('ratio determines pool status: pool free/total', '0.02'),
'global_warning_ratio': ('ratio determines module status: (sum of free space)/(sum of total space)', '0.15'),
'local_warning_ratio': ('ratio determines pool status: pool free/total', '0.05'),
'global_critical_poolcriticals': ('module status is critical if more than this amount of pools are critical pools', '1'),
'global_critical_poolwarnings': ('module status is critical if more than this amount of pools are warning pools', '4'),
'global_warning_poolcriticals': ('module status is warning if more than this amount of pools are critical pools', '0'),
'global_warning_poolwarnings': ('module status is warning if more than this amount of pools are warning pools', '0'),
'poolgroups': ('name of the pools, a list is possible', ' cms-disk-only-pools'),
'categories': ('name of the categories to be extracted, poolname and status will always be generated', 'total,free,precious,removable'),
'unit': ('This should be GiB or TiB', 'TiB'),
'source_xml': ('link to the source file', 'both||http://adm-dcache.gridka.de:2286/info/pools'),
'special_overview': ('this parameter allows you to add several new lines to the overview, you have 4 variables(total, free, precious, removable) you can use to define the new line. this adds the line example with the value calculated the way described after =', 'example[%]=(r+t)/(f-p)*100'),
'special_details': ('it is equal to special_overview but adds a new column for details', 'example=(r+t)/(f-p)'),
}
config_hint = ''
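    # Editor's note (interpretation, not verified against the expression parser):
    # in 'special_overview' and 'special_details' the letters t, f, p and r are
    # assumed to refer to a pool's total, free, precious and removable space, so
    # an entry like "used[%]=(t-f)/t*100" would be expected to add a "used"
    # percentage line computed from those values.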
table_columns = [
Column('num_pools', INT),
Column('crit_pools', INT),
Column('warn_pools', INT),
Column('total', INT),
Column('free', INT),
Column('precious', INT),
Column('removable', INT),
Column('special_overview', TEXT),
Column('special_details', TEXT),
Column('unit', TEXT),
], []
subtable_columns = {
"details": ([
Column('poolname', TEXT),
Column('total', FLOAT),
Column('free', FLOAT),
Column('precious', FLOAT),
Column('removable', FLOAT),
Column('status', FLOAT),
], []),
}
def prepareAcquisition(self):
# read configuration
try:
self.global_critical_ratio = float(self.config['global_critical_ratio'])
self.local_critical_ratio = float(self.config['local_critical_ratio'])
self.global_warning_ratio = float(self.config['global_warning_ratio'])
self.local_warning_ratio = float(self.config['local_warning_ratio'])
self.global_critical_poolcriticals = int(self.config['global_critical_poolcriticals'])
self.global_critical_poolwarnings = int(self.config['global_critical_poolwarnings'])
self.global_warning_poolcriticals = int(self.config['global_warning_poolcriticals'])
self.global_warning_poolwarnings = int(self.config['global_warning_poolwarnings'])
self.poolgroups = map(strip, self.config['poolgroups'].split(','))
self.unit = self.config['unit']
self.special_overview = self.config['special_overview']
self.special_details = self.config['special_details']
except KeyError, e:
raise hf.exceptions.ConfigError('Required parameter "%s" not specified' % str(e))
if 'source_xml' not in self.config: raise hf.exceptions.ConfigError('source_xml option not set')
self.source_xml = hf.downloadService.addDownload(self.config['source_xml'])
self.details_db_value_list = []
def extractData(self):
data = {}
if self.unit != 'GiB' and self.unit != 'TiB':
self.logger.error(self.unit + ' is not an accepted unit, using TiB instead!')
self.unit = 1024 * 1024 * 1024 * 1024.0
data['unit'] = 'TiB'
elif self.unit == 'GiB':
self.unit = 1024 * 1024 * 1024.0
data['unit'] = 'GiB'
else:
self.unit = 1024 * 1024 * 1024 * 1024.0
data['unit'] = 'TiB'
data['source_url'] = self.source_xml.getSourceUrl()
data['status'] = 1
data['special_overview'] = self.special_overview
data['special_details'] = self.special_details
source_tree = etree.parse(open(self.source_xml.getTmpPath()))
root = source_tree.getroot()
for pools in root:
if pools.tag == '{http://www.dcache.org/2008/01/Info}pools':
for pool in pools:
for poolgroups in pool:
if poolgroups.tag == '{http://www.dcache.org/2008/01/Info}poolgroups':
accept = 'false'
for poolgroupref in poolgroups:
if poolgroupref.get('name') in self.poolgroups:
accept = 'true'
break
if accept == 'true':
for space in pool:
if space.tag == '{http://www.dcache.org/2008/01/Info}space':
appending = {}
appending['poolname'] = pool.get('name')
                                appending['status'] = 1.0
appending['total'] = 0
appending['free'] = 0
appending['precious'] = 0
appending['removable'] = 0
for metric in space:
if metric.get('name') == 'total':
appending['total'] = float(metric.text) / self.unit
elif metric.get('name') == 'free':
appending['free'] = float(metric.text) / self.unit
elif metric.get('name') == 'precious':
appending['precious'] = float(metric.text) / self.unit
elif metric.get('name') == 'removable':
appending['removable'] = float(metric.text) / self.unit
self.details_db_value_list.append(appending)
data['num_pools'] = 0
data['crit_pools'] = 0
data['warn_pools'] = 0
data['total'] = 0
data['free'] = 0
data['precious'] = 0
data['removable'] = 0
for i,pool in enumerate(self.details_db_value_list):
data['num_pools'] += 1
data['total'] += pool['total']
data['free'] += pool['free']
data['precious'] += pool['precious']
data['removable'] += pool['removable']
if (pool['free'] + pool['removable']) / po
|
eguil/ENSO_metrics
|
pmp_driver/parallel_driver.py
|
Python
|
bsd-3-clause
| 5,888
| 0.002548
|
#!/usr/bin/env python
"""
Usage example:
1. First realization per model
./parallel_driver.py -p my_Param_ENSO.py --mip cmip6 --modnames all --realization r1i1p1f1 --metricsCollection ENSO_perf
2. All realizations of individual models
./parallel_driver.py -p my_Param_ENSO.py --mip cmip6 --modnames all --realization all --metricsCollection ENSO_perf
"""
from __future__ import print_function
from argparse import RawTextHelpFormatter
from genutil import StringConstructor
from subprocess import Popen
from PMPdriver_lib import AddParserArgument
from PMPdriver_lib import sort_human
import datetime
import glob
import os
import pcmdi_metrics
import sys
import time
# To avoid below error
# OpenBLAS blas_thread_init: pthread_create failed for thread XX of 96: Resource temporarily unavailable
os.environ['OPENBLAS_NUM_THREADS'] = '1'
# Must be done before any CDAT library is called.
# https://github.com/CDAT/cdat/issues/2213
if 'UVCDAT_ANONYMOUS_LOG' not in os.environ:
os.environ['UVCDAT_ANONYMOUS_LOG'] = 'no'
# =================================================
# Collect user defined options
# -------------------------------------------------
param = AddParserArgument()
# Pre-defined options
mip = param.mip
exp = param.exp
print('mip:', mip)
print('exp:', exp)
# Path to model data as string template
modpath = param.process_templated_argument("modpath")
# Check given model option
models = param.modnames
print('models:', models)
# Include all models if conditioned
if ('all' in [m.lower() for m in models]) or (models == 'all'):
model_index_path = param.modpath.split('/')[-1].split('.').index("%(model)")
models = ([p.split('/')[-1].split('.')[model_index_path] for p in glob.glob(modpath(
mip=mip, exp=exp, model='*', realization='*', variable='ts'))])
# remove duplicates
models = sorted(list(dict.fromkeys(models)), key=lambda s: s.lower())
print('models:', models)
print('number of models:', len(models))
# Realizations
realization = param.realization
if ('all' in [r.lower() for r in realization]) or (realization == 'all'):
realization = '*'
print('realization: ', realization)
# Metrics Collection
mc_name = param.metricsCollection
# case id
case_id = param.case_id
print('case_id:', case_id)
# Output
outdir_template = param.process_templated_argument("results_dir")
outdir = StringConstructor(str(outdir_template(
output_type='%(output_type)',
mip=mip, exp=exp, metricsCollection=mc_name, case_id=case_id)))
# Debug
debug = param.debug
print('debug:', debug)
# =================================================
# Create output directories
# -------------------------------------------------
for output_type in ['graphics', 'diagnostic_results', 'metrics_results']:
if not os.path.exists(outdir(output_type=output_type)):
os.makedirs(outdir(output_type=output_type))
print(outdir(output_type=output_type))
# =================================================
# Generates list of command
# -------------------------------------------------
if mip == "obs2obs":
param_file = './my_Param_ENSO_obs2obs.py'
else:
param_file = './my_Param_ENSO.py'
cmds_list = []
for model in models:
print(' ----- model: ', model, ' ---------------------')
# Find all xmls for the given model
model_path_list = glob.glob(
modpath(mip=mip, exp=exp, model=model, realization="*", variable='ts'))
# sort in nice way
model_path_list = sort_human(model_path_list)
if debug:
print('model_path_list:', model_path_list)
    # Find where the run can be extracted from the given filename template for modpath
print('realization:', realization)
run_in_modpath = modpath(mip=mip, exp=exp, realm='atmos', model=model, realization=realization,
variable='ts').split('/')[-1].split('.').index(realization)
print('run_in_modpath:', run_in_modpath)
# Collect all available runs
runs_list = [model_path.split('/')[-1].split('.')[run_in_modpath] for model_path in model_path_list]
# Adjust realization to be included
if realization in ["all" ,"*"]:
pass
elif realization in ["first"]:
runs_list = runs_list[:1]
else:
runs_list = [realization]
if debug:
print('runs_list (all):', runs_list)
    # Generate commands
for run in runs_list:
cmd = ['python', 'PMPdriver_EnsoMetrics.py',
'-p', param_file,
'--mip', mip, '--metricsCollection', mc_name,
'--case_id', case_id,
'--modnames', model,
'--realization', run]
cmds_list.append(cmd)
if debug:
for cmd in cmds_list:
print(' '.join(cmd))
# =================================================
# Run subprocesses in parallel
# -------------------------------------------------
# log dir
log_dir = os.path.join("log", case_id, mc_name)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# number of tasks to submit at the same time
num_workers = 7
#num_workers = 10
#num_workers = 30
#num_workers = 25
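# Editor's note on the batching below: each command is launched immediately, but
# the loop waits for every running subprocess whenever the loop index is a
# multiple of num_workers (and once more after the last command), so roughly
# num_workers tasks run concurrently between synchronisation points.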
print("Start : %s" % time.ctime())
# submit tasks and wait for subset of tasks to complete
procs_list = []
for p, cmd in enumerate(cmds_list):
    timenow = time.ctime()
print(timenow, p, ' '.join(cmd))
model = cmd[-3]
run = cmd[-1]
log_filename = '_'.join(['log_enso', mc_name, mip, exp, model, run, case_id])
log_file = os.path.join(log_dir, log_filename)
with open(log_file+"_stdout.txt", "wb") as out, open(log_file+"_stderr.txt", "wb") as err:
procs_list.append(Popen(cmd, stdout=out, stderr=err))
time.sleep(1)
if ((p > 0 and p % num_workers == 0) or (p == len(cmds_list)-1)):
print('wait...')
for proc in procs_list:
proc.wait()
print("Tasks end : %s" % time.ctime())
procs_list = []
# tasks done
print("End : %s" % time.ctime())
sys.exit('DONE')
|
shikhir-arora/Giesela
|
musicbot/bot.py
|
Python
|
mit
| 30,622
| 0.001665
|
import asyncio
import inspect
import logging
import os
import re
import shutil
import sys
import traceback
from collections import defaultdict
from contextlib import suppress
from datetime import datetime
from random import choice
from textwrap import indent, wrap
import aiohttp
import discord
from discord import Client
from discord.enums import ChannelType
from discord.utils import find
from musicbot import downloader, exceptions, localization
from musicbot.commands.admin_commands import AdminCommands
from musicbot.commands.fun_commands import FunCommands
from musicbot.commands.info_commands import InfoCommands
from musicbot.commands.misc_commands import MiscCommands
from musicbot.commands.player_commands import PlayerCommands
from musicbot.commands.playlist_commands import PlaylistCommands
from musicbot.commands.queue_commands import QueueCommands
from musicbot.commands.tool_commands import ToolCommands
from musicbot.config import Config, ConfigDefaults
from musicbot.constants import VERSION as BOTVERSION
from musicbot.constants import (ABS_AUDIO_CACHE_PATH, AUDIO_CACHE_PATH,
DISCORD_MSG_CHAR_LIMIT)
from musicbot.entry import RadioSongEntry, TimestampEntry, YoutubeEntry
from musicbot.games.game_cah import GameCAH
from musicbot.lib.ui import ui_utils
from musicbot.opus_loader import load_opus_lib
from musicbot.player import MusicPlayer
from musicbot.random_sets import RandomSets
from musicbot.reporting import raven_client
from musicbot.saved_playlists import Playlists
from musicbot.settings import Settings
from musicbot.utils import (Response, get_related_videos, load_file, ordinal,
paginate)
from musicbot.web_author import WebAuthor
from musicbot.web_socket_server import GieselaServer
load_opus_lib()
log = logging.getLogger("Giesela")
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.WARNING)
stream_handler.setFormatter(logging.Formatter("{asctime} - <{name}> [{levelname}] {message}", style="{"))
file_handler = logging.FileHandler("logs.txt")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter("{asctime} - <{name}> [{levelname}] {message}", style="{"))
logging.basicConfig(
level=logging.DEBUG,
handlers=[stream_handler, file_handler]
)
class MusicBot(Client, AdminCommands, FunCommands, InfoCommands, MiscCommands, PlayerCommands, PlaylistCommands, QueueCommands, ToolCommands):
def __init__(self):
WebAuthor.bot = self
self.players = {}
self.locks = defaultdict(asyncio.Lock)
self.voice_client_connect_lock = asyncio.Lock()
self.config = Config(ConfigDefaults.options_file)
self.playlists = Playlists(ConfigDefaults.playlists_file)
self.random_sets = RandomSets(ConfigDefaults.random_sets)
self.online_loggers = {}
self.cah = GameCAH(self)
self.blacklist = set(load_file(self.config.blacklist_file))
self.autoplaylist = load_file(self.config.auto_playlist_file)
self.downloader = downloader.Downloader(download_folder=AUDIO_CACHE_PATH)
self.exit_signal = None
self.init_ok = False
self.cached_client_id = None
self.chatters = {}
self.blocked_commands = Settings.get_setting("blocked_commands", default={})
self.users_in_menu = set()
if not self.autoplaylist:
print("Warning: Autoplaylist is empty, disabling.")
self.config.auto_playlist = False
self.use_autoplaylist = self.config.auto_playlist
ssd_defaults = {"last_np_msg": None, "auto_paused": False}
self.server_specific_data = defaultdict(lambda: dict(ssd_defaults))
super().__init__()
self.aiosession = aiohttp.ClientSession(loop=self.loop)
self.http.user_agent += " Giesela/%s" % BOTVERSION
self.load_online_loggers()
def find_home_channel(self, server, most_members=True):
channels_by_member = sorted([channel for channel in server.channels if len(channel.voice_members) > 0], key=lambda channel: len(channel.voice_members), reverse=True)
if most_members and channels_by_member:
channel = channels_by_member[0]
else:
channel = find(
lambda c: c.type == ChannelType.voice and any(x in c.name.lower().split()
for x in ["giesela", "musicbot", "bot", "music", "reign"]),
server.channels
)
if channel is None:
channel = choice(
list(filter(lambda c: c.type == ChannelType.voice, server.channels)))
return channel
def _delete_old_audiocache(self, path=ABS_AUDIO_CACHE_PATH):
try:
shutil.rmtree(path)
return True
except:
try:
os.rename(path, path + "__")
except:
return False
try:
shutil.rmtree(path)
except:
os.rename(path + "__", path)
return False
return True
async def _wait_delete_msg(self, message, after):
await asyncio.sleep(after)
await self.safe_delete_message(message)
async def generate_invite_link(self, *, permissions=None, server=None):
if not self.cached_client_id:
appinfo = await self.application_info()
self.cached_client_id = appinfo.id
return discord.utils.oauth_url(
self.cached_client_id, permissions=permissions, server=server)
def get_global_user(self, user_id):
for server in self.servers:
mem = server.get_member(user_id)
if mem:
return mem
return None
async def get_player(self, server, channel=None):
if isinstance(server, str):
server = self.get_server(server)
with (await self.voice_client_connect_lock):
# if there's already a player for this server
if server.id in self.players:
# but it's not in the right channel
if channel and self.players[server.id].voice_client.channel != channel:
# move that stuff
await self.players[server.id].voice_client.move_to(channel)
else:
voice_client = None
# gotta be sure to get one
while not voice_client:
# create a new voice client in the selected channel (if given) or go to the home channel
with suppress(discord.errors.ConnectionClosed):
voice_client = await self.join_voice_channel(channel or self.find_home_channel(server))
player = MusicPlayer(self, voice_client) \
.on("play", self.on_player_play) \
.on("resume", self.on_player_resume) \
.on("pause", self.on_player_pause) \
.on("stop", self.on_player_stop) \
.on("finished-playing", self.on_player_finished_playing) \
.on("entry-added", self.on_player_entry_added)
print("[PLAYER] Created a new player")
self.players[server.id] = player
return self.players[server.id]
async def on_player_play(self, player, entry):
GieselaServer.send_player_information(
player.voice_client.server.id)
await self.update_now_playing(entry)
channel = entry.meta.get("channel", None)
if channel:
            last_np_msg = self.server_specific_data[channel.server]["last_np_msg"]
if last_np_msg and last_np_msg.channel == channel:
# if the last np message isn't the last message in the channel;
# delete it
async for lmsg in self.logs_from(channel, limit=1):
if lmsg != last_np_msg and last_np_msg:
await self.safe_delete_message(last_np_msg)
                        self.server_specific_data[channel.server]["last_np_msg"] = None
|
google/upvote_py2
|
upvote/gae/lib/voting/api_test.py
|
Python
|
apache-2.0
| 56,967
| 0.004055
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for voting logic."""
import mock
from google.appengine.ext import ndb
from upvote.gae import settings
from upvote.gae.datastore import test_utils
from upvote.gae.datastore import utils as datastore_utils
from upvote.gae.datastore.models import binary as binary_models
from upvote.gae.datastore.models import host as host_models
from upvote.gae.datastore.models import rule as rule_models
from upvote.gae.datastore.models import user as user_models
from upvote.gae.datastore.models import vote as vote_models
from upvote.gae.lib.testing import basetest
from upvote.gae.lib.voting import api
from upvote.shared import constants
# Done for the sake of brevity.
TABLE = constants.BIGQUERY_TABLE
USER_ROLE = constants.USER_ROLE
VOTING_PROHIBITED_REASONS = constants.VOTING_PROHIBITED_REASONS
def CreateEvent(blockable, host, user):
return test_utils.CreateSantaEvent(
blockable, host_id=host.key.id(), executing_user=user.nickname,
parent=datastore_utils.ConcatenateKeys(user.key, host.key, blockable.key))
class GetBlockableTest(basetest.UpvoteTestCase):
def testFound(self):
sha256 = test_utils.RandomSHA256()
test_utils.CreateSantaBlockable(id=sha256)
self.assertIsNotNone(api._GetBlockable(sha256))
def testNotFound(self):
sha256 = test_utils.RandomSHA256()
test_utils.CreateSantaBlockable(id=sha256)
with self.assertRaises(api.BlockableNotFoundError):
api._GetBlockable('abcdef')
class GetClientTest(basetest.UpvoteTestCase):
def testSupported(self):
blockable = test_utils.CreateSantaBlockable()
client = api._GetClient(blockable)
self.assertEqual(constants.CLIENT.SANTA, client)
def testUnsupported(self):
blockable = test_utils.CreateBlockable()
with self.assertRaises(api.UnsupportedClientError):
api._GetClient(blockable)
class GetRulesForBlockableTest(basetest.UpvoteTestCase):
def testSuccess(self):
blockable = test_utils.CreateBlockable()
self.assertLen(api._GetRulesForBlockable(blockable), 0)
in_effect_rule_count = 7
test_utils.CreateSantaRules(blockable.key, in_effect_rule_count)
not_in_effect_rule_count = 10
test_utils.CreateSantaRules(
blockable.key, not_in_effect_rule_count, in_effect=False)
self.assertLen(api._GetRulesForBlockable(blockable), in_effect_rule_count)
class IsVotingAllowedTest(basetest.UpvoteTestCase):
def testBlockable_NotFound(self):
invalid_key = ndb.Key(binary_models.Blockable, '12345')
with self.assertRaises(api.BlockableNotFoundError):
api.IsVotingAllowed(invalid_key)
def testBlockable_CannotVote(self):
user = test_utils.CreateUser(roles=[USER_ROLE.UNTRUSTED_USER])
blockable = test_utils.CreateBlockable()
with self.LoggedInUser(user=user):
allowed, reason = api.IsVotingAllowed(blockable.key)
self.assertFalse(allowed)
self.assertEqual(VOTING_PROHIBITED_REASONS.INSUFFICIENT_PERMISSION, reason)
def testSantaBundle_NotUploaded(self):
bundle = test_utils.CreateSantaBundle(uploaded_dt=None)
self.assertFalse(bundle.has_been_uploaded)
with self.LoggedInUser():
allowed, reason = api.IsVotingAllowed(bundle.key)
self.assertFalse(allowed)
self.assertEqual(
constants.VOTING_PROHIBITED_REASONS.UPLOADING_BUNDLE, reason)
@mock.patch.object(api.ndb, 'in_transaction', return_value=False)
def testSantaBundle_FlaggedBinary_NotInTransaction(self, mock_in_txn):
# First, create two unflagged binaries.
blockables = test_utils.CreateSantaBlockables(2)
bundle = test_utils.CreateSantaBundle(bundle_binaries=blockables)
with self.LoggedInUser():
allowed, reason = api.IsVotingAllowed(bundle.key)
self.assertTrue(allowed)
# Now flag one of the binaries.
blockables[0].flagged = True
blockables[0].put()
allowed, reason = api.IsVotingAllowed(bundle.key)
self.assertFalse(allowed)
self.assertEqual(
constants.VOTING_PROHIBITED_REASONS.FLAGGED_BINARY, reason)
def testSantaBundle_FlaggedBinary_InTransaction(self):
blockables = test_utils.CreateSantaBlockables(26)
bundle = test_utils.CreateSantaBundle(bundle_binaries=blockables)
# Flag one of the binaries.
blockables[0].flagged = True
blockables[0].put()
# Patch out the flagged checks.
mock_flagged_binary = self.Patch(bundle, 'HasFlaggedBinary')
mock_flagged_cert = self.Patch(bundle, 'HasFlaggedCert')
with self.LoggedInUser():
      fn = lambda: api.IsVotingAllowed(bundle.key)
allowed, reason = ndb.transaction(fn, xg=True)
self.assertTrue(allowed)
self.assertIsNone(reason)
mock_flagged_binary.assert_not_called()
mock_flagged_cert.assert_not_called()
def testSantaBundle_FlaggedCert(self):
santa_certificate = test_utils.CreateSantaCertificate()
    blockable = test_utils.CreateSantaBlockable(
        cert_key=santa_certificate.key)
bundle = test_utils.CreateSantaBundle(bundle_binaries=[blockable])
with self.LoggedInUser():
allowed, reason = api.IsVotingAllowed(bundle.key)
self.assertTrue(allowed)
santa_certificate.flagged = True
santa_certificate.put()
allowed, reason = api.IsVotingAllowed(bundle.key)
self.assertFalse(allowed)
self.assertEqual(constants.VOTING_PROHIBITED_REASONS.FLAGGED_CERT, reason)
def testSantaBlockable_BlacklistedCert(self):
santa_certificate = test_utils.CreateSantaCertificate()
blockable = test_utils.CreateSantaBlockable(cert_key=santa_certificate.key)
test_utils.CreateSantaRule(
santa_certificate.key, rule_type=constants.RULE_TYPE.CERTIFICATE,
policy=constants.RULE_POLICY.BLACKLIST)
with self.LoggedInUser():
allowed, reason = api.IsVotingAllowed(blockable.key)
self.assertFalse(allowed)
self.assertIsNotNone(reason)
def testBlockable_ProhibitedState(self):
for state in constants.STATE.SET_VOTING_PROHIBITED:
blockable = test_utils.CreateBlockable(state=state)
with self.LoggedInUser():
allowed, reason = api.IsVotingAllowed(blockable.key)
self.assertFalse(allowed)
self.assertIsNotNone(reason)
def testBlockable_AdminOnly_UserIsAdmin(self):
for state in constants.STATE.SET_VOTING_ALLOWED_ADMIN_ONLY:
blockable = test_utils.CreateBlockable(state=state)
with self.LoggedInUser(admin=True) as admin:
allowed, reason = api.IsVotingAllowed(blockable.key, current_user=admin)
self.assertTrue(allowed)
self.assertIsNone(reason)
def testBlockable_AdminOnly_UserIsNotAdmin(self):
for state in constants.STATE.SET_VOTING_ALLOWED_ADMIN_ONLY:
blockable = test_utils.CreateBlockable(state=state)
with self.LoggedInUser() as user:
allowed, reason = api.IsVotingAllowed(blockable.key, current_user=user)
self.assertFalse(allowed)
self.assertIsNotNone(reason)
def testBlockable_IsCertificate_UserIsAdmin(self):
cert = test_utils.CreateSantaCertificate()
with self.LoggedInUser(admin=True) as admin:
allowed, reason = api.IsVotingAllowed(cert.key, current_user=admin)
self.assertTrue(allowed)
self.assertIsNone(reason)
def testBlockable_IsCertificate_UserIsNotAdmin(self):
cert = test_utils.CreateSantaCertificate()
with self.LoggedInUser() as user:
allowed, reason = api.IsVotingAllowed(cert.key, current_user=user)
self.assertFalse(allowed)
self.assertIsNotNone(reason)
def testBlockable_VotingIsAllowed(self):
for state in constants.STATE.SET_VOTING_ALLOWED:
blockable = test_utils
|
rcarmo/soup-strainer
|
html5lib/inputstream.py
|
Python
|
mit
| 32,655
| 0.003859
|
from __future__ import absolute_import
import codecs
import re
import types
import sys
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
#Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode(u"ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode(u"ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode(u"ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([">", "<"])
invalid_unicode_re = re.compile(u"[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile(u"[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
u"""Buffering f
|
or streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
        self.position = [-1,0]  # chunk number, offset
__init__.func_annotations = {}
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
tell.func_annotations = {}
def seek(self, pos):
assert pos < self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
seek.func_annotations = {}
def read(self, str):
if not self.buffer:
return self._readStream(str)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(str)
else:
return self._readFromBuffer(str)
read.func_annotations = {}
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
_bufferedBytes.func_annotations = {}
def _readStream(self, str):
data = self.stream.read(str)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
_readStream.func_annotations = {}
def _readFromBuffer(self, str):
remainingBytes = str
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
data = rv.append(bufferedData[bufferOffset:
bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return u"".join(rv)
_readFromBuffer.func_annotations = {}
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
if hasattr(source, u"read"):
isUnicode = isinstance(source.read(0), unicode)
else:
isUnicode = isinstance(source, unicode)
if isUnicode:
if encoding is not None:
raise TypeError(u"Cannot explicitly set an encoding with a unicode string")
return HTMLUnicodeInputStream(source)
else:
return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
HTMLInputStream.func_annotations = {}
class HTMLUnicodeInputStream(object):
u"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
u"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
#Craziness
if len(u"\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile(u"[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile(u"([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = (u"utf-8", u"certain")
self.dataStream = self.openStream(source)
self.reset()
__init__.func_annotations = {}
def reset(self):
self.chunk = u""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
#Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
reset.func_annotations = {}
def openStream(self, source):
u"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, u'read'):
stream = source
else:
stream = StringIO(source)
if (#not isinstance(stream, BufferedIOBase) and
not(hasattr(stream, u"tell") and
hasattr(stream, u"seek")) or
stream is sys.stdin):
stream = BufferedStream(stream)
return stream
openStream.func_annotations = {}
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count(u'\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind(u'\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
_position.func_annotations = {}
def position(self):
|
jasonleaster/Machine_Learning
|
K_Means/tester4.py
|
Python
|
gpl-2.0
| 691
| 0.002894
|
"""
Programmer : EOF
 File : tester4.py
Date : 2016.01.10
E-mail : jasonleaster@163.com
Description :
"""
import numpy
from matplotlib import pyplot
from km import KMeans
Original_Data = numpy.array([
[1, 1.5],
[1, 0.5],
[0.5, 0.5],
    [1.5, 1.5],
[5, 5],
[6, 5.5],
[4, 5],
[5, 1],
[6, 0.5],
[7, 1.5],
[1, 10],
[1.5, 11]
]).transpose()
a = KMeans(Original_Data, K = 3)
for i in range(a.SampleNum):
pyplot.plot(Original_Data[0][i], Original_Data[1][i], "+r", markersize=12)
pyplot.title("Original Training Data (Figure by Jason Leaster)")
pyplot.axis([-2, 14, -2, 14])
pyplot.show()
a.train()
a.show()
|
HappyFaceGoettingen/HappyFaceCore
|
render.py
|
Python
|
apache-2.0
| 2,708
| 0.005174
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Institut für Experimentelle Kernphysik - Karlsruher Institut für Technologie
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os,sys
if __name__ != '__main__':
# unfortunately we need this rather hacky path change
# because mod_wsgi for some reason does not want to
    # set PYTHONPATH as we want it or the interpreter
# doesn't read it, idk.
# __file__ is relative to the cwd, so if the dirname
# is not empty, the cwd is wrong, because HF3 requires
# it to point to the directory of the render.py script.
dirname = os.path.dirname(__file__)
if dirname:
os.chdir(dirname)
sys.path.append(dirname)
import hf, cherrypy, logging
import ConfigParser
import atexit
logger = logging.getLogger(__name__)
hf.hf_dir = os.path.dirname(os.path.abspath(__file__))
hf.configtools.readConfigurationAndEnv()
hf.configtools.setupLogging('render_logging_cfg')
cp_config = {}
for section in hf.config.sections():
if section == "global" or section.startswith("/"):
config = dict(hf.config.items(section))
for key,val in config.iteritems():
try:
config[key] = eval(val)
except ValueError:
pass
cp_config[section] = config
cherrypy.config.update(cp_config)
hf.module.importModuleClasses()
hf.auth.init()
hf.database.connect(implicit_execution = True)
if __name__ == '__main__':
cherrypy.quickstart(root=hf.RootDispatcher(), script_name=hf.config.get("paths", "happyface_url"), config=cp_config)
hf.database.disconnect()
else:
cherrypy.config.update({'environment': 'embedded'})
if cherrypy.__version__.startswith('3.0') and cherrypy.engine.state == 0:
cherrypy.engine.start(blocking=False)
atexit.register(hf.database.disconnect)
atexit.register(cherrypy.engine.stop)
application = cherrypy.Application(root=hf.RootDispatcher(), script_name=hf.config.get("paths", "happyface_url"), config=cp_config)
cherrypy.tree.mount(application)
# FLUP server does not like autoreload.
cherrypy.config.update({'engine.autoreload_on':False})
|
aweisberg/cassandra-dtest
|
upgrade_tests/repair_test.py
|
Python
|
apache-2.0
| 1,656
| 0
|
import time
import pytest
import logging
from repair_tests.repair_test import BaseRepairTest
since = pytest.mark.since
logger = logging.getLogger(__name__)
LEGACY_SSTABLES_JVM_ARGS = ["-Dcassandra.streamdes.initial_mem_buffer_size=1",
"-Dcassandra.streamdes.max_mem_buffer_size=5",
"-Dcassandra.streamdes.max_spill_file_size=16"]
# We don't support directly upgrading from 2.2 to 4.0 so disabling this on 4.0.
# TODO: we should probably not hardcode versions?
@pytest.mark.upgrade_test
@since('3.0', max_version='3.99')
class TestUpgradeRepair(BaseRepairTest):
@since('3.0', max_version='3.99')
def test_repair_after_upgrade(self):
"""
@jira_ticket CASSANDRA-10990
"""
default_install_dir = self.cluster.get_install_dir()
cluster = self.cluster
logger.debug("Setting version to
|
2.2.5")
cluster.set_install_dir(version="2.2.5")
self._populate_cluster()
self._do_upgrade(default_install_dir)
self._repair_and_verify(True)
def _do_upgrade(self, default_install_dir):
cluster = self.cluster
for node in cluster.nodelist():
logger.debug("Upgrading %s to current version" % node.name)
            if node.is_running():
node.flush()
time.sleep(1)
node.stop(wait_other_notice=True)
node.set_install_dir(install_dir=default_install_dir)
node.start(wait_other_notice=True, wait_for_binary_proto=True)
cursor = self.patient_cql_connection(node)
cluster.set_install_dir(default_install_dir)
|
Clarity-89/clarityv2
|
src/clarityv2/crm/migrations/0002_auto_20150924_1716.py
|
Python
|
mit
| 1,692
| 0.004728
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from decimal import Decimal
import autoslug.fields
class Migration(migrations.Migration):
dependencies = [
('crm', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('slug', autoslug.fields.AutoSlugField(populate_from='name', editable=False)),
('base_rate', models.DecimalField(verbose_name='hourly base rate', decimal_places=2, max_digits=8)),
('flat_fee', models.DecimalField(verbose_name='flat fee', decimal_places=2, max_digits=10)),
('tax_rate', models.DecimalField(choices=[(Decimal('0.06'), 'low'), (Decimal('0.21'), 'high')], verbose_name='tax rate', decimal_places=2, max_digits=4, default=Decimal('0.21'))),
('client', models.ForeignKey(to='crm.Client', on_delete=models.PROTECT)),
],
),
migrations.AlterField(
model_name='contact',
name='city',
field=models.CharField(verbose_name='city', max_length=255, blank=True),
),
migrations.AlterField(
model_name='contact',
            name='postal_code',
field=models.CharField(verbose_name='postal code', max_length=10, blank=True),
),
migrations.AlterUniqueTogether(
name='project',
unique_together=set([('client', 'slug')]),
),
]
|
schubergphilis/twitterwall
|
tweety/basic_auth.py
|
Python
|
apache-2.0
| 4,682
| 0.002349
|
# Copyright 2013 Gert Kremer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from django.http import HttpResponse
from django.contrib.auth import authenticate, login
#############################################################################
#
def view_or_basicauth(view, request, test_func, realm = "", *args, **kwargs):
"""
This is a helper function used by both 'logged_in_or_basicauth' and
    'has_perm_or_basicauth' that does the nitty-gritty of determining if they
are already logged in or if they have provided proper http-authorization
and returning the view if all goes well, otherwise responding with a 401.
"""
if test_func(request.user):
# Already logged in, just return the view.
#
return view(request, *args, **kwargs)
# They are not logged in. See if they provided login credentials
#
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
            # NOTE: We only support basic authentication for now.
#
if auth[0].lower() == "basic":
uname, passwd = base64.b64decode(auth[1]).split(':')
user = authenticate(username=uname, password=passwd)
if user is not None:
if user.is_active:
login(request, user)
request.user = user
return view(request, *args, **kwargs)
# Either they did not provide an authorization header or
# something in the authorization attempt failed. Send a 401
# back to them to ask them to authenticate.
#
response = HttpResponse()
response.status_code = 401
response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return response
#############################################################################
#
def logged_in_or_basicauth(realm = ""):
"""
A simple decorator that requires a user to be logged in. If they are not
logged in the request is examined for a 'authorization' header.
If the header is present it is tested for basic authentication and
the user is logged in with the provided credentials.
If the header is not present a http 401 is sent back to the
requestor to provide credentials.
The purpose of this is that in several django projects I have needed
several specific views that need to support basic authentication, yet the
web site as a whole used django's provided authentication.
The uses for this are for urls that are access programmatically such as
by rss feed readers, yet the view requires a user to be logged in. Many rss
readers support supplying the authentication credentials via http basic
auth (and they do NOT support a redirect to a form where they post a
username/password.)
Use is simple:
@logged_in_or_basicauth
def your_view:
...
You can provide the name of the realm to ask for authentication within.
"""
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.is_authenticated(),
realm, *args, **kwargs)
return wrapper
return view_decorator
#############################################################################
#
def has_perm_or_basicauth(perm, realm = ""):
"""
This is similar to the above decorator 'logged_in_or_basicauth'
except that it requires the logged in user to have a specific
permission.
Use:
@logged_in_or_basicauth('asforums.view_forumcollection')
def your_view:
...
"""
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.has_perm(perm),
realm, *args, **kwargs)
return wrapper
return view_decorator
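A minimal usage sketch for the two decorators above; the view names, realm, and permission string are illustrative assumptions, not part of this module:

# Hedged example: applying the decorators to ordinary Django views.
from django.http import HttpResponse

@logged_in_or_basicauth(realm="feeds")
def timeline(request):
    # Reached only when the user is logged in or supplied valid Basic credentials.
    return HttpResponse("authenticated content")

@has_perm_or_basicauth('tweety.view_timeline', realm="feeds")
def restricted_timeline(request):
    # Additionally requires the named permission.
    return HttpResponse("permission-gated content")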
|
hvnsweeting/pho
|
setup.py
|
Python
|
mit
| 637
| 0
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import pho
requisites = []
setup(
name='mpho',
version=pho.__version__,
description='PytHon utility for Organizing tasks',
scripts=['scripts/pho'],
long_description=open('README.rst').read(),
author='Viet Hung Nguyen',
author_email='hvn@familug.org',
url='https://github.com/hvnsweeting/pho',
packages=['pho'],
license='MIT',
classifiers=[
'Environment :: Console',
        'Topic :: Terminals :: Terminal Emulators/X Terminals',
],
)
|
tongxindao/shiyanlou
|
shiyanlou_cs892/sub.py
|
Python
|
apache-2.0
| 469
| 0
|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1, projection="3d")
x = np.linspace(-6 * np.pi, 6 * np.pi, 1000)
y = np.sin(x)
z = np.cos(x)
ax1.plot(x, y, z)
ax2 = fig.add_subplot(1, 2, 2, projection="3d")
X = np.arange(-2, 2, 0.1)
Y = np.arange(-2, 2, 0.1)
X, Y = np.meshgrid(X, Y)
Z = np.sqrt(X ** 2 + Y ** 2)
ax2.plot_surface(X, Y, Z, cmap=plt.cm.winter)
plt.show()
|
reidlindsay/gostop
|
gostop/core/agent.py
|
Python
|
mit
| 794
| 0
|
from .hand import Hand, TakenCards
class Agent(object):
"""An Agent is a player in the game and may be controll
|
ed by a human or
by computer.
"""
def __init__(self, name):
self.name = name
self.hand = Hand()
self.taken_cards = TakenCards()
self.score = 0
def __str__(self):
return self.name
    def get_action(self, state, possible_actions):
"""The Agent receives a GameState and must return an action from one
of `possible_actions`.
"""
raise NotImplementedError()
def win(self, state):
"""Notify the Agent of a win for the purpose of record keeping."""
pass
def loss(self, state):
"""Notify the Agent of a loss for the purpose of record keeping."""
pass
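Since get_action only has to return one member of possible_actions, a minimal illustrative subclass (the class name is hypothetical, not part of the repository) could be:

import random

class RandomAgent(Agent):
    """Toy agent that fulfils the get_action contract by picking at random."""

    def get_action(self, state, possible_actions):
        # Base-class contract: return exactly one of `possible_actions`.
        return random.choice(possible_actions)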
|
caio2k/pulseaudio-dlna
|
pulseaudio_dlna/streamserver.py
|
Python
|
gpl-3.0
| 18,157
| 0.00022
|
#!/usr/bin/python
# This file is part of pulseaudio-dlna.
# pulseaudio-dlna is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pulseaudio-dlna is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pulseaudio-dlna. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import subprocess
import threading
import setproctitle
import logging
import time
import socket
import select
import gobject
import functools
import atexit
import json
import os
import signal
import BaseHTTPServer
import SocketServer
import pulseaudio_dlna.encoders
import pulseaudio_dlna.recorders
import pulseaudio_dlna.common
from pulseaudio_dlna.plugins.upnp.renderer import (
UpnpContentFeatures, UpnpContentFlags)
logger = logging.getLogger('pulseaudio_dlna.streamserver')
PROTOCOL_VERSION_V10 = 'HTTP/1.0'
PROTOCOL_VERSION_V11 = 'HTTP/1.1'
@functools.total_ordering
class RemoteDevice(object):
    def __init__(self, bridge, sock):
self.bridge = bridge
try:
self.ip, self.port = sock.getpeername()
except:
logger.info('Could not get socket IP and Port. Setting to '
'unknown.')
self.ip = 'unknown'
self.port = 'unknown'
def __eq__(self, other):
if isinstance(other, RemoteDevice):
return self.ip == other.ip
raise NotImplementedError
def __gt__(self, other):
if isinstance(other, RemoteDevice):
return self.ip > other.ip
raise NotImplementedError
@functools.total_ordering
class ProcessStream(object):
def __init__(self, path, recorder, encoder, manager):
self.path = path
self.recorder = recorder
self.encoder = encoder
self.recorder_process = None
self.encoder_process = None
self.manager = manager
self.sockets = {}
self.timeouts = {}
self.chunk_size = 1024 * 4
self.lock = threading.Lock()
self.client_count = 0
self.reinitialize_count = 0
atexit.register(self.shutdown)
gobject.timeout_add(
10000, self._on_regenerate_reinitialize_count)
class UpdateThread(threading.Thread):
def __init__(self, stream):
threading.Thread.__init__(self)
self.stream = stream
self.is_running = False
self.do_stop = False
self.lock = threading.Lock()
self.lock.acquire()
def run(self):
while True:
if self.do_stop:
break
elif self.is_running is False:
self.lock.acquire()
else:
self.stream.communicate()
logger.info('Thread stopped for "{}".'.format(
self.stream.path))
def stop(self):
self.do_stop = True
self.resume()
def pause(self):
self.is_running = False
def resume(self):
if self.is_running is False:
self.is_running = True
self.lock.release()
self.update_thread = UpdateThread(self)
self.update_thread.daemon = True
self.update_thread.start()
def register(self, bridge, sock, lock_override=False):
try:
if not lock_override:
self.lock.acquire()
device = RemoteDevice(bridge, sock)
logger.info(
'Client {client} registered to stream {path}.'.format(
client=device.ip,
path=self.path))
self.sockets[sock] = device
self.client_count += 1
self.update_thread.resume()
finally:
if not lock_override:
self.lock.release()
def unregister(self, sock, lock_override=False, method=0):
try:
if not lock_override:
self.lock.acquire()
try:
device = self.sockets[sock]
del self.sockets[sock]
sock.close()
except KeyError:
logger.info('A client id tries to unregister a stream which is '
'not registered, this should never happen...')
return
logger.info(
'Client {client} unregistered stream {path} '
'using method {method}.'.format(
client=device.ip,
method=method,
path=self.path))
if device.ip in self.timeouts:
gobject.source_remove(self.timeouts[device.ip])
self.timeouts[device.ip] = gobject.timeout_add(
2000, self._on_delayed_disconnect, device)
self.client_count -= 1
finally:
if not lock_override:
self.lock.release()
def _on_regenerate_reinitialize_count(self):
if self.reinitialize_count > 0:
self.reinitialize_count -= 1
return True
def _on_delayed_disconnect(self, device):
self.timeouts.pop(device.ip)
if len(self.sockets) == 0:
logger.info('Stream closed. '
'Cleaning up remaining processes ...')
self.update_thread.pause()
self.terminate_processes()
self.manager._on_device_disconnect(device, self)
return False
def communicate(self):
try:
self.lock.acquire()
if not self.do_processes_exist():
self.create_processes()
logger.info(
'Processes of {path} initialized ...'.format(
path=self.path))
if not self.do_processes_respond():
self.terminate_processes()
self.create_processes()
logger.info(
'Processes of {path} reinitialized ...'.format(
path=self.path))
data = self.encoder_process.stdout.read(self.chunk_size)
socks = self.sockets.keys()
try:
r, w, e = select.select(socks, socks, [], 0)
except socket.error:
for sock in socks:
try:
r, w, e = select.select([sock], [], [], 0)
except socket.error:
self.unregister(sock, lock_override=True, method=1)
return
for sock in w:
try:
self._send_data(sock, data)
except socket.error:
self.unregister(sock, lock_override=True, method=2)
for sock in r:
if sock in self.sockets:
try:
data = sock.recv(1024)
logger.info(
'Read data from socket "{}"'.format(data))
except socket.error:
logger.error(
'Error while reading from socket ...')
finally:
self.lock.release()
def _send_data(self, sock, data):
bytes_total = len(data)
bytes_sent = 0
while bytes_sent < bytes_total:
bytes_sent += sock.send(data[bytes_sent:])
def do_processes_exist(self):
return (self.encoder_process is not None and
self.recorder_process is not None)
def do_processes_respond(self):
return (self.recorder_process.poll() is None and
self.encoder_process.poll() is None)
def terminate_processes(self):
|
RedHatInsights/insights-core
|
insights/parsers/x86_debug.py
|
Python
|
apache-2.0
| 3,411
| 0
|
"""
Parsers for file ``/sys/kernel/debug/x86/*_enabled`` outputs
============================================================
This module provides the following parsers:
X86PTIEnabled - file ``/sys/kernel/debug/x86/pti_enabled``
----------------------------------------------------------
X86IBPBEnabled - file ``/sys/kernel/debug/x86/ibpb_enabled``
------------------------------------------------------------
X86IBRSEnabled - file ``/sys/kernel/debug/x86/ibrs_enabled``
------------------------------------------------------------
X86RETPEnabled - file ``/sys/kernel/debug/x86/retp_enabled``
------------------------------------------------------------
"""
from insights import parser
from insights import Parser
from insights.specs import Specs
from insights.parsers import SkipException
class X86DebugEnabled(Parser):
"""
Class for parsing file ``/sys/kernel/debug/x86/*_enabled``
Attributes:
value (int): the result parsed of `/sys/kernel/debug/x86/*_enabled`
    Raises:
SkipException: When input content is empty
"""
def parse_content(self, content):
if not content:
raise SkipException("Input content is empty")
# it is a digit
self.value = int(content[0])
@parser(Specs.x86_ibpb_enabled)
class X86IBPBEnabled(X86DebugEnabled):
"""
Class for parsing file ``/sys/kernel/debug/x86/ibpb_enabled``
    Typical output of file ``/sys/kernel/debug/x86/ibpb_enabled`` looks like::
1
Examples:
>>> type(dva)
<class 'insights.parsers.x86_debug.X86IBPBEnabled'>
>>> dva.value
1
Attributes:
value (int): the result parsed of '/sys/kernel/debug/x86/ibpb_enabled'
Raises:
SkipException: When input content is empty
"""
pass
@parser(Specs.x86_ibrs_enabled)
class X86IBRSEnabled(X86DebugEnabled):
"""
Class for parsing file ``/sys/kernel/debug/x86/ibrs_enabled``
Typical output of file ``/sys/kernel/debug/x86/ibrs_enabled`` looks like::
0
Examples:
>>> type(dl)
<class 'insights.parsers.x86_debug.X86IBRSEnabled'>
>>> dl.value
1
Attributes:
value (int): the result parsed of '/sys/kernel/debug/x86/ibrs_enabled'
Raises:
SkipException: When input content is empty
"""
pass
@parser(Specs.x86_pti_enabled)
class X86PTIEnabled(X86DebugEnabled):
"""
Class for parsing file ``/sys/kernel/debug/x86/pti_enabled``
Typical output of file ``/sys/kernel/debug/x86/pti_enabled`` looks like::
0
Examples:
>>> type(dv)
<class 'insights.parsers.x86_debug.X86PTIEnabled'>
>>> dv.value
1
Attributes:
value (int): the result parsed of '/sys/kernel/debug/x86/pti_enabled'
Raises:
SkipException: When input content is empty
"""
pass
@parser(Specs.x86_retp_enabled)
class X86RETPEnabled(X86DebugEnabled):
"""
Class for parsing file ``/sys/kernel/debug/x86/retp_enabled``
Typical output of file ``/sys/kernel/debug/x86/retp_enabled`` looks like::
1
Examples:
>>> type(dval)
<class 'insights.parsers.x86_debug.X86RETPEnabled'>
>>> dval.value
1
Attributes:
value (int): the result parsed of '/sys/kernel/debug/x86/retp_enabled'
Raises:
SkipException: When input content is empty
"""
pass
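A rough usage sketch for the parsers above; context_wrap is the helper insights-core uses in its doctests, so treat the exact import path as an assumption:

# Assumption: insights.tests.context_wrap wraps raw file content into a parser context.
from insights.tests import context_wrap

retp = X86RETPEnabled(context_wrap("1"))
print(retp.value)   # 1 -> retpoline support reported as enabled
pti = X86PTIEnabled(context_wrap("0"))
print(pti.value)    # 0 -> page table isolation reported as disabled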
|
andresmargalef/xbmc-plugin.video.ted.talks
|
resources/lib/settings_test.py
|
Python
|
gpl-2.0
| 1,603
| 0.003119
|
import unittest
import settings
class TestSettings(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.enable_subtitles = settings.enable_subtitles
self.xbmc_language = settings.xbmc_language
self.subtitle_language = settings.subtitle_language
def tearDown(self):
# This is rubbish. Need to understand how to test Python better.
settings.enable_subtitles = self.enable_subtitles
settings.xbmc_language = self.xbmc_language
settings.subtitle_language = self.subtitle_language
unittest.TestCase.tearDown(self)
def test_get_subtitle_languages_disabled(self):
settings.enable_subtitles = 'false'
self.assertIsNone(settings.get_subtitle_languages())
    def test_get_subtitle_languages_enabled_standard(self):
settings.enable_subtitles = 'true'
        settings.xbmc_language = 'Portuguese'
settings.subtitle_language = "" # Default is "en", if pref unset then XBMC will replace with "".
self.assertEqual(['pt'], settings.get_subtitle_languages())
def test_get_subtitle_languages_enabled_standard_nomatch(self):
settings.enable_subtitles = 'true'
settings.xbmc_language = 'Klingon'
settings.subtitle_language = ''
self.assertEqual(None, settings.get_subtitle_languages())
def test_get_subtitle_languages_enabled_custom(self):
settings.enable_subtitles = 'true'
settings.subtitle_language = 'en,fr , de ,'
self.assertEqual(['en', 'fr', 'de'], settings.get_subtitle_languages())
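For reference, a standalone sketch of the comma-splitting behaviour the custom-language test above exercises; this re-implements, by assumption, what settings.get_subtitle_languages does with a user-supplied preference string:

def split_language_pref(pref):
    # Split on commas, strip whitespace, drop empty entries -- mirroring the
    # expected ['en', 'fr', 'de'] result asserted above.
    codes = [code.strip() for code in pref.split(',') if code.strip()]
    return codes or None

assert split_language_pref('en,fr , de ,') == ['en', 'fr', 'de']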
|