| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
import unittest
from test.stub_environment import StubEnvironment
from googkit.commands.command import Command
from googkit.commands.sequence import SequenceCommand
from googkit.compat.unittest import mock
class DummyFooCommand(Command):
pass
class DummyBarCommand(Command):
pass
class TestSequenceCommand(unittest.TestCase):
def test_run(self):
class DummySequenceCommand(SequenceCommand):
@classmethod
def _internal_commands(cls):
return [
DummyFooCommand,
DummyBarCommand
]
env = StubEnvironment()
command = DummySequenceCommand(env)
with mock.patch('test.commands.test_sequence.DummyFooCommand') as MockFoo, \
mock.patch('test.commands.test_sequence.DummyBarCommand') as MockBar:
command.run()
self.assertTrue(MockFoo.return_value.run.called)
self.assertTrue(MockBar.return_value.run.called)
# [REVIEW] - Is it possible to test an execution order of those commands?
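        # A sketch of one possible answer (assumes SequenceCommand instantiates
        # each internal command with env and then calls run()): attach both
        # patched classes to a shared parent mock and assert on its mock_calls,
        # which records the interleaved call order.
        #
        #   manager = mock.Mock()
        #   manager.attach_mock(MockFoo, 'foo')
        #   manager.attach_mock(MockBar, 'bar')
        #   command.run()
        #   self.assertEqual(manager.mock_calls, [
        #       mock.call.foo(env), mock.call.foo().run(),
        #       mock.call.bar(env), mock.call.bar().run()])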
|
googkit/googkit
|
test/commands/test_sequence.py
|
Python
|
mit
| 1,073
|
'''
This script is intended to run a set of POS experiments
by varying the amount of training data used to produce a curve.
@author: rgeorgi
'''
import argparse
from utils.argutils import configfile, existsfile, writedir
from corpora.POSCorpus import POSCorpus
import os
from interfaces.stanford_tagger import StanfordPOSTagger
from interfaces import stanford_tagger
from eval import pos_eval
from multiprocessing.pool import Pool
from glob import glob
from utils.ConfigFile import ConfigFile
def sub_run(sub_train_path, sub_model_path, raw_test_path, sub_tag_path, test_corpus, sub_corpus):
    # Next, train the tagger.
stanford_tagger.train(sub_train_path, sub_model_path)
# Now, run it.
stanford_tagger.test(raw_test_path, sub_model_path, sub_tag_path)
num_tokens = sum([len(i) for i in sub_corpus])
# Load the result of the tagging...
result_corpus = POSCorpus.read_slashtags(sub_tag_path)
acc = pos_eval.poseval(result_corpus, test_corpus)
return (num_tokens, acc)
def full_run(c):
# The step of sentences by which to increment.
step_increment = 50
curve_dir = os.path.abspath(writedir(c['curve_dir']))
train_path = c['train_path']
test_path = c['test_path']
train_corpus = POSCorpus.read_slashtags(train_path)
test_corpus = POSCorpus.read_slashtags(test_path)
# Let's go ahead and strip the tags from the test corpus.
raw_test_name = 'test_data.txt'
raw_test_path = os.path.join(curve_dir, raw_test_name)
test_corpus.write(raw_test_name, 'raw', outdir=curve_dir)
    # Now, let's add step_increment sentences at a time until we max out.
sent_limit = 0
p = Pool(8)
results = {}
while sent_limit < len(train_corpus):
        # Adding step_increment to the limit, starting from zero, means the
        # final slice also picks up any leftover (< step_increment) sentences.
actual_limit = sent_limit+step_increment
sub_corpus = POSCorpus(train_corpus[0:actual_limit])
# Let's make the x values the number of tokens instead of sentences...
# Let's create the necessary filenames.
sub_train_path = os.path.join(curve_dir, '%d_train.txt' % actual_limit)
sub_model_path = os.path.join(curve_dir, '%d_train.model' % actual_limit)
sub_tag_path = os.path.join(curve_dir, '%d_tagged.txt' % actual_limit)
# Get the number of tokens in the corpus for our x axis...
num_tokens = sum([len(x) for x in sub_corpus])
sub_corpus.write(os.path.basename(sub_train_path), 'slashtags', outdir=curve_dir)
p.apply_async(sub_run, args=[sub_train_path, sub_model_path,
raw_test_path, sub_tag_path,
test_corpus, sub_corpus],
callback=lambda x: results.update({x[0]:x[1]}))
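        # Caveat: apply_async silently swallows exceptions raised inside
        # sub_run unless the returned AsyncResult is checked (e.g. with .get()),
        # so keep the result handles when debugging an empty curve.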
# Now, increase the sentence limit
sent_limit += step_increment
p.close()
p.join()
# Also, define where we will store the curve points.
curve_points = 'curve_data.txt'
    with open(os.path.join(curve_dir, curve_points), 'w') as curve_f:
        for size, acc in sorted(results.items()):
            curve_f.write('%d,%.2f\n' % (size, acc))
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('-c', '--conf', required=True)
args = p.parse_args()
confs = glob(args.conf)
for conf in confs:
full_run(ConfigFile(conf))
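# Usage sketch (script name from this repo's path; the glob is expanded by
# Python, so quote it to keep the shell from expanding it first; the
# 'experiments/' directory is hypothetical):
#   python run_data_curve.py -c 'experiments/*.conf'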
|
rgeorgi/intent
|
intent/scripts/pos/run_data_curve.py
|
Python
|
mit
| 3,174
|
import unittest
from tests.core import TestCore
from pyrep import PyRep
from pyrep.objects.dummy import Dummy
import numpy as np
from os import path
from pyrep.robots.mobiles.youbot import YouBot
from pyrep.robots.mobiles.turtlebot import TurtleBot
from pyrep.robots.mobiles.line_tracer import LineTracer
ASSET_DIR = path.join(path.dirname(path.abspath(__file__)), 'assets')
# TODO: Extract out youbot to 'test_mobiles_with_arms.py'
MOBILES = [
('YouBot', YouBot),
('LineTracer', LineTracer),
    ('TurtleBot', TurtleBot),
]
class TestMobilesAndConfigurationPaths(TestCore):
def setUp(self):
self.pyrep = PyRep()
self.pyrep.launch(path.join(
ASSET_DIR, 'test_scene_mobiles.ttt'), headless=True)
self.pyrep.step()
self.pyrep.start()
# It is enough to only test the constructor of each mobile (in there we make
# assumptions about the structure of the mobile model). All other tests
# can be run on one mobile.
def test_get_mobile(self):
for mobile_name, mobile_type in MOBILES:
with self.subTest(mobile=mobile_name):
mobile = mobile_type()
self.assertIsInstance(mobile, mobile_type)
def test_get_linear_path(self):
mobile = YouBot()
waypoint = Dummy('youBot_waypoint')
path = mobile.get_linear_path(
waypoint.get_position(), waypoint.get_orientation()[-1])
self.assertIsNotNone(path)
def test_get_nonlinear_path(self):
mobile = YouBot()
waypoint = Dummy('youBot_waypoint')
path = mobile.get_nonlinear_path(
waypoint.get_position(), waypoint.get_orientation()[-1])
self.assertIsNotNone(path)
def test_get_linear_path_and_step(self):
mobile = YouBot()
waypoint = Dummy('youBot_waypoint')
path = mobile.get_linear_path(
waypoint.get_position(), waypoint.get_orientation()[-1])
self.assertIsNotNone(path)
done = False
while not done:
done = path.step()
self.pyrep.step()
self.assertTrue(np.allclose(
mobile.get_position()[:2], waypoint.get_position()[:2],
atol=0.001))
def test_get_linear_path_and_get_end(self):
mobile = YouBot()
waypoint = Dummy('youBot_waypoint')
path = mobile.get_linear_path(
waypoint.get_position(), waypoint.get_orientation()[-1])
path.set_to_end()
self.assertTrue(np.allclose(
mobile.get_position()[:2], waypoint.get_position()[:2],
atol=0.001))
def test_get_linear_path_visualize(self):
mobile = YouBot()
waypoint = Dummy('youBot_waypoint')
path = mobile.get_linear_path(
waypoint.get_position(), waypoint.get_orientation()[-1])
# Check that it does not error
path.visualize()
def test_get_duplicate_mobile(self):
mobile = YouBot(1)
self.assertIsInstance(mobile, YouBot)
def test_copy_mobile(self):
mobile = LineTracer()
new_mobile = mobile.copy()
self.assertNotEqual(mobile, new_mobile)
if __name__ == '__main__':
unittest.main()
|
stepjam/PyRep
|
tests/test_mobiles_and_configuration_paths.py
|
Python
|
mit
| 3,195
|
# -*- coding: utf-8 -*-
from app import app
# Start point
if __name__ == '__main__':
app.run(debug=True)
|
naznadmn/Compression.io
|
run.py
|
Python
|
mit
| 110
|
from L500analysis.data_io.get_cluster_data import GetClusterData
from L500analysis.utils.utils import aexp2redshift
from L500analysis.plotting.tools.figure_formatting import *
from L500analysis.plotting.profiles.tools.profiles_percentile \
import *
from L500analysis.utils.constants import rbins, linear_rbins
from derived_field_functions import *
color = matplotlib.cm.afmhot_r
matplotlib.rcParams['legend.handlelength'] = 0
matplotlib.rcParams['legend.numpoints'] = 1
matplotlib.rcParams['legend.fontsize'] = 12
aexps = [1.0,0.9,0.8,0.7,0.6,0.5,0.45,0.4,0.35]
db_name = 'L500_NR_0'
db_dir = '/home/babyostrich/Documents/Repos/L500analysis/'
profiles_list = ['T_mw', 'r_mid',
'vel_gas_rad_std', 'vel_gas_tan_std',
'vel_gas_rad_avg', 'vel_gas_tan_avg',
'Tnt_cm_per_s_2_r500c',
'Vr2_cm_per_s_2_r500c',
'R/R500c']
halo_properties_list=['r500c','M_total_500c']
Tnt_Vr2_ratio=r"$\Xi=T_{nt}/V^2_{r}$"
fXz1=r"$\Xi/\Xi(z=1)$"
pa = PlotAxes(figname='Tnt_Vr2_ratio_500c',
axes=[[0.15,0.4,0.80,0.55],[0.15,0.15,0.80,0.24]],
axes_labels=[Tnt_Vr2_ratio,fXz1],
xlabel=r"$R/R_{500c}$",
ylog=[True,False],
xlim=(0.2,5),
ylims=[(1e-1,1e2),(0.4,1.6)])
TratioV2={}
plots=[TratioV2]
clkeys=['Tnt_Vr2_ratio_500c']
for aexp in aexps :
cldata = GetClusterData(aexp=aexp,db_name=db_name,
db_dir=db_dir,
profiles_list=profiles_list,
halo_properties_list=halo_properties_list)
Tnt = calculate_profiles_mean_variance(cldata['Tnt_cm_per_s_2_r500c'])
Vr2 = calculate_profiles_mean_variance(cldata['Vr2_cm_per_s_2_r500c'])
TratioV2[aexp] = get_profiles_division_mean_variance(
mean_profile1=Tnt['mean'], var_profile1=Tnt['var'],
mean_profile2=Vr2['mean'], var_profile2=Vr2['var'])
print TratioV2[aexp]['mean']
pa.axes[Tnt_Vr2_ratio].plot( rbins, TratioV2[aexp]['mean'],
color=color(aexp),ls='-',
label="$z=%3.1f$" % aexp2redshift(aexp))
for aexp in aexps :
fractional_evolution = get_profiles_division_mean_variance(
mean_profile1=TratioV2[aexp]['mean'],
var_profile1=TratioV2[aexp]['var'],
mean_profile2=TratioV2[0.5]['mean'],
var_profile2=TratioV2[0.5]['var'],
)
pa.axes[fXz1].plot( rbins, fractional_evolution['mean'],
color=color(aexp),ls='-')
pa.axes[Tnt_Vr2_ratio].tick_params(labelsize=12)
pa.axes[fXz1].tick_params(labelsize=12)
pa.axes[fXz1].set_yticks(arange(0.6,1.4,0.2))
pa.set_legend(axes_label=Tnt_Vr2_ratio,ncol=3,loc='best', frameon=False)
pa.color_legend_texts(axes_label=Tnt_Vr2_ratio)
pa.savefig()
|
cavestruz/L500analysis
|
plotting/profiles/T_Vr_evolution/Tnt_Vr_evolution/plot_Tnt_Vr_r500c.py
|
Python
|
mit
| 2,927
|
import ckan.plugins.toolkit as toolkit
log = __import__('logging').getLogger(__name__)
country_vocab = 'country_names'
def country_names():
import pycountry
# Create the tag vocabulary if it doesn't exist
user = toolkit.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': user['name']}
try: # Check if the vocab exists
data = {'id': country_vocab}
toolkit.get_action('vocabulary_show')(context, data)
except toolkit.ObjectNotFound: # It doesn't exist, create the vocab
data = {'name': country_vocab}
vocab = toolkit.get_action('vocabulary_create')(context, data)
country_names = [country.alpha_3
for country in list(pycountry.countries)]
for name in country_names:
data = {'name': name, 'vocabulary_id': vocab['id']}
toolkit.get_action('tag_create')(context, data)
try:
countries = toolkit.get_action('tag_list')(
data_dict={'vocabulary_id': country_vocab})
return countries
except toolkit.ObjectNotFound:
log.debug('Could not find vocabulary')
def country_code_to_name(country_code):
import pycountry
return pycountry.countries.get(alpha_3=country_code).name
def country_options():
return [{'name': country_code_to_name(country_code), 'value': country_code}
for country_code in country_names()]
region_vocab = 'regions'
def regions_data():
return dict(
AFR='Africa',
EAP='East Asia and Pacific',
ECA='Europe and Central Asia',
LCR='Latin America & Caribbean',
SAR='South Asia',
MNA='Middle East and North Africa',
)
def regions():
# Create the vocabulary if it doesn't exist
user = toolkit.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': user['name']}
try: # Check if the vocab exists
data = {'id': region_vocab}
toolkit.get_action('vocabulary_show')(context, data)
except toolkit.ObjectNotFound: # It doesn't exist, create the vocab
data = {'name': region_vocab}
vocab = toolkit.get_action('vocabulary_create')(context, data)
for name in regions_data().keys():
data = {'name': name, 'vocabulary_id': vocab['id']}
toolkit.get_action('tag_create')(context, data)
try:
regions = toolkit.get_action('tag_list')(
data_dict={'vocabulary_id': region_vocab})
return regions
except toolkit.ObjectNotFound:
log.debug('Could not find vocabulary')
def region_code_to_name(region_code):
return regions_data().get(region_code, '')
def region_options():
regions_dict = regions_data()
return [{'name': regions_dict[region_code], 'value': region_code}
for region_code in regions()]
topic_vocab = 'topics'
def topics():
# Create the vocabulary if it doesn't exist
user = toolkit.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': user['name']}
try: # Check if the vocab exists
data = {'id': topic_vocab}
toolkit.get_action('vocabulary_show')(context, data)
except toolkit.ObjectNotFound: # It doesn't exist, create the vocab
data = {'name': topic_vocab}
vocab = toolkit.get_action('vocabulary_create')(context, data)
topics = '''
Energy demand
Energy efficiency
Renewable energy
Energy access
Resource assessments
Measurement stations
Power system and utilities
Transmission and distribution
Pipeline networks
Physical features
Boundaries
Extractive industries
Project sites
Climate
Demographics
Surveys
Economics and prices
Policies and plans
Information technology
Environment
Transport
Water
Agriculture
Bioenergy
Geothermal
Hydropower
Solar
Wind
Thermal power'''.strip().split('\n')
for name in topics:
data = {'name': name, 'vocabulary_id': vocab['id']}
toolkit.get_action('tag_create')(context, data)
try:
topics = toolkit.get_action('tag_list')(
data_dict={'vocabulary_id': topic_vocab})
return topics
except toolkit.ObjectNotFound:
log.debug('Could not find vocabulary')
def topic_options():
return [{'name': topic, 'value': topic} for topic in topics()]
status_vocab = 'statuses'
def statuses():
return '''
Complete
Obsolete
Ongoing
Planned
Required
Under Development
'''.strip().split('\n')
def status_options():
return [{'name': status, 'value': status}
for status in [''] + statuses()]
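# Registration sketch (assumption; not part of this module): these helpers are
# typically exposed to templates through ITemplateHelpers in the plugin class:
#
#   import ckan.plugins as plugins
#
#   class ExtrafieldsPlugin(plugins.SingletonPlugin):
#       plugins.implements(plugins.ITemplateHelpers)
#
#       def get_helpers(self):
#           return {'country_options': country_options,
#                   'region_options': region_options,
#                   'topic_options': topic_options,
#                   'status_options': status_options}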
|
energy-data/energydata.info
|
ckanext-extrafields/ckanext/extrafields/helpers.py
|
Python
|
mit
| 4,544
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class PhoneNumberList(ListResource):
""" """
def __init__(self, version):
"""
Initialize the PhoneNumberList
:param Version version: Version that contains the resource
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberList
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberList
"""
super(PhoneNumberList, self).__init__(version)
# Path Solution
self._solution = {}
def get(self, phone_number):
"""
Constructs a PhoneNumberContext
:param phone_number: The phone number to fetch in E.164 format
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
"""
return PhoneNumberContext(self._version, phone_number=phone_number, )
def __call__(self, phone_number):
"""
Constructs a PhoneNumberContext
:param phone_number: The phone number to fetch in E.164 format
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
"""
return PhoneNumberContext(self._version, phone_number=phone_number, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Lookups.V1.PhoneNumberList>'
class PhoneNumberPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the PhoneNumberPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberPage
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberPage
"""
super(PhoneNumberPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of PhoneNumberInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
"""
return PhoneNumberInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Lookups.V1.PhoneNumberPage>'
class PhoneNumberContext(InstanceContext):
""" """
def __init__(self, version, phone_number):
"""
Initialize the PhoneNumberContext
:param Version version: Version that contains the resource
:param phone_number: The phone number to fetch in E.164 format
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
"""
super(PhoneNumberContext, self).__init__(version)
# Path Solution
self._solution = {'phone_number': phone_number, }
self._uri = '/PhoneNumbers/{phone_number}'.format(**self._solution)
def fetch(self, country_code=values.unset, type=values.unset,
add_ons=values.unset, add_ons_data=values.unset):
"""
Fetch a PhoneNumberInstance
:param unicode country_code: The ISO country code of the phone number
:param unicode type: The type of information to return
:param unicode add_ons: The unique_name of an Add-on you would like to invoke
:param dict add_ons_data: Data specific to the add-on you would like to invoke
:returns: Fetched PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
"""
params = values.of({
'CountryCode': country_code,
'Type': serialize.map(type, lambda e: e),
'AddOns': serialize.map(add_ons, lambda e: e),
})
params.update(serialize.prefixed_collapsible_map(add_ons_data, 'AddOns'))
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return PhoneNumberInstance(self._version, payload, phone_number=self._solution['phone_number'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Lookups.V1.PhoneNumberContext {}>'.format(context)
class PhoneNumberInstance(InstanceResource):
""" """
class Type(object):
LANDLINE = "landline"
MOBILE = "mobile"
VOIP = "voip"
def __init__(self, version, payload, phone_number=None):
"""
Initialize the PhoneNumberInstance
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
"""
super(PhoneNumberInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'caller_name': payload.get('caller_name'),
'country_code': payload.get('country_code'),
'phone_number': payload.get('phone_number'),
'national_format': payload.get('national_format'),
'carrier': payload.get('carrier'),
'add_ons': payload.get('add_ons'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'phone_number': phone_number or self._properties['phone_number'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: PhoneNumberContext for this PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
"""
if self._context is None:
self._context = PhoneNumberContext(self._version, phone_number=self._solution['phone_number'], )
return self._context
@property
def caller_name(self):
"""
:returns: The name of the phone number's owner
:rtype: dict
"""
return self._properties['caller_name']
@property
def country_code(self):
"""
:returns: The ISO country code for the phone number
:rtype: unicode
"""
return self._properties['country_code']
@property
def phone_number(self):
"""
:returns: The phone number in E.164 format
:rtype: unicode
"""
return self._properties['phone_number']
@property
def national_format(self):
"""
:returns: The phone number, in national format
:rtype: unicode
"""
return self._properties['national_format']
@property
def carrier(self):
"""
:returns: The telecom company that provides the phone number
:rtype: dict
"""
return self._properties['carrier']
@property
def add_ons(self):
"""
:returns: A JSON string with the results of the Add-ons you specified
:rtype: dict
"""
return self._properties['add_ons']
@property
def url(self):
"""
:returns: The absolute URL of the resource
:rtype: unicode
"""
return self._properties['url']
def fetch(self, country_code=values.unset, type=values.unset,
add_ons=values.unset, add_ons_data=values.unset):
"""
Fetch a PhoneNumberInstance
:param unicode country_code: The ISO country code of the phone number
:param unicode type: The type of information to return
:param unicode add_ons: The unique_name of an Add-on you would like to invoke
:param dict add_ons_data: Data specific to the add-on you would like to invoke
:returns: Fetched PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
"""
return self._proxy.fetch(
country_code=country_code,
type=type,
add_ons=add_ons,
add_ons_data=add_ons_data,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Lookups.V1.PhoneNumberInstance {}>'.format(context)
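# Usage sketch (assumes an authenticated Client; the number is Twilio's
# documentation example, not real data):
#   from twilio.rest import Client
#   client = Client(account_sid, auth_token)
#   info = client.lookups.v1.phone_numbers('+15108675310').fetch(type=['carrier'])
#   print(info.carrier['name'])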
|
tysonholub/twilio-python
|
twilio/rest/lookups/v1/phone_number.py
|
Python
|
mit
| 9,203
|
class Solution(object):
def summaryRanges(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
if len(nums) == 0:
return []
if len(nums) == 1:
return [str(nums[0])]
ret = []
head = nums[0]
last = nums[0]
for i in range(1, len(nums)):
val = nums[i]
if val-last > 1:
if last == head:
ret.append(str(head))
else:
ret.append('%d->%d' % (head,last))
head = val
last = nums[i]
if last == head:
ret.append(str(head))
else:
ret.append('%d->%d' % (head,last))
return ret
print Solution().summaryRanges([1, 3])
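# Expected output of the call above: ['1', '3']
# Another worked example: summaryRanges([0, 1, 2, 4, 5, 7]) -> ['0->2', '4->5', '7']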
|
xingjian-f/Leetcode-solution
|
228. Summary Ranges QuestionEditorial Solution.py
|
Python
|
mit
| 580
|
#Brought to you by Jeremy Rubin, 2013
config = dict(
# your app's name
name="APP_NAME",
#port - can still be overwritten via cli args
default_port="8000",
#static file dir
static_location="static", # Where static files are located
#useful for enabling devmode/production features
# ie, just write if config['devmode']: do_this_action()
devmode=True,
# If all your route's need to be prefixed, simply add this (ie "/app1")
# Very useful if you want to migrate all routes quickly
# does not prefix static files.
route_prefix=False,
# a good way is to do os.urandom(32).encode("hex") from python shell
cookie_secret="GENERATE ME MORE SECURELY THAN THIS",
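    # (str.encode("hex") is Python 2 only; on Python 3 use os.urandom(32).hex().)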
#DB Settings
db_pool="test_pool",
db_name="db_name",
)
|
JeremyRubin/tornado-trails
|
config.py
|
Python
|
mit
| 786
|
question_list = [
# (mark, count, [directories])
# math demo
(1,1,'crc/crc_'),
(1,1,'hamming/fill_check_bits_'),
# move to earlier section
(1, 1, 'eop/chapter3/hello_world_io_forward_0'),
(1, 1, 'eop/chapter3/hello_world_io_backward_0'),
# Section 3.2 Assignment
(1, 1, 'eop/chapter3/swap_io_0'), # Figure 3.1
# Section 3.3 Sequencing (no figures)
# Section 3.4 Selection
(1, 1, 'eop/chapter3/compare_io_0'), # Figure 3.2
(1, 1, 'eop/chapter3/three_sort_be_0'), # Figure 3.3
(1, 1, 'eop/chapter3/three_sort_io_0'), # Figure 3.3
(1, 1, 'eop/chapter3/counterfeit_coin_be_0'), # Figure 3.4
(1, 1, 'eop/chapter3/leap_year_io_0'), # Figure 3.5
# Section 3.5 Iteration
(1, 1, 'eop/chapter3/gcd_io_0'), # Figure 3.6
(1, 1, 'eop/chapter3/base_conversion_io_0'), # Figure 3.7
# (1, 1, 'eop/chapter3/make_change_io_0'), # Figure 3.8
(1, 1, 'eop/chapter3/seconds_io_0'), # Figure 3.9
# (1, 1, 'eop/chapter3/conversion_io_0'), # Figure 3.10
(1, 1, 'eop/chapter3/div_mod_io_0'), # Figure 3.11
#(1, 1, 'eop/chapter3/pythogoras_io_0'), # Figure 3.12
# Section 3.7 Functions
(1, 1, 'eop/chapter3/gcd_function_io_0'), # Figure 3.13
# needs work:
(1, 1, 'eop/chapter3/extreme_compounding_io_0'),# Figure 3.14
# Section 3.8 Arrays
# (1, 1, 'eop/chapter3/array_demo_io_0'), # Figure 3.15
# (1, 1, 'eop/chapter3/julian_day_io_0'), # Figure 3.16
# needs work:
(1, 1, 'eop/chapter3/sample_statistics_io_0'), # Figure 3.17
# ***** old
# (1, 1, 'eop/chapter3/gauss_io_0'), # Figure 3.6
(1, 1, 'eop/chapter3/power_io_0'), # Figure 3.N
(1, 1, 'eop/chapter3/power_function_io_0'), # Figure 3.N
]
practice_mode = True
standalone = False
logged = False
log_dir = ''
|
stryder199/RyarkAssignments
|
Assignment2/quizzes/eop_chapter3.py
|
Python
|
mit
| 1,692
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: corpus/virtual.py
# Purpose: Access to the Virtual corpus collection
#
# Authors: Christopher Ariza
#
# Copyright: Copyright © 2010, 2012 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
The virtual.py module is a library of references to remotely stored music data files,
as well as meta-data necessary to download and, if available, access an already downloaded file.
'''
import unittest
from music21 import common
from music21 import environment
_MOD = 'virtual.py'
environLocal = environment.Environment(_MOD)
class VirtualWork(object):
def __init__(self):
self.composer = None
self.title = None
# provide a partial path in the corpus that represents this file
# this path must be unique for each work
self.corpusPath = None
# a list of URLs in order of best usage
# these probably should all be the same format
self.urlList = []
# def _getDstFp(self, dir):
# '''Given a directory (usually the users scratch directory) create
# a file path based on the md5 of the works title. This means that all
# works must have unique titles in the virtual corpus.
# '''
# if dir == None:
# raise ValueError
# return os.path.join(dir, 'm21-' + common.getMd5(self.title) + '.p')
def getUrlByExt(self, extList=None):
'''Given a request for an extension, find a best match for a URL from
the list of known URLs. If ext is None, return the first URL.
'''
if not common.isListLike(extList):
extList = [extList]
        if extList is None or extList == [None]:
            return [self.urlList[0]] # no extension requested; return the first URL
post = []
for ext in extList:
for url in self.urlList:
unused_format, extFound = common.findFormatExtURL(url)
#environLocal.printDebug([extFound, ext])
if extFound == ext:
post.append(url)
        return post # URLs matching the requested extensions (may be empty)
#-------------------------------------------------------------------------------
# keep these in alphabetical order
class BachBWV1007Prelude(VirtualWork):
def __init__(self):
'''
>>> a = corpus.virtual.BachBWV1007Prelude()
>>> a.getUrlByExt('.xml')
['http://kern.ccarh.org/cgi-bin/ksdata?l=cc/bach/cello&file=bwv1007-01.krn&f=xml']
'''
VirtualWork.__init__(self)
self.composer = 'Johann Sebastian Bach'
self.title = 'Prelude from Cello Suite No. 1 in G Major, BWV 1007'
self.corpusPath = 'bach/bwv1007/prelude'
cello = 'http://kern.ccarh.org/cgi-bin/ksdata?l=cc/bach/cello'
self.urlList.append(cello + '&file=bwv1007-01.krn&f=xml')
self.urlList.append(cello + '&file=bwv1007-01.krn&f=kern')
class BachBWV772(VirtualWork):
def __init__(self):
VirtualWork.__init__(self)
self.composer = 'Johann Sebastian Bach'
self.title = 'Invention No. 1 in C Major, BWV 772'
self.corpusPath = 'bach/bwv772'
invention = 'http://kern.ccarh.org/cgi-bin/ksdata?l=osu/classical/bach/inventions'
self.urlList.append(invention + '&file=inven01.krn&f=xml')
class BachBWV773(VirtualWork):
def __init__(self):
VirtualWork.__init__(self)
self.composer = 'Johann Sebastian Bach'
self.title = 'Invention No. 2 in C Minor, BWV 773'
self.corpusPath = 'bach/bwv773'
invention = 'http://kern.ccarh.org/cgi-bin/ksdata?l=osu/classical/bach/inventions'
self.urlList.append(invention + '&file=inven02.krn&f=xml')
self.urlList.append(invention + '&file=inven02.krn&f=kern')
class ColtraneGiantSteps(VirtualWork):
# post operation: needs make accidentals
def __init__(self):
VirtualWork.__init__(self)
self.composer = 'John Coltrane'
self.title = 'Giant Steps'
self.corpusPath = 'coltrane/giantSteps'
self.urlList.append('http://impromastering.com/uploads/transcription_file/file/196/' +
'Giant_Steps__John_Coltrane_C.xml')
class SchubertD576(VirtualWork):
def __init__(self):
VirtualWork.__init__(self)
self.composer = 'Franz Schubert'
self.title = '13 Variations on a Theme by Anselm Hüttenbrenner'
self.corpusPath = 'schubert/d576-1'
self.urlList.append('http://kern.ccarh.org/cgi-bin/ksdata?l=cc/schubert/piano/' +
'd0576&file=d0576-06.krn&f=xml')
class SchubertD5762(VirtualWork):
def __init__(self):
VirtualWork.__init__(self)
self.composer = 'Franz Schubert'
self.title = '13 Variations on a Theme by Anselm Hüttenbrenner'
self.corpusPath = 'schubert/d576-2'
self.urlList.append('http://kern.ccarh.org/cgi-bin/ksdata?l=users/' +
'craig/classical/schubert/piano/d0576&file=d0576-02.krn&f=xml')
class SchubertD5763(VirtualWork):
def __init__(self):
VirtualWork.__init__(self)
self.composer = 'Franz Schubert'
self.title = '13 Variations on a Theme by Anselm Hüttenbrenner'
self.corpusPath = 'schubert/d576-3'
self.urlList.append('http://kern.ccarh.org/cgi-bin/ksdata?l=users/craig/classical/' +
'schubert/piano/d0576&file=d0576-03.krn&f=xml')
class SchubertD5764(VirtualWork):
def __init__(self):
VirtualWork.__init__(self)
self.composer = 'Franz Schubert'
self.title = '13 Variations on a Theme by Anselm Hüttenbrenner'
self.corpusPath = 'schubert/d576-4'
self.urlList.append('http://kern.ccarh.org/cgi-bin/ksdata?l=users/craig/classical/' +
'schubert/piano/d0576&file=d0576-04.krn&f=xml')
class PachelbelCanonD(VirtualWork):
def __init__(self):
VirtualWork.__init__(self)
self.composer = 'Johann Pachelbel'
self.title = 'Canon in D Major'
self.corpusPath = 'pachelbel/canon'
self.urlList.append('http://kern.ccarh.org/cgi-bin/ksdata?l=cc/' +
'pachelbel&file=canon.krn&f=xml')
#-------------------------------------------------------------------------------
class TestExternal(unittest.TestCase):
# interpreter loading
def runTest(self):
pass
def testParseURL(self):
pass
#urlA = 'http://kern.ccarh.org/cgi-bin/ksdata?l=cc/schubert/piano/d0576&file=d0576-06.krn&f=xml'
#urlB = 'http://kern.ccarh.org/cgi-bin/ksdata?l=cc/schubert/piano/d0576&file=d0576-06.krn&f=kern'
#urlC = 'http://kern.ccarh.org/cgi-bin/ksdata?l=cc/bach/cello&file=bwv1007-01.krn&f=xml'
class Test(unittest.TestCase):
def runTest(self):
pass
def testBasic(self):
'''Test copying all objects defined in the virtual corpus module
'''
a = BachBWV1007Prelude()
self.assertNotEquals(a.getUrlByExt(['.xml']), [])
self.assertNotEquals(a.getUrlByExt(['.krn']), [])
#-------------------------------------------------------------------------------
# define presented order in documentation
_DOC_ORDER = []
if __name__ == "__main__":
import music21
music21.mainTest(Test)
#music21.mainTest(Test, TestExternal)
#------------------------------------------------------------------------------
# eof
|
arnavd96/Cinemiezer
|
myvenv/lib/python3.4/site-packages/music21/corpus/virtual.py
|
Python
|
mit
| 7,729
|
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand
from django.db.models import get_models, get_app
from django.contrib.auth.management import create_permissions
class Command(BaseCommand):
args = '<app app ...>'
help = 'reloads permissions for specified apps, or all apps if no args are specified'
def handle(self, *args, **options):
if not args:
apps = []
for model in get_models():
try:
apps.append(get_app(model._meta.app_label))
except ImproperlyConfigured:
pass
else:
apps = []
for arg in args:
apps.append(get_app(arg))
for app in apps:
create_permissions(app, get_models(), options.get('verbosity', 0))
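# Usage sketch (the command name comes from this file's location under
# management/commands/):
#   python manage.py update_permissions            # all installed apps
#   python manage.py update_permissions app1 app2  # only the named apps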
|
gdoermann/django-limbo
|
limbo/management/commands/update_permissions.py
|
Python
|
mit
| 859
|
from __future__ import unicode_literals
import pytest
from design_patterns.observer import Subject
@pytest.fixture
def subject():
return Subject()
def test_subscribe(subject):
result = []
def func(*args, **kwargs):
result.append((args, kwargs))
subject.subscribe(func)
subject(1, 2, 3, k1='a', k2='b')
assert result == [((1, 2, 3), dict(k1='a', k2='b'))]
del result[:]
subject.subscribe(func)
subject('test2')
assert result == [(('test2', ), {})] * 2
def test_unsubscribe(subject):
result_a = []
def func_a(data):
result_a.append(data)
result_b = []
def func_b(data):
result_b.append(data)
sid_a = subject.subscribe(func_a)
sid_b = subject.subscribe(func_b)
subject('data1')
assert result_a == ['data1']
assert result_b == ['data1']
del result_a[:]
del result_b[:]
sid_a.unsubscribe()
subject('data2')
assert result_a == []
assert result_b == ['data2']
del result_a[:]
del result_b[:]
sid_b.unsubscribe()
subject('data3')
assert result_a == []
assert result_b == []
with pytest.raises(KeyError):
sid_b.unsubscribe()
|
victorlin/design-patterns
|
tests/test_observer.py
|
Python
|
mit
| 1,197
|
#!/usr/bin/env python
import cmd
class Illustrate(cmd.Cmd):
"Illustrate the base class method use."
def cmdloop(self, intro=None):
print('cmdloop(%s)' % intro)
return cmd.Cmd.cmdloop(self, intro)
def preloop(self):
print('preloop()')
def postloop(self):
print('postloop()')
def parseline(self, line):
print('parseline(%s) =>' % line,)
ret = cmd.Cmd.parseline(self, line)
print(ret)
return ret
def onecmd(self, s):
print('onecmd(%s)' % s)
return cmd.Cmd.onecmd(self, s)
def emptyline(self):
print('emptyline()')
return cmd.Cmd.emptyline(self)
def default(self, line):
print('default(%s)' % line)
return cmd.Cmd.default(self, line)
def precmd(self, line):
print('precmd(%s)' % line)
return cmd.Cmd.precmd(self, line)
def postcmd(self, stop, line):
print('postcmd(%s, %s)' % (stop, line))
return cmd.Cmd.postcmd(self, stop, line)
def do_greet(self, line):
print('hello,', line)
def do_EOF(self, line):
"Exit"
return True
if __name__ == '__main__':
Illustrate().cmdloop('Illustrating the methods of cmd.Cmd')
|
tleonhardt/CodingPlayground
|
python/cmd/override_base.py
|
Python
|
mit
| 1,238
|
from collections import Counter
def life(before):
"""
Takes as input a state of Conway's Game of Life, represented as an iterable
of ``(int, int)`` pairs giving the coordinates of living cells, and returns
a `set` of ``(int, int)`` pairs representing the next state
"""
before = set(before)
neighbors = Counter(
(x+i, y+j) for (x,y) in before
for i in [-1,0,1]
for j in [-1,0,1]
if (i,j) != (0,0)
)
return {xy for (xy, n) in neighbors.items()
if n == 3 or (n == 2 and xy in before)}
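# Worked example (a "blinker" oscillates between horizontal and vertical):
#   >>> sorted(life({(0, 1), (1, 1), (2, 1)}))
#   [(1, 0), (1, 1), (1, 2)]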
|
jwodder/ghutil
|
test/data/files/life.py
|
Python
|
mit
| 600
|
from constants import *
import pygame as pg
from time import sleep
from metronome import *
import math
import numpy as np
from copy import deepcopy
from audio import *
from instructions_panel import *
from loop import *
class MusicMaker:
def __init__(self, screen):
self.pitch = 0
self.screen = screen
self.pitch_range = PITCH_RANGE
self.b_left = 0
self.b_middle = 0
self.b_right = 0
self.saved = None
self.events = set()
self.metronome = Metronome(BUFFERS_PER_MEASURE)
self.is_measure = False
self.using_scales = list(range(1,6))
self.scale = self.using_scales[3]
self.scale_height = SCREEN_DIM[1] / len(self.using_scales)
self.background = None
self.background_needs_update = True
self.instructions = InstructionsPanel()
self.audio_player = None
self.audio_player = AudioPlayer(self)
self.audio_player.run()
def do_step(self):
        ## Wait until the audio player has been constructed (avoids a startup race with its callback thread)
while self.audio_player == None:
sleep(.1)
## Gather information from metronome, mouse, and keyboard
is_beat = self.metronome.is_beat(self.audio_player.loop_buffer_index)
self.is_measure = self.metronome.is_measure(self.audio_player.loop_buffer_index)
(m_x, m_y) = pygame.mouse.get_pos()
(last_b_left, last_b_middle, last_b_right) = (self.b_left, self.b_middle, self.b_right)
(self.b_left, self.b_middle, self.b_right) = pygame.mouse.get_pressed()
last_keys = keys[:]
keys.clear()
keys.extend(pygame.key.get_pressed())
## Center scales around mouse
if self.b_middle and not last_b_middle:
self.background_needs_update = True
m_x, m_y = self.center_scales_around(m_x, m_y)
## Run events scheduled for the beginning of the step
for e in sorted(list(self.events), key=lambda e: e[0]):
if e[2] == BEGIN_STEP:
if e[1] == NEXT_BUFFER or ( is_beat and e[1] == NEXT_BEAT ) or ( self.is_measure and e[1] == NEXT_MEASURE ):
self.audio_player.do_action(e[0])
self.events.remove(e)
###########################
## Keyboard and mouse input
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
## These events aren't caught by the pygame.mouse methods
elif event.type == pygame.MOUSEBUTTONDOWN:
## Scroll down
if event.button == 5:
self.audio_player.decrease_volume()
## Scroll up
if event.button == 4:
self.audio_player.increase_volume()
## Window resize
elif event.type == pygame.VIDEORESIZE:
w,h = event.size
min_w, min_h = MIN_DIM
w = max(min_w, w)
h = max(min_h, h)
update_screen_size((w,h))
self.background_needs_update = True
self.scale_height = SCREEN_DIM[1] / len(self.using_scales)
self.screen = pygame.display.set_mode(SCREEN_DIM, pygame.RESIZABLE)
## Get the exact pitch from the mouse x coordinate
self.mouse_pitch = self.coord_to_pitch(m_x, coord=0, reverse=False)
## Close the application
if is_key_mod(ESCAPE, None):
self.audio_player.stop_stream()
print("Ending stream...")
## Start and stop recording
if not keys[SPACE] and self.audio_player.loop_recording:
self.events.add(EVENT_STOP_LOOP_REC)
if keys[SPACE] and not self.audio_player.loop_recording:
self.events.add(EVENT_START_LOOP_REC)
## Start and stop playing of all loops
if is_key_mod(K_P, None) and not last_keys[K_P]:
if self.audio_player.loop_playing:
self.events.add(EVENT_STOP_LOOP_PLAY)
else:
self.events.add(EVENT_START_LOOP_PLAY)
## If a loop is selected:
if self.audio_player.active_loops[0] >= 0 and not self.audio_player.loop_recording:
## Move the active loops left/right by one beat (with wrapping)
if is_key_mod(LEFT, None) and not last_keys[LEFT]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].horizontal_shift(-1*self.metronome.beat_len)
if is_key_mod(RIGHT, None) and not last_keys[RIGHT]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].horizontal_shift(self.metronome.beat_len)
## Move the active loops left/right by one buffer (with wrapping)
if is_key_mod(LEFT, SHIFT) and not last_keys[LEFT]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].horizontal_shift(-1)
if is_key_mod(RIGHT, SHIFT) and not last_keys[RIGHT]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].horizontal_shift(1)
## Toggle mute on the active loops
if is_key_mod(K_M, None) and not last_keys[K_M]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].toggle_mute()
## Increase and decrease volume of the active loops
if keys[EQUALS] or keys[PLUS] or keys[KP_PLUS]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].adjust_volume(.02)
if keys[MINUS] or keys[KP_MINUS]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].adjust_volume(-.02)
## Copy the active loops below them as a group, and mute the copies
if is_key_mod(K_C, CTRL) and not last_keys[K_C]:
loop_copies = [self.audio_player.loops[i].get_copy() for i in self.audio_player.active_loops]
for i,loop in enumerate(loop_copies):
loop.set_mute(True)
self.audio_player.loops.insert(self.audio_player.active_loops[-1]+1+i, loop)
self.audio_player.active_loops = [x+len(loop_copies) for x in self.audio_player.active_loops]
## Move the active loops up and down in the lineup
other_index = -1
loops = self.audio_player.loops
if is_key_mod(UP, ALT) and not last_keys[UP] and self.audio_player.active_loops[0] > 0:
for index in self.audio_player.active_loops:
other_index = (index-1)%len(self.audio_player.loops)
loops[index], loops[other_index] = loops[other_index], loops[index]
self.audio_player.active_loops = [x-1 for x in self.audio_player.active_loops]
elif is_key_mod(DOWN, ALT) and not last_keys[DOWN] and self.audio_player.active_loops[-1] < len(loops)-1:
for index in self.audio_player.active_loops[::-1]:
other_index = (index+1)%len(self.audio_player.loops)
loops[index], loops[other_index] = loops[other_index], loops[index]
self.audio_player.active_loops = [x+1 for x in self.audio_player.active_loops]
## Add the selected loops
if is_key_mod(K_A, None) and not last_keys[K_A]:
while len(self.audio_player.active_loops) > 1:
i = self.audio_player.active_loops[0]
other = self.audio_player.active_loops.pop()
self.audio_player.loops[i].combine(self.audio_player.loops[other])
del self.audio_player.loops[other]
## Pitch shift the selected loops UP/DOWN
if is_key_mod(UP, CTRL) and is_key_mod(UP, SHIFT) and not last_keys[UP]:
for index in self.audio_player.active_loops:
#Shift up one eighth of a tone
self.audio_player.loops[index].pitch_shift(.25)
elif is_key_mod(UP, CTRL) and not last_keys[UP]:
for index in self.audio_player.active_loops:
#Shift up one semitone
self.audio_player.loops[index].pitch_shift(1)
elif is_key_mod(DOWN, CTRL) and is_key_mod(DOWN, SHIFT) and not last_keys[DOWN]:
for index in self.audio_player.active_loops:
                    #Shift down one eighth of a tone
self.audio_player.loops[index].pitch_shift(-.25)
elif is_key_mod(DOWN, CTRL) and not last_keys[DOWN]:
for index in self.audio_player.active_loops:
                    #Shift down one semitone
self.audio_player.loops[index].pitch_shift(-1)
## Delete the current loop with backspace or delete
if (is_key_mod(BACKSPACE, None) and not last_keys[BACKSPACE]) or (is_key_mod(DELETE, None) and not last_keys[DELETE]):
for i in self.audio_player.active_loops[::-1]:
del self.audio_player.loops[i]
self.audio_player.active_loops = [self.audio_player.active_loops[0]]
if self.audio_player.active_loops[0] >= len(self.audio_player.loops):
self.audio_player.active_loops[0] -= 1
else: ## Metronome selected (index -1)
##Only allow changes to the metronome when there are no loops:
if len(self.audio_player.loops) == 0:
## Add or subtract from the metronome length
if is_key_mod(LEFT, None) and not last_keys[LEFT]:
self.metronome.change_measure_length(-self.metronome.beats)
if is_key_mod(RIGHT, None) and not last_keys[RIGHT]:
self.metronome.change_measure_length(self.metronome.beats)
## Add or subtract from the metronome beat count
if is_key_mod(LEFT, SHIFT) and not last_keys[LEFT]:
self.metronome.change_beat_count(-1)
if is_key_mod(RIGHT, SHIFT) and not last_keys[RIGHT]:
self.metronome.change_beat_count(1)
## Toggle justify pitch
if is_key_mod(K_J, None) and not last_keys[K_J]:
self.audio_player.justify_pitch = not self.audio_player.justify_pitch
self.background_needs_update = True
for loop in self.audio_player.loops:
loop.recalculate_buffers()
if not self.audio_player.loop_recording:
## Move the active loop indicator up and down
if is_key_mod(UP, None) and not last_keys[UP]:
self.audio_player.active_loops = [ self.audio_player.active_loops[0] % (len(self.audio_player.loops)+1) - 1 ]
if is_key_mod(DOWN, None) and not last_keys[DOWN]:
self.audio_player.active_loops = [ (self.audio_player.active_loops[-1]+2) % (len(self.audio_player.loops)+1) - 1 ]
## Select a range of loops
if is_key_mod(UP, SHIFT) and not is_key_mod(UP, CTRL) and not last_keys[UP] and self.audio_player.active_loops[0] > 0:
self.audio_player.active_loops.insert(0, self.audio_player.active_loops[0]-1)
if is_key_mod(DOWN, SHIFT) and not is_key_mod(DOWN, CTRL) and not last_keys[DOWN] and self.audio_player.active_loops[0] >= 0 and self.audio_player.active_loops[-1] < len(self.audio_player.loops) - 1:
self.audio_player.active_loops.append(self.audio_player.active_loops[-1]+1)
## Multiply metronome and loops a given number of times
for num in range(0,10):
if is_key_mod(NUMS[num], None) and not last_keys[NUMS[num]]:
self.audio_player.multiply_tracks(num)
## Articulating and continuing a note playing
if self.b_left:
if not self.audio_player.playing:
self.audio_player.articulate()
else:
self.audio_player.settle_to_volume()
## Allowing a note to fade away when not left clicking
if not self.b_left:
self.audio_player.volume_decay()
## Identify the current scale by mouse position
self.scale_index = (self.using_scales[0] + int(m_y / SCREEN_DIM[1] * len(self.using_scales))) %12
self.scale = SCALES[self.scale_index]
## Temporarily align to the chromatic scale on the current scale
if (self.b_right):
self.scale = CHROMATIC_SCALE
## Show and hide the instructions (really for QUESTION_MARK, but SLASH is more accepting)
if (keys[SLASH] and not last_keys[SLASH]):
self.instructions.minimized = not self.instructions.minimized
#######################
        ## Pitch decision-making
## Get scale degree of closest pitch
self.closest_pitch = sorted(self.scale, key=lambda x: min(abs((self.mouse_pitch%12)-x), 12 - abs((self.mouse_pitch%12)-x))) [0]
## Put closest pitch in correct octave
self.closest_pitch += math.floor(self.mouse_pitch / 12) * 12
        ## Handle wrap-around: if the nearest scale degree landed nearly an octave below the mouse pitch, move it up an octave
if abs(self.mouse_pitch - self.closest_pitch) > 10:
self.closest_pitch += 12
## In case we switched scales for the chromatic scale, switch back now that we decided on a closest pitch
self.scale = SCALES[self.scale_index]
## Decide whether to align to the closest pitch, or use the mouse pitch
#if not last_b_middle:
if self.b_left or self.audio_player.volume == 0:
if is_key_mod(K_S, None):
self.pitch = self.mouse_pitch
else:
self.pitch = self.closest_pitch
## Run events scheduled for the end of the step
for e in sorted(list(self.events), key=lambda e: e[0]):
if e[2] == END_STEP:
if e[1] == NEXT_BUFFER or ( is_beat and e[1] == NEXT_BEAT ) or ( self.is_measure and e[1] == NEXT_MEASURE ):
self.audio_player.do_action(e[0])
self.events.remove(e)
self.paint_screen()
def center_scales_around(self, m_x, m_y):
range_width = self.pitch_range[1] - self.pitch_range[0]
range_middle = self.pitch_range[1] - range_width // 2
diff = self.closest_pitch - range_middle
self.pitch_range = (self.pitch_range[0]+diff, self.pitch_range[1]+diff)
y_diff = self.scale_index - self.using_scales[len(self.using_scales)//2]
self.using_scales = [(i+y_diff)%12 for i in self.using_scales]
new_m_x = self.pitch_to_coord(self.mouse_pitch)
new_m_y = m_y-y_diff*self.scale_height
pygame.mouse.set_pos(new_m_x, new_m_y)
return new_m_x, new_m_y
def paint_screen(self):
## Draw the mostly unchanging buffered background
if self.background == None or self.background_needs_update:
self.background = self.redraw_background()
self.screen.blit(self.background, (0,0))
## Draw the active notes
y=0
notes = [l.recorded_notes[self.audio_player.loop_buffer_index] for l in self.audio_player.loops if not l.muted]
self.recorded_notes_to_draw = [rn for sublist in notes for rn in sublist]
for i in self.using_scales:
s = SCALES[i]
self.draw_scale_activity(s, y, self.scale is s)
y += self.scale_height
## Draw metronome
self.metronome.paint_self(self.screen, self.audio_player.loop_buffer_index, -1 in self.audio_player.active_loops)
## Draw the loops
y = 60
x = 10
w = self.metronome.measure_len * self.metronome.visual_buffer_width
h = 30
v_margin = 10
for i in range(len(self.audio_player.loops)):
loop = self.audio_player.loops[i]
loop.paint_self(self.screen, (x,y,w,h), i in self.audio_player.active_loops, self.audio_player.loop_recording)
y += h + v_margin
## Draw the instruction panel
self.instructions.paint_self(self.screen)
pygame.display.flip()
'''
Draws the active elements of a scale (row of notes) on the screen.
'''
def draw_scale_activity(self, scale, y, is_active):
notes_to_draw = [rn for rn in self.recorded_notes_to_draw if rn.scale==scale]
if self.scale == scale:
notes_to_draw.append(RecordedNote(-1, self.pitch, self.audio_player.volume, None, self.scale, None, None))
for p in range(self.pitch_range[0], self.pitch_range[1]+1):
p_i = p % 12
if p_i in scale:
x = self.pitch_to_coord(p, coord=0, reverse=False, scale=scale[0])
color = ACTIVE_COLORS[p_i] if is_active and self.closest_pitch == p else INACTIVE_COLORS[p_i]
##Determine line width based on notes_to_draw:
on_this_pitch = [rn for rn in notes_to_draw if rn.pitch == p]
notes_to_draw = [rn for rn in notes_to_draw if not rn in on_this_pitch]
if len(on_this_pitch) > 0:
sum_volume = sum(map(lambda rn: rn.get_loudness(), on_this_pitch))
line_width = max(INACTIVE_NOTE_WIDTH, int(sum_volume*ACTIVE_NOTE_STRETCH))
pygame.draw.line(self.screen, color, (x,y), (x,y+self.scale_height), line_width)
if get_font() and p_i == scale[0]:
l1 = get_font().render(NOTE_NAMES[p_i], 1, color)
self.screen.blit(l1, (x+10, y+self.scale_height-30))
if is_active:
color = INACTIVE_COLORS[scale[0]]
pygame.draw.line(self.screen, color, (0,y), (SCREEN_DIM[0],y), 4)
pygame.draw.line(self.screen, color, (0,y+self.scale_height), (SCREEN_DIM[0],y+self.scale_height), 4)
## The remaining pitches in notes_to_draw are not on a bar
for rn in notes_to_draw:
line_width = max(INACTIVE_NOTE_WIDTH, int(rn.get_loudness() * ACTIVE_NOTE_STRETCH))
x = self.pitch_to_coord(rn.pitch)
pygame.draw.line(self.screen, FREE_NOTE_COLOR, (x, y), (x,y+self.scale_height), line_width)
'''
Draws the inactive scale elements into a buffer image
'''
def redraw_background(self):
self.background_needs_update = False
screen = pygame.Surface(SCREEN_DIM)
screen.fill(BACK_COLOR)
y=0
for i in self.using_scales:
self.draw_scale_background(screen, SCALES[i], y)
y += self.scale_height
return screen
'''
Draws the inactive elements of one scale onto an image
'''
def draw_scale_background(self, screen, scale, y):
pygame.draw.rect(screen, DARK_COLORS[scale[0]], (0,y,SCREEN_DIM[0],self.scale_height))
pygame.draw.line(screen, SCALE_INACTIVE_SEPARATOR_COLOR, (0,y), (SCREEN_DIM[0],y), 1)
pygame.draw.line(screen, SCALE_INACTIVE_SEPARATOR_COLOR, (0,y+self.scale_height), (SCREEN_DIM[0],y+self.scale_height), 1)
for p in range(self.pitch_range[0], self.pitch_range[1]+1):
p_i = p % 12
if p_i in scale:
x = self.pitch_to_coord(p, coord=0, reverse=False, scale=scale[0])
pygame.draw.line(screen, INACTIVE_COLORS[p_i], (x,y), (x,y+self.scale_height), INACTIVE_NOTE_WIDTH)
if get_font() and p_i == scale[0]:
l1 = get_font().render(NOTE_NAMES[p_i], 1, INACTIVE_COLORS[p_i])
screen.blit(l1, (x+10, y+self.scale_height-30))
def coord_to_pitch(self, y, coord=0, reverse=False):
if reverse:
return (self.pitch_range[1] - self.pitch_range[0]) / SCREEN_DIM[coord] * (SCREEN_DIM[coord] - y) + self.pitch_range[0]
else:
return (self.pitch_range[1] - self.pitch_range[0]) / SCREEN_DIM[coord] * y + self.pitch_range[0]
def pitch_to_coord(self, p, coord=0, reverse=False, scale=None):
if scale != None and self.audio_player.justify_pitch:
p = pitch_to_just_pitch(p, scale)
if reverse:
return SCREEN_DIM[coord] - (p - self.pitch_range[0]) / (self.pitch_range[1] - self.pitch_range[0]) * SCREEN_DIM[coord]
else:
return (p - self.pitch_range[0]) / (self.pitch_range[1] - self.pitch_range[0]) * SCREEN_DIM[coord]
|
kenanbit/loopsichord
|
music_maker.py
|
Python
|
mit
| 20,511
|
import _plotly_utils.basevalidators
class TicklabelstepValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self,
plotly_name="ticklabelstep",
parent_name="scatterternary.marker.colorbar",
**kwargs
):
super(TicklabelstepValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 1),
**kwargs
)
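# Usage sketch (assumed typical call site; ticklabelstep accepts integers >= 1,
# matching min=1 above):
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Scatterternary(
#       a=[1, 2], b=[2, 1], c=[3, 3],
#       marker=dict(color=[1, 2], colorbar=dict(ticklabelstep=2))))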
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/scatterternary/marker/colorbar/_ticklabelstep.py
|
Python
|
mit
| 519
|
# -*- coding: utf-8 -*-
from ..base import ApiBase
class Order(ApiBase):
_category = 'ihotel'
    def list(self, **kwargs):
        """List orders. Method name: ihotel.order.list; HTTPS is required.
http://open.elong.com/wiki/Ihotel.order.list
"""
return self._request('list', https=True, raw=True, **kwargs)
    def create(self, **kwargs):
        """Create an order. Method name: ihotel.order.create; HTTPS is required.
http://open.elong.com/wiki/Ihotel.order.create
"""
return self._request('create', https=True, raw=True, **kwargs)
    def detail(self, **kwargs):
        """Order details. Method name: ihotel.order.detail; HTTPS is required.
http://open.elong.com/wiki/Ihotel.order.detail
"""
return self._request('detail', https=True, raw=True, **kwargs)
    def canceltips(self, **kwargs):
        """Order cancellation tips. Method name: ihotel.order.canceltips; HTTPS is required.
http://open.elong.com/wiki/Ihotel.order.canceltips
"""
return self._request('canceltips', https=True, raw=True, **kwargs)
    def cancel(self, **kwargs):
        """Cancel an order. Method name: ihotel.order.cancel; HTTPS is required.
http://open.elong.com/wiki/Ihotel.order.cancel
"""
return self._request('cancel', https=True, raw=True, **kwargs)
|
DeanThompson/pyelong
|
pyelong/api/ihotel/order.py
|
Python
|
mit
| 1,386
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
from datetime import date, datetime, timedelta
try:
import unittest2 as unittest
except ImportError:
import unittest
import agate
import numpy
import agatenumpy
agatenumpy.patch()
class TestNumpy(unittest.TestCase):
def setUp(self):
self.rows = (
('1', 'a', 'True', '10/01/2015', '10/01/2015 12:30 PM', '4h45m'),
('2', 'b', 'False', '11/01/2015', '11/01/2015 12:45 PM', '3h25m'),
('', '', '', '', '', '')
)
self.number_type = agate.Number()
self.text_type = agate.Text()
self.boolean_type = agate.Boolean()
self.date_type = agate.Date()
self.datetime_type = agate.DateTime()
self.timedelta_type = agate.TimeDelta()
self.column_names = ('number', 'text', 'boolean', 'date', 'datetime', 'timedelta')
self.column_types = (self.number_type, self.text_type, self.boolean_type, self.date_type, self.datetime_type, self.timedelta_type)
self.table = agate.Table(self.rows, zip(self.column_names, self.column_types))
def test_from_numpy(self):
data = [
(1, u'a', True, date(2015, 10, 1), datetime(2015, 10, 1, 12, 30), timedelta(hours=4, minutes=45)),
(2, u'b', False, date(2015, 11, 1), datetime(2015, 11, 1, 12, 45), timedelta(hours=3, minutes=25)),
(numpy.nan, u'', False, None, None, None)
]
numpy_types = [
'float_',
'U1',
'bool_',
'datetime64[D]',
'datetime64[us]',
'timedelta64[us]'
]
numpy_table = numpy.rec.array(data, dtype=zip(self.column_names, numpy_types))
# for name, dtype in numpy_table.dtype:
print numpy_table
for column_name in numpy_table.dtype.names:
numpy_type = numpy_table.dtype.fields[column_name][0]
print numpy_type.type, numpy_type.descr
table = agate.Table.from_numpy(numpy_table)
self.assertEqual(len(table.rows), len(numpy_table))
self.assertEqual(table.columns['number'], [1, 2, None])
self.assertEqual(table.columns['text'], [u'a', u'b', u''])
self.assertEqual(table.columns['boolean'], [True, False, False])
self.assertEqual(table.columns['date'], [date(2015, 10, 1), date(2015, 11, 1), None])
self.assertEqual(table.columns['datetime'], [datetime(2015, 10, 1, 12, 30), datetime(2015, 11, 1, 12, 45), None])
self.assertEqual(table.columns['timedelta'], [timedelta(hours=4, minutes=45), timedelta(hours=3, minutes=25), None])
def test_to_numpy(self):
numpy_table = self.table.to_numpy()
print type(numpy_table)
self.assertEqual(len(numpy_table), len(self.table.rows))
numpy.testing.assert_array_equal(numpy_table['number'], [1, 2, numpy.nan])
numpy.testing.assert_array_equal(numpy_table['text'], [u'a', u'b', u''])
numpy.testing.assert_array_equal(numpy_table['boolean'], [True, False, False])
numpy.testing.assert_array_equal(numpy_table['date'], [date(2015, 10, 1), date(2015, 11, 1), None])
numpy.testing.assert_array_equal(numpy_table['datetime'], [datetime(2015, 10, 1, 12, 30), datetime(2015, 11, 1, 12, 45), None])
numpy.testing.assert_array_equal(numpy_table['timedelta'], [timedelta(hours=4, minutes=45), timedelta(hours=3, minutes=25), None])
|
onyxfish/agate-numpy
|
tests/test_agatenumpy.py
|
Python
|
mit
| 3,421
|
"""
Convenience decorators for use in fabfiles.
"""
from __future__ import with_statement
#from Crypto import Random
#from fabric import tasks
from fabric.api import runs_once as _runs_once
#from .context_managers import settings
from burlap.tasks import WrappedCallableTask
def task_or_dryrun(*args, **kwargs):
"""
Decorator declaring the wrapped function to be a new-style task.
May be invoked as a simple, argument-less decorator (i.e. ``@task``) or
with arguments customizing its behavior (e.g. ``@task(alias='myalias')``).
Please see the :ref:`new-style task <task-decorator>` documentation for
details on how to use this decorator.
.. versionchanged:: 1.2
Added the ``alias``, ``aliases``, ``task_class`` and ``default``
keyword arguments. See :ref:`task-decorator-arguments` for details.
.. versionchanged:: 1.5
Added the ``name`` keyword argument.
.. seealso:: `~fabric.docs.unwrap_tasks`, `~fabric.tasks.WrappedCallableTask`
"""
invoked = bool(not args or kwargs)
task_class = kwargs.pop("task_class", WrappedCallableTask)
    func = None
    if not invoked:
        # Bare decoration (@task_or_dryrun): the function is the sole positional arg.
        func, args = args[0], ()
def wrapper(func):
return task_class(func, *args, **kwargs)
wrapper.is_task_or_dryrun = True
wrapper.wrapped = func
return wrapper if invoked else wrapper(func)
_METHOD_ATTRIBUTES = ['deploy_before', 'is_post_callback']
def _task(meth):
meth.is_task = True
def wrapper(self, *args, **kwargs):
ret = meth(self, *args, **kwargs)
        # Ensure each satchel's local variable scope is cleared after every server execution.
self.clear_local_renderer()
return ret
if hasattr(meth, 'is_deployer') or meth.__name__ == 'configure':
# Copy the wrapped method's attributes to the wrapper.
wrapper.__name__ = meth.__name__
for attr in _METHOD_ATTRIBUTES:
if hasattr(meth, attr):
setattr(wrapper, attr, getattr(meth, attr))
return wrapper
return meth
def task(*args, **kwargs):
"""
Decorator for registering a satchel method as a Fabric task.
Can be used like:
@task
def my_method(self):
...
@task(precursors=['other_satchel'])
def my_method(self):
...
"""
precursors = kwargs.pop('precursors', None)
post_callback = kwargs.pop('post_callback', False)
if args and callable(args[0]):
# direct decoration, @task
return _task(*args)
# callable decoration, @task(precursors=['satchel'])
def wrapper(meth):
if precursors:
meth.deploy_before = list(precursors)
if post_callback:
#from burlap.common import post_callbacks
#post_callbacks.append(meth)
meth.is_post_callback = True
return _task(meth)
return wrapper
def runs_once(meth):
"""
A wrapper around Fabric's runs_once() to support our dryrun feature.
"""
from burlap.common import get_dryrun, runs_once_methods
if get_dryrun():
pass
else:
runs_once_methods.append(meth)
_runs_once(meth)
return meth
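# Behaviour sketch: under dryrun the method is returned unwrapped (so a
# simulated deploy may call it repeatedly); otherwise it is recorded in
# runs_once_methods and wrapped by Fabric's runs_once so it executes at
# most once per session. Hypothetical usage:
#
#     @runs_once
#     def collect_facts():
#         ...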
|
chrisspen/burlap
|
burlap/decorators.py
|
Python
|
mit
| 3,218
|
# Generated from /Users/xudong/git/HTTPIDL/Grammar/HTTPIDLLexer.g4 by ANTLR 4.7
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2")
buf.write(u"+\u013a\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4")
buf.write(u"\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r")
buf.write(u"\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22")
buf.write(u"\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4")
buf.write(u"\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35")
buf.write(u"\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4")
buf.write(u"$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t")
buf.write(u",\4-\t-\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3\3\3\3")
buf.write(u"\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3")
buf.write(u"\6\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write(u"\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3")
buf.write(u"\t\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3\13\3\f")
buf.write(u"\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r")
buf.write(u"\3\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\17")
buf.write(u"\3\17\3\20\3\20\3\21\3\21\3\22\3\22\3\23\3\23\3\24\3")
buf.write(u"\24\3\25\3\25\3\26\3\26\3\27\3\27\3\30\3\30\3\31\3\31")
buf.write(u"\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3")
buf.write(u"\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35\3\35")
buf.write(u"\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3")
buf.write(u"\37\3\37\3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!\3")
buf.write(u"\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3")
buf.write(u"%\3%\3%\3%\3%\3&\3&\3&\3&\7&\u010f\n&\f&\16&\u0112\13")
buf.write(u"&\3&\3&\3&\3&\3&\3\'\5\'\u011a\n\'\3\'\3\'\3\'\3\'\3")
buf.write(u"(\6(\u0121\n(\r(\16(\u0122\3(\3(\3)\3)\5)\u0129\n)\3")
buf.write(u")\3)\3)\7)\u012e\n)\f)\16)\u0131\13)\3*\3*\3+\3+\3,\3")
buf.write(u",\3-\3-\3\u0110\2.\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n")
buf.write(u"\23\13\25\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24")
buf.write(u"\'\25)\26+\27-\30/\31\61\32\63\33\65\34\67\359\36;\37")
buf.write(u"= ?!A\"C#E$G%I&K\'M(O)Q*S+U\2W\2Y\2\3\2\5\4\2\13\13\"")
buf.write(u"\"\4\2C\\c|\3\2\62;\2\u013d\2\3\3\2\2\2\2\5\3\2\2\2\2")
buf.write(u"\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17")
buf.write(u"\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27")
buf.write(u"\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37")
buf.write(u"\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2")
buf.write(u"\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2")
buf.write(u"\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2")
buf.write(u"\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2")
buf.write(u"\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3")
buf.write(u"\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\3[\3\2\2\2\5")
buf.write(u"c\3\2\2\2\7j\3\2\2\2\tn\3\2\2\2\13s\3\2\2\2\ry\3\2\2")
buf.write(u"\2\17\u0081\3\2\2\2\21\u0089\3\2\2\2\23\u008e\3\2\2\2")
buf.write(u"\25\u0092\3\2\2\2\27\u0098\3\2\2\2\31\u009f\3\2\2\2\33")
buf.write(u"\u00a7\3\2\2\2\35\u00b0\3\2\2\2\37\u00b2\3\2\2\2!\u00b4")
buf.write(u"\3\2\2\2#\u00b6\3\2\2\2%\u00b8\3\2\2\2\'\u00ba\3\2\2")
buf.write(u"\2)\u00bc\3\2\2\2+\u00be\3\2\2\2-\u00c0\3\2\2\2/\u00c2")
buf.write(u"\3\2\2\2\61\u00c4\3\2\2\2\63\u00c6\3\2\2\2\65\u00c8\3")
buf.write(u"\2\2\2\67\u00ce\3\2\2\29\u00d5\3\2\2\2;\u00db\3\2\2\2")
buf.write(u"=\u00e2\3\2\2\2?\u00e7\3\2\2\2A\u00ee\3\2\2\2C\u00f5")
buf.write(u"\3\2\2\2E\u00fa\3\2\2\2G\u00ff\3\2\2\2I\u0105\3\2\2\2")
buf.write(u"K\u010a\3\2\2\2M\u0119\3\2\2\2O\u0120\3\2\2\2Q\u0128")
buf.write(u"\3\2\2\2S\u0132\3\2\2\2U\u0134\3\2\2\2W\u0136\3\2\2\2")
buf.write(u"Y\u0138\3\2\2\2[\\\7O\2\2\\]\7G\2\2]^\7U\2\2^_\7U\2\2")
buf.write(u"_`\7C\2\2`a\7I\2\2ab\7G\2\2b\4\3\2\2\2cd\7U\2\2de\7V")
buf.write(u"\2\2ef\7T\2\2fg\7W\2\2gh\7E\2\2hi\7V\2\2i\6\3\2\2\2j")
buf.write(u"k\7I\2\2kl\7G\2\2lm\7V\2\2m\b\3\2\2\2no\7J\2\2op\7G\2")
buf.write(u"\2pq\7C\2\2qr\7F\2\2r\n\3\2\2\2st\7V\2\2tu\7T\2\2uv\7")
buf.write(u"C\2\2vw\7E\2\2wx\7G\2\2x\f\3\2\2\2yz\7E\2\2z{\7Q\2\2")
buf.write(u"{|\7P\2\2|}\7P\2\2}~\7G\2\2~\177\7E\2\2\177\u0080\7V")
buf.write(u"\2\2\u0080\16\3\2\2\2\u0081\u0082\7Q\2\2\u0082\u0083")
buf.write(u"\7R\2\2\u0083\u0084\7V\2\2\u0084\u0085\7K\2\2\u0085\u0086")
buf.write(u"\7Q\2\2\u0086\u0087\7P\2\2\u0087\u0088\7U\2\2\u0088\20")
buf.write(u"\3\2\2\2\u0089\u008a\7R\2\2\u008a\u008b\7Q\2\2\u008b")
buf.write(u"\u008c\7U\2\2\u008c\u008d\7V\2\2\u008d\22\3\2\2\2\u008e")
buf.write(u"\u008f\7R\2\2\u008f\u0090\7W\2\2\u0090\u0091\7V\2\2\u0091")
buf.write(u"\24\3\2\2\2\u0092\u0093\7R\2\2\u0093\u0094\7C\2\2\u0094")
buf.write(u"\u0095\7V\2\2\u0095\u0096\7E\2\2\u0096\u0097\7J\2\2\u0097")
buf.write(u"\26\3\2\2\2\u0098\u0099\7F\2\2\u0099\u009a\7G\2\2\u009a")
buf.write(u"\u009b\7N\2\2\u009b\u009c\7G\2\2\u009c\u009d\7V\2\2\u009d")
buf.write(u"\u009e\7G\2\2\u009e\30\3\2\2\2\u009f\u00a0\7T\2\2\u00a0")
buf.write(u"\u00a1\7G\2\2\u00a1\u00a2\7S\2\2\u00a2\u00a3\7W\2\2\u00a3")
buf.write(u"\u00a4\7G\2\2\u00a4\u00a5\7U\2\2\u00a5\u00a6\7V\2\2\u00a6")
buf.write(u"\32\3\2\2\2\u00a7\u00a8\7T\2\2\u00a8\u00a9\7G\2\2\u00a9")
buf.write(u"\u00aa\7U\2\2\u00aa\u00ab\7R\2\2\u00ab\u00ac\7Q\2\2\u00ac")
buf.write(u"\u00ad\7P\2\2\u00ad\u00ae\7U\2\2\u00ae\u00af\7G\2\2\u00af")
buf.write(u"\34\3\2\2\2\u00b0\u00b1\7\61\2\2\u00b1\36\3\2\2\2\u00b2")
buf.write(u"\u00b3\7}\2\2\u00b3 \3\2\2\2\u00b4\u00b5\7\177\2\2\u00b5")
buf.write(u"\"\3\2\2\2\u00b6\u00b7\7*\2\2\u00b7$\3\2\2\2\u00b8\u00b9")
buf.write(u"\7+\2\2\u00b9&\3\2\2\2\u00ba\u00bb\7&\2\2\u00bb(\3\2")
buf.write(u"\2\2\u00bc\u00bd\7>\2\2\u00bd*\3\2\2\2\u00be\u00bf\7")
buf.write(u"@\2\2\u00bf,\3\2\2\2\u00c0\u00c1\7.\2\2\u00c1.\3\2\2")
buf.write(u"\2\u00c2\u00c3\7?\2\2\u00c3\60\3\2\2\2\u00c4\u00c5\7")
buf.write(u"=\2\2\u00c5\62\3\2\2\2\u00c6\u00c7\7^\2\2\u00c7\64\3")
buf.write(u"\2\2\2\u00c8\u00c9\7K\2\2\u00c9\u00ca\7P\2\2\u00ca\u00cb")
buf.write(u"\7V\2\2\u00cb\u00cc\7\65\2\2\u00cc\u00cd\7\64\2\2\u00cd")
buf.write(u"\66\3\2\2\2\u00ce\u00cf\7W\2\2\u00cf\u00d0\7K\2\2\u00d0")
buf.write(u"\u00d1\7P\2\2\u00d1\u00d2\7V\2\2\u00d2\u00d3\7\65\2\2")
buf.write(u"\u00d3\u00d4\7\64\2\2\u00d48\3\2\2\2\u00d5\u00d6\7K\2")
buf.write(u"\2\u00d6\u00d7\7P\2\2\u00d7\u00d8\7V\2\2\u00d8\u00d9")
buf.write(u"\78\2\2\u00d9\u00da\7\66\2\2\u00da:\3\2\2\2\u00db\u00dc")
buf.write(u"\7W\2\2\u00dc\u00dd\7K\2\2\u00dd\u00de\7P\2\2\u00de\u00df")
buf.write(u"\7V\2\2\u00df\u00e0\78\2\2\u00e0\u00e1\7\66\2\2\u00e1")
buf.write(u"<\3\2\2\2\u00e2\u00e3\7D\2\2\u00e3\u00e4\7Q\2\2\u00e4")
buf.write(u"\u00e5\7Q\2\2\u00e5\u00e6\7N\2\2\u00e6>\3\2\2\2\u00e7")
buf.write(u"\u00e8\7F\2\2\u00e8\u00e9\7Q\2\2\u00e9\u00ea\7W\2\2\u00ea")
buf.write(u"\u00eb\7D\2\2\u00eb\u00ec\7N\2\2\u00ec\u00ed\7G\2\2\u00ed")
buf.write(u"@\3\2\2\2\u00ee\u00ef\7U\2\2\u00ef\u00f0\7V\2\2\u00f0")
buf.write(u"\u00f1\7T\2\2\u00f1\u00f2\7K\2\2\u00f2\u00f3\7P\2\2\u00f3")
buf.write(u"\u00f4\7I\2\2\u00f4B\3\2\2\2\u00f5\u00f6\7H\2\2\u00f6")
buf.write(u"\u00f7\7K\2\2\u00f7\u00f8\7N\2\2\u00f8\u00f9\7G\2\2\u00f9")
buf.write(u"D\3\2\2\2\u00fa\u00fb\7D\2\2\u00fb\u00fc\7N\2\2\u00fc")
buf.write(u"\u00fd\7Q\2\2\u00fd\u00fe\7D\2\2\u00feF\3\2\2\2\u00ff")
buf.write(u"\u0100\7C\2\2\u0100\u0101\7T\2\2\u0101\u0102\7T\2\2\u0102")
buf.write(u"\u0103\7C\2\2\u0103\u0104\7[\2\2\u0104H\3\2\2\2\u0105")
buf.write(u"\u0106\7F\2\2\u0106\u0107\7K\2\2\u0107\u0108\7E\2\2\u0108")
buf.write(u"\u0109\7V\2\2\u0109J\3\2\2\2\u010a\u010b\7\61\2\2\u010b")
buf.write(u"\u010c\7,\2\2\u010c\u0110\3\2\2\2\u010d\u010f\13\2\2")
buf.write(u"\2\u010e\u010d\3\2\2\2\u010f\u0112\3\2\2\2\u0110\u0111")
buf.write(u"\3\2\2\2\u0110\u010e\3\2\2\2\u0111\u0113\3\2\2\2\u0112")
buf.write(u"\u0110\3\2\2\2\u0113\u0114\7,\2\2\u0114\u0115\7\61\2")
buf.write(u"\2\u0115\u0116\3\2\2\2\u0116\u0117\b&\2\2\u0117L\3\2")
buf.write(u"\2\2\u0118\u011a\7\17\2\2\u0119\u0118\3\2\2\2\u0119\u011a")
buf.write(u"\3\2\2\2\u011a\u011b\3\2\2\2\u011b\u011c\7\f\2\2\u011c")
buf.write(u"\u011d\3\2\2\2\u011d\u011e\b\'\2\2\u011eN\3\2\2\2\u011f")
buf.write(u"\u0121\t\2\2\2\u0120\u011f\3\2\2\2\u0121\u0122\3\2\2")
buf.write(u"\2\u0122\u0120\3\2\2\2\u0122\u0123\3\2\2\2\u0123\u0124")
buf.write(u"\3\2\2\2\u0124\u0125\b(\2\2\u0125P\3\2\2\2\u0126\u0129")
buf.write(u"\5U+\2\u0127\u0129\5Y-\2\u0128\u0126\3\2\2\2\u0128\u0127")
buf.write(u"\3\2\2\2\u0129\u012f\3\2\2\2\u012a\u012e\5U+\2\u012b")
buf.write(u"\u012e\5W,\2\u012c\u012e\5Y-\2\u012d\u012a\3\2\2\2\u012d")
buf.write(u"\u012b\3\2\2\2\u012d\u012c\3\2\2\2\u012e\u0131\3\2\2")
buf.write(u"\2\u012f\u012d\3\2\2\2\u012f\u0130\3\2\2\2\u0130R\3\2")
buf.write(u"\2\2\u0131\u012f\3\2\2\2\u0132\u0133\13\2\2\2\u0133T")
buf.write(u"\3\2\2\2\u0134\u0135\t\3\2\2\u0135V\3\2\2\2\u0136\u0137")
buf.write(u"\t\4\2\2\u0137X\3\2\2\2\u0138\u0139\7a\2\2\u0139Z\3\2")
buf.write(u"\2\2\t\2\u0110\u0119\u0122\u0128\u012d\u012f\3\2\3\2")
return buf.getvalue()
class HTTPIDLLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
MESSAGE = 1
STRUCT = 2
GET = 3
HEAD = 4
TRACE = 5
CONNECT = 6
OPTIONS = 7
POST = 8
PUT = 9
PATCH = 10
DELETE = 11
REQUEST = 12
RESPONSE = 13
SLASH = 14
LCURLY = 15
RCURLY = 16
LPAREN = 17
RPAREN = 18
DOLLAR = 19
LABRACKET = 20
RABRACKET = 21
COMMA = 22
ASSIGN = 23
SEMICOLON = 24
ESCAPE = 25
INT32 = 26
UINT32 = 27
INT64 = 28
UINT64 = 29
BOOL = 30
DOUBLE = 31
STRING = 32
FILE = 33
BLOB = 34
ARRAY = 35
DICT = 36
COMMENT = 37
NL = 38
WS = 39
IDENT = 40
ANYCHAR = 41
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ u"DEFAULT_MODE" ]
literalNames = [ u"<INVALID>",
u"'MESSAGE'", u"'STRUCT'", u"'GET'", u"'HEAD'", u"'TRACE'",
u"'CONNECT'", u"'OPTIONS'", u"'POST'", u"'PUT'", u"'PATCH'",
u"'DELETE'", u"'REQUEST'", u"'RESPONSE'", u"'/'", u"'{'", u"'}'",
u"'('", u"')'", u"'$'", u"'<'", u"'>'", u"','", u"'='", u"';'",
u"'\\'", u"'INT32'", u"'UINT32'", u"'INT64'", u"'UINT64'", u"'BOOL'",
u"'DOUBLE'", u"'STRING'", u"'FILE'", u"'BLOB'", u"'ARRAY'",
u"'DICT'" ]
symbolicNames = [ u"<INVALID>",
u"MESSAGE", u"STRUCT", u"GET", u"HEAD", u"TRACE", u"CONNECT",
u"OPTIONS", u"POST", u"PUT", u"PATCH", u"DELETE", u"REQUEST",
u"RESPONSE", u"SLASH", u"LCURLY", u"RCURLY", u"LPAREN", u"RPAREN",
u"DOLLAR", u"LABRACKET", u"RABRACKET", u"COMMA", u"ASSIGN",
u"SEMICOLON", u"ESCAPE", u"INT32", u"UINT32", u"INT64", u"UINT64",
u"BOOL", u"DOUBLE", u"STRING", u"FILE", u"BLOB", u"ARRAY", u"DICT",
u"COMMENT", u"NL", u"WS", u"IDENT", u"ANYCHAR" ]
ruleNames = [ u"MESSAGE", u"STRUCT", u"GET", u"HEAD", u"TRACE", u"CONNECT",
u"OPTIONS", u"POST", u"PUT", u"PATCH", u"DELETE", u"REQUEST",
u"RESPONSE", u"SLASH", u"LCURLY", u"RCURLY", u"LPAREN",
u"RPAREN", u"DOLLAR", u"LABRACKET", u"RABRACKET", u"COMMA",
u"ASSIGN", u"SEMICOLON", u"ESCAPE", u"INT32", u"UINT32",
u"INT64", u"UINT64", u"BOOL", u"DOUBLE", u"STRING", u"FILE",
u"BLOB", u"ARRAY", u"DICT", u"COMMENT", u"NL", u"WS",
u"IDENT", u"ANYCHAR", u"ALPHA", u"DIGIT", u"UNDERSCORE" ]
grammarFileName = u"HTTPIDLLexer.g4"
def __init__(self, input=None, output=sys.stdout):
super(HTTPIDLLexer, self).__init__(input, output=output)
self.checkVersion("4.7")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
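# Hedged usage sketch (not part of the generated file; the sample IDL string
# is invented) showing how the lexer plugs into the standard antlr4 runtime:
#
#     from antlr4 import InputStream, CommonTokenStream
#     lexer = HTTPIDLLexer(InputStream(u"MESSAGE /ping { GET { } }"))
#     tokens = CommonTokenStream(lexer)
#     tokens.fill()
#     for token in tokens.tokens:
#         print token.text, token.type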
|
cikelengfeng/HTTPIDL
|
Sources/Compiler/Parser/HTTPIDLLexer.py
|
Python
|
mit
| 12,882
|
import pdfcoloursplit
import os
import zipfile
from .config import config
from celery import Celery
celery_broker = config["Celery"]["broker"]
celery_backend = config["Celery"]["backend"]
app = Celery("worker", broker=celery_broker, backend=celery_backend)
@app.task
def split_pdf(temp_dir, pdf_filename, duplex, stackable):
os.chdir(temp_dir)
files_written = pdfcoloursplit.split_pdf(pdf_filename, duplex, stackable)
if pdf_filename.lower().endswith(".pdf"):
zip_filename = pdf_filename[:-4] + ".zip"
else:
zip_filename = pdf_filename + ".zip"
with zipfile.ZipFile(zip_filename, "w") as z:
for filename in files_written:
z.write(filename)
return zip_filename
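# Hedged usage sketch (directory and filename are invented): enqueue the task
# from the web tier, then block for the resulting zip name.
#
#     result = split_pdf.delay("/tmp/job-1", "input.pdf", duplex=True,
#                              stackable=False)
#     zip_filename = result.get(timeout=60)   # -> "input.zip"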
|
camerongray1515/pdf-colour-split
|
pdfcoloursplit_web/pdfcoloursplit_web/worker.py
|
Python
|
mit
| 725
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
# IMPORTANT: only import safe functions as this module will be included in jinja environment
import frappe
import operator
import re, urllib, datetime, math
import babel.dates
from dateutil import parser
from num2words import num2words
import HTMLParser
from html2text import html2text
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S.%f"
DATETIME_FORMAT = DATE_FORMAT + " " + TIME_FORMAT
# datetime functions
def getdate(string_date=None):
"""
	Converts a string date (yyyy-mm-dd) to a datetime.date object
"""
if not string_date:
return get_datetime().date()
if isinstance(string_date, datetime.datetime):
return string_date.date()
elif isinstance(string_date, datetime.date):
return string_date
# dateutil parser does not agree with dates like 0000-00-00
if not string_date or string_date=="0000-00-00":
return None
return parser.parse(string_date).date()
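# Hand-computed examples:
#   getdate("2015-10-01")  -> datetime.date(2015, 10, 1)
#   getdate(None)          -> today's date (via get_datetime().date())
#   getdate("0000-00-00")  -> None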
def get_datetime(datetime_str=None):
if not datetime_str:
return now_datetime()
if isinstance(datetime_str, (datetime.datetime, datetime.timedelta)):
return datetime_str
elif isinstance(datetime_str, (list, tuple)):
		return datetime.datetime(*datetime_str)
elif isinstance(datetime_str, datetime.date):
return datetime.datetime.combine(datetime_str, datetime.time())
# dateutil parser does not agree with dates like 0000-00-00
if not datetime_str or (datetime_str or "").startswith("0000-00-00"):
return None
return parser.parse(datetime_str)
def to_timedelta(time_str):
if isinstance(time_str, basestring):
t = parser.parse(time_str)
return datetime.timedelta(hours=t.hour, minutes=t.minute, seconds=t.second, microseconds=t.microsecond)
else:
return time_str
def add_to_date(date, years=0, months=0, days=0):
"""Adds `days` to the given date"""
from dateutil.relativedelta import relativedelta
as_string, as_datetime = False, False
if date==None:
date = now_datetime()
if isinstance(date, basestring):
as_string = True
if " " in date:
as_datetime = True
date = parser.parse(date)
date = date + relativedelta(years=years, months=months, days=days)
if as_string:
if as_datetime:
return date.strftime(DATETIME_FORMAT)
else:
return date.strftime(DATE_FORMAT)
else:
return date
def add_days(date, days):
return add_to_date(date, days=days)
def add_months(date, months):
return add_to_date(date, months=months)
def add_years(date, years):
return add_to_date(date, years=years)
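# Hand-computed examples (string in, string out; no space means DATE_FORMAT):
#   add_days("2015-01-31", 1)    -> "2015-02-01"
#   add_months("2015-01-31", 1)  -> "2015-02-28"   # relativedelta clamps to month end
#   add_years("2016-02-29", 1)   -> "2017-02-28"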
def date_diff(string_ed_date, string_st_date):
return (getdate(string_ed_date) - getdate(string_st_date)).days
def time_diff(string_ed_date, string_st_date):
return get_datetime(string_ed_date) - get_datetime(string_st_date)
def time_diff_in_seconds(string_ed_date, string_st_date):
return time_diff(string_ed_date, string_st_date).total_seconds()
def time_diff_in_hours(string_ed_date, string_st_date):
return round(float(time_diff(string_ed_date, string_st_date).total_seconds()) / 3600, 6)
def now_datetime():
dt = convert_utc_to_user_timezone(datetime.datetime.utcnow())
return dt.replace(tzinfo=None)
def _get_time_zone():
return frappe.db.get_system_setting('time_zone') or 'Asia/Kolkata'
def get_time_zone():
if frappe.local.flags.in_test:
return _get_time_zone()
return frappe.cache().get_value("time_zone", _get_time_zone)
def convert_utc_to_user_timezone(utc_timestamp):
from pytz import timezone, UnknownTimeZoneError
utcnow = timezone('UTC').localize(utc_timestamp)
try:
return utcnow.astimezone(timezone(get_time_zone()))
except UnknownTimeZoneError:
return utcnow
def now():
"""return current datetime as yyyy-mm-dd hh:mm:ss"""
if getattr(frappe.local, "current_date", None):
return getdate(frappe.local.current_date).strftime(DATE_FORMAT) + " " + \
now_datetime().strftime(TIME_FORMAT)
else:
return now_datetime().strftime(DATETIME_FORMAT)
def nowdate():
"""return current date as yyyy-mm-dd"""
return now_datetime().strftime(DATE_FORMAT)
def today():
return nowdate()
def nowtime():
"""return current time in hh:mm"""
return now_datetime().strftime(TIME_FORMAT)
def get_first_day(dt, d_years=0, d_months=0):
"""
Returns the first day of the month for the date specified by date object
Also adds `d_years` and `d_months` if specified
"""
dt = getdate(dt)
# d_years, d_months are "deltas" to apply to dt
overflow_years, month = divmod(dt.month + d_months - 1, 12)
year = dt.year + d_years + overflow_years
return datetime.date(year, month + 1, 1)
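# Example of the divmod overflow handling (hand-computed):
#   get_first_day("2015-10-15")        -> datetime.date(2015, 10, 1)
#   get_first_day("2015-10-15", 0, 3)  -> datetime.date(2016, 1, 1)   # rolls over the year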
def get_last_day(dt):
"""
Returns last day of the month using:
`get_first_day(dt, 0, 1) + datetime.timedelta(-1)`
"""
return get_first_day(dt, 0, 1) + datetime.timedelta(-1)
def get_time(time_str):
if isinstance(time_str, datetime.datetime):
return time_str.time()
elif isinstance(time_str, datetime.time):
return time_str
return parser.parse(time_str).time()
def get_datetime_str(datetime_obj):
if isinstance(datetime_obj, basestring):
datetime_obj = get_datetime(datetime_obj)
return datetime_obj.strftime(DATETIME_FORMAT)
def get_user_format():
if getattr(frappe.local, "user_format", None) is None:
frappe.local.user_format = frappe.db.get_default("date_format")
return frappe.local.user_format or "yyyy-mm-dd"
def formatdate(string_date=None, format_string=None):
"""
	Converts the given string date to :data:`user_format`
User format specified in defaults
Examples:
* dd-mm-yyyy
* mm-dd-yyyy
* dd/mm/yyyy
"""
date = getdate(string_date) if string_date else now_datetime().date()
if not format_string:
format_string = get_user_format().replace("mm", "MM")
return babel.dates.format_date(date, format_string, locale=(frappe.local.lang or "").replace("-", "_"))
def format_time(txt):
return babel.dates.format_time(get_time(txt), locale=(frappe.local.lang or "").replace("-", "_"))
def format_datetime(datetime_string, format_string=None):
if not datetime_string:
return
datetime = get_datetime(datetime_string)
if not format_string:
format_string = get_user_format().replace("mm", "MM") + " HH:mm:ss"
return babel.dates.format_datetime(datetime, format_string, locale=(frappe.local.lang or "").replace("-", "_"))
def global_date_format(date):
"""returns date as 1 January 2012"""
formatted_date = getdate(date).strftime("%d %B %Y")
return formatted_date.startswith("0") and formatted_date[1:] or formatted_date
def has_common(l1, l2):
"""Returns truthy value if there are common elements in lists l1 and l2"""
return set(l1) & set(l2)
def flt(s, precision=None):
"""Convert to float (ignore commas)"""
if isinstance(s, basestring):
s = s.replace(',','')
try:
num = float(s)
if precision is not None:
num = rounded(num, precision)
except Exception:
num = 0
return num
def cint(s):
"""Convert to integer"""
try: num = int(float(s))
except: num = 0
return num
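# Hand-computed conversions:
#   flt("1,000.50") -> 1000.5   # commas stripped before float()
#   flt("oops")     -> 0        # any parse error falls back to 0
#   cint("3.7")     -> 3        # int(float(...)) truncates
#   cint(None)      -> 0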
def cstr(s):
if isinstance(s, unicode):
return s
elif s==None:
return ''
elif isinstance(s, basestring):
return unicode(s, 'utf-8')
else:
return unicode(s)
def rounded(num, precision=0):
"""round method for round halfs to nearest even algorithm aka banker's rounding - compatible with python3"""
precision = cint(precision)
multiplier = 10 ** precision
# avoid rounding errors
num = round(num * multiplier if precision else num, 8)
floor = math.floor(num)
decimal_part = num - floor
if not precision and decimal_part == 0.5:
num = floor if (floor % 2 == 0) else floor + 1
else:
num = round(num)
return (num / multiplier) if precision else num
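# Banker's-rounding examples at precision 0 (hand-computed; math.floor
# returns a float under Python 2, hence the .0):
#   rounded(0.5) -> 0.0   # floor 0 is even, keep it
#   rounded(1.5) -> 2.0   # floor 1 is odd, bump to even
#   rounded(2.5) -> 2.0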
def remainder(numerator, denominator, precision=2):
precision = cint(precision)
multiplier = 10 ** precision
if precision:
_remainder = ((numerator * multiplier) % (denominator * multiplier)) / multiplier
else:
_remainder = numerator % denominator
	return flt(_remainder, precision)
def round_based_on_smallest_currency_fraction(value, currency, precision=2):
smallest_currency_fraction_value = flt(frappe.db.get_value("Currency",
currency, "smallest_currency_fraction_value"))
if smallest_currency_fraction_value:
remainder_val = remainder(value, smallest_currency_fraction_value, precision)
if remainder_val > (smallest_currency_fraction_value / 2):
value += smallest_currency_fraction_value - remainder_val
else:
value -= remainder_val
else:
value = rounded(value)
return flt(value, precision)
def encode(obj, encoding="utf-8"):
if isinstance(obj, list):
out = []
for o in obj:
if isinstance(o, unicode):
out.append(o.encode(encoding))
else:
out.append(o)
return out
elif isinstance(obj, unicode):
return obj.encode(encoding)
else:
return obj
def parse_val(v):
"""Converts to simple datatypes from SQL query results"""
if isinstance(v, (datetime.date, datetime.datetime)):
v = unicode(v)
elif isinstance(v, datetime.timedelta):
v = ":".join(unicode(v).split(":")[:2])
elif isinstance(v, long):
v = int(v)
return v
def fmt_money(amount, precision=None, currency=None):
"""
Convert to string with commas for thousands, millions etc
"""
number_format = None
if currency:
number_format = frappe.db.get_value("Currency", currency, "number_format", cache=True)
if not number_format:
number_format = frappe.db.get_default("number_format") or "#,###.##"
decimal_str, comma_str, number_format_precision = get_number_format_info(number_format)
if precision is None:
precision = number_format_precision
amount = '%.*f' % (precision, flt(amount))
if amount.find('.') == -1:
decimals = ''
else:
decimals = amount.split('.')[1]
parts = []
minus = ''
if flt(amount) < 0:
minus = '-'
amount = cstr(abs(flt(amount))).split('.')[0]
if len(amount) > 3:
parts.append(amount[-3:])
amount = amount[:-3]
val = number_format=="#,##,###.##" and 2 or 3
while len(amount) > val:
parts.append(amount[-val:])
amount = amount[:-val]
parts.append(amount)
parts.reverse()
amount = comma_str.join(parts) + ((precision and decimal_str) and (decimal_str + decimals) or "")
amount = minus + amount
if currency and frappe.defaults.get_global_default("hide_currency_symbol") != "Yes":
symbol = frappe.db.get_value("Currency", currency, "symbol") or currency
amount = symbol + " " + amount
return amount
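# Worked example of the grouping logic, assuming no currency row is involved
# and the number format falls back to the default "#,###.##":
#   fmt_money(-1234567.891)              -> "-1,234,567.89"
#   fmt_money(1234567.891, precision=0)  -> "1,234,568"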
number_format_info = {
"#,###.##": (".", ",", 2),
"#.###,##": (",", ".", 2),
"# ###.##": (".", " ", 2),
"# ###,##": (",", " ", 2),
"#'###.##": (".", "'", 2),
"#, ###.##": (".", ", ", 2),
"#,##,###.##": (".", ",", 2),
"#,###.###": (".", ",", 3),
"#.###": ("", ".", 0),
"#,###": ("", ",", 0)
}
def get_number_format_info(format):
return number_format_info.get(format) or (".", ",", 2)
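# Each tuple is (decimal_str, comma_str, precision), e.g.:
#   get_number_format_info("#.###,##")  -> (",", ".", 2)   # European style
#   get_number_format_info("unknown")   -> (".", ",", 2)   # safe fallback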
#
# convert currency to words
#
def money_in_words(number, main_currency = None, fraction_currency=None):
"""
Returns string in words with currency and fraction currency.
"""
from frappe.utils import get_defaults
_ = frappe._
if not number or flt(number) < 0:
return ""
d = get_defaults()
if not main_currency:
main_currency = d.get('currency', 'INR')
if not fraction_currency:
fraction_currency = frappe.db.get_value("Currency", main_currency, "fraction") or _("Cent")
n = "%.2f" % flt(number)
main, fraction = n.split('.')
if len(fraction)==1: fraction += '0'
number_format = frappe.db.get_value("Currency", main_currency, "number_format", cache=True) or \
frappe.db.get_default("number_format") or "#,###.##"
in_million = True
if number_format == "#,##,###.##": in_million = False
out = main_currency + ' ' + in_words(main, in_million).title()
if cint(fraction):
out = out + ' ' + _('and') + ' ' + in_words(fraction, in_million).title() + ' ' + fraction_currency
return out + ' ' + _('only.')
#
# convert number to words
#
def in_words(integer, in_million=True):
"""
Returns string in words for the given integer.
"""
locale = 'en_IN' if not in_million else frappe.local.lang
integer = int(integer)
try:
ret = num2words(integer, lang=locale)
except NotImplementedError:
ret = num2words(integer, lang='en')
return ret.replace('-', ' ')
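# Hedged examples, assuming frappe.local.lang resolves to 'en' (num2words
# falls back to 'en' for unsupported locales via the except clause):
#   in_words(42)  -> u'forty two'   (num2words' u'forty-two' with the dash replaced)
#   in_words(1500000, in_million=False) uses the 'en_IN' locale, i.e. lakh/crore wording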
def is_html(text):
out = False
for key in ["<br>", "<p", "<img", "<div"]:
if key in text:
out = True
break
return out
# from Jinja2 code
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
def strip_html(text):
"""removes anything enclosed in and including <>"""
return _striptags_re.sub("", text)
def escape_html(text):
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
return "".join(html_escape_table.get(c,c) for c in text)
def pretty_date(iso_datetime):
"""
Takes an ISO time and returns a string representing how
long ago the date represents.
Ported from PrettyDate by John Resig
"""
if not iso_datetime: return ''
import math
if isinstance(iso_datetime, basestring):
iso_datetime = datetime.datetime.strptime(iso_datetime, DATETIME_FORMAT)
now_dt = datetime.datetime.strptime(now(), DATETIME_FORMAT)
dt_diff = now_dt - iso_datetime
# available only in python 2.7+
# dt_diff_seconds = dt_diff.total_seconds()
dt_diff_seconds = dt_diff.days * 86400.0 + dt_diff.seconds
dt_diff_days = math.floor(dt_diff_seconds / 86400.0)
	# different cases
if dt_diff_seconds < 60.0:
return 'just now'
elif dt_diff_seconds < 120.0:
return '1 minute ago'
elif dt_diff_seconds < 3600.0:
return '%s minutes ago' % cint(math.floor(dt_diff_seconds / 60.0))
elif dt_diff_seconds < 7200.0:
return '1 hour ago'
elif dt_diff_seconds < 86400.0:
return '%s hours ago' % cint(math.floor(dt_diff_seconds / 3600.0))
elif dt_diff_days == 1.0:
return 'Yesterday'
elif dt_diff_days < 7.0:
return '%s days ago' % cint(dt_diff_days)
elif dt_diff_days < 31.0:
return '%s week(s) ago' % cint(math.ceil(dt_diff_days / 7.0))
elif dt_diff_days < 365.0:
return '%s months ago' % cint(math.ceil(dt_diff_days / 30.0))
else:
return 'more than %s year(s) ago' % cint(math.floor(dt_diff_days / 365.0))
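# Threshold examples (hand-computed from the branches above):
#   90 seconds ago  -> '1 minute ago'
#   45 minutes ago  -> '45 minutes ago'
#   3 days ago      -> '3 days ago'
#   10 days ago     -> '2 week(s) ago'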
def comma_or(some_list):
return comma_sep(some_list, frappe._("{0} or {1}"))
def comma_and(some_list):
return comma_sep(some_list, frappe._("{0} and {1}"))
def comma_sep(some_list, pattern):
if isinstance(some_list, (list, tuple)):
# list(some_list) is done to preserve the existing list
some_list = [unicode(s) for s in list(some_list)]
if not some_list:
return ""
elif len(some_list) == 1:
return some_list[0]
else:
some_list = ["'%s'" % s for s in some_list]
return pattern.format(", ".join(frappe._(s) for s in some_list[:-1]), some_list[-1])
else:
return some_list
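# Hand-computed examples (frappe._ is assumed to be the identity here):
#   comma_and(["Open", "Working", "Closed"]) -> "'Open', 'Working' and 'Closed'"
#   comma_or(["a"])                          -> "a"   # single items come back bare, unquoted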
def new_line_sep(some_list):
if isinstance(some_list, (list, tuple)):
# list(some_list) is done to preserve the existing list
some_list = [unicode(s) for s in list(some_list)]
if not some_list:
return ""
elif len(some_list) == 1:
return some_list[0]
else:
some_list = ["%s" % s for s in some_list]
return format("\n ".join(some_list))
else:
return some_list
def filter_strip_join(some_list, sep):
"""given a list, filter None values, strip spaces and join"""
return (cstr(sep)).join((cstr(a).strip() for a in filter(None, some_list)))
def get_url(uri=None, full_address=False):
"""get app url from request"""
host_name = frappe.local.conf.host_name
if uri and (uri.startswith("http://") or uri.startswith("https://")):
return uri
if not host_name:
if hasattr(frappe.local, "request") and frappe.local.request and frappe.local.request.host:
protocol = 'https' == frappe.get_request_header('X-Forwarded-Proto', "") and 'https://' or 'http://'
host_name = protocol + frappe.local.request.host
elif frappe.local.site:
host_name = "http://{}".format(frappe.local.site)
else:
host_name = frappe.db.get_value("Website Settings", "Website Settings",
"subdomain")
if host_name and "http" not in host_name:
host_name = "http://" + host_name
if not host_name:
host_name = "http://localhost"
if not uri and full_address:
uri = frappe.get_request_header("REQUEST_URI", "")
url = urllib.basejoin(host_name, uri) if uri else host_name
return url
def get_host_name():
return get_url().rsplit("//", 1)[-1]
def get_link_to_form(doctype, name, label=None):
if not label: label = name
return """<a href="{0}">{1}</a>""".format(get_url_to_form(doctype, name), label)
def get_url_to_form(doctype, name):
return get_url(uri = "desk#Form/{0}/{1}".format(quoted(doctype), quoted(name)))
def get_url_to_list(doctype):
return get_url(uri = "desk#List/{0}".format(quoted(doctype)))
operator_map = {
# startswith
"^": lambda (a, b): (a or "").startswith(b),
# in or not in a list
"in": lambda (a, b): operator.contains(b, a),
"not in": lambda (a, b): not operator.contains(b, a),
# comparison operators
"=": lambda (a, b): operator.eq(a, b),
"!=": lambda (a, b): operator.ne(a, b),
">": lambda (a, b): operator.gt(a, b),
"<": lambda (a, b): operator.lt(a, b),
">=": lambda (a, b): operator.ge(a, b),
"<=": lambda (a, b): operator.le(a, b),
"not None": lambda (a, b): a and True or False,
"None": lambda (a, b): (not a) and True or False
}
def evaluate_filters(doc, filters):
'''Returns true if doc matches filters'''
if isinstance(filters, dict):
for key, value in filters.iteritems():
f = get_filter(None, {key:value})
if not compare(doc.get(f.fieldname), f.operator, f.value):
return False
elif isinstance(filters, (list, tuple)):
for d in filters:
f = get_filter(None, d)
if not compare(doc.get(f.fieldname), f.operator, f.value):
return False
return True
def compare(val1, condition, val2):
ret = False
if condition in operator_map:
ret = operator_map[condition]((val1, val2))
return ret
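# Examples for the tuple-unpacking lambdas above (Python 2-only syntax):
#   compare("foobar", "^", "foo")  -> True
#   compare(3, "in", [1, 2, 3])    -> True
#   compare(None, "None", None)    -> True
#   compare(1, "unknown", 2)       -> False   # unrecognised operators return False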
def get_filter(doctype, f):
"""Returns a _dict like
{
"doctype":
"fieldname":
"operator":
"value":
}
"""
from frappe.model import default_fields, optional_fields
if isinstance(f, dict):
key, value = f.items()[0]
f = make_filter_tuple(doctype, key, value)
if not isinstance(f, (list, tuple)):
frappe.throw("Filter must be a tuple or list (in a list)")
if len(f) == 3:
f = (doctype, f[0], f[1], f[2])
elif len(f) != 4:
frappe.throw("Filter must have 4 values (doctype, fieldname, operator, value): {0}".format(str(f)))
f = frappe._dict(doctype=f[0], fieldname=f[1], operator=f[2], value=f[3])
if not f.operator:
# if operator is missing
f.operator = "="
valid_operators = ("=", "!=", ">", "<", ">=", "<=", "like", "not like", "in", "not in")
if f.operator not in valid_operators:
frappe.throw("Operator must be one of {0}".format(", ".join(valid_operators)))
if f.doctype and (f.fieldname not in default_fields + optional_fields):
# verify fieldname belongs to the doctype
meta = frappe.get_meta(f.doctype)
if not meta.has_field(f.fieldname):
# try and match the doctype name from child tables
for df in meta.get_table_fields():
if frappe.get_meta(df.options).has_field(f.fieldname):
f.doctype = df.options
break
return f
def make_filter_tuple(doctype, key, value):
'''return a filter tuple like [doctype, key, operator, value]'''
if isinstance(value, (list, tuple)):
return [doctype, key, value[0], value[1]]
else:
return [doctype, key, "=", value]
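# Hand-computed examples of the normalisation:
#   make_filter_tuple("Task", "status", "Open")
#       -> ["Task", "status", "=", "Open"]
#   make_filter_tuple("Task", "status", ("in", ["Open", "Working"]))
#       -> ["Task", "status", "in", ["Open", "Working"]]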
def scrub_urls(html):
html = expand_relative_urls(html)
# encoding should be responsibility of the composer
# html = quote_urls(html)
return html
def expand_relative_urls(html):
# expand relative urls
url = get_url()
if url.endswith("/"): url = url[:-1]
def _expand_relative_urls(match):
to_expand = list(match.groups())
if not to_expand[2].startswith("/"):
to_expand[2] = "/" + to_expand[2]
to_expand.insert(2, url)
return "".join(to_expand)
return re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?!http)[^\'" >]+)([\'"]?)', _expand_relative_urls, html)
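# Illustrative rewrite, assuming get_url() returns "http://example.com":
#   '<img src="/files/logo.png">' -> '<img src="http://example.com/files/logo.png">'
# Absolute http(s) URLs are untouched thanks to the (?!http) lookahead.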
def quoted(url):
return cstr(urllib.quote(encode(url), safe=b"~@#$&()*!+=:;,.?/'"))
def quote_urls(html):
def _quote_url(match):
groups = list(match.groups())
groups[2] = quoted(groups[2])
return "".join(groups)
return re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?:http)[^\'">]+)([\'"]?)',
_quote_url, html)
def unique(seq):
"""use this instead of list(set()) to preserve order of the original list.
Thanks to Stackoverflow: http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order"""
seen = set()
seen_add = seen.add
return [ x for x in seq if not (x in seen or seen_add(x)) ]
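# Order-preserving de-duplication, e.g. unique([3, 1, 3, 2, 1]) -> [3, 1, 2]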
def strip(val, chars=None):
	# \ufeff is a zero-width no-break space, \u200b is a zero-width space
return (val or "").replace("\ufeff", "").replace("\u200b", "").strip(chars)
def to_markdown(html):
text = None
try:
text = html2text(html)
except HTMLParser.HTMLParseError:
pass
return text
|
Amber-Creative/amber-frappe
|
frappe/utils/data.py
|
Python
|
mit
| 20,775
|
import json
import datetime
import traceback
import re
from base64 import b64encode
from ast import literal_eval
from flask import Blueprint, render_template, render_template_string, make_response, url_for, current_app, request, redirect, jsonify, abort, flash, session
from flask_login import login_required, current_user
from ..decorators import operator_role_required, admin_role_required, history_access_required
from ..models.user import User
from ..models.account import Account
from ..models.account_user import AccountUser
from ..models.role import Role
from ..models.server import Server
from ..models.setting import Setting
from ..models.history import History
from ..models.domain import Domain
from ..models.domain_user import DomainUser
from ..models.record import Record
from ..models.domain_template import DomainTemplate
from ..models.domain_template_record import DomainTemplateRecord
from ..models.api_key import ApiKey
from ..models.base import db
from ..lib.schema import ApiPlainKeySchema
from ..lib.errors import ApiKeyCreateFail  # raised by edit_key() below
apikey_plain_schema = ApiPlainKeySchema(many=True)
admin_bp = Blueprint('admin',
__name__,
template_folder='templates',
url_prefix='/admin')
"""
changeSet is a list of tuples, in the following format
(old_state, new_state, change_type)
old_state: dictionary with "disabled" and "content" keys. {"disabled" : False, "content" : "1.1.1.1" }
new_state: similarly
change_type: "addition" or "deletion" or "status" for status change or "unchanged" for no change
Note: A change in "content" is considered a deletion and recreation of the same record,
holding the new content value.
"""
def get_record_changes(del_rrest, add_rrest):
changeSet = []
delSet = del_rrest['records'] if 'records' in del_rrest else []
addSet = add_rrest['records'] if 'records' in add_rrest else []
for d in delSet: # get the deletions and status changes
exists = False
for a in addSet:
if d['content'] == a['content']:
exists = True
if d['disabled'] != a['disabled']:
changeSet.append( ({"disabled":d['disabled'],"content":d['content']},
{"disabled":a['disabled'],"content":a['content']},
"status") )
break
if not exists: # deletion
changeSet.append( ({"disabled":d['disabled'],"content":d['content']},
None,
"deletion") )
for a in addSet: # get the additions
exists = False
for d in delSet:
if d['content'] == a['content']:
exists = True
# already checked for status change
break
if not exists:
changeSet.append( (None, {"disabled":a['disabled'], "content":a['content']}, "addition") )
continue
for a in addSet: # get the unchanged
exists = False
for c in changeSet:
if c[1] != None and c[1]["content"] == a['content']:
exists = True
break
if not exists:
changeSet.append( ( {"disabled":a['disabled'], "content":a['content']}, {"disabled":a['disabled'], "content":a['content']}, "unchanged") )
return changeSet
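# Hand-worked example with invented record data:
#   del_rrest = {'records': [{'content': '1.1.1.1', 'disabled': False}]}
#   add_rrest = {'records': [{'content': '2.2.2.2', 'disabled': False}]}
#   get_record_changes(del_rrest, add_rrest) ->
#       [({'disabled': False, 'content': '1.1.1.1'}, None, 'deletion'),
#        (None, {'disabled': False, 'content': '2.2.2.2'}, 'addition')]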
# out_changes is a list of HistoryRecordEntry objects in which we will append the new changes
# a HistoryRecordEntry represents a pair of add_rrest and del_rrest
def extract_changelogs_from_a_history_entry(out_changes, history_entry, change_num, record_name=None, record_type=None):
if history_entry.detail is None:
return
if "add_rrests" in history_entry.detail:
detail_dict = json.loads(history_entry.detail.replace("\'", ''))
else: # not a record entry
return
add_rrests = detail_dict['add_rrests']
del_rrests = detail_dict['del_rrests']
for add_rrest in add_rrests:
exists = False
for del_rrest in del_rrests:
if del_rrest['name'] == add_rrest['name'] and del_rrest['type'] == add_rrest['type']:
exists = True
if change_num not in out_changes:
out_changes[change_num] = []
out_changes[change_num].append(HistoryRecordEntry(history_entry, del_rrest, add_rrest, "*"))
break
if not exists: # this is a new record
if change_num not in out_changes:
out_changes[change_num] = []
            out_changes[change_num].append(HistoryRecordEntry(history_entry, [], add_rrest, "+"))  # new (name, type) pair: empty del_rrest
for del_rrest in del_rrests:
exists = False
for add_rrest in add_rrests:
if del_rrest['name'] == add_rrest['name'] and del_rrest['type'] == add_rrest['type']:
exists = True # no need to add in the out_changes set
break
if not exists: # this is a deletion
if change_num not in out_changes:
out_changes[change_num] = []
out_changes[change_num].append(HistoryRecordEntry(history_entry, del_rrest, [], "-"))
# only used for changelog per record
if record_name != None and record_type != None: # then get only the records with the specific (record_name, record_type) tuple
if change_num in out_changes:
changes_i = out_changes[change_num]
else:
return
for hre in changes_i: # for each history record entry in changes_i
if 'type' in hre.add_rrest and hre.add_rrest['name'] == record_name and hre.add_rrest['type'] == record_type:
continue
elif 'type' in hre.del_rrest and hre.del_rrest['name'] == record_name and hre.del_rrest['type'] == record_type:
continue
else:
out_changes[change_num].remove(hre)
# records with same (name,type) are considered as a single HistoryRecordEntry
# history_entry is of type History - used to extract created_by and created_on
# add_rrest is a dictionary of replace
# del_rrest is a dictionary of remove
class HistoryRecordEntry:
def __init__(self, history_entry, del_rrest, add_rrest, change_type):
# search the add_rrest index into the add_rrest set for the key (name, type)
self.history_entry = history_entry
self.add_rrest = add_rrest
self.del_rrest = del_rrest
self.change_type = change_type # "*": edit or unchanged, "+" new tuple(name,type), "-" deleted (name,type) tuple
self.changed_fields = [] # contains a subset of : [ttl, name, type]
self.changeSet = [] # all changes for the records of this add_rrest-del_rrest pair
if change_type == "+": # addition
self.changed_fields.append("name")
self.changed_fields.append("type")
self.changed_fields.append("ttl")
self.changeSet = get_record_changes(del_rrest, add_rrest)
elif change_type == "-": # removal
self.changed_fields.append("name")
self.changed_fields.append("type")
self.changed_fields.append("ttl")
self.changeSet = get_record_changes(del_rrest, add_rrest)
        elif change_type == "*": # edit or unchanged
if add_rrest['ttl'] != del_rrest['ttl']:
self.changed_fields.append("ttl")
self.changeSet = get_record_changes(del_rrest, add_rrest)
def toDict(self):
return {
"add_rrest" : self.add_rrest,
"del_rrest" : self.del_rrest,
"changed_fields" : self.changed_fields,
"created_on" : self.history_entry.created_on,
"created_by" : self.history_entry.created_by,
"change_type" : self.change_type,
"changeSet" : self.changeSet
}
def __eq__(self, obj2): # used for removal of objects from a list
        return obj2.toDict() == self.toDict()
@admin_bp.before_request
def before_request():
# Manage session timeout
session.permanent = True
current_app.permanent_session_lifetime = datetime.timedelta(
minutes=int(Setting().get('session_timeout')))
session.modified = True
@admin_bp.route('/pdns', methods=['GET'])
@login_required
@operator_role_required
def pdns_stats():
if not Setting().get('pdns_api_url') or not Setting().get(
'pdns_api_key') or not Setting().get('pdns_version'):
return redirect(url_for('admin.setting_pdns'))
domains = Domain.query.all()
users = User.query.all()
server = Server(server_id='localhost')
configs = server.get_config()
statistics = server.get_statistic()
history_number = History.query.count()
if statistics:
uptime = list([
uptime for uptime in statistics if uptime['name'] == 'uptime'
])[0]['value']
else:
uptime = 0
return render_template('admin_pdns_stats.html',
domains=domains,
users=users,
configs=configs,
statistics=statistics,
uptime=uptime,
history_number=history_number)
@admin_bp.route('/user/edit/<user_username>', methods=['GET', 'POST'])
@admin_bp.route('/user/edit', methods=['GET', 'POST'])
@login_required
@operator_role_required
def edit_user(user_username=None):
if user_username:
user = User.query.filter(User.username == user_username).first()
create = False
if not user:
return render_template('errors/404.html'), 404
if user.role.name == 'Administrator' and current_user.role.name != 'Administrator':
return render_template('errors/401.html'), 401
else:
user = None
create = True
if request.method == 'GET':
return render_template('admin_edit_user.html',
user=user,
create=create)
elif request.method == 'POST':
fdata = request.form
if create:
user_username = fdata.get('username', '').strip()
user = User(username=user_username,
plain_text_password=fdata.get('password', ''),
firstname=fdata.get('firstname', '').strip(),
lastname=fdata.get('lastname', '').strip(),
email=fdata.get('email', '').strip(),
reload_info=False)
if create:
if not fdata.get('password', ''):
return render_template('admin_edit_user.html',
user=user,
create=create,
blank_password=True)
result = user.create_local_user()
history = History(msg='Created user {0}'.format(user.username),
created_by=current_user.username)
else:
result = user.update_local_user()
history = History(msg='Updated user {0}'.format(user.username),
created_by=current_user.username)
if result['status']:
history.add()
return redirect(url_for('admin.manage_user'))
return render_template('admin_edit_user.html',
user=user,
create=create,
error=result['msg'])
@admin_bp.route('/key/edit/<key_id>', methods=['GET', 'POST'])
@admin_bp.route('/key/edit', methods=['GET', 'POST'])
@login_required
@operator_role_required
def edit_key(key_id=None):
domains = Domain.query.all()
accounts = Account.query.all()
roles = Role.query.all()
apikey = None
create = True
plain_key = None
if key_id:
apikey = ApiKey.query.filter(ApiKey.id == key_id).first()
create = False
if not apikey:
return render_template('errors/404.html'), 404
if request.method == 'GET':
return render_template('admin_edit_key.html',
key=apikey,
domains=domains,
accounts=accounts,
roles=roles,
create=create)
if request.method == 'POST':
fdata = request.form
description = fdata['description']
role = fdata.getlist('key_role')[0]
domain_list = fdata.getlist('key_multi_domain')
account_list = fdata.getlist('key_multi_account')
# Create new apikey
if create:
if role == "User":
domain_obj_list = Domain.query.filter(Domain.name.in_(domain_list)).all()
account_obj_list = Account.query.filter(Account.name.in_(account_list)).all()
else:
account_obj_list, domain_obj_list = [], []
apikey = ApiKey(desc=description,
role_name=role,
domains=domain_obj_list,
accounts=account_obj_list)
try:
apikey.create()
except Exception as e:
current_app.logger.error('Error: {0}'.format(e))
raise ApiKeyCreateFail(message='Api key create failed')
plain_key = apikey_plain_schema.dump([apikey])[0]["plain_key"]
plain_key = b64encode(plain_key.encode('utf-8')).decode('utf-8')
history_message = "Created API key {0}".format(apikey.id)
# Update existing apikey
else:
try:
if role != "User":
domain_list, account_list = [], []
apikey.update(role,description,domain_list, account_list)
history_message = "Updated API key {0}".format(apikey.id)
except Exception as e:
current_app.logger.error('Error: {0}'.format(e))
history = History(msg=history_message,
detail=str({
'key': apikey.id,
'role': apikey.role.name,
'description': apikey.description,
'domains': [domain.name for domain in apikey.domains],
'accounts': [a.name for a in apikey.accounts]
}),
created_by=current_user.username)
history.add()
return render_template('admin_edit_key.html',
key=apikey,
domains=domains,
accounts=accounts,
roles=roles,
create=create,
plain_key=plain_key)
@admin_bp.route('/manage-keys', methods=['GET', 'POST'])
@login_required
@operator_role_required
def manage_keys():
if request.method == 'GET':
try:
apikeys = ApiKey.query.all()
except Exception as e:
current_app.logger.error('Error: {0}'.format(e))
abort(500)
return render_template('admin_manage_keys.html',
keys=apikeys)
elif request.method == 'POST':
jdata = request.json
if jdata['action'] == 'delete_key':
apikey = ApiKey.query.get(jdata['data'])
try:
history_apikey_id = apikey.id
history_apikey_role = apikey.role.name
history_apikey_description = apikey.description
history_apikey_domains = [ domain.name for domain in apikey.domains]
apikey.delete()
except Exception as e:
current_app.logger.error('Error: {0}'.format(e))
current_app.logger.info('Delete API key {0}'.format(apikey.id))
history = History(msg='Delete API key {0}'.format(apikey.id),
detail=str({
'key': history_apikey_id,
'role': history_apikey_role,
'description': history_apikey_description,
'domains': history_apikey_domains
}),
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
'msg': 'Key has been removed.'
}), 200)
@admin_bp.route('/manage-user', methods=['GET', 'POST'])
@login_required
@operator_role_required
def manage_user():
if request.method == 'GET':
roles = Role.query.all()
users = User.query.order_by(User.username).all()
return render_template('admin_manage_user.html',
users=users,
roles=roles)
if request.method == 'POST':
#
        # post data should be in the format
# {'action': 'delete_user', 'data': 'username'}
#
try:
jdata = request.json
data = jdata['data']
if jdata['action'] == 'user_otp_disable':
user = User(username=data)
result = user.update_profile(enable_otp=False)
if result:
history = History(
msg='Two factor authentication disabled for user {0}'.
format(data),
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status':
'ok',
'msg':
'Two factor authentication has been disabled for user.'
}), 200)
else:
return make_response(
jsonify({
'status':
'error',
'msg':
'Cannot disable two factor authentication for user.'
}), 500)
elif jdata['action'] == 'delete_user':
user = User(username=data)
if user.username == current_user.username:
return make_response(
jsonify({
'status': 'error',
'msg': 'You cannot delete yourself.'
}), 400)
# Remove account associations first
user_accounts = Account.query.join(AccountUser).join(
User).filter(AccountUser.user_id == user.id,
AccountUser.account_id == Account.id).all()
for uc in user_accounts:
uc.revoke_privileges_by_id(user.id)
# Then delete the user
result = user.delete()
if result:
history = History(msg='Delete user {0}'.format(data),
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
'msg': 'User has been removed.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Cannot remove user.'
}), 500)
elif jdata['action'] == 'revoke_user_privileges':
user = User(username=data)
result = user.revoke_privilege()
if result:
history = History(
msg='Revoke {0} user privileges'.format(data),
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
'msg': 'Revoked user privileges.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Cannot revoke user privilege.'
}), 500)
elif jdata['action'] == 'update_user_role':
username = data['username']
role_name = data['role_name']
if username == current_user.username:
return make_response(
jsonify({
'status': 'error',
                            'msg': 'You cannot change your own role.'
}), 400)
user = User.query.filter(User.username == username).first()
if not user:
return make_response(
jsonify({
'status': 'error',
'msg': 'User does not exist.'
}), 404)
if user.role.name == 'Administrator' and current_user.role.name != 'Administrator':
return make_response(
jsonify({
'status':
'error',
'msg':
'You do not have permission to change Administrator users role.'
}), 400)
if role_name == 'Administrator' and current_user.role.name != 'Administrator':
return make_response(
jsonify({
'status':
'error',
'msg':
'You do not have permission to promote a user to Administrator role.'
}), 400)
user = User(username=username)
result = user.set_role(role_name)
if result['status']:
history = History(
msg='Change user role of {0} to {1}'.format(
username, role_name),
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
'msg': 'Changed user role successfully.'
}), 200)
else:
return make_response(
jsonify({
'status':
'error',
'msg':
'Cannot change user role. {0}'.format(
result['msg'])
}), 500)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Action not supported.'
}), 400)
except Exception as e:
current_app.logger.error(
'Cannot update user. Error: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
return make_response(
jsonify({
'status':
'error',
'msg':
'There is something wrong, please contact Administrator.'
}), 400)
@admin_bp.route('/account/edit/<account_name>', methods=['GET', 'POST'])
@admin_bp.route('/account/edit', methods=['GET', 'POST'])
@login_required
@operator_role_required
def edit_account(account_name=None):
users = User.query.all()
if request.method == 'GET':
if account_name is None:
return render_template('admin_edit_account.html',
account_user_ids=[],
users=users,
create=1)
else:
account = Account.query.filter(
Account.name == account_name).first()
account_user_ids = account.get_user()
return render_template('admin_edit_account.html',
account=account,
account_user_ids=account_user_ids,
users=users,
create=0)
if request.method == 'POST':
fdata = request.form
new_user_list = request.form.getlist('account_multi_user')
# on POST, synthesize account and account_user_ids from form data
if not account_name:
account_name = fdata['accountname']
account = Account(name=account_name,
description=fdata['accountdescription'],
contact=fdata['accountcontact'],
mail=fdata['accountmail'])
account_user_ids = []
for username in new_user_list:
userid = User(username=username).get_user_info_by_username().id
account_user_ids.append(userid)
create = int(fdata['create'])
if create:
# account __init__ sanitizes and lowercases the name, so to manage expectations
# we let the user reenter the name until it's not empty and it's valid (ignoring the case)
if account.name == "" or account.name != account_name.lower():
return render_template('admin_edit_account.html',
account=account,
account_user_ids=account_user_ids,
users=users,
create=create,
invalid_accountname=True)
if Account.query.filter(Account.name == account.name).first():
return render_template('admin_edit_account.html',
account=account,
account_user_ids=account_user_ids,
users=users,
create=create,
duplicate_accountname=True)
result = account.create_account()
history = History(msg='Create account {0}'.format(account.name),
created_by=current_user.username)
else:
result = account.update_account()
history = History(msg='Update account {0}'.format(account.name),
created_by=current_user.username)
if result['status']:
account.grant_privileges(new_user_list)
history.add()
return redirect(url_for('admin.manage_account'))
return render_template('admin_edit_account.html',
account=account,
account_user_ids=account_user_ids,
users=users,
create=create,
error=result['msg'])
@admin_bp.route('/manage-account', methods=['GET', 'POST'])
@login_required
@operator_role_required
def manage_account():
if request.method == 'GET':
accounts = Account.query.order_by(Account.name).all()
for account in accounts:
account.user_num = AccountUser.query.filter(
AccountUser.account_id == account.id).count()
return render_template('admin_manage_account.html', accounts=accounts)
if request.method == 'POST':
#
        # post data should be in the format
# {'action': 'delete_account', 'data': 'accountname'}
#
try:
jdata = request.json
data = jdata['data']
if jdata['action'] == 'delete_account':
account = Account.query.filter(Account.name == data).first()
if not account:
return make_response(
jsonify({
'status': 'error',
'msg': 'Account not found.'
}), 404)
# Remove account association from domains first
for domain in account.domains:
Domain(name=domain.name).assoc_account(None)
# Then delete the account
result = account.delete_account()
if result:
history = History(msg='Delete account {0}'.format(data),
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
'msg': 'Account has been removed.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Cannot remove account.'
}), 500)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Action not supported.'
}), 400)
except Exception as e:
current_app.logger.error(
'Cannot update account. Error: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
return make_response(
jsonify({
'status':
'error',
'msg':
'There is something wrong, please contact Administrator.'
}), 400)
class DetailedHistory():
def __init__(self, history, change_set):
self.history = history
self.detailed_msg = ""
self.change_set = change_set
if not history.detail:
self.detailed_msg = ""
return
if 'add_rrest' in history.detail:
detail_dict = json.loads(history.detail.replace("\'", ''))
else:
detail_dict = json.loads(history.detail.replace("'", '"'))
if 'domain_type' in detail_dict and 'account_id' in detail_dict: # this is a domain creation
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Domain type:</td><td>{{ domaintype }}</td></tr>
<tr><td>Account:</td><td>{{ account }}</td></tr>
</table>
""",
domaintype=detail_dict['domain_type'],
account=Account.get_name_by_id(self=None, account_id=detail_dict['account_id']) if detail_dict['account_id'] != "0" else "None")
elif 'authenticator' in detail_dict: # this is a user authentication
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped" style="width:565px;">
<thead>
<tr>
<th colspan="3" style="background: rgba({{ background_rgba }});">
<p style="color:white;">User {{ username }} authentication {{ auth_result }}</p>
</th>
</tr>
</thead>
<tbody>
<tr>
<td>Authenticator Type:</td>
<td colspan="2">{{ authenticator }}</td>
</tr>
<tr>
<td>IP Address</td>
<td colspan="2">{{ ip_address }}</td>
</tr>
</tbody>
</table>
""",
background_rgba="68,157,68" if detail_dict['success'] == 1 else "201,48,44",
username=detail_dict['username'],
auth_result="success" if detail_dict['success'] == 1 else "failure",
authenticator=detail_dict['authenticator'],
ip_address=detail_dict['ip_address'])
elif 'add_rrests' in detail_dict: # this is a domain record change
# changes_set = []
self.detailed_msg = ""
# extract_changelogs_from_a_history_entry(changes_set, history, 0)
elif 'name' in detail_dict and 'template' in history.msg: # template creation / deletion
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Template name:</td><td>{{ template_name }}</td></tr>
<tr><td>Description:</td><td>{{ description }}</td></tr>
</table>
""",
template_name=DetailedHistory.get_key_val(detail_dict, "name"),
description=DetailedHistory.get_key_val(detail_dict, "description"))
elif 'Change domain' in history.msg and 'access control' in history.msg: # added or removed a user from a domain
users_with_access = DetailedHistory.get_key_val(detail_dict, "user_has_access")
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Users with access to this domain</td><td>{{ users_with_access }}</td></tr>
<tr><td>Number of users:</td><td>{{ users_with_access | length }}</td><tr>
</table>
""",
users_with_access=users_with_access)
elif 'Created API key' in history.msg or 'Updated API key' in history.msg:
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Key: </td><td>{{ keyname }}</td></tr>
<tr><td>Role:</td><td>{{ rolename }}</td></tr>
<tr><td>Description:</td><td>{{ description }}</td></tr>
<tr><td>Accessible domains with this API key:</td><td>{{ linked_domains }}</td></tr>
<tr><td>Accessible accounts with this API key:</td><td>{{ linked_accounts }}</td></tr>
</table>
""",
keyname=DetailedHistory.get_key_val(detail_dict, "key"),
rolename=DetailedHistory.get_key_val(detail_dict, "role"),
description=DetailedHistory.get_key_val(detail_dict, "description"),
linked_domains=DetailedHistory.get_key_val(detail_dict, "domains" if "domains" in detail_dict else "domain_acl"),
linked_accounts=DetailedHistory.get_key_val(detail_dict, "accounts"))
elif 'Delete API key' in history.msg:
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Key: </td><td>{{ keyname }}</td></tr>
<tr><td>Role:</td><td>{{ rolename }}</td></tr>
<tr><td>Description:</td><td>{{ description }}</td></tr>
<tr><td>Accessible domains with this API key:</td><td>{{ linked_domains }}</td></tr>
</table>
""",
keyname=DetailedHistory.get_key_val(detail_dict, "key"),
rolename=DetailedHistory.get_key_val(detail_dict, "role"),
description=DetailedHistory.get_key_val(detail_dict, "description"),
linked_domains=DetailedHistory.get_key_val(detail_dict, "domains"))
elif 'Update type for domain' in history.msg:
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Domain: </td><td>{{ domain }}</td></tr>
<tr><td>Domain type:</td><td>{{ domain_type }}</td></tr>
<tr><td>Masters:</td><td>{{ masters }}</td></tr>
</table>
""",
domain=DetailedHistory.get_key_val(detail_dict, "domain"),
domain_type=DetailedHistory.get_key_val(detail_dict, "type"),
masters=DetailedHistory.get_key_val(detail_dict, "masters"))
elif 'reverse' in history.msg:
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Domain Type: </td><td>{{ domain_type }}</td></tr>
<tr><td>Domain Master IPs:</td><td>{{ domain_master_ips }}</td></tr>
</table>
""",
domain_type=DetailedHistory.get_key_val(detail_dict, "domain_type"),
domain_master_ips=DetailedHistory.get_key_val(detail_dict, "domain_master_ips"))
    # fall back to the title-cased key as well, for old databases
@staticmethod
def get_key_val(_dict, key):
return str(_dict.get(key, _dict.get(key.title(), '')))
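        # Illustrative example (assumed data, not executed): with
        # detail_dict = {'Key': 'api-key-1'}, get_key_val(detail_dict, 'key')
        # misses 'key', falls back to the title-cased 'Key', and returns 'api-key-1'.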
# convert a list of History objects into DetailedHistory objects
def convert_histories(histories):
changes_set = dict()
detailedHistories = []
j = 0
for i in range(len(histories)):
if histories[i].detail and ('add_rrests' in histories[i].detail or 'del_rrests' in histories[i].detail):
extract_changelogs_from_a_history_entry(changes_set, histories[i], j)
if j in changes_set:
detailedHistories.append(DetailedHistory(histories[i], changes_set[j]))
else: # no changes were found
detailedHistories.append(DetailedHistory(histories[i], None))
j += 1
else:
detailedHistories.append(DetailedHistory(histories[i], None))
return detailedHistories
@admin_bp.route('/history', methods=['GET', 'POST'])
@login_required
@history_access_required
def history():
if request.method == 'POST':
if current_user.role.name != 'Administrator':
return make_response(
jsonify({
'status': 'error',
'msg': 'You do not have permission to remove history.'
}), 401)
h = History()
result = h.remove_all()
if result:
history = History(msg='Remove all histories',
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
                    'msg': 'Removed all histories successfully.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
                    'msg': 'Cannot remove histories.'
}), 500)
if request.method == 'GET':
doms = accounts = users = ""
if current_user.role.name in [ 'Administrator', 'Operator']:
all_domain_names = Domain.query.all()
all_account_names = Account.query.all()
all_user_names = User.query.all()
for d in all_domain_names:
doms += d.name + " "
for acc in all_account_names:
accounts += acc.name + " "
for usr in all_user_names:
users += usr.username + " "
else: # special autocomplete for users
all_domain_names = db.session.query(Domain) \
.outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
.outerjoin(Account, Domain.account_id == Account.id) \
.outerjoin(AccountUser, Account.id == AccountUser.account_id) \
.filter(
db.or_(
DomainUser.user_id == current_user.id,
AccountUser.user_id == current_user.id
)).all()
all_account_names = db.session.query(Account) \
.outerjoin(Domain, Domain.account_id == Account.id) \
.outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
.outerjoin(AccountUser, Account.id == AccountUser.account_id) \
.filter(
db.or_(
DomainUser.user_id == current_user.id,
AccountUser.user_id == current_user.id
)).all()
all_user_names = []
for a in all_account_names:
temp = db.session.query(User) \
.join(AccountUser, AccountUser.user_id == User.id) \
.outerjoin(Account, Account.id == AccountUser.account_id) \
.filter(
db.or_(
Account.id == a.id,
AccountUser.account_id == a.id
)
) \
.all()
for u in temp:
if u in all_user_names:
continue
all_user_names.append(u)
for d in all_domain_names:
doms += d.name + " "
for a in all_account_names:
accounts += a.name + " "
for u in all_user_names:
users += u.username + " "
return render_template('admin_history.html', all_domain_names=doms, all_account_names=accounts, all_usernames=users)
# local_offset is the offset of the utc to the local time
# offset must be int
# return the date converted and simplified
def from_utc_to_local(local_offset, timeframe):
    offset = local_offset * (-1)
    date_split = str(timeframe).split(".")[0]
    date_converted = datetime.datetime.strptime(date_split, '%Y-%m-%d %H:%M:%S') + datetime.timedelta(minutes=offset)
return date_converted
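    # Illustrative call (assumed values, not executed): a browser in UTC+2 reports a
    # JS-style offset of -120, so from_utc_to_local(-120, '2021-06-01 10:00:00.123456')
    # yields datetime(2021, 6, 1, 12, 0, 0).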
@admin_bp.route('/history_table', methods=['GET', 'POST'])
@login_required
@history_access_required
def history_table(): # ajax call data
if request.method == 'POST':
if current_user.role.name != 'Administrator':
return make_response(
jsonify({
'status': 'error',
'msg': 'You do not have permission to remove history.'
}), 401)
h = History()
result = h.remove_all()
if result:
history = History(msg='Remove all histories',
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
                    'msg': 'Removed all histories successfully.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
                    'msg': 'Cannot remove histories.'
}), 500)
detailedHistories = []
lim = int(Setting().get('max_history_records')) # max num of records
if request.method == 'GET':
if current_user.role.name in [ 'Administrator', 'Operator' ]:
base_query = History.query
else:
# if the user isn't an administrator or operator,
# allow_user_view_history must be enabled to get here,
# so include history for the domains for the user
base_query = db.session.query(History) \
.join(Domain, History.domain_id == Domain.id) \
.outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
.outerjoin(Account, Domain.account_id == Account.id) \
.outerjoin(AccountUser, Account.id == AccountUser.account_id) \
.filter(
db.or_(
DomainUser.user_id == current_user.id,
AccountUser.user_id == current_user.id
))
domain_name = request.args.get('domain_name_filter') if request.args.get('domain_name_filter') != None \
and len(request.args.get('domain_name_filter')) != 0 else None
account_name = request.args.get('account_name_filter') if request.args.get('account_name_filter') != None \
and len(request.args.get('account_name_filter')) != 0 else None
user_name = request.args.get('auth_name_filter') if request.args.get('auth_name_filter') != None \
and len(request.args.get('auth_name_filter')) != 0 else None
min_date = request.args.get('min') if request.args.get('min') != None and len( request.args.get('min')) != 0 else None
if min_date != None: # get 1 day earlier, to check for timezone errors
min_date = str(datetime.datetime.strptime(min_date, '%Y-%m-%d') - datetime.timedelta(days=1))
max_date = request.args.get('max') if request.args.get('max') != None and len( request.args.get('max')) != 0 else None
if max_date != None: # get 1 day later, to check for timezone errors
max_date = str(datetime.datetime.strptime(max_date, '%Y-%m-%d') + datetime.timedelta(days=1))
tzoffset = request.args.get('tzoffset') if request.args.get('tzoffset') != None and len(request.args.get('tzoffset')) != 0 else None
changed_by = request.args.get('user_name_filter') if request.args.get('user_name_filter') != None \
and len(request.args.get('user_name_filter')) != 0 else None
"""
Auth methods: LOCAL, Github OAuth, Azure OAuth, SAML, OIDC OAuth, Google OAuth
"""
auth_methods = []
if (request.args.get('auth_local_only_checkbox') is None \
and request.args.get('auth_oauth_only_checkbox') is None \
and request.args.get('auth_saml_only_checkbox') is None and request.args.get('auth_all_checkbox') is None):
auth_methods = []
if request.args.get('auth_all_checkbox') == "on":
auth_methods.append("")
if request.args.get('auth_local_only_checkbox') == "on":
auth_methods.append("LOCAL")
if request.args.get('auth_oauth_only_checkbox') == "on":
auth_methods.append("OAuth")
if request.args.get('auth_saml_only_checkbox') == "on":
auth_methods.append("SAML")
if request.args.get('domain_changelog_only_checkbox') != None:
changelog_only = True if request.args.get('domain_changelog_only_checkbox') == "on" else False
else:
changelog_only = False
# users cannot search for authentication
if user_name != None and current_user.role.name not in [ 'Administrator', 'Operator']:
histories = []
elif domain_name != None:
if not changelog_only:
histories = base_query \
.filter(
db.and_(
db.or_(
History.msg.like("%domain "+ domain_name) if domain_name != "*" else History.msg.like("%domain%"),
History.msg.like("%domain "+ domain_name + " access control") if domain_name != "*" else History.msg.like("%domain%access control")
),
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
).order_by(History.created_on.desc()).limit(lim).all()
else:
# search for records changes only
histories = base_query \
.filter(
db.and_(
History.msg.like("Apply record changes to domain " + domain_name) if domain_name != "*" \
else History.msg.like("Apply record changes to domain%"),
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
).order_by(History.created_on.desc()) \
.limit(lim).all()
elif account_name != None:
if current_user.role.name in ['Administrator', 'Operator']:
histories = base_query \
.join(Domain, History.domain_id == Domain.id) \
.outerjoin(Account, Domain.account_id == Account.id) \
.filter(
db.and_(
Account.id == Domain.account_id,
account_name == Account.name if account_name != "*" else True,
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
).order_by(History.created_on.desc()) \
.limit(lim).all()
else:
histories = base_query \
.filter(
db.and_(
Account.id == Domain.account_id,
account_name == Account.name if account_name != "*" else True,
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
).order_by(History.created_on.desc()) \
.limit(lim).all()
elif user_name != None and current_user.role.name in [ 'Administrator', 'Operator']: # only admins can see the user login-logouts
histories = History.query \
.filter(
db.and_(
db.or_(
History.msg.like("User "+ user_name + " authentication%") if user_name != "*" and user_name != None else History.msg.like("%authentication%"),
History.msg.like("User "+ user_name + " was not authorized%") if user_name != "*" and user_name != None else History.msg.like("User%was not authorized%")
),
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
) \
.order_by(History.created_on.desc()).limit(lim).all()
temp = []
for h in histories:
for method in auth_methods:
if method in h.detail:
temp.append(h)
break
histories = temp
elif (changed_by != None or max_date != None) and current_user.role.name in [ 'Administrator', 'Operator'] : # select changed by and date filters only
histories = History.query \
.filter(
db.and_(
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
) \
.order_by(History.created_on.desc()).limit(lim).all()
elif (changed_by != None or max_date != None): # special filtering for user because one user does not have access to log-ins logs
histories = base_query \
.filter(
db.and_(
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
) \
.order_by(History.created_on.desc()).limit(lim).all()
elif max_date != None: # if changed by == null and only date is applied
histories = base_query.filter(
db.and_(
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
)
).order_by(History.created_on.desc()).limit(lim).all()
else: # default view
if current_user.role.name in [ 'Administrator', 'Operator']:
histories = History.query.order_by(History.created_on.desc()).limit(lim).all()
else:
histories = db.session.query(History) \
.join(Domain, History.domain_id == Domain.id) \
.outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
.outerjoin(Account, Domain.account_id == Account.id) \
.outerjoin(AccountUser, Account.id == AccountUser.account_id) \
.order_by(History.created_on.desc()) \
.filter(
db.or_(
DomainUser.user_id == current_user.id,
AccountUser.user_id == current_user.id
)).limit(lim).all()
detailedHistories = convert_histories(histories)
# Remove dates from previous or next day that were brought over
if tzoffset != None:
if min_date != None:
min_date_split = min_date.split()[0]
if max_date != None:
max_date_split = max_date.split()[0]
for i, history_rec in enumerate(detailedHistories):
local_date = str(from_utc_to_local(int(tzoffset), history_rec.history.created_on).date())
if (min_date != None and local_date == min_date_split) or (max_date != None and local_date == max_date_split):
detailedHistories[i] = None
# Remove elements previously flagged as None
detailedHistories = [h for h in detailedHistories if h is not None]
return render_template('admin_history_table.html', histories=detailedHistories, len_histories=len(detailedHistories), lim=lim)
@admin_bp.route('/setting/basic', methods=['GET'])
@login_required
@operator_role_required
def setting_basic():
if request.method == 'GET':
settings = [
'maintenance', 'fullscreen_layout', 'record_helper',
'login_ldap_first', 'default_record_table_size',
'default_domain_table_size', 'auto_ptr', 'record_quick_edit',
'pretty_ipv6_ptr', 'dnssec_admins_only',
'allow_user_create_domain', 'allow_user_remove_domain', 'allow_user_view_history', 'bg_domain_updates', 'site_name',
'session_timeout', 'warn_session_timeout', 'ttl_options',
'pdns_api_timeout', 'verify_ssl_connections', 'verify_user_email',
'delete_sso_accounts', 'otp_field_enabled', 'custom_css', 'enable_api_rr_history', 'max_history_records'
]
return render_template('admin_setting_basic.html', settings=settings)
@admin_bp.route('/setting/basic/<path:setting>/edit', methods=['POST'])
@login_required
@operator_role_required
def setting_basic_edit(setting):
jdata = request.json
new_value = jdata['value']
result = Setting().set(setting, new_value)
if (result):
return make_response(
jsonify({
'status': 'ok',
'msg': 'Toggled setting successfully.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Unable to toggle setting.'
}), 500)
@admin_bp.route('/setting/basic/<path:setting>/toggle', methods=['POST'])
@login_required
@operator_role_required
def setting_basic_toggle(setting):
result = Setting().toggle(setting)
if (result):
return make_response(
jsonify({
'status': 'ok',
'msg': 'Toggled setting successfully.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Unable to toggle setting.'
}), 500)
@admin_bp.route('/setting/pdns', methods=['GET', 'POST'])
@login_required
@admin_role_required
def setting_pdns():
if request.method == 'GET':
pdns_api_url = Setting().get('pdns_api_url')
pdns_api_key = Setting().get('pdns_api_key')
pdns_version = Setting().get('pdns_version')
return render_template('admin_setting_pdns.html',
pdns_api_url=pdns_api_url,
pdns_api_key=pdns_api_key,
pdns_version=pdns_version)
elif request.method == 'POST':
pdns_api_url = request.form.get('pdns_api_url')
pdns_api_key = request.form.get('pdns_api_key')
pdns_version = request.form.get('pdns_version')
Setting().set('pdns_api_url', pdns_api_url)
Setting().set('pdns_api_key', pdns_api_key)
Setting().set('pdns_version', pdns_version)
return render_template('admin_setting_pdns.html',
pdns_api_url=pdns_api_url,
pdns_api_key=pdns_api_key,
pdns_version=pdns_version)
@admin_bp.route('/setting/dns-records', methods=['GET', 'POST'])
@login_required
@operator_role_required
def setting_records():
if request.method == 'GET':
_fr = Setting().get('forward_records_allow_edit')
_rr = Setting().get('reverse_records_allow_edit')
f_records = literal_eval(_fr) if isinstance(_fr, str) else _fr
r_records = literal_eval(_rr) if isinstance(_rr, str) else _rr
return render_template('admin_setting_records.html',
f_records=f_records,
r_records=r_records)
elif request.method == 'POST':
fr = {}
rr = {}
records = Setting().defaults['forward_records_allow_edit']
for r in records:
fr[r] = True if request.form.get('fr_{0}'.format(
r.lower())) else False
rr[r] = True if request.form.get('rr_{0}'.format(
r.lower())) else False
Setting().set('forward_records_allow_edit', str(fr))
Setting().set('reverse_records_allow_edit', str(rr))
return redirect(url_for('admin.setting_records'))
def has_an_auth_method(local_db_enabled=None,
ldap_enabled=None,
google_oauth_enabled=None,
github_oauth_enabled=None,
oidc_oauth_enabled=None,
azure_oauth_enabled=None):
if local_db_enabled is None:
local_db_enabled = Setting().get('local_db_enabled')
if ldap_enabled is None:
ldap_enabled = Setting().get('ldap_enabled')
if google_oauth_enabled is None:
google_oauth_enabled = Setting().get('google_oauth_enabled')
if github_oauth_enabled is None:
github_oauth_enabled = Setting().get('github_oauth_enabled')
if oidc_oauth_enabled is None:
oidc_oauth_enabled = Setting().get('oidc_oauth_enabled')
if azure_oauth_enabled is None:
azure_oauth_enabled = Setting().get('azure_oauth_enabled')
return local_db_enabled or ldap_enabled or google_oauth_enabled or github_oauth_enabled or oidc_oauth_enabled or azure_oauth_enabled
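# Illustrative use (assumed settings, not executed): before persisting a form that
# turns the local DB off, has_an_auth_method(local_db_enabled=False) re-reads the
# remaining auth settings and returns False only if every other method is disabled too.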
@admin_bp.route('/setting/authentication', methods=['GET', 'POST'])
@login_required
@admin_role_required
def setting_authentication():
if request.method == 'GET':
return render_template('admin_setting_authentication.html')
elif request.method == 'POST':
conf_type = request.form.get('config_tab')
result = None
if conf_type == 'general':
local_db_enabled = True if request.form.get(
'local_db_enabled') else False
signup_enabled = True if request.form.get(
'signup_enabled', ) else False
if not has_an_auth_method(local_db_enabled=local_db_enabled):
result = {
'status':
False,
'msg':
'Must have at least one authentication method enabled.'
}
else:
Setting().set('local_db_enabled', local_db_enabled)
Setting().set('signup_enabled', signup_enabled)
result = {'status': True, 'msg': 'Saved successfully'}
elif conf_type == 'ldap':
ldap_enabled = True if request.form.get('ldap_enabled') else False
if not has_an_auth_method(ldap_enabled=ldap_enabled):
result = {
'status':
False,
'msg':
'Must have at least one authentication method enabled.'
}
else:
Setting().set('ldap_enabled', ldap_enabled)
Setting().set('ldap_type', request.form.get('ldap_type'))
Setting().set('ldap_uri', request.form.get('ldap_uri'))
Setting().set('ldap_base_dn', request.form.get('ldap_base_dn'))
Setting().set('ldap_admin_username',
request.form.get('ldap_admin_username'))
Setting().set('ldap_admin_password',
request.form.get('ldap_admin_password'))
Setting().set('ldap_filter_basic',
request.form.get('ldap_filter_basic'))
Setting().set('ldap_filter_group',
request.form.get('ldap_filter_group'))
Setting().set('ldap_filter_username',
request.form.get('ldap_filter_username'))
Setting().set('ldap_filter_groupname',
request.form.get('ldap_filter_groupname'))
Setting().set(
'ldap_sg_enabled', True
if request.form.get('ldap_sg_enabled') == 'ON' else False)
Setting().set('ldap_admin_group',
request.form.get('ldap_admin_group'))
Setting().set('ldap_operator_group',
request.form.get('ldap_operator_group'))
Setting().set('ldap_user_group',
request.form.get('ldap_user_group'))
Setting().set('ldap_domain', request.form.get('ldap_domain'))
Setting().set(
'autoprovisioning', True
if request.form.get('autoprovisioning') == 'ON' else False)
Setting().set('autoprovisioning_attribute',
request.form.get('autoprovisioning_attribute'))
if request.form.get('autoprovisioning')=='ON':
if validateURN(request.form.get('urn_value')):
Setting().set('urn_value',
request.form.get('urn_value'))
else:
return render_template('admin_setting_authentication.html',
error="Invalid urn")
else:
Setting().set('urn_value',
request.form.get('urn_value'))
Setting().set('purge', True
if request.form.get('purge') == 'ON' else False)
result = {'status': True, 'msg': 'Saved successfully'}
elif conf_type == 'google':
google_oauth_enabled = True if request.form.get(
'google_oauth_enabled') else False
if not has_an_auth_method(google_oauth_enabled=google_oauth_enabled):
result = {
'status':
False,
'msg':
'Must have at least one authentication method enabled.'
}
else:
Setting().set('google_oauth_enabled', google_oauth_enabled)
Setting().set('google_oauth_client_id',
request.form.get('google_oauth_client_id'))
Setting().set('google_oauth_client_secret',
request.form.get('google_oauth_client_secret'))
Setting().set('google_token_url',
request.form.get('google_token_url'))
Setting().set('google_oauth_scope',
request.form.get('google_oauth_scope'))
Setting().set('google_authorize_url',
request.form.get('google_authorize_url'))
Setting().set('google_base_url',
request.form.get('google_base_url'))
result = {
'status': True,
'msg':
'Saved successfully. Please reload PDA to take effect.'
}
elif conf_type == 'github':
github_oauth_enabled = True if request.form.get(
'github_oauth_enabled') else False
if not has_an_auth_method(github_oauth_enabled=github_oauth_enabled):
result = {
'status':
False,
'msg':
'Must have at least one authentication method enabled.'
}
else:
Setting().set('github_oauth_enabled', github_oauth_enabled)
Setting().set('github_oauth_key',
request.form.get('github_oauth_key'))
Setting().set('github_oauth_secret',
request.form.get('github_oauth_secret'))
Setting().set('github_oauth_scope',
request.form.get('github_oauth_scope'))
Setting().set('github_oauth_api_url',
request.form.get('github_oauth_api_url'))
Setting().set('github_oauth_token_url',
request.form.get('github_oauth_token_url'))
Setting().set('github_oauth_authorize_url',
request.form.get('github_oauth_authorize_url'))
result = {
'status': True,
'msg':
'Saved successfully. Please reload PDA to take effect.'
}
elif conf_type == 'azure':
azure_oauth_enabled = True if request.form.get(
'azure_oauth_enabled') else False
if not has_an_auth_method(azure_oauth_enabled=azure_oauth_enabled):
result = {
'status':
False,
'msg':
'Must have at least one authentication method enabled.'
}
else:
Setting().set('azure_oauth_enabled', azure_oauth_enabled)
Setting().set('azure_oauth_key',
request.form.get('azure_oauth_key'))
Setting().set('azure_oauth_secret',
request.form.get('azure_oauth_secret'))
Setting().set('azure_oauth_scope',
request.form.get('azure_oauth_scope'))
Setting().set('azure_oauth_api_url',
request.form.get('azure_oauth_api_url'))
Setting().set('azure_oauth_token_url',
request.form.get('azure_oauth_token_url'))
Setting().set('azure_oauth_authorize_url',
request.form.get('azure_oauth_authorize_url'))
Setting().set(
'azure_sg_enabled', True
if request.form.get('azure_sg_enabled') == 'ON' else False)
Setting().set('azure_admin_group',
request.form.get('azure_admin_group'))
Setting().set('azure_operator_group',
request.form.get('azure_operator_group'))
Setting().set('azure_user_group',
request.form.get('azure_user_group'))
Setting().set(
'azure_group_accounts_enabled', True
if request.form.get('azure_group_accounts_enabled') == 'ON' else False)
Setting().set('azure_group_accounts_name',
request.form.get('azure_group_accounts_name'))
Setting().set('azure_group_accounts_name_re',
request.form.get('azure_group_accounts_name_re'))
Setting().set('azure_group_accounts_description',
request.form.get('azure_group_accounts_description'))
Setting().set('azure_group_accounts_description_re',
request.form.get('azure_group_accounts_description_re'))
result = {
'status': True,
'msg':
'Saved successfully. Please reload PDA to take effect.'
}
elif conf_type == 'oidc':
oidc_oauth_enabled = True if request.form.get(
'oidc_oauth_enabled') else False
if not has_an_auth_method(oidc_oauth_enabled=oidc_oauth_enabled):
result = {
'status':
False,
'msg':
'Must have at least one authentication method enabled.'
}
else:
Setting().set(
'oidc_oauth_enabled',
True if request.form.get('oidc_oauth_enabled') else False)
Setting().set('oidc_oauth_key',
request.form.get('oidc_oauth_key'))
Setting().set('oidc_oauth_secret',
request.form.get('oidc_oauth_secret'))
Setting().set('oidc_oauth_scope',
request.form.get('oidc_oauth_scope'))
Setting().set('oidc_oauth_api_url',
request.form.get('oidc_oauth_api_url'))
Setting().set('oidc_oauth_token_url',
request.form.get('oidc_oauth_token_url'))
Setting().set('oidc_oauth_authorize_url',
request.form.get('oidc_oauth_authorize_url'))
Setting().set('oidc_oauth_logout_url',
request.form.get('oidc_oauth_logout_url'))
Setting().set('oidc_oauth_username',
request.form.get('oidc_oauth_username'))
Setting().set('oidc_oauth_firstname',
request.form.get('oidc_oauth_firstname'))
Setting().set('oidc_oauth_last_name',
request.form.get('oidc_oauth_last_name'))
Setting().set('oidc_oauth_email',
request.form.get('oidc_oauth_email'))
Setting().set('oidc_oauth_account_name_property',
request.form.get('oidc_oauth_account_name_property'))
Setting().set('oidc_oauth_account_description_property',
request.form.get('oidc_oauth_account_description_property'))
result = {
'status': True,
'msg':
'Saved successfully. Please reload PDA to take effect.'
}
else:
return abort(400)
return render_template('admin_setting_authentication.html',
result=result)
@admin_bp.route('/templates', methods=['GET', 'POST'])
@admin_bp.route('/templates/list', methods=['GET', 'POST'])
@login_required
@operator_role_required
def templates():
templates = DomainTemplate.query.all()
return render_template('template.html', templates=templates)
@admin_bp.route('/template/create', methods=['GET', 'POST'])
@login_required
@operator_role_required
def create_template():
if request.method == 'GET':
return render_template('template_add.html')
if request.method == 'POST':
try:
name = request.form.getlist('name')[0]
description = request.form.getlist('description')[0]
            if ' ' in name or not name:
flash("Please correct your input", 'error')
return redirect(url_for('admin.create_template'))
if DomainTemplate.query.filter(
DomainTemplate.name == name).first():
flash(
"A template with the name {0} already exists!".format(
name), 'error')
return redirect(url_for('admin.create_template'))
t = DomainTemplate(name=name, description=description)
result = t.create()
if result['status'] == 'ok':
history = History(msg='Add domain template {0}'.format(name),
detail=str({
'name': name,
'description': description
}),
created_by=current_user.username)
history.add()
return redirect(url_for('admin.templates'))
else:
flash(result['msg'], 'error')
return redirect(url_for('admin.create_template'))
except Exception as e:
current_app.logger.error(
'Cannot create domain template. Error: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
abort(500)
@admin_bp.route('/template/create-from-zone', methods=['POST'])
@login_required
@operator_role_required
def create_template_from_zone():
try:
jdata = request.json
name = jdata['name']
description = jdata['description']
domain_name = jdata['domain']
        if ' ' in name or not name:
return make_response(
jsonify({
'status': 'error',
'msg': 'Please correct template name'
}), 400)
if DomainTemplate.query.filter(DomainTemplate.name == name).first():
return make_response(
jsonify({
'status':
'error',
'msg':
'A template with the name {0} already exists!'.format(name)
}), 409)
t = DomainTemplate(name=name, description=description)
result = t.create()
if result['status'] == 'ok':
history = History(msg='Add domain template {0}'.format(name),
detail=str({
'name': name,
'description': description
}),
created_by=current_user.username)
history.add()
            # After creating the domain template in the local DB,
            # we add records to its record templates.
records = []
domain = Domain.query.filter(Domain.name == domain_name).first()
if domain:
# Query zone's rrsets from PowerDNS API
rrsets = Record().get_rrsets(domain.name)
if rrsets:
for r in rrsets:
name = '@' if r['name'] == domain_name + '.' else r[
'name'].replace('.{}.'.format(domain_name), '')
for record in r['records']:
t_record = DomainTemplateRecord(
name=name,
type=r['type'],
status=False if record['disabled'] else True,
ttl=r['ttl'],
data=record['content'])
records.append(t_record)
result = t.replace_records(records)
if result['status'] == 'ok':
return make_response(
jsonify({
'status': 'ok',
'msg': result['msg']
}), 200)
else:
# Revert the domain template (remove it)
                        # if we cannot add records.
t.delete_template()
return make_response(
jsonify({
'status': 'error',
'msg': result['msg']
}), 500)
else:
return make_response(
jsonify({
'status': 'error',
'msg': result['msg']
}), 500)
except Exception as e:
current_app.logger.error(
'Cannot create template from zone. Error: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
return make_response(
jsonify({
'status': 'error',
'msg': 'Error when applying new changes'
}), 500)
@admin_bp.route('/template/<path:template>/edit', methods=['GET'])
@login_required
@operator_role_required
def edit_template(template):
try:
t = DomainTemplate.query.filter(
DomainTemplate.name == template).first()
records_allow_to_edit = Setting().get_records_allow_to_edit()
quick_edit = Setting().get('record_quick_edit')
ttl_options = Setting().get_ttl_options()
if t is not None:
records = []
for jr in t.records:
if jr.type in records_allow_to_edit:
record = DomainTemplateRecord(
name=jr.name,
type=jr.type,
status='Active' if jr.status else 'Disabled',
ttl=jr.ttl,
data=jr.data,
comment=jr.comment if jr.comment else '')
records.append(record)
return render_template('template_edit.html',
template=t.name,
records=records,
editable_records=records_allow_to_edit,
quick_edit=quick_edit,
ttl_options=ttl_options)
except Exception as e:
current_app.logger.error(
'Cannot open domain template page. DETAIL: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
abort(500)
return redirect(url_for('admin.templates'))
@admin_bp.route('/template/<path:template>/apply',
methods=['POST'],
strict_slashes=False)
@login_required
def apply_records(template):
try:
jdata = request.json
records = []
for j in jdata['records']:
name = '@' if j['record_name'] in ['@', ''] else j['record_name']
type = j['record_type']
data = j['record_data']
comment = j['record_comment']
status = 0 if j['record_status'] == 'Disabled' else 1
ttl = int(j['record_ttl']) if j['record_ttl'] else 3600
dtr = DomainTemplateRecord(name=name,
type=type,
data=data,
comment=comment,
status=status,
ttl=ttl)
records.append(dtr)
t = DomainTemplate.query.filter(
DomainTemplate.name == template).first()
result = t.replace_records(records)
if result['status'] == 'ok':
jdata.pop('_csrf_token',
None) # don't store csrf token in the history.
history = History(
msg='Apply domain template record changes to domain template {0}'
.format(template),
detail=str(json.dumps(jdata)),
created_by=current_user.username)
history.add()
return make_response(jsonify(result), 200)
else:
return make_response(jsonify(result), 400)
except Exception as e:
current_app.logger.error(
'Cannot apply record changes to the template. Error: {0}'.format(
e))
current_app.logger.debug(traceback.format_exc())
return make_response(
jsonify({
'status': 'error',
'msg': 'Error when applying new changes'
}), 500)
@admin_bp.route('/template/<path:template>/delete', methods=['POST'])
@login_required
@operator_role_required
def delete_template(template):
try:
t = DomainTemplate.query.filter(
DomainTemplate.name == template).first()
if t is not None:
result = t.delete_template()
if result['status'] == 'ok':
history = History(
msg='Deleted domain template {0}'.format(template),
detail=str({'name': template}),
created_by=current_user.username)
history.add()
return redirect(url_for('admin.templates'))
else:
flash(result['msg'], 'error')
return redirect(url_for('admin.templates'))
except Exception as e:
current_app.logger.error(
'Cannot delete template. Error: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
abort(500)
return redirect(url_for('admin.templates'))
@admin_bp.route('/global-search', methods=['GET'])
@login_required
@operator_role_required
def global_search():
if request.method == 'GET':
domains = []
records = []
comments = []
query = request.args.get('q')
if query:
server = Server(server_id='localhost')
results = server.global_search(object_type='all', query=query)
# Format the search result
for result in results:
if result['object_type'] == 'zone':
# Remove the dot at the end of string
result['name'] = result['name'][:-1]
domains.append(result)
elif result['object_type'] == 'record':
# Remove the dot at the end of string
result['name'] = result['name'][:-1]
result['zone_id'] = result['zone_id'][:-1]
records.append(result)
elif result['object_type'] == 'comment':
# Get the actual record name, exclude the domain part
result['name'] = result['name'].replace(result['zone_id'], '')
if result['name']:
result['name'] = result['name'][:-1]
else:
result['name'] = '@'
# Remove the dot at the end of string
result['zone_id'] = result['zone_id'][:-1]
comments.append(result)
else:
pass
return render_template('admin_global_search.html', domains=domains, records=records, comments=comments)
def validateURN(value):
NID_PATTERN = re.compile(r'^[0-9a-z][0-9a-z-]{1,31}$', flags=re.IGNORECASE)
NSS_PCHAR = '[a-z0-9-._~]|%[a-f0-9]{2}|[!$&\'()*+,;=]|:|@'
NSS_PATTERN = re.compile(fr'^({NSS_PCHAR})({NSS_PCHAR}|/|\?)*$', re.IGNORECASE)
prefix=value.split(':')
if (len(prefix)<3):
current_app.logger.warning( "Too small urn prefix" )
return False
urn=prefix[0]
nid=prefix[1]
nss=value.replace(urn+":"+nid+":", "")
if not urn.lower()=="urn":
current_app.logger.warning( urn + ' contains invalid characters ' )
return False
if not re.match(NID_PATTERN, nid.lower()):
current_app.logger.warning( nid + ' contains invalid characters ' )
return False
if not re.match(NSS_PATTERN, nss):
current_app.logger.warning( nss + ' contains invalid characters ' )
return False
return True
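# Illustrative checks (assumed values, not executed):
#   validateURN('urn:example:a123,z456')  -> True   (valid NID and NSS)
#   validateURN('urn::no-nid')            -> False  (empty NID fails NID_PATTERN)
#   validateURN('foo:bar')                -> False  (fewer than three ':'-separated parts)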
|
ngoduykhanh/PowerDNS-Admin
|
powerdnsadmin/routes/admin.py
|
Python
|
mit
| 82,003
|
import copy
from supriya.realtime.ControlInterface import ControlInterface
class GroupInterface(ControlInterface):
"""
Interface to group controls.
::
>>> server = supriya.Server.default().boot()
>>> group = supriya.Group().allocate()
>>> group.extend([
... supriya.Synth(synthdef=supriya.assets.synthdefs.test),
... supriya.Synth(synthdef=supriya.assets.synthdefs.default),
... supriya.Synth(synthdef=supriya.assets.synthdefs.default),
... ])
::
>>> control = group.controls['amplitude']
::
>>> group.controls['frequency'] = 777
"""
### CLASS VARIABLES ###
__slots__ = ("_group_controls",)
### INITIALIZER ###
def __init__(self, client=None):
self._synth_controls = {}
self._group_controls = {}
self._client = client
### SPECIAL METHODS ###
def __contains__(self, item):
return item in self._synth_controls
def __getitem__(self, item):
return self._group_controls[item]
def __iter__(self):
return iter(sorted(self._group_controls))
def __len__(self):
return len(self._group_controls)
def __repr__(self):
class_name = type(self).__name__
return "<{}: {!r}>".format(class_name, self.client)
def __setitem__(self, items, values):
import supriya.realtime
if not isinstance(items, tuple):
items = (items,)
assert all(_ in self._synth_controls for _ in items)
if not isinstance(values, tuple):
values = (values,)
assert len(items) == len(values)
settings = dict(zip(items, values))
for key, value in settings.items():
for synth in self._synth_controls.get(key, ()):
control = synth.controls[key]
if isinstance(value, supriya.realtime.Bus):
control._map_to_bus(value)
elif value is None:
control._unmap()
else:
control._set_to_number(value)
requests = self._set(**settings)
supriya.commands.RequestBundle(contents=requests).communicate(
server=self.client.server, sync=True
)
### PUBLIC METHODS ###
def add_controls(self, control_interface_dict):
import supriya.realtime
for control_name in control_interface_dict:
if control_name not in self._synth_controls:
self._synth_controls[control_name] = copy.copy(
control_interface_dict[control_name]
)
proxy = supriya.realtime.GroupControl(client=self, name=control_name)
self._group_controls[control_name] = proxy
else:
self._synth_controls[control_name].update(
control_interface_dict[control_name]
)
def as_dict(self):
result = {}
for control_name, node_set in self._synth_controls.items():
result[control_name] = copy.copy(node_set)
return result
def remove_controls(self, control_interface_dict):
for control_name in control_interface_dict:
if control_name not in self._synth_controls:
continue
current_nodes = self._synth_controls[control_name]
nodes_to_remove = control_interface_dict[control_name]
current_nodes.difference_update(nodes_to_remove)
if not current_nodes:
del self._synth_controls[control_name]
del self._group_controls[control_name]
def reset(self):
self._synth_controls.clear()
|
Pulgama/supriya
|
supriya/realtime/GroupInterface.py
|
Python
|
mit
| 3,704
|
import zmq
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--sub', help='django subscription socket', type=str, required=True)
parser.add_argument('-p', '--pub', help='tornado publication socket', type=str, required=True)
args = parser.parse_args()
ctx = zmq.Context()
django_sub = ctx.socket(zmq.SUB)
django_sub.bind(args.sub)
django_sub.setsockopt(zmq.SUBSCRIBE, "")
tornado_pub = ctx.socket(zmq.PUB)
tornado_pub.bind(args.pub)
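    # zmq.device blocks here, relaying every message from the SUB socket to the
    # PUB socket until the process is interrupted.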
try:
dev = zmq.device(zmq.FORWARDER, django_sub, tornado_pub)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
|
mike-grayhat/djazator
|
src/djazator/mq.py
|
Python
|
mit
| 674
|
# This relies on each of the submodules having an __all__ variable.
from .core import *
from .constructors import *
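# Importing the submodules above also binds the module objects `core` and
# `constructors` in this package's namespace, which is what makes the
# `core.__all__` / `constructors.__all__` references below resolve.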
__all__ = (['__version__']
+ core.__all__
+ constructors.__all__
)
__version__ = '0.3.0'
|
Evgenus/metaconfig
|
metaconfig/__init__.py
|
Python
|
mit
| 220
|
import pandas
import numpy as np
from os import listdir
from os.path import isfile,join
from math import isnan
path = "../webscraper/course_data/csv/"
filenames = [f for f in listdir(path) if isfile(join(path,f))]
buildings = {}
bNames = ["PCS","FAIR","EHS","ING","ALOF","ALUM","UP","HARR","BH",
"JUB","PH","KOM","VET","AMG","SFA","WMB","JH","TODD","WPS",
"DSB","KUC","FH","BDA","CKNB","LRC","SCI","LIB","BAS","SAG",
"HMA","TCM","HOB","TLC","MB","BRAGG","COE","VA","HC","GC","HONR"]
# initialize matrix
for building in bNames:
if building == "LIB":
n = 350
buildings[building] = [[n for x in range(5)] for y in range(24)]
buildings[building] = [[0 for x in range(5)] for y in range(24)]
for f in filenames:
df = pandas.read_csv(path+f)
n_rows = df.shape[0]
n_cols = df.shape[1]
for row in range(2,n_rows):
room = df.iloc[row,18].split()[0]
if room not in buildings:
buildings[room] = [[0 for x in range(5)] for y in range(24)]
if room != "TBA" and df.iloc[row,16].split()[0] != "RODP":
time = [0,0]
tmpTime = df.iloc[row,9]
if tmpTime.split()[0] == "TBA":
continue
if tmpTime[7:9] == "pm":
time[0] = (int(tmpTime[0:2]) + 12)
else:
time[0] = (int(tmpTime[0:2]))
if tmpTime[-2]+tmpTime[-1] == "pm":
time[1] = (int(tmpTime[-8:-6]) + 12)
else:
time[1] = (int(tmpTime[-8:-6]))
# time[0] is start time, time[1] is end time
days = df.iloc[row,8]
for day in range(len(days)):
if days[day] == 'M':
dayIndex = 0
if days[day] == 'T':
dayIndex = 1
if days[day] == 'W':
dayIndex = 2
if days[day] == 'R':
dayIndex = 3
if days[day] == 'F':
dayIndex = 4
if not isnan(float(df.iloc[row,11])):
for hour in range(time[0]-1,time[1]):
buildings[room][hour][dayIndex] += int(df.iloc[row,11])
if not isnan(float(df.iloc[row,14])):
for hour in range(time[0]-1,time[1]):
buildings[room][hour][dayIndex] += int(df.iloc[row,14])
TotalStudentsInBuildings = []
total = 0
m = 0
for hour in range(24):
for day in range(5):
for room in buildings:
if buildings[room][hour][day] > m:
m = buildings[room][hour][day]
total += buildings[room][hour][day]
TotalStudentsInBuildings.append(total)
total = 0
#print(TotalStudentsInBuildings.index(max(TotalStudentsInBuildings)))
#print(sum(TotalStudentsInBuildings))
TotalStudentsInBuildings = []
total = 0
print(m)
|
sremedios/ParkMT
|
sam_test/stats.py
|
Python
|
mit
| 2,947
|
from .temperature_sensitivity import *
|
philngo/ee-meter
|
eemeter/models/__init__.py
|
Python
|
mit
| 39
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
import ConfigParser
LOGIN_FORM_SELECTOR = '.inline_login_form'
ERROR_MSG_SELECTOR = '.input_validation_error_text[style*="display: block"]'
CONTENT_PAGE_ITEM_SELECTOR = '.UserContentList .pagedlist_item'
QUORA_TITLE = 'Quora - The best answer to any question'
HOME_TITLE = 'Quora - Home'
CONTENT_TILE = 'Your Content - Quora'
CONTENT_URL = 'https://www.quora.com/content?content_types=answers'
PROFILE_IMG_SELECTOR = '.nav_item_link .expanded .profile_photo_img'
# driver = webdriver.PhantomJS()
driver = webdriver.Firefox()
config = ConfigParser.ConfigParser()
config.readfp(open(r'config.pcgf'))
username = config.get('credentials', 'username')
userpassword = config.get('credentials', 'password')
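# Assumed config.pcgf layout (INI-style, inferred from the reads above):
# [credentials]
# username = you@example.com
# password = secret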
driver.set_window_size(1120, 550)
print 'loading quora now'
driver.get("https://quora.com/")
print 'quora loaded'
print 'We have to login with email and password'
email_input = driver.find_element_by_css_selector(
LOGIN_FORM_SELECTOR + ' input[type=text]')
password_input = driver.find_element_by_css_selector(
LOGIN_FORM_SELECTOR + ' input[type=password]')
# assert email_input.is_displayed() and email_input.is_enabled()
# assert password_input.is_displayed() and password_input.is_enabled()
print 'input login details'
email_input.clear()
password_input.clear()
email_input.send_keys(username)
password_input.send_keys(userpassword + Keys.RETURN)
# driver.find_element_by_class_name('submit_button').click()
# hello = driver.find_element_by_class_name('input_validation_error_text')
# print hello
print 'done'
driver.quit()
|
Alafazam/simple_projects
|
python/quora.py
|
Python
|
mit
| 1,739
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 31 10:00:32 2015
@author: mje
"""
import mne
from mne.minimum_norm import (make_inverse_operator, apply_inverse,
write_inverse_operator)
import socket
import numpy as np
import matplotlib.pyplot as plt
# Setup paths and prepare raw data
hostname = socket.gethostname()
if hostname == "Wintermute":
data_path = "/home/mje/mnt/caa/scratch/"
n_jobs = 1
else:
data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
n_jobs = 1
subjects_dir = data_path + "fs_subjects_dir/"
fname_fwd = data_path + '0001-fwd.fif'
fname_cov = data_path + '0001-cov.fif'
fname_evoked = data_path + "0001_p_03_filter_ds_ica-mc_raw_tsss-ave.fif"
snr = 1.0
lambda2 = 1.0 / snr ** 2
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
forward_meeg = mne.read_forward_solution(fname_fwd, surf_ori=True)
noise_cov = mne.read_cov(fname_cov)
# Restrict forward solution as necessary for MEG
forward_meg = mne.pick_types_forward(forward_meeg, meg=True, eeg=False)
# Alternatively, you can just load a forward solution that is restricted
# make an M/EEG, MEG-only, and EEG-only inverse operators
inverse_operator_meg = make_inverse_operator(evoked.info, forward_meg,
noise_cov,
loose=0.2, depth=0.8)
write_inverse_operator('0001-meg-oct-6-inv.fif',
inverse_operator_meg)
|
MadsJensen/malthe_alpha_project
|
make_inverse_operator.py
|
Python
|
mit
| 1,501
|
import flask_login
from wtforms import form, fields, validators
from werkzeug.security import check_password_hash, generate_password_hash
from models import User
from keepmydevices import app, db
class LoginForm(form.Form):
login = fields.StringField(validators=[validators.required()])
password = fields.PasswordField(validators=[validators.required()])
def validate_login(self, field):
user = self.get_user()
if user is None:
raise validators.ValidationError('Invalid user')
        # we're comparing the plaintext pw with the hash from the db
if not check_password_hash(user.password, self.password.data):
# to compare plain text passwords use
# if user.password != self.password.data:
raise validators.ValidationError('Invalid password')
def get_user(self):
return db.session.query(User).filter_by(login=self.login.data).first()
class RegistrationForm(form.Form):
login = fields.StringField(validators=[validators.required()])
email = fields.StringField()
password = fields.PasswordField(validators=[validators.required()])
def validate_login(self, field):
if db.session.query(User).filter_by(login=self.login.data).count() > 0:
raise validators.ValidationError('Duplicate username')
def init_login():
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
## Add admin
# admin = User.query.filter_by(login='admin').first()
# if not admin:
# admin = User(login="admin", password=generate_password_hash("admin"))
# db.session.add(admin)
# db.session.commit()
# Required by Flask-Login
@login_manager.user_loader
def load_user(user_id):
return db.session.query(User).get(user_id)
|
MiloJiang/KeepMyDevices
|
Server/keepmydevices/login.py
|
Python
|
mit
| 1,802
|
import os
import re
import jinja2
import webapp2
# Set useful fields
root_dir = os.path.dirname(__file__)
template_dir = os.path.join(root_dir, 'templates')
jinja_environment = jinja2.Environment(autoescape=True,
loader=jinja2.FileSystemLoader(template_dir))
app = webapp2.WSGIApplication([
(r'/', 'handlers.home.HomeHandler'),
(r'/signup', 'handlers.signup.SignUpHandler'),
(r'/login', 'handlers.login.LoginHandler'),
(r'/logout', 'handlers.logout.LogoutHandler'),
], debug=True)
|
mattsp1290/rawr
|
routes.py
|
Python
|
mit
| 517
|
import socket
from os import unlink, devnull
import logbook
from logbook.queues import ZeroMQSubscriber
from tests.utils.launcher import Launcher
from tests.utils.setup import Setup
from tests.utils.loop import BooleanLoop, CounterLoop
from tests.utils.driver import LocalStorageDriver
from tests.utils.benchmark import Benchmark, BenchmarkData
from tests.utils.timer import Timer
from tests.utils.units import MB
SMALL = 1 * MB
MEDIUM = 10 * MB
BIG = 100 * MB
FORMAT_STRING = (
u'[{record.time:%H:%M:%S}] '
u'{record.level_name}: {record.channel}: {record.message}'
)
# TODO: this function should raise an exception if something is wrong
def setup_debug(benchmark):
benchmark.tmpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
benchmark.tmpsock.bind(('localhost', 0))
benchmark.log_uri = 'tcp://{}:{}'.format(*benchmark.tmpsock.getsockname())
benchmark.tmpsock.close()
benchmark.level = logbook.DEBUG if benchmark.verbose else logbook.INFO
benchmark.log_setup = logbook.NestedSetup([
logbook.NullHandler(),
logbook.StderrHandler(
level=benchmark.level, format_string=FORMAT_STRING
),
logbook.FileHandler(devnull),
logbook.Processor(benchmark.launcher.process_record),
])
benchmark.subscriber = ZeroMQSubscriber(benchmark.log_uri, multi=True)
benchmark.subscriber.dispatch_in_background(setup=benchmark.log_setup)
benchmark.launcher(benchmark.log_uri)
def setup_config(benchmark, num):
benchmark.json_file = '{}.json'.format(benchmark.name.replace(' ', '_'))
benchmark.reps = []
for i in range(num):
name = 'rep{}'.format(i + 1)
benchmark.reps.append(LocalStorageDriver(name))
setup = Setup()
for rep in benchmark.reps:
setup.add(rep)
setup.save(benchmark.json_file)
def launcher(benchmark, num):
if num < 1:
benchmark.log.error("You should launch at least 1 driver")
        raise ValueError("You should launch at least 1 driver")
benchmark.launcher = None
setup_config(benchmark, num)
loop = CounterLoop(num + 1)
benchmark.launcher = Launcher(
setup=benchmark.json_file
)
benchmark.launcher.on_referee_started(loop.check)
for i in range(num):
benchmark.launcher.on_driver_started(
loop.check,
driver='rep{}'.format(i + 1)
)
setup_debug(benchmark)
loop.run(timeout=5)
class BenchmarkSimpleCopy(Benchmark):
def launch_onitu(self):
launcher(self, 2)
def stop_onitu(self):
self.launcher.kill()
unlink(self.json_file)
for rep in self.reps:
rep.close()
def setup(self):
self.launch_onitu()
def teardown(self):
self.stop_onitu()
def copy_file(self, filename, size, timeout=10):
self.launcher.unset_all_events()
loop = BooleanLoop()
self.launcher.on_transfer_ended(
loop.stop, d_to='rep2', filename=filename
)
self.reps[0].generate(filename, size)
with Timer() as t:
loop.run(timeout=timeout)
assert (
self.reps[0].checksum(filename) == self.reps[1].checksum(filename)
)
return t.msecs
def test_small(self):
total = BenchmarkData('test_small', 'Copy 1000 times a 1M file')
for i in range(1000):
try:
t = self.copy_file('small{}'.format(i), SMALL)
total.add_result(t)
except BaseException as e:
self.log.warn('Error in test_small')
self.log.warn(e)
return total
def test_medium(self):
total = BenchmarkData('test_medium', 'Copy 100 times a 10M file')
for i in range(100):
try:
t = self.copy_file('medium{}'.format(i), MEDIUM)
total.add_result(t)
except BaseException as e:
self.log.warn('Error in test_medium')
self.log.warn(e)
return total
def test_big(self):
total = BenchmarkData('test_big', 'Copy 10 times a 100M file')
for i in range(10):
try:
t = self.copy_file('big{}'.format(i), BIG)
total.add_result(t)
except BaseException as e:
self.log.warn('Error in test_big')
self.log.warn(e)
return total
class BenchmarkMultipleCopies(Benchmark):
def launch_onitu(self):
launcher(self, 3)
def stop_onitu(self):
self.launcher.kill()
unlink(self.json_file)
for rep in self.reps:
rep.close()
def setup(self):
self.launch_onitu()
def teardown(self):
self.stop_onitu()
def copy_file(self, filename, size, timeout=20):
self.launcher.unset_all_events()
loop = CounterLoop(2)
self.launcher.on_transfer_ended(
loop.check, d_to='rep2', filename=filename
)
self.launcher.on_transfer_ended(
loop.check, d_to='rep3', filename=filename
)
self.reps[0].generate(filename, size)
with Timer() as t:
loop.run(timeout=timeout)
assert (
self.reps[0].checksum(filename) == self.reps[1].checksum(filename)
)
assert (
self.reps[0].checksum(filename) == self.reps[2].checksum(filename)
)
return t.msecs
def test_small(self):
total = BenchmarkData('test_small', 'Copy 1000 times a 1M file')
for i in range(1000):
try:
t = self.copy_file('small{}'.format(i), SMALL)
total.add_result(t)
except BaseException as e:
self.log.warn('Error in test_small')
self.log.warn(e)
return total
def test_medium(self):
total = BenchmarkData('test_medium', 'Copy 100 times a 10M file')
for i in range(100):
try:
t = self.copy_file('medium{}'.format(i), MEDIUM)
total.add_result(t)
except BaseException as e:
self.log.warn('Error in test_medium')
self.log.warn(e)
return total
def test_big(self):
total = BenchmarkData('test_big', 'Copy 10 times a 100M file')
for i in range(10):
try:
t = self.copy_file('big{}'.format(i), BIG)
total.add_result(t)
except BaseException as e:
self.log.warn('Error in test_big')
self.log.warn(e)
return total
if __name__ == '__main__':
bench_simple = BenchmarkSimpleCopy('BENCH_SIMPLE_COPY', verbose=True)
bench_simple.run()
bench_multiple = BenchmarkMultipleCopies(
'BENCH_MULTIPLE_COPIES',
verbose=True
)
bench_multiple.run()
print('{:=^28}'.format(' simple copy '))
bench_simple.display()
print('{:=^28}'.format(' multiple copies '))
bench_multiple.display()
|
onitu/onitu
|
tests/benchmarks/benchmarks.py
|
Python
|
mit
| 6,954
|
from smbio.util.menu import Menu
# "Declare" your menus.
main_menu = Menu('Welcome to the show!', reentrant=True)
sub_menu = Menu('The cool submenu.')
# Add menus as submenus easily.
main_menu.add('Cool stuff', sub_menu)
# Annotate which menu a function belongs to, and what its text should be.
@main_menu.function('An option')
def pzip_test():
print('An option')
@main_menu.function('A better option')
def something_else():
print('A better option')
@sub_menu.function('A cool option')
def cool1():
print('being cool')
@sub_menu.function('A cooler option')
def cool2():
print('being cooler')
# Then, just have the menu display when you'd like.
if __name__ == '__main__':
main_menu.display()
|
brenns10/smbio
|
examples/menu.py
|
Python
|
mit
| 722
|
from yapsy import IPlugin
from extraction import ExtractedDataItem
__author__ = 'adam.jorgensen.za@gmail.com'
class PostProcessedDataItem(ExtractedDataItem):
"""
Overrides the ExtractedDataItem class to provide an indication that an
ExtractedDataItem instance has undergone post-processing.
"""
def __init__(self, seq=None, **kwargs):
self.processed_by = []
self.processing_errors = []
super(PostProcessedDataItem, self).__init__(seq, **kwargs)
class PostProcessingPluginInterface(IPlugin.IPlugin):
"""
Defines an interface for a plugin that processes data extracted from a source and transforms it in some fashion.
"""
def can_process(self, data_model_name, data_model):
"""
Determines whether the plugin can process data associated with a given data model. Returns a bool.
"""
return False
def process(self, data_items, data_model_name, data_model):
"""
For a given data model, processes a list of (UID value, ExtractedDataItem instance) tuples and transforms each
ExtractedDataItem instance into a PostProcessedDataItem instance.
Returns a list of (UID value, PostProcessedDataItem instance) tuples.
"""
return []
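        # A minimal illustrative subclass (hypothetical names, not part of the API;
        # assumes ExtractedDataItem is dict-like):
        #
        #     class NameNormalizer(PostProcessingPluginInterface):
        #         def can_process(self, data_model_name, data_model):
        #             return 'name' in data_model
        #         def process(self, data_items, data_model_name, data_model):
        #             results = []
        #             for uid, item in data_items:
        #                 processed = PostProcessedDataItem(item)
        #                 processed['name'] = item['name'].strip().title()
        #                 processed.processed_by.append('NameNormalizer')
        #                 results.append((uid, processed))
        #             return results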
|
slventures/jormungand
|
src/jormungand/api/postprocessing.py
|
Python
|
mit
| 1,271
|
from distutils.core import setup
setup(name='GraphRank',
version='0.1.0',
description='GraphRank Core Classes and Utils',
author='Sean Douglas',
author_email='douglas.seanp@gmail.com',
url='https://github.com/ldouguy/GraphRank',
packages=['graphrank'],
)
|
ldouguy/GraphRank
|
setup.py
|
Python
|
mit
| 261
|
import re
from capybara.helpers import desc, normalize_text, toregex
from capybara.queries.base_query import BaseQuery
from capybara.utils import isregex
class TitleQuery(BaseQuery):
"""
Queries the title content of a node.
Args:
expected_title (str | RegexObject): The desired title.
exact (bool, optional): Whether the text should match exactly. Defaults to False.
wait (bool | int | float, optional): Whether and how long to wait for synchronization.
Defaults to :data:`capybara.default_max_wait_time`.
"""
def __init__(self, expected_title, exact=False, wait=None):
self.expected_title = (expected_title if isregex(expected_title)
else normalize_text(expected_title))
self.actual_title = None
self.search_regexp = toregex(expected_title, exact=exact)
self.options = {
"wait": wait}
def resolves_for(self, node):
"""
Resolves this query relative to the given node.
Args:
node (node.Document): The node to be evaluated.
Returns:
bool: Whether the given node matches this query.
"""
self.actual_title = normalize_text(node.title)
return bool(self.search_regexp.search(self.actual_title))
@property
def wait(self):
""" int | float: How long to wait for synchronization. """
return self.normalize_wait(self.options["wait"])
@property
def failure_message(self):
""" str: A message describing the query failure. """
return self._build_message()
@property
def negative_failure_message(self):
""" str: A message describing the negative query failure. """
return self._build_message(" not")
def _build_message(self, negated=""):
verb = "match" if isregex(self.expected_title) else "include"
return "expected {actual}{negated} to {verb} {expected}".format(
actual=desc(self.actual_title),
negated=negated,
verb=verb,
expected=desc(self.expected_title))
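        # Illustrative output (assumed values, not executed; assumes desc() quotes its
        # argument): with an actual title of "Login" and expected_title "Home",
        # failure_message reads roughly:
        #   expected "Login" to include "Home"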
|
elliterate/capybara.py
|
capybara/queries/title_query.py
|
Python
|
mit
| 2,115
|
import re
import threading
import time
import unittest
from selenium import webdriver
from app import create_app, db, fake
from app.models import Role, User, Post
class SeleniumTestCase(unittest.TestCase):
client = None
@classmethod
def setUpClass(cls):
# start Chrome
options = webdriver.ChromeOptions()
options.add_argument('headless')
try:
cls.client = webdriver.Chrome(chrome_options=options)
        except Exception:
pass
# skip these tests if the browser could not be started
if cls.client:
# create the application
cls.app = create_app('testing')
cls.app_context = cls.app.app_context()
cls.app_context.push()
# suppress logging to keep unittest output clean
import logging
logger = logging.getLogger('werkzeug')
logger.setLevel("ERROR")
# create the database and populate with some fake data
db.create_all()
Role.insert_roles()
fake.users(10)
fake.posts(10)
# add an administrator user
admin_role = Role.query.filter_by(name='Administrator').first()
admin = User(email='john@example.com',
username='john', password='cat',
role=admin_role, confirmed=True)
db.session.add(admin)
db.session.commit()
# start the Flask server in a thread
cls.server_thread = threading.Thread(target=cls.app.run,
kwargs={'debug': False})
cls.server_thread.start()
# give the server a second to ensure it is up
time.sleep(1)
@classmethod
def tearDownClass(cls):
if cls.client:
# stop the flask server and the browser
cls.client.get('http://localhost:5000/shutdown')
cls.client.quit()
cls.server_thread.join()
# destroy database
db.drop_all()
db.session.remove()
# remove application context
cls.app_context.pop()
def setUp(self):
if not self.client:
self.skipTest('Web browser not available')
def tearDown(self):
pass
def test_admin_home_page(self):
# navigate to home page
self.client.get('http://localhost:5000/')
        self.assertTrue(re.search(r'Hello,\s+Stranger!',
                                  self.client.page_source))
# navigate to login page
self.client.find_element_by_link_text('Log In').click()
self.assertIn('<h1>Login</h1>', self.client.page_source)
# login
self.client.find_element_by_name('email').\
send_keys('john@example.com')
self.client.find_element_by_name('password').send_keys('cat')
self.client.find_element_by_name('submit').click()
        self.assertTrue(re.search(r'Hello,\s+john!', self.client.page_source))
# navigate to the user's profile page
self.client.find_element_by_link_text('Profile').click()
self.assertIn('<h1>john</h1>', self.client.page_source)
|
miguelgrinberg/flasky
|
tests/test_selenium.py
|
Python
|
mit
| 3,228
|
import math
import torch
from torch.optim import Optimizer
class GigaWolfOptimizer(Optimizer):
def __init__(self, params, optimizer, optimizer2):
defaults = dict()
self.optimizer = optimizer
self.optimizer2 = optimizer2
super(GigaWolfOptimizer, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step across the two optimizers and uses GigaWolf.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
|
255BITS/HyperGAN
|
hypergan/optimizers/needs_pytorch/giga_wolf_optimizer.py
|
Python
|
mit
| 553
|
from django.db import models
from django.contrib.flatpages.models import FlatPage
class FlatpagesNav(models.Model):
"""Defines whether a flatpage should appear in the main or footer nav and/or if this setting is currently active
or not"""
flatpage = models.ForeignKey(FlatPage)
in_main_nav = models.BooleanField(default=False)
in_footer_nav = models.BooleanField(default=False)
order = models.IntegerField()
active = models.BooleanField(default=True)
class Meta:
ordering = ('order', )
def __unicode__(self):
return '%s (%s)' % (self.flatpage.title, 'Active' if self.active else 'Inactive')
|
Tivix/django-flatpages-nav
|
flatpages_nav/models.py
|
Python
|
mit
| 655
|
import liblo
import sys
import serial
import time
serial_wait_time_s = 0.05
prev_timestamp_s = 0
class Game:
def __init__(self):
self.players = [ Player(1), Player(2) ]
self.waiting_for_headsets = True
def tick(self, serial_conn):
# Game has only two states on the server:
# 1. Waiting for the players to put on their headsets properly
# 2. Headsets are on properly and we're sending serial data to the arduino
self._check_headsets()
if self.waiting_for_headsets:
status_changed = False
for player in self.players:
if player.headset_status.status_changed:
status_changed = True
break
if status_changed:
print "Waiting for headsets..."
for player in self.players:
player.headset_status.status_print()
player.headset_status.status_changed = False
else:
if serial_conn != None and serial_conn.isOpen():
global prev_timestamp_s
global serial_wait_time_s
curr_timestamp_s = time.time()
time_diff = curr_timestamp_s - prev_timestamp_s
if time_diff >= serial_wait_time_s:
print "HERE"
serial_conn.write(chr(0))
for player in self.players:
serial_conn.write(player.serial_alpha())
serial_conn.write(player.serial_beta())
serial_conn.flush()
prev_timestamp_s = curr_timestamp_s
def _check_headsets(self):
players_ready = True
for player in self.players:
if not player.headset_status.all_is_good():
players_ready = False
break
self.waiting_for_headsets = not players_ready
class Player:
def __init__(self, playerNum):
self.headset_status = HeadsetStatus(playerNum)
self.alpha = [0,0,0,0]
self.beta = [0,0,0,0]
def set_alpha(self, args):
self.alpha[0], self.alpha[1], self.alpha[2], self.alpha[3] = args
def set_beta(self, args):
self.beta[0], self.beta[1], self.beta[2], self.beta[3] = args
def serial_alpha(self):
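        # Average the four alpha band values and scale them into a single
        # byte; byte 0 is reserved as the frame marker written in Game.tick,
        # so the result is clamped to chr(1). serial_beta mirrors this.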
        result = chr(1)
        try:
            result = chr(int((float(sum(self.alpha)) / 4.0) * 255))
            if result == chr(0):
                result = chr(1)
        except Exception:
            pass
        return result
    def serial_beta(self):
        result = chr(1)
        try:
            result = chr(int((float(sum(self.beta)) / 4.0) * 255))
            if result == chr(0):
                result = chr(1)
        except Exception:
            pass
        return result
class HeadsetStatus:
def __init__(self, playerNum):
self.player_num = playerNum
self.left_ear = "bad"
self.left_front = "bad"
self.right_front = "bad"
self.right_ear = "bad"
self.touching_forehead = False
self.status_changed = True
def all_is_good(self):
return (self.left_ear == "good" and self.left_front == "good" and self.right_front == "good" and self.right_ear == "good")
def update_with_horseshoe(self, args):
le, lf, rf, re = args
temp = self._status_num_to_readable(le)
if temp != self.left_ear:
self.status_changed = True
self.left_ear = temp
temp = self._status_num_to_readable(lf)
if temp != self.left_front:
self.status_changed = True
self.left_front = temp
temp = self._status_num_to_readable(rf)
if temp != self.right_front:
self.status_changed = True
self.right_front = temp
        temp = self._status_num_to_readable(re)
        if temp != self.right_ear:
            self.status_changed = True
        self.right_ear = temp
def update_with_touching_forehead(self, args):
self.touching_forehead = bool(args)
def _status_num_to_readable(self, num):
if num == 1:
return "good"
elif num == 2:
return "ok"
else:
return "bad"
def status_print(self):
print "Player " + str(self.player_num) + " Sensor Status (<left ear>, <left front>, <right front>, <right ear>):"
print self.left_ear + " " + self.left_front + " " + self.right_front + " " + self.right_ear
# Globals
game = Game()
servers = [ None, None ]
def status_callback(path, args, types, src, data):
global game
player_idx = data-1
game.players[player_idx].headset_status.update_with_horseshoe(args)
def touching_forehead_callback(path, args, types, src, data):
global game
player_idx = data-1
game.players[player_idx].headset_status.update_with_touching_forehead(args)
def alpha_callback(path, args, types, src, data):
global game
player_idx = data-1
game.players[player_idx].set_alpha(args)
def beta_callback(path, args, types, src, data):
global game
player_idx = data-1
game.players[player_idx].set_beta(args)
def connect_serial():
serial_conn = None
retryTime = 1.0
while True:
try:
serial_conn = serial.Serial(serial_port, baud_rate, timeout=10.0)
if serial_conn.isOpen():
break
else:
print "Failed to open serial connection, retrying in " + retryTime + " seconds..."
except ValueError as e:
print "Value Error: " + e.strerror
exit(-1)
except OSError as e:
print "OS Error: " + e.strerror
exit(-1)
except serial.SerialException as e:
print "Error setting up serial connection, retrying..."
time.sleep(retryTime)
        retryTime = min(10.0, retryTime + 1.0)
return serial_conn
if __name__ == "__main__":
if len(sys.argv) < 5:
print "Usage:"
print "python " + sys.argv[0] + " <osc_port_muse_p1> <osc_port_muse_p2> <serial_port> <baud_rate>"
sys.exit(0)
# Open the OSC server, listening on the specified port
try:
servers[0] = liblo.Server(int(sys.argv[1]))
servers[1] = liblo.Server(int(sys.argv[2]))
except liblo.ServerError, err:
print str(err)
sys.exit()
except ValueError:
print "Ports must be a valid integers."
sys.exit()
count = 1
for server in servers:
server.add_method("/muse/dsp/elements/horseshoe", 'ffff', status_callback, count)
server.add_method("/muse/dsp/elements/touching_forehead", 'i', touching_forehead_callback, count)
server.add_method("/muse/dsp/elements/alpha", 'ffff', alpha_callback, count)
server.add_method("/muse/dsp/elements/beta", 'ffff', beta_callback, count)
count += 1
serial_port = sys.argv[3]
    baud_rate = int(sys.argv[4])
# Attempt to connect to the serial
serial_conn = connect_serial()
while True:
if (serial_conn is None) or (not serial_conn.isOpen()):
serial_conn = connect_serial()
for server in servers:
server.recv(0)
game.tick(serial_conn)
|
bhagman/PourCourtesy
|
muse_server.py
|
Python
|
mit
| 7,372
|
##
# LL(1) parser generated by the Syntax tool.
#
# https://www.npmjs.com/package/syntax-cli
#
# npm install -g syntax-cli
#
# syntax-cli --help
#
# To regenerate run:
#
# syntax-cli \
# --grammar ~/path-to-grammar-file \
# --mode LL1 \
# --output ~/parsermodule.py
##
yytext = ''
yyleng = 0
__ = None
EOF = '$'
def on_parse_begin(string):
pass
def on_parse_end(parsed):
pass
{{{MODULE_INCLUDE}}}
{{{PRODUCTION_HANDLERS}}}
ps = {{{PRODUCTIONS}}}
tks = {{{TOKENS}}}
tbl = {{{TABLE}}}
s = None
{{{TOKENIZER}}}
def set_tokenizer(custom_tokenizer):
global _tokenizer
_tokenizer = custom_tokenizer
def get_tokenizer():
return _tokenizer
def parse(string):
global s
on_parse_begin(string)
if _tokenizer is None:
raise Exception('_tokenizer instance wasn\'t specified.')
_tokenizer.init_string(string)
s = [EOF, {{{START}}}]
t = _tokenizer.get_next_token()
to = None
tt = None
while True:
to = s.pop()
tt = tks[t['type']]
if (to == tt):
t = _tokenizer.get_next_token()
continue
der(to, t, tt)
if not _tokenizer.has_more_tokens() and len(s) <= 1:
break
while len(s) != 1:
der(s.pop(), t, tt)
if s[0] != EOF or t['type'] != EOF:
_parse_error('stack is not empty: ' + str(s) + ', ' + str(t['value']))
return True
def der(to, t, tt):
npn = tbl[to][tt]
if npn is None:
_unexpected_token(t)
s.extend(ps[int(npn)][0])
def _unexpected_token(token):
if token['type'] == EOF:
_unexpected_end_of_input()
_tokenizer.throw_unexpected_token(
token['value'],
token['start_line'],
token['start_column']
)
def _unexpected_end_of_input():
_parse_error('Unexpected end of input.')
def _parse_error(message):
raise Exception('SyntaxError: ' + str(message))
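# Usage sketch (assumes the generated module's {{{TOKENIZER}}} section
# defines a Tokenizer class):
#     set_tokenizer(Tokenizer())
#     parse("...input string in the grammar's language...")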
|
DmitrySoshnikov/syntax
|
src/plugins/python/templates/ll.template.py
|
Python
|
mit
| 1,917
|
"""
Revision ID: 0317_uploads_for_all
Revises: 0316_int_letters_permission
Create Date: 2019-05-13 10:44:51.867661
"""
from alembic import op
from app.models import UPLOAD_LETTERS
revision = '0317_uploads_for_all'
down_revision = '0316_int_letters_permission'
def upgrade():
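    # Grant the UPLOAD_LETTERS permission to every service that does not
    # already have it; the NOT EXISTS guard keeps the insert idempotent.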
op.execute("""
INSERT INTO
service_permissions (service_id, permission, created_at)
SELECT
id, '{permission}', now()
FROM
services
WHERE
NOT EXISTS (
SELECT
FROM
service_permissions
WHERE
service_id = services.id and
permission = '{permission}'
)
""".format(
permission=UPLOAD_LETTERS
))
def downgrade():
op.execute("DELETE from service_permissions where permission = '{}'".format(
UPLOAD_LETTERS
))
|
alphagov/notifications-api
|
migrations/versions/0317_uploads_for_all.py
|
Python
|
mit
| 918
|
import unittest
import json
from decision import server
class DecisionTestCase(unittest.TestCase):
def setUp(self):
server.app.config['TESTING'] = True
self.client = server.app.test_client()
def test_server(self):
        self.assertEqual(self.client.get('/').status_code, 200)
        self.assertEqual(self.client.get('/decisions').status_code, 405)
        self.assertEqual(self.client.get('/doesnotexist').status_code, 404)
def test_change_name_returns_successful_response(self):
data = {
"action": "change-name-marriage",
"data": {
"iso-country-code": "GB"
},
"context": {
"session-id": "123456",
"transaction-id": "ABCDEFG"
}
}
rv = self.client.post('/decisions',
data=json.dumps(data),
content_type='application/json')
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.headers.get('content-type'), 'application/json')
def test_change_name_returns_bad_request(self):
data = {
"action": "change-name-marriage",
"context": {
"session-id": "123456",
"transaction-id": "ABCDEFG"
}
}
rv = self.client.post('/decisions',
data=json.dumps(data),
content_type='application/json')
        self.assertEqual(rv.status_code, 400)
        self.assertEqual(rv.headers.get('content-type'), 'application/json')
|
LandRegistry/decision-alpha
|
tests/test_app.py
|
Python
|
mit
| 1,621
|
from collections import namedtuple
from nose.tools import eq_, raises
from ...features import Feature
from ..sklearn_classifier import ScikitLearnClassifier
from ..test_statistics import table
class FakeIdentityEstimator:
def __init__(self):
self.classes_ = [True, False]
self._params = {}
def get_params(self):
return self._params
def fit(self, vals, labels, sample_weight=None):
return None
def predict(self, vals):
return [vals[0][0]]
def predict_proba(self, vals):
return [[vals[0][0] * True, vals[0][0] * False]]
class FakeIdentityClassifier(ScikitLearnClassifier):
Estimator = FakeIdentityEstimator
def test_sklearn_classifier():
skc = FakeIdentityClassifier(
[Feature("foo")], version="0.0.1")
eq_(skc.version, "0.0.1")
cv_feature_values = [
([True], True),
([False], False),
([True], True),
([False], False),
([True], True),
([False], False),
([True], True),
([False], False),
([True], True),
([False], False)
]
table_statistic = table()
# Ensures that one call executes in the local process
skc._generate_test_stats((0, cv_feature_values[:5], cv_feature_values[5:],
[table_statistic]))
test_stats = skc.cross_validate(cv_feature_values,
test_statistics=[table_statistic])
assert 'cross-validation' in test_stats
eq_(test_stats[table_statistic],
{True: {True: 5},
False: {False: 5}})
assert 'table' in skc.format_info(format="json")['test_stats']
@raises(ValueError)
def test_sklearn_format_error():
skc = FakeIdentityClassifier(
[Feature("foo")], version="0.0.1")
skc.format_info(format="foo")
|
yafeunteun/wikipedia-spam-classifier
|
revscoring/revscoring/scorer_models/tests/test_sklearn_classifier.py
|
Python
|
mit
| 1,825
|
# Copyright (C) 2017 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file "docs/LICENSE" for copying permission.
from cuckoo.processing.static import ELF
def test_elf_static_info():
assert ELF("tests/files/busybox-i686.elf").run() == {
"file_header": {
"abi_version": 0,
"class": "ELF32",
"data": "2's complement, little endian",
"ei_version": "1 (current)",
"entry_point_address": "0x08048168",
"flags": "0x00000000",
"machine": "Intel 80386",
"magic": "\\x7fELF",
"number_of_program_headers": 3,
"number_of_section_headers": 13,
"os_abi": "UNIX - System V",
"section_header_string_table_index": 12,
"size_of_program_headers": 32,
"size_of_section_headers": 40,
"size_of_this_header": 52,
"start_of_program_headers": 52,
"start_of_section_headers": 898052,
"type": "EXEC (Executable file)",
"version": "0x1",
},
"program_headers": [
{"addr": "0x08048000", "flags": "R E", "size": 896994, "type": "LOAD"},
{"addr": "0x08123000", "flags": "RW", "size": 19012, "type": "LOAD"},
{"addr": "0x00000000", "flags": "RW", "size": 0, "type": "GNU_STACK"},
],
"section_headers": [
{"addr": "0x00000000", "name": "", "size": 0, "type": "NULL"},
{"addr": "0x08048094", "name": ".init", "size": 28, "type": "PROGBITS"},
{"addr": "0x080480b0", "name": ".text", "size": 721180, "type": "PROGBITS"},
{"addr": "0x080f81cc", "name": ".fini", "size": 23, "type": "PROGBITS"},
{"addr": "0x080f81f0", "name": ".rodata", "size": 175602, "type": "PROGBITS"},
{"addr": "0x08123000", "name": ".eh_frame", "size": 4, "type": "PROGBITS"},
{"addr": "0x08123004", "name": ".ctors", "size": 8, "type": "PROGBITS"},
{"addr": "0x0812300c", "name": ".dtors", "size": 8, "type": "PROGBITS"},
{"addr": "0x08123014", "name": ".jcr", "size": 4, "type": "PROGBITS"},
{"addr": "0x08123018", "name": ".got.plt", "size": 12, "type": "PROGBITS"},
{"addr": "0x08123024", "name": ".data", "size": 904, "type": "PROGBITS"},
{"addr": "0x081233b0", "name": ".bss", "size": 18068, "type": "NOBITS"},
{"addr": "0x00000000", "name": ".shstrtab", "size": 86, "type": "STRTAB"},
],
"dynamic_tags": [],
"notes": [],
"relocations": [],
"symbol_tables": [],
}
def test_elf_static_info_tags():
assert ELF("tests/files/ls-x86_64.elf").run()["dynamic_tags"] == [
{"tag": "0x0000000000000001", "type": "NEEDED", "value": "Shared library: [libselinux.so.1]"},
{"tag": "0x0000000000000001", "type": "NEEDED", "value": "Shared library: [libc.so.6]"},
{"tag": "0x000000000000000c", "type": "INIT", "value": "0x00000000004022b8"},
{"tag": "0x000000000000000d", "type": "FINI", "value": "0x0000000000413c8c"},
{"tag": "0x0000000000000019", "type": "INIT_ARRAY", "value": "0x000000000061de00"},
{"tag": "0x000000000000001b", "type": "INIT_ARRAYSZ", "value": "8 (bytes)"},
{"tag": "0x000000000000001a", "type": "FINI_ARRAY", "value": "0x000000000061de08"},
{"tag": "0x000000000000001c", "type": "FINI_ARRAYSZ", "value": "8 (bytes)"},
{"tag": "0x000000006ffffef5", "type": "GNU_HASH", "value": "0x0000000000400298"},
{"tag": "0x0000000000000005", "type": "STRTAB", "value": "0x0000000000401030"},
{"tag": "0x0000000000000006", "type": "SYMTAB", "value": "0x0000000000400358"},
{"tag": "0x000000000000000a", "type": "STRSZ", "value": "1500 (bytes)"},
{"tag": "0x000000000000000b", "type": "SYMENT", "value": "24 (bytes)"},
{"tag": "0x0000000000000015", "type": "DEBUG", "value": "0x0000000000000000"},
{"tag": "0x0000000000000003", "type": "PLTGOT", "value": "0x000000000061e000"},
{"tag": "0x0000000000000002", "type": "PLTRELSZ", "value": "2688 (bytes)"},
{"tag": "0x0000000000000014", "type": "PLTREL", "value": "RELA"},
{"tag": "0x0000000000000017", "type": "JMPREL", "value": "0x0000000000401838"},
{"tag": "0x0000000000000007", "type": "RELA", "value": "0x0000000000401790"},
{"tag": "0x0000000000000008", "type": "RELASZ", "value": "168 (bytes)"},
{"tag": "0x0000000000000009", "type": "RELAENT", "value": "24 (bytes)"},
{"tag": "0x000000006ffffffe", "type": "VERNEED", "value": "0x0000000000401720"},
{"tag": "0x000000006fffffff", "type": "VERNEEDNUM", "value": "1"},
{"tag": "0x000000006ffffff0", "type": "VERSYM", "value": "0x000000000040160c"},
{"tag": "0x0000000000000000", "type": "NULL", "value": "0x0000000000000000"},
]
def test_elf_static_info_symbols():
assert ELF("tests/files/ls-x86_64.elf").run()["symbol_tables"] == [
{"bind": "LOCAL", "ndx_name": "", "type": "NOTYPE", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__ctype_toupper_loc", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__uflow", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "getenv", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "sigprocmask", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "raise", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "localtime", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__mempcpy_chk", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "abort", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__errno_location", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "strncmp", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "WEAK", "ndx_name": "_ITM_deregisterTMCloneTable", "type": "NOTYPE", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "_exit", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "strcpy", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__fpending", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "isatty", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "sigaction", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "iswcntrl", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "wcswidth", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "localeconv", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "mbstowcs", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "readlink", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "clock_gettime", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "setenv", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "textdomain", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "fclose", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "opendir", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "getpwuid", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "bindtextdomain", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "stpcpy", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "dcgettext", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__ctype_get_mb_cur_max", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "strlen", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__lxstat", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__stack_chk_fail", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "getopt_long", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "mbrtowc", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "strchr", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "getgrgid", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__overflow", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "strrchr", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "fgetfilecon", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "gmtime_r", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "lseek", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "gettimeofday", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__assert_fail", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__strtoul_internal", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "fnmatch", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "memset", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "fscanf", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "ioctl", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "close", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "closedir", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__libc_start_main", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "memcmp", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "_setjmp", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "fputs_unlocked", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "calloc", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "lgetfilecon", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "strcmp", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "signal", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "dirfd", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "getpwnam", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__memcpy_chk", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "sigemptyset", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "WEAK", "ndx_name": "__gmon_start__", "type": "NOTYPE", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "memcpy", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "getgrnam", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "getfilecon", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "tzset", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "fileno", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "tcgetpgrp", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__xstat", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "readdir", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "wcwidth", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "fflush", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "nl_langinfo", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "ungetc", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__fxstat", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "strcoll", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__freading", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "fwrite_unlocked", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "realloc", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "stpncpy", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "fdopen", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "setlocale", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__printf_chk", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "timegm", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "strftime", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "mempcpy", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "memmove", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "error", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "open", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "fseeko", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "WEAK", "ndx_name": "_Jv_RegisterClasses", "type": "NOTYPE", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "unsetenv", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "strtoul", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__cxa_atexit", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "wcstombs", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "getxattr", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "freecon", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "sigismember", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "exit", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "fwrite", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__fprintf_chk", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "WEAK", "ndx_name": "_ITM_registerTMCloneTable", "type": "NOTYPE", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "fflush_unlocked", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "mbsinit", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "iswprint", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "sigaddset", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "strstr", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__ctype_tolower_loc", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__ctype_b_loc", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__sprintf_chk", "type": "FUNC", "value": "0x0000000000000000"},
{"bind": "GLOBAL", "ndx_name": "__progname", "type": "OBJECT", "value": "0x000000000061e600"},
{"bind": "GLOBAL", "ndx_name": "_fini", "type": "FUNC", "value": "0x0000000000413c8c"},
{"bind": "GLOBAL", "ndx_name": "optind", "type": "OBJECT", "value": "0x000000000061e610"},
{"bind": "GLOBAL", "ndx_name": "_init", "type": "FUNC", "value": "0x00000000004022b8"},
{"bind": "GLOBAL", "ndx_name": "free", "type": "FUNC", "value": "0x0000000000402340"},
{"bind": "WEAK", "ndx_name": "program_invocation_name", "type": "OBJECT", "value": "0x000000000061e620"},
{"bind": "GLOBAL", "ndx_name": "__bss_start", "type": "NOTYPE", "value": "0x000000000061e600"},
{"bind": "GLOBAL", "ndx_name": "_end", "type": "NOTYPE", "value": "0x000000000061f368"},
{"bind": "GLOBAL", "ndx_name": "__progname_full", "type": "OBJECT", "value": "0x000000000061e620"},
{"bind": "GLOBAL", "ndx_name": "_obstack_memory_used", "type": "FUNC", "value": "0x0000000000412960"},
{"bind": "GLOBAL", "ndx_name": "obstack_alloc_failed_handler", "type": "OBJECT", "value": "0x000000000061e5f8"},
{"bind": "GLOBAL", "ndx_name": "_obstack_begin", "type": "FUNC", "value": "0x0000000000412780"},
{"bind": "GLOBAL", "ndx_name": "_edata", "type": "NOTYPE", "value": "0x000000000061e600"},
{"bind": "GLOBAL", "ndx_name": "stderr", "type": "OBJECT", "value": "0x000000000061e640"},
{"bind": "GLOBAL", "ndx_name": "_obstack_free", "type": "FUNC", "value": "0x00000000004128f0"},
{"bind": "WEAK", "ndx_name": "program_invocation_short_name", "type": "OBJECT", "value": "0x000000000061e600"},
{"bind": "GLOBAL", "ndx_name": "localtime_r", "type": "FUNC", "value": "0x00000000004023a0"},
{"bind": "GLOBAL", "ndx_name": "_obstack_allocated_p", "type": "FUNC", "value": "0x00000000004128c0"},
{"bind": "GLOBAL", "ndx_name": "optarg", "type": "OBJECT", "value": "0x000000000061e618"},
{"bind": "GLOBAL", "ndx_name": "_obstack_begin_1", "type": "FUNC", "value": "0x00000000004127a0"},
{"bind": "GLOBAL", "ndx_name": "_obstack_newchunk", "type": "FUNC", "value": "0x00000000004127c0"},
{"bind": "GLOBAL", "ndx_name": "malloc", "type": "FUNC", "value": "0x0000000000402790"},
{"bind": "GLOBAL", "ndx_name": "stdout", "type": "OBJECT", "value": "0x000000000061e608"},
]
def test_elf_static_info_notes():
assert ELF("tests/files/ls-x86_64.elf").run()["notes"] == [
{
"name": "GNU",
"note": "NT_GNU_ABI_TAG (ABI version tag)\n" +
" OS: Linux, ABI: 2.6.32",
"owner": "GNU",
"size": "0x0000000000000010"
},
{
"name": "GNU",
"note": "NT_GNU_BUILD_ID (unique build ID bitstring)\n" +
" Build ID: eca98eeadafddff44caf37ae3d4b227132861218",
"owner": "GNU",
"size": "0x0000000000000014",
},
]
def test_elf_static_info_relocations():
assert ELF("tests/files/ls-x86_64.elf").run()["relocations"] == [
{
"name": ".rela.dyn",
"entries": [
{
"info": "0x0000004100000006",
"name": "__gmon_start__",
"offset": "0x000000000061dff8",
"type": "R_X86_64_GLOB_DAT",
"value": "0x0000000000000000"
},
{
"info": "0x0000007200000005",
"name": "__progname",
"offset": "0x000000000061e600",
"type": "R_X86_64_COPY",
"value": "0x000000000061e600"
},
{
"info": "0x0000008800000005",
"name": "stdout",
"offset": "0x000000000061e608",
"type": "R_X86_64_COPY",
"value": "0x000000000061e608"
},
{
"info": "0x0000007400000005",
"name": "optind",
"offset": "0x000000000061e610",
"type": "R_X86_64_COPY",
"value": "0x000000000061e610"
},
{
"info": "0x0000008400000005",
"name": "optarg",
"offset": "0x000000000061e618",
"type": "R_X86_64_COPY",
"value": "0x000000000061e618"
},
{
"info": "0x0000007a00000005",
"name": "__progname_full",
"offset": "0x000000000061e620",
"type": "R_X86_64_COPY",
"value": "0x000000000061e620"
},
{
"info": "0x0000007f00000005",
"name": "stderr",
"offset": "0x000000000061e640",
"type": "R_X86_64_COPY",
"value": "0x000000000061e640"
}
],
},
{
"name": ".rela.plt",
"entries": [
{
"info": "0x0000000100000007",
"name": "__ctype_toupper_loc",
"offset": "0x000000000061e018",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000000200000007",
"name": "__uflow",
"offset": "0x000000000061e020",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000000300000007",
"name": "getenv",
"offset": "0x000000000061e028",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000000400000007",
"name": "sigprocmask",
"offset": "0x000000000061e030",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000000500000007",
"name": "raise",
"offset": "0x000000000061e038",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000007600000007",
"name": "free",
"offset": "0x000000000061e040",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000402340"
},
{
"info": "0x0000000600000007",
"name": "localtime",
"offset": "0x000000000061e048",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000000700000007",
"name": "__mempcpy_chk",
"offset": "0x000000000061e050",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000000800000007",
"name": "abort",
"offset": "0x000000000061e058",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000000900000007",
"name": "__errno_location",
"offset": "0x000000000061e060",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000000a00000007",
"name": "strncmp",
"offset": "0x000000000061e068",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000008200000007",
"name": "localtime_r",
"offset": "0x000000000061e070",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x00000000004023a0"
},
{
"info": "0x0000000c00000007",
"name": "_exit",
"offset": "0x000000000061e078",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000000d00000007",
"name": "strcpy",
"offset": "0x000000000061e080",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000000e00000007",
"name": "__fpending",
"offset": "0x000000000061e088",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000000f00000007",
"name": "isatty",
"offset": "0x000000000061e090",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001000000007",
"name": "sigaction",
"offset": "0x000000000061e098",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001100000007",
"name": "iswcntrl",
"offset": "0x000000000061e0a0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001200000007",
"name": "wcswidth",
"offset": "0x000000000061e0a8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001300000007",
"name": "localeconv",
"offset": "0x000000000061e0b0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001400000007",
"name": "mbstowcs",
"offset": "0x000000000061e0b8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001500000007",
"name": "readlink",
"offset": "0x000000000061e0c0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001600000007",
"name": "clock_gettime",
"offset": "0x000000000061e0c8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001700000007",
"name": "setenv",
"offset": "0x000000000061e0d0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001800000007",
"name": "textdomain",
"offset": "0x000000000061e0d8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001900000007",
"name": "fclose",
"offset": "0x000000000061e0e0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001a00000007",
"name": "opendir",
"offset": "0x000000000061e0e8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001b00000007",
"name": "getpwuid",
"offset": "0x000000000061e0f0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001c00000007",
"name": "bindtextdomain",
"offset": "0x000000000061e0f8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001d00000007",
"name": "stpcpy",
"offset": "0x000000000061e100",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001e00000007",
"name": "dcgettext",
"offset": "0x000000000061e108",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000001f00000007",
"name": "__ctype_get_mb_cur_max",
"offset": "0x000000000061e110",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002000000007",
"name": "strlen",
"offset": "0x000000000061e118",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002100000007",
"name": "__lxstat",
"offset": "0x000000000061e120",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002200000007",
"name": "__stack_chk_fail",
"offset": "0x000000000061e128",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002300000007",
"name": "getopt_long",
"offset": "0x000000000061e130",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002400000007",
"name": "mbrtowc",
"offset": "0x000000000061e138",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002500000007",
"name": "strchr",
"offset": "0x000000000061e140",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002600000007",
"name": "getgrgid",
"offset": "0x000000000061e148",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002700000007",
"name": "__overflow",
"offset": "0x000000000061e150",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002800000007",
"name": "strrchr",
"offset": "0x000000000061e158",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002900000007",
"name": "fgetfilecon",
"offset": "0x000000000061e160",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002a00000007",
"name": "gmtime_r",
"offset": "0x000000000061e168",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002b00000007",
"name": "lseek",
"offset": "0x000000000061e170",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002c00000007",
"name": "gettimeofday",
"offset": "0x000000000061e178",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002d00000007",
"name": "__assert_fail",
"offset": "0x000000000061e180",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002e00000007",
"name": "__strtoul_internal",
"offset": "0x000000000061e188",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000002f00000007",
"name": "fnmatch",
"offset": "0x000000000061e190",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003000000007",
"name": "memset",
"offset": "0x000000000061e198",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003100000007",
"name": "fscanf",
"offset": "0x000000000061e1a0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003200000007",
"name": "ioctl",
"offset": "0x000000000061e1a8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003300000007",
"name": "close",
"offset": "0x000000000061e1b0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003400000007",
"name": "closedir",
"offset": "0x000000000061e1b8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003500000007",
"name": "__libc_start_main",
"offset": "0x000000000061e1c0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003600000007",
"name": "memcmp",
"offset": "0x000000000061e1c8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003700000007",
"name": "_setjmp",
"offset": "0x000000000061e1d0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003800000007",
"name": "fputs_unlocked",
"offset": "0x000000000061e1d8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003900000007",
"name": "calloc",
"offset": "0x000000000061e1e0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003a00000007",
"name": "lgetfilecon",
"offset": "0x000000000061e1e8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003b00000007",
"name": "strcmp",
"offset": "0x000000000061e1f0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003c00000007",
"name": "signal",
"offset": "0x000000000061e1f8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003d00000007",
"name": "dirfd",
"offset": "0x000000000061e200",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003e00000007",
"name": "getpwnam",
"offset": "0x000000000061e208",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000003f00000007",
"name": "__memcpy_chk",
"offset": "0x000000000061e210",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004000000007",
"name": "sigemptyset",
"offset": "0x000000000061e218",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004200000007",
"name": "memcpy",
"offset": "0x000000000061e220",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004300000007",
"name": "getgrnam",
"offset": "0x000000000061e228",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004400000007",
"name": "getfilecon",
"offset": "0x000000000061e230",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004500000007",
"name": "tzset",
"offset": "0x000000000061e238",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004600000007",
"name": "fileno",
"offset": "0x000000000061e240",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004700000007",
"name": "tcgetpgrp",
"offset": "0x000000000061e248",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004800000007",
"name": "__xstat",
"offset": "0x000000000061e250",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004900000007",
"name": "readdir",
"offset": "0x000000000061e258",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004a00000007",
"name": "wcwidth",
"offset": "0x000000000061e260",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000008700000007",
"name": "malloc",
"offset": "0x000000000061e268",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000402790"
},
{
"info": "0x0000004b00000007",
"name": "fflush",
"offset": "0x000000000061e270",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004c00000007",
"name": "nl_langinfo",
"offset": "0x000000000061e278",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004d00000007",
"name": "ungetc",
"offset": "0x000000000061e280",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004e00000007",
"name": "__fxstat",
"offset": "0x000000000061e288",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000004f00000007",
"name": "strcoll",
"offset": "0x000000000061e290",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005000000007",
"name": "__freading",
"offset": "0x000000000061e298",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005100000007",
"name": "fwrite_unlocked",
"offset": "0x000000000061e2a0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005200000007",
"name": "realloc",
"offset": "0x000000000061e2a8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005300000007",
"name": "stpncpy",
"offset": "0x000000000061e2b0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005400000007",
"name": "fdopen",
"offset": "0x000000000061e2b8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005500000007",
"name": "setlocale",
"offset": "0x000000000061e2c0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005600000007",
"name": "__printf_chk",
"offset": "0x000000000061e2c8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005700000007",
"name": "timegm",
"offset": "0x000000000061e2d0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005800000007",
"name": "strftime",
"offset": "0x000000000061e2d8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005900000007",
"name": "mempcpy",
"offset": "0x000000000061e2e0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005a00000007",
"name": "memmove",
"offset": "0x000000000061e2e8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005b00000007",
"name": "error",
"offset": "0x000000000061e2f0",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005c00000007",
"name": "open",
"offset": "0x000000000061e2f8",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005d00000007",
"name": "fseeko",
"offset": "0x000000000061e300",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000005f00000007",
"name": "unsetenv",
"offset": "0x000000000061e308",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006000000007",
"name": "strtoul",
"offset": "0x000000000061e310",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006100000007",
"name": "__cxa_atexit",
"offset": "0x000000000061e318",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006200000007",
"name": "wcstombs",
"offset": "0x000000000061e320",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006300000007",
"name": "getxattr",
"offset": "0x000000000061e328",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006400000007",
"name": "freecon",
"offset": "0x000000000061e330",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006500000007",
"name": "sigismember",
"offset": "0x000000000061e338",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006600000007",
"name": "exit",
"offset": "0x000000000061e340",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006700000007",
"name": "fwrite",
"offset": "0x000000000061e348",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006800000007",
"name": "__fprintf_chk",
"offset": "0x000000000061e350",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006a00000007",
"name": "fflush_unlocked",
"offset": "0x000000000061e358",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006b00000007",
"name": "mbsinit",
"offset": "0x000000000061e360",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006c00000007",
"name": "iswprint",
"offset": "0x000000000061e368",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006d00000007",
"name": "sigaddset",
"offset": "0x000000000061e370",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006e00000007",
"name": "strstr",
"offset": "0x000000000061e378",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000006f00000007",
"name": "__ctype_tolower_loc",
"offset": "0x000000000061e380",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000007000000007",
"name": "__ctype_b_loc",
"offset": "0x000000000061e388",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
{
"info": "0x0000007100000007",
"name": "__sprintf_chk",
"offset": "0x000000000061e390",
"type": "R_X86_64_JUMP_SLOT",
"value": "0x0000000000000000"
},
],
},
]
|
cuckoobox/cuckoo
|
tests/test_elf.py
|
Python
|
mit
| 53,018
|
import copy
import json
import os
import random
import re
import sys
import time
from bs4 import BeautifulSoup
import html2text
from jinja2 import Template
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import requests
from .settings import TEMPLATES
class KataExistsError(Exception):
pass
class Client:
def __init__(self, args):
"""
a codewars client.
        args: fetch arguments
"""
self.driver = webdriver.PhantomJS()
self.driver.set_window_size(1120, 550)
self.args = args
try:
with(open('config.json', 'r')) as json_reader:
self.config = json.load(json_reader)
except FileNotFoundError:
raise FileNotFoundError("config file not found. Please run `kata-scrape init` first.")
def make_kata(self):
"""
create a kata based on args
post:
- description is scraped
- code is scraped
- tests are scraped
"""
self._pick_lang()
print('Finding a kata for {}...'.format(self.language), end='')
self._get_slug()
self.url = 'http://www.codewars.com/kata/{slug}/train/{lang}'.format(
slug=self.slug, lang=self.language
)
try:
os.mkdir(self.slug)
except FileExistsError:
raise KataExistsError("You've already scraped the next {} kata.".format(self.language))
print(' -> {}'.format(self.slug))
self.kata_dir = os.path.join(os.getcwd(), self.slug)
self.driver.get(self.url)
try:
WebDriverWait(self.driver, 10).until_not(
EC.text_to_be_present_in_element((By.ID, 'description'), 'Loading description...')
)
# OTHER WAITS GO HERE...
# I don't know how to wait until the code and tests have been fetched.
# instead I will just initiate a while loop that breaks when a value is found
# both of these boxes MUST have a value
self._scrape_description()
self._scrape_code()
self._write_files()
finally:
self.driver.quit()
def _pick_lang(self):
"""
pick the language to scrape from
"""
self.language = self.args['lang']
if self.language is None:
try:
self.language = random.choice(self.config['languages'])
            except (KeyError, IndexError):
                raise ValueError("No language given and none specified in config.json")
language_mapping = {"python": "py", "ruby": "rb", "javascript": "js"}
self.language_ext = language_mapping[self.language]
# ~~~ determine the next kata ~~~
def _get_slug(self):
"""
        determines a random kata slug for self.language
"""
resp_json = self._train_next()
self.name = resp_json['name']
self.slug = resp_json['slug']
def _train_next(self):
"""
post request to train in the next challenge
"""
url = "https://www.codewars.com/api/v1/code-challenges/{}/train".format(self.language)
data = {'strategy': 'random'}
headers = {'Authorization': self.config['api_key']}
resp = requests.post(url, data=data, headers=headers)
return json.loads(resp.text)
# ~~~ scrape content and write to file ~~~
def _scrape_description(self):
"""
scrape the kata description
description is saved to object
"""
print('scraping description', end='')
t0 = time.time()
while True:
try:
soup = BeautifulSoup(self.driver.page_source, 'html.parser')
descrip = soup.select('#description')[0]
break
except IndexError:
if time.time() - t0 < 10:
time.sleep(.5)
continue
else:
# We waited for 10 seconds and we can't find the description
# in the DOM. timeout!
raise RuntimeError('Kata could not be scraped. Please try again later')
self.description = ''.join(
[
html2text.html2text(
str(paragraph)
) for paragraph in descrip.findAll('p')
]
)
print(' -> done')
def _grab_codemirror(self, _id):
"""
grab content from the codemirror div
_id: the id of the div to grab from
"""
code_box = self.driver.find_elements_by_css_selector('#{} .CodeMirror'.format(_id))[0]
return self.driver.execute_script('return arguments[0].CodeMirror.getValue()', code_box)
def _scrape_code(self):
"""
scrape the starter code and tests
values are saved to object
"""
for _id in ['code', 'fixture']:
while True:
print('waiting for {}'.format(_id), end='')
code = self._grab_codemirror(_id)
                if code:  # move to next element if something was found, otherwise try again.
                    print(' -> found')
                    setattr(self, _id, code)
                    break
                time.sleep(0.5)  # poll gently instead of busy-waiting
def _write_files(self):
"""
write files to disk based on scraped data
"""
self._write_description()
self._write_code()
def _write_description(self):
"""
write the description file
"""
with(open('{slug}/description.md'.format(slug=self.slug), 'w+')) as writer:
descrip_template = open(os.path.join(TEMPLATES, 'description.md.j2'), 'r')
template = Template(
descrip_template.read()
)
descrip_template.close()
params = {
'name': self.name,
'url': self.url,
'description': self.description
}
output = template.render(**params)
writer.write(output)
def _write_code(self):
"""
write code and tests
"""
file_mappings = {'code': 'main', 'fixture': 'tests'}
for k, v in file_mappings.items():
with open('{slug}/{v}.{ext}'.format(slug=self.slug, v=v, ext=self.language_ext), 'w+') as writer:
template_h = open(os.path.join(TEMPLATES, k, '{lang}.j2'.format(lang=self.language)),'r')
template = Template(
template_h.read()
)
template_h.close()
# special exception for javascript When the function is
# scraped we then need to identify its name so we can
# reference it in the tests
if k == 'fixture' and self.language == 'javascript':
                    p = re.compile(r'function\s+(.*?)\(')
m = p.search(self.code)
try:
func_name = m.group(1)
except AttributeError:
# maybe the format is like this: var someFunc = function(args)
                        p2 = re.compile(r'(\w+)\s*=\s*function')
m2 = p2.search(self.code)
func_name = m2.group(1)
output = template.render({
'code': getattr(self, k),
'func_name': func_name
})
else:
output = template.render({'code': getattr(self, k)})
writer.write(output)
|
jstoebel/kata_scrape
|
kata_scrape/client.py
|
Python
|
mit
| 7,775
|
from OpenGLCffi.GLES2 import params
@params(api='gles2', prms=['face', 'mode'])
def glPolygonModeNV(face, mode):
pass
|
cydenix/OpenGLCffi
|
OpenGLCffi/GLES2/EXT/NV/polygon_mode.py
|
Python
|
mit
| 121
|
# global
import logging
import pandas as pd
import numpy as np
# local
import utils
from core.features import Features
from core.DocVocabulary import DocVocabulary
from core.TermVocabulary import TermVocabulary
from core.msg import TwitterMessageParser
def prepare_problem(vectorizer, task_type, train_table, test_table,
etalon_table):
"""
Main function of vectorization for neural network
"""
message_settings, features_settings = utils.load_embeddings()
features = Features(
TwitterMessageParser(message_settings, task_type),
features_settings)
term_vocabulary = TermVocabulary()
doc_vocabulary = DocVocabulary()
train_problem = utils.create_problem(task_type, 'train', train_table,
vectorizer, features, term_vocabulary,
doc_vocabulary, message_settings)
test_problem = utils.create_problem(task_type, 'test', test_table,
vectorizer, features, term_vocabulary,
doc_vocabulary, message_settings)
return (train_problem, test_problem)
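# Hedged usage sketch (the vectorizer name and table paths below are
# hypothetical; real values come from the surrounding training scripts):
#
#   train_problem, test_problem = prepare_problem(
#       my_vectorizer, 'bank',
#       'data/train.csv', 'data/test.csv', 'data/etalon.csv')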
def fill_test_results(y_test, task_type, result_table):
"""
y_test : np.ndarray (None, 3)
answers
task_type : str
'bank' or 'tcc'
result_table : str
output table which should be filled with the predicted result
"""
# TODO: remove duplicated code at predict.py
logging.info("Filling answers in {} ...".format(result_table))
df = pd.read_csv(result_table, sep=',')
sentiment_columns = utils.get_score_columns(task_type)
for msg_index, row_index in enumerate(df.index):
label = np.argmax(y_test[msg_index]) - 1
for column in sentiment_columns:
if not df[column].isnull()[row_index]:
df.loc[row_index, column] = label
# Rewriting table with the filled results
df.to_csv(result_table, sep=',')
del df
def prepare_result_table(test_table, result_table):
logging.info('Create a file for classifier results: {}'.format(
result_table))
result_df = pd.read_csv(test_table, sep=',')
result_df.to_csv(result_table, sep=',')
|
nicolay-r/tone-classifier
|
models/utils_keras.py
|
Python
|
mit
| 2,274
|
import argparse
from collections import OrderedDict
import os
import re
import sys
import types
if sys.version_info >= (3, 0):
from io import StringIO
else:
from StringIO import StringIO
__version__ = "0.9.4"
ACTION_TYPES_THAT_DONT_NEED_A_VALUE = {argparse._StoreTrueAction,
argparse._StoreFalseAction, argparse._CountAction,
argparse._StoreConstAction, argparse._AppendConstAction}
# global ArgumentParser instances
_parsers = {}
def initArgumentParser(name=None, **kwargs):
"""Creates a global ArgumentParser instance with the given name,
passing any args other than "name" to the ArgumentParser constructor.
This instance can then be retrieved using getArgumentParser(..)
"""
if name is None:
name = "default"
if name in _parsers:
raise ValueError(("kwargs besides 'name' can only be passed in the"
" first time. '%s' ArgumentParser already exists: %s") % (
name, _parsers[name]))
kwargs.setdefault('formatter_class', argparse.ArgumentDefaultsHelpFormatter)
kwargs.setdefault('conflict_handler', 'resolve')
_parsers[name] = ArgumentParser(**kwargs)
def getArgumentParser(name=None, **kwargs):
"""Returns the global ArgumentParser instance with the given name. The 1st
time this function is called, a new ArgumentParser instance will be created
for the given name, and any args other than "name" will be passed on to the
ArgumentParser constructor.
"""
if name is None:
name = "default"
if len(kwargs) > 0 or name not in _parsers:
initArgumentParser(name, **kwargs)
return _parsers[name]
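# Hedged usage sketch (module names are hypothetical): several modules can
# share one global parser, each adding its own args to it.
#
#   # in module_a.py
#   p = getArgumentParser()
#   p.add_argument("--verbose", action="store_true", env_var="APP_VERBOSE")
#   # in main.py
#   options = getArgumentParser().parse_args()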
class ArgumentDefaultsRawHelpFormatter(
argparse.ArgumentDefaultsHelpFormatter,
argparse.RawTextHelpFormatter,
argparse.RawDescriptionHelpFormatter):
"""HelpFormatter that adds default values AND doesn't do line-wrapping"""
pass
# used while parsing args to keep track of where they came from
_COMMAND_LINE_SOURCE_KEY = "command_line"
_ENV_VAR_SOURCE_KEY = "environment_variables"
_CONFIG_FILE_SOURCE_KEY = "config_file"
_DEFAULTS_SOURCE_KEY = "defaults"
class ArgumentParser(argparse.ArgumentParser):
"""Drop-in replacement for argparse.ArgumentParser that adds support for
environment variables and .ini or .yaml-style config files.
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=argparse.HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True,
add_config_file_help=True,
add_env_var_help=True,
auto_env_var_prefix=None,
config_file_parser=None,
default_config_files=[],
ignore_unknown_config_file_keys=False,
allow_unknown_config_file_keys=False, # deprecated
args_for_setting_config_path=[],
config_arg_is_required=False,
config_arg_help_message="config file path",
args_for_writing_out_config_file=[],
write_out_config_file_arg_help_message="takes the current command line "
"args and writes them out to a config file at the given path, then "
"exits"
):
"""Supports all the same args as the argparse.ArgumentParser
constructor, as well as the following additional args.
Additional Args:
add_config_file_help: Whether to add a description of config file
syntax to the help message.
add_env_var_help: Whether to add something to the help message for
args that can be set through environment variables.
auto_env_var_prefix: If set to a string instead of None, all config-
file-settable options will become also settable via environment
variables whose names are this prefix followed by the config
file key, all in upper case. (eg. setting this to "foo_" will
allow an arg like "--arg1" to also be set via env. var FOO_ARG1)
config_file_parser: An instance of a parser to be used for parsing
config files. Default: ConfigFileParser()
default_config_files: When specified, this list of config files will
be parsed in order, with the values from each config file
            taking precedence over previous ones. This allows an application
to look for config files in multiple standard locations such as
the install directory, home directory, and current directory:
["<install dir>/app_config.ini",
"~/.my_app_config.ini",
"./app_config.txt"]
ignore_unknown_config_file_keys: If true, settings that are found
in a config file but don't correspond to any defined
configargparse args will be ignored. If false, they will be
processed and appended to the commandline like other args, and
can be retrieved using parse_known_args() instead of parse_args()
allow_unknown_config_file_keys:
@deprecated
Use ignore_unknown_config_file_keys instead.
If true, settings that are found in a config file but don't
correspond to any defined configargparse args, will still be
processed and appended to the command line (eg. for
parsing with parse_known_args()). If false, they will be ignored.
args_for_setting_config_path: A list of one or more command line
args to be used for specifying the config file path
(eg. ["-c", "--config-file"]). Default: []
config_arg_is_required: When args_for_setting_config_path is set,
set this to True to always require users to provide a config path.
config_arg_help_message: the help message to use for the
args listed in args_for_setting_config_path.
args_for_writing_out_config_file: A list of one or more command line
args to use for specifying a config file output path. If
provided, these args cause configargparse to write out a config
file with settings based on the other provided commandline args,
            environment variables and defaults, and then to exit.
(eg. ["-w", "--write-out-config-file"]). Default: []
write_out_config_file_arg_help_message: The help message to use for
the args in args_for_writing_out_config_file.
"""
self._add_config_file_help = add_config_file_help
self._add_env_var_help = add_env_var_help
self._auto_env_var_prefix = auto_env_var_prefix
# extract kwargs that can be passed to the super constructor
kwargs_for_super = {k: v for k, v in locals().items() if k in [
"prog", "usage", "description", "epilog", "version", "parents",
"formatter_class", "prefix_chars", "fromfile_prefix_chars",
"argument_default", "conflict_handler", "add_help" ]}
if sys.version_info >= (3, 3) and "version" in kwargs_for_super:
del kwargs_for_super["version"] # version arg deprecated in v3.3
argparse.ArgumentParser.__init__(self, **kwargs_for_super)
        # parse the additional args
if config_file_parser is None:
self._config_file_parser = ConfigFileParser()
else:
self._config_file_parser = config_file_parser
self._default_config_files = default_config_files
self._ignore_unknown_config_file_keys = ignore_unknown_config_file_keys \
or allow_unknown_config_file_keys
if args_for_setting_config_path:
self.add_argument(*args_for_setting_config_path, dest="config_file",
required=config_arg_is_required, help=config_arg_help_message,
is_config_file_arg=True)
if args_for_writing_out_config_file:
self.add_argument(*args_for_writing_out_config_file,
dest="write_out_config_file_to_this_path",
metavar="CONFIG_OUTPUT_PATH",
help=write_out_config_file_arg_help_message,
is_write_out_config_file_arg=True)
def parse_args(self, args = None, namespace = None,
config_file_contents = None, env_vars = os.environ):
"""Supports all the same args as the ArgumentParser.parse_args(..),
as well as the following additional args.
Additional Args:
args: a list of args as in argparse, or a string (eg. "-x -y bla")
config_file_contents: String. Used for testing.
env_vars: Dictionary. Used for testing.
"""
args, argv = self.parse_known_args(args = args,
namespace = namespace,
config_file_contents = config_file_contents,
env_vars = env_vars)
if argv:
self.error('unrecognized arguments: %s' % ' '.join(argv))
return args
def parse_known_args(self, args = None, namespace = None,
config_file_contents = None, env_vars = os.environ):
"""Supports all the same args as the ArgumentParser.parse_args(..),
as well as the following additional args.
Additional Args:
args: a list of args as in argparse, or a string (eg. "-x -y bla")
config_file_contents: String. Used for testing.
env_vars: Dictionary. Used for testing.
"""
if args is None:
args = sys.argv[1:]
elif type(args) == str:
args = args.split()
else:
args = list(args)
for a in self._actions:
a.is_positional_arg = not a.option_strings
# maps string describing the source (eg. env var) to a settings dict
# to keep track of where values came from (used by print_values())
self._source_to_settings = OrderedDict()
if args:
a_v_pair = (None, list(args)) # copy args list to isolate changes
self._source_to_settings[_COMMAND_LINE_SOURCE_KEY] = {'': a_v_pair}
# handle auto_env_var_prefix __init__ arg by setting a.env_var as needed
if self._auto_env_var_prefix is not None:
for a in self._actions:
config_file_keys = self.get_possible_config_keys(a)
if config_file_keys and not (a.env_var or a.is_positional_arg
or a.is_config_file_arg or a.is_write_out_config_file_arg):
stripped_config_file_key = config_file_keys[0].strip(
self.prefix_chars)
a.env_var = (self._auto_env_var_prefix +
stripped_config_file_key).upper()
# add env var settings to the commandline that aren't there already
env_var_args = []
actions_with_env_var_values = [a for a in self._actions
if not a.is_positional_arg and a.env_var and a.env_var in env_vars
and not already_on_command_line(args, a.option_strings)]
for action in actions_with_env_var_values:
key = action.env_var
value = env_vars[key]
env_var_args += self.convert_setting_to_command_line_arg(
action, key, value)
args += env_var_args
if env_var_args:
self._source_to_settings[_ENV_VAR_SOURCE_KEY] = OrderedDict(
[(a.env_var, (a, env_vars[a.env_var]))
for a in actions_with_env_var_values])
# prepare for reading config file(s)
known_config_keys = {config_key: action for action in self._actions
for config_key in self.get_possible_config_keys(action)}
# open the config file(s)
if config_file_contents:
stream = StringIO(config_file_contents)
stream.name = "method arg"
config_streams = [stream]
else:
config_streams = self._open_config_files(args)
# parse each config file
for stream in config_streams[::-1]:
try:
config_settings = self._config_file_parser.parse(stream)
except ConfigFileParserException as e:
self.error(e)
finally:
if hasattr(stream, "close"):
stream.close()
# add each config setting to the commandline unless it's there already
config_args = []
for key, value in config_settings.items():
if key in known_config_keys:
action = known_config_keys[key]
discard_this_key = already_on_command_line(
args, action.option_strings)
else:
action = None
discard_this_key = self._ignore_unknown_config_file_keys or \
already_on_command_line(
args,
self.get_command_line_key_for_unknown_config_file_setting(key))
if not discard_this_key:
config_args += self.convert_setting_to_command_line_arg(
action, key, value)
source_key = "%s|%s" %(_CONFIG_FILE_SOURCE_KEY, stream.name)
if source_key not in self._source_to_settings:
self._source_to_settings[source_key] = OrderedDict()
self._source_to_settings[source_key][key] = (action, value)
args += config_args
# save default settings for use by print_values()
default_settings = OrderedDict()
for action in self._actions:
cares_about_default_value = (not action.is_positional_arg or
action.nargs in [OPTIONAL, ZERO_OR_MORE])
if (already_on_command_line(args, action.option_strings) or
not cares_about_default_value or
action.default is None or
action.default == SUPPRESS or
type(action) in ACTION_TYPES_THAT_DONT_NEED_A_VALUE):
continue
else:
if action.option_strings:
key = action.option_strings[-1]
else:
key = action.dest
default_settings[key] = (action, str(action.default))
if default_settings:
self._source_to_settings[_DEFAULTS_SOURCE_KEY] = default_settings
# parse all args (including commandline, config file, and env var)
namespace, unknown_args = argparse.ArgumentParser.parse_known_args(
self, args=args, namespace=namespace)
# handle any args that have is_write_out_config_file_arg set to true
user_write_out_config_file_arg_actions = [a for a in self._actions
if getattr(a, "is_write_out_config_file_arg", False)]
if user_write_out_config_file_arg_actions:
output_file_paths = []
for action in user_write_out_config_file_arg_actions:
# check if the user specified this arg on the commandline
output_file_path = getattr(namespace, action.dest, None)
if output_file_path:
# validate the output file path
try:
with open(output_file_path, "w") as output_file:
output_file_paths.append(output_file_path)
except IOError as e:
raise ValueError("Couldn't open %s for writing: %s" % (
output_file_path, e))
if output_file_paths:
# generate the config file contents
config_items = self.get_items_for_config_file_output(
self._source_to_settings, namespace)
contents = self._config_file_parser.serialize(config_items)
for output_file_path in output_file_paths:
with open(output_file_path, "w") as output_file:
output_file.write(contents)
if len(output_file_paths) == 1:
output_file_paths = output_file_paths[0]
self.exit(0, "Wrote config file to " + str(output_file_paths))
return namespace, unknown_args
def get_command_line_key_for_unknown_config_file_setting(self, key):
"""Compute a commandline arg key to be used for a config file setting
that doesn't correspond to any defined configargparse arg (and so
doesn't have a user-specified commandline arg key).
Args:
key: The config file key that was being set.
"""
key_without_prefix_chars = key.strip(self.prefix_chars)
command_line_key = self.prefix_chars[0]*2 + key_without_prefix_chars
return command_line_key
def get_items_for_config_file_output(self, source_to_settings,
parsed_namespace):
"""Does the inverse of config parsing by taking parsed values and
converting them back to a string representing config file contents.
Args:
source_to_settings: the dictionary created within parse_known_args()
parsed_namespace: namespace object created within parse_known_args()
Returns:
an OrderedDict with the items to be written to the config file
"""
config_file_items = OrderedDict()
for source, settings in source_to_settings.items():
if source == _COMMAND_LINE_SOURCE_KEY:
_, existing_command_line_args = settings['']
for action in self._actions:
config_file_keys = self.get_possible_config_keys(action)
if config_file_keys and not action.is_positional_arg and \
already_on_command_line(existing_command_line_args,
action.option_strings):
value = getattr(parsed_namespace, action.dest, None)
if value is not None:
if type(value) is bool:
value = str(value).lower()
elif type(value) is list:
value = "["+", ".join(map(str, value))+"]"
config_file_items[config_file_keys[0]] = value
elif source == _ENV_VAR_SOURCE_KEY:
for key, (action, value) in settings.items():
config_file_keys = self.get_possible_config_keys(action)
if config_file_keys:
value = getattr(parsed_namespace, action.dest, None)
if value is not None:
config_file_items[config_file_keys[0]] = value
elif source.startswith(_CONFIG_FILE_SOURCE_KEY):
for key, (action, value) in settings.items():
config_file_items[key] = value
elif source == _DEFAULTS_SOURCE_KEY:
for key, (action, value) in settings.items():
config_file_keys = self.get_possible_config_keys(action)
if config_file_keys:
value = getattr(parsed_namespace, action.dest, None)
if value is not None:
config_file_items[config_file_keys[0]] = value
return config_file_items
def convert_setting_to_command_line_arg(self, action, key, value):
"""Converts a config file or env var key/value to a list of
commandline args to append to the commandline.
Args:
action: The action corresponding to this setting, or None if this
is a config file setting that doesn't correspond to any
defined configargparse arg.
key: The config file key or env var name
value: The raw value string from the config file or env var
"""
if type(value) != str:
raise ValueError("type(value) != str: %s" % str(value))
args = []
if action is None:
command_line_key = \
self.get_command_line_key_for_unknown_config_file_setting(key)
else:
command_line_key = action.option_strings[-1]
if value.lower() == "true":
if action is not None:
if type(action) not in ACTION_TYPES_THAT_DONT_NEED_A_VALUE:
self.error("%s set to 'True' rather than a value" % key)
args.append( command_line_key )
elif value.startswith("[") and value.endswith("]"):
if action is not None:
if type(action) != argparse._AppendAction:
self.error(("%s can't be set to a list '%s' unless its "
"action type is changed to 'append'") % (key, value))
for list_elem in value[1:-1].split(","):
args.append( command_line_key )
args.append( list_elem.strip() )
else:
if action is not None:
if type(action) in ACTION_TYPES_THAT_DONT_NEED_A_VALUE:
self.error("%s is a flag but is being set to '%s'" % (
key, value))
args.append( command_line_key )
args.append( value )
return args
def get_possible_config_keys(self, action):
"""This method decides which actions can be set in a config file and
what their keys will be. It returns a list of 0 or more config keys that
can be used to set the given action's value in a config file.
"""
keys = []
for arg in action.option_strings:
if any([arg.startswith(2*c) for c in self.prefix_chars]):
keys += [arg[2:], arg] # eg. for '--bla' return ['bla', '--bla']
return keys
def _open_config_files(self, command_line_args):
"""Tries to parse config file path(s) from within command_line_args.
Returns a list of opened config files, including files specified on the
commandline as well as any default_config_files specified in the
constructor that are present on disk.
Args:
command_line_args: List of all args (already split on spaces)
"""
# open any default config files
config_files = [open(f) for f in map(
os.path.expanduser, self._default_config_files) if os.path.isfile(f)]
if not command_line_args:
return config_files
        # list actions with is_config_file_arg=True. It's possible there is more
# than one such arg.
user_config_file_arg_actions = [
a for a in self._actions if getattr(a, "is_config_file_arg", False)]
if not user_config_file_arg_actions:
return config_files
for action in user_config_file_arg_actions:
# try to parse out the config file path by using a clean new
# ArgumentParser that only knows this one arg/action.
arg_parser = argparse.ArgumentParser(
prefix_chars=self.prefix_chars,
add_help=False)
arg_parser._add_action(action)
# make parser not exit on error by replacing its error method.
# Otherwise it sys.exits(..) if, for example, config file
# is_required=True and user doesn't provide it.
def error_method(self, message):
pass
arg_parser.error = types.MethodType(error_method, arg_parser)
# check whether the user provided a value
parsed_arg = arg_parser.parse_known_args(args=command_line_args)
if not parsed_arg:
continue
namespace, _ = parsed_arg
user_config_file = getattr(namespace, action.dest, None)
if not user_config_file:
continue
# validate the user-provided config file path
user_config_file = os.path.expanduser(user_config_file)
if not os.path.isfile(user_config_file):
self.error('File not found: %s' % user_config_file)
config_files += [open(user_config_file)]
return config_files
def format_values(self):
"""Returns a string with all args and settings and where they came from
        (eg. commandline, config file, environment variable or default)
"""
source_key_to_display_value_map = {
_COMMAND_LINE_SOURCE_KEY: "Command Line Args: ",
_ENV_VAR_SOURCE_KEY: "Environment Variables:\n",
_CONFIG_FILE_SOURCE_KEY: "Config File (%s):\n",
_DEFAULTS_SOURCE_KEY: "Defaults:\n"
}
r = StringIO()
for source, settings in self._source_to_settings.items():
source = source.split("|")
source = source_key_to_display_value_map[source[0]] % tuple(source[1:])
r.write(source)
for key, (action, value) in settings.items():
if key:
r.write(" %-19s%s\n" % (key+":", value))
else:
if type(value) is str:
r.write(" %s\n" % value)
elif type(value) is list:
r.write(" %s\n" % ' '.join(value))
return r.getvalue()
def print_values(self, file = sys.stdout):
"""Prints the format_values() string (to sys.stdout or another file)."""
file.write(self.format_values())
def format_help(self):
msg = ""
added_config_file_help = False
added_env_var_help = False
if self._add_config_file_help:
default_config_files = self._default_config_files
cc = 2*self.prefix_chars[0] # eg. --
config_settable_args = [(arg, a) for a in self._actions for arg in
a.option_strings if self.get_possible_config_keys(a) and not
(a.dest == "help" or a.is_config_file_arg or
a.is_write_out_config_file_arg)]
config_path_actions = [a for a in
self._actions if getattr(a, "is_config_file_arg", False)]
if config_settable_args and (default_config_files or
config_path_actions):
self._add_config_file_help = False # prevent duplication
added_config_file_help = True
msg += ("Args that start with '%s' (eg. %s) can also be set in "
"a config file") % (cc, config_settable_args[0][0])
config_arg_string = " or ".join(a.option_strings[0]
for a in config_path_actions if a.option_strings)
if config_arg_string:
config_arg_string = "specified via " + config_arg_string
if default_config_files or config_arg_string:
msg += " (%s)." % " or ".join(default_config_files +
[config_arg_string])
msg += " " + self._config_file_parser.get_syntax_description()
if self._add_env_var_help:
env_var_actions = [(a.env_var, a) for a in self._actions
if getattr(a, "env_var", None)]
for env_var, a in env_var_actions:
env_var_help_string = " [env var: %s]" % env_var
if not a.help:
a.help = ""
if env_var_help_string not in a.help:
a.help += env_var_help_string
added_env_var_help = True
self._add_env_var_help = False # prevent duplication
if added_env_var_help or added_config_file_help:
value_sources = ["defaults"]
if added_config_file_help:
value_sources = ["config file values"] + value_sources
if added_env_var_help:
value_sources = ["environment variables"] + value_sources
msg += (" If an arg is specified in more than one place, then "
"commandline values override %s.") % (
" which override ".join(value_sources))
if msg:
self.description = (self.description or "") + " " + msg
return argparse.ArgumentParser.format_help(self)
class ConfigFileParser(object):
def parse(self, stream):
"""Parses a config file and return a dictionary of settings"""
settings = OrderedDict()
for i, line in enumerate(stream):
line = line.strip()
if not line or line[0] in ["#", ";", "["] or line.startswith("---"):
continue
            white_space = r"\s*"
            key = r"(?P<key>[^:=;#\s]+?)"
            value1 = white_space + r"[:=]" + white_space + r"(?P<value>[^;#]+?)"
            value2 = white_space + r"[\s]" + white_space + r"(?P<value>[^;#\s]+?)"
            comment = white_space + r"(?P<comment>\s[;#].*)?"
key_only_match = re.match("^" + key + comment + "$", line)
if key_only_match:
key = key_only_match.group("key")
settings[key] = "true"
continue
key_value_match = re.match("^"+key+value1+comment+"$", line) or \
re.match("^"+key+value2+comment+"$", line)
if key_value_match:
key = key_value_match.group("key")
value = key_value_match.group("value")
settings[key] = value
continue
raise ConfigFileParserException("Unexpected line %s in %s: %s" % \
(i, stream.name, line))
return settings
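    # Hedged examples of lines the regexes above accept (illustrative, not an
    # exhaustive grammar):
    #   key = value      -> settings['key'] = 'value'
    #   key: value       -> settings['key'] = 'value'
    #   flag             -> settings['flag'] = 'true'
    #   # comment        -> skipped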
def serialize(self, items):
"""Does the inverse of config parsing by taking parsed values and
converting them back to a string representing config file contents.
Args:
items: an OrderedDict with items to be written to the config file
Returns:
contents of config file as a string
"""
r = StringIO()
for key, value in items.items():
r.write("%s = %s\n" % (key, value))
return r.getvalue()
def get_syntax_description(self):
msg = ("The recognized syntax for setting (key, value) pairs is based "
"on the INI and YAML formats (e.g. key=value or foo=TRUE). "
"For full documentation of the differences from the standards "
"please refer to the ConfigArgParse documentation.")
return msg
class ConfigFileParserException(Exception):
"""Raised when config file parsing failed.
"""
pass
def add_argument(self, *args, **kwargs):
"""
This method supports the same args as ArgumentParser.add_argument(..)
as well as the additional args below.
Additional Args:
env_var: If set, the value of this environment variable will override
any config file or default values for this arg (but can itself
be overriden on the commandline). Also, if auto_env_var_prefix is
set in the constructor, this env var name will be used instead of
the automatic name.
is_config_file_arg: If True, this arg is treated as a config file path
This provides an alternative way to specify config files in place of
the ArgumentParser(fromfile_prefix_chars=..) mechanism.
Default: False
is_write_out_config_file_arg: If True, this arg will be treated as a
config file path, and, when it is specified, will cause
configargparse to write all current commandline args to this file
as config options and then exit.
Default: False
"""
env_var = kwargs.pop("env_var", None)
is_config_file_arg = kwargs.pop(
"is_config_file_arg", None) or kwargs.pop(
"is_config_file", None) # for backward compat.
is_write_out_config_file_arg = kwargs.pop(
"is_write_out_config_file_arg", None)
action = self.original_add_argument_method(*args, **kwargs)
action.is_positional_arg = not action.option_strings
action.env_var = env_var
action.is_config_file_arg = is_config_file_arg
action.is_write_out_config_file_arg = is_write_out_config_file_arg
if action.is_positional_arg and env_var:
raise ValueError("env_var can't be set for a positional arg.")
if action.is_config_file_arg and type(action) != argparse._StoreAction:
raise ValueError("arg with is_config_file_arg=True must have "
"action='store'")
if action.is_write_out_config_file_arg:
error_prefix = "arg with is_write_out_config_file_arg=True "
if type(action) != argparse._StoreAction:
raise ValueError(error_prefix + "must have action='store'")
if is_config_file_arg:
raise ValueError(error_prefix + "can't also have "
"is_config_file_arg=True")
return action
def already_on_command_line(existing_args, potential_command_line_args):
"""Utility method for checking if any of the existing_args is
already present in existing_args
"""
return any(potential_arg in existing_args
for potential_arg in potential_command_line_args)
# wrap ArgumentParser's add_argument(..) method with the one above
argparse._ActionsContainer.original_add_argument_method = argparse._ActionsContainer.add_argument
argparse._ActionsContainer.add_argument = add_argument
# add all public classes and constants from argparse module's namespace to this
# module's namespace so that the 2 modules are truly interchangeable
HelpFormatter = argparse.HelpFormatter
RawDescriptionHelpFormatter = argparse.RawDescriptionHelpFormatter
RawTextHelpFormatter = argparse.RawTextHelpFormatter
ArgumentDefaultsHelpFormatter = argparse.ArgumentDefaultsHelpFormatter
ArgumentError = argparse.ArgumentError
ArgumentTypeError = argparse.ArgumentTypeError
Action = argparse.Action
FileType = argparse.FileType
Namespace = argparse.Namespace
ONE_OR_MORE = argparse.ONE_OR_MORE
OPTIONAL = argparse.OPTIONAL
REMAINDER = argparse.REMAINDER
SUPPRESS = argparse.SUPPRESS
ZERO_OR_MORE = argparse.ZERO_OR_MORE
# create shorter aliases for the key methods and class names
getArgParser = getArgumentParser
getParser = getArgumentParser
ArgParser = ArgumentParser
Parser = ArgumentParser
argparse._ActionsContainer.add_arg = argparse._ActionsContainer.add_argument
argparse._ActionsContainer.add = argparse._ActionsContainer.add_argument
ArgumentParser.parse = ArgumentParser.parse_args
ArgumentParser.parse_known = ArgumentParser.parse_known_args
RawFormatter = RawDescriptionHelpFormatter
DefaultsFormatter = ArgumentDefaultsHelpFormatter
DefaultsRawFormatter = ArgumentDefaultsRawHelpFormatter
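if __name__ == "__main__":
    # Hedged demo, not part of the original module: a parser whose options can
    # come from the command line, a config file, or an environment variable.
    # The file name demo.ini and env var DEMO_GENOME are hypothetical.
    demo = ArgParser(default_config_files=["./demo.ini"])
    demo.add("-c", "--my-config", is_config_file_arg=True, help="config path")
    demo.add("--genome", env_var="DEMO_GENOME", help="path to genome file")
    print(demo.parse_known_args())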
|
lahwaacz/ConfigArgParse
|
configargparse.py
|
Python
|
mit
| 35,213
|
# Copyright 2016 Dietrich Epp.
#
# This file is part of Kitten Teleporter. The Kitten Teleporter source
# code is distributed under the terms of the MIT license.
# See LICENSE.txt for details.
import yaml
from mako import template
CONFIG_DEFAULT = '''\
---
# Local configuration, not checked into source control.
'''
class ConfigError(Exception):
pass
def fset(s):
return ', '.join(repr(x) for x in s)
class Config(object):
__slots__ = [
'config',
'env',
'defs',
'debug',
'server_host',
'server_port',
]
@classmethod
def load(class_, action, config):
"""Load the project configuration."""
paths = [
('tools/base.yaml', False),
('config.yaml', False),
('config_local.yaml', True),
]
infos = []
valid_keys = {'configs', 'server', 'default', 'config', 'env'}
for path, create in paths:
try:
with open(path) as fp:
info = yaml.safe_load(fp)
except FileNotFoundError:
if not create:
raise ConfigError('Missing config file: {}'.format(path))
print('Creating {}'.format(path))
with open(path, 'w') as fp:
fp.write(CONFIG_DEFAULT)
else:
if not info:
continue
extra = set(info.keys()).difference(valid_keys)
if extra:
raise ConfigError(
'{}: Unknown keys: {}.'.format(path, fset(extra)))
infos.append(info)
default = {}
configs = None
all_configs = {'base'}
server = {}
env = []
for info in infos:
try:
configs = info['configs']
all_configs.update(configs)
except KeyError:
pass
try:
server.update(info['server'])
except KeyError:
pass
try:
default.update(info['default'])
except KeyError:
pass
try:
env = info['env']
except KeyError:
pass
if configs is None:
raise ConfigError('Missing configs key.')
configs = set(configs)
valid_server = {'host', 'port'}
extra = set(server.keys()).difference(valid_server)
if extra:
raise ConfigError('Unknown server flag: {}.'.format(fset(extra)))
valid_actions = {'serve', 'build', 'package', 'deploy'}
extra = set(default.keys()).difference(valid_actions)
if extra:
raise ConfigError('Unknown actions: {}.'.format(fset(extra)))
if config is None:
if action is None:
raise ConfigError('No config and no action.')
try:
config = default[action]
except KeyError:
raise ConfigError('No default config for action {}.'
.format(action))
if config not in configs:
raise ConfigError('Invalid config: {!r}.'.format(config))
self = class_()
self.config = config
self.env = env
self.defs = {}
keys = ['base', config]
for info in infos:
try:
iconfig = info['config']
except KeyError:
continue
for key in keys:
try:
kconfig = iconfig[key]
except KeyError:
continue
if kconfig:
self.defs.update(kconfig)
self.debug = self.defs['debug']
self.server_host = server['host']
self.server_port = server['port']
return self
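    # Hedged sketch of how the three files layer (file names come from `paths`
    # above; the keys and values shown are illustrative only):
    #
    #   tools/base.yaml    -> configs: [debug, release]
    #   config.yaml        -> default: {serve: debug, deploy: release}
    #   config_local.yaml  -> server: {host: localhost, port: 8000}
    #
    # Later files win for overlapping `server` and `default` keys.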
def render(self, **kw):
"""Render the config dictionary."""
expanded = {'title', 'js_header', 'instructions'}
context = dict(kw)
context.update({key: value for key, value in self.defs.items()
if key not in expanded})
context['config'] = self.config
result = dict(context)
for key in expanded:
text = self.defs[key]
try:
result[key] = template.Template(text).render(**context)
except:
print('Could not evaluate template:')
for line in text.splitlines():
print(' ' + line)
raise
env = {}
for key in self.env:
env[key] = result[key]
result['env'] = env
return result
def dump(self, **kw):
print('Configuration:')
for name, value in sorted(self.render(**kw).items()):
if isinstance(value, str):
print(' {}:'.format(name))
for line in value.splitlines():
print(' {}'.format(line))
else:
print(' {}: {!r}'.format(name, value))
if __name__ == '__main__':
import sys
Config.load(None, sys.argv[1]).dump(version='v0.0.0')
|
depp/shifter-children
|
tools/config.py
|
Python
|
mit
| 5,169
|
import os
## This class has static methods for file handling
# @author Adriano Zanette
# @version 1.0
class FileUtils(object):
## test if a file exists
# @author Adriano Zanette
# @version 1.0
# @param filename String File name
# @return Boolean
@staticmethod
def exists(filename):
        try:
            f = open(filename)
            f.close()
            return True
        except IOError:
            return False
## test if is a dir
# @author Adriano Zanette
# @version 1.0
# @param dirName String Directory name
# @return Boolean
@staticmethod
def isDir(dirName):
dirName = dirName.replace('\\', '/')
if os.path.isdir(dirName):
splited = dirName.split('/')
currentDir = splited[len(splited)-1]
if not currentDir.startswith('.'):
return True
else:
return False
else:
return False
## test if is a file
# @author Adriano Zanette
# @version 1.0
# @param filename String file name
# @return Boolean
@staticmethod
def isFile(filename):
if os.path.isfile(filename) and not filename.startswith('.'):
return True
else:
return False
  ## get files from a path
# @author Adriano Zanette
# @version 1.0
# @param path String
# @param extensions List Accepted extensions
# @return Boolean
@staticmethod
def getFiles(path, extensions = []):
files = []
if FileUtils.isFile(path):
files = [path]
elif FileUtils.isDir(path):
files = FileUtils.getFilesFromDir(path, extensions)
else:
            print('ERROR: Cannot open ' + path)
return files
## get all files from a dir
# @author Adriano Zanette
# @version 1.0
# @param dirName String Directory name
# @param extensions List Accepted extensions
# @return List Returns an array with all files from the dir
@staticmethod
def getFilesFromDir(dirName, extensions = []):
fileList = []
for file in os.listdir(dirName):
dirFile = os.path.join(dirName, file)
if FileUtils.isFile(dirFile):
extension = os.path.splitext(dirFile)[1]
if len(extensions) == 0 or extension in extensions:
fileList.append(dirFile)
elif FileUtils.isDir(dirFile):
fileList += FileUtils.getFilesFromDir(dirFile, extensions)
return fileList
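## Hedged usage sketch (the path and extension list are hypothetical):
#
#   pyFiles = FileUtils.getFiles('src', ['.py'])
#   for f in pyFiles:
#       print(f)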
|
adzanette/scf-extractor
|
scf-extractor/modules/FileUtils.py
|
Python
|
mit
| 2,348
|
from rest_framework.decorators import detail_route
from rest_framework.response import Response
def get_transition_viewset_method(transition_name, **kwargs):
'''
Create a viewset method for the provided `transition_name`
'''
@detail_route(methods=['post'], **kwargs)
def inner_func(self, request, pk=None, **kwargs):
object = self.get_object()
transition_method = getattr(object, transition_name)
transition_method(by=self.request.user)
if self.save_after_transition:
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
return inner_func
def get_viewset_transition_action_mixin(model, **kwargs):
'''
Find all transitions defined on `model`, then create a corresponding
viewset action method for each and apply it to `Mixin`. Finally, return
`Mixin`
'''
instance = model()
class Mixin(object):
save_after_transition = True
transitions = instance.get_all_status_transitions()
transition_names = set(x.name for x in transitions)
for transition_name in transition_names:
setattr(
Mixin,
transition_name,
get_transition_viewset_method(transition_name, **kwargs)
)
return Mixin
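# Hedged usage sketch (`Article` and its serializer are hypothetical
# django-fsm models, not part of this module):
#
#   class ArticleViewSet(get_viewset_transition_action_mixin(Article),
#                        viewsets.ModelViewSet):
#       queryset = Article.objects.all()
#       serializer_class = ArticleSerializer
#
# Each transition (e.g. 'publish') becomes POST /articles/<pk>/publish/.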
|
hzy/drf-fsm-transitions
|
drf_fsm_transitions/viewset_mixins.py
|
Python
|
mit
| 1,307
|
"""
The goal of this challenge is to create a function that will take a list of
names and a bin size and then shuffle those names and return them in a
list of lists where the length of the inner lists matches the bin size.
For example, calling the function with a list of names and a bin size of 2 should
return a list of lists where each inner list has 2 random names. If the number
of names provided doesn't divide evenly into the bin size and only one name is
remaining, add that name to another inner list.
"""
import random
def names_func(a_list, size):
"""
This func should take a list and size, break the list into lists of the
size and return a list of lists.
"""
# Shuffle the names
random.shuffle(a_list)
# Init the groups list (expected to be a list of lists)
groups = list()
while len(a_list) >= size:
# Add the first size elements into our groups list, keep doing that
# until we have less than size elements left
groups.append(a_list[:size])
a_list = a_list[size:]
    # If we have elements left over, we need to deal with them
# If it's only one element, add it to the last group
if len(a_list) == 1:
groups[-1].append(a_list[0])
# Otherwise, just make it another group
elif len(a_list) > 1:
groups.append(a_list)
return groups
if __name__ == '__main__':
# Any code here will run when you run the command: `python names_challenge.py`
print(names_func(["one", "two", "three", "four", "five"], 2))
|
developerQuinnZ/this_will_work
|
student-work/jeff_beyer/week_1/group_challenge/names_challenge.py
|
Python
|
mit
| 1,543
|
# -*- coding: utf-8 -*-
from PySide import QtCore, QtGui
class Ui_Widget(object):
def setupUi(self, Widget):
Widget.setObjectName("Widget")
Widget.resize(639, 686)
Widget.setMaximumSize(639, 646)
Widget.setMinimumSize(639, 646)
self.imageBox = QtGui.QGroupBox(Widget)
self.imageBox.setGeometry(QtCore.QRect(20, 190, 601, 201))
self.imageBox.setObjectName("imageBox")
self.contentBox = QtGui.QGroupBox(Widget)
self.contentBox.setGeometry(QtCore.QRect(20, 400, 601, 141))
self.contentBox.setObjectName("contentBox")
self.contentTxt = QtGui.QTextEdit(self.contentBox)
self.contentTxt.setGeometry(QtCore.QRect(10, 20, 581, 111))
self.contentTxt.setObjectName("contentTxt")
self.publishBtn = QtGui.QPushButton(Widget)
self.publishBtn.setGeometry(QtCore.QRect(430, 580, 75, 23))
self.publishBtn.setObjectName("publishBtn")
self.cancelBtn = QtGui.QPushButton(Widget)
self.cancelBtn.setGeometry(QtCore.QRect(530, 580, 75, 23))
self.cancelBtn.setObjectName("cancelBtn")
self.formLayoutWidget = QtGui.QWidget(Widget)
self.formLayoutWidget.setGeometry(QtCore.QRect(20, 20, 601, 151))
self.formLayoutWidget.setObjectName("formLayoutWidget")
self.formLayout = QtGui.QFormLayout(self.formLayoutWidget)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setHorizontalSpacing(30)
self.formLayout.setVerticalSpacing(10)
self.formLayout.setObjectName("formLayout")
self.projectLabel = QtGui.QLabel(self.formLayoutWidget)
self.projectLabel.setObjectName("projectLabel")
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.projectLabel)
self.projectComboBox = QtGui.QComboBox(self.formLayoutWidget)
self.projectComboBox.setObjectName("projectComboBox")
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.projectComboBox)
self.typeLabel = QtGui.QLabel(self.formLayoutWidget)
self.typeLabel.setObjectName("typeLabel")
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.typeLabel)
self.typeComboBox = QtGui.QComboBox(self.formLayoutWidget)
self.typeComboBox.setObjectName("typeComboBox")
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.typeComboBox)
self.SALabel = QtGui.QLabel(self.formLayoutWidget)
self.SALabel.setObjectName("SALabel")
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.SALabel)
self.SAComboBox = QtGui.QComboBox(self.formLayoutWidget)
self.SAComboBox.setObjectName("SAComboBox")
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.SAComboBox)
self.taskLable = QtGui.QLabel(self.formLayoutWidget)
self.taskLable.setObjectName("taskLable")
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.taskLable)
self.taskComboBox = QtGui.QComboBox(self.formLayoutWidget)
self.taskComboBox.setObjectName("taskComboBox")
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.taskComboBox)
self.fileName = QtGui.QLabel(self.formLayoutWidget)
self.fileName.setObjectName("fileName")
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.fileName)
self.FileTxt = QtGui.QLineEdit(self.formLayoutWidget)
self.FileTxt.setObjectName("FileTxt")
self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.FileTxt)
self.fileType = QtGui.QLabel(self.formLayoutWidget)
self.fileType.setObjectName("fileType")
self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.fileType)
self.typeComboBox2 = QtGui.QComboBox(self.formLayoutWidget)
        self.typeComboBox2.setObjectName("typeComboBox2")
self.formLayout.setWidget(5, QtGui.QFormLayout.FieldRole, self.typeComboBox2)
self.retranslateUi(Widget)
QtCore.QMetaObject.connectSlotsByName(Widget)
def retranslateUi(self, Widget):
        Widget.setWindowTitle(QtGui.QApplication.translate("Widget", "Publish", None, QtGui.QApplication.UnicodeUTF8))
        self.imageBox.setTitle(QtGui.QApplication.translate("Widget", "Thumbnail", None, QtGui.QApplication.UnicodeUTF8))
        self.contentBox.setTitle(QtGui.QApplication.translate("Widget", "Notes", None, QtGui.QApplication.UnicodeUTF8))
        self.publishBtn.setText(QtGui.QApplication.translate("Widget", "Publish", None, QtGui.QApplication.UnicodeUTF8))
        self.cancelBtn.setText(QtGui.QApplication.translate("Widget", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
        self.projectLabel.setText(QtGui.QApplication.translate("Widget", "Project Name", None, QtGui.QApplication.UnicodeUTF8))
        self.typeLabel.setText(QtGui.QApplication.translate("Widget", "Type", None, QtGui.QApplication.UnicodeUTF8))
        self.SALabel.setText(QtGui.QApplication.translate("Widget", "Shot&Asset", None, QtGui.QApplication.UnicodeUTF8))
        self.taskLable.setText(QtGui.QApplication.translate("Widget", "Task Name", None, QtGui.QApplication.UnicodeUTF8))
        self.fileName.setText(QtGui.QApplication.translate("Widget", "File Name", None, QtGui.QApplication.UnicodeUTF8))
        self.fileType.setText(QtGui.QApplication.translate("Widget", "File Type", None, QtGui.QApplication.UnicodeUTF8))
|
HH890612/MiliCloud
|
ui/publish_ui.py
|
Python
|
mit
| 5,541
|
__author__ = 'sukrit'
class Provider:
"""
Base Provider class for API Client implementation.
"""
def __init__(self, **kwargs):
super(Provider, self).__init__()
def not_supported(self):
"""
Raises NotImplementedError with a message
:return:
"""
raise NotImplementedError(
'Provider: {} does not support this operation'
.format(self.__class__))
def client_version(self):
raise NotImplementedError("Implementation coming soon...")
def deploy_units(self, template_name, service_data_stream, units=1,
start=True):
"""
:param template_name: Template name must contain '@' param for
deploying multiple instances
:param service_data_stream: Stream cotaining template data
:param units: No. of units to deploy
:return: None
"""
self.not_supported()
def start_units(self, template_name, units=1):
"""
Starts units with given count for a given template.
:param template_name: The template name for the unit.
Note: It assumes that template_name is already installed.
        See: deploy_units for installing templates programmatically.
:param units: No. of units to deploy
:return: None
"""
self.not_supported()
def deploy(self, service_name, service_data_stream, force_remove=False):
self.not_supported()
def destroy_units_matching(self, service_prefix, exclude_prefix=None):
"""
        Destroys units matching the given prefix.
        :param service_prefix: Units with the given prefix that need to be
            destroyed
        :type service_prefix: str
        :param exclude_prefix: Units with the specified prefix should be excluded
            from being destroyed
:type exclude_prefix: str
:return: None
"""
self.not_supported()
def destroy(self, service):
self.not_supported()
def status(self, service_name):
self.not_supported()
def stop_units_matching(self, service_prefix, exclude_prefix=None):
"""
        Stops units matching the given prefix.
        :param service_prefix: Units with the given prefix that need to be stopped
        :type service_prefix: str
        :param exclude_prefix: Units with the specified prefix should be excluded
            from being stopped
:type exclude_prefix: str
:return: None
"""
self.not_supported()
def fetch_units_matching(self, service_prefix, exclude_prefix=None):
"""
Fetch units matching prefix.
:param service_prefix:
:type service_prefix: str
:keyword exclude_prefix: Units with specified prefix should be excluded
from fetch list
:type exclude_prefix: str
:return: list of units where each unit is represented as dict
comprising of
- unit : Name of fleet unit,
- machine : Machine for the unit
- active : Activation status ('activating', 'active')
- sub : Current state of the unit
"""
self.not_supported()
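# Hedged sketch of a concrete provider (hypothetical, not part of this
# module): subclasses override only the operations they support and inherit
# not_supported() for the rest.
#
#   class StubProvider(Provider):
#       def status(self, service_name):
#           return 'active'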
|
totem/fleet-py
|
fleet/client/fleet_base.py
|
Python
|
mit
| 3,221
|
import click
import csv
import datetime
import json
import io
from abc import ABCMeta, abstractmethod
from itertools import groupby
from collections import namedtuple
from soccer import leagueids, leagueproperties
LEAGUE_PROPERTIES = leagueproperties.LEAGUE_PROPERTIES
LEAGUE_IDS = leagueids.LEAGUE_IDS
def get_writer(output_format='stdout', output_file=None):
return globals()[output_format.capitalize()](output_file)
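# Hedged usage sketch: get_writer() looks the writer class up by capitalized
# format name in this module's globals(), so 'json' maps to Json below.
# `league_table` stands for API response data and 'EPL' is a hypothetical key.
#
#   writer = get_writer('json', output_file=None)
#   writer.standings(league_table, 'EPL')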
class BaseWriter(object):
__metaclass__ = ABCMeta
def __init__(self, output_file):
self.output_filename = output_file
@abstractmethod
def live_scores(self, live_scores):
pass
@abstractmethod
def team_scores(self, team_scores, time):
pass
@abstractmethod
def team_players(self, team):
pass
@abstractmethod
def standings(self, league_table, league):
pass
@abstractmethod
def league_scores(self, total_data, time):
pass
class Stdout(BaseWriter):
def __init__(self, output_file):
self.Result = namedtuple("Result", "homeTeam, goalsHomeTeam, awayTeam, goalsAwayTeam")
enums = dict(
WIN="red",
LOSE="blue",
TIE="yellow",
MISC="green",
TIME="yellow",
CL_POSITION="green",
EL_POSITION="yellow",
RL_POSITION="red",
POSITION="blue"
)
self.colors = type('Enum', (), enums)
def live_scores(self, live_scores):
"""Prints the live scores in a pretty format"""
scores = sorted(live_scores, key=lambda x: x["league"])
for league, games in groupby(scores, key=lambda x: x["league"]):
self.league_header(league)
for game in games:
self.scores(self.parse_result(game), add_new_line=False)
click.secho(' %s' % Stdout.utc_to_local(game["time"],
use_12_hour_format=False),
fg=self.colors.TIME)
click.echo()
def team_scores(self, team_scores, time, show_datetime, use_12_hour_format):
"""Prints the teams scores in a pretty format"""
for score in team_scores["matches"]:
if score["status"] == "FINISHED":
click.secho("%s\t" % score["utcDate"].split('T')[0],
fg=self.colors.TIME, nl=False)
self.scores(self.parse_result(score))
elif show_datetime:
self.scores(self.parse_result(score), add_new_line=False)
click.secho(' %s' % Stdout.utc_to_local(score["utcDate"],
use_12_hour_format,
show_datetime),
fg=self.colors.TIME)
def team_players(self, team):
"""Prints the team players in a pretty format"""
players = sorted(team, key=lambda d: d['shirtNumber'])
click.secho("%-4s %-25s %-20s %-20s %-15s" %
("N.", "NAME", "POSITION", "NATIONALITY", "BIRTHDAY"),
bold=True,
fg=self.colors.MISC)
fmt = (u"{shirtNumber:<4} {name:<28} {position:<23} {nationality:<23}"
u" {dateOfBirth:<18}")
for player in players:
click.secho(fmt.format(**player), bold=True)
def standings(self, league_table, league):
""" Prints the league standings in a pretty way """
click.secho("%-6s %-30s %-10s %-10s %-10s" %
("POS", "CLUB", "PLAYED", "GOAL DIFF", "POINTS"))
for team in league_table["standings"][0]["table"]:
if team["goalDifference"] >= 0:
team["goalDifference"] = ' ' + str(team["goalDifference"])
# Define the upper and lower bounds for Champions League,
# Europa League and Relegation places.
# This is so we can highlight them appropriately.
cl_upper, cl_lower = LEAGUE_PROPERTIES[league]['cl']
el_upper, el_lower = LEAGUE_PROPERTIES[league]['el']
rl_upper, rl_lower = LEAGUE_PROPERTIES[league]['rl']
team['teamName'] = team['team']['name']
team_str = (u"{position:<7} {teamName:<33} {playedGames:<12}"
u" {goalDifference:<14} {points}").format(**team)
if cl_upper <= team["position"] <= cl_lower:
click.secho(team_str, bold=True, fg=self.colors.CL_POSITION)
elif el_upper <= team["position"] <= el_lower:
click.secho(team_str, fg=self.colors.EL_POSITION)
elif rl_upper <= team["position"] <= rl_lower:
click.secho(team_str, fg=self.colors.RL_POSITION)
else:
click.secho(team_str, fg=self.colors.POSITION)
def league_scores(self, total_data, time, show_datetime,
use_12_hour_format):
"""Prints the data in a pretty format"""
for match in total_data['matches']:
self.scores(self.parse_result(match), add_new_line=not show_datetime)
if show_datetime:
click.secho(' %s' % Stdout.utc_to_local(match["utcDate"],
use_12_hour_format,
show_datetime),
fg=self.colors.TIME)
click.echo()
def league_header(self, league):
"""Prints the league header"""
league_name = " {0} ".format(league)
click.secho("{:=^62}".format(league_name), fg=self.colors.MISC)
click.echo()
def scores(self, result, add_new_line=True):
"""Prints out the scores in a pretty format"""
if result.goalsHomeTeam > result.goalsAwayTeam:
homeColor, awayColor = (self.colors.WIN, self.colors.LOSE)
elif result.goalsHomeTeam < result.goalsAwayTeam:
homeColor, awayColor = (self.colors.LOSE, self.colors.WIN)
else:
homeColor = awayColor = self.colors.TIE
click.secho('%-25s %2s' % (result.homeTeam, result.goalsHomeTeam),
fg=homeColor, nl=False)
click.secho(" vs ", nl=False)
click.secho('%2s %s' % (result.goalsAwayTeam,
result.awayTeam.rjust(25)), fg=awayColor,
nl=add_new_line)
def parse_result(self, data):
"""Parses the results and returns a Result namedtuple"""
def valid_score(score):
return "" if score is None else score
return self.Result(
data["homeTeam"]["name"],
valid_score(data["score"]["fullTime"]["homeTeam"]),
data["awayTeam"]["name"],
valid_score(data["score"]["fullTime"]["awayTeam"]))
@staticmethod
def utc_to_local(time_str, use_12_hour_format, show_datetime=False):
"""Converts the API UTC time string to the local user time."""
if not (time_str.endswith(" UTC") or time_str.endswith("Z")):
return time_str
today_utc = datetime.datetime.utcnow()
utc_local_diff = today_utc - datetime.datetime.now()
if time_str.endswith(" UTC"):
time_str, _ = time_str.split(" UTC")
utc_time = datetime.datetime.strptime(time_str, '%I:%M %p')
utc_datetime = datetime.datetime(today_utc.year,
today_utc.month,
today_utc.day,
utc_time.hour,
utc_time.minute)
else:
utc_datetime = datetime.datetime.strptime(time_str,
'%Y-%m-%dT%H:%M:%SZ')
local_time = utc_datetime - utc_local_diff
if use_12_hour_format:
date_format = '%I:%M %p' if not show_datetime else '%a %d, %I:%M %p'
else:
date_format = '%H:%M' if not show_datetime else '%a %d, %H:%M'
return datetime.datetime.strftime(local_time, date_format)
class Csv(BaseWriter):
def generate_output(self, result):
if not self.output_filename:
for row in result:
click.echo(u','.join(unicode(item) for item in row))
else:
with open(self.output_filename, 'w') as csv_file:
writer = csv.writer(csv_file)
for row in result:
row = [unicode(s).encode('utf-8') for s in row]
writer.writerow(row)
def live_scores(self, live_scores):
"""Store output of live scores to a CSV file"""
headers = ['League', 'Home Team Name', 'Home Team Goals',
'Away Team Goals', 'Away Team Name']
result = [headers]
result.extend([game['league'], game['homeTeamName'],
game['goalsHomeTeam'], game['goalsAwayTeam'],
game['awayTeamName']] for game in live_scores['games'])
self.generate_output(result)
def team_scores(self, team_scores, time):
"""Store output of team scores to a CSV file"""
headers = ['Date', 'Home Team Name', 'Home Team Goals',
'Away Team Goals', 'Away Team Name']
result = [headers]
result.extend([score["utcDate"].split('T')[0],
score['homeTeam']['name'],
score['score']['fullTime']['homeTeam'],
score['score']['fullTime']['awayTeam'],
score['awayTeam']['name']]
for score in team_scores['matches']
if score['status'] == 'FINISHED')
self.generate_output(result)
def team_players(self, team):
"""Store output of team players to a CSV file"""
headers = ['Jersey Number', 'Name', 'Position', 'Nationality',
'Date of Birth']
result = [headers]
result.extend([player['shirtNumber'],
player['name'],
player['position'],
player['nationality'],
player['dateOfBirth']]
for player in team)
self.generate_output(result)
def standings(self, league_table, league):
"""Store output of league standings to a CSV file"""
headers = ['Position', 'Team Name', 'Games Played', 'Goal For',
'Goals Against', 'Goal Difference', 'Points']
result = [headers]
result.extend([team['position'],
team['team']['name'],
team['playedGames'],
team['goalsFor'],
team['goalsAgainst'],
team['goalDifference'],
team['points']]
for team in league_table['standings'][0]['table'])
self.generate_output(result)
def league_scores(self, total_data, time, show_upcoming, use_12_hour_format):
"""Store output of fixtures based on league and time to a CSV file"""
headers = ['League', 'Home Team Name', 'Home Team Goals',
'Away Team Goals', 'Away Team Name']
result = [headers]
league = total_data['competition']['name']
result.extend([league,
score['homeTeam']['name'],
score['score']['fullTime']['homeTeam'],
score['score']['fullTime']['awayTeam'],
score['awayTeam']['name']]
for score in total_data['matches'])
self.generate_output(result)
class Json(BaseWriter):
def generate_output(self, result):
if not self.output_filename:
click.echo(json.dumps(result,
indent=4,
separators=(',', ': '),
ensure_ascii=False))
else:
with io.open(self.output_filename, 'w', encoding='utf-8') as f:
                data = json.dumps(result, indent=4,
                                  separators=(',', ': '), ensure_ascii=False)
f.write(data)
def live_scores(self, live_scores):
"""Store output of live scores to a JSON file"""
self.generate_output(live_scores['games'])
def team_scores(self, team_scores, time):
"""Store output of team scores to a JSON file"""
data = []
for score in team_scores['matches']:
if score['status'] == 'FINISHED':
item = {'date': score["utcDate"].split('T')[0],
'homeTeamName': score['homeTeam']['name'],
'goalsHomeTeam': score['score']['fullTime']['homeTeam'],
'goalsAwayTeam': score['score']['fullTime']['awayTeam'],
'awayTeamName': score['awayTeam']['name']}
data.append(item)
self.generate_output({'team_scores': data})
def standings(self, league_table, league):
"""Store output of league standings to a JSON file"""
data = []
for team in league_table['standings'][0]['table']:
item = {'position': team['position'],
'teamName': team['team'],
'playedGames': team['playedGames'],
'goalsFor': team['goalsFor'],
'goalsAgainst': team['goalsAgainst'],
'goalDifference': team['goalDifference'],
'points': team['points']}
data.append(item)
self.generate_output({'standings': data})
def team_players(self, team):
"""Store output of team players to a JSON file"""
keys = 'shirtNumber name position nationality dateOfBirth'.split()
data = [{key: player[key] for key in keys} for player in team]
self.generate_output({'players': data})
def league_scores(self, total_data, time):
"""Store output of fixtures based on league and time to a JSON file"""
data = []
for league, score in self.supported_leagues(total_data):
item = {'league': league, 'homeTeamName': score['homeTeamName'],
'goalsHomeTeam': score['result']['goalsHomeTeam'],
'goalsAwayTeam': score['result']['goalsAwayTeam'],
'awayTeamName': score['awayTeamName']}
data.append(item)
self.generate_output({'league_scores': data, 'time': time})
|
Saturn/soccer-cli
|
soccer/writers.py
|
Python
|
mit
| 14,627
|
# Copyright (C) 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
# Add any new listener imports here
from anaconda_go.listeners.linting import BackgroundLinter
from anaconda_go.listeners.autocompletion import GoCompletionEventListener
from anaconda_go.listeners.autoformat import AnacondaGoAutoFormatEventListener
from anaconda_go.listeners.goimports_and_save import GoImportsOnSave
__all__ = [
'BackgroundLinter', 'GoCompletionEventListener',
'AnacondaGoAutoFormatEventListener',
'GoImportsOnSave',
]
|
danalec/dotfiles
|
sublime/.config/sublime-text-3/Packages/anaconda_go/listeners/__init__.py
|
Python
|
mit
| 582
|
import unittest
import json
import os
from pathlib import Path
import shutil
from envvar_helper import add_aws_envvar
class EnvVarHelperTests(unittest.TestCase):
def setUp(self):
base_path = Path(Path.cwd(), "tests", "integration", "fixtures")
self.template = Path(base_path, "template.json")
self.temporary = Path(base_path, "undertest.json")
self.expected = Path(base_path, "expected.json")
shutil.copy2(self.template, self.temporary)
def tearDown(self):
os.remove(self.temporary)
def test_add_envvars(self):
add_aws_envvar("stage1", "key4", "value4", self.temporary)
add_aws_envvar("stage1", "key5", "value5", self.temporary)
with open(self.temporary, "r") as f1:
actual = json.load(f1)
with open(self.expected, "r") as f2:
expected = json.load(f2)
self.assertDictEqual(expected, actual)
|
azam-a/domini
|
tests/integration/test_envvar_helper.py
|
Python
|
mit
| 925
|
# -*- coding: utf-8 -*-
""" Turkey-specific Tables
@copyright: 2015-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3TurkeyIdentityModel",)
from gluon import *
from ..s3 import *
# =============================================================================
class S3TurkeyIdentityModel(S3Model):
""" Model for Turkish Identity Cards """
names = ("tr_identity",)
def model(self):
T = current.T
# -------------------------------------------------------------------------
# Turkish Identity
#
tablename = "tr_identity"
self.define_table(tablename,
self.pr_person_id(),
self.gis_location_id(
widget = S3LocationSelector(levels=("L1", "L2", "L3"),
show_map=False,
),
),
Field("volume_no",
label = T("Volume No"),
),
Field("family_order_no", "integer",
label = T("Family Order No"),
),
Field("order_no", "integer",
label = T("Order No"),
),
*s3_meta_fields()
)
# ---------------------------------------------------------------------
# Return global names to s3.*
#
return {}
# END =========================================================================
|
flavour/ifrc_qa
|
modules/s3db/tr.py
|
Python
|
mit
| 2,818
|
import numpy
class MSRALossFunctionAbs(object):
def __init__(self, distrib, c=None):
self.__x = distrib
self.__dim = distrib.shape[1]
self.__c = c
@property
def x(self):
return self.__x
@property
def dim(self):
return self.__dim
@property
def c(self):
if self.__c is None:
raise AttributeError("The value of c is unset")
return self.__c
@c.setter
def c(self, c):
self.__c = c
def _check_argument(self, m):
if m is None:
m = numpy.zeros((self.__dim,))
else:
if m.shape != (self.__dim,):
raise ValueError("""m must be of shape (%i). Given: %s.""" % (self.__dim, m.shape))
return m
def objective(self, m):
return numpy.sum(m)
def objective_jac(self, m):
return numpy.ones((self.__dim,))
def ineq_constraint(self, m):
return self.c - self.shortfall_risk(m)
def ineq_constraint_jac(self, m):
return self.shortfall_risk_jac(m)
# \mathbb{E} \left( \ell(X - m) \right)
def shortfall_risk(self, m=None):
raise NotImplementedError()
# \mathbb{E} \left( \nabla \ell(X - m) \right)
def shortfall_risk_jac(self, m):
raise NotImplementedError()
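# A minimal illustrative subclass (not part of the original module), assuming an
# exponential loss l(x) = exp(x) - 1 so that both expectations reduce to simple
# sample averages over the rows of the stored distribution:
class ExponentialLossFunction(MSRALossFunctionAbs):
    def shortfall_risk(self, m=None):
        m = self._check_argument(m)
        # sum the per-component losses, then average over the sampled rows
        return numpy.mean(numpy.sum(numpy.exp(self.x - m) - 1.0, axis=1))
    def shortfall_risk_jac(self, m):
        m = self._check_argument(m)
        # E( grad l(X - m) ) with grad l(x) = exp(x), averaged over the rows
        return numpy.mean(numpy.exp(self.x - m), axis=0)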
|
yarmenti/MSRA
|
lib/msra_loss.py
|
Python
|
mit
| 1,307
|
import ast
class ImportVisitor(ast.NodeVisitor):
def __init__(self, names):
self._names = names
self._info = {}
def visit_Import(self, node):
self._visitImport(node, [a.name for a in node.names])
def visit_ImportFrom(self, node):
parts = [node.module+'.'+a.name for a in node.names]
parts.append(node.module)
self._visitImport(node, parts)
def _visitImport(self, node, imports):
for name in imports:
if name in self._names:
self._info[name] = node.lineno
return
# allow parser to continue to parse the statement's children
super(ImportVisitor, self).generic_visit(node)
def getImportsInfo(self):
return self._info
def _test():
code = """
import stuff
from bananas import taste
from pounds import pence
def foo():
import bacon as face, nose
a = b + 5
"""
print code
print '----'
tree = ast.parse(code)
# print dir(tree)
# print tree.__class__
iv = ImportVisitor(['bacon', 'bananas', 'pounds.pence'])
iv.visit(tree)
print iv.getImportsInfo()
if __name__ == '__main__':
_test()
|
jmorse/cyanide
|
lint-imports/importvisitor.py
|
Python
|
mit
| 1,030
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.rest import HttpRequest
from msrest import Serializer
from ..._vendor import _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, IO, Optional, TypeVar
T = TypeVar('T')
JSONType = Any
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get list of schema groups.
Gets the list of schema groups user is authorized to access.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword api_version: Api Version. The default value is "2021-10". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"schemaGroups": [
"str" # Optional. Array of schema groups.
]
}
"""
api_version = kwargs.pop('api_version', "2021-10") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/$schemaGroups')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
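# A minimal usage sketch grounded in the docstring above (the `client` object is
# an assumption; any azure.core pipeline client with a send_request method works):
# request = build_list_request(api_version="2021-10")
# response = client.send_request(request)
# groups = response.json()["schemaGroups"]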
|
Azure/azure-sdk-for-python
|
sdk/schemaregistry/azure-schemaregistry/azure/schemaregistry/_generated/rest/schema_groups/_request_builders.py
|
Python
|
mit
| 2,609
|
import pytest
class TestExample:
def testMultiply2And3Properly(self):
# given
x = 2
y = 3
# when
c = x * y
# then
assert c == 6
|
nokia-wroclaw/innovativeproject-dbshepherd
|
test/TestDummy.py
|
Python
|
mit
| 184
|
"""How accurate is Skyfield's subpoint computation routine?
Let's call it with a varying number of iterations, and see how
accurately it can turn a Topos-generated position back into a latitude,
longitude, and elevation.
"""
from numpy import einsum
from skyfield.api import Topos, load
from skyfield.constants import AU_M, DEG2RAD
from skyfield.earthlib import reverse_terra
def main():
ts = load.timescale()
t = ts.tt(2018, 1, 22, 9, 9, 20)
trial_angles = 10, 20, 30, 40 # the error peaks around 20 degrees
trial_elevations = 0, 6000, AU_M
print(__doc__)
for n in range(1, 5):
print('=== {} iterations ==='.format(n))
print('')
for elevation_m in trial_elevations:
for degrees in trial_angles:
top = Topos(latitude_degrees=degrees, longitude_degrees=123,
elevation_m=elevation_m)
xyz_au = top.at(t).position.au
xyz_au = einsum('ij...,j...->i...', t.M, xyz_au)
lat, lon, elev = reverse_terra(xyz_au, t.gast, n)
lat = lat / DEG2RAD
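                # convert the latitude error from degrees to milliarcseconds
                # (1 degree = 60 * 60 * 1000 mas)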
error_mas = 60.0 * 60.0 * 1000.0 * abs(degrees - lat)
print('latitude {} degrees, elevation {} m'
' -> error of {:.2f} mas'
.format(degrees, elevation_m, error_mas))
print('')
print("""\
Given that iterations=3 pushes the maximum error from tens of mas ("mas"
means " milli-arcsecond") down to hundredths of a mas, it is the value
we have chosen as a default. A fourth iteration, if we ever chose to
perform one, pushes the error down to "0.00 mas".
""")
if __name__ == '__main__':
main()
|
skyfielders/python-skyfield
|
design/subpoint_accuracy.py
|
Python
|
mit
| 1,687
|
"""Detect viral infections via bwa alignment of unaligned reads.
This is primarily useful for cancer samples where viral infection can
inform treatment.
"""
import glob
import os
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import vcfutils
def run(bam_file, data, out_dir):
"""Run viral QC analysis.
"""
viral_target = "gdc-viral"
out = {}
if vcfutils.get_paired_phenotype(data):
viral_refs = [x for x in dd.get_viral_files(data) if os.path.basename(x) == "%s.fa" % viral_target]
if viral_refs and utils.file_exists(viral_refs[0]):
viral_ref = viral_refs[0]
viral_bam = os.path.join(utils.safe_makedir(out_dir),
"%s-%s.bam" % (dd.get_sample_name(data),
utils.splitext_plus(os.path.basename(viral_ref))[0]))
out_file = "%s-counts.txt" % utils.splitext_plus(viral_bam)[0]
if not utils.file_uptodate(out_file, bam_file):
if not utils.file_uptodate(viral_bam, bam_file):
with file_transaction(data, viral_bam) as tx_out_file:
cores = dd.get_num_cores(data)
tmpfile = "%s-tmp" % utils.splitext_plus(tx_out_file)[0]
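                        # pipeline: select unmapped reads (-f 4), convert to FASTQ,
                        # realign against the viral reference with bwa mem, then
                        # sort and index the output into the transaction file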
cmd = ("samtools view -u -f 4 {bam_file} | "
"bamtofastq collate=0 | "
"bwa mem -t {cores} {viral_ref} - | "
"bamsort tmpfile={tmpfile} inputthreads={cores} outputthreads={cores} "
"inputformat=sam index=1 indexfilename={tx_out_file}.bai O={tx_out_file}")
do.run(cmd.format(**locals()), "Compare unmapped reads to viral genome")
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
out_handle.write("# sample\t%s\n" % dd.get_sample_name(data))
for info in bam.idxstats(viral_bam, data):
if info.aligned > 0:
out_handle.write("%s\t%s\n" % (info.contig, info.aligned))
out["base"] = out_file
return out
def get_files(data):
"""Retrieve pre-installed viral reference files.
"""
all_files = glob.glob(os.path.normpath(os.path.join(os.path.dirname(dd.get_ref_file(data)),
os.pardir, "viral", "*")))
return sorted(all_files)
|
biocyberman/bcbio-nextgen
|
bcbio/qc/viral.py
|
Python
|
mit
| 2,676
|
import numpy as np
import math
from sklearn import datasets, neighbors, linear_model
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
np.random.seed(0)
indices = np.random.permutation(len(X_digits))
num_samples = len(digits.data)
test_set_size = int(math.floor(.1 * num_samples))  # cast to int so it can be used as a slice index
print "number of samples: ", num_samples
print "test_set_size: " ,test_set_size
digits_X_train = X_digits[indices[:-test_set_size]]
digits_y_train = y_digits[indices[:-test_set_size]]
digits_X_test = X_digits[indices[-test_set_size:]]
digits_y_test = y_digits[indices[-test_set_size:]]
knn = neighbors.KNeighborsClassifier()
knn.fit(digits_X_train, digits_y_train)
print "KNN score: "
print knn.score(digits_X_test, digits_y_test)
logistic = linear_model.LogisticRegression(C=1e5)
logistic.fit(digits_X_train, digits_y_train)
print "Logistic Regression score: "
print logistic.score(digits_X_test, digits_y_test)
# """
# ================================
# Digits Classification Exercise
# ================================
# A tutorial exercise regarding the use of classification techniques on
# the Digits dataset.
# This exercise is used in the :ref:`clf_tut` part of the
# :ref:`supervised_learning_tut` section of the
# :ref:`stat_learn_tut_index`.
# """
# print(__doc__)
# from sklearn import datasets, neighbors, linear_model
# digits = datasets.load_digits()
# X_digits = digits.data
# y_digits = digits.target
# n_samples = len(X_digits)
# X_train = X_digits[:.9 * n_samples]
# y_train = y_digits[:.9 * n_samples]
# X_test = X_digits[.9 * n_samples:]
# y_test = y_digits[.9 * n_samples:]
# knn = neighbors.KNeighborsClassifier()
# logistic = linear_model.LogisticRegression()
# print('KNN score: %f' % knn.fit(X_train, y_train).score(X_test, y_test))
# print('LogisticRegression score: %f'
# % logistic.fit(X_train, y_train).score(X_test, y_test))
|
pieteradejong/python
|
scikitlearn-ex1.py
|
Python
|
mit
| 1,884
|
#!/usr/bin/env python
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyActionClient
# Additional imports can be added inside the following tags
import rospy
import sys
import copy
import numpy as np
import math
import time
import logging
import std_msgs.msg
from geometry_msgs.msg import Pose
from sensor_msgs.msg import JointState
from brics_actuator.msg import JointPositions, JointValue
from tf.transformations import euler_from_quaternion
from lib_inverse_kinematics import Inverse_Kinematics
# [/MANUAL_IMPORT]
'''
Created on 03.04.2017
@author: Alessio Caporali
'''
class IKSolverTrajectory(EventState):
'''
Get IK solution for the box pose and execute trajectory.
--hertz float Loop frequency for publishing each point of the trajectory.
--samples float Number of points in the trajectory.
--offset float How much go down to grasp the cube.
--inclination_ee float Inclination of the End_Effector with respect to the ground.
># pose Pose Target for IK.
<= found IK solution found.
<= unavailable IK solution not found.
'''
def __init__(self, hertz, samples, offset, inclination_ee):
'''
Constructor
'''
super(IKSolverTrajectory, self).__init__(outcomes=['found', 'unavailable'], input_keys = ['pose'])
rospy.Subscriber('/joint_states', JointState, self.CallbackJoints)
self.armPub = rospy.Publisher('/arm_controller/position_command', JointPositions, queue_size=10)
rospy.Subscriber('/ee_pose', Pose, self.end_effector_pose)
self.done = 0
self.ok = 0
self.value = [0 for i in range(6)]
self.hertz = hertz
self.samples = samples
self.offset = offset
self.inclination_ee = - inclination_ee
def CallbackJoints(self, joint_states):
self.joint_states = copy.deepcopy(joint_states)
def end_effector_pose(self, pose):
ee_p =[pose.position.x, pose.position.y, pose.position.z]
self.end_effector = ee_p
def publish_arm_joint_positions(self, joint_positions):
desiredPositions = JointPositions()
jointCommands = []
for i in range(5):
joint = JointValue()
joint.joint_uri = "arm_joint_" + str(i+1)
joint.unit = "rad"
joint.value = joint_positions[i]
jointCommands.append(joint)
desiredPositions.positions = jointCommands
self.armPub.publish(desiredPositions)
def trajectory_calc(self, end_effector, target, number, offset_grasp):
number_real = number + number/5
self.q = [0 for i in range(number_real)]
v = [end_effector[0] - target[0], end_effector[1] - target[1], end_effector[2] - abs(target[2])]
dim = np.linalg.norm(v)
print ("Magnitude of Vector: ", dim)
i = 0
k = 0
t = 0
inc = 1.0/number
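        # step linearly from the current end-effector position toward the target
        # in `number` equal increments (p = end_effector - t * v below)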
for i in range(number):
i = i + 1
t = 0 + i*inc
p = np.array(end_effector) - (t*np.array(v))
self.q[i-1] = Inverse_Kinematics(p, self.inclination_ee, self.rotation)
i = 0
inc2 = 1.0/(number/5.0)
for i in range(number/5):
i = i + 1
k = 0 + i*inc2
p2 = [p[0], p[1], (p[2] - k*(offset_grasp/1000.0))]
self.q[i-1+number] = Inverse_Kinematics(p2, self.inclination_ee, self.rotation)
def trajectory_pub(self, joint_q, frequency, samples):
samples_tot = samples + samples/5
r = rospy.Rate(frequency)
i = 0
for i in range(samples_tot):
i = i + 1
self.publish_arm_joint_positions(joint_q[i-1])
r.sleep()
return 1
def execute(self, userdata):
'''
Execute this state
'''
self.done = 0
if self.ok == 1:
if (abs(self.value[0] - self.joint_states.position[10])) <= 0.015:
self.done = self.done + 1
if (abs(self.value[1] - self.joint_states.position[11])) <= 0.015:
self.done = self.done + 1
if (abs(self.value[2] - self.joint_states.position[12])) <= 0.015:
self.done = self.done + 1
if (abs(self.value[3] - self.joint_states.position[13])) <= 0.015:
self.done = self.done + 1
if (abs(self.value[4] - self.joint_states.position[14])) <= 0.015:
self.done = self.done + 1
else:
pass
        if self.done == 5:
return 'found'
else:
pass
def on_enter(self, userdata):
Logger.logwarn('POSE OBJECT: ')
Logger.logwarn(userdata.pose.position)
self.target = [userdata.pose.position.x, userdata.pose.position.y, userdata.pose.position.z]
(roll,pitch,yaw) = euler_from_quaternion([userdata.pose.orientation.x, userdata.pose.orientation.y, userdata.pose.orientation.z, userdata.pose.orientation.w])
self.rotation = yaw
self.trajectory_calc(self.end_effector, self.target, self.samples, self.offset)
n = (self.samples + self.samples/5) -1
self.value = self.q[n]
self.ok = self.trajectory_pub(self.q, self.hertz, self.samples)
def on_exit(self, userdata):
Logger.loginfo('Exiting JointValuesIK State')
|
matteopantano/youbot-thesis
|
states/IK_Solver_Trajectory.py
|
Python
|
mit
| 4,840
|
from __future__ import print_function
from psychopy import sound, monitors, core, visual, event, data, gui, logging, info
import numpy as np
from copy import deepcopy
from math import atan, cos, sin, pi, sqrt, pow
import time, sys, platform, os, StringIO
import pylab
from pandas import DataFrame
from calcUnderOvercorrect import calcOverCorrected
from plotHelpers import plotDataAndPsychometricCurve
dataframeInPsychopy = True #merged 10 December 2014
autopilot = False
quitFinder = False
if quitFinder:
applescript="\'tell application \"Finder\" to quit\'" #quit Finder.
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
trialClock = core.Clock()
ballStdDev = 0.8
autoLogging = False
participant = 'Hubert'
if autopilot:
participant = 'auto'
fullscr=False
refreshRate = 60
infoFirst = {'Participant':participant, 'Check refresh etc':False, 'Fullscreen (timing errors if not)': fullscr, 'Screen refresh rate': refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='Szinte & Cavanagh spatiotopic apparent motion',
order=[ 'Participant','Check refresh etc', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
#fixed=['Check refresh etc'])#this attribute can't be changed by the user
)
if not OK.OK:
print('User cancelled from dialog box'); core.quit()
participant = infoFirst['Participant']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
quitFinder = False
if checkRefreshEtc:
quitFinder = True
if quitFinder:
import os
applescript="\'tell application \"Finder\" to quit\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
demo=False
respDeadline = 100
if autopilot:
respDeadline = 0.1
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
if os.path.isdir('.'+os.sep+'data'):
dataDir='data/raw'
else:
print('"data" directory does not exist, so saving data in present working directory')
dataDir='.'
fName = participant+'_spatiotopicMotion_'+timeAndDateStr
fileNameWithPath = os.path.join(dataDir, fName)
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileNameWithPath + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logFname = fileNameWithPath+'.log'
ppLogF = logging.LogFile(logFname,
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#errors, data and warnings will be sent to this logfile
scrn=1 #1 means second screen
widthPix =1440#1024 #monitor width in pixels
heightPix =900#768 #monitor height in pixels
monitorwidth = 33. #39 #monitor width in centimeters
viewdist = 62.; #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) / np.pi*180)
bgColor = [0,0,0] #"gray background"
allowGUI = False
waitBlank = False
units = 'deg'
monitorname = 'mitsubishi' #in psychopy Monitors Center #Holcombe lab monitor
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#fetch the most recent calib for this monitor
mon.setSizePix( (widthPix,heightPix) )
def openMyStimWindow(): #make it a function because have to do it several times, want to be sure is identical each time
myWin = visual.Window(monitor=mon,size=(widthPix,heightPix),allowGUI=allowGUI,units=units,color=bgColor,colorSpace='rgb',fullscr=fullscr,
screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor
return myWin
myWin = openMyStimWindow()
myWin.recordFrameIntervals = True #required by RunTimeInfo?
refreshMsg2 = ''
refreshRateWrong = False
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
else: #checkRefreshEtc
try:
runInfo = info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=False ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
#seems to require internet access, probably for process lookup
)
#print(runInfo)
logging.info(runInfo)
print('Finished runInfo- which assesses the refresh and processes of this computer')
runInfo_failed = False
except:
runInfo_failed = True
        refreshMsg1 = " runInfo call FAILED so don't know refresh rate"
if not runInfo_failed:
refreshSDwarningLevel_ms = 3 ##ms
if runInfo["windowRefreshTimeSD_ms"] > refreshSDwarningLevel_ms:
print("\nThe variability of the refresh rate is high (SD > %.2f ms)." % (refreshSDwarningLevel_ms))
## and here you could prompt the user with suggestions, possibly based on other info:
if runInfo["windowIsFullScr"]:
print("Your window is full-screen, which is good for timing.")
print('Possible issues: internet / wireless? bluetooth? recent startup (not finished)?')
if len(runInfo['systemUserProcFlagged']):
print('other programs running? (command, process-ID):',runInfo['systemUserProcFlagged'])
medianHz = 1000./runInfo['windowRefreshTimeMedian_ms']
refreshMsg1= 'Median frames per second ~='+ str( np.round(medianHz,1) )
refreshRateTolerancePct = 3
pctOff = abs( (medianHz-refreshRate) / refreshRate )
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
myDlg = gui.Dlg(title="Screen check", pos=(200,400))
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
myDlg.addText(msgWrongResolution, color='Red')
logging.error(msgWrongResolution)
print(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
pass
else:
print('User cancelled from dialog box.')
logging.flush()
core.quit()
if not demo:
allowGUI = False
myWin = openMyStimWindow()
targetDot = visual.ImageStim(myWin,mask='circle',colorSpace='rgb', color = (-1, 0.7, -1), size=ballStdDev,autoLog=autoLogging, contrast=1, opacity = 1.0)
foilDot = visual.ImageStim(myWin,mask='circle',colorSpace='rgb', color = (.8, 0, 1),size=ballStdDev,autoLog=autoLogging, contrast=1, opacity = 1.0)
blackDot = visual.ImageStim(myWin,mask='circle',colorSpace='rgb', color = (-1,-1,-1),size=ballStdDev,autoLog=autoLogging, contrast=0.5, opacity = 1.0)
beforeTrialsText = visual.TextStim(myWin,pos=(0, 0),colorSpace='rgb',color = (-1,-1,-1),alignHoriz='center', alignVert='center', height = 0.05, units='norm',autoLog=autoLogging)
respPromptText = visual.TextStim(myWin,pos=(0, -.3),colorSpace='rgb',color = (-1,-1,-1),alignHoriz='center', alignVert='center', height = 0.07, units='norm',autoLog=autoLogging)
betweenTrialsText = visual.TextStim(myWin,pos=(0, -.4),colorSpace='rgb',color = (-1,-1,-1),alignHoriz='center', alignVert='center',height=.03,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,-.6),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center',height=.05,units='norm',autoLog=autoLogging)
locationOfProbe= np.array([[0,1.5]]) # np.array([[-10,1.5],[0,1.5],[10,1.5]]) #left, centre, right
#Potential other conditions:[-10,6.5],[0,6.5],[10,6.5],[-10,-3.5],[0,-3.5],[10,-3.5]
stimList=[]
for locus in locationOfProbe: #location of the probe for the trial
probeLocationY = locus[1]
for upDown in [False,True]: #switching between probe moving top to bottom; and bottom to top
for startLeft in [False,True]:
for tilt in [-.4,0,.4]: # [-0.6,0,0.6]: # # [-0.875,0,0.875]: #adjusting whether the probe jump is vertical, or slanted. Tilt positive means second position to right
for jitter in [-0.875,0,0.875]:#shifting each condition slightly from the location to ensure participants dont recognise tilted trials by the location of the initial probe
probeLocationX = locus[0]+jitter
stimList.append({'probeX': probeLocationX, 'probeY':probeLocationY, 'startLeft':startLeft, 'upDown': upDown, 'tilt': tilt, 'jitter': jitter})
blockReps = 1 #2
#durations in frames
durWithoutProbe = 0.1
durProbe = 0.4
trials = data.TrialHandler(stimList, blockReps,
extraInfo= {'subject':participant,'durWithoutProbe':durWithoutProbe} ) #will be included in each row of dataframe and wideText
thisTrial = trials.next()
initialDurS = durWithoutProbe
probeFirstDisappearanceS = initialDurS + durProbe
switchCuesS = probeFirstDisappearanceS+durWithoutProbe
probeSecondAppearanceS = switchCuesS + durWithoutProbe
probeSecondDisappearanceS = probeSecondAppearanceS+durProbe
#convert to frames
initialDur = round(initialDurS*refreshRate) #target and foil dot without probe for the first 600 ms
probeFirstDisappearance = round(probeFirstDisappearanceS*refreshRate) # probe disappears for 100 ms whilst target and foil dot remain the same
switchCues = round(switchCuesS*refreshRate) # target and foil dots switch positions for 100 ms
probeSecondAppearance = round(probeSecondAppearanceS*refreshRate) # probe returns on the other side of the horizontal meridian for 400 ms
probeSecondDisappearance = round(probeSecondDisappearanceS*refreshRate) # probe disappears
oneCycleFrames = int( round( probeSecondDisappearance + durWithoutProbe*refreshRate ) )
totFrames = oneCycleFrames*3
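# three full cycles per trial; oneFrameOfStim below only draws the probe during
# the final cycle (n >= oneCycleFrames*2), so the first two cycles are probe-free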
def oneFrameOfStim(n,targetDotPos,foilDotPos,probePos1,probePos2): #trial stimulus function
targetDotPosThis = deepcopy(targetDotPos) #dont change starting value
foilDotPosThis = deepcopy(foilDotPos)
twoCycles = oneCycleFrames*2 #First the target dot left->right->left->right to get eye movements in swing of things
cycleFrame = n % oneCycleFrames
if cycleFrame <= initialDur: #show target and foil only, either because first part of trial
pass #dont draw black dot, dont change positions
elif initialDur <= cycleFrame < probeFirstDisappearance: #show first position of probe
if n >= twoCycles: #dont draw probe for first two cycles
blackDot.pos = (probePos1)
blackDot.draw()
elif probeFirstDisappearance <= cycleFrame < switchCues: #after probe first disappearance, but before target moves
pass #dont draw black dot, don't change positions
elif switchCues <= cycleFrame < probeSecondAppearance: #target and foil in exchanged positions
targetDotPosThis *=-1
foilDotPosThis *= -1
elif probeSecondAppearance <= cycleFrame < probeSecondDisappearance: #target and foil, in exchanged positions, probe in new location
targetDotPosThis *=-1
foilDotPosThis *=-1
if n >= twoCycles: #dont draw probe for first two cycles
blackDot.pos = (probePos2)
blackDot.draw()
elif probeSecondDisappearance <= cycleFrame < oneCycleFrames:
targetDotPosThis *=-1
foilDotPosThis *= -1
targetDot.pos= (targetDotPosThis)
foilDot.pos= (foilDotPosThis)
targetDot.draw()
foilDot.draw()
myWin.flip()
expStop = False
nDone = 0
while nDone < trials.nTotal and not expStop:
if nDone ==0:
beforeTrialsText.setText("In this task you are to look directly at the green dot, wherever it moves on the screen. "
"Keep looking at the green dot, but attend to the black dot that will either move upwards or downwards during the "
"trial. At the end of the trial you are required to identify whether the black dot moved (slightly) to the left "
"or the right. Mostly it will have jumped vertically but with a slight left or right offset. "
"Press the left arrow for left, \n"
"or the right arrow for right ")
respPromptText.setText("<---- left right ---->")
beforeTrialsText.draw()
respPromptText.draw()
betweenTrialsText.setText('Press SPACE to continue')
betweenTrialsText.draw()
myWin.flip(clearBuffer=True)
if not autopilot:
keysPressed = event.waitKeys(maxWait = 120, keyList = ['space','escape'], timeStamped = False)
if 'escape' in keysPressed:
print('User cancelled by pressing <escape>'); myWin.close(); core.quit()
myWin.clearBuffer()
if thisTrial['startLeft']:
targetDotPos=np.array([-5,0]) #target of saccades starts on left.
foilDotPos =np.array([5,0])
else: #target starts on right
targetDotPos=np.array([5,0]) #position of the green and grey stimulus for second half of trials - right to left
foilDotPos =np.array([-5,0])
    yMultiplier = 1 if thisTrial['upDown'] else -1
probePos1= [ thisTrial['probeX']-thisTrial['tilt'], thisTrial['probeY']*yMultiplier ]
probePos2 =[ thisTrial['probeX']+thisTrial['tilt'], probePos1[1]*-1 ] #y of second location is simply vertical reflection of position 1
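    # with x offsets of -tilt and then +tilt, the probe's total horizontal jump is 2*tilt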
for n in range(totFrames): #Loop for the trial STIMULUS
oneFrameOfStim(n,targetDotPos,foilDotPos,probePos1,probePos2)
core.wait(.1)
respPromptText.setPos([0,-.5]) #low down so doesnt interfere with apparent motion
respPromptText.draw()
myWin.flip(clearBuffer=True)
keysPressed = event.waitKeys(maxWait = respDeadline, keyList = ['left','right','escape'], timeStamped = False)
if keysPressed is None:
keysPressed = ['-99'] #because otherwise testing what's in it gives error
if autopilot and ('escape' not in keysPressed): #optionally person can press key, like esc to abort
keysPressed = ['right']
if 'escape' in keysPressed:
expStop=True
if not expStop:
if 'left' in keysPressed: #recoding key presses as 0 (anticlockwise) or 1 (clockwise) for data analysis
respLeftRight = 0
else:
respLeftRight = 1
if nDone==0: #initiate results dataframe
print(thisTrial) #debugON
df = DataFrame(thisTrial, index=[nDone],
columns = ['jitter','probeX','probeY','startLeft','tilt','upDown']) #columns included purely to specify their order
df['respLeftRight'] = respLeftRight
trials.data.add('respLeftRight', respLeftRight) #psychopy-native way of storing, saving data
else: #add this trial
df['respLeftRight'] = respLeftRight
df= df.append( thisTrial, ignore_index=True ) #ignore because I got no index (rowname)
df['respLeftRight'][nDone] = respLeftRight
trials.data.add('respLeftRight', respLeftRight) #switching to using psychopy-native ways of storing, saving data
print(df.loc[nDone-1:nDone]) #print this trial and previous trial, only because theres no way to print object (single record) in wide format
if nDone< trials.nTotal-1:
betweenTrialsText.draw()
progressMsg = 'Completed trial number ' + str(nDone) + ' (' + str(trials.nTotal) + ' trials in total )'
NextRemindCountText.setText(progressMsg)
NextRemindCountText.draw()
myWin.flip(clearBuffer=True)
keysPressedBetweenTrials = event.waitKeys(maxWait = respDeadline, keyList = ['space','escape'], timeStamped = False)
if keysPressedBetweenTrials is None:
keysPressedBetweenTrials = ['-99'] #because otherwise testing what's in it gives not-iterable error
if autopilot and ('escape' not in keysPressedBetweenTrials): # ( keysPressedBetweenTrials is None):
keysPressedBetweenTrials = ['space']
if 'escape' in keysPressedBetweenTrials:
expStop=True
thisTrial=trials.next()
myWin.clearBuffer()
nDone+=1
logging.flush()
myWin.close()
if expStop:
print("Experiment stopped because user stopped it.")
else:
print("Experiment finished")
if nDone >0:
print("Data was saved on each trial to", fileNameWithPath+'MANUAL.txt')
fileNamePP = fileNameWithPath
dfFromPP = trials.saveAsWideText(fileNamePP)
print("Psychopy's wideText has been saved as", fileNamePP)
#dfFromPP.to_pickle(fileNameWithPath+"_DataFrame.pickle") #doing this to have a dataframe to test plotDataAndPsychometricCurve with in analyzeData.py
fileNamePickle = fileNameWithPath #.psydat will automatically be appended
trials.saveAsPickle(fileNamePickle)
print("Most Psychopy-ic method: trials trialHandler has been saved as", fileNamePickle, "should include copy of code")
#df.dtypes in my case are "objects". you can't take the mean
df = dfFromPP
print('df.dtypes=\n',df.dtypes)
#Fit and plot data
fig = plotDataAndPsychometricCurve(df, dataFileName=None)
figFnameWithPath = os.path.join('figures/', fName + '.png')
pylab.savefig( figFnameWithPath ) #, bbox_inches='tight')
print('The plot has been saved, as', figFnameWithPath)
pylab.show() #pauses until window manually closed. Have to save before calling this, because closing the window loses the figure
|
alexholcombe/spatiotopic-motion
|
dots.py
|
Python
|
mit
| 18,851
|
import os
from uuid import uuid4
import xml.etree.ElementTree as ET
import re
import unicodedata
from time import sleep
import requests
PROCSTATUS_INVALID_RETRY_TRACE = '9714'
PROCSTATUS_USER_NOT_FOUND = '9581'
CARD_TYPE_VISA = 'VISA'
CARD_TYPE_MC = 'MC'
CARD_TYPE_AMEX = 'Amex'
CARD_TYPE_DISCOVER = 'Discover'
CARD_TYPE_JCB = 'JCB'
CARD_TYPES = [
CARD_TYPE_VISA,
CARD_TYPE_MC,
CARD_TYPE_AMEX,
CARD_TYPE_DISCOVER,
CARD_TYPE_JCB,
]
TEST_ENDPOINT_URL_1 = "https://orbitalvar1.chasepaymentech.com"
TEST_ENDPOINT_URL_2 = "https://orbitalvar2.chasepaymentech.com"
ENDPOINT_URL_1 = "https://orbital1.chasepaymentech.com"
ENDPOINT_URL_2 = "https://orbital2.chasepaymentech.com"
CURRENT_DTD_VERSION = "PTI68"
AUTH_PLATFORM_BIN = {
'salem': '000001',
'pns': '000002',
}
valid_credit_pattern = re.compile("""
^(?:4[0-9]{12}(?:[0-9]{3})? # Visa
| 5[1-5][0-9]{14} # MasterCard
| 3[47][0-9]{13} # American Express
| 3(?:0[0-5]|[68][0-9])[0-9]{11} # Diners Club
| 6(?:011|5[0-9]{2})[0-9]{12} # Discover
| (?:2131|1800|35\d{3})\d{11} # JCB
)$
""", re.VERBOSE)
def remove_control_characters(s):
"""
Remove unicode characters that will endanger xml parsing on Chase's end
"""
ue = s.encode('unicode-escape', 'ignore').decode()
return "".join(ch for ch in ue if unicodedata.category(ch)[0] != "C")
def sanitize_address_field(s):
"""
    Address fields should not include any of the following characters:
% | ^ \ /
"""
chars = ["%", "|", "^", "\\", "/"]
return "".join(ch for ch in s if ch not in chars)
def sanitize_phone_field(s):
"""
Phone Number Format
AAAEEENNNNXXXX, where
AAA = Area Code
EEE = Exchange
NNNN = Number
XXXX = Extension
"""
chars = ["(", ")", "-", "."]
return "".join(ch for ch in s if ch not in chars)
class Endpoint(object):
def __init__(self, **kwargs):
"""
Endpoint takes the following constructor params:
merchant_id
username
password
trace_number
production
"""
self.merchant_id = os.getenv('ORBITAL_MERCHANT_ID') or kwargs.get('merchant_id')
self.username = os.getenv('ORBITAL_USERNAME') or kwargs.get('username')
self.password = os.getenv('ORBITAL_PASSWORD') or kwargs.get('password')
self.trace_number = kwargs.get('trace_number', str(uuid4().node))
self.production = kwargs.get('production', False)
if self.production:
self.url = ENDPOINT_URL_1
self.url2 = ENDPOINT_URL_2
else:
self.url = TEST_ENDPOINT_URL_1
self.url2 = TEST_ENDPOINT_URL_2
self.dtd_version = 'application/%s' % CURRENT_DTD_VERSION
self.headers = {
'MIME-Version': "1.1",
'Content-type': self.dtd_version,
'Content-transfer-encoding': "text",
'Request-number': "1",
'Document-type': "Request",
'Trace-number': self.trace_number,
'Interface-Version': "MooreBro 1.1.0",
'MerchantID': str(self.merchant_id),
}
# there are 2 platform options defined in the orbital gateway chase
# Salem - BIN 000001
# PNS - BIN 000002
self.platform = kwargs.pop('platform', 'pns')
def get_platform_bin(self):
try:
return AUTH_PLATFORM_BIN[self.platform.lower()]
except KeyError:
            raise KeyError('You have supplied an invalid platform identification, '
                           'you can choose `Salem` (Stratus) or `PNS`')
def make_request(self, xml):
result = None
# try the first url endpoint, then if there's no success, go to the second
for url in [self.url, self.url2]:
for i in range(3):
if result is not None and result.text is not None:
return result.text
try:
result = requests.post(
url,
data=xml,
headers=self.headers)
if result is not None and result.text is not None:
return result.text
except:
pass
# sleep for 250 ms to avoid rate limiting
sleep(0.25)
return "Could not communicate with Chase"
def convert_amount(self, amount):
"""
Remove decimal, pad zeros for ints.
45.25 -> 4525
54 -> 5400
"""
a = amount.split(".")
if len(a) > 1:
dec = a[1]
if len(dec) == 1:
dec = dec + "0"
amount = a[0] + dec
else:
amount = amount + "00"
return amount
def card_type(self, cc_num):
card = None
try:
if cc_num[0] == "4":
card = "Visa"
elif cc_num[0] == "5":
card = "MC"
elif cc_num[0] == "6":
card = "Discover"
elif cc_num[0:1] in ("34", "37"):
card = "Amex"
elif cc_num[0:3] in ("2131", "1800"):
card = "JCB"
except IndexError:
card = None
return card
def parse_xml(self, xml_file_name, values, default_value=None):
xml_file = os.path.join(os.path.dirname(__file__), xml_file_name)
tree = ET.parse(xml_file)
root = tree.getroot()
values['OrbitalConnectionUsername'] = self.username
values['OrbitalConnectionPassword'] = self.password
values['BIN'] = self.get_platform_bin()
values['CustomerBin'] = self.get_platform_bin()
for key, value in values.items():
elem = root.find(".//%s" % key)
if elem is not None:
elem.text = value or default_value
if elem.text is not None:
elem.text = remove_control_characters(elem.text)
return ET.tostring(root)
def parse_result(self, result):
root = ET.fromstring(result)
        resp_elem = list(root)[0]  # getchildren() is deprecated/removed in newer ElementTree
values = {}
for child_elem in resp_elem:
values[child_elem.tag] = child_elem.text
return values
class Profile(Endpoint):
def __init__(self, **kwargs):
super(Profile, self).__init__(**kwargs)
self.ident = kwargs.get('ident')
self.name = kwargs.get('name')
self.address1 = kwargs.get('address1')
self.address2 = kwargs.get('address2')
self.city = kwargs.get('city')
self.state = kwargs.get('state')
self.zipCode = kwargs.get('zip_code')
self.email = kwargs.get('email')
self.phone = kwargs.get('phone')
self.cc_num = kwargs.get('cc_num')
self.cc_expiry = kwargs.get('cc_expiry')
self.xml = None
def sanitize(self):
if self.name is not None:
self.name = self.name[:30]
if self.address1 is not None:
address1 = sanitize_address_field(self.address1)
self.address1 = address1[:30]
if self.address2 is not None:
address2 = sanitize_address_field(self.address2)
self.address2 = address2[:30]
if self.city is not None:
city = sanitize_address_field(self.city)
self.city = city[:20]
if self.state is not None:
state = sanitize_address_field(self.state)
self.state = state[:2]
if self.zipCode is not None:
self.zipCode = self.zipCode[:5]
if self.email is not None:
self.email = self.email[:50]
if self.phone is not None:
phone = sanitize_phone_field(self.phone)
self.phone = phone[:14]
def parse_result(self, result):
values = super(Profile, self).parse_result(result)
for key in ['CustomerName', 'CustomerAddress1', 'CustomerAddress2',
'CustomerCity']:
if key in values and values[key]:
values[key] = values[key].title()
return values
def create(self):
self.sanitize()
values = {
'CustomerMerchantID': self.merchant_id,
'CustomerName': self.name,
'CustomerAddress1': self.address1,
'CustomerAddress2': self.address2,
'CustomerCity': self.city,
'CustomerState': self.state,
'CustomerZIP': self.zipCode,
'CustomerEmail': self.email,
'CustomerPhone': self.phone,
'CCAccountNum': self.cc_num,
'CCExpireDate': self.cc_expiry,
}
if self.ident:
values['CustomerProfileFromOrderInd'] = 'S'
values['CustomerRefNum'] = self.ident
self.xml = self.parse_xml(
"profile_create.xml",
values,
default_value="")
self.result = self.make_request(self.xml)
return self.parse_result(self.result)
def read(self):
values = {
'CustomerMerchantID': self.merchant_id,
'CustomerRefNum': self.ident
}
self.xml = self.parse_xml("profile_read.xml", values)
self._result = self.make_request(self.xml)
return self.parse_result(self._result)
def update(self):
self.sanitize()
values = {
'CustomerMerchantID': self.merchant_id,
'CustomerRefNum': self.ident,
'CustomerName': self.name,
'CustomerAddress1': self.address1,
'CustomerAddress2': self.address2,
'CustomerCity': self.city,
'CustomerState': self.state,
'CustomerZIP': self.zipCode,
'CustomerEmail': self.email,
'CustomerPhone': self.phone,
'CCAccountNum': self.cc_num,
'CCExpireDate': self.cc_expiry,
}
self.xml = self.parse_xml("profile_update.xml", values)
self._result = self.make_request(self.xml)
return self.parse_result(self._result)
def destroy(self):
values = {
'CustomerMerchantID': self.merchant_id,
'CustomerRefNum': self.ident,
}
self.xml = self.parse_xml("profile_destroy.xml", values)
self._result = self.make_request(self.xml)
return self.parse_result(self._result)
class Order(Endpoint):
"""
MessageType
A = Authorize
AC = Authorize Capture
FC = Force Capture
R = Refund
"""
def __init__(self, **kwargs):
super(Order, self).__init__(**kwargs)
self.message_type = kwargs.get('message_type') # <MessageType>
self.cc_num = kwargs.get('cc_num') # <AccountNum>
self.cc_expiry = kwargs.get('cc_expiry') # <Exp>
self.cvv_indicator = kwargs.get('cvv_indicator') or \
kwargs.get('cvd_indicator') # <CardSecValInd>
self.cvv = kwargs.get('cvv') or kwargs.get('cvd') # <CardSecVal>
self.customer_num = kwargs.get('customer_num') # <CustomerRefNum>
self.order_id = kwargs.get('order_id') # <OrderID>
self.amount = kwargs.get('amount') # <Amount>
self.zipCode = kwargs.get('zip_code') # <AVSzip>
self.address1 = kwargs.get('address1') # <AVSaddress1>
self.address2 = kwargs.get('address2') # <AVSaddress2>
self.city = kwargs.get('city') # <AVScity>
self.state = kwargs.get('state') # <AVSstate>
self.phone = kwargs.get('phone') # <AVSphoneNum>
self.prior_auth_id = kwargs.get('prior_auth_id') # <PriorAuthID>
self.tx_ref_num = kwargs.get('tx_ref_num') # <TxRefNum>
self.new_customer = kwargs.get('new_customer', False)
def sanitize(self):
if self.address1 is not None:
address1 = sanitize_address_field(self.address1)
self.address1 = address1[:30]
if self.address2 is not None:
address2 = sanitize_address_field(self.address2)
self.address2 = address2[:30]
if self.city is not None:
city = sanitize_address_field(self.city)
self.city = city[:20]
if self.state is not None:
state = sanitize_address_field(self.state)
self.state = state[:2]
if self.zipCode is not None:
self.zipCode = self.zipCode[:5]
if self.phone is not None:
phone = sanitize_phone_field(self.phone)
self.phone = phone[:14]
def card_sec_val_ind(self):
"""
Card Security Presence Indicator
For Discover/Visa
1 Value is Present
2 Value on card but illegible
9 Cardholder states data not available
Null if not Visa/Discover
"""
if not self.cvv:
return None
if self.cvv_indicator:
return self.cvv_indicator
# Quick check for card type
if self.card_type(self.cc_num) in ('Visa', 'Discover'):
if self.cc_expiry and len(self.cc_expiry) > 0:
return "1"
else:
return "9"
return None
def charge(self):
self.sanitize()
values = {
'MerchantID': self.merchant_id,
'MessageType': self.message_type or "AC",
'AccountNum': self.cc_num,
'Exp': self.cc_expiry,
'CardSecValInd': self.card_sec_val_ind(),
'CardSecVal': self.cvv,
'OrderID': self.order_id,
'Amount': self.convert_amount(self.amount),
'CustomerRefNum': self.customer_num,
'AVSzip': self.zipCode,
'AVSaddress1': self.address1,
'AVSaddress2': self.address2,
'AVScity': self.city,
'AVSstate': self.state,
'AVSphoneNum': self.phone,
'PriorAuthID': self.prior_auth_id,
'TxRefNum': self.tx_ref_num,
}
if self.new_customer:
values['CustomerProfileFromOrderInd'] = "A"
values['CustomerProfileOrderOverrideInd'] = "NO"
# Validation, TBD
if self.message_type not in ['A', 'AC', 'FC', 'R']:
pass
self.xml = self.parse_xml("order_new.xml", values)
self._result = self.make_request(self.xml)
return self.parse_result(self._result)
def authorize(self):
self.message_type = 'A'
return self.charge()
def authorize_capture(self):
self.message_type = 'AC'
return self.charge()
def force_capture(self):
self.message_type = 'FC'
return self.charge()
def refund(self):
self.message_type = 'R'
return self.charge()
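# A hypothetical usage sketch (all credential and card values below are
# placeholders, and the "MMYY" form of cc_expiry is an assumption):
# order = Order(merchant_id="123456", username="user", password="secret",
#               cc_num="4111111111111111", cc_expiry="1225",
#               order_id="1001", amount="45.25", zip_code="10001")
# result = order.authorize_capture()  # parsed response fields as a dict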
class MarkForCapture(Endpoint):
def __init__(self, **kwargs):
super(MarkForCapture, self).__init__(**kwargs)
self.order_id = kwargs.get('order_id') # <OrderID>
self.amount = kwargs.get('amount') # <Amount>
self.tx_ref_num = kwargs.get('tx_ref_num') # <TxRefNum>
def request(self):
values = {
'MerchantID': self.merchant_id,
'OrderID': self.order_id,
'Amount': self.convert_amount(self.amount),
'TxRefNum': self.tx_ref_num,
}
self.xml = self.parse_xml("mark_for_capture.xml", values)
self._result = self.make_request(self.xml)
return self.parse_result(self._result)
class Reversal(Endpoint):
def __init__(self, **kwargs):
super(Reversal, self).__init__(**kwargs)
self.tx_ref_num = kwargs.get('tx_ref_num') # <TxRefNum>
self.tx_ref_idx = kwargs.get('tx_ref_idx') # <TxRefIdx>
self.amount = kwargs.get('amount') # <AdjustedAmt>
self.order_id = kwargs.get('order_id') # <OrderID>
# <OnlineReversalInd>
self.online_reversal_ind = kwargs.get('online_reversal_ind')
def reversal(self):
self.online_reversal_ind = "Y"
values = {
'MerchantID': self.merchant_id,
'TxRefNum': self.tx_ref_num,
'TxRefIdx': self.tx_ref_idx,
'OrderID': self.order_id,
'OnlineReversalInd': self.online_reversal_ind,
}
if self.amount:
values['AdjustedAmt'] = self.convert_amount(self.amount)
self.xml = self.parse_xml("reversal.xml", values)
self._result = self.make_request(self.xml)
return self.parse_result(self._result)
def void(self):
values = {
'MerchantID': self.merchant_id,
'TxRefNum': self.tx_ref_num,
'TxRefIdx': self.tx_ref_idx,
'OrderID': self.order_id,
}
if self.amount:
values['AdjustedAmt'] = self.convert_amount(self.amount)
self.xml = self.parse_xml("reversal.xml", values)
self._result = self.make_request(self.xml)
return self.parse_result(self._result)
|
dave2328/chase
|
chase/chase.py
|
Python
|
mit
| 16,997
|
from functools import partial
from graphql.validation import ScalarLeafsRule
from .harness import assert_validation_errors
assert_errors = partial(assert_validation_errors, ScalarLeafsRule)
assert_valid = partial(assert_errors, errors=[])
def describe_validate_scalar_leafs():
def valid_scalar_selection():
assert_valid(
"""
fragment scalarSelection on Dog {
barks
}
"""
)
def object_type_missing_selection():
assert_errors(
"""
query directQueryOnObjectWithoutSubFields {
human
}
""",
[
{
"message": "Field 'human' of type 'Human'"
" must have a selection of subfields."
" Did you mean 'human { ... }'?",
"locations": [(3, 15)],
},
],
)
def interface_type_missing_selection():
assert_errors(
"""
{
human { pets }
}
""",
[
{
"message": "Field 'pets' of type '[Pet]'"
" must have a selection of subfields."
" Did you mean 'pets { ... }'?",
"locations": [(3, 23)],
},
],
)
def valid_scalar_selection_with_args():
assert_valid(
"""
fragment scalarSelectionWithArgs on Dog {
doesKnowCommand(dogCommand: SIT)
}
"""
)
def scalar_selection_not_allowed_on_boolean():
assert_errors(
"""
fragment scalarSelectionsNotAllowedOnBoolean on Dog {
barks { sinceWhen }
}
""",
[
{
"message": "Field 'barks' must not have a selection"
" since type 'Boolean' has no subfields.",
"locations": [(3, 21)],
},
],
)
def scalar_selection_not_allowed_on_enum():
assert_errors(
"""
fragment scalarSelectionsNotAllowedOnEnum on Cat {
furColor { inHexDec }
}
""",
[
{
"message": "Field 'furColor' must not have a selection"
" since type 'FurColor' has no subfields.",
"locations": [(3, 24)],
},
],
)
def scalar_selection_not_allowed_with_args():
assert_errors(
"""
fragment scalarSelectionsNotAllowedWithArgs on Dog {
doesKnowCommand(dogCommand: SIT) { sinceWhen }
}
""",
[
{
"message": "Field 'doesKnowCommand' must not have a selection"
" since type 'Boolean' has no subfields.",
"locations": [(3, 48)],
},
],
)
def scalar_selection_not_allowed_with_directives():
assert_errors(
"""
fragment scalarSelectionsNotAllowedWithDirectives on Dog {
name @include(if: true) { isAlsoHumanName }
}
""",
[
{
"message": "Field 'name' must not have a selection"
" since type 'String' has no subfields.",
"locations": [(3, 39)],
},
],
)
def scalar_selection_not_allowed_with_directives_and_args():
assert_errors(
"""
fragment scalarSelectionsNotAllowedWithDirectivesAndArgs on Dog {
doesKnowCommand(dogCommand: SIT) @include(if: true) { sinceWhen }
}
""",
[
{
"message": "Field 'doesKnowCommand' must not have a selection"
" since type 'Boolean' has no subfields.",
"locations": [(3, 67)],
},
],
)
|
graphql-python/graphql-core
|
tests/validation/test_scalar_leafs.py
|
Python
|
mit
| 4,160
|
from wsgiref.util import FileWrapper  # rexec is deprecated/removed; wsgiref provides an equivalent FileWrapper
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from ..models.attachment import Attachment
def download(request, id):
attachment = get_object_or_404(Attachment, pk=id)
wrapper = FileWrapper(attachment.file)
response = HttpResponse(wrapper, content_type='text/plain')
response['Content-Length'] = attachment.file.size
return response
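# Hypothetical URL wiring for this view (route and name are assumptions):
# url(r'^attachments/(?P<id>\d+)/$', download, name='attachment_download')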
|
jmescuderojustel/codeyourblogin-python-django-1.7
|
src/blog/controllers/attachment_controller.py
|
Python
|
mit
| 423
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/UseItemXpBoostMessage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Inventory import ItemId_pb2 as POGOProtos_dot_Inventory_dot_ItemId__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Requests/Messages/UseItemXpBoostMessage.proto',
package='POGOProtos.Networking.Requests.Messages',
syntax='proto3',
serialized_pb=_b('\nCPOGOProtos/Networking/Requests/Messages/UseItemXpBoostMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\x1a!POGOProtos/Inventory/ItemId.proto\"F\n\x15UseItemXpBoostMessage\x12-\n\x07item_id\x18\x01 \x01(\x0e\x32\x1c.POGOProtos.Inventory.ItemIdb\x06proto3')
,
dependencies=[POGOProtos_dot_Inventory_dot_ItemId__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_USEITEMXPBOOSTMESSAGE = _descriptor.Descriptor(
name='UseItemXpBoostMessage',
full_name='POGOProtos.Networking.Requests.Messages.UseItemXpBoostMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='item_id', full_name='POGOProtos.Networking.Requests.Messages.UseItemXpBoostMessage.item_id', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=147,
serialized_end=217,
)
_USEITEMXPBOOSTMESSAGE.fields_by_name['item_id'].enum_type = POGOProtos_dot_Inventory_dot_ItemId__pb2._ITEMID
DESCRIPTOR.message_types_by_name['UseItemXpBoostMessage'] = _USEITEMXPBOOSTMESSAGE
UseItemXpBoostMessage = _reflection.GeneratedProtocolMessageType('UseItemXpBoostMessage', (_message.Message,), dict(
DESCRIPTOR = _USEITEMXPBOOSTMESSAGE,
__module__ = 'POGOProtos.Networking.Requests.Messages.UseItemXpBoostMessage_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.UseItemXpBoostMessage)
))
_sym_db.RegisterMessage(UseItemXpBoostMessage)
# @@protoc_insertion_point(module_scope)
|
obiben/pokemongo-api
|
pogo/POGOProtos/Networking/Requests/Messages/UseItemXpBoostMessage_pb2.py
|
Python
|
mit
| 2,699
|
import common
import music_queue
import pagination
import artists
import audio_tracks
@route('/music/music/collections_menu')
def GetCollectionsMenu(title):
oc = ObjectContainer(title2=unicode(L(title)))
oc.add(DirectoryObject(
key=Callback(HandleCollections, title=L('All Collections')),
title=unicode(L('All Collections'))
))
oc.add(DirectoryObject(
key=Callback(music_queue.GetQueue, filter='collection__id', title=L('Favorite Collections')),
title=unicode(L('Favorite Collections'))
))
add_search_collections(oc)
return oc
@route('/music/music/collections')
def HandleCollections(title, page=1, **params):
oc = ObjectContainer()
page = int(page)
limit = common.get_elements_per_page()
offset = (page-1)*limit
response = service.get_collections(limit=limit, offset=offset)
oc.title2 = unicode(L('Collections')) + ' (' + str(response['meta']['total_count']) + ')'
for media in response['objects']:
id = media['id']
name = media['title']
thumb = media['thumbnail']
key = Callback(HandleCollection, collection__id=id, title=name, thumb=thumb)
oc.add(DirectoryObject(key=key, title=unicode(name), thumb=thumb))
add_search_collections(oc)
common.add_pagination_to_response(response, page)
pagination.append_controls(oc, response, callback=HandleCollections, title=title, page=page)
return oc
@route('/music/music/collection')
def HandleCollection(title, collection__id, thumb):
oc = ObjectContainer(title2=unicode(L(title)))
key = Callback(audio_tracks.HandleAudioTracks, name=title, collection__id=collection__id, thumb=thumb)
oc.add(DirectoryObject(key=key, title=unicode(title), thumb=thumb))
music_queue.append_controls(oc, name=title, thumb=thumb, collection__id=collection__id)
return oc
@route('/music/music/search_music_collections')
def SearchMusicCollections(title, query, page=1, **params):
page = int(page)
limit = common.get_elements_per_page()
offset = (page-1)*limit
oc = ObjectContainer(title2=unicode(L(title)))
response = service.search_collection(q=query, limit=common.get_elements_per_page(), offset=offset)
for media in artists.BuildArtistsList(response['objects']):
oc.add(media)
common.add_pagination_to_response(response, page)
pagination.append_controls(oc, response, callback=SearchMusicCollections, title=title, query=query, page=page, **params)
return oc
def add_search_collections(oc):
oc.add(InputDirectoryObject(
key=Callback(SearchMusicCollections, title=unicode(L("Collections Search"))),
title=unicode(L("Collections Search")),
thumb=R(SEARCH_ICON)
))
|
shvets/music-plex-plugin
|
src/lib/plex_plugin/Contents/Code/collections.py
|
Python
|
mit
| 2,763
|
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
"""Charm Helpers ansible - declare the state of your machines.
This helper enables you to declare your machine state, rather than
program it procedurally (and have to test each change to your procedures).
Your install hook can be as simple as::
{{{
import charmhelpers.contrib.ansible
def install():
charmhelpers.contrib.ansible.install_ansible_support()
charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml')
}}}
and won't need to change (nor will its tests) when you change the machine
state.
All of your juju config and relation-data are available as template
variables within your playbooks and templates. An install playbook looks
something like::
{{{
---
- hosts: localhost
user: root
tasks:
- name: Add private repositories.
template:
src: ../templates/private-repositories.list.jinja2
dest: /etc/apt/sources.list.d/private.list
- name: Update the cache.
apt: update_cache=yes
- name: Install dependencies.
apt: pkg={{ item }}
with_items:
- python-mimeparse
- python-webob
- sunburnt
- name: Setup groups.
group: name={{ item.name }} gid={{ item.gid }}
with_items:
- { name: 'deploy_user', gid: 1800 }
- { name: 'service_user', gid: 1500 }
...
}}}
Read more online about `playbooks`_ and standard ansible `modules`_.
.. _playbooks: http://www.ansibleworks.com/docs/playbooks.html
.. _modules: http://www.ansibleworks.com/docs/modules.html
"""
import os
import subprocess
import charmhelpers.contrib.templating.contexts
import charmhelpers.core.host
import charmhelpers.core.hookenv
import charmhelpers.fetch
charm_dir = os.environ.get('CHARM_DIR', '')
ansible_hosts_path = '/etc/ansible/hosts'
# Ansible will automatically include any vars in the following
# file in its inventory when run locally.
ansible_vars_path = '/etc/ansible/host_vars/localhost'
def install_ansible_support(from_ppa=True, ppa_location='ppa:rquillo/ansible'):
"""Installs the ansible package.
By default it is installed from the `PPA`_ linked from
    the ansible `website`_ or from a PPA specified by a charm config.
.. _PPA: https://launchpad.net/~rquillo/+archive/ansible
.. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu
    If from_ppa is False, you must ensure that the package is available
from a configured repository.
"""
if from_ppa:
charmhelpers.fetch.add_source(ppa_location)
charmhelpers.fetch.apt_update(fatal=True)
charmhelpers.fetch.apt_install('ansible')
with open(ansible_hosts_path, 'w+') as hosts_file:
hosts_file.write('localhost ansible_connection=local')
def apply_playbook(playbook, tags=None):
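    """Run a playbook against localhost, exporting juju state as variables.

    Current juju config and relation data are first written out (via
    juju_state_to_yaml) so the playbook and its templates can reference
    them; ansible-playbook is then invoked with a local connection.

    :param playbook: Path to the playbook file.
    :param tags: Optional list of tags; if given, only tasks tagged with
        one of these are run.
    """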
tags = tags or []
tags = ",".join(tags)
charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
ansible_vars_path, namespace_separator='__',
allow_hyphens_in_keys=False)
call = [
'ansible-playbook',
'-c',
'local',
playbook,
]
if tags:
call.extend(['--tags', '{}'.format(tags)])
subprocess.check_call(call)
class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
"""Run a playbook with the hook-name as the tag.
This helper builds on the standard hookenv.Hooks helper,
but additionally runs the playbook with the hook-name specified
using --tags (ie. running all the tasks tagged with the hook-name).
Example::
hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml')
# All the tasks within my_machine_state.yaml tagged with 'install'
# will be run automatically after do_custom_work()
@hooks.hook()
def install():
do_custom_work()
# For most of your hooks, you won't need to do anything other
# than run the tagged tasks for the hook:
@hooks.hook('config-changed', 'start', 'stop')
def just_use_playbook():
pass
# As a convenience, you can avoid the above noop function by specifying
# the hooks which are handled by ansible-only and they'll be registered
# for you:
# hooks = AnsibleHooks(
# 'playbooks/my_machine_state.yaml',
# default_hooks=['config-changed', 'start', 'stop'])
if __name__ == "__main__":
# execute a hook based on the name the program is called by
hooks.execute(sys.argv)
"""
def __init__(self, playbook_path, default_hooks=None):
"""Register any hooks handled by ansible."""
super(AnsibleHooks, self).__init__()
self.playbook_path = playbook_path
default_hooks = default_hooks or []
noop = lambda *args, **kwargs: None
for hook in default_hooks:
self.register(hook, noop)
def execute(self, args):
"""Execute the hook followed by the playbook using the hook as tag."""
super(AnsibleHooks, self).execute(args)
hook_name = os.path.basename(args[0])
charmhelpers.contrib.ansible.apply_playbook(
self.playbook_path, tags=[hook_name])
|
jiasir/openstack-trove
|
lib/charmhelpers/contrib/ansible/__init__.py
|
Python
|
mit
| 5,365
|
from __future__ import division, absolute_import, print_function
extensions = []
source_suffix = '.rst'
master_doc = 'index'
project = u'Confuse'
copyright = u'2012, Adrian Sampson'
version = '0.1'
release = '0.1.0'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
html_theme = 'default'
htmlhelp_basename = 'Confusedoc'
|
sampsyo/confuse
|
docs/conf.py
|
Python
|
mit
| 416
|
#!/usr/bin/python
# Imports
import requests
from sys import version_info
# Setup
py3 = version_info[0] > 2
if py3:
zip_code = input('Which zip code would you like to check the weather of? ')
else:
zip_code = raw_input("Which zip code would you like to check the weather of? ")
api_key = 'fc8ae934cc756cb097a187d5a0b3e428'
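# OpenWeatherMap "current weather" endpoint, queried by US zip code;
# units=imperial returns the temperature in Fahrenheit and wind speed in mph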
url = 'http://api.openweathermap.org/data/2.5/weather?zip=' + zip_code + ',us&APPID=' + api_key + '&units=imperial'
# Request
data = requests.get(url)
weather = data.json()
# Print Info
print('')
if data.status_code != 200:
print('Error. We could not find this location.')
else:
print('Location: ' + weather['name'])
print('Weather: ' + weather['weather'][0]['description'])
print('Wind: ' + str(weather['wind']['speed']) + 'MPH')
print('Temperature: ' + str(weather['main']['temp']) + 'F')
print('')
|
Belax8/my-pi-projects
|
Misc/weather-api.py
|
Python
|
mit
| 838
|
from typing import Any, Dict, cast
from pytest import mark, param, raises
from graphql.type import (
GraphQLArgument,
GraphQLDirective,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLField,
GraphQLInputField,
GraphQLInputObjectType,
GraphQLInterfaceType,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
GraphQLUnionType,
)
dummy_type = GraphQLScalarType("DummyScalar")
bad_extensions = [param([], id="list"), param({1: "ext"}, id="non_string_key")]
def bad_extensions_msg(name: str) -> str:
return f"{name} extensions must be a dictionary with string keys."
def describe_type_system_extensions():
def describe_graphql_scalar_type():
def without_extensions():
some_scalar = GraphQLScalarType("SomeScalar")
assert some_scalar.extensions == {}
assert some_scalar.to_kwargs()["extensions"] == {}
def with_extensions():
scalar_extensions = {"SomeScalarExt": "scalar"}
some_scalar = GraphQLScalarType("SomeScalar", extensions=scalar_extensions)
assert some_scalar.extensions is scalar_extensions
assert some_scalar.to_kwargs()["extensions"] is scalar_extensions
@mark.parametrize("extensions", bad_extensions)
def with_bad_extensions(extensions):
with raises(TypeError, match=bad_extensions_msg("SomeScalar")):
# noinspection PyTypeChecker
GraphQLScalarType("SomeScalar", extensions=extensions)
def describe_graphql_object_type():
def without_extensions():
some_object = GraphQLObjectType(
"SomeObject",
{
"someField": GraphQLField(
dummy_type, {"someArg": GraphQLArgument(dummy_type)}
)
},
)
assert some_object.extensions == {}
some_field = some_object.fields["someField"]
assert some_field.extensions == {}
some_arg = some_field.args["someArg"]
assert some_arg.extensions == {}
assert some_object.to_kwargs()["extensions"] == {}
assert some_field.to_kwargs()["extensions"] == {}
assert some_arg.to_kwargs()["extensions"] == {}
def with_extensions():
object_extensions = {"SomeObjectExt": "object"}
field_extensions = {"SomeFieldExt": "field"}
arg_extensions = {"SomeArgExt": "arg"}
some_object = GraphQLObjectType(
"SomeObject",
{
"someField": GraphQLField(
dummy_type,
{
"someArg": GraphQLArgument(
dummy_type, extensions=arg_extensions
)
},
extensions=field_extensions,
)
},
extensions=object_extensions,
)
assert some_object.extensions is object_extensions
some_field = some_object.fields["someField"]
assert some_field.extensions is field_extensions
some_arg = some_field.args["someArg"]
assert some_arg.extensions is arg_extensions
assert some_object.to_kwargs()["extensions"] is object_extensions
assert some_field.to_kwargs()["extensions"] is field_extensions
assert some_arg.to_kwargs()["extensions"] is arg_extensions
@mark.parametrize("extensions", bad_extensions)
def with_bad_extensions(extensions):
with raises(TypeError, match=bad_extensions_msg("SomeObject")):
# noinspection PyTypeChecker
GraphQLObjectType("SomeObject", {}, extensions=extensions)
with raises(TypeError, match=bad_extensions_msg("Field")):
# noinspection PyTypeChecker
GraphQLField(dummy_type, extensions=extensions)
with raises(TypeError, match=bad_extensions_msg("Argument")):
# noinspection PyTypeChecker
GraphQLArgument(dummy_type, extensions=extensions)
def describe_graphql_interface_type():
def without_extensions():
some_interface = GraphQLInterfaceType(
"SomeInterface",
{
"someField": GraphQLField(
dummy_type, {"someArg": GraphQLArgument(dummy_type)}
)
},
)
assert some_interface.extensions == {}
some_field = some_interface.fields["someField"]
assert some_field.extensions == {}
some_arg = some_field.args["someArg"]
assert some_arg.extensions == {}
assert some_interface.to_kwargs()["extensions"] == {}
assert some_field.to_kwargs()["extensions"] == {}
assert some_arg.to_kwargs()["extensions"] == {}
def with_extensions():
interface_extensions = {"SomeInterfaceExt": "interface"}
field_extensions = {"SomeFieldExt": "field"}
arg_extensions = {"SomeArgExt": "arg"}
some_interface = GraphQLInterfaceType(
"SomeInterface",
{
"someField": GraphQLField(
dummy_type,
{
"someArg": GraphQLArgument(
dummy_type, extensions=arg_extensions
)
},
extensions=field_extensions,
)
},
extensions=interface_extensions,
)
assert some_interface.extensions is interface_extensions
some_field = some_interface.fields["someField"]
assert some_field.extensions is field_extensions
some_arg = some_field.args["someArg"]
assert some_arg.extensions is arg_extensions
assert some_interface.to_kwargs()["extensions"] is interface_extensions
assert some_field.to_kwargs()["extensions"] is field_extensions
assert some_arg.to_kwargs()["extensions"] is arg_extensions
@mark.parametrize("extensions", bad_extensions)
def with_bad_extensions(extensions):
with raises(TypeError, match=bad_extensions_msg("SomeInterface")):
# noinspection PyTypeChecker
GraphQLInterfaceType("SomeInterface", {}, extensions=extensions)
def describe_graphql_union_type():
def without_extensions():
some_union = GraphQLUnionType("SomeUnion", [])
assert some_union.extensions == {}
assert some_union.to_kwargs()["extensions"] == {}
def with_extensions():
union_extensions = {"SomeUnionExt": "union"}
some_union = GraphQLUnionType("SomeUnion", [], extensions=union_extensions)
assert some_union.extensions is union_extensions
assert some_union.to_kwargs()["extensions"] is union_extensions
@mark.parametrize("extensions", bad_extensions)
def with_bad_extensions(extensions):
with raises(TypeError, match=bad_extensions_msg("SomeUnion")):
# noinspection PyTypeChecker
GraphQLUnionType("SomeUnion", [], extensions=extensions)
def describe_graphql_enum_type():
def without_extensions():
some_enum = GraphQLEnumType("SomeEnum", {"SOME_VALUE": None})
assert some_enum.extensions == {}
some_value = some_enum.values["SOME_VALUE"]
assert some_value.extensions == {}
assert some_enum.to_kwargs()["extensions"] == {}
assert some_value.to_kwargs()["extensions"] == {}
def with_extensions():
enum_extensions = {"SomeEnumExt": "enum"}
value_extensions = {"SomeValueExt": "value"}
some_enum = GraphQLEnumType(
"SomeEnum",
{"SOME_VALUE": GraphQLEnumValue(extensions=value_extensions)},
extensions=enum_extensions,
)
assert some_enum.extensions is enum_extensions
some_value = some_enum.values["SOME_VALUE"]
assert some_value.extensions is value_extensions
assert some_enum.to_kwargs()["extensions"] is enum_extensions
assert some_value.to_kwargs()["extensions"] is value_extensions
@mark.parametrize("extensions", bad_extensions)
def with_bad_extensions(extensions):
with raises(TypeError, match=bad_extensions_msg("SomeEnum")):
# noinspection PyTypeChecker
GraphQLEnumType(
"SomeEnum", cast(Dict[str, Any], {}), extensions=extensions
)
with raises(TypeError, match=bad_extensions_msg("Enum value")):
# noinspection PyTypeChecker
GraphQLEnumValue(extensions=extensions)
def describe_graphql_input_object_type():
def without_extensions():
some_input_object = GraphQLInputObjectType(
"SomeInputObject", {"someInputField": GraphQLInputField(dummy_type)}
)
assert some_input_object.extensions == {}
some_input_field = some_input_object.fields["someInputField"]
assert some_input_field.extensions == {}
assert some_input_object.to_kwargs()["extensions"] == {}
assert some_input_field.to_kwargs()["extensions"] == {}
def with_extensions():
input_object_extensions = {"SomeInputObjectExt": "inputObject"}
input_field_extensions = {"SomeInputFieldExt": "inputField"}
some_input_object = GraphQLInputObjectType(
"SomeInputObject",
{
"someInputField": GraphQLInputField(
dummy_type, extensions=input_field_extensions
)
},
extensions=input_object_extensions,
)
assert some_input_object.extensions is input_object_extensions
some_input_field = some_input_object.fields["someInputField"]
assert some_input_field.extensions is input_field_extensions
assert (
some_input_object.to_kwargs()["extensions"] is input_object_extensions
)
assert some_input_field.to_kwargs()["extensions"] is input_field_extensions
@mark.parametrize("extensions", bad_extensions)
def with_bad_extensions(extensions):
with raises(TypeError, match=bad_extensions_msg("SomeInputObject")):
# noinspection PyTypeChecker
GraphQLInputObjectType("SomeInputObject", {}, extensions=extensions)
with raises(TypeError, match=bad_extensions_msg("Input field")):
# noinspection PyTypeChecker
GraphQLInputField(dummy_type, extensions=extensions)
def describe_graphql_directive():
def without_extensions():
some_directive = GraphQLDirective(
"SomeDirective", [], {"someArg": GraphQLArgument(dummy_type)}
)
assert some_directive.extensions == {}
some_arg = some_directive.args["someArg"]
assert some_arg.extensions == {}
assert some_directive.to_kwargs()["extensions"] == {}
assert some_arg.to_kwargs()["extensions"] == {}
def with_extensions():
directive_extensions = {"SomeDirectiveExt": "directive"}
arg_extensions = {"SomeArgExt": "arg"}
some_directive = GraphQLDirective(
"SomeDirective",
[],
{"someArg": GraphQLArgument(dummy_type, extensions=arg_extensions)},
extensions=directive_extensions,
)
assert some_directive.extensions is directive_extensions
some_arg = some_directive.args["someArg"]
assert some_arg.extensions is arg_extensions
assert some_directive.to_kwargs()["extensions"] is directive_extensions
assert some_arg.to_kwargs()["extensions"] is arg_extensions
@mark.parametrize("extensions", bad_extensions)
def with_bad_extensions(extensions):
with raises(TypeError, match=bad_extensions_msg("Directive")):
# noinspection PyTypeChecker
GraphQLDirective("SomeDirective", [], extensions=extensions)
def describe_graphql_schema():
def without_extensions():
schema = GraphQLSchema()
assert schema.extensions == {}
assert schema.to_kwargs()["extensions"] == {}
def with_extensions():
schema_extensions = {"schemaExtension": "schema"}
schema = GraphQLSchema(extensions=schema_extensions)
assert schema.extensions is schema_extensions
assert schema.to_kwargs()["extensions"] is schema_extensions
@mark.parametrize("extensions", bad_extensions)
def with_bad_extensions(extensions):
with raises(TypeError, match=bad_extensions_msg("Schema")):
# noinspection PyTypeChecker
GraphQLSchema(extensions=extensions)
|
graphql-python/graphql-core
|
tests/type/test_extensions.py
|
Python
|
mit
| 13,423
|
"""
WSGI config for regex project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "regex.settings")
application = get_wsgi_application()
|
mkhuthir/learnPython
|
Book_learning-python-r1.1/ch10/regex/regex/wsgi.py
|
Python
|
mit
| 387
|
import glob
import os

import matplotlib.pyplot as plt
import numpy as np
from moviepy.video.io.bindings import mplfig_to_npimage
import moviepy.editor as mpy
import cv2
# DRAW A FIGURE WITH MATPLOTLIB
# duration = 2
# fig_mpl, ax = plt.subplots(1,figsize=(5,3), facecolor='white')
# xx = np.linspace(-2,2,200) # the x vector
# zz = lambda d: np.sinc(xx**2)+np.sin(xx+d) # the (changing) z vector
# ax.set_title("Elevation in y=0")
# ax.set_ylim(-1.5,2.5)
# line, = ax.plot(xx, zz(0), lw=3)
filenameList = sorted(glob.glob('/home/chengeli/Downloads/vis/vis/*.jpg'))
duration = len(filenameList)
fig_mpl, ax = plt.subplots(1)  # ,figsize=(im.height, im.width)
def make_frame_mpl(t):
#    line.set_ydata( zz(2*np.pi*t/duration))  # <= Update the curve
#    im = Image.open(filenameList[int(t)])
    im = cv2.imread(filenameList[int(t)])
    # cv2.imread returns BGR; convert to RGB so matplotlib shows true colours
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    print int(t)
#    original_duration = im.info['duration']
#    frames = [frame.copy() for frame in ImageSequence.Iterator(im)]
    plt.imshow(im)
    return mplfig_to_npimage(fig_mpl)  # RGB image of the figure
# ANIMATE WITH MOVIEPY (UPDATE THE CURVE FOR EACH t). MAKE A GIF.
# from PIL import Image, ImageSequence
# filename = sys.argv[1]
fps = 50
animation = mpy.VideoClip(make_frame_mpl, duration=duration)
# animation = mpy.VideoClip(mplfig_to_npimage(fig), duration=duration)
animation.write_gif(os.path.expanduser("~/Desktop/NGSIMvis.gif"), fps=fps)
# import numpy as np
# import matplotlib.pyplot as plt
# from moviepy.video.io.bindings import mplfig_to_npimage
# import moviepy.editor as mpy
# fig = plt.figure(facecolor="white") # <- ADDED FACECOLOR FOR WHITE BACKGROUND
# ax = plt.axes()
# x = np.random.randn(10, 1)
# y = np.random.randn(10, 1)
# p = plt.plot(x, y, 'ko')
# time = np.arange(2341973, 2342373)
# last_i = None
# last_frame = None
# def animate(t):
# global last_i, last_frame
# i = int(t)
# if i == last_i:
# return last_frame
# xn = x + np.sin(2 * np.pi * time[i] / 10.0)
# yn = y + np.cos(2 * np.pi * time[i] / 8.0)
# p[0].set_data(xn, yn)
# last_i = i
# last_frame = mplfig_to_npimage(fig)
# return last_frame
# duration = len(time)
# fps = 15
# animation = mpy.VideoClip(animate, duration=duration)
# animation.write_videofile("test.mp4", fps=fps)
|
ChengeLi/VehicleTracking
|
utilities/plot2gif.py
|
Python
|
mit
| 2,284
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
nwid.widget.base
~~~~~~~~~~~~~~~~
This module contains the nwid base widget object and data structures.
"""
from __future__ import absolute_import
from .widget import BaseWidget
from .scrollable import Scrollable
|
hbradleyiii/nwid
|
nwid/widget/base/__init__.py
|
Python
|
mit
| 261
|
import math
class Solution(object):
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
sqrt5 = math.sqrt(5)
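        # Closed-form (Binet) formula: the number of ways to climb n stairs
        # is the (n+1)-th Fibonacci number; double precision is exact enough
        # for the small n this problem uses.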
return int(sqrt5 / 5 * (pow((1+sqrt5)/2, n+1) - pow((1-sqrt5)/2, n+1)))
a = Solution()
print a.climbStairs(2)
|
SeisSparrow/Leetcode
|
python/70.py
|
Python
|
mit
| 279
|
import os
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
def load_path():
return os.path.join(ROOT_DIR,"chromedriver")
|
pallab-gain/slack-daily-update
|
driver/loadpath.py
|
Python
|
mit
| 132
|
from __future__ import division
SPREADS = {
6: [1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0],
5: [1.0, 1.0, 2.0, 2.0, 3.0, 4.0],
4: [1.0, 1.0, 2.0, 3.0, 3.0],
3: [1.0, 1.0, 2.0, 4.0],
2: [1.0, 2.0, 3.0],
1: [1.0, 1.0],
}
def round_to_nearest(x, base):
    # nudge towards rounding up (eg 4.5 towards 5) because python3's round()
    # uses banker's rounding ("if two multiples are equally close, rounding
    # is done toward the even choice"), which would round 4.5 down to 4
divided = float(x) / base
rounded = round(divided + 0.01)
return base * int(rounded)
def find_links_in_gap(link_count, start, finish, is_left=False):
size_of_gap = (finish - start) + 1
spread = SPREADS[link_count]
increment = (size_of_gap / sum(spread))
# calculate the intervals to be used, based upon the spread (which
# favours putting the extra links nearer to the current page)
running_total = 0
last_step = 0
steps = []
for i in range(0, link_count):
link = spread[i]
running_total += (increment * link)
step = int(round(running_total))
if step <= last_step or step < 1:
step = last_step + 1
steps.append(step)
last_step = step
links = []
for step in steps:
if is_left:
step = finish + 1 - step
else:
step = step + start - 1
if size_of_gap > (49 * link_count):
step = round_to_nearest(step, 50)
elif size_of_gap > (9 * link_count):
step = round_to_nearest(step, 10)
elif size_of_gap > (4 * link_count):
step = round_to_nearest(step, 5)
# rounding can make useless numbers
if step >= start and step <= finish:
links.append(step)
return links
def concertina(page, total_pages):
# this will cause ValueError if not numbers, correctly aborting
page = int(page)
total_pages = int(total_pages)
# we won't paginate out of the range of pages
if page < 1:
page = 1
if page > total_pages:
page = total_pages
# we can always show 1-10 no matter what, so don't bother calculating it
if total_pages < 11:
return list(range(1, total_pages+1))
# current, first, and last pages are always present
pages = {page: 1, 1: 1, total_pages: 1}
# two to either side of the current page are always present
for x in range(page-2, page+3):
if x > 1 and x < total_pages:
pages[x] = 1
# how many pages are missing from the navigation
pages_to_the_left = page - 4 # remove 1, itself, two beside
if pages_to_the_left < 0:
pages_to_the_left = 0
pages_to_the_right = total_pages - page - 3 # remove last, two beside
if pages_to_the_right < 0:
pages_to_the_right = 0
total_missing = pages_to_the_left + pages_to_the_right
# how many can we put back in on either side, favouring more
# links on the side where there is the bigger gap...
left_ratio = pages_to_the_left/total_missing
right_ratio = pages_to_the_right/total_missing
links_to_the_left = int(round(6 * left_ratio))
links_to_the_right = int(round(6 * right_ratio))
# ...but always with at least one link
if links_to_the_left == 0 and pages_to_the_left:
links_to_the_left = 1
links_to_the_right = 5
if links_to_the_right == 0 and pages_to_the_right:
links_to_the_right = 1
links_to_the_left = 5
# add links only where they are not already present
if page > 4:
pages.update(
dict(
(k, 1) for k in find_links_in_gap(
link_count=links_to_the_left,
start=2,
finish=page - 3,
is_left=True,
)
)
)
if page < (total_pages - 4):
pages.update(
dict(
(k, 1) for k in find_links_in_gap(
link_count=links_to_the_right,
start=page + 3,
finish=total_pages - 1,
)
)
)
return sorted(pages.keys())
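# Minimal usage sketch (numbers are illustrative only): the result always
# contains page 1, the current page with two neighbours either side, and the
# last page, with the remaining links spread through the larger gap.
if __name__ == '__main__':
    print(concertina(6, 100))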
|
norm/django-concertina
|
concertina/__init__.py
|
Python
|
mit
| 4,154
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, sys
from frappe import _
from frappe.utils import cint, flt, now, cstr, strip_html, getdate, get_datetime, to_timedelta
from frappe.model import default_fields
from frappe.model.naming import set_new_name
from frappe.modules import load_doctype_module
from frappe.model import display_fieldtypes
_classes = {}
def get_controller(doctype):
"""Returns the **class** object of the given DocType.
For `custom` type, returns `frappe.model.document.Document`.
:param doctype: DocType name as string."""
from frappe.model.document import Document
if not doctype in _classes:
module_name, custom = frappe.db.get_value("DocType", doctype, ["module", "custom"]) \
or ["Core", False]
if custom:
_class = Document
else:
module = load_doctype_module(doctype, module_name)
classname = doctype.replace(" ", "").replace("-", "")
if hasattr(module, classname):
_class = getattr(module, classname)
if issubclass(_class, BaseDocument):
_class = getattr(module, classname)
else:
raise ImportError, doctype
else:
raise ImportError, doctype
_classes[doctype] = _class
return _classes[doctype]
class BaseDocument(object):
ignore_in_getter = ("doctype", "_meta", "meta", "_table_fields", "_valid_columns")
def __init__(self, d):
self.update(d)
self.dont_update_if_missing = []
if hasattr(self, "__setup__"):
self.__setup__()
@property
def meta(self):
if not hasattr(self, "_meta"):
self._meta = frappe.get_meta(self.doctype)
return self._meta
def update(self, d):
if "doctype" in d:
self.set("doctype", d.get("doctype"))
# first set default field values of base document
for key in default_fields:
if key in d:
self.set(key, d.get(key))
for key, value in d.iteritems():
self.set(key, value)
return self
def update_if_missing(self, d):
if isinstance(d, BaseDocument):
d = d.get_valid_dict()
if "doctype" in d:
self.set("doctype", d.get("doctype"))
for key, value in d.iteritems():
# dont_update_if_missing is a list of fieldnames, for which, you don't want to set default value
if (self.get(key) is None) and (value is not None) and (key not in self.dont_update_if_missing):
self.set(key, value)
def get_db_value(self, key):
return frappe.db.get_value(self.doctype, self.name, key)
def get(self, key=None, filters=None, limit=None, default=None):
if key:
if isinstance(key, dict):
return _filter(self.get_all_children(), key, limit=limit)
if filters:
if isinstance(filters, dict):
value = _filter(self.__dict__.get(key, []), filters, limit=limit)
else:
default = filters
filters = None
value = self.__dict__.get(key, default)
else:
value = self.__dict__.get(key, default)
if value is None and key not in self.ignore_in_getter \
and key in (d.fieldname for d in self.meta.get_table_fields()):
self.set(key, [])
value = self.__dict__.get(key)
return value
else:
return self.__dict__
def getone(self, key, filters=None):
return self.get(key, filters=filters, limit=1)[0]
def set(self, key, value, as_value=False):
if isinstance(value, list) and not as_value:
self.__dict__[key] = []
self.extend(key, value)
else:
self.__dict__[key] = value
def delete_key(self, key):
if key in self.__dict__:
del self.__dict__[key]
def append(self, key, value=None):
if value==None:
value={}
if isinstance(value, (dict, BaseDocument)):
if not self.__dict__.get(key):
self.__dict__[key] = []
value = self._init_child(value, key)
self.__dict__[key].append(value)
# reference parent document
value.parent_doc = self
return value
else:
raise ValueError, "Document attached to child table must be a dict or BaseDocument, not " + str(type(value))[1:-1]
def extend(self, key, value):
if isinstance(value, list):
for v in value:
self.append(key, v)
else:
raise ValueError
def remove(self, doc):
self.get(doc.parentfield).remove(doc)
def _init_child(self, value, key):
if not self.doctype:
return value
if not isinstance(value, BaseDocument):
if "doctype" not in value:
value["doctype"] = self.get_table_field_doctype(key)
if not value["doctype"]:
raise AttributeError, key
value = get_controller(value["doctype"])(value)
value.init_valid_columns()
value.parent = self.name
value.parenttype = self.doctype
value.parentfield = key
if value.docstatus is None:
value.docstatus = 0
if not getattr(value, "idx", None):
value.idx = len(self.get(key) or []) + 1
if not getattr(value, "name", None):
value.__dict__['__islocal'] = 1
return value
def get_valid_dict(self):
d = {}
for fieldname in self.meta.get_valid_columns():
d[fieldname] = self.get(fieldname)
df = self.meta.get_field(fieldname)
if df:
if df.fieldtype=="Check" and not isinstance(d[fieldname], int):
d[fieldname] = cint(d[fieldname])
elif df.fieldtype in ("Datetime", "Date") and d[fieldname]=="":
d[fieldname] = None
elif df.get("unique") and cstr(d[fieldname]).strip()=="":
# unique empty field should be set to None
d[fieldname] = None
return d
def init_valid_columns(self):
for key in default_fields:
if key not in self.__dict__:
self.__dict__[key] = None
for key in self.get_valid_columns():
if key not in self.__dict__:
self.__dict__[key] = None
def get_valid_columns(self):
if self.doctype not in frappe.local.valid_columns:
if self.doctype in ("DocField", "DocPerm") and self.parent in ("DocType", "DocField", "DocPerm"):
from frappe.model.meta import get_table_columns
valid = get_table_columns(self.doctype)
else:
valid = self.meta.get_valid_columns()
frappe.local.valid_columns[self.doctype] = valid
return frappe.local.valid_columns[self.doctype]
def is_new(self):
return self.get("__islocal")
def as_dict(self, no_nulls=False, no_default_fields=False):
doc = self.get_valid_dict()
doc["doctype"] = self.doctype
for df in self.meta.get_table_fields():
children = self.get(df.fieldname) or []
doc[df.fieldname] = [d.as_dict(no_nulls=no_nulls) for d in children]
if no_nulls:
for k in doc.keys():
if doc[k] is None:
del doc[k]
if no_default_fields:
for k in doc.keys():
if k in default_fields:
del doc[k]
for key in ("_user_tags", "__islocal", "__onload", "_starred_by"):
if self.get(key):
doc[key] = self.get(key)
return frappe._dict(doc)
def as_json(self):
return frappe.as_json(self.as_dict())
def get_table_field_doctype(self, fieldname):
return self.meta.get_field(fieldname).options
def get_parentfield_of_doctype(self, doctype):
fieldname = [df.fieldname for df in self.meta.get_table_fields() if df.options==doctype]
return fieldname[0] if fieldname else None
def db_insert(self):
"""INSERT the document (with valid columns) in the database."""
if not self.name:
# name will be set by document class in most cases
set_new_name(self)
d = self.get_valid_dict()
columns = d.keys()
try:
frappe.db.sql("""insert into `tab{doctype}`
({columns}) values ({values})""".format(
doctype = self.doctype,
columns = ", ".join(["`"+c+"`" for c in columns]),
values = ", ".join(["%s"] * len(columns))
), d.values())
except Exception, e:
if e.args[0]==1062:
if "PRIMARY" in cstr(e.args[1]):
if self.meta.autoname=="hash":
# hash collision? try again
self.name = None
self.db_insert()
return
type, value, traceback = sys.exc_info()
frappe.msgprint(_("Duplicate name {0} {1}").format(self.doctype, self.name))
raise frappe.DuplicateEntryError, (self.doctype, self.name, e), traceback
elif "Duplicate" in cstr(e.args[1]):
# unique constraint
self.show_unique_validation_message(e)
else:
raise
else:
raise
self.set("__islocal", False)
def db_update(self):
if self.get("__islocal") or not self.name:
self.db_insert()
return
d = self.get_valid_dict()
columns = d.keys()
try:
frappe.db.sql("""update `tab{doctype}`
set {values} where name=%s""".format(
doctype = self.doctype,
values = ", ".join(["`"+c+"`=%s" for c in columns])
), d.values() + [d.get("name")])
except Exception, e:
if e.args[0]==1062 and "Duplicate" in cstr(e.args[1]):
self.show_unique_validation_message(e)
else:
raise
def show_unique_validation_message(self, e):
type, value, traceback = sys.exc_info()
fieldname, label = str(e).split("'")[-2], None
# unique_first_fieldname_second_fieldname is the constraint name
# created using frappe.db.add_unique
if "unique_" in fieldname:
fieldname = fieldname.split("_", 1)[1]
df = self.meta.get_field(fieldname)
if df:
label = df.label
frappe.msgprint(_("{0} must be unique".format(label or fieldname)))
# this is used to preserve traceback
raise frappe.UniqueValidationError, (self.doctype, self.name, e), traceback
def db_set(self, fieldname, value, update_modified=True):
self.set(fieldname, value)
self.set("modified", now())
self.set("modified_by", frappe.session.user)
frappe.db.set_value(self.doctype, self.name, fieldname, value,
self.modified, self.modified_by, update_modified=update_modified)
def _fix_numeric_types(self):
for df in self.meta.get("fields"):
if df.fieldtype == "Check":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif self.get(df.fieldname) is not None:
if df.fieldtype == "Int":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif df.fieldtype in ("Float", "Currency", "Percent"):
self.set(df.fieldname, flt(self.get(df.fieldname)))
if self.docstatus is not None:
self.docstatus = cint(self.docstatus)
def _get_missing_mandatory_fields(self):
"""Get mandatory fields that do not have any values"""
def get_msg(df):
if df.fieldtype == "Table":
return "{}: {}: {}".format(_("Error"), _("Data missing in table"), _(df.label))
elif self.parentfield:
return "{}: {} #{}: {}: {}".format(_("Error"), _("Row"), self.idx,
_("Value missing for"), _(df.label))
else:
return "{}: {}: {}".format(_("Error"), _("Value missing for"), _(df.label))
missing = []
for df in self.meta.get("fields", {"reqd": 1}):
if self.get(df.fieldname) in (None, []) or not strip_html(cstr(self.get(df.fieldname))).strip():
missing.append((df.fieldname, get_msg(df)))
return missing
def get_invalid_links(self, is_submittable=False):
def get_msg(df, docname):
if self.parentfield:
return "{} #{}: {}: {}".format(_("Row"), self.idx, _(df.label), docname)
else:
return "{}: {}".format(_(df.label), docname)
invalid_links = []
cancelled_links = []
for df in self.meta.get_link_fields() + self.meta.get("fields",
{"fieldtype":"Dynamic Link"}):
docname = self.get(df.fieldname)
if docname:
if df.fieldtype=="Link":
doctype = df.options
if not doctype:
frappe.throw(_("Options not set for link field {0}").format(df.fieldname))
else:
doctype = self.get(df.options)
if not doctype:
frappe.throw(_("{0} must be set first").format(self.meta.get_label(df.options)))
# MySQL is case insensitive. Preserve case of the original docname in the Link Field.
value = frappe.db.get_value(doctype, docname, "name", cache=True)
setattr(self, df.fieldname, value)
if not value:
invalid_links.append((df.fieldname, docname, get_msg(df, docname)))
elif (df.fieldname != "amended_from"
and (is_submittable or self.meta.is_submittable) and frappe.get_meta(doctype).is_submittable
and cint(frappe.db.get_value(doctype, docname, "docstatus"))==2):
cancelled_links.append((df.fieldname, docname, get_msg(df, docname)))
return invalid_links, cancelled_links
def _validate_selects(self):
if frappe.flags.in_import:
return
for df in self.meta.get_select_fields():
if df.fieldname=="naming_series" or not (self.get(df.fieldname) and df.options):
continue
options = (df.options or "").split("\n")
# if only empty options
if not filter(None, options):
continue
# strip and set
self.set(df.fieldname, cstr(self.get(df.fieldname)).strip())
value = self.get(df.fieldname)
if value not in options and not (frappe.flags.in_test and value.startswith("_T-")):
# show an elaborate message
prefix = _("Row #{0}:").format(self.idx) if self.get("parentfield") else ""
label = _(self.meta.get_label(df.fieldname))
comma_options = '", "'.join(_(each) for each in options)
frappe.throw(_('{0} {1} cannot be "{2}". It should be one of "{3}"').format(prefix, label,
value, comma_options))
def _validate_constants(self):
if frappe.flags.in_import or self.is_new():
return
constants = [d.fieldname for d in self.meta.get("fields", {"set_only_once": 1})]
if constants:
values = frappe.db.get_value(self.doctype, self.name, constants, as_dict=True)
for fieldname in constants:
if self.get(fieldname) != values.get(fieldname):
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(fieldname)),
frappe.CannotChangeConstantError)
def _validate_update_after_submit(self):
db_values = frappe.db.get_value(self.doctype, self.name, "*", as_dict=True)
for key, db_value in db_values.iteritems():
df = self.meta.get_field(key)
if df and not df.allow_on_submit and (self.get(key) or db_value):
self_value = self.get_value(key)
if self_value != db_value:
frappe.throw(_("Not allowed to change {0} after submission").format(df.label),
frappe.UpdateAfterSubmitError)
def precision(self, fieldname, parentfield=None):
"""Returns float precision for a particular field (or get global default).
:param fieldname: Fieldname for which precision is required.
:param parentfield: If fieldname is in child table."""
from frappe.model.meta import get_field_precision
if parentfield and not isinstance(parentfield, basestring):
parentfield = parentfield.parentfield
cache_key = parentfield or "main"
if not hasattr(self, "_precision"):
self._precision = frappe._dict()
if cache_key not in self._precision:
self._precision[cache_key] = frappe._dict()
if fieldname not in self._precision[cache_key]:
self._precision[cache_key][fieldname] = None
doctype = self.meta.get_field(parentfield).options if parentfield else self.doctype
df = frappe.get_meta(doctype).get_field(fieldname)
if df.fieldtype in ("Currency", "Float", "Percent"):
self._precision[cache_key][fieldname] = get_field_precision(df, self)
return self._precision[cache_key][fieldname]
def get_formatted(self, fieldname, doc=None, currency=None, absolute_value=False):
from frappe.utils.formatters import format_value
df = self.meta.get_field(fieldname)
if not df and fieldname in default_fields:
from frappe.model.meta import get_default_df
df = get_default_df(fieldname)
val = self.get(fieldname)
if absolute_value and isinstance(val, (int, float)):
val = abs(self.get(fieldname))
if not doc:
doc = getattr(self, "parent_doc", None) or self
return format_value(val, df=df, doc=doc, currency=currency)
def is_print_hide(self, fieldname, df=None, for_print=True):
"""Returns true if fieldname is to be hidden for print.
Print Hide can be set via the Print Format Builder or in the controller as a list
of hidden fields. Example
class MyDoc(Document):
def __setup__(self):
self.print_hide = ["field1", "field2"]
:param fieldname: Fieldname to be checked if hidden.
"""
meta_df = self.meta.get_field(fieldname)
if meta_df and meta_df.get("__print_hide"):
return True
if df:
return df.print_hide
if meta_df:
return meta_df.print_hide
def in_format_data(self, fieldname):
"""Returns True if shown via Print Format::`format_data` property.
Called from within standard print format."""
doc = getattr(self, "parent_doc", self)
if hasattr(doc, "format_data_map"):
return fieldname in doc.format_data_map
else:
return True
def reset_values_if_no_permlevel_access(self, has_access_to, high_permlevel_fields):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
to_reset = []
for df in high_permlevel_fields:
if df.permlevel not in has_access_to and df.fieldtype not in display_fieldtypes:
to_reset.append(df)
if to_reset:
if self.is_new():
# if new, set default value
ref_doc = frappe.new_doc(self.doctype)
else:
# get values from old doc
if self.parent:
self.parent_doc.get_latest()
ref_doc = [d for d in self.parent_doc.get(self.parentfield) if d.name == self.name][0]
else:
ref_doc = self.get_latest()
for df in to_reset:
self.set(df.fieldname, ref_doc.get(df.fieldname))
def get_value(self, fieldname):
df = self.meta.get_field(fieldname)
val = self.get(fieldname)
return self.cast(val, df)
def cast(self, val, df):
if df.fieldtype in ("Currency", "Float", "Percent"):
val = flt(val)
elif df.fieldtype in ("Int", "Check"):
val = cint(val)
elif df.fieldtype in ("Data", "Text", "Small Text", "Long Text",
"Text Editor", "Select", "Link", "Dynamic Link"):
val = cstr(val)
elif df.fieldtype == "Date":
val = getdate(val)
elif df.fieldtype == "Datetime":
val = get_datetime(val)
elif df.fieldtype == "Time":
val = to_timedelta(val)
return val
def _extract_images_from_text_editor(self):
from frappe.utils.file_manager import extract_images_from_doc
if self.doctype != "DocType":
for df in self.meta.get("fields", {"fieldtype":"Text Editor"}):
extract_images_from_doc(self, df.fieldname)
def _filter(data, filters, limit=None):
"""pass filters as:
{"key": "val", "key": ["!=", "val"],
"key": ["in", "val"], "key": ["not in", "val"], "key": "^val",
"key" : True (exists), "key": False (does not exist) }"""
out = []
for d in data:
add = True
for f in filters:
fval = filters[f]
if fval is True:
fval = ("not None", fval)
elif fval is False:
fval = ("None", fval)
elif not isinstance(fval, (tuple, list)):
if isinstance(fval, basestring) and fval.startswith("^"):
fval = ("^", fval[1:])
else:
fval = ("=", fval)
if not frappe.compare(getattr(d, f, None), fval[0], fval[1]):
add = False
break
if add:
out.append(d)
if limit and (len(out)-1)==limit:
break
return out
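# Illustrative use of the filter syntax documented above (the fieldnames are
# hypothetical): _filter(doc.get_all_children(), {"docstatus": 0, "item_code": "^A"})
# keeps child rows whose docstatus equals 0 and whose item_code starts with "A".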
|
gangadharkadam/frappecontribution
|
frappe/model/base_document.py
|
Python
|
mit
| 18,678
|
# coding: utf8
# Copyright 2015 Vincent Jacques <vincent@vincent-jacques.net>
from .joint import InterpolatorTestCase, JointTestCase
|
jacquev6/Pynamixel
|
Pynamixel/concepts/tests.py
|
Python
|
mit
| 135
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inspect
import numpy as np
import emcee
import george
from george import kernels
import os
import sys
currentframe = inspect.currentframe()
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(currentframe)))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import profiles
import gpew
import matplotlib.pyplot as pl
def single_kernel_noisemodel(p):
"""
Simple one squared-exponential kernel noise model.
"""
return george.GP(p[0] * kernels.ExpSquaredKernel(p[1]))
def single_kernel_lnprior(p):
amp, xcen, sigma, lna, lnalpha = p
if (-50. < lna < 0. and amp > 0. and sigma > 0. and xcen > 8685 and
xcen < 8690):
return 0.0
return -np.inf
def chi2_lnprior(p):
amp, xcen, sigma = p
if (amp > 0. and sigma > 0. and xcen > 8685 and xcen < 8690):
return 0.0
return -np.inf
d = np.loadtxt('spec.txt').T
sel = (d[0] > 8680) & (d[0] < 8696)
yerr = np.ones_like(d[0][sel]) * 0.01
lines = [(d[0][sel], d[1][sel], yerr)]
pfiles = [profiles.gaussian]
pparn = np.cumsum([0] +\
[len(inspect.getargspec(i)[0]) - 1 for i in pfiles])
###############################################################################
# GP modelled line
initial = [0.28, 8687.82, 1.53, -6.1, 0.3]
nwalkers = 128
ndim = len(initial)
niter = 100
noisemodel = single_kernel_noisemodel
data = [lines, pfiles, pparn, noisemodel, single_kernel_lnprior]
p0 = np.array([np.array(initial) + 1e-2 * np.random.randn(ndim)
for i in xrange(nwalkers)])
sampler = emcee.EnsembleSampler(nwalkers, ndim, gpew.lnprob, args=data)
p0, lnp, _ = sampler.run_mcmc(p0, niter)
sampler.reset()
p = p0[np.argmax(lnp)]
p0 = [p + 1e-2 * np.random.randn(ndim) for i in xrange(nwalkers)]
p0, _, _ = sampler.run_mcmc(p0, niter)
samples = sampler.flatchain
xcen = samples[:, 1]
mxcen = np.mean(xcen)
xs = np.linspace(-8.1, 8.1, 100)
models = []
clean_models = []
ew = []
for s in samples[np.random.randint(len(samples), size=100)]:
pars = s[pparn[0]:pparn[1]]
profile = 1 - pfiles[0](lines[0][0], *pars)
profilexs = 1 - pfiles[0](xs + mxcen, *pars)
clean_models.append(profilexs)
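    # equivalent width: rectangle-rule integral of the line depth (1 - profile)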
ew.append(np.sum((1 - profilexs[1:]) * (xs[1:] - xs[:-1])))
if noisemodel is not None:
nmp = np.exp(s[pparn[-1]:])
nm = noisemodel(nmp)
nm.compute(lines[0][0], lines[0][2])
m = nm.sample_conditional(lines[0][1] - profile,
xs + mxcen) + profilexs
models.append(m)
offset = 0.0
pl.errorbar(lines[0][0] - mxcen, lines[0][1] + offset, yerr=lines[0][2],
fmt=".k", capsize=0)
pl.text(xs[0], offset + 1.02, '%.2f +- %.2f' % (np.mean(ew),
np.std(ew)))
la = np.array(clean_models).T
lstd = np.std(la, axis=1)
lavg = np.average(la, axis=1)
y1, y2 = lavg + lstd + offset, lavg - lstd + offset
pl.fill_between(xs, y1, y2, alpha=0.3)
gpa = np.array(models).T
gpstd = np.std(gpa, axis=1)
gpavg = np.average(gpa, axis=1)
y1, y2 = gpavg + gpstd + offset, gpavg - gpstd + offset
pl.fill_between(xs, y1, y2, color='r', alpha=0.3)
###############################################################################
# Chi2 modelled line
initial = [0.28, 8687.82, 1.53]
ndim = len(initial)
noisemodel = None
data = [lines, pfiles, pparn, noisemodel, chi2_lnprior]
p0 = np.array([np.array(initial) + 1e-2 * np.random.randn(ndim)
for i in xrange(nwalkers)])
sampler = emcee.EnsembleSampler(nwalkers, ndim, gpew.lnprob, args=data)
p0, lnp, _ = sampler.run_mcmc(p0, niter)
sampler.reset()
p = p0[np.argmax(lnp)]
p0 = [p + 1e-2 * np.random.randn(ndim) for i in xrange(nwalkers)]
p0, _, _ = sampler.run_mcmc(p0, niter)
samples = sampler.flatchain
xcen = samples[:, 1]
mxcen = np.mean(xcen)
clean_models = []
ew = []
for s in samples[np.random.randint(len(samples), size=100)]:
pars = s[pparn[0]:pparn[1]]
profilexs = 1 - pfiles[0](xs + mxcen, *pars)
clean_models.append(profilexs)
ew.append(np.sum((1 - profilexs[1:]) * (xs[1:] - xs[:-1])))
offset = 0.3
pl.errorbar(lines[0][0] - mxcen, lines[0][1] + offset, yerr=lines[0][2],
fmt=".k", capsize=0)
pl.text(xs[0], offset + 1.02, '%.2f +- %.2f' % (np.mean(ew),
np.std(ew)))
la = np.array(clean_models).T
lstd = np.std(la, axis=1)
lavg = np.average(la, axis=1)
y1, y2 = lavg + lstd + offset, lavg - lstd + offset
pl.fill_between(xs, y1, y2, alpha=0.3)
pl.show()
|
mtjvc/gpew
|
examples/chi2_vs_gp.py
|
Python
|
mit
| 4,487
|
"""settings."""
DEFAULT_INDEXES = [
'towns',
]
MAPPINGS = {
'towns': """{
"mappings": {
"town": {
"properties": {
"location": {
"type": "geo_point"
},
"name" : {
"type" : "string",
"index" : "not_analyzed"
}
}
}
}
}"""
}
ELASTIC_SEARCH_URL = 'http://0.0.0.0:9200'
config = {
'host': 'http://0.0.0.0',
'port': 9200,
'url_prefix': 'es',
'use_ssl': True
}
|
CornerstoneLabs/twittermap
|
search/es-towns/settings.py
|
Python
|
mit
| 609
|
"""Creates a table detailing the properties of all VPHAS frames in DR2."""
import os
import numpy as np
from astropy import log
from astropy.table import Table, vstack
from astropy.utils.console import ProgressBar
import surveytools
from surveytools.footprint import VphasExposure
if __name__ == '__main__':
blue_images = Table.read(os.path.join(surveytools.SURVEYTOOLS_DATA, 'vphas-dr2-blue-images.fits'))['image file']
red_images = Table.read(os.path.join(surveytools.SURVEYTOOLS_DATA, 'vphas-dr2-red-images.fits'))['image file']
output_tbl = None
output_fn = 'vphas-dr2-frames.csv'
log.info('Writing {0}'.format(output_fn))
for fn in ProgressBar(np.concatenate((blue_images, red_images))):
try:
exp = VphasExposure(fn)
tbl = exp.frames()
if output_tbl is None:
output_tbl = tbl
else:
output_tbl = vstack([output_tbl, tbl])
except Exception as e:
log.error('{0}: {1}'.format(fn, e))
output_tbl.write(output_fn, format='ascii.ecsv')
|
barentsen/surveytools
|
scripts/create-frame-index.py
|
Python
|
mit
| 1,073
|
from __future__ import absolute_import
from __future__ import unicode_literals
import random
import tqdm
from .gold import GoldParse
from .scorer import Scorer
from .gold import merge_sents
class Trainer(object):
'''Manage training of an NLP pipeline.'''
def __init__(self, nlp, gold_tuples):
self.nlp = nlp
self.gold_tuples = gold_tuples
self.nr_epoch = 0
def epochs(self, nr_epoch, augment_data=None, gold_preproc=False):
cached_golds = {}
def _epoch(indices):
for i in tqdm.tqdm(indices):
raw_text, paragraph_tuples = self.gold_tuples[i]
if gold_preproc:
raw_text = None
else:
paragraph_tuples = merge_sents(paragraph_tuples)
if augment_data is None:
docs = self.make_docs(raw_text, paragraph_tuples)
if i in cached_golds:
golds = cached_golds[i]
else:
golds = self.make_golds(docs, paragraph_tuples)
else:
raw_text, paragraph_tuples = augment_data(raw_text, paragraph_tuples)
docs = self.make_docs(raw_text, paragraph_tuples)
golds = self.make_golds(docs, paragraph_tuples)
for doc, gold in zip(docs, golds):
yield doc, gold
indices = list(range(len(self.gold_tuples)))
for itn in range(nr_epoch):
random.shuffle(indices)
yield _epoch(indices)
self.nr_epoch += 1
def update(self, doc, gold):
for process in self.nlp.pipeline:
if hasattr(process, 'update'):
loss = process.update(doc, gold, itn=self.nr_epoch)
else:
process(doc)
return doc
def evaluate(self, dev_sents, gold_preproc=False):
scorer = Scorer()
for raw_text, paragraph_tuples in dev_sents:
if gold_preproc:
raw_text = None
else:
paragraph_tuples = merge_sents(paragraph_tuples)
docs = self.make_docs(raw_text, paragraph_tuples)
golds = self.make_golds(docs, paragraph_tuples)
for doc, gold in zip(docs, golds):
for process in self.nlp.pipeline:
process(doc)
scorer.score(doc, gold)
return scorer
def make_docs(self, raw_text, paragraph_tuples):
if raw_text is not None:
return [self.nlp.tokenizer(raw_text)]
else:
return [self.nlp.tokenizer.tokens_from_list(sent_tuples[0][1])
for sent_tuples in paragraph_tuples]
def make_golds(self, docs, paragraph_tuples):
if len(docs) == 1:
return [GoldParse.from_annot_tuples(docs[0], sent_tuples[0])
for sent_tuples in paragraph_tuples]
else:
return [GoldParse.from_annot_tuples(doc, sent_tuples[0])
for doc, sent_tuples in zip(docs, paragraph_tuples)]
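# Typical driver loop (sketch only; `nlp` and `gold_tuples` are assumed to be
# supplied by the surrounding training script):
#
#     trainer = Trainer(nlp, gold_tuples)
#     for epoch in trainer.epochs(nr_epoch=10):
#         for doc, gold in epoch:
#             trainer.update(doc, gold)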
|
oroszgy/spaCy.hu
|
spacy/train.py
|
Python
|
mit
| 3,105
|
""" Recent METARs containing some pattern """
import json
import memcache
import psycopg2.extras
from paste.request import parse_formvars
from pyiem.reference import TRACE_VALUE
from pyiem.util import get_dbconn, html_escape
json.encoder.FLOAT_REPR = lambda o: format(o, ".2f")
def trace(val):
"""Nice Print"""
if val == TRACE_VALUE:
return "T"
return val
def get_data(q):
"""Get the data for this query"""
pgconn = get_dbconn("iem")
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
data = {"type": "FeatureCollection", "features": []}
# Fetch the values
countrysql = ""
if q == "snowdepth":
datasql = "substring(raw, ' 4/([0-9]{3})')::int"
wheresql = "raw ~* ' 4/'"
elif q == "i1":
datasql = "ice_accretion_1hr"
wheresql = "ice_accretion_1hr >= 0"
elif q == "i3":
datasql = "ice_accretion_3hr"
wheresql = "ice_accretion_3hr >= 0"
elif q == "i6":
datasql = "ice_accretion_6hr"
wheresql = "ice_accretion_6hr >= 0"
elif q == "fc":
datasql = "''"
wheresql = "'FC' = ANY(wxcodes)"
elif q == "gr":
datasql = "''"
wheresql = "'GR' = ANY(wxcodes)"
elif q == "pno":
datasql = "''"
wheresql = "raw ~* ' PNO'"
elif q in ["50", "50A"]:
datasql = "greatest(sknt, gust)"
wheresql = "(sknt >= 50 or gust >= 50)"
if q == "50":
countrysql = "and country = 'US'"
else:
return json.dumps(data)
cursor.execute(
f"""
select id, network, name, st_x(geom) as lon, st_y(geom) as lat,
valid at time zone 'UTC' as utc_valid, {datasql} as data, raw
from current_log c JOIN stations t on (c.iemid = t.iemid)
WHERE network ~* 'ASOS' {countrysql}
and {wheresql} ORDER by valid DESC
"""
)
for i, row in enumerate(cursor):
data["features"].append(
{
"type": "Feature",
"id": i,
"properties": {
"station": row["id"],
"network": row["network"],
"name": row["name"],
"value": trace(row["data"]),
"metar": row["raw"],
"valid": row["utc_valid"].strftime("%Y-%m-%dT%H:%M:%SZ"),
},
"geometry": {
"type": "Point",
"coordinates": [row["lon"], row["lat"]],
},
}
)
return json.dumps(data)
def application(environ, start_response):
    """WSGI entry point: return GeoJSON (JSONP when a callback is given), memcached for 300 seconds."""
field = parse_formvars(environ)
q = field.get("q", "snowdepth")[:10]
cb = field.get("callback", None)
headers = [("Content-type", "application/vnd.geo+json")]
mckey = f"/geojson/recent_metar?callback={cb}&q={q}"
mc = memcache.Client(["iem-memcached:11211"], debug=0)
res = mc.get(mckey)
if not res:
res = get_data(q)
mc.set(mckey, res, 300)
if cb is None:
data = res
else:
data = f"{html_escape(cb)}({res})"
start_response("200 OK", headers)
return [data.encode("ascii")]
if __name__ == "__main__":
print(get_data("50A"))
|
akrherz/iem
|
htdocs/geojson/recent_metar.py
|
Python
|
mit
| 3,238
|
"""Tests for the Lutron Caseta integration."""
class MockBridge:
"""Mock Lutron bridge that emulates configured connected status."""
def __init__(self, can_connect=True):
"""Initialize MockBridge instance with configured mock connectivity."""
self.can_connect = can_connect
self.is_currently_connected = False
self.buttons = {}
self.areas = {}
self.occupancy_groups = {}
self.scenes = self.get_scenes()
self.devices = self.get_devices()
async def connect(self):
"""Connect the mock bridge."""
if self.can_connect:
self.is_currently_connected = True
def is_connected(self):
"""Return whether the mock bridge is connected."""
return self.is_currently_connected
def get_devices(self):
"""Return devices on the bridge."""
return {
"1": {"serial": 1234, "name": "bridge", "model": "model", "type": "type"}
}
def get_devices_by_domain(self, domain):
"""Return devices on the bridge."""
return {}
def get_scenes(self):
"""Return scenes on the bridge."""
return {}
async def close(self):
"""Close the mock bridge connection."""
self.is_currently_connected = False
|
rohitranjan1991/home-assistant
|
tests/components/lutron_caseta/__init__.py
|
Python
|
mit
| 1,289
|
import RPi.GPIO as GPIO
import time
i1 = int(raw_input("How many seconds do you want the red light to flash?"))
i2 = int(raw_input("How many seconds do you want the yellow light to flash?"))
i3 = int(raw_input("How many seconds do you want the green light to flash?"))
i4 = int(raw_input("How many seconds do you want the blue light to flash?"))
GPIO.setmode(GPIO.BCM)
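# BCM pin assignments (inferred from the flash sequence below):
# 18 = red, 23 = yellow, 24 = green, 25 = blue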
GPIO.setup(18, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)
GPIO.setup(24, GPIO.OUT)
GPIO.setup(25, GPIO.OUT)
while True:
GPIO.output(24, True)
time.sleep(i3)
GPIO.output(24, False)
GPIO.output(23, True)
time.sleep(i2)
GPIO.output(23, False)
GPIO.output(18, True)
time.sleep(i1)
GPIO.output(18, False)
for x in range(0, i4):
GPIO.output(25, True)
time.sleep(0.5)
GPIO.output(25, False)
time.sleep(0.5)
|
johnlbullock/Raspberry_Pi_Code
|
Traffic_Light_Input.py
|
Python
|
mit
| 833
|
class Approx(object):
def __init__(self,val):
self._val = val
self._epsilon = 0.01
def epsilon(self,epsilon):
self._epsilon = epsilon
return self
def __eq__(self,other):
return abs(other - self._val) <= self._epsilon*abs(other + self._val)/2
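# Illustrative check of the relative-tolerance comparison (not part of the
# original tests): with the default 1% epsilon 10.05 compares equal to 10.0,
# while a tighter epsilon makes the same comparison fail.
if __name__ == '__main__':
    assert Approx(10.0) == 10.05
    assert not (Approx(10.0).epsilon(0.001) == 10.05)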
|
CD3/config-makover
|
test/utils.py
|
Python
|
mit
| 269
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('admin_ddjj_app', '0025_auto_20150715_2023'),
]
operations = [
migrations.AlterField(
model_name='bienpersona',
name='persona',
field=models.ForeignKey(blank=True, to='admin_ddjj_app.Persona', null=True),
preserve_default=True,
),
]
|
lanacioncom/ddjj_admin_lanacion
|
admin_ddjj_app/migrations/0026_auto_20150716_1821.py
|
Python
|
mit
| 488
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-18 21:21
from __future__ import unicode_literals
import courses.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('courses', '0002_content_file_image_text_video'),
]
operations = [
migrations.AlterModelOptions(
name='content',
options={'ordering': ['order']},
),
migrations.AlterModelOptions(
name='module',
options={'ordering': ['order']},
),
migrations.AddField(
model_name='content',
name='order',
field=courses.fields.OrderField(blank=True, default=0),
preserve_default=False,
),
migrations.AddField(
model_name='module',
name='order',
field=courses.fields.OrderField(blank=True, default=0),
preserve_default=False,
),
]
|
pauljherrera/avantiweb
|
courses/migrations/0003_auto_20170318_1821.py
|
Python
|
mit
| 973
|
# -*- coding: utf-8 -*-
import logging
import threading
import storj.exception as sjexc
from PyQt4 import QtCore, QtGui
from .engine import StorjEngine
from .file_download import SingleFileDownloadUI
from .file_mirror import FileMirrorsListUI
from .file_upload import SingleFileUploadUI
from .qt_interfaces.file_manager_ui import Ui_FileManager
from .utilities.tools import Tools
class FileManagerUI(QtGui.QMainWindow):
"""Files section."""
__logger = logging.getLogger('%s.FileManagerUI' % __name__)
def __init__(self, parent=None, bucketid=None):
QtGui.QWidget.__init__(self, parent)
self.file_manager_ui = Ui_FileManager()
self.file_manager_ui.setupUi(self)
# connect ComboBox change listener
QtCore.QObject.connect(
self.file_manager_ui.bucket_select_combo_box,
QtCore.SIGNAL('currentIndexChanged(const QString&)'),
self.createNewFileListUpdateThread)
        # open the file mirrors list window
QtCore.QObject.connect(
self.file_manager_ui.file_mirrors_bt, QtCore.SIGNAL('clicked()'),
self.open_mirrors_list_window)
        # close the file manager window
QtCore.QObject.connect(
self.file_manager_ui.quit_bt, QtCore.SIGNAL('clicked()'),
self.close)
        # open the single file download window
QtCore.QObject.connect(
self.file_manager_ui.file_download_bt, QtCore.SIGNAL('clicked()'),
self.open_single_file_download_window)
# delete selected file
QtCore.QObject.connect(
self.file_manager_ui.file_delete_bt, QtCore.SIGNAL('clicked()'),
self.delete_selected_file)
        # open the single file upload window
QtCore.QObject.connect(
self.file_manager_ui.new_file_upload_bt, QtCore.SIGNAL('clicked()'),
self.open_single_file_upload_window)
self.storj_engine = StorjEngine()
self.createNewBucketResolveThread()
def open_single_file_upload_window(self):
self.single_file_upload_window = SingleFileUploadUI(self)
self.single_file_upload_window.show()
def delete_selected_file(self):
self.current_bucket_index = self.file_manager_ui.bucket_select_combo_box.currentIndex()
self.current_selected_bucket_id = self.bucket_id_list[self.current_bucket_index]
tablemodel = self.file_manager_ui.files_list_tableview.model()
rows = sorted(set(index.row() for index in
self.file_manager_ui.files_list_tableview.selectedIndexes()))
selected = False
for row in rows:
selected = True
# get file ID index
index = tablemodel.index(row, 3)
# get file name index
index_filename = tablemodel.index(row, 0)
# we suppose data are strings
selected_file_id = str(tablemodel.data(index).toString())
selected_file_name = str(tablemodel.data(index_filename).toString())
msgBox = QtGui.QMessageBox(
QtGui.QMessageBox.Question,
'Question',
'Are you sure you want to delete this file? File name: %s' % selected_file_name,
(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No))
result = msgBox.exec_()
self.__logger.debug(result)
if result == QtGui.QMessageBox.Yes:
try:
self.storj_engine.storj_client.file_remove(
str(self.current_selected_bucket_id), str(selected_file_id))
# update files list
self.createNewFileListUpdateThread()
QtGui.QMessageBox.about(
self,
'Success',
'File "%s" was deleted successfully' % selected_file_name)
except sjexc.StorjBridgeApiError as e:
self.__logger.error(e)
QtGui.QMessageBox.about(
self,
'Error',
                        'Bridge exception occurred while trying to delete file: %s' % e)
except Exception as e:
self.__logger.error(e)
QtGui.QMessageBox.about(
self,
'Error',
                        'Unhandled exception occurred while trying to delete file: %s' % e)
if not selected:
QtGui.QMessageBox.about(
self,
'Information',
'Please select file which you want to delete')
return True
def open_mirrors_list_window(self):
self.current_bucket_index = self.file_manager_ui.bucket_select_combo_box.currentIndex()
self.current_selected_bucket_id = self.bucket_id_list[self.current_bucket_index]
tablemodel = self.file_manager_ui.files_list_tableview.model()
rows = sorted(set(index.row() for index in
self.file_manager_ui.files_list_tableview.selectedIndexes()))
i = 0
for row in rows:
            self.__logger.info('Row %d is selected', row)
index = tablemodel.index(row, 3) # get file ID
            # we assume the model data are strings
selected_file_id = str(tablemodel.data(index).toString())
self.file_mirrors_list_window = FileMirrorsListUI(self, str(self.current_selected_bucket_id),
selected_file_id)
self.file_mirrors_list_window.show()
i += 1
if i == 0:
QtGui.QMessageBox.about(
self,
'Warning!',
                'Please select a file from the file list!')
            self.__logger.debug('no file selected')
def createNewFileListUpdateThread(self):
download_thread = threading.Thread(target=self.update_files_list, args=())
download_thread.start()
def update_files_list(self):
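        # NOTE: this runs on a worker thread (see createNewFileListUpdateThread);
        # strictly speaking, Qt models and widgets should only be touched from the
        # GUI thread, so a signal/slot hand-off would be the safer pattern here.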
self.tools = Tools()
# initialize model for inserting to table
model = QtGui.QStandardItemModel(1, 1)
model.setHorizontalHeaderLabels(['File name', 'File size', 'Mimetype', 'File ID'])
self.current_bucket_index = self.file_manager_ui.bucket_select_combo_box.currentIndex()
self.current_selected_bucket_id = self.bucket_id_list[self.current_bucket_index]
i = 0
try:
            for file_details in self.storj_engine.storj_client.bucket_files(str(self.current_selected_bucket_id)):
                item = QtGui.QStandardItem(str(file_details['filename'].replace('[DECRYPTED]', '')))
                model.setItem(i, 0, item)  # row, column, item (QStandardItem)
                # get a human readable file size
                file_size_str = self.tools.human_size(int(file_details['size']))
                item = QtGui.QStandardItem(str(file_size_str))
                model.setItem(i, 1, item)  # row, column, item (QStandardItem)
                item = QtGui.QStandardItem(str(file_details['mimetype']))
                model.setItem(i, 2, item)  # row, column, item (QStandardItem)
                item = QtGui.QStandardItem(str(file_details['id']))
                model.setItem(i, 3, item)  # row, column, item (QStandardItem)
                i += 1
                self.__logger.info(file_details)
except sjexc.StorjBridgeApiError as e:
self.__logger.error(e)
self.file_manager_ui.files_list_tableview.clearFocus()
self.file_manager_ui.files_list_tableview.setModel(model)
self.file_manager_ui.files_list_tableview.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
def createNewBucketResolveThread(self):
download_thread = threading.Thread(target=self.initialize_bucket_select_combobox, args=())
download_thread.start()
def initialize_bucket_select_combobox(self):
self.buckets_list = []
self.bucket_id_list = []
self.storj_engine = StorjEngine() # init StorjEngine
try:
for bucket in self.storj_engine.storj_client.bucket_list():
                self.buckets_list.append(str(bucket.name))  # append bucket names to the list
                self.bucket_id_list.append(str(bucket.id))  # append bucket IDs to the list
except sjexc.StorjBridgeApiError as e:
QtGui.QMessageBox.about(
self,
'Unhandled bucket resolving exception',
'Exception: %s' % e)
self.file_manager_ui.bucket_select_combo_box.addItems(self.buckets_list)
def open_single_file_download_window(self):
self.current_bucket_index = self.file_manager_ui.bucket_select_combo_box.currentIndex()
self.current_selected_bucket_id = self.bucket_id_list[self.current_bucket_index]
tablemodel = self.file_manager_ui.files_list_tableview.model()
rows = sorted(set(index.row() for index in
self.file_manager_ui.files_list_tableview.selectedIndexes()))
i = 0
for row in rows:
self.__logger.info('Row %d is selected', row)
# get file ID
index = tablemodel.index(row, 3)
            # we assume the model data are strings
selected_file_id = str(tablemodel.data(index).toString())
self.file_mirrors_list_window = SingleFileDownloadUI(
self, str(self.current_selected_bucket_id), selected_file_id)
self.file_mirrors_list_window.show()
i += 1
if i == 0:
            QtGui.QMessageBox.about(self, 'Warning!', 'Please select a file from the file list!')
            self.__logger.debug('no file selected')
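# A minimal launch sketch, assuming the PyQt4-style QtGui import used above;
# it only illustrates how FileManagerUI can be shown standalone, while the
# real client wires the window into the rest of the application.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    window = FileManagerUI()
    window.show()
    sys.exit(app.exec_())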
|
lakewik/storj-gui-client
|
UI/file_manager.py
|
Python
|
mit
| 9,693
|
import contextlib
from io import StringIO
from unittest import TestCase
import docopt
from torba.testcase import AsyncioTestCase
from lbry.extras.cli import normalize_value, main
from lbry.extras.system_info import get_platform
from lbry.extras.daemon.Daemon import Daemon
class CLITest(AsyncioTestCase):
@staticmethod
def shell(argv):
actual_output = StringIO()
with contextlib.redirect_stdout(actual_output):
with contextlib.redirect_stderr(actual_output):
try:
main(argv)
except SystemExit as e:
print(e.args[0])
return actual_output.getvalue().strip()
def test_guess_type(self):
self.assertEqual('0.3.8', normalize_value('0.3.8'))
self.assertEqual('0.3', normalize_value('0.3'))
self.assertEqual(3, normalize_value('3'))
self.assertEqual(3, normalize_value(3))
self.assertEqual(
'VdNmakxFORPSyfCprAD/eDDPk5TY9QYtSA==',
normalize_value('VdNmakxFORPSyfCprAD/eDDPk5TY9QYtSA==')
)
self.assertTrue(normalize_value('TRUE'))
self.assertTrue(normalize_value('true'))
self.assertTrue(normalize_value('TrUe'))
self.assertFalse(normalize_value('FALSE'))
self.assertFalse(normalize_value('false'))
self.assertFalse(normalize_value('FaLsE'))
self.assertTrue(normalize_value(True))
self.assertEqual('3', normalize_value('3', key="uri"))
self.assertEqual('0.3', normalize_value('0.3', key="uri"))
self.assertEqual('True', normalize_value('True', key="uri"))
self.assertEqual('False', normalize_value('False', key="uri"))
self.assertEqual('3', normalize_value('3', key="file_name"))
self.assertEqual('3', normalize_value('3', key="name"))
self.assertEqual('3', normalize_value('3', key="download_directory"))
self.assertEqual('3', normalize_value('3', key="channel_name"))
self.assertEqual(3, normalize_value('3', key="some_other_thing"))
def test_help(self):
self.assertIn('lbrynet [-v] [--api HOST:PORT]', self.shell(['--help']))
        # start is a special command with separate help handling
self.assertIn('--share-usage-data', self.shell(['start', '--help']))
        # publish is an ungrouped command; invoked bare, it implicitly returns only its usage
self.assertIn('publish (<name> | --name=<name>)', self.shell(['publish']))
        # publish is an ungrouped command, with explicit --help
self.assertIn('Create or replace a stream claim at a given name', self.shell(['publish', '--help']))
        # account is a group; it returns help implicitly
self.assertIn('Return the balance of an account', self.shell(['account']))
# account is a group, with explicit --help
self.assertIn('Return the balance of an account', self.shell(['account', '--help']))
        # account add is a grouped command; it returns usage implicitly
self.assertIn('account_add (<account_name> | --account_name=<account_name>)', self.shell(['account', 'add']))
# account add is a grouped command, with explicit --help
self.assertIn('Add a previously created account from a seed,', self.shell(['account', 'add', '--help']))
def test_help_error_handling(self):
        # a user who runs the `help` command still gets help, even though it is an invalid command
self.assertIn('--config FILE', self.shell(['help']))
# help for invalid command, with explicit --help
        self.assertIn('--config FILE', self.shell(['nonexistent', '--help']))
# help for invalid command, implicit
        self.assertIn('--config FILE', self.shell(['nonexistent']))
def test_version_command(self):
self.assertEqual(
"lbrynet {lbrynet_version}".format(**get_platform()), self.shell(['--version'])
)
def test_valid_command_daemon_not_started(self):
self.assertEqual(
"Could not connect to daemon. Are you sure it's running?",
self.shell(["publish", 'asd'])
)
def test_deprecated_command_daemon_not_started(self):
actual_output = StringIO()
with contextlib.redirect_stdout(actual_output):
main(["channel", "new", "@foo", "1.0"])
self.assertEqual(
actual_output.getvalue().strip(),
"channel_new is deprecated, using channel_create.\n"
"Could not connect to daemon. Are you sure it's running?"
)
class DaemonDocsTests(TestCase):
def test_can_parse_api_method_docs(self):
failures = []
for name, fn in Daemon.callable_methods.items():
try:
docopt.docopt(fn.__doc__, ())
except docopt.DocoptLanguageError as err:
failures.append(f"invalid docstring for {name}, {err.message}")
except docopt.DocoptExit:
pass
if failures:
self.fail("\n" + "\n".join(failures))
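# A minimal sketch of the docstring check above, using a hypothetical
# EXAMPLE_DOC rather than a real daemon method's docstring: docopt raises
# DocoptLanguageError for a malformed usage section, while a well-formed one
# merely raises DocoptExit on an empty argv, which the test treats as a pass.
EXAMPLE_DOC = """
Usage:
    thing_list (<name> | --name=<name>)

Options:
    --name=<name>  Name of the thing to list.
"""
def check_docstring(doc=EXAMPLE_DOC):
    try:
        docopt.docopt(doc, ())
    except docopt.DocoptExit:
        pass  # the usage section parsed; the empty argv simply fails to match it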
|
lbryio/lbry
|
lbry/tests/unit/test_cli.py
|
Python
|
mit
| 4,993
|
# The MIT License (MIT)
# Copyright (c) 2016-2017 HIS e. G.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from response import retJson
from configDb import fetchConfigs
def sendHistory(maximumNumber):
""" sends the last $maximimNumber configurations to the client """
try:
maximum = int(maximumNumber)
except ValueError:
        maximum = 100  # fall back to a default when the argument is not numeric
    configs = fetchConfigs(maximum)
retJson(configs)
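# Illustrative calls, assuming a working configDb backend (a hedged sketch of
# intended usage, not calls taken from the project itself):
#
#   sendHistory('25')   # emits the 25 most recent configurations as JSON
#   sendHistory('abc')  # the ValueError is caught, so 100 is used instead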
|
hsiegel/postsai-commitstop
|
permissions/sendHistory.py
|
Python
|
mit
| 1,442
|