| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
robotics-at-maryland/qubo
|
src/autonomy/src/tortuga_states.py
|
Python
|
mit
| 3,069
| 0.018573
|
#! /usr/bin/env python
# State machine for tortuga 2016 competition
# States given by kanga can be found at: imgur.com/C5KLSCJ
import roslib; roslib.load_manifest('smach')
import rospy
import smach
import smach_ros
#TODO
class gate_search(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes = ['found', 'not_found'])
def execute(self, userdata):
pass  # TODO: implement the gate search and return 'found' or 'not_found'
class gate_travel(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes = ['passed', 'failed'])
class sonar_search(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes = ['found', 'not_found'],
output_keys = ['sonar_position_vector'])
class sonar_travel(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes = ['centered', 'not_centered'],
input_keys = ['sonar_position_vector'])
class search_bin_cover(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes = ['bins', 'octagon'])
class do_bin_task(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes = ['placed', 'lifted', 'incomplete'])
class surface(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes = ['inside', 'oh_no'])
def main():
rospy.init_node('tortuga_ai_node')
sm = smach.StateMachine(outcomes=['done'])
sm.userdata.sonar_position_vector = [0,0,0]
with sm:
smach.StateMachine.add('gate_search', gate_search(),
transitions={'found':'gate_travel',
'not_found':'gate_search'})
smach.StateMachine.add('gate_travel', gate_travel(),
transitions={'passed':'sonar_search',
'failed':'gate_search'})
smach.StateMachine.add('sonar_search', sonar_search(),
transitions={'found':'sonar_travel',
'not_found':'sonar_search'},
remapping={'sonar_position_vector':'sonar_position_vector'})
smach.StateMachine.add('sonar_travel', sonar_travel(),
transitions={'centered':'search_bin_cover',
'not_centered':'sonar_travel'},
remapping={'sonar_position_vector':'sonar_position_vector'})
smach.StateMachine.add('search_bin_cover', search_bin_cover(),
transitions={'bins':'do_bin_task',
'octagon':'surface'})
smach.StateMachine.add('do_bin_task', do_bin_task(),
transitions={'placed':'sonar_search',
'lifted':'sonar_search',
'incomplete':'sonar_search'})
smach.StateMachine.add('surface', surface(),
transitions={'inside':'sonar_search',
'oh_no':'done'})
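# Note: the captured file is truncated here. A typical smach entry point (an
# assumption, not part of the captured text) would finish main() with
#     outcome = sm.execute()
# and add a standard guard:
#     if __name__ == '__main__':
#         main()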
|
|
MTG/essentia
|
test/src/unittests/temporal/test_effectiveduration.py
|
Python
|
agpl-3.0
| 2,995
| 0.004341
|
#!/usr/bin/env python
# Copyright (C) 2006-2021 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from random import randint
class TestEffectiveDuration(TestCase):
def testEmpty(self):
input = []
self.assertEqual(EffectiveDuration()(input), 0.0)
def testZero(self):
input = [0]*100
self.assertAlmostEqual(EffectiveDuration()(input), 0.)
def testOne(self):
input = [0.3]
self.assertAlmostEqual(EffectiveDuration()(input), 1/44100.0)
input = [0]
self.assertAlmostEqual(EffectiveDuration()(input), 0)
input = [100]
self.assertAlmostEqual(EffectiveDuration()(input), 1/44100.0)
def testThresholdRatioZero(self):
input = [1] * 100
self.assertAlmostEqual(EffectiveDuration(thresholdRatio=0.)(input),
100/44100.)
def testThresholdRatioOne(self):
input = [1,0,0,0,0] * 100
self.assertAlmostEqual(EffectiveDuration(thresholdRatio=1.)(input),
100/44100.0)
def test30Sec(self):
input = [randint(41, 100) for x in range(44100*30)]
self.assertAlmostEqual(EffectiveDuration()(input), 30)
def test15SecOf30Sec(self):
input1 = [randint(41, 100) for x in range(44100*15)]
input1[0] = 100 # to ensure that at least one element is 100
input2 = [randint(0, 39) for x in range(44100*15)]
input = input1 + input2
self.assertAlmostEqual(EffectiveDuration()(input), 15)
def testNegative20SecOf40Sec(self):
# Note: this test assumes the thresholdRatio is 40%
input1 = [randint(-100, -41) for x in range(44100*10)]
input2 = [randint(0, 39) for x in range(44100*10)]
input3 = [randint(41, 100) for x in range(44100*10)]
input3[0] = 100 # to ensure that at least one element is 100
input4 = [randint(-39, 0) for x in range(44100*10)]
input = input1 + input2 + input3 + input4
self.assertAlmostEqual(EffectiveDuration()(input), 20)
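# Worked check of the expected value above, assuming the default
# thresholdRatio of 0.4 mentioned in the note: the threshold is
# 0.4 * max(|x|) = 0.4 * 100 = 40, so only input1 and input3 (10 s each of
# samples with |x| >= 41) count as effective, giving 10 + 10 = 20 seconds.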
def testBadSampleRate(self):
self.assertConfigureFails(EffectiveDuration(), { 'sampleRate' : 0 })
suite = allTests(TestEffectiveDuration)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
mysql/mysql-utilities
|
mysql/utilities/command/rpl_admin.py
|
Python
|
gpl-2.0
| 45,372
| 0.000242
|
#
# Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the replication administration tools for managing a
simple master-to-slaves topology.
"""
import logging
import os
import sys
import time
from datetime import datetime, timedelta
from mysql.utilities.exception import UtilRplError
from mysql.utilities.common.gtid import gtid_set_itemize
from mysql.utilities.common.ip_parser import hostname_is_ip
from mysql.utilities.common.messages import (ERROR_SAME_MASTER,
ERROR_USER_WITHOUT_PRIVILEGES,
HOST_IP_WARNING,
EXTERNAL_SCRIPT_DOES_NOT_EXIST,
INSUFFICIENT_FILE_PERMISSIONS)
from mysql.utilities.common.tools import ping_host, execute_script
from mysql.utilities.common.format import print_list
from mysql.utilities.common.topology import Topology
from mysql.utilities.command.failover_console import FailoverConsole
from mysql.utilities.command.failover_daemon import FailoverDaemon
from mysql.utilities.common.server import check_hostname_alias, get_port
_VALID_COMMANDS_TEXT = """
Available Commands:
elect - perform best slave election and report best slave
failover - conduct failover from master to best slave
gtid - show status of global transaction id variables
also displays uuids for all servers
health - display the replication health
reset - stop and reset all slaves
start - start all slaves
stop - stop all slaves
switchover - perform slave promotion
Note:
elect, gtid and health require --master and either
--slaves or --discover-slaves-login;
failover requires --slaves;
switchover requires --master, --new-master and either
--slaves or --discover-slaves-login;
start, stop and reset require --slaves (and --master is optional)
"""
_VALID_COMMANDS = ["elect", "failover", "gtid", "health", "reset", "start",
"stop", "switchover"]
_SLAVE_COMMANDS = ["reset", "start", "stop"]
_MASTER_COLS = ["Host", "Port", "Binary Log File", "Position"]
_SLAVE_COLS = ["Host", "Port", "Master Log File", "Position", "Seconds Behind"]
_GTID_COLS = ["host", "port", "role", "gtid"]
_FAILOVER_ERROR = "%sCheck server for errors and run the mysqlrpladmin " + \
"utility to perform manual failover."
_FAILOVER_ERRNO = 911
_DATE_FORMAT = '%Y-%m-%d %H:%M:%S %p'
_DATE_LEN = 22
_ERRANT_TNX_ERROR = "Errant transaction(s) found on slave(s)."
_GTID_ON_REQ = "{action} requires GTID_MODE=ON for all servers."
WARNING_SLEEP_TIME = 10
def get_valid_rpl_command_text():
"""Provide list of valid command descriptions to caller.
"""
return _VALID_COMMANDS_TEXT
def get_valid_rpl_commands():
"""Provide list of valid commands to caller.
"""
return _VALID_COMMANDS
def purge_log(filename, age):
"""Purge old log entries
This method deletes rows from the log file older than the age specified
in days.
filename[in] filename of log file
age[in] age in days
Returns bool - True = success, False = error reading/writing log file
"""
if not os.path.exists(filename):
print "NOTE: Log file '%s' does not exist. Will be created." % filename
return True
# Read a row, check age. If > today + age, delete row.
# Ignore user markups and other miscellaneous entries.
try:
log = open(filename, "r")
log_entries = log.readlines()
log.close()
threshold = datetime.now() - timedelta(days=age)
start = 0
for row in log_entries:
# Check age here
try:
row_time = time.strptime(row[0:_DATE_LEN], _DATE_FORMAT)
row_age = datetime(*row_time[:6])
if row_age < threshold:
start += 1
elif start == 0:
return True
else:
break
except:
start += 1 # Remove invalid formatted lines
log = open(filename, "w")
log.writelines(log_entries[start:])
log.close()
except:
return False
return True
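# Minimal usage sketch (hypothetical file name): drop entries older than one
# week before reusing the failover log.
#     purge_log("failover.log", 7)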
def skip_slaves_trx(gtid_set, slaves_cnx_val, options):
"""Skip transactions on slaves.
This method skips the given transactions (GTID set) on all the specified
slaves. That is, an empty transaction is injected for each GTID in
the given set for one of each slaves. In case a slave already has an
executed transaction for a given GTID then that GTID is ignored for this
slave.
gtid_set[in] String representing the set of GTIDs to skip.
slaves_cnx_val[in] List of the dictionaries with the connection
values for each target slave.
options[in] Dictionary of options (dry_run, verbosity).
Throws an UtilError exception if an error occurs during the execution.
"""
verbosity = options.get('verbosity')
dryrun = options.get('dry_run')
# Connect to slaves.
rpl_topology = Topology(None, slaves_cnx_val, options)
# Check required privileges.
errors = rpl_topology.check_privileges(skip_master=True)
if errors:
err_details = ''
for err in errors:
err_msg = ERROR_USER_WITHOUT_PRIVILEGES.format(
user=err[0], host=err[1], port=err[2],
operation='inject empty transactions', req_privileges=err[3])
err_details = '{0}{1}\n'.format(err_details, err_msg)
err_details = err_details.strip()
raise UtilRplError("Not enough privileges.\n{0}".format(err_details))
# GTID must be enabled on all servers.
srv_list = rpl_topology.get_servers_with_gtid_not_on()
if srv_list:
if verbosity:
print("# Slaves with GTID not enabled:")
for srv in srv_list:
msg = "# - GTID_MODE={0} on {1}:{2}".format(srv[2], srv[0],
srv[1])
print(msg)
raise UtilRplError(_GTID_ON_REQ.format(action='Transaction skip'))
if dryrun:
print("#")
print("# WARNING: Executing utility in dry run mode (read only).")
# Get GTID set that can be skipped, i.e., not in GTID_EXECUTED.
gtids_by_slave = rpl_topology.slaves_gtid_subtract_executed(gtid_set)
# Output GTID set that will be skipped.
print("#")
print("# GTID set to be skipped for each server:")
has_gtid_to_skip = False
for host, port, gtids_to_skip in gtids_by_slave:
if not gtids_to_skip:
gtids_to_skip = 'None'
else:
# Set flag to indicate that there is at least one GTID to skip.
has_gtid_to_skip = True
print("# - {0}@{1}: {2}".format(host, port, gtids_to_skip))
# Create dictionary to directly access the slaves instances.
slaves_dict = rpl_topology.get_slaves_dict()
# Skip transactions for the given list of slaves.
print("#")
# pylint: disable=R0101
if has_gtid_to_skip:
for host, port, gtids_to_skip in gtids_by_slave:
if gtids_to_skip:
# Decompose GTID set into a list of single transactions.
gtid_items = gtid_set_itemize(gtids_to_skip)
dryrun_mark = '(dry run) ' if dryrun else ''
print("
|
google-code/ampatu
|
models/db.py
|
Python
|
agpl-3.0
| 4,254
| 0.010343
|
# -*- coding: utf-8 -*-
#########################################################################
## This scaffolding model makes your app work on Google App Engine too
#########################################################################
if request.env.web2py_runtime_gae: # if running on Google App Engine
db = DAL('gae') # connect to Google BigTable
session.connect(request, response, db = db) # and store sessions and tickets there
### or use the following lines to store sessions in Memcache
# from gluon.contrib.memdb import MEMDB
# from google.appengine.api.memcache import Client
# session.connect(request, response, db = MEMDB(Client()))
else: # else use a normal relational database
db = DAL('sqlite://storage.sqlite') # if not, use SQLite or other DB
## if no need for session
# session.forget()
#########################################################################
## Here is sample code if you need for
## - email capabilities
## - authentication (registration, login, logout, ... )
## - authorization (role based authorization)
## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
## - crud actions
## (more options discussed in gluon/tools.py)
#########################################################################
from gluon.tools import *
mail = Mail() # mailer
auth = Auth(globals(),db) # authentication/authorization
crud = Crud(globals(),db) # for CRUD helpers using auth
service = Service(globals()) # for json, xml, jsonrpc, xmlrpc, amfrpc
plugins = PluginManager()
mail.settings.server = 'logging' or 'smtp.gmail.com:587' # your SMTP server
mail.settings.sender = 'you@gmail.com' # your email
mail.settings.login = 'username:password' # your credentials or None
auth.settings.hmac_key = 'sha512:2a404a87-ac69-4edd-bfce-f84d671bdd42' # before define_tables()
auth.define_tables() # creates all needed tables
auth.settings.mailer = mail # for user email verification
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.messages.verify_email = 'Click on the link http://'+request.env.http_host+URL(r=request,c='default',f='user',args=['verify_email'])+'/%(key)s to verify your email'
auth.settings.reset_password_requires_verification = True
auth.messages.reset_password = 'Click on the link http://'+request.env.http_host+URL(r=request,c='default',f='user',args=['reset_password'])+'/%(key)s to reset your password'
#########################################################################
## If you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.
## register with janrain.com, uncomment and customize following
# from gluon.contrib.login_methods.rpx_account import RPXAccount
# auth.settings.actions_disabled=['register','change_password','request_reset_password']
# auth.settings.login_form = RPXAccount(request, api_key='...',domain='...',
# url = "http://localhost:8000/%s/default/user/login" % request.application)
## other login methods are in gluon/contrib/login_methods
#########################################################################
crud.settings.auth = None # =auth to enforce authorization on crud
#########################################################################
## Define your tables below (or better in another model file) for example
##
## >>> db.define_table('mytable',Field('myfield','string'))
##
## Fields can be 'string','text','password','integer','double','boolean'
## 'date','time','datetime','blob','upload', 'reference TABLENAME'
## There is an implicit 'id integer autoincrement' field
## Consult manual for more options, validators, etc.
##
## More API examples for controllers:
##
## >>> db.mytable.insert(myfield='value')
## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)
## >>> for row in rows: print row.id, row.myfield
#########################################################################
##db = DAL("pgsql://reingart:mypass@localhost:5432/911", pool_size=10)
|
sinraf96/electrum
|
qa/rpc-tests/bip9-softforks.py
|
Python
|
mit
| 9,959
| 0.004519
|
#!/usr/bin/env python3
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
from io import BytesIO
import time
import itertools
'''
This test is meant to exercise BIP forks
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 blocks and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
'''
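# Worked check of the signalling counts in the plan above (regtest threshold is
# 108 of 144 blocks per period): Test 2 mines 100 signalling + 44 non-signalling
# blocks, and 100 < 108 keeps the fork in STARTED; Test 3 mines 108 + 36, and
# 108 >= 108 moves it to LOCKED_IN, with ACTIVE one full 144-block period later.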
class BIP9SoftForksTest(ComparisonTestFramework):
def __init__(self):
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
raise IndexError('key:"%s" not found' % key)
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert_equal(tmpl['version'], 0x20000000)
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 2
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 4
# 143 more version 536870913 blocks (waiting period-1)
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 5
# Check that the new rule is enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert(not (tmpl['version'] & (1 << bitno)))
# Test 6
# Check that the new sequence lock rules are enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
|
seecr/meresco-lucene
|
meresco/lucene/composedquery.py
|
Python
|
gpl-2.0
| 11,941
| 0.003015
|
## begin license ##
#
# "Meresco Lucene" is a set of components and tools to integrate Lucene into Meresco
#
# Copyright (C) 2013-2016, 2019-2021 Seecr (Seek You Too B.V.) https://seecr.nl
# Copyright (C) 2013-2014 Stichting Bibliotheek.nl (BNL) http://www.bibliotheek.nl
# Copyright (C) 2015-2016, 2019 Koninklijke Bibliotheek (KB) http://www.kb.nl
# Copyright (C) 2016, 2020-2021 Stichting Kennisnet https://www.kennisnet.nl
# Copyright (C) 2021 Data Archiving and Network Services https://dans.knaw.nl
# Copyright (C) 2021 SURF https://www.surf.nl
# Copyright (C) 2021 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl
#
# This file is part of "Meresco Lucene"
#
# "Meresco Lucene" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Lucene" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Lucene"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from .utils import simplifiedDict
from meresco.components.json import JsonDict
from simplejson.decoder import JSONDecodeError
class ComposedQuery(object):
def __init__(self, resultsFromCore, query=None):
self.cores = set()
self._queries = {}
self._filterQueries = {}
self._excludeFilterQueries = {}
self._facets = {}
self._drilldownQueries = {}
self._otherCoreFacetFilters = {}
self._rankQueries = {}
self._matches = {}
self._unites = []
self._sortKeys = []
self.resultsFrom = resultsFromCore
if query:
self.setCoreQuery(resultsFromCore, query=query)
else:
self.cores.add(resultsFromCore)
def _makeProperty(name, defaultValue=None):
return property(
fget=lambda self: getattr(self, name, defaultValue),
fset=lambda self, value: setattr(self, name, value)
)
stop = _makeProperty('_stop')
start = _makeProperty('_start')
sortKeys = _makeProperty('_sortKeys')
suggestionRequest = _makeProperty('_suggestionRequest')
dedupField = _makeProperty('_dedupField')
dedupSortField = _makeProperty('_dedupSortField')
storedFields = _makeProperty('_storedFields')
clustering = _makeProperty('_clustering')
clusteringConfig = _makeProperty('_clusteringConfig')
unqualifiedTermFields = _makeProperty('_unqualifiedTermFields')
rankQueryScoreRatio = _makeProperty('_rankQueryScoreRatio')
relationalFilterJson = _makeProperty('_relationalFilterJson')
del _makeProperty
def setCoreQuery(self, core, query, filterQueries=None, facets=None):
self.cores.add(core)
self._queries[core] = query
if not filterQueries is None:
for filterQuery in filterQueries:
self.addFilterQuery(core, filterQuery)
if not facets is None:
for facet in facets:
self.addFacet(core, facet)
return self
def addFilterQuery(self, core, query):
self.cores.add(core)
self._filterQueries.setdefault(core, []).append(query)
return self
def addExcludeFilterQuery(self, core, query):
self.cores.add(core)
self._excludeFilterQueries.setdefault(core, []).append(query)
return self
def addFacet(self, core, facet):
self.cores.add(core)
self._facets.setdefault(core, []).append(facet)
return self
def addDrilldownQuery(self, core, drilldownQuery):
self.cores.add(core)
self._drilldownQueries.setdefault(core, []).append(drilldownQuery)
return self
def addOtherCoreFacetFilter(self, core, query):
self.cores.add(core)
self._otherCoreFacetFilters.setdefault(core, []).append(query)
return self
def setRankQuery(self, core, query):
self.cores.add(core)
self._rankQueries[core] = query
return self
def addMatch(self, matchCoreASpec, matchCoreBSpec):
self._matches[(matchCoreASpec['core'], matchCoreBSpec['core'])] = (matchCoreASpec, matchCoreBSpec)
resultsFromCoreSpecFound = False
for matchCoreSpec in [matchCoreASpec, matchCoreBSpec]:
coreName = matchCoreSpec['core']
if coreName == self.resultsFrom:
resultsFromCoreSpecFound = True
try:
matchCoreSpec['uniqueKey']
except KeyError:
raise ValueError("Match for result core '%s' must have a uniqueKey specification." % self.resultsFrom)
if not resultsFromCoreSpecFound:
raise ValueError("Match that does not include resultsFromCore ('%s') not yet supported" % self.resultsFrom)
return self
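# Minimal usage sketch of the chaining API above (core names and query strings
# are hypothetical; the spec keys 'core', 'uniqueKey' and 'key' follow addMatch):
#     cq = ComposedQuery('coreA', query='some query')
#     cq.setCoreQuery('coreB', query='other query').addFilterQuery('coreB', 'a filter')
#     cq.addMatch({'core': 'coreA', 'uniqueKey': 'keyA'}, {'core': 'coreB', 'key': 'keyB'})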
def addUnite(self, uniteCoreASpec, uniteCoreBSpec):
if len(self.unites) > 0:
raise ValueError("No more than 1 addUnite supported")
for uniteCoreSpec in (uniteCoreASpec, uniteCoreBSpec):
self.cores.add(uniteCoreSpec['core'])
self._unites.append(Unite(self, uniteCoreASpec, uniteCoreBSpec))
return self
def addSortKey(self, sortKey):
core = sortKey.get('core', self.resultsFrom)
self.cores.add(core)
self._sortKeys.append(sortKey)
def queryFor(self, core):
return self._queries.get(core)
def excludeFilterQueriesFor(self, core):
return self._excludeFilterQueries.get(core, [])
def filterQueriesFor(self, core):
return self._filterQueries.get(core, [])
def facetsFor(self, core):
return self._facets.get(core, [])
def drilldownQueriesFor(self, core):
return self._drilldownQueries.get(core, [])
def otherCoreFacetFiltersFor(self, core):
return self._otherCoreFacetFilters.get(core, [])
def rankQueryFor(self, core):
return self._rankQueries.get(core)
def keyName(self, core, otherCore):
if core == otherCore: #TODO: Needed for filters/rank's in same core as queried core
for matchCoreASpec, matchCoreBSpec in self._matches.values():
if matchCoreASpec['core'] == core:
coreSpec = matchCoreASpec
break
elif matchCoreBSpec['core'] == core:
coreSpec = matchCoreBSpec
break
else:
coreSpec, _ = self._matchCoreSpecs(core, otherCore)
return coreSpec.get('uniqueKey', coreSpec.get('key'))
def keyNames(self, core):
keyNames = set()
for coreName in self.cores:
if coreName != core:
keyNames.add(self.keyName(core, coreName))
return keyNames
def queriesFor(self, core):
return [q for q in [self.queryFor(core)] + self.filterQueriesFor(core) if q]
@property
def unites(self):
return self._unites[:]
@property
def filterQueries(self):
return self._filterQueries.items()
@property
def numberOfUsedCores(self):
return len(self.cores)
def isSingleCoreQuery(self):
return self.numberOfUsedCores == 1
def coresInMatches(self):
return set(c for matchKey in self._matches.keys() for c in matchKey)
def validate(self):
for core in self.cores:
if core == self.resultsFrom:
continue
try:
self._matchCoreSpecs(self.resultsFrom, core)
except KeyError:
raise ValueError("No match set for cores %s" % str((self.resultsFrom, core)))
if self.relationalFilterJson:
try:
JsonDict.loads(self.relationalFilterJson)
except JSONDecodeError:
raise ValueError("Value '%s' for 're
|
googleapis/python-analytics-admin
|
samples/generated_samples/analyticsadmin_v1alpha_generated_analytics_admin_service_approve_display_video360_advertiser_link_proposal_sync.py
|
Python
|
apache-2.0
| 1,697
| 0.002946
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ApproveDisplayVideo360AdvertiserLinkProposal
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-analytics-admin
# [START analyticsadmin_v1alpha_generated_AnalyticsAdminService_ApproveDisplayVideo360AdvertiserLinkProposal_sync]
from google.analytics import admin_v1alpha
def sample_approve_display_video360_advertiser_link_proposal():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ApproveDisplayVideo360AdvertiserLinkProposalRequest(
name="name_value",
)
# Make the request
response = client.approve_display_video360_advertiser_link_proposal(request=request)
# Handle the response
print(response)
# [END analyticsadmin_v1alpha_generated_AnalyticsAdminService_ApproveDisplayVideo360AdvertiserLinkProposal_sync]
|
dennisding/ether
|
protocols/game_to_game.py
|
Python
|
apache-2.0
| 472
| 0.040254
|
# -*- encoding:utf-8 -*-
from network.protocol import protocol, pdef, pret
class GameToGame:
create_stub_proxy = protocol(
pdef('Eid', 'eid'),
pdef('Str', 'name'),
pdef('Int', 'gameid'),
)
entity_msg = protocol(
pdef('Eid', 'eid'),
pdef('Bytes', 'data'),
)
entity_msg_with_return = protocol(
pdef('Eid', 'eid'),
pdef('Token', 'token'),
pdef('Bytes', 'data')
)
entity_msg_return = protocol(
pdef('Token', 'token'),
pdef('Bytes', 'data'),
)
|
pramodh-bn/learn-data-edx
|
Week 7/qp.py
|
Python
|
unlicense
| 6,393
| 0.020178
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 15 15:55:28 2013
@author: dyanna
"""
import numpy as np
from sklearn.svm import SVC
def getSample(pointA, pointB, numberOfPoints):
pointList = list(zip(np.random.uniform(-1,1.00,numberOfPoints),np.random.uniform(-1,1.00,numberOfPoints)))
sample = np.array([(i[0], i[1], isLeft(pointA, pointB, i)) for i in pointList])
y = sample[:,2]
breakpoint = False
while not breakpoint:
if(len(y[y==-1]) == 0 or len(y[y==1]) == 0):
pointList = list(zip(np.random.uniform(-1,1.00,numberOfPoints),np.random.uniform(-1,1.00,numberOfPoints)))
sample = np.array([(i[0], i[1], isLeft(pointA, pointB, i)) for i in pointList])
y = sample[:,2]
else:
breakpoint = True
return sample
def getRandomLine():
return list(zip(np.random.uniform(-1,1.00,2),np.random.uniform(-1,1.00,2)))
def getPoints(numberOfPoints):
pointList = list(zip(np.random.uniform(-1,1.00,numberOfPoints),np.random.uniform(-1,1.00,numberOfPoints)))
return pointList
def isLeft(a, b, c):
return 1 if ((b[0] - a[0])*(c[1] - a[1]) - (b[1] - a[1])*(c[0] - a[0])) > 0 else -1;
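# e.g. isLeft((0, 0), (1, 0), (0, 1)) == 1: the cross product
# (b-a) x (c-a) = 1*1 - 0*0 = 1 > 0, so c lies to the left of the line a->b.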
def sign(x):
return 1 if x > 0 else -1
def getMisMatchesQP(data, clf):
#print(data)
data_x = np.c_[data[:,0], data[:,1]]
results = clf.predict(data_x)
#print(np.sign(results))
print("mismatch ", float(len(data) - np.sum(np.sign(results) == np.sign(data[:,2])))/len(data))
print("score ", clf.score(data_x, data[:,2]))
return float(len(data) - np.sum(np.sign(results) == np.sign(data[:,2])))/len(data)
def doMonteCarloQP(pointa, pointb, clf, nopoint):
#print "weights ", weight
points = [(np.random.uniform(-1,1), np.random.uniform(-1,1)) for i in range(nopoint)]
#print points
dataset_Monte = np.array([(i[0],i[1], isLeft(pointa,pointb,i)) for i in points])
#print dataset_Monte
return getMisMatchesQP(dataset_Monte, clf)
def doPLA(sample):
w = np.array([0,0,0])
iteration = 0
it = 0
while True:#(it < 10):
iteration = iteration + 1
it = it + 1
mismatch = list()
for i in sample:
#print("point in question ", i , " weight ", w)
yy = w[0] + w[1] * i[0] + w[2] * i[1]
#print("this is after applying weight to a point ",yy)
point = [i[0], i[1], sign(yy)]
if any(np.equal(sample, point).all(1)):
#print "point not in sample"
if(point[2] == -1):
mismatch.append((1, (i[0]), (i[1])))
else:
mismatch.append((-1, -(i[0]), -(i[1])))
#print " length ", len(mismatch), " mismatch list ",mismatch
if(len(mismatch) > 0):
#find a random point and update w
choiceIndex = np.random.randint(0, len(mismatch))
choice = mismatch[choiceIndex]
#print("choice ", choice)
w = w + choice
#print "new weight ", w
else:
break
#print("this is the iteration ", iteration)
#print("this is the weight ", w)
#montelist = [monetcarlo((x1,y1),(x2,y2),w,10000) for i in range(5)]
#print("Montelist " , montelist)
#monteavg = sum([i for i in montelist])/10
return w, iteration
def getMisMatches(data, weights):
#print data
list1 = np.empty(len(data))
list1.fill(weights[0])
results = list1+ weights[1]*data[:,0]+weights[2]*data[:,1]
results = -1 * results
return float(len(data) - np.sum(np.sign(results) == np.sign(data[:,2])))/len(data)
def doMonteCarloNP(pointa, pointb, weights, nopoint):
#print "weights ", weight
points = [(np.random.uniform(-1,1), np.random.uniform(-1,1)) for i in range(nopoint)]
#print points
dataset_Monte = np.array([(i[0],i[1], isLeft(pointa,pointb,i)) for i in points])
#print dataset_Monte
return getMisMatches(dataset_Monte, weights)
if __name__ == "__main__":
'''X = np.array([[-1,-1],[-2,-1], [1,1], [2,1]])
y = np.array([1,1,2,2])
clf = SVC()
clf.fit(X,y)
print(clf.predict([[-0.8,-1]]))'''
#clf = SVC()
clf = SVC(C = 1000, kernel = 'linear')
monteavgavgQP = list()
monteavgavgPLA = list()
approxavgQP = list()
vectornumberavg = list()
predictavg = list()
for j in range(1):
#clf = SVC(C = 1000, kernel = 'linear')
monteavgQP = list()
monteavgPLA = list()
approxQP = list()
vectoravg = list()
for k in range(1000):
nopoints = 100
line = getRandomLine()
sample = getSample(line[0], line[1], nopoints)
#print(sample)
X = np.c_[sample[:,0], sample[:,1]]
y = sample[:,2]
#print(y)
clf.fit(X,y)
#print(clf.score(X,y))
w, it = doPLA(sample)
#print(len(clf.support_vectors_))
#print(clf.support_vectors_)
#print(clf.support_)
vectoravg.append(len(clf.support_vectors_))
#print(clf.predict(clf.support_vectors_)==1)
#print(clf.predict(clf.support_vectors_))
#print(clf.coef_)
montelistQP = [doMonteCarloQP(line[0], line[1], clf, 500) for i in range(1)]
qpMonte = sum(montelistQP)/len(montelistQP)
monteavgQP.append(sum(montelistQP)/len(montelistQP))
montelist = [ doMonteCarloNP(line[0], line[1], w, 500) for i in range(1)]
plaMonte = sum(montelist)/len(montelist)
monteavgPLA.append(plaMonte)
if(qpMonte < plaMonte):
approxQP.append(1)
else:
approxQP.append(0)
#print(sum(monteavgQP)/len(monteavgQP))
#print(sum(monteavgPLA)/len(monteavgPLA))
#print(sum(approxQP)/len(approxQP))
monteavgavgQP.append(sum(monteavgQP)/len(monteavgQP))
monteavgavgPLA.append(sum(monteavgPLA)/len(monteavgPLA))
approxavgQP.append(sum(approxQP)/len(approxQP))
vectornumberavg.append(sum(vectoravg)/len(vectoravg))
print(sum(monteavgavgQP)/len(monteavgavgQP))
print(sum(monteavgavgPLA)/len(monteavgavgPLA))
print("how good is it? ", sum(approxavgQP)/len(approxavgQP))
print("how good is it? ", sum(vectornumberavg)/len(vectornumberavg))
|
yosi-dediashvili/SubiT
|
src/Settings/Updaters/WinUpdaterHelper.py
|
Python
|
gpl-3.0
| 5,462
| 0.005859
|
""" An external application for performing the update after is was downloaded
from the net. This file is compiled into an exe file for windows platform
and launch when the ApplyUpdate method of WinUpdater is called.
The module will extract the zip file if it exists, and execute the update
manifest for the windows (update_manifest_for_win.py) platform if it exists
inside the zip.
The executable should get the path to the update file as the first arg,
otherwise, immidiate exit will be performed. Any other parameters that was
passed to SubiT in the first place, should be passed after update file
parameter.
"""
import sys
import zipfile
import os
import shutil
from Settings.Config import CONFIG_FILE_NAME, SubiTConfig
from Utils import GetProgramDir
_subit_dir = None
def SubitDir():
global _subit_dir
if not _subit_dir:
# Because we're running from the standalone of the updater, the
# function will return The inner directory, and not SubiT's root one
_subit_dir = GetProgramDir().replace('Settings\\Updaters', '')
return _subit_dir
def PrintOut(message):
print(message)
MANIFEST_NAME = 'update_manifest_for_win.py'
SUBIT_PATH = os.path.join(SubitDir(), 'SubiT.exe')
def start_subit():
PrintOut('Starting SubiT')
args = sys.argv
# Replace the updater path with subit's path
args[0] = SUBIT_PATH
# remove the update_file_path from the parameters
args.pop(1)
os.execv(SUBIT_PATH, args)
def main(update_file_path):
if not os.path.exists(update_file_path):
PrintOut('Update file is missing: %s' % update_file_path)
return
with zipfile.ZipFile(update_file_path) as zfile:
# Path of the config file inside the zip file
_config_path_in_zip = '%s/%s' % \
('Settings', CONFIG_FILE_NAME)
if MANIFEST_NAME in zfile.namelist():
PrintOut('There is a manifest in the zip file')
manifest_content = zfile.open(MANIFEST_NAME).read()
exec(manifest_content)
else:
PrintOut('No manifest file in update zip')
for _file_info in zfile.infolist():
# If the file is the config file, we place it under a different
# name in the Settings directory, and call the upgrade() method
# of SubiTConfig, and remove it afterwards.
if _file_info.filename == _config_path_in_zip:
_new_config_file_name = _config_path_in_zip.replace\
(CONFIG_FILE_NAME, 'new_config.ini')
_new_config_full_path = os.path.join\
(SubitDir(), _new_config_file_name)
_file_info.filename = _new_config_file_name
PrintOut('Extracting new config file: %s' % _new_config_file_name)
zfile.extract(_file_info, SubitDir())
PrintOut('New config file extracted: %s' % _new_config_file_name)
# Call the __init__ of SubiTConfig in order to set our path to
# the config file correctly
SubiTConfig(os.path.join\
(SubitDir(), 'Settings', CONFIG_FILE_NAME))
SubiTConfig.Singleton().upgrade(_new_config_full_path)
os.remove(_new_config_full_path)
# For each folder in the zip that is placed under the root and
# is not the Settings directory, we Check if it's already
# exists in the program's dir, and if so, remove it before
# extracting the new file. We do so in order to prevent
# conflicts between files and duplication (in providers for
# example, i.e. same provider but with different file name)
elif (_file_info.file_size == 0 and
# str.index gives the first index of '/'. We use that
# fact to check if the first index is at the end of the
# filename and therefor, the directory is under the root
# and not inside another directory.
_file_info.filename.index('/') == \
len(_file_info.filename) - 1 and
_file_info.filename != 'Settings/'):
_dir_name = os.path.join\
(SubitDir(), _file_info.filename.replace('/', ''))
if os.path.exists(_dir_name):
PrintOut('Deleting: %s' % _dir_name)
try:
shutil.rmtree(_dir_name)
except Exception as eX:
PrintOut('Failed on deletion: %s->%s' % (_dir_name, eX))
# Extract all the files except for the manifest which we already
# took care of.
elif _file_info.filename != MANIFEST_NAME:
PrintOut('Extracting: %s' % _file_info.filename)
zfile.extract(_file_info, SubitDir())
PrintOut('Extracted: %s' % _file_info.filename)
PrintOut('Removing update file')
os.remove(update_file_path)
start_subit()
if __name__ == '__main__':
if len(sys.argv) > 1:
main(sys.argv[1])
else:
PrintOut('Update file path should be passed as 2nd arg!')
PrintOut('Usage: %s <update file> [original args]' % os.path.basename(sys.executable))
|
Oozemeister/Gypsy
|
modules/pubmsg/fortunes.py
|
Python
|
gpl-2.0
| 393
| 0.020356
|
import random
class fortunes:
def on_pubmsg(self, nick, connection, event):
message = event.arguments()[0]
source = event.source().split('!')[0]
if message.startswith(".fortune"):
fortunestxt = open("modules/pubmsg/fortunes", 'r')
fortuneslist = fortunestxt.read().splitlines()
response = random.choice(fortuneslist)
connection.privmsg(event.target(), response)
|
Junji110/elephant
|
elephant/spectral.py
|
Python
|
bsd-3-clause
| 20,790
| 0.000192
|
# -*- coding: utf-8 -*-
"""
Identification of spectral properties in analog signals (e.g., the power
spectrum).
:copyright: Copyright 2015-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
import warnings
import numpy as np
import scipy.signal
import scipy.fftpack as fftpack
import scipy.signal.signaltools as signaltools
from scipy.signal.windows import get_window
from six import string_types
import quantities as pq
import neo
def _welch(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None,
nfft=None, detrend='constant', scaling='density', axis=-1):
"""
A helper function to estimate cross spectral density using Welch's method.
This function is a slightly modified version of `scipy.signal.welch()` with
modifications based on `matplotlib.mlab._spectral_helper()`.
Welch's method [1]_ computes an estimate of the cross spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the cross-periodograms.
Parameters
----------
x, y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series in units of Hz.
Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg / 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
Defaults to 'constant'.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where Pxx has units of V**2/Hz if x is measured in V and computing
the power spectrum ('spectrum') where Pxx has units of V**2 if x is
measured in V. Defaults to 'density'.
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross spectrum of x and y.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
"""
# TODO: This function should be replaced by `scipy.signal.csd()`, which
# will appear in SciPy 0.16.0.
# The checks for if y is x are so that we can use the same function to
# obtain both power spectrum and cross spectrum without doing extra
# calculations.
same_data = y is x
# Make sure we're dealing with a numpy array. If y and x were the same
# object to start with, keep them that way
x = np.asarray(x)
if same_data:
y = x
else:
if x.shape != y.shape:
raise ValueError("x and y must be of the same shape.")
y = np.asarray(y)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data:
y = np.rollaxis(y, axis, len(y.shape))
if x.shape[-1] < nperseg:
warnings.warn('nperseg = %d, is greater than x.shape[%d] = %d, using '
'nperseg = x.shape[%d]'
% (nperseg, axis, x.shape[axis], axis))
nperseg = x.shape[-1]
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] > x.shape[-1]:
raise ValueError('window is longer than x.')
nperseg = win.shape[0]
if scaling == 'density':
scale = 1.0 / (fs * (win * win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if noverlap is None:
noverlap = nperseg // 2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
if not hasattr(detrend, '__call__'):
detrend_func = lambda seg: signaltools.detrend(seg, type=detrend)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(seg):
seg = np.rollaxis(seg, -1, axis)
seg = detrend(seg)
return np.rollaxis(seg, axis, len(seg.shape))
else:
detrend_func = detrend
step = nperseg - noverlap
indices = np.arange(0, x.shape[-1] - nperseg + 1, step)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind + nperseg])
xft = fftpack.fft(x_dt * win, nfft)
if same_data:
yft = xft
else:
y_dt = detrend_func(y[..., ind:ind + nperseg])
yft = fftpack.fft(y_dt * win, nfft)
if k == 0:
Pxy = (xft * yft.conj())
else:
Pxy *= k / (k + 1.0)
Pxy += (xft * yft.conj()) / (k + 1.0)
Pxy *= scale
f = fftpack.fftfreq(nfft, 1.0 / fs)
if axis != -1:
Pxy = np.rollaxis(Pxy, -1, axis)
return f, Pxy
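# Minimal sketch of the "y is x" shortcut noted above: passing the same array
# object twice makes _welch return the auto power spectral density of x (the
# frequency axis is two-sided because fftpack.fftfreq is used).
#     import numpy as np
#     x = np.random.randn(4096)
#     f, Pxx = _welch(x, x, fs=1000.0, nperseg=256)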
def welch_psd(signal, num_seg=8, len_seg=None, freq_res=None, overlap=0.5,
fs=1.0, window='hanning', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimates power spectrum density (PSD) of a given AnalogSignal using
Welch's method, which works in the following steps:
1. cut the given data into several overlapping segments. The degree of
overlap can be specified by parameter *overlap* (default is 0.5,
i.e. segments are overlapped by the half of their length).
The number and the length of the segments are determined according
to parameter *num_seg*, *len_seg* or *freq_res*. By default, the
data is cut into 8 segments.
2. apply a window function to each segment. Hanning window is used by
default. This can be changed by giving a window function or an
array as parameter *window* (for details, see the docstring of
`scipy.signal.welch()`)
3. compute the periodogram of each segment
4. average the obtained periodograms to yield PSD estimate
These steps are implemented in `scipy.signal`, and this function is a
wrapper which provides a proper set of parameters to
`scipy.signal.welch()`. Some parameters for scipy.signal.welch(), such as
|
koodaamo/jetstream
|
tests/test_jetstream.py
|
Python
|
gpl-3.0
| 392
| 0.005102
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_jetstream
----------------------------------
Tests for `jetstream` module.
"""
import unittest
from jetstream import jetstream
class TestJetstream(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
adrien-mogenet/opengossip
|
src/python/analyzer/hierarchical-clustering.py
|
Python
|
gpl-3.0
| 7,280
| 0.001099
|
#!/usr/bin/env python
# This file is part of OpenGossip.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
# Adapted from Jan Erik Solem
# Link:
# http://www.janeriksolem.net/2009/04/hierarchical-clustering-in-python.html
# Partially taken and modified from the example in "Programming Collective
# Intelligence" by Toby Segaran (O'Reilly Media 2007, page 33).
import sys
import math
import numpypy # Comment if you don't use pypy
from numpy import *
from ringbuffer import RingBuffer
from numericringbuffer import NumericRingBuffer
BUFFER_SIZE = 20
def squared_euclidian(v, w):
"""Square euclidian distance function between two vectors"""
return sum((v - w) ** 2)
def euclidian(v, w):
"""Standard euclidian distance function, for standard use"""
return sqrt(sum((v - w) ** 2))
def merge_vectors(v, w):
"""Compute an average merged vector from v and w."""
return [(v[i] + w[i]) / 2.0 for i in range(len(v))]
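# e.g. merge_vectors([1, 2], [3, 4]) == [2.0, 3.0]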
class ClusterNode:
"""Represents a node of hierarchical tree that will be built by our
algorithm."""
def __init__(self, vec, left=None, right=None, distance=0.0, id=None,
meta={}):
"""Ctor
Args:
vector: vector of features
left: left ClusterNode's child
right: right ClusterNode's child
distance: current's distance
id: used when clustering. Positive id means it's a leaf
count: used for weighted average
meta: map of extra metadata."""
self.left = left
self.right = right
self.vec = vec
self.id = id
self.distance = distance
self.meta = meta
class HierarchicalClassifier(object):
def __init__(self, output_folder):
"""Constructor. 'Counter' is the number of values we have added
so far."""
self.values = NumericRingBuffer(BUFFER_SIZE)
self.nodes = []
self.counter = 0
self.output = open(output_folder + '/anomalies.dat', 'wb')
self.orig = open(output_folder + '/original-serie.dat', 'wb')
def __del__(self):
"""Destructor. Properly close opened file descriptors."""
self.output.close()
self.orig.close()
def add(self, value):
"""Add a new value. We store the curr
|
ent number of the value in the
map of metadata in 'n' key."""
self.values.append(value)
self.counter += 1
if self.values.size > 1:
self.orig.write(str(value) + '\n')
vector = [
self.values.mean(),
self.values.shannon_entropy(),
self.values.variance(),
self.values.expected_value(),
]
metadata = { 'n': self.counter, 'v': value }
self.nodes.append(ClusterNode(vec=vector, meta=metadata))
def build_set_rec(self, tree, marker):
"""Fill an array recursively from given tree."""
if not tree:
return []
current = []
if tree.id > 0:
current = [(tree.meta['n'], marker)]
return current + self.build_set_rec(tree.left, marker) \
+ self.build_set_rec(tree.right, marker)
def build_sets(self, tree):
"""Build two classes from the given tree."""
return [] + self.build_set_rec(tree.left, 0) \
+ self.build_set_rec(tree.right, 1)
def find_anomalies(self):
"""Try to find anomalies according to what we have seen so far."""
tree = self.hcluster(self.nodes, squared_euclidian)
sets = self.build_sets(tree)
sets = sorted(sets, key = lambda elt: elt[0])
for elt in sets:
self.output.write(str(int(elt[0])) + ' ' + str(elt[1]) + '\n')
def hcluster(self, nodes, distance=euclidian):
"""Classif list of elements.
Principle: each row start within it's individual cluster, then the
matrix is processed to find closest rows until each row fits in a
global hierarchical tree.
Args:
nodes: array of ClusterNode's
distance: function computing distance between 2 vectors"""
distances = {} # cache of (v, w) distances
currentclustid = -1
# clusters are initially just the individual rows
clust = [ClusterNode(vec=array(nodes[i].vec), id=i,
meta=nodes[i].meta) \
for i in range(len(nodes))]
while len(clust) > 1:
print('%d remaining clusters' % len(clust))
lowestpair = (0, 1)
closest = distance(clust[0].vec, clust[1].vec)
# loop through every pair looking for the smallest distance
# v_id and w_id are made local variable to avoid slow lookup
# several times. The try/except statement is prefered as well
# for performance issues (compared to `key not in distances`)
for i in range(len(clust)):
for j in range(i + 1, len(clust)):
v_id = clust[i].id
w_id = clust[j].id
try:
d = distances[(v_id, w_id)]
except KeyError:
distances[(v_id, w_id)] = \
distance(clust[i].vec, clust[j].vec)
d = distances[(v_id, w_id)]
if d < closest:
closest = d
lowestpair = (i, j)
# calculate the average of the two clusters
merged_vector = merge_vectors(clust[lowestpair[0]].vec,
clust[lowestpair[1]].vec)
# create the new cluster
newcluster = ClusterNode(array(merged_vector),
left=clust[lowestpair[0]],
right=clust[lowestpair[1]],
distance=closest,
id=currentclustid)
# cluster ids that weren't in the original set are negative
currentclustid -= 1
del clust[lowestpair[1]]
del clust[lowestpair[0]]
clust.append(newcluster)
return clust[0]
if __name__ == "__main__":
if len(sys.argv) != 3:
print('Usage: hierarchical-clustering.py INPUT OUTPUT_FOLDER')
sys.exit(1)
filename = sys.argv[1]
output = sys.argv[2]
c = HierarchicalClassifier(output)
f = open(filename, 'rb')
for line in f:
c.add(float(line.split(' ')[1].replace(',', '.')))
c.find_anomalies()
f.close()
|
rajeevs1992/pyhealthvault
|
src/healthvaultlib/itemtypes/medication.py
|
Python
|
mit
| 865
| 0
|
from lxml import etree
from healthvaultlib.utils.xmlutils import XmlUtils
from healthvaultlib.itemtypes.healthrecorditem import HealthRecordItem
class Medication(HealthRecordItem):
def __init__(self, thing_xml=None):
super(Medication, self).__init__()
self.type_id = '30cafccc-047d-4288-94ef-643571f7919d'
if thing_xml is not None:
self.thing_xml = thing_xml
self.parse_thing()
def __str__(self):
return 'Medication'
def parse_thing(self):
super(Medication, self).parse_thing()
xmlutils = XmlUtils(self.thing_xml)
def write_xml(self):
thing = super(Medication, self).write_xml()
data_xml = etree.Element('data-xml')
medication = etree.Element('medication')
data_xml.append(medication)
thing.append(data_xml)
return thing
|
hamish2014/batchOpenMPI
|
examples/ex3.py
|
Python
|
mit
| 719
| 0.027816
|
"""
example with 'calls_expected' in addtoBatch used
"""
import batchOpenMPI
def f_mult(x) :
return x*2.0
f = batchOpenMPI.batchFunction(f_mult) #creating function wrapper
batchOpenMPI.begin_MPI_loop() # both the workers and the master process run the same code up until here
f.addtoBatch(4,calls_expected=4)
batchOpenMPI.processBatch() #get the workers to calculate all the inputs
res = [f(4),f(4),f(4)]
print(res)
#another test
f.addtoBatch(1)
batchOpenMPI.processBatch()
|
#get the workers to calculate all the inputs
res = f(1), f(1)
batchOpenMPI.end_MPI_loop(print_stats=True) #releases workers
print("*** jobs executed by workers should be 2 ,(5 calls made),jobs uncollected should = 1, jobs_master=1")
|
Caranarq/01_Dmine
|
04_Edificaciones/P0406/P0406.py
|
Python
|
gpl-3.0
| 2,794
| 0.006102
|
# -*- coding: utf-8 -*-
"""
Started on mon, apr 23rd, 2018
@author: carlos.arana
"""
# Libraries used
import pandas as pd
import sys
module_path = r'D:\PCCS\01_Dmine\Scripts'
if module_path not in sys.path:
sys.path.append(module_path)
from VarInt.VarInt import VarInt
from classes.Meta import Meta
from Compilador.Compilador import compilar
"""
The local libraries used in the lines above are available at the following locations:
SCRIPT: | AVAILABLE AT:
------ | ------------------------------------------------------------------------------------
VarInt | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/VarInt
Meta | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Classes
Compilador | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Compilador
"""
# Parameter documentation ---------------------------------------------------------------------------------------------
# Parameter descriptions
M = Meta
M.ClaveParametro = 'P0406'
M.NombreParametro = 'Viviendas urbanas en PCU U1 y U2'
M.DescParam = 'Numero de viviendas dentro de Perimetros de Contención Urbana tipo U1 o U2, por ciudad'
M.UnidadesParam = 'Numero de viviendas'
M.TituloParametro = 'VPCU' # Para nombrar la columna del parametro
M.PeriodoParam = '2018'
M.TipoInt = 1
# Handlings
M.ParDtype = 'float'
M.TipoVar = 'C' # (Tipos de Variable: [C]ontinua, [D]iscreta [O]rdinal, [B]inaria o [N]ominal)
M.array = []
M.TipoAgr = 'sum'
# Descriptions of the data-mining process
M.nomarchivodataset = 'Rep_Viv_Vig'
M.extarchivodataset = 'xlsx'
M.ContenidoHojaDatos = 'Viviendas (Tipo, Segmento, ubicacion en PCU)'
M.ClaveDataset = r'SNIIV'
M.ActDatos = '2017'
M.Agregacion = 'Se sumó el total de viviendas en PCU U1 o U2 para los municipios que integran cada ciudad del SUN'
# Descriptions generated from the parameter key
M.getmetafromds = 1
Meta.fillmeta(M)
# Parameter construction -----------------------------------------------------------------------------------------
# Load the initial dataset
dataset = pd.read_excel(M.DirFuente + '\\' + M.ArchivoDataset,
sheetname='DATOS', dtype={'CVE_MUN': 'str'})
dataset.set_index('CVE_MUN', inplace=True)
dataset = dataset.rename_axis('CVE_MUN')
dataset.head(2)
# Generate the dataset for the parameter and the Integrity Variable
dataset = dataset[(dataset['Ubicación PCU 2015'] == 'U1') | (dataset['Ubicación PCU 2015'] == 'U2')]
dsvar = 'Viviendas'
par_dataset = dataset[dsvar]
par_dataset = par_dataset.to_frame(name = M.ClaveParametro)
par_dataset, variables_dataset = VarInt(par_dataset, dataset, tipo=M.TipoInt)
# Compilation
compilar(M, dataset, par_dataset, variables_dataset)
|
deepmind/deepmind-research
|
catch_carry/trajectories.py
|
Python
|
apache-2.0
| 8,650
| 0.00659
|
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mocap trajectory that assumes props start stationary on pedestals."""
import copy
import enum
import itertools
from dm_control.locomotion.mocap import mocap_pb2
from dm_control.locomotion.mocap import trajectory
from dm_control.utils import transformations
import numpy as np
_PEDESTAL_SIZE = (0.2, 0.2, 0.02)
_MAX_SETTLE_STEPS = 100
@enum.unique
class ClipSegment(enum.Enum):
"""Annotations for subsegments within a warehouse clips."""
# Clip segment corresponding to a walker approaching an object
APPROACH = 1
# Clip segment corresponding to a walker picking up an object.
PICKUP = 2
# Clip segment corresponding to the "first half" of the walker carrying an
# object, beginning from the walker backing away from a pedestal with
# object in hand.
CARRY1 = 3
# Clip segment corresponding to the "second half" of the walker carrying an
# object, ending in the walker approaching a pedestal the object in hand.
CARRY2 = 4
# Clip segment corresponding to a walker putting down an object on a pedestal.
PUTDOWN = 5
# Clip segment corresponding to a walker backing off after successfully
# placing an object on a pedestal.
BACKOFF = 6
def _get_rotated_bounding_box(size, quaternion):
"""Calculates the bounding box of a rotated 3D box.
Args:
size: An array of length 3 specifying the half-lengths of a box.
quaternion: A unit quaternion specifying the box's orientation.
Returns:
An array of length 3 specifying the half-lengths of the bounding box of
the rotated box.
"""
corners = ((size[0], size[1], size[2]),
(size[0], size[1], -size[2]),
(size[0], -size[1], size[2]),
(-size[0], size[1], size[2]))
rotated_corners = tuple(
transformations.quat_rotate(quaternion, corner) for corner in corners)
return np.amax(np.abs(rotated_corners), axis=0)
def _get_prop_z_extent(prop_proto, quaternion):
"""Calculates the "z-extent" of the prop in given orientation.
This is the distance from the centre of the prop to its lowest point in the
world frame, taking into account the prop's orientation.
Args:
prop_proto: A `mocap_pb2.Prop` protocol buffer defining a prop.
quaternion: A unit quaternion specifying the prop's orientation.
Returns:
the distance from the centre of the prop to its lowest point in the
world frame in the specified orientation.
"""
if prop_proto.shape == mocap_pb2.Prop.BOX:
return _get_rotated_bounding_box(prop_proto.size, quaternion)[2]
elif prop_proto.shape == mocap_pb2.Prop.SPHERE:
return prop_proto.size[0]
else:
raise NotImplementedError(
'Unsupported prop shape: {}'.format(prop_proto.shape))
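# Illustrative sanity check (not part of the original module): with an
# identity quaternion the rotated bounding box equals the original
# half-lengths, and the z-extent of a box is simply its z half-length.
# `np` is already imported above; the sizes below are made-up example values.
def _bounding_box_example():
    size = np.array([0.1, 0.2, 0.3])
    identity_quat = np.array([1.0, 0.0, 0.0, 0.0])  # assumed (w, x, y, z) order
    bbox = _get_rotated_bounding_box(size, identity_quat)
    # Expect bbox to equal [0.1, 0.2, 0.3] because no rotation is applied.
    return bbox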
class WarehouseTrajectory(trajectory.Trajectory):
|
"""Mocap trajectory that assumes props start stationary on pedestals."""
def infer_pedestal_positions(self, num_averaged_steps=30,
ground_height_tolerance=0.1,
proto_modifier=None):
proto = self._proto
if proto_modifier is not None:
proto = copy.copy(proto)
proto_modifier(proto)
if not proto.props:
return []
positions = []
for timestep in itertools.islice(proto.timesteps, num_averaged_steps):
positions_for_timestep = []
for prop_proto, prop_timestep in zip(proto.props, timestep.props):
z_extent = _get_prop_z_extent(prop_proto, prop_timestep.quaternion)
positions_for_timestep.append([prop_timestep.position[0],
prop_timestep.position[1],
prop_timestep.position[2] - z_extent])
positions.append(positions_for_timestep)
median_positions = np.median(positions, axis=0)
median_positions[:, 2][median_positions[:, 2] < ground_height_tolerance] = 0
return median_positions
def get_props_z_extent(self, physics):
timestep = self._proto.timesteps[self._get_step_id(physics.time())]
out = []
for prop_proto, prop_timestep in zip(self._proto.props, timestep.props):
z_extent = _get_prop_z_extent(prop_proto, prop_timestep.quaternion)
out.append(z_extent)
return out
class SinglePropCarrySegmentedTrajectory(WarehouseTrajectory):
"""A mocap trajectory class that automatically segments prop-carry clips.
The algorithm implemented in the class only works if the trajectory consists
of exactly one walker and one prop. The value of `pedestal_zone_distance`
the exact nature of zone crossings are determined empirically from the
DeepMindCatchCarry dataset, and are likely to not work well outside of this
setting.
"""
def __init__(self,
proto,
start_time=None,
end_time=None,
pedestal_zone_distance=0.65,
start_step=None,
end_step=None,
zero_out_velocities=True):
super(SinglePropCarrySegmentedTrajectory, self).__init__(
proto, start_time, end_time, start_step=start_step, end_step=end_step,
zero_out_velocities=zero_out_velocities)
self._pedestal_zone_distance = pedestal_zone_distance
self._generate_segments()
def _generate_segments(self):
pedestal_position = self.infer_pedestal_positions()[0]
# First we find the timesteps at which the walker cross the pedestal's
# vicinity zone. This should happen exactly 4 times: enter it to pick up,
# leave it, enter it again to put down, and leave it again.
was_in_pedestal_zone = False
crossings = []
for i, timestep in enumerate(self._proto.timesteps):
pedestal_dist = np.linalg.norm(
timestep.walkers[0].position[:2] - pedestal_position[:2])
if pedestal_dist > self._pedestal_zone_distance and was_in_pedestal_zone:
crossings.append(i)
was_in_pedestal_zone = False
elif (pedestal_dist <= self._pedestal_zone_distance and
not was_in_pedestal_zone):
crossings.append(i)
was_in_pedestal_zone = True
if len(crossings) < 3:
raise RuntimeError(
'Failed to segment the given trajectory: '
'walker should cross the pedestal zone\'s boundary >= 3 times '
'but got {}'.format(len(crossings)))
elif len(crossings) == 3:
crossings.append(len(self._proto.timesteps) - 1)
elif len(crossings) > 4:
crossings = [crossings[0], crossings[1], crossings[-2], crossings[-1]]
# Identify the pick up event during the first in-zone interval.
start_position = np.array(self._proto.timesteps[0].props[0].position)
end_position = np.array(self._proto.timesteps[-1].props[0].position)
pick_up_step = crossings[1] - 1
while pick_up_step > crossings[0]:
prev_position = self._proto.timesteps[pick_up_step - 1].props[0].position
if np.linalg.norm(start_position[2] - prev_position[2]) < 0.001:
break
pick_up_step -= 1
# Identify the put down event during the second in-zone interval.
put_down_step = crossings[2]
while put_down_step <= crossings[3]:
next_position = self._proto.timesteps[put_down_step + 1].props[0].position
if np.linalg.norm(end_position[2] - next_position[2]) < 0.001:
break
put_down_step += 1
carry_halfway_step = int((crossings[1] + crossings[2]) / 2)
self._segment_intervals = {
ClipSegment.APPROACH: (0, crossings[0]),
ClipSegment.PICKUP: (crossings[0], pick_up_step),
ClipSegment.CARRY1: (pick_up_step, carry_halfway_step),
ClipSegment.CARRY2: (carry_halfway_step, crossings[2]),
C
|
bountyfunding/bountyfunding
|
bountyfunding/core/payment/paypal_standard.py
|
Python
|
agpl-3.0
| 2,667
| 0.003
|
import requests
import urllib
from bountyfunding.core.config import config
from bountyfunding.core.models import db, Payment
from bountyfunding.core.const import PaymentGateway
from bountyfunding.core.errors import Error
from bountyfunding.core.payment import get_paypal_url
class PayPalStandardGateway:
def create_payment(self, project_id, sponsorship, return_url):
"""
Returns authorization URL
"""
if not return_url:
raise Error('return_url cannot be blank')
receiver_email = config[project_id].PAYPAL_RECEIVER_EMAIL
args = {
"cmd": "_donations",
"business": receiver_email,
"item_name": "Bounty",
"amount": sponsorship.amount,
"currency_code": "EUR",
|
"no_note": 1,
"no_shipping": 1,
"return": return_url,
"cancel_return": return_url
}
redirect_url = get_paypal_url(project_id) + "?" + urllib.urlencode(args)
payment = Payment(sponsorship.project_id, sponsorship.sponsorship_id, PaymentGateway.PAYPAL_STANDARD)
payment.url = redirect_url
return payment
def process_payment(self, project_id, sponsorship, payment, details):
"""
Validates payment
"""
transaction_id = details["tx"]
payload = {
"cmd": "_notify-synch",
"at": config[project_id].PAYPAL_PDT_ACCESS_TOKEN,
"tx": transaction_id
}
# Check for reused transaction ID
if db.session.query(db.exists().where(Payment.gateway_id==transaction_id)).scalar():
return False
r = requests.post(get_paypal_url(project_id), data=payload)
lines = r.text.strip().splitlines()
if len(lines) == 0:
return False
# Check for SUCCESS word
if not lines.pop(0).strip() == "SUCCESS":
return False
# Payment validation
retrieved_payment = {}
for line in lines:
key, value = line.strip().split('=')
retrieved_payment[key] = urllib.unquote_plus(value)
receiver_email = config[project_id].PAYPAL_RECEIVER_EMAIL
# Check recipient email
if retrieved_payment['business'] != receiver_email:
return False
# Check currency
if retrieved_payment['mc_currency'] != "EUR":
return False
# Check amount
if float(retrieved_payment['mc_gross']) != sponsorship.amount:
return False
# Store transaction ID
payment.gateway_id = transaction_id
return True
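# Illustrative flow sketch (an assumption, not part of the original module):
# create_payment() builds the PayPal redirect URL for a sponsorship, the
# sponsor is sent to payment.url, and on return PayPal appends a 'tx'
# transaction id which process_payment() verifies against the PDT endpoint.
# The return URL and transaction id below are hypothetical placeholders.
#
#     gateway = PayPalStandardGateway()
#     payment = gateway.create_payment(project_id, sponsorship,
#                                      return_url='https://example.org/paid')
#     # ... redirect the sponsor to payment.url, then on return:
#     ok = gateway.process_payment(project_id, sponsorship, payment,
#                                  details={'tx': 'TRANSACTION-ID'})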
|
thenenadx/forseti-security
|
tests/common/gcp_api/compute_test.py
|
Python
|
apache-2.0
| 1,822
| 0
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the Compute client."""
import unittest
import mock
from tests.unittest_utils import ForsetiTestCase
from google.cloud.security.common.gcp_api import _base_client
from google.cloud.security.common.gcp_api import compute
from tests.common.gcp_api.test_data import fake_firewall_rules
class ComputeTest(ForsetiTestCase):
"""Test the Compute client."""
@mock.patch.object(_base_client.BaseClient, '__init__', autospec=True)
def setUp(self, mock_base_client):
"""Set up."""
self.client = compute.ComputeClient()
def test_get_firewall_rules(self):
self.client.service = mock.MagicMock()
self.client.rate_limiter = mock.MagicMock()
self.client._build_paged_result = mock.MagicMock()
self.client._build_paged_result.return_value = (
fake_firewall_rules.PAGED_RESULTS)
firewall_rules = self.client.get_firewall_rules('aaaaa')
self.assertTrue(self.client.service.firewalls.called)
self.assertTrue(
mock.call().list(project='aaaaa')
in self.client.service.firewalls.mock_calls)
self.assertEquals(fake_firewall_rules.EXPECTED_RESULTS,
firewall_rules)
if __name__ == '__main__':
unittest.main()
|
gangadhar-kadam/nassimapp
|
selling/report/customers_not_buying_since_long_time/customers_not_buying_since_long_time.py
|
Python
|
agpl-3.0
| 1,985
| 0.039798
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint
def execute(filters=None):
if not filters: filters ={}
days_since_last_order = filters.get("days_since_last_order")
if cint(days_since_last_order) <= 0:
webnotes.msgprint("Please mention positive value in 'Days Since Last Order' field",raise_exception=1)
columns = get_columns()
customers = get_so_details()
data = []
for cust in customers:
if cint(cust[8]) >= cint(days_since_last_order):
cust.insert(7,get_last_so_amt(cust[0]))
data.append(cust)
return columns, data
def get_so_details():
return webnotes.conn.sql("""select
cust.name,
cust.customer_name,
cust.territory,
cust.customer_group,
count(distinct(so.name)) as 'num_of_order',
sum(net_total) as 'total_order_value',
sum(if(so.status = "Stopped",
so.net_total * so.per_delivered/100,
so.net_total)) as 'total_order_considered',
max(so.transaction_date) as 'last_sales_order_date',
DATEDIFF(CURDATE(), max(so.transaction_date)) as 'days_since_last_order'
from `tabCustomer` cust, `tabSales Order` so
where cust.name = so.customer and so.docstatus = 1
group by cust.name
order by 'days_since_last_order' desc """,as_list=1)
def get_last_so_amt(customer):
res = webnotes.conn.sql("""select net_total from `tabSales Order`
where customer ='%(customer)s' and docstatus = 1 order by transaction_date desc
limit 1""" % {'customer':customer})
return res and res[0][0] or 0
def get_columns():
return [
"Customer:Link/Customer:120",
"Customer Name:Da
|
ta:120",
"Territory::120",
"Customer Group::120",
"Number of Order::120",
"Total Order Value:Currency:120",
"Total Order Considered:Currency:160",
"Last Order Amount:Currency:160",
"La
|
st Sales Order Date:Date:160",
"Days Since Last Order::160"
]
|
ujdhesa/unisubs
|
utils/testeditor.py
|
Python
|
agpl-3.0
| 5,102
| 0.000392
|
# -*- coding: utf-8 -*-
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.core.urlresolvers import reverse
import simplejson as json
from utils import test_factories
class TestEditor(object):
"""Simulates the editor widget for unit tests"""
def __init__(self, client, video, original_language_code=None,
base_language_code=None, mode=None):
"""Construct a TestEditor
:param client: django TestClient object for HTTP requests
:param video: Video object to edit
:param original_language_code: language code for the video audio.
Should be set if and only if the primary_audio_language_code hasn't
been set for the video.
:param base_language_code: base language code for to use for
translation tasks.
:param mode: one of ("review", "approve" or None)
"""
self.client = client
self.video = video
self.base_language_code = base_language_code
if original_language_code is None:
self.original_language_code = video.primary_audio_language_code
else:
if video.primary_audio_language_code is not None:
raise AssertionError(
"primary_audio_language_code is set (%r)" %
video.primary_audio_language_code)
self.original_language_code = original_language_code
self.mode = mode
self.task_approved = None
self.task_id = None
self.task_notes = None
self.task_type = None
def set_task_data(self, task, approved, notes):
"""Set data for the task that this edit is for.
:param task: Task object
:param approved: did the user approve the task. Should be one of the
values of Task.APPROVED_IDS.
:param notes: String to set for notes
"""
type_map = {
10: 'subtitle',
20: 'translate',
30: 'review',
40: 'approve',
}
self.task_id = task.id
self.task_type = type_map[task.type]
self.task_notes = notes
self.task_approved = approved
def _submit_widget_rpc(self, method, **data):
"""POST data to the widget:rpc view."""
url = reverse('widget:rpc', args=(method,))
post_data = dict((k, json.dumps(v)) for k, v in data.items())
response = self.client.post(url, post_data)
response_data = json.loads(response.content)
if 'error' in response_data:
raise AssertionError("Error calling widget rpc method %s:\n%s" %
(method, response_data['error']))
return response_data
def run(self, language_code, completed=True, save_for_later=False):
"""Make the HTTP requests to simulate the editor
We will use test_factories.dxfp_sample() for the subtitle data.
:param language_code: code for the language of these subtitles
:param completed: simulate the completed checkbox being set
:param save_for_later: simulate the save for later button
"""
self._submit_widget_rpc('fetch_start_dialog_contents',
video_id=self.video.video_id)
existing_language = self.video.subtitle_language(language_code)
if existing_language is not None:
subtitle_language_pk = existing_language.pk
else:
subtitle_language_pk = None
response_data = self._submit_widget_rpc(
'start_editing',
video_id=self.video.video_id,
language_code=language_code,
original_language_code=self.original_language_code,
base_language_code=self.base_language_code,
mode=self.mode,
subtitle_language_pk=subtitle_language_pk)
session_pk = response_data['session_pk']
self._submit_widget_rpc('finished_subtitles',
completed=completed,
save_for_later=save_for_later,
session_pk=session_pk,
subtitles=test_factories.dxfp_sample('en'),
task_approved=self.task_approved,
task_id=self.task_id,
task_notes=self.task_notes,
task_type=self.task_type)
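# Minimal usage sketch (an assumption based on the constructor and run()
# shown above): drive the simulated editor for an English subtitle version
# inside a unit test. `client` and `video` would come from the test fixtures.
#
#     editor = TestEditor(client, video, original_language_code='en')
#     editor.run('en', completed=True)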
|
adamcunnington/Flask-API
|
setup.py
|
Python
|
gpl-3.0
| 1,574
| 0
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
def get_readme():
with open("README.md") as f:
return f.read()
setup(name="Flask-API",
version="0.1",
description=("A library for Flask that provides a basic barebones web "
"API to be used and extended."),
long_description=get_readme(),
author="Adam Cunnington",
author_email="ac@adamcunnington.info",
license="GPLv3",
classifiers=[
"Development Status :: 1 - Planning",
"Environment :: Web Environment",
"Framework :: Flask
|
",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :
|
: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content"],
keywords="adamcunnington flask api webapi",
packages=find_packages(exclude=".virtualenv"),
install_requires=[
"coverage",
"Flask",
"Flask-HTTPAuth",
"Flask-SQLAlchemy",
"nose"],
test_requires=[
""])
|
Homebrain/Homebrain
|
homebrain/agents/devicemanager/__init__.py
|
Python
|
mit
| 66
| 0
|
# Import the agent class
from .devicemanager import DeviceManager
| |
plotly/python-api
|
packages/python/plotly/plotly/validators/scatterpolar/_hoverlabel.py
|
Python
|
mit
| 2,061
| 0.000485
|
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="hoverlabel", parent_name="scatterpolar", **kwargs):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
""",
),
**kwargs
)
|
ratschlab/ASP
|
examples/undocumented/python_modular/evaluation_meansquaredlogerror_modular.py
|
Python
|
gpl-2.0
| 727
| 0.023384
|
from tools.load import LoadMatrix
from numpy import random
lm=LoadMatrix()
N = 100
random.seed(17)
ground_truth = abs(random.randn(N))
predicted = abs(random.randn(N))
parameter_list = [[ground_truth,predicted]]
def evaluation_meansquaredlogerror_modular(ground_truth, predicted):
from shogun.Features import RegressionLabels
from shogun.Evaluation import MeanSquaredLogError
ground_truth_labels = RegressionLabels(ground_truth)
predicted_labels = RegressionLabels(predicted)
evaluator = MeanSquaredLogError()
mse = evaluator.evaluate(predicted_labels,ground_truth_labels)
return mse
if __name__=='__main__':
print('EvaluationMeanSquaredLogError')
evaluation_meansquaredlogerror_modular(*parameter_list[0])
|
michaupl/materialsapp
|
cuts/__init__.py
|
Python
|
apache-2.0
| 76
| 0.013158
|
# coding: utf-8
from __future__ import unicode_literals
DETAIL_TYPE = 'cut'
|
rafaellc28/Latex2MiniZinc
|
latex2minizinc/GenIndexingExpression.py
|
Python
|
mit
| 258
| 0.031008
|
from GenObj import *
class GenIndexingExpression(GenObj):
def __init__(self, name, value):
super(GenIndexingExpression, self).__init__(name)
self.value = value
def getValue(self):
return self.value
def setValue(self, value):
self.value = value
|
marineam/nagcat
|
python/snapy/netsnmp/unittests/__init__.py
|
Python
|
apache-2.0
| 6,157
| 0.001624
|
# snapy - a python snmp library
#
# Copyright (C) 2009 ITA Software, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
import socket
import signal
import warnings
import twisted
from twisted.internet import defer, error, process, protocol, reactor
from twisted.python import log, versions
from twisted.trial import unittest
def pick_a_port():
# XXX: Not perfect, there is a race condition between
# the close and snmpd's bind. However the other way
# would be to hook into snmpd's bind() call...
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
host, port = sock.getsockname()
sock.close()
return port
class LoggingProtocol(protocol.ProcessProtocol):
"""Log snmpd output via the twisted logging api"""
def __init__(self, factory):
self.factory = factory
def outReceived(self, data):
self.factory.stdout += data
for line in data.splitlines():
log.msg("snmpd: %s" % line)
def errReceived(self, data):
self.factory.stderr += data
for line in data.splitlines():
if line.startswith("NET-SNMP"):
self.factory.started()
log.err("snmpd: %s" % line)
def processEnded(self, status):
if isinstance(status.value, error.ProcessDone):
log.msg("snmpd: exit(0)")
self.factory.done(None)
elif isinstance(status.value, error.ProcessTerminated):
log.err("snmpd: exit(%s)" % status.value.exitCode)
self.factory.done(status)
else:
log.err("snmpd: %s" % status)
self.factory.done(status)
class Server(process.Process):
"""Run snmpd"""
# Limit snmpd to only load these modules, this speeds things up
modules = ('override', 'hr_system', 'system_mib')
def __init__(self):
self._deferred = defer.Deferred()
self._address = defer.Deferred()
self._timeout = None
self.conf = "%s/snmpd.conf" % os.path.dirname(__file__)
self.socket = "udp:127.0.0.1:%d" % pick_a_port()
self.stdout = ""
self.stderr = ""
proto = LoggingProtocol(self)
env = {"PATH": "/bin:/sbin:/usr/bin:/usr/sbin"}
cmd = ("snmpd", "-f", "-C", "-c", self.conf,
"-LE7", "-Ddumpv_recv", "-Ddumph_recv",
"-I", ','.join(self.modules),
"--noPersistentLoad=1", "--noPersistentSave=1",
self.socket)
# Skip test if snmpd doesn't exist
found = False
for path in env['PATH'].split(':'):
if os.path.exists("%s/%s" % (path, cmd[0])):
found = True
break
if not found:
raise unittest.SkipTest("snmpd missing")
super(Server, self).__init__(reactor, cmd[0], cmd, env, None, proto)
def started(self):
log.msg("Ready, snmpd listening on %s" % self.socket)
self._address.callback(self.socket)
def address(self):
return self._address
def stop(self):
assert self.pid and self._deferred
log.msg("Stopping snmpd...")
os.kill(self.pid, signal.SIGTERM)
self._timeout = reactor.callLater(5.0, self.timeout)
return self._deferred
def timeout(self):
assert self.pid
log.msg("Timeout, Killing snmpd...")
os.kill(self.pid, signal.SIGKILL)
self._timeout = None
def done(self, status):
assert self._deferred
if not self._address.called:
self._address.errback(Exception("snmpd failed"))
if self._timeout:
self._timeout.cancel()
self._timeout = None
self._deferred.callback(status)
self._deferred = None
class TestCase(unittest.TestCase):
def setUp(self):
# Twisted < 10.0.0 falsely raises its zombie warning during tests
if twisted.version < versions.Version("twisted", 10, 0, 0):
warnings.simplefilter("ignore", error.PotentialZombieWarning)
self._running = False
def set_running(result):
self._running = True
self.server = Server()
d = self.server.address()
d.addCallbacks(self.setUpSession, lambda x: None)
d.addCallback(lambda x: self._set_running(True))
d.addErrback(lambda x: self.server.stop())
return d
def _set_running(self, value):
# since we can't do this in lambda
self._running = value
def setUpSession(self, address):
pass
def tearDown(self):
if not self._running:
return
try:
self.tearDownSession()
finally:
d = self.server.stop()
d.addCallback(lambda x: self._set_running(False))
return d
def tearDownSession(self):
pass
def assertVersion(self, version):
self.assertIn("\ndumph_recv: SNMPv%s message\n" % version,
self.server.stderr)
def assertCommand(self, command):
self.assertIn("\ndumpv_recv: Command %s\n" % command,
self.server.stderr)
def finish(self, commands=()):
def checks(result):
self.assertVersion(self.version)
for command in commands:
self.assertCommand(command)
d = self.tearDown()
d.addCallback(checks)
return d
def finishGet(self):
return self.finish(["GET"])
def finishWalk(self):
if self.bulk:
return self.finish(["GET","GETBULK"])
else:
return self.finish(["GET","GETNEXT"])
def finishStrictWalk(self):
if self.bulk:
return self.finish(["GETBULK"])
else:
return self.finish(["GETNEXT"])
|
LegNeato/buck
|
python-dsl/buck_parser/buck.py
|
Python
|
apache-2.0
| 67,707
| 0.001403
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import with_statement
import __builtin__
import __future__
import contextlib
import collections
from pathlib import Path, PurePath
from pywatchman import WatchmanError
from .deterministic_set import DeterministicSet
from .json_encoder import BuckJSONEncoder
from .glob_internal import glob_internal
from .glob_watchman import SyncCookieState, glob_watchman
from .util import Diagnostic, cygwin_adjusted_path, get_caller_frame, is_special, is_in_dir
from .module_whitelist import ImportWhitelistManager
from .profiler import Profiler
from .struct import Struct
import abc
import functools
import imp
import inspect
import json
import optparse
import os
import os.path
import platform
import pywatchman
import re
import select
import sys
import time
import traceback
import types
from typing import Any, Dict, List, Tuple, Optional
try:
# Python 2.6, 2.7, use iterator filter from Python 3
from future_builtins import filter
except ImportError:
# use standard filter (Python 3, Python < 2.6)
pass
# When build files are executed, the functions in this file tagged with
# @provide_for_build will be provided in the build file's local symbol table.
# Those tagged with @provide_as_native_rule will be present unless
# explicitly disabled by parser.native_rules_enabled_in_build_files
#
# When these functions are called from a build file, they will be passed
# a keyword parameter, build_env, which is an object with information about
# the environment of the build file which is currently being processed.
# It contains the following attributes:
#
# "dirname" - The directory containing the build file.
#
# "base_path" - The base path of the build file.
#
# "cell_name" - The cell name the build file is in.
BUILD_FUNCTIONS = []
NATIVE_FUNCTIONS = []
# Wait this many seconds on recv() or send() in the pywatchman client
# if not otherwise specified in .buckconfig
DEFAULT_WATCHMAN_QUERY_TIMEOUT = 60.0
ORIGINAL_IMPORT = __builtin__.__import__
_LOAD_TARGET_PATH_RE = re.compile(r'^(@?[A-Za-z0-9_]+)?//(.*):(.*)$')
class AbstractContext(object):
"""Superclass of execution contexts."""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def includes(self):
"""
:rtype: set[str]
"""
raise NotImplementedError()
@abc.abstractproperty
def used_configs(self):
"""
:rtype: dict[Tuple[str, str], str]
"""
raise NotImplementedError()
@abc.abstractproperty
def used_env_vars(self):
"""
:rtype: dict[str, str]
"""
raise NotImplementedError()
@abc.abstractproperty
def diagnostics(self):
"""
:rtype: list[Diagnostic]
"""
raise NotImplementedError()
def merge(self, other):
"""Merge the context of an included file into the current context.
:param IncludeContext other: the include context to merge.
:rtype: None
"""
self.includes.update(other.includes)
self.diagnostics.extend(other.diagnostics)
self.used_configs.update(other.used_configs)
self.used_env_vars.update(other.used_env_vars)
class BuildFileContext(AbstractContext):
"""The build context used when processing a build file."""
def __init__(self, project_root, base_path, path, dirname, cell_name, allow_empty_globs,
ignore_paths, watchman_client, watchman_watch_root, watchman_project_prefix,
sync_cookie_state, watchman_glob_stat_results,
watchman_use_glob_generator):
self.globals = {}
self._includes = set()
self._used_configs = {}
self._used_env_vars = {}
self._diagnostics = []
self.rules = {}
self.project_root = project_root
self.base_path = base_path
self.path = path
self.cell_name = cell_name
self.dirname = dirname
self.allow_empty_globs = allow_empty_globs
self.ignore_paths = ignore_paths
self.watchman_client = watchman_client
self.watchman_watch_root = watchman_watch_root
self.watchman_project_prefix = watchman_project_prefix
self.sync_cookie_state = sync_cookie_state
self.watchman_glob_stat_results = watchman_glob_stat_results
self.watchman_use_glob_generator = watchman_use_glob_generator
@property
def includes(self):
return self._includes
@property
def used_configs(self):
return self._used_configs
@property
def used_env_vars(self):
return self._used_env_vars
@property
def diagnostics(self):
return self._diagnostics
class IncludeContext(AbstractContext):
"""The build context used when processing an include."""
def __init__(self, cell_name, path):
"""
:param cell_name: a cell name of the current context. Note that this cell name can be
different from the one BUCK file is evaluated in, since it can load extension files
from other cells, which should resolve their loads relative to their own location.
"""
self.cell_name = cell_name
self.path = path
self.globals = {}
self._includes = set()
self._used_configs = {}
self._used_env_vars = {}
self._diagnostics = []
@property
def includes(self):
return self._includes
@property
def used_configs(self):
return self._used_configs
@property
def used_env_vars(self):
return self._used_env_vars
@property
def diagnostics(self):
return self._diagnostics
BuildInclude = collections.namedtuple("BuildInclude", ["cell_name", "path"])
class LazyBuildEnvPartial(object):
"""Pairs a function with a build environment in which it will be executed.
Note that while the function is specified via the constructor, the build
environment must be assigned after construction, for the build environment
currently being used.
To call the function with its build environment, use the invoke() method of
this class, which will forward the arguments from invoke() to the
underlying function.
"""
def __init__(self, func):
self.func = func
self.build_env = None
def invoke(self, *args, **kwargs):
"""Invokes the bound function injecting 'build_env' into **kwargs."""
updated_kwargs = kwargs.copy()
updated_kwargs.update({'build_env': self.build_env})
try:
return self.func(*args, **updated_kwargs)
except TypeError:
missing_args, extra_args = get_mismatched_args(self.func, args, updated_kwargs)
if missing_args or extra_args:
name = '[missing]'
if 'name' in updated_kwargs:
name = updated_kwargs['name']
elif len(args) > 0:
# Optimistically hope that name is the first arg. It generally is...
name = args[0]
raise IncorrectArgumentsException(
self.func.func_name, name, missing_args, extra_args)
raise
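# Minimal usage sketch (an assumption, not part of the original module): the
# partial is built once around a rule function, the parser assigns the current
# build environment before evaluation, and invoke() forwards the call with
# build_env injected into the keyword arguments.
def _lazy_partial_example(some_rule_function, current_build_file_context):
    lazy = LazyBuildEnvPartial(some_rule_function)
    lazy.build_env = current_build_file_context  # e.g. a BuildFileContext
    return lazy.invoke('example-target', deps=[])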
HostInfoOs = collections.namedtuple(
'HostInfoOs',
[
'is_linux',
|
'is_macos',
'is_windows',
'is_freebsd',
'is_unknown',
],
)
HostInfoArch = collections.namedtuple(
'HostInfoArch',
[
'is_aarch64',
'is_arm',
'is_armeb',
'is_i386',
'is_mips',
'is_mips64',
'is_mipsel',
'is_mipsel64',
'is_powerpc',
'is_ppc64',
'is_unknown',
'is_x86_64',
]
)
HostInfo = collections.namedtuple('HostInfo', ['os', 'arch'])
__supported_oses = {
'darwin': 'macos',
'windows': 'windows',
'linux': 'linux',
'freebsd': 'freebsd',
}
# Pulled from com.facebook.buck.util.environment.Architecture.java as
# possible values. amd64 and arm64 are remapped, but they may not
# actually be present on most systems
__supported_archs = {
'aarch64': 'aarch64',
'arm': 'arm'
|
splotz90/urh
|
src/urh/dev/gr/SenderThread.py
|
Python
|
gpl-3.0
| 2,568
| 0.003115
|
import select
import socket
import numpy
import numpy as np
import time
import zmq
from urh.dev.gr.AbstractBaseThread import AbstractBaseThread
from urh.util.Logger import logger
class SenderThread(AbstractBaseThread):
MAX_SAMPLES_PER_TRANSMISSION = 65536
def __init__(self, freq, sample_rate, bandwidth, gain, if_gain, baseband_gain, ip='127.0.0.1', parent=None):
super().__init__(freq, sample_rate, bandwidth, gain, if_gain, baseband_gain, False, ip, parent)
self.data = numpy.empty(1, dtype=numpy.complex64)
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUSH)
self.gr_port = self.socket.bind_to_random_port("tcp://{0}".format(self.ip))
self.max_repeats = 1 # How often shall we send the data?
self.__samples_per_transmission = self.MAX_SAMPLES_PER_TRANSMISSION
@property
def repeat_endless(self):
return self.max_repeats == 0 or self.max_repeats == -1
@property
def samples_per_transmission(self):
return self.__samples_per_transmission
@samples_per_transmission.setter
def samples_per_transmission(self, val: int):
if val >= self.MAX_SAMPLES_PER_TRANSMISSION:
self.__samples_per_transmission = self.MAX_SAMPLES_PER_TRANSMISSION
elif val <= 1:
self.__samples_per_transmission = 1
else:
self.__samples_per_transmission = 2 ** (int(np.log2(val)) - 1)
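# Note on the clamping above (added commentary, not original code): values
# outside [1, MAX_SAMPLES_PER_TRANSMISSION] are clipped, and anything in
# between is rounded down to half of the largest power of two that does not
# exceed it. For example, val=1000 gives 2 ** (int(log2(1000)) - 1) = 2 ** 8
# = 256, and val=4096 gives 2 ** 11 = 2048.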
def run(self):
self.initialize_process()
len_data = len(self.data)
self.current_iteration = self.current_iteration if self.current_iteration is not None else 0
time.sleep(1)
try:
while self.current_index < len_data and not self.isInterruptionRequested():
time.sleep(0.1 * (self.samples_per_transmission / self.MAX_SAMPLES_PER_TRANSMISSION))
self.socket.send(self.data[self.current_index:self.current_index + self.samples_per_transmission].tostring())
self.current_index += self.samples_per_transmission
|
if self.current_index >= len_data:
self.current_iteration += 1
else:
continue
if self.repeat_endless or self.current_iteration < self.max_repeats:
self.current_index = 0
self.current_index = len_data - 1
self.current_iteration = None
self.stop("FIN - All data was sent successfully")
except RuntimeError:
logger.error("Sender thread crashed.")
|
ecino/compassion-accounting
|
account_analytic_attribution/models/__init__.py
|
Python
|
agpl-3.0
| 499
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015-2017 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from . import account_analytic_attribution
from . import account_analytic_distribution_line
|
fpeyre/shinken
|
shinken/worker.py
|
Python
|
agpl-3.0
| 10,451
| 0.002871
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from Queue import Empty
# In android, we should use threads, not process
is_android = True
try:
import android
except ImportError:
is_android = False
if not is_android:
from multiprocessing import Process, Queue
else:
from Queue import Queue
from threading import Thread as Process
import os
import time
import sys
import signal
import traceback
import cStringIO
from shinken.log import logger, BrokHandler
from shinken.misc.common import setproctitle
class Worker(object):
"""This class is used for poller and reactionner to work.
The worker is a process launch by theses process and read Message in a Queue
(self.s) (slave)
They launch the Check and then send the result in the Queue self.m (master)
they can die if they do not do anything (param timeout)
"""
id = 0 # None
_process = None
_mortal = None
_idletime = None
_timeout = None
_c = None
def __init__(self, id, s, returns_queue, processes_by_worker, mortal=True, timeout=300,
max_plugins_output_length=8192, target=None, loaded_into='unknown',
http_daemon=None):
self.id = self.__class__.id
self.__class__.id += 1
self._mortal = mortal
self._idletime = 0
self._timeout = timeout
self.s = None
self.processes_by_worker = processes_by_worker
self._c = Queue() # Private Control queue for the Worker
# By default, take our own code
if target is None:
target = self.work
self._process = Process(target=self._prework, args=(target, s, returns_queue, self._c))
self.returns_queue = returns_queue
self.max_plugins_output_length = max_plugins_output_length
self.i_am_dying = False
# Keep a trace where the worker is launch from (poller or reactionner?)
self.loaded_into = loaded_into
if os.name != 'nt':
self.http_daemon = http_daemon
else: # windows forker do not like pickle http/lock
self.http_daemon = None
def _prework(self, real_work, *args):
for handler in list(logger.handlers):
if isinstance(handler, BrokHandler):
logger.info("Cleaning BrokHandler %r from logger.handlers..", handler)
logger.removeHandler(handler)
real_work(*args)
def is_mortal(self):
return self._mortal
def start(self):
self._process.start()
# Kill the background process
# AND correctly close the queues (input and output);
# each queue has its own thread, so close that too....
def terminate(self):
# We can just terminate process, not threads
if not is_android:
self._process.terminate()
# If we are using a Manager() instead,
# there are no such functions
if hasattr(self._c, 'close'):
self._c.close()
self._c.join_thread()
if hasattr(self.s, 'close'):
self.s.close()
self.s.join_thread()
def join(self, timeout=None):
self._process.join(timeout)
def is_alive(self):
return self._process.is_alive()
def is_killable(self):
return self._mortal and self._idletime > self._timeout
def add_idletime(self, time):
self._idletime = self._idletime + time
def reset_idle(self):
self._idletime = 0
def send_message(self, msg):
self._c.put(msg)
# A zombie is immortal, so it cannot be killed anymore
def set_zombie(self):
self._mortal = False
# Get new checks if we have less than nb_checks_max
# If no new check was received and none is queued,
# sleep for 1 sec
# REF: doc/shinken-action-queues.png (3)
def get_new_checks(self):
try:
while(len(self.checks) < self.processes_by_worker):
# print "I", self.id, "wait for a message"
msg = self.s.get(block=False)
if msg is not None:
self.checks.append(msg.get_data())
# print "I", self.id, "I've got a message!"
except Empty, exp:
if len(self.checks) == 0:
self._idletime = self._idletime + 1
time.sleep(1)
# Maybe the Queue() is not available, if so, just return
# get back to work :)
except IOError, exp:
return
# Launch checks that are in status
# REF: doc/shinken-action-queues.png (4)
def launch_new_checks(self):
# queue
for chk in self.checks:
if chk.status == 'queue':
self._idletime = 0
r = chk.execute()
# Maybe we got a true big problem in the
# action launching
if r == 'toomanyopenfiles':
# We should die as soon as we return all checks
logger.error("[%d] I am dying Too many open files %s ... ", self.id, chk)
self.i_am_dying = True
# Check the status of checks
# if done, return message finished :)
# REF: doc/shinken-action-queues.png (5)
def manage_finished_checks(self):
to_del = []
wait_time = 1
now = time.time()
for action in self.checks:
if action.status == 'launched' and action.last_poll < now - action.wait_time:
action.check_finished(self.max_plugins_output_length)
wait_time = min(wait_time, action.wait_time)
# If action done, we can launch a new one
if action.status in ('done', 'timeout'):
to_del.append(action)
# We answer to the master
# msg = Message(id=self.id, type='Result', data=action)
try:
self.returns_queue.put(action)
except IOError, exp:
logger.error("[%d] Exiting: %s", self.id, exp)
sys.exit(2)
# Little sleep
self.wait_time = wait_time
for chk in to_del:
self.checks.remove(chk)
# Little sleep
time.sleep(wait_time)
# Check if our system time changed. If so, return the difference.
def check_for_system_time_change(self):
now = time.time()
difference = now - self.t_each_loop
# Now set the new value for the tick loop
self.t_each_loop = now
# return the diff if it need, of just 0
if abs(difference) > 900:
return difference
else:
return 0
# Wrapper function for work in order to catch the exception
# to see the real work, look at do_work
def work(self, s, returns_queue, c):
try:
self.do_work(s, returns_queue, c)
# Catch any exception, try to print it and exit anyway
except Exception, exp:
output = cStringIO.StringIO()
traceback.print_exc(file=output)
logger.error("Worker '%d' exit with an unmanaged exception : %s",
self.id, output.getvalue())
output.close()
# Ok I die now
raise
# id = id of the worker
# s = Global Queue Master->Slave
# m = Queue Slave->Master
# return_queue = que
|
victor-lin/npact
|
virtualenv.py
|
Python
|
bsd-3-clause
| 99,413
| 0.003128
|
#!/usr/bin/env python
"""Create a "virtual" Python installation
"""
__version__ = "13.1.0"
virtualenv_version = __version__ # legacy
import base64
import sys
import os
import codecs
import optparse
import re
import shutil
import logging
import tempfile
import zlib
import errno
import glob
import distutils.sysconfig
from distutils.util import strtobool
import struct
import subprocess
import tarfile
if sys.version_info < (2, 6):
print('ERROR: %s' % sys.exc_info()[1])
print('ERROR: this script requires Python 2.6 or greater.')
sys.exit(101)
try:
basestring
except NameError:
basestring = str
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
join = os.path.join
py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_win = (sys.platform == 'win32')
is_cygwin = (sys.platform == 'cygwin')
is_darwin = (sys.platform == 'darwin')
abiflags = getattr(sys, 'abiflags', '')
user_dir = os.path.expanduser('~')
if is_win:
default_storage_dir = os.path.join(user_dir, 'virtualenv')
else:
default_storage_dir = os.path.join(user_dir, '.virtualenv')
default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini')
if is_pypy:
expected_exe = 'pypy'
elif is_jython:
expected_exe = 'jython'
else:
expected_exe = 'python'
# Return a mapping of version -> Python executable
# Only provided for Windows, where the information in the registry is used
if not is_win:
|
def get_installed_pythons():
return {}
else:
try:
import winreg
except ImportError:
import _winreg as winreg
def get_installed_pythons():
try:
python_core = winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE,
"Software\\Python\\PythonCore")
except WindowsError:
# No registered Python installations
return {}
i = 0
versions = []
while True:
try:
versions.append(winreg.EnumKey(python_core, i))
i = i + 1
except WindowsError:
break
exes = dict()
for ver in versions:
try:
path = winreg.QueryValue(python_core, "%s\\InstallPath" % ver)
except WindowsError:
continue
exes[ver] = join(path, "python.exe")
winreg.CloseKey(python_core)
# Add the major versions
# Sort the keys, then repeatedly update the major version entry
# Last executable (i.e., highest version) wins with this approach
for ver in sorted(exes):
exes[ver[0]] = exes[ver]
return exes
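# Illustrative note (an assumption about typical output, not original code):
# on Windows the mapping contains one entry per registered version plus a
# collapsed major-version entry, e.g.
#     {'2.7': 'C:\\Python27\\python.exe',
#      '3.4': 'C:\\Python34\\python.exe',
#      '2':   'C:\\Python27\\python.exe',
#      '3':   'C:\\Python34\\python.exe'}
# On other platforms get_installed_pythons() simply returns {}.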
REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath',
'fnmatch', 'locale', 'encodings', 'codecs',
'stat', 'UserDict', 'readline', 'copy_reg', 'types',
're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile',
'zlib']
REQUIRED_FILES = ['lib-dynload', 'config']
majver, minver = sys.version_info[:2]
if majver == 2:
if minver >= 6:
REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc'])
if minver >= 7:
REQUIRED_MODULES.extend(['_weakrefset'])
elif majver == 3:
# Some extra modules are needed for Python 3, but different ones
# for different versions.
REQUIRED_MODULES.extend(['_abcoll', 'warnings', 'linecache', 'abc', 'io',
'_weakrefset', 'copyreg', 'tempfile', 'random',
'__future__', 'collections', 'keyword', 'tarfile',
'shutil', 'struct', 'copy', 'tokenize', 'token',
'functools', 'heapq', 'bisect', 'weakref',
'reprlib'])
if minver >= 2:
REQUIRED_FILES[-1] = 'config-%s' % majver
if minver >= 3:
import sysconfig
platdir = sysconfig.get_config_var('PLATDIR')
REQUIRED_FILES.append(platdir)
# The whole list of 3.3 modules is reproduced below - the current
# uncommented ones are required for 3.3 as of now, but more may be
# added as 3.3 development continues.
REQUIRED_MODULES.extend([
#"aifc",
#"antigravity",
#"argparse",
#"ast",
#"asynchat",
#"asyncore",
"base64",
#"bdb",
#"binhex",
#"bisect",
#"calendar",
#"cgi",
#"cgitb",
#"chunk",
#"cmd",
#"codeop",
#"code",
#"colorsys",
#"_compat_pickle",
#"compileall",
#"concurrent",
#"configparser",
#"contextlib",
#"cProfile",
#"crypt",
#"csv",
#"ctypes",
#"curses",
#"datetime",
#"dbm",
#"decimal",
#"difflib",
#"dis",
#"doctest",
#"dummy_threading",
"_dummy_thread",
#"email",
#"filecmp",
#"fileinput",
#"formatter",
#"fractions",
#"ftplib",
#"functools",
#"getopt",
#"getpass",
#"gettext",
#"glob",
#"gzip",
"hashlib",
#"heapq",
"hmac",
#"html",
#"http",
#"idlelib",
#"imaplib",
#"imghdr",
"imp",
"importlib",
#"inspect",
#"json",
#"lib2to3",
#"logging",
#"macpath",
#"macurl2path",
#"mailbox",
#"mailcap",
#"_markupbase",
#"mimetypes",
#"modulefinder",
#"multiprocessing",
#"netrc",
#"nntplib",
#"nturl2path",
#"numbers",
#"opcode",
#"optparse",
#"os2emxpath",
#"pdb",
#"pickle",
#"pickletools",
#"pipes",
#"pkgutil",
#"platform",
#"plat-linux2",
#"plistlib",
#"poplib",
#"pprint",
#"profile",
#"pstats",
#"pty",
#"pyclbr",
#"py_compile",
#"pydoc_data",
#"pydoc",
#"_pyio",
#"queue",
#"quopri",
#"reprlib",
"rlcompleter",
#"runpy",
#"sched",
#"shelve",
#"shlex",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"socketserver",
#"sqlite3",
#"ssl",
#"stringprep",
#"string",
#"_strptime",
#"subprocess",
#"sunau",
#"symbol",
#"symtable",
#"sysconfig",
#"tabnanny",
#"telnetlib",
#"test",
#"textwrap",
#"this",
#"_threading_local",
#"threading",
#"timeit",
#"tkinter",
#"tokenize",
#"token",
#"traceback",
#"trace",
#"tty",
#"turtledemo",
#"turtle",
#"unittest",
#"urllib",
#"uuid",
#"uu",
#"wave",
#"weakref",
#"webbrowser",
#"wsgiref",
#"xdrlib",
#"xml",
#"xmlrpc",
#"zipfile",
])
if minver >= 4:
REQUIRED_MODULES.extend([
'operator',
'_collections_abc',
'_bootlocale',
])
if is_pypy:
# these are needed to correctly display the exceptions that may happen
# during the bootstrap
REQUIRED_MODULES.extend(['traceback', 'linecache'])
class Logger(object):
"""
Lo
|
stvstnfrd/edx-platform
|
openedx/core/djangoapps/theming/helpers.py
|
Python
|
agpl-3.0
| 10,226
| 0.002249
|
"""
Helpers for accessing comprehensive theming related variables.
This file is imported at startup. Imports of models or things which import models will break startup on Django 1.9+. If
you need models here, please import them inside the function which uses them.
"""
import os
import re
from logging import getLogger
import crum
from django.conf import settings
from edx_toggles.toggles import SettingToggle
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming.helpers_dirs import (
Theme,
get_project_root_name_from_settings,
get_theme_base_dirs_from_settings,
get_theme_dirs,
get_themes_unchecked
)
from openedx.core.lib.cache_utils import request_cached
logger = getLogger(__name__) # pylint: disable=invalid-name
@request_cached()
def get_template_path(relative_path, **kwargs): # lint-amnesty, pylint: disable=unused-argument
"""
The calculated value is cached for the lifetime of the current request.
"""
return relative_path
def is_request_in_themed_site():
"""
This is a proxy function to hide microsite_configuration behind comprehensive theming.
"""
# We need to give priority to theming/site-configuration over microsites
return configuration_helpers.is_site_configuration_enabled()
def get_template_path_with_theme(relative_path):
"""
Returns template path in current site's theme if it finds one there otherwise returns same path.
Example:
>> get_template_path_with_theme('header.html')
'/red-theme/lms/templates/header.html'
Parameters:
relative_path (str): template's path relative to the templates directory e.g. 'footer.html'
Returns:
(str): template path in current site's theme
"""
relative_path = os.path.normpath(relative_path)
theme = get_current_theme()
if not theme:
return relative_path
# strip `/` if present at the start of relative_path
template_name = re.sub(r'^/+', '', relative_path)
template_path = theme.template_path / template_name
absolute_path = theme.path / "templates" / template_name
if absolute_path.exists():
return str(template_path)
else:
return relative_path
def get_all_theme_template_dirs():
"""
Returns template directories for all the themes.
Example:
>> get_all_theme_template_dirs()
[
'/edx/app/edxapp/edx-platform/themes/red-theme/lms/templates/',
]
Returns:
(list): list of directories containing theme templates.
"""
themes = get_themes()
template_paths = list()
for theme in themes:
template_paths.extend(theme.template_dirs)
return template_paths
def get_project_root_name():
"""
Return root name for the current project
Example:
>> get_project_root_name()
'lms'
# from studio
>> get_project_root_name()
'cms'
Returns:
(str): component name of platform e.g lms, cms
"""
return get_project_root_name_from_settings(settings.PROJECT_ROOT)
def strip_site_theme_templates_path(uri):
"""
Remove site template theme path from the uri.
Example:
>> strip_site_theme_templates_path('/red-theme/lms/templates/header.html')
'header.html'
Arguments:
uri (str): template path from which to remove site theme path. e.g. '/red-theme/lms/templates/header.html'
Returns:
|
(str): template path with site theme path removed.
"""
theme = get_current_theme()
if not theme:
return uri
templates_path = "/".join([
theme.theme_dir_name,
get_project_root_name(),
"templates"
])
uri = re.sub(r'^/*' + templates_path + '/*', '', uri)
return uri
def get_current_request():
"""
Return current request instance.
Returns:
(HttpRequest): returns current request
"""
return crum.get_current_request()
def get_current_site():
"""
Return current site.
Returns:
(django.contrib.sites.models.Site): returns current site
"""
request = get_current_request()
if not request:
return None
return getattr(request, 'site', None)
def get_current_site_theme():
"""
Return current site theme object. Returns None if theming is disabled.
Returns:
(ecommerce.theming.models.SiteTheme): site theme object for the current site.
"""
# Return None if theming is disabled
if not is_comprehensive_theming_enabled():
return None
request = get_current_request()
if not request:
return None
return getattr(request, 'site_theme', None)
def get_current_theme():
"""
Return current theme object. Returns None if theming is disabled.
Returns:
(ecommerce.theming.models.SiteTheme): site theme object for the current site.
"""
# Return None if theming is disabled
if not is_comprehensive_theming_enabled():
return None
site_theme = get_current_site_theme()
if not site_theme:
return None
try:
return Theme(
name=site_theme.theme_dir_name,
theme_dir_name=site_theme.theme_dir_name,
themes_base_dir=get_theme_base_dir(site_theme.theme_dir_name),
project_root=get_project_root_name()
)
except ValueError as error:
# Log exception message and return None, so that open source theme is used instead
logger.exception(u'Theme not found in any of the themes dirs. [%s]', error)
return None
def current_request_has_associated_site_theme():
"""
True if current request has an associated SiteTheme, False otherwise.
Returns:
True if current request has an associated SiteTheme, False otherwise
"""
request = get_current_request()
site_theme = getattr(request, 'site_theme', None)
return bool(site_theme and site_theme.id)
def get_theme_base_dir(theme_dir_name, suppress_error=False):
"""
Returns absolute path to the directory that contains the given theme.
Args:
theme_dir_name (str): theme directory name to get base path for
suppress_error (bool): if True function will return None if theme is not found instead of raising an error
Returns:
(str): Base directory that contains the given theme
"""
for themes_dir in get_theme_base_dirs():
if theme_dir_name in get_theme_dirs(themes_dir):
return themes_dir
if suppress_error:
return None
raise ValueError(
u"Theme '{theme}' not found in any of the following themes dirs, \nTheme dirs: \n{dir}".format(
theme=theme_dir_name,
dir=get_theme_base_dirs(),
))
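# Usage sketch (hypothetical layout, not part of the original module): assuming
# COMPREHENSIVE_THEME_DIRS points at '/edx/app/edxapp/edx-platform/themes' and
# that directory contains a 'red-theme' sub-directory:
#
#   get_theme_base_dir('red-theme')                           # the themes dir
#   get_theme_base_dir('missing-theme', suppress_error=True)  # None
#   get_theme_base_dir('missing-theme')                       # raises ValueError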
def theme_exists(theme_name, themes_dir=None):
"""
Returns True if a theme exists with the specified name.
"""
for theme in get_themes(themes_dir=themes_dir):
if theme.theme_dir_name == theme_name:
return True
return False
def get_themes(themes_dir=None):
"""
get a list of all themes known to the system.
Args:
themes_dir (str): (Optional) Path to themes base directory
Returns:
list of themes known to the system.
"""
if not is_comprehensive_theming_enabled():
return []
if themes_dir is None:
themes_dir = get_theme_base_dirs_unchecked()
return get_themes_unchecked(themes_dir, settings.PROJECT_ROOT)
def get_theme_base_dirs_unchecked():
"""
Return base directories that contains all the themes.
Example:
>> get_theme_base_dirs_unchecked()
['/edx/app/ecommerce/ecommerce/themes']
Returns:
(List of Paths): Base theme directory paths
"""
theme_dirs = getattr(settings, "COMPREHENSIVE_THEME_DIRS", None)
return get_theme_base_dirs_from_settings(theme_dirs)
def get_theme_base_dirs():
"""
Return base directories that contains all the themes.
Ensures comprehensive theming is enabled.
Example:
>> get_theme_base_dirs()
['/edx/app/ecommerce/ecommerce/themes']
    Returns:
        (List of Paths): Base theme directory paths
    """
    # Return an empty list if theming is disabled
    if not is_comprehensive_theming_enabled():
        return []
    return get_theme_base_dirs_unchecked()
|
OpenDataAlex/dataNexus
|
setup.py
|
Python
|
gpl-2.0
| 1,846
| 0.001083
|
__author__ = 'ameadows'
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import datanexus
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        #import here, cause outside the eggs aren't loaded
import tox
errcode = tox.cmdline(self.test_args)
sys.exit(errcode)
setup(
name='dataNexus',
    version=datanexus.__version__,
url='https://github.com/OpenDataAlex/dataNexus',
license='GNU GENERAL PUBLIC LICENSE Version 3',
description='Data source version control and lineage tool.',
author='Alex Meadows',
author_email='alexmeadows@bluefiredatasolutions.com',
packages=find_packages(),
install_requires=[
'appdirs==1.4.0',
'configparser==3.5.0b1',
'docutils==0.12',
'ecdsa==0.11',
'GitPython==1.0.1',
'Jinja2==2.7.3',
'MarkupSafe==0.23',
'Pygments==1.6',
'PyYAML==3.11',
'Sphinx==1.2.3',
'SQLAlchemy==0.9.7',
'py==1.4.24',
'tox==1.7.2'
],
tests_require=[
'PyMySQL==0.6.2',
],
classifiers=[
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Natural Language :: English',
'Environment :: Console',
'Intended Audience :: Developers'
],
cmdclass={'tox': Tox},
keywords='database, version control, data lineage',
test_suite='datanexus.test',
entry_points={
'console_scripts': [
'dataNexus = datanexus.dataNexus:main',
'datanexus = datanexus.dataNexus:main',
]
},
package_data={
'dataNexus': ['.datanexus-settings.yml'],
}
)
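# Usage sketch (assumes tox is installed): with the 'tox' cmdclass registered
# above, the test suite can be run via `python setup.py tox`, and a normal
# `pip install .` exposes the 'dataNexus'/'datanexus' console scripts declared
# in entry_points.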
|
Katharine/ponytone
|
karaoke/routing.py
|
Python
|
mit
| 328
| 0.003049
|
from channels.routing import route
from .consumer import party_connected, party_disconnected, party_message
karaoke_routing = [
route("websocket.connect", party_connected, path=r"^/party/(?P<party_id>[
|
a-zA-Z0-9_-]
|
+)"),
route("websocket.receive", party_message),
route("websocket.disconnect", party_disconnected)
]
|
Kaian/guake
|
guake/tests/test_about.py
|
Python
|
gpl-2.0
| 650
| 0
|
# -*- coding: utf-8 -*-
# pylint: disable=redefined-outer-name
import os
import pytest
from guake import guake_version
from guake.about import AboutDialog
@pytest.fixture
def dialog(mocker):
mocker.patch("guake.simplegladeapp.Gtk.Widget.show_all")
try:
old_os_environ = os.environ
os.environ["LANGUAGE"] = "en_US.UTF-8"
ad = AboutDialog()
yield ad
finally:
os.environ = old_os_environ
def test_version_test(dialog):
assert dialog.get_widget("aboutdialog").get_version() == guake_version()
def test_title(dialog):
assert dialog.get_widget("aboutdialog").get_title() == "About Guake"
|
Synss/pyhard2
|
pyhard2/driver/_bronkhorst.py
|
Python
|
mit
| 7,674
| 0.026323
|
"""FLOW-BUS protocol for Bronkhorst instruments.
The protocol is described in the instruction manual number 9.17.027.
The implementation uses the `Construct library <http://construct.readthedocs.org/en/latest/>`__.
"""
from binascii import unhexlify
import unittest
from construct import *
header = Struct("header",
Byte("length"),
Byte("node")
)
command = Enum(Byte("command"),
status = 0,
write = 1, # <--
write_no_status = 2,
write_with_source = 3,
read = 4, # <--
send_repeat = 5,
stop_process = 6,
start_process = 7,
claim_process = 8,
unclaim_process = 9
)
def Data(label):
return Struct(label,
BitStruct("process",
Flag("chained"),
BitField("number", 7)),
BitStruct("parameter",
Const(Flag("chained"), False),
Enum(BitField("type", 2),
c = 0x00 >> 5, # 0
i = 0x20 >> 5, # 1
f = 0x40 >> 5, # 2
l = 0x40 >> 5, # 2
s = 0x60 >> 5), # 3
BitField("number", 5)),
)
read_command = Struct("request",
Embed(header),
OneOf(command, ['read']),
Data("index"),
Data("data"),
If(lambda ctx: ctx.data.parameter.type == "s",
Const(UBInt8("string_length"), 0)),
Terminator
)
def write_command(type_, secured):
return Struct("send",
Embed(header),
OneOf(command, ['write', 'write_no_status']),
Const(String(None, 3), "\x80\x0a\x40") if secured else Pass,
Data("index"),
dict(
c=UBInt8("value"),
i=UBInt16("value"),
f=BFloat32("value"),
l=UBInt32("value"),
        s=Embed(Struct(None, UBInt8("string_length"),
                       IfThenElse("value",
                                  lambda ctx: ctx["string_length"] == 0,
                                  CString(None),
                                  # read string_length bytes (PascalString)
                                  MetaField(None, lambda ctx: ctx["string_length"]))))
)[type_],
Const(String(None, 3), "\x00\x0a\x52") if secured else Pass,
Terminator
)
error_message = Struct("FLOW-BUS error",
Embed(header),
Enum(Byte("error"),
colon_missing = 1,
first_byte = 2,
message_length = 3,
receiver = 4,
communication_error = 5,
sender_timeout = 8,
answer_timeout = 9,
)
)
status_message = Struct("FLOW-BUS status",
Embed(header),
command,
Enum(Byte("status"),
no_error = 0x00,
process_claimed = 0x01,
command_error = 0x02,
process_error = 0x03,
parameter_error = 0x04,
param_type_error = 0x05,
param_value_error = 0x06,
network_not_active = 0x07,
timeout_start_char = 0x08,
timeout_serial_line = 0x09,
hardware_mem_error = 0x0a,
node_number_error = 0x0b,
general_com_error = 0x0c,
read_only_param = 0x0d,
PC_com_error = 0x0e,
no_RS232_connection = 0x0f,
PC_out_of_mem = 0x10,
write_only_param = 0x11,
syst_config_unknown = 0x12,
no_free_node_address = 0x13,
wrong_iface_type = 0x14,
serial_port_error = 0x15,
serial_open_error = 0x16,
com_error = 0x17,
iface_busmaster_error = 0x18,
timeout_ans = 0x19,
no_start_char = 0x1a,
first_digit_error = 0x1b,
host_buffer_overflow = 0x1c,
buffer_overflow = 0x1d,
no_answer_found = 0x1e,
error_closing_connection = 0x1f,
synch_error = 0x20,
send_error = 0x21,
com_error_2 = 0x22,
module_buffer_overflow = 0x23
),
Byte("byte_index"),
Terminator
)
class _Data(object):
class Byte(object):
def __init__(self, number, type="c", chained=False):
self.number = number
self.type = type
self.chained = chained
def __init__(self, process, param, param_type, chained=False):
self.process = _Data.Byte(process, chained=chained)
self.parameter = _Data.Byte(param, param_type)
class Reader(object):
index = 1
def __init__(self, node, process, param, param_type):
self.length = 0
self.node = node
self.command = "read"
self.index = _Data(process, Reader.index, param_type)
self.data = _Data(process, param, param_type)
self.string_length = 0
self.length = len(self.build()) - 1
@classmethod
def fromContext(cls, context):
process = context.subsystem.process
return cls(context.node, process, context.reader,
context._command.type)
def build(self):
"""object to message"""
return read_command.build(self)
@staticmethod
def parse(msg):
"""message to object"""
return read_command.parse(msg)
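    # Example (frame taken from the unit tests below): Reader(3, 1, 1, "c").build()
    # yields unhexlify("06030401010101"), i.e. length 6, node 3, the 'read'
    # command (0x04), followed by matching index and data descriptors.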
class Writer(object):
def __init__(self, node, process, param, param_type, secured, value):
self.length = 0
self.node = node
self.command = "write"
self.index = _Data(process, param, param_type, secured)
self.param_type = param_type
self.secured = secured
self.value = value
self.string_length = 0
self.length = len(self.build()) - 1
@classmethod
def fromContext(cls, context):
process = context.subsystem.process
return cls(context.node, process, context.writer,
context._command.type, context._command.access ==
"Access.SEC", context.value)
def build(self):
"""object to message"""
return write_command(self.param_type, self.secured).build(self)
@staticmethod
def parse(msg, type_, secured=False):
"""message to object"""
return write_command(type_, secured).parse(msg)
class Status(object):
@staticmethod
def parse(msg):
return status_message.parse(msg).status
class TestFlowBus(unittest.TestCase):
def setUp(self):
self.msg = dict(status=unhexlify("0403000005"),
read=unhexlify("06030401210121"),
write=unhexlify("06030101213E80"),
secwrite=unhexlify("0C0301800A40E121000A000A52"))
def test_data_builder(self):
self.assertEqual(Data("").build(_Data(10, 2, "i")), unhexlify("0a22"))
def test_reader_builder(self):
self.assertEqual(read_command.build(Reader(3, 1, 1, "c")),
unhexlify("06030401010101"))
def test_writer_builder(self):
self.assertEqual(
write_command("c", False).build(Writer(3, 1, 2, "c", False, 10)),
unhexlify("05030101020a"))
def test_status(self):
msg = self.msg["status"]
self.assertEqual(status_message.parse(msg).command, "status")
def test_status_build(self):
msg = self.msg["status"]
self.assertEqual(status_message.build(status_message.parse(msg)), msg)
def test_read(self):
msg = self.msg["read"]
self.assertEqual(read_command.parse(msg).command, "read")
def test_read_build(self):
msg = self.msg["read"]
self.assertEqual(read_command.build(read_command.parse(msg)), msg)
def test_write(self):
msg = self.msg["write"]
self.assertEqual(write_command("i", False).parse(msg).command, "write")
def test_write_build(self):
msg = self.msg["write"]
self.assertEqual(
write_command("i", False).build(write_command("i", False).parse(msg)),
msg)
def test_sec_write(self):
msg = self.msg["secwrite"]
self.assertEqual(
write_command("i", True).build(write_command("i", True).parse(msg)),
msg)
|
pengzhangdev/PokemonGo-Bot
|
pokemongo_bot/cell_workers/incubate_eggs.py
|
Python
|
mit
| 11,384
| 0.002987
|
from datetime import datetime, timedelta
from pokemongo_bot import inventory
from pokemongo_bot.human_behaviour import sleep
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.worker_result import WorkerResult
class IncubateEggs(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
last_km_walked = 0
def __init__(self, bot, config):
super(IncubateEggs, self).__init__(bot, config)
def initialize(self):
self.next_update = None
self.ready_breakable_incubators = []
self.ready_infinite_incubators = []
self.used_incubators = []
self.eggs = []
self.km_walked = 0
self.hatching_animation_delay = 4.20
self._process_config()
def _process_config(self):
self.infinite_longer_eggs_first = self.config.get("infinite_longer_eggs_first", False)
self.breakable_longer_eggs_first = self.config.get("breakable_longer_eggs_first", True)
self.min_interval = self.config.get('min_interval', 120)
self.breakable_incubator = self.config.get("breakable", [2,5,10])
self.infinite_incubator = self.config.get("infinite", [2,5,10])
def work(self):
try:
self._check_inventory()
except:
return WorkerResult.ERROR
if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked:
km_left = self.used_incubators[0]['km']-self.km_walked
if km_left <= 0:
if not self._hatch_eggs():
return WorkerResult.ERROR
else:
self.bot.metrics.next_hatching_km(km_left)
if self._should_print():
self._print_eggs()
self._compute_next_update()
IncubateEggs.last_km_walked = self.km_walked
# if there is a ready infinite incubator
if self.ready_infinite_incubators:
# get available eggs
eggs = self._filter_sort_eggs(self.infinite_incubator,
self.infinite_longer_eggs_first)
self._apply_incubators(eggs, self.ready_infinite_incubators)
if self.ready_breakable_incubators:
# get available eggs
eggs = self._filter_sort_eggs(self.breakable_incubator,
                                          self.breakable_longer_eggs_first)
self._apply_incubators(eggs, self.ready_breakable_incubators)
return WorkerResult.SUCCESS
def _filter_sort_eggs(self, allowed, sorting):
eligible_eggs = filter(lambda egg: int(egg["km"]) in allowed, self.eggs)
eligible_eggs.sort(key=lambda egg: egg["km"], reverse=sorting)
return eligible_eggs
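    # Worked example (hypothetical values): with allowed=[2, 5, 10] and eggs of
    # 2 km, 5 km and 7 km, only the 2 km and 5 km eggs pass the filter;
    # sorting=True then orders them longest-first, i.e. [5 km, 2 km].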
def _apply_incubators(self, available_eggs, available_incubators):
for incubator in available_incubators:
for egg in available_eggs:
if egg["used"] or egg["km"] == -1:
continue
self.emit_event(
'incubate_try',
level='debug',
formatted="Attempting to apply incubator {incubator_id} to egg {egg_id}",
data={
'incubator_id': incubator['id'],
'egg_id': egg['id']
}
)
ret = self.bot.api.use_item_egg_incubator(
item_id=incubator["id"],
pokemon_id=egg["id"]
)
if ret:
code = ret.get("responses", {}).get("USE_ITEM_EGG_INCUBATOR", {}).get("result", 0)
if code == 1:
self.emit_event(
'incubate',
formatted='Incubating a {distance_in_km} egg.',
data={
'distance_in_km': str(egg['km'])
}
)
egg["used"] = True
incubator["used"] = True
break
elif code == 5 or code == 7:
self.emit_event(
'incubator_already_used',
level='debug',
formatted='Incubator in use.',
)
incubator["used"] = True
break
elif code == 6:
self.emit_event(
'egg_already_incubating',
level='debug',
formatted='Egg already incubating',
)
egg["used"] = True
def _check_inventory(self, lookup_ids=[]):
if lookup_ids:
inventory.refresh_inventory()
matched_pokemon = []
temp_eggs = []
temp_used_incubators = []
temp_ready_breakable_incubators = []
temp_ready_infinite_incubators = []
inv = inventory.jsonify_inventory()
for inv_data in inv:
inv_data = inv_data.get("inventory_item_data", {})
if "egg_incubators" in inv_data:
incubators = inv_data.get("egg_incubators", {}).get("egg_incubator",[])
if isinstance(incubators, basestring): # checking for old response
incubators = [incubators]
for incubator in incubators:
if 'pokemon_id' in incubator:
start_km = incubator.get('start_km_walked', 0)
km_walked = incubator.get('target_km_walked', 0)
temp_used_incubators.append({
"id": incubator.get('id', -1),
"km": km_walked,
"km_needed": (km_walked - start_km)
})
else:
if incubator.get('uses_remaining') is not None:
temp_ready_breakable_incubators.append({
"id": incubator.get('id', -1)
})
else:
temp_ready_infinite_incubators.append({
"id": incubator.get('id', -1)
})
continue
if "pokemon_data" in inv_data:
pokemon = inv_data.get("pokemon_data", {})
if pokemon.get("is_egg", False) and "egg_incubator_id" not in pokemon:
temp_eggs.append({
"id": pokemon.get("id", -1),
"km": pokemon.get("egg_km_walked_target", -1),
"used": False
})
elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids:
matched_pokemon.append(pokemon)
continue
if "player_stats" in inv_data:
self.km_walked = inv_data.get("player_stats", {}).get("km_walked", 0)
self.used_incubators = temp_used_incubators
if self.used_incubators:
self.used_incubators.sort(key=lambda x: x.get("km"))
self.ready_breakable_incubators = temp_ready_breakable_incubators
self.ready_infinite_incubators = temp_ready_infinite_incubators
self.eggs = temp_eggs
return matched_pokemon
def _hatch_eggs(self):
response_dict = self.bot.api.get_hatched_eggs()
try:
result = reduce(dict.__getitem__, ["responses", "GET_HATCHED_EGGS"], response_dict)
except KeyError:
return WorkerResult.ERROR
pokemon_ids = []
if 'pokemon_id' in result:
pokemon_ids = [id for id in result['pokemon_id']]
stardust = result.get('stardust_awarded', [])
candy = result.get('candy_awarded', [])
xp = result.get('experience_awarded', [])
sleep(self.hatching_animation_delay)
try:
pokemon_data = self._check_inventory(pokemon_ids)
pokemon_list = [inventory.Pokemon(p) for p in pokemon_data]
for pokemon in pokemon_list:
inventory.pokemons(
|
StongeEtienne/dipy
|
dipy/reconst/tests/__init__.py
|
Python
|
bsd-3-clause
| 32
| 0
|
# tests for reconstruction code
|
t3dev/odoo
|
addons/test_mass_mailing/tests/test_mail_channel.py
|
Python
|
gpl-3.0
| 2,012
| 0.003479
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from email.utils import formataddr
from odoo.addons.test_mail.tests import common
class TestChannelPartnersNotification(common.MockEmails):
def _join_channel(self, channel, partners):
for partner in partners:
channel.write({'channel_last_seen_partner_ids': [(0, 0, {'partner_id': partner.id})]})
channel.invalidate_cache()
def test_channel_blacklisted_recipients(self):
""" Posting a message on a channel should send one email to all recipients, except the blacklisted ones """
        self.test_channel = self.env['mail.channel'].create({
'name': 'Test',
'description': 'Description',
'alias_name': 'test',
'public': 'public',
})
self.test_partner = self.env['res.partner'].create({
'name': 'Test Partner',
'email': 'test@example.com',
})
self.blacklisted_partner = self.env['res.partner'].create({
'name': 'Blacklisted Partner',
'email': 'test@black.list',
})
# Set Blacklist
self.env['mail.blacklist'].create({
'email': 'test@black.list',
})
self.env['ir.config_parameter'].set_param('mail.catchall.domain', 'schlouby.fr')
self.test_channel.write({'email_send': True})
self._join_channel(self.test_channel, self.test_partner)
self.test_channel.message_post(body="Test", message_type='comment', subtype='mt_comment')
self.assertEqual(len(self._mails), 1, 'Number of mail incorrect. Should be equal to 1.')
for email in self._mails:
self.assertEqual(
set(email['email_to']),
set([formataddr((self.test_partner.name, self.test_partner.email))]),
'email_to incorrect. Should be equal to "%s"' % (
formataddr((self.test_partner.name, self.test_partner.email))))
|
aschnell/libstorage-ng
|
integration-tests/partitions/shrink.py
|
Python
|
gpl-2.0
| 645
| 0.004651
|
#!/usr/bin/python3
# requirements: partition /dev/sdc1 with size at least 1 GiB
from sys import exit
from storage import *
from storageitu import *
set_logger(get_logfile_logger())
environment = Environment(False)
storage = Storage(environment)
storage.probe()
staging = storage.get_staging()
print(staging)
partition = Partition.find_by_name(staging, "/dev/sdc1")
region = partition.get_region()
region.set_length(int(region.get_length() - 512 * MiB / region.get_block_size()))
region = partition.get_partition_table().align(region, AlignPolicy_KEEP_START_ALIGN_END)
partition.set_region(region)
print(staging)
commit(storage)
|
jnaulty/berkeley
|
python_logger/log_pres_2.py
|
Python
|
bsd-3-clause
| 529
| 0.028355
|
#!/usr/bin/python
# Creator: Daniel Wooten
# License: GPL
# import the python logging utility as log
import logging as log
# Set the root logger level ( what messages it will print )
log.basicConfig( level = 10 )
# Some sample messages for the root logger
log.debug( "This is the debug level reporting in" )
log.info( "This is the info level reporting in " )
log.warning( "This is the warning level reporting in" )
log.error( "This is the error level reporting in" )
log.critical( "This
|
is the critical level reporting in" )
|
cnobile2012/inventory
|
inventory/projects/api/views.py
|
Python
|
mit
| 5,580
| 0.001075
|
# -*- coding: utf-8 -*-
#
# inventory/projects/api/views.py
#
"""
Project API Views
"""
__docformat__ = "restructuredtext en"
import logging
from decimal import Decimal
from django.contrib.auth import get_user_model
from rest_framework.generics import (
ListCreateAPIView, RetrieveUpdateDestroyAPIView, RetrieveUpdateAPIView,
RetrieveAPIView)
from rest_framework.exceptions import PermissionDenied, NotAuthenticated
from rest_framework.permissions import IsAuthenticated
from rest_framework.settings import api_settings
from rest_condition import C, And, Or, Not
from inventory.common.api.permissions import (
IsAdminSuperUser, IsAdministrator, IsDefaultUser, IsProjectOwner,
IsProjectManager, IsProjectDefaultUser, IsAnyProjectUser, IsReadOnly,
IsUserActive, CanDelete)
from inventory.common.api.pagination import SmallResultsSetPagination
from inventory.common.api.parsers import parser_factory
from inventory.common.api.renderers import renderer_factory
from inventory.common.api.view_mixins import (
TrapDjangoValidationErrorCreateMixin, TrapDjangoValidationErrorUpdateMixin)
from ..models import InventoryType, Project, Membership
from .serializers import (
InventoryTypeSerializerVer01, MembershipSerializerVer01,
ProjectSerializerVer01)
log = logging.getLogger('api.projects.views')
UserModel = get_user_model()
__all__ = ('InventoryTypeList', 'inventory_type_list',
'InventoryTypeDetail', 'inventory_type_detail',
'ProjectList', 'project_list',
'ProjectDetail', 'project_detail',)
#
# InventoryType
#
class InventoryTypeMixin:
parser_classes = (parser_factory('inventory-types')
+ api_settings.DEFAULT_PARSER_CLASSES)
renderer_classes = (renderer_factory('inventory-types')
                        + api_settings.DEFAULT_RENDERER_CLASSES)
def get_serializer_class(self):
serializer = None
if self.request.version == Decimal("1"):
serializer = InventoryTypeSerializerVer01
# elif self.request.version == Decimal("2"):
# serializer = InventoryTypeSerializerVer02
return serializer
class InventoryTypeList(InventoryTypeMixin,
TrapDjangoValidationErrorCreateMixin,
ListCreateAPIView):
"""
InventoryType list endpoint.
"""
queryset = InventoryType.objects.all()
permission_classes = (
And(IsUserActive, IsAuthenticated,
Or(IsAdminSuperUser,
IsAdministrator,
And(IsReadOnly, IsAnyProjectUser)
)
),
)
pagination_class = SmallResultsSetPagination
lookup_field = 'public_id'
inventory_type_list = InventoryTypeList.as_view()
class InventoryTypeDetail(InventoryTypeMixin,
TrapDjangoValidationErrorUpdateMixin,
RetrieveUpdateAPIView):
"""
InventoryType detail endpoint.
"""
queryset = InventoryType.objects.all()
permission_classes = (
And(IsUserActive, IsAuthenticated,
Or(IsAdminSuperUser,
IsAdministrator,
And(IsReadOnly, IsAnyProjectUser)
)
),
)
lookup_field = 'public_id'
inventory_type_detail = InventoryTypeDetail.as_view()
#
# Project
#
class ProjectMixin:
parser_classes = (parser_factory('projects')
+ api_settings.DEFAULT_PARSER_CLASSES)
renderer_classes = (renderer_factory('projects')
+ api_settings.DEFAULT_RENDERER_CLASSES)
def get_serializer_class(self):
serializer = None
if self.request.version == Decimal("1"):
serializer = ProjectSerializerVer01
# elif self.request.version == Decimal("2"):
# serializer = ProjectSerializerVer02
return serializer
def get_queryset(self):
if (self.request.user.is_superuser or
self.request.user._role == UserModel.ADMINISTRATOR):
result = Project.objects.all()
else:
result = Project.objects.filter(
memberships__in=self.request.user.memberships.all())
return result
class ProjectList(TrapDjangoValidationErrorCreateMixin,
ProjectMixin,
ListCreateAPIView):
"""
Project list endpoint.
"""
serializer_class = ProjectSerializerVer01
permission_classes = (
And(IsUserActive,
IsAuthenticated,
Or(IsAdminSuperUser,
IsAdministrator,
And(IsDefaultUser, Not(IsReadOnly)),
IsProjectOwner,
IsProjectManager,
And(IsProjectDefaultUser, IsReadOnly)
),
),
)
pagination_class = SmallResultsSetPagination
lookup_field = 'public_id'
project_list = ProjectList.as_view()
class ProjectDetail(TrapDjangoValidationErrorUpdateMixin,
ProjectMixin,
RetrieveUpdateDestroyAPIView):
"""
Project detail endpoint.
"""
serializer_class = ProjectSerializerVer01
permission_classes = (
And(IsUserActive,
IsAuthenticated,
Or(IsAdminSuperUser,
IsAdministrator,
IsProjectOwner,
And(IsProjectManager,
Not(CanDelete)),
And(IsProjectDefaultUser,
IsReadOnly)
),
),
)
lookup_field = 'public_id'
project_detail = ProjectDetail.as_view()
|
kafana/ubik
|
lib/ubik/fab/filemap.py
|
Python
|
gpl-3.0
| 3,975
| 0.005283
|
# Copyright 2012 Lee Verberne <lee@blarg.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import os, os.path
import shutil as sh
import sys
from fabric.api import abort, local, prompt, warn
# Fabric 1.0 changed changed the scope of cd() to only affect remote calls.
# This bit of kludgery maintains compatibility of this file with fabric 0.9,
# but it is only possible because no remote calls are made in this file
try:
from fabric.api import lcd as cd
except ImportError:
from fabric.api import cd
from ubik import builder, packager
# filemap copies files directly from source to root, there is no build step
defenv = builder.BuildEnv('_root','_root','.')
file_map, file_map_table = None, None
def _install_file_map(fmap, installdir):
for src, dst in fmap:
_install(src, os.path.join(installdir,dst))
def _install(src, dst):
if src and os.path.isdir(src):
sh.copytree(src, dst)
else:
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
if src:
sh.copy(src, dst)
def build(pkgtype='deb', env=defenv):
'Builds this package into a directory tree'
if file_map:
_install_file_map(file_map, env.rootdir)
elif file_map_table:
_install_file_map(file_map_table[pkgtype], env.rootdir)
else:
abort("You must register a filemap with this module using register().")
def clean(env=defenv):
'Remove build directory and packages'
with cd(env.srcdir):
local('rm -rf _* *.deb *.rpm', capture=False)
local('find . -name \*.pyc -print -exec rm \{\} \;', capture=False)
def deb(version=None):
'Build a debian package'
package(version, 'deb')
def debiandir(version='0.0', env=defenv):
"Generate DEBIAN dir in rootdir, but don't build package"
if not env.exists('builddir'):
build('deb', env)
packager.DebPackage('package.ini', env).debiandir(version)
def filelist(pkgtype='deb', env=defenv):
'''Outputs default filelist as json (see details)
Generates and prints to stdout a filelist json that can be modified and
used with package.ini's "filelist" option to override the default.
Useful for setting file modes in RPMs'''
if not env.exists('builddir'):
build(pkgtype, env)
packager.Package('package.ini', env).filelist()
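# Usage sketch (hypothetical invocation, Fabric 1.x task syntax): these tasks
# are typically driven through fab, e.g. `fab filelist:rpm` to dump the default
# filelist or `fab package:1.2.3,deb` to build a versioned package.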
def package(version=None, pkgtype='deb', env=defenv):
'Creates deployable packages'
if not version:
version = prompt("What version did you want packaged there, hotshot?")
if not env.exists('builddir'):
warn('Implicitly invoking build')
build(pkgtype, env)
pkg = packager.Package('package.ini', env, pkgtype)
pkg.build(version)
def register(filemap_or_table):
'Register a filemap for use with this module'
global file_map, file_map_table
if isinstance(filemap_or_table, list):
file_map = filemap_or_table
elif isinstance(filemap_or_table, dict):
file_map_table = filemap_or_table
else:
abort("I don't even know what you're talking about.")
def rpm(version=None):
'Build a Red Hat package'
package(version, 'rpm')
def rpmspec(version='0.0', env=defenv):
'Output the generated RPM spec file'
if not env.exists('builddir'):
build('rpm', env)
packager.RpmPackage('package.ini', env).rpmspec(sys.stdout, version)
|
evanthebouncy/nnhmm
|
mnist_haar/data.py
|
Python
|
mit
| 2,672
| 0.026946
|
import numpy as np
import pywt
from scipy.misc import imresize
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
X_L = 10
L = 14
N_BATCH = 50
OBS_SIZE = 30
# ---------------------------- helpers
def vectorize(coords):
retX, retY = np.zeros([L]), np.zeros([L])
retX[coords[0]] = 1.0
retY[coords[1]] = 1.0
return retX, retY
# show dimension of a data object (list of list or a tensor)
def show_dim(lst1):
if hasattr(lst1, '__len__') and len(lst1) > 0:
return [len(lst1), show_dim(lst1[0])]
else:
try:
return lst1.get_shape()
except:
try:
return lst1.shape
except:
return type(lst1)
# -------------------------------------- making the datas
# assume X is already a 2D matrix
def mk_query(X):
avg = np.median(X)
X = X + avg
def query(O):
Ox, Oy = O
if X[Ox][Oy] > 0.0:
return [1.0, 0.0]
else:
return [0.0, 1.0]
return query
def sample_coord():
Ox, Oy = np.random.randint(0,L), np.random.randint(0,L)
if 0 <= Ox < L:
if 0 <= Oy < L:
return Ox, Oy
return sample_coord()
def gen_O(X):
query = mk_query(X)
Ox, Oy = sample_coord()
O = (Ox, Oy)
return O, query(O)
def get_img_class():
img, _x = mnist.train.next_batch(1)
img = np.reshape(img[0], [28, 28])
img = imresize(img, (L,L)) / 255.0
A,(B,C,D) = pywt.dwt2(img, 'haar')
img = np.reshape(np.array([A,B,C,D]), [L, L])
return img, _x
def gen_data():
x = []
obs_x = [[] for i in range(OBS_SIZE)]
obs_y = [[] for i in range(OBS_SIZE)]
obs_tfs = [[] for i in range(OBS_SIZE)]
new_ob_x = []
new_ob_y = []
new_ob_tf = []
imgs = []
for bb in range(N_BATCH):
        # generate a hidden variable X
# get a single thing out
img, _x = get_img_class()
imgs.append(img)
# add to x
x.append(_x[0])
# generate new observation
_new_ob_coord, _new_ob_lab = gen_O(img)
_new_ob_x, _new_ob_y = vectorize(_new_ob_coord)
new_ob_x.append(_new_ob_x)
new_ob_y.append(_new_ob_y)
new_ob_tf.append(_new_ob_lab)
# generate observations for this hidden variable x
for ob_idx in range(OBS_SIZE):
_ob_coord, _ob_lab = gen_O(img)
_ob_x, _ob_y = vectorize(_ob_coord)
obs_x[ob_idx].append(_ob_x)
obs_y[ob_idx].append(_ob_y)
obs_tfs[ob_idx].append(_ob_lab)
return np.array(x, np.float32),\
np.array(obs_x, np.float32),\
np.array(obs_y, np.float32),\
np.array(obs_tfs, np.float32),\
np.array(new_ob_x, np.float32),\
np.array(new_ob_y, np.float32),\
np.array(new_ob_tf, np.float32), imgs
|
wschaeferB/autopsy
|
InternalPythonModules/android/googlemaplocation.py
|
Python
|
apache-2.0
| 6,752
| 0.004443
|
"""
Autopsy Forensic Browser
Copyright 2016-2018 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Double
from java.lang import Long
from java.sql import Connection
from java.sql import DriverManager
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from java.util import ArrayList
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule import NoCurrentCaseException
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import Blackboard
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
from org.sleuthkit.datamodel.Blackboard import BlackboardException
from org.sleuthkit.datamodel.blackboardutils import GeoArtifactsHelper
from org.sleuthkit.datamodel.blackboardutils.attributes import GeoWaypoints
from org.sleuthkit.datamodel.blackboardutils.attributes.GeoWaypoints import Waypoint
import traceback
import general
"""
Finds and parses the Google Maps database.
"""
class GoogleMapLocationAnalyzer(general.AndroidComponentAnalyzer):
def __init__(self):
self._logger = Logger.getLogger(self.__class__.__name__)
self.current_case = None
self.PROGRAM_NAME = "Google Maps History"
self.CAT_DESTINATION = "Destination"
def analyze(self, dataSource, fileManager, context):
try:
self.current_case = Case.getCurrentCaseThrows()
except NoCurrentCaseException as ex:
self._logger.log(Level.WARNING, "No case currently open.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
return
try:
absFiles = fileManager.findFiles(dataSource, "da_destination_history")
if absFiles.isEmpty():
return
for abstractFile in absFiles:
try:
jFile = File(self.current_case.getTempDirectory(), str(abstractFile.getId()) + abstractFile.getName())
ContentUtils.writeToFile(abstractFile, jFile, context.dataSourceIngestIsCancelled)
self.__findGeoLocationsInDB(jFile.toString(), abstractFile)
except Exception as ex:
self._logger.log(Level.SEVERE, "Error parsing Google map locations", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except TskCoreException as ex:
# Error finding Google map locations.
pass
def __findGeoLocationsInDB(self, databasePath, abstractFile):
if not databasePath:
return
try:
artifactHelper = GeoArtifactsHelper(self.current_case.getSleuthkitCase(),
general.MODULE_NAME, self.PROGRAM_NAME, abstractFile)
Class.forName("org.sqlite.JDBC") # load JDBC driver
connection = DriverManager.getConnection("jdbc:sqlite:" + databasePath)
statement = connection.createStatement()
except (ClassNotFoundException) as ex:
self._logger.log(Level.SEVERE, "Error loading JDBC driver", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
return
except (SQLException) as ex:
# Error opening database.
return
resultSet = None
try:
resultSet = statement.executeQuery(
"SELECT time, dest_lat, dest_lng, dest_title, dest_address, source_lat, source_lng FROM destination_history;")
while resultSet.next():
time = Long.valueOf(resultSet.getString("time")) / 1000
dest_title = resultSet.getString("dest_title")
dest_address = resultSet.getString("dest_address")
dest_lat = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("dest_lat"))
dest_lng = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("dest_lng"))
source_lat = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("source_lat"))
source_lng = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("source_lng"))
waypointlist = GeoWaypoints()
waypointlist.addPoint(Waypoint(source_lat, source_lng, None, None))
waypointlist.addPoint(Waypoint(dest_lat, dest_lng, None, dest_address))
artifactHelper.addRoute(dest_title, time, waypointlist, None)
except SQLException as ex:
# Unable to execute Google map locations SQL query against database.
pass
except TskCoreException as ex:
self._logger.log(Level.SEVERE, "Failed to add route artifacts.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
self._logger.log(Level.WARNING, "Failed to post artifacts.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except Exception as ex:
self._logger.log(Level.SEVERE, "Error processing google maps history.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
finally:
try:
if resultSet is not None:
resultSet.close()
statement.close()
connection.close()
except Exception as ex:
# Error closing the database.
pass
    # insert a decimal point 6 places before the end of the string.
@staticmethod
def convertGeo(s):
length = len(s)
if length > 6:
return Double.valueOf(s[0 : length-6] + "." + s[length-6 : length])
else:
return Double.valueOf(s)
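    # Worked example (hypothetical values): convertGeo("37421998") returns
    # 37.421998 (the decimal point is inserted six places from the end), while
    # a short string such as "12345" is returned unchanged as 12345.0.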
|
aldenjenkins/foobargamingwebsite
|
game/wsgi.py
|
Python
|
bsd-3-clause
| 484
| 0
|
"""
WSGI config for game project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from mezzanine.utils.conf import real_project_name
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"%s.settings" % real_project_name("game"))
application = get_wsgi_application()
|
ShyamSS-95/Bolt
|
example_problems/nonrelativistic_boltzmann/advected_gaussian_pulse_in_p_space/1V/with_only_E1/domain.py
|
Python
|
gpl-3.0
| 243
| 0.045267
|
q1_start = 0
q1_end = 1
N_q1 = 1
q2_start = 0
q2_end = 1
N_q2 = 1
p1_start = -10
p1_end = 10
N_p1 = 32
p2_start = -0.5
p2_end = 0.5
N_p2 = 1
p3_start = -0.5
p3_end = 0.5
N_p3 = 1
N_ghost_q = 0
N_ghost_p = 0
|
genonfire/bbgo
|
core/utils.py
|
Python
|
mit
| 1,561
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.utils.translation import ugettext as _
def error_page(request, errormsg=''):
"""Show error page with message"""
if not errormsg:
errormsg = _('Wrong access')
return render(
        request,
"error.html",
{
'errormsg': errormsg,
}
)
def error_to_response(request, errormsg=''):
"""Show error response with msg"""
if not errormsg:
errormsg = _('Wrong access')
return render(
request,
"error_response.html",
{
'errormsg': errormsg,
}
)
def get_ipaddress(request):
"""Return ipaddress"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[-1].strip()
else:
ip = request.META.get('REMOTE_ADDR')
return ip
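# Example (hypothetical header values): with HTTP_X_FORWARDED_FOR set to
# '203.0.113.7, 198.51.100.23' the last entry, '198.51.100.23', is returned;
# without that header the plain REMOTE_ADDR value is used instead.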
def get_referrer(request):
"""Return referrer"""
referrer = request.META['HTTP_REFERER']
return referrer
def get_useragent(request):
"""Return useragent"""
user_agent = request.META['HTTP_USER_AGENT']
return user_agent
def is_mobile(request):
"""Return true if request from Android and iPhone"""
user_agent = request.META['HTTP_USER_AGENT']
if 'Android' in user_agent or 'iPhone' in user_agent:
return True
else:
return False
def get_referer(request):
"""Get referer URL to stay"""
referer = request.META.get('HTTP_REFERER')
return referer
|
pshchelo/vampy
|
calc/output.py
|
Python
|
bsd-3-clause
| 2,387
| 0.008379
|
#!/usr/bin/env python
"""
output of various data for VAMP project
"""
from numpy import ndarray
class DataWriter():
    def __init__(self, datadict, title='Data export file'):
        self.title = title
        self.data = datadict
self.allfields = sorted(datadict.keys())
self.numparams= []
self.textparams=[]
self.datafields = self.allfields
self._extract_param_fields()
def set_fields(self, fieldslist):
self.datafields = fieldslist
self._extract_param_fields()
def _extract_param_fields(self):
"""
extract one-item values (parameters) from self.fields
side-effect - changes self.datafields and self.paramfields
by moving some values from one to another
"""
for field in self.datafields:
value = self.data[field]
if not isinstance(value, ndarray):
self.textparams.append(field)
self.datafields.remove(field)
elif value.shape == (2,):
self.numparams.append(field)
self.datafields.remove(field)
def _make_header(self):
header = '#%s\n'%self.title
if len(self.textparams) > 0:
for param in self.textparams:
value = self.data[param]
header += '#%s:\t %s\n'%(param, value)
if len(self.numparams) > 0:
for param in self.numparams:
value = self.data[param]
header += '#%s (+-error): '%param
header += len(value)*'\t%s'%tuple(value)+'\n'
header += '#No.'
for field in self.datafields:
header +='\t%s\t%s_error'%(field, field)
header += '\n'
return header
def write_file(self, filename):
try:
outfile = open(filename, 'w')
except IOError:
mesg = 'Can not open file %s for writing.'%filename
return mesg
outfile.write(self._make_header())
length = self.data[self.datafields[0]].shape[-1]
for i in range(0, length):
line = '%i'%(i+1)
for field in self.datafields:
data = self.data[field][:,i]
line += len(data)*'\t%f'%tuple(data)
line += '\n'
outfile.write(line)
outfile.close()
return
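# Minimal usage sketch (hypothetical field names and values, not part of the
# original module): 2-element arrays become "(+-error)" header parameters,
# plain strings become text parameters, and (2, N) arrays are written out as
# value/error data columns.
#
#   import numpy as np
#   writer = DataWriter({'tension': np.ones((2, 5)),
#                        'area': np.array([1.5, 0.1]),
#                        'sample': 'vesicle01'}, title='VAMP results')
#   writer.write_file('results.dat')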
|
jomolinare/kobocat
|
onadata/apps/logger/management/commands/import_instances.py
|
Python
|
bsd-2-clause
| 2,293
| 0
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 coding=utf-8
import os
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _, ugettext_lazy
from onadata.libs.logger.import_tools import import_instances_from_zip,\
import_instances_from_path
class Command(BaseCommand):
args = 'username path'
help = ugettext_lazy("Import a zip file, a directory containing zip files "
"or a directory of ODK instances")
def _log_import(self, results):
total_count, success_count, errors = results
self.stdout.write(_(
"Total: %(total)d, Imported: %(imported)d, Errors: "
"%(errors)s\n------------------------------\n") % {
'total': total_count, 'imported': success_count,
'errors': errors})
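    # Usage sketch (hypothetical arguments): as a Django management command this
    # is invoked as `python manage.py import_instances <username> <path>`, where
    # <path> may be a zip file, a directory of zip files or a directory of ODK
    # instances (see the help text above).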
def handle(self, *args, **kwargs):
if len(args) < 2:
raise CommandError(_("Usage: <command> username file/path."))
        username = args[0]
path = args[1]
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError(_(
"The specified user '%s' does not exist.") % username)
# make sure path exists
if not os.path.exists(path):
raise CommandError(_(
"The specified path '%s' does not exist.") % path)
for dir, subdirs, files in os.walk(path):
# check if the dir has an odk directory
if "odk" in subdirs:
# dont walk further down this dir
subdirs.remove("odk")
self.stdout.write(_("Importing from dir %s..\n") % dir)
results = import_instances_from_path(dir, user)
                self._log_import(results)
for file in files:
filepath = os.path.join(path, file)
if os.path.isfile(filepath) and\
os.path.splitext(filepath)[1].lower() == ".zip":
self.stdout.write(_(
"Importing from zip at %s..\n") % filepath)
results = import_instances_from_zip(filepath, user)
                    self._log_import(results)
|
synapsepayments/synapse_pay-python
|
synapsepay/resources/bank.py
|
Python
|
mit
| 1,073
| 0.021435
|
from ..apibits import *
from .. import Client
from . import *
class Bank(APIResource):
def remove(self, params={}, headers={}):
params = ParamsBuilder.merge({
"bank_id" : self.id,
}, params)
method = APIMethod("post", "/bank/delete", params, headers, self)
json = self.client.execute(method)
return json
# Everything below here is used behind the scenes.
def __init__(self, *args, **kwargs):
super(Bank, self).__init__(*args, **kwargs)
APIResource.register_api_subclass(self, "bank")
_api_attributes = {
"account_class" : {},
"account_number_string" : {},
"account_type" : {},
"address" : {},
"balance" : {},
"bank_name" : {},
"date" : {},
"email" : {},
"id" : {},
"is_buyer_default" : {},
"is_seller_default" : {},
"mfa_verifed" : {},
"name_on_account" : {},
"nickname" : {},
"phone_number" : {},
"resource_uri" : {},
"routing_number_string" : {},
}
|
krkeegan/lib-py-insteon
|
insteon/base_objects.py
|
Python
|
gpl-2.0
| 19,358
| 0.00031
|
import time
import datetime
import pprint
from .helpers import *
class ALDB(object):
def __init__(self, parent):
self._parent = parent
self._aldb = {}
def edit_record(self, position, record):
self._aldb[position] = record
def delete_record(self, position):
del(self._aldb[position])
def get_record(self, position):
return self._aldb[position]
def get_all_records(self):
return self._aldb.copy()
def get_all_records_str(self):
ret = {}
for key, value in self._aldb.items():
ret[key] = BYTE_TO_HEX(value)
return ret
def load_aldb_records(self, records):
for key, record in records.items():
self.edit_record(key, bytearray.fromhex(record))
    def clear_all_records(self):
self._aldb = {}
def edit_record_byte(self, aldb_pos, byte_pos, byte):
self._aldb[aldb_pos][byte_pos] = byte
def get_matching_records(self, attributes):
'''Returns an array of positions of each records that matches ALL
attributes'''
ret = []
for position, record in self._aldb.items():
parsed_record = self.parse_record(position)
ret.append(position)
for attribute, value in attributes.items():
if parsed_record[attribute] != value:
ret.remove(position)
break
return ret
def parse_record(self, position):
bytes = self._aldb[position]
parsed = {
'record_flag': bytes[0],
'in_use': bytes[0] & 0b10000000,
'controller': bytes[0] & 0b01000000,
'responder': ~bytes[0] & 0b01000000,
'highwater': ~bytes[0] & 0b00000010,
'group': bytes[1],
'dev_addr_hi': bytes[2],
'dev_addr_mid': bytes[3],
'dev_addr_low': bytes[4],
'data_1': bytes[5],
'data_2': bytes[6],
'data_3': bytes[7],
}
for attr in ('in_use', 'controller', 'responder', 'highwater'):
if parsed[attr]:
parsed[attr] = True
else:
parsed[attr] = False
return parsed
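    # Example (hypothetical record): a flag byte of 0xE2 (0b11100010) parses to
    # in_use=True, controller=True, responder=False and highwater=False, i.e. an
    # in-use controller record with further records following it in the ALDB.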
def get_linked_obj(self, position):
parsed_record = self.parse_record(position)
high = parsed_record['dev_addr_hi']
mid = parsed_record['dev_addr_mid']
low = parsed_record['dev_addr_low']
return self._parent.plm.get_device_by_addr(BYTE_TO_ID(high, mid, low))
def is_last_aldb(self, key):
ret = True
if self.get_record(key)[0] & 0b00000010:
ret = False
return ret
def is_empty_aldb(self, key):
ret = True
if self.get_record(key)[0] & 0b10000000:
ret = False
return ret
class Device_ALDB(ALDB):
def __init__(self, parent):
super().__init__(parent)
def _get_aldb_key(self, msb, lsb):
offset = 7 - (lsb % 8)
highest_byte = lsb + offset
key = bytes([msb, highest_byte])
return BYTE_TO_HEX(key)
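    # Example (hypothetical address bytes): for msb=0x0F, lsb=0xF8 the offset is
    # 7 - (0xF8 % 8) = 7, so the key is built from bytes 0x0F and 0xFF,
    # presumably rendered as '0FFF' by BYTE_TO_HEX from helpers.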
def query_aldb(self):
self.clear_all_records()
if self._parent.attribute('engine_version') == 0:
self.i1_start_aldb_entry_query(0x0F, 0xF8)
else:
dev_bytes = {'msb': 0x00, 'lsb': 0x00}
self._parent.send_command('read_aldb',
'query_aldb',
dev_bytes=dev_bytes)
# It would be nice to link the trigger to the msb and lsb, but we
# don't technically have that yet at this point
trigger_attributes = {
'plm_cmd': 0x51,
'cmd_1': 0x2F,
'from_addr_hi': self._parent.dev_addr_hi,
'from_addr_mid': self._parent.dev_addr_mid,
'from_addr_low': self._parent.dev_addr_low,
}
trigger = Trigger(trigger_attributes)
trigger.trigger_function = lambda: self.i2_next_aldb()
trigger_name = self._parent.dev_addr_str + 'query_aldb'
self._parent.plm._trigger_mngr.add_trigger(trigger_name, trigger)
def i2_next_aldb(self):
        # TODO parse by real names on incoming
msb = self._parent.last_rcvd_msg.get_byte_by_name('usr_3')
lsb = self._parent.last_rcvd_msg.get_byte_by_name('usr_4')
if self.is_last_aldb(self._get_aldb_key(msb, lsb)):
self._parent.remove_state_machine('query_aldb')
records = self.get_all_records()
for key in sorted(records):
print(key, ":", BYTE_TO_HEX(records[key]))
self._parent.send_command('light_status_request', 'set_aldb_delta')
else:
if lsb == 0x07:
msb -= 1
lsb = 0xFF
else:
lsb -= 8
dev_bytes = {'msb': msb, 'lsb': lsb}
self._parent.send_command('read_aldb',
'query_aldb',
dev_bytes=dev_bytes)
# Set Trigger
trigger_attributes = {
'plm_cmd': 0x51,
'cmd_1': 0x2F,
'usr_3': msb,
'usr_4': lsb,
'from_addr_hi': self._parent.dev_addr_hi,
'from_addr_mid': self._parent.dev_addr_mid,
'from_addr_low': self._parent.dev_addr_low,
}
trigger = Trigger(trigger_attributes)
trigger.trigger_function = lambda: self.i2_next_aldb()
trigger_name = self._parent.dev_addr_str + 'query_aldb'
self._parent.plm._trigger_mngr.add_trigger(trigger_name, trigger)
def i1_start_aldb_entry_query(self, msb, lsb):
message = self._parent.create_message('set_address_msb')
message._insert_bytes_into_raw({'msb': msb})
message.insteon_msg.device_success_callback = \
lambda: \
self.peek_aldb(lsb)
self._parent._queue_device_msg(message, 'query_aldb')
def peek_aldb(self, lsb):
message = self._parent.create_message('peek_one_byte')
message._insert_bytes_into_raw({'lsb': lsb})
self._parent._queue_device_msg(message, 'query_aldb')
def create_responder(self, controller, d1, d2, d3):
# Device Responder
# D1 On Level D2 Ramp Rate D3 Group of responding device i1 00
# i2 01
pass
def create_controller(responder):
# Device controller
# D1 03 Hops?? D2 00 D3 Group 01 of responding device??
pass
def _write_link(self, linked_obj, is_controller):
if self._parent.attribute('engine_version') == 2:
pass # run i2cs commands
else:
pass # run i1 commands
pass
class PLM_ALDB(ALDB):
def add_record(self, aldb):
position = str(len(self._aldb) + 1)
position = position.zfill(4)
self._aldb[position] = aldb
def have_aldb_cache(self):
# TODO This will return false for an empty aldb as well, do we care?
ret = True
if len(self._aldb) == 0:
ret = False
return ret
def query_aldb(self):
'''Queries the PLM for a list of the link records saved on
the PLM and stores them in the cache'''
self.clear_all_records()
self._parent.send_command('all_link_first_rec', 'query_aldb')
def create_responder(self, controller, *args):
self._write_link(controller, is_plm_controller=False)
def create_controller(self, controller, *args):
self._write_link(controller, is_plm_controller=True)
def _write_link(self, linked_obj, is_plm_controller):
group = linked_obj.group_number
if is_plm_controller:
group = self._parent.group_number
link_bytes = {
'controller': True if i
|
stephenlienharrell/roster-dns-management
|
roster-core/roster_core/db_access.py
|
Python
|
bsd-3-clause
| 41,336
| 0.011128
|
#!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module is an API to access the dnsManagement database.
This module should only be run by servers with authentication layers
that are active. This module does not include authentication, but does
include authorization.
The api that will be exposed by this module is meant for use in a web
application or rpc server. This module is not for use in command line tools.
The two primary uses of this class are:
1. to use convenience functions to get large amounts of data out of the db
without large amounts of db queries. For usage on this consult the pydoc
on the individual functions.
2. to Make/Remove/List rows in the database. The method that is used in this
class is based on generic Make/Remove/Lock functions that take specific
dictionaries that correspond to the table that is being referenced.
Here is an example of how to remove rows from the acls table:
acls_dict = db_instance.GetEmptyRowDict('acls')
acls_dict['acl_name'] = 'test_acl'
db_instance.StartTransaction()
try:
matching_rows = db_instance.ListRow('acls', acls_dict)
for row in matching_rows:
db_instance.RemoveRow('acls', row)
except Exception:
db_instance.EndTransaction(rollback=True)
else:
db_instance.EndTransaction()
Note: MySQLdb.Error can be raised in almost any function in this module. Please
keep that in mind when using this module.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import Queue
import threading
import time
import uuid
import warnings
import MySQLdb
import constants
import data_validation
import embedded_files
import errors
import helpers_lib
import codecs
class dbAccess(object):
"""This class provides the primary interface for connecting and interacting
with the roster database.
"""
def __init__(self, db_host, db_user, db_passwd, db_name, big_lock_timeout,
big_lock_wait, thread_safe=True, ssl=False, ssl_ca=None,
ssl_cert=None, ssl_key=None, ssl_capath=None, ssl_cipher=None,
db_debug=False, db_debug_log=None):
"""Instantiates the db_access class.
Inputs:
db_host: string of the database host name
db_user: string of the user name used to connect to mysql
db_passwd: string of password used to connect to mysql
db_name: string of name of database in mysql server to use
big_lock_timeout: integer of how long the big lock should be valid for
big_lock_wait: integer of how long to wait for proccesses to finish
before locking the database
thread_safe: boolean of if db_acceess should be thread safe
"""
# Do some better checking of these args
self.db_host = db_host
self.db_user = db_user
self.db_passwd = db_passwd
self.db_name = db_name
self.big_lock_timeout = big_lock_timeout
self.big_lock_wait = big_lock_wait
self.ssl = ssl
self.ssl_ca = ssl_ca
self.ssl_settings = {}
self.db_debug = db_debug
self.db_debug_log = db_debug_log
if( self.ssl ):
if( self.ssl_ca ):
self.ssl_settings['ca'] = ssl_ca
else:
raise errors.ConfigError('ssl_ca not specified in config file.')
self.transaction_init = False
self.connection = None
self.cursor = None
# This is generated only when ListRow is called and is then cached for
# the life of the object.
self.foreign_keys = []
self.data_validation_instance = None
self.locked_db = False
self.thread_safe = thread_safe
self.queue = Queue.Queue()
self.now_serving = None
self.queue_update_lock = threading.Lock()
def close(self):
"""Closes a connection that has been opened.
A new connection will be created on StartTransaction.
"""
if( self.connection is not None ):
self.connection.close()
self.connection = None
def cursor_execute(self, execution_string, values={}):
"""This function allows for the capture of every mysql command that
is run in this class.
Inputs:
execution_string: mysql command string
values: dictionary of values for mysql command
"""
if( self.db_debug ):
if( self.db_debug_log ):
#If the execution_string contains a unicode character we must account
#for it. So we need to use the codecs package to write to a utf-8 log
#file, instead of ASCII like the 'normal' open() results in.
debug_log_handle = codecs.open(self.db_debug_log, encoding='utf-8',
mode='a')
debug_log_handle.write(execution_string % values)
debug_log_handle.write('\n')
debug_log_handle.close()
else:
print execution_string % values
try:
self.cursor.execute(execution_string, values)
except MySQLdb.ProgrammingError:
raise
except MySQLdb.Error, e:
if( e[0] in errors.PARSABLE_MYSQL_ERRORS ):
raise errors.DatabaseError(e)
else:
raise
def StartTransaction(self):
"""Starts a transaction.
Also it starts a db connection if none exists or it times out.
Always creates a new cursor.
This function also serializes all requests on this object and if the
big lock has been activated will wait for it to be released.
Raises:
TransactionError: Cannot start new transaction last transaction not
committed or rolled-back.
"""
if( self.thread_safe ):
unique_id = uuid.uuid4()
self.queue.put(unique_id)
while_sleep = 0
while( unique_id != self.now_serving ):
time.sleep(while_sleep)
self.queue_update_lock.acquire()
if( self.now_serving is None ):
self.now_serving = self.queue.get()
self.queue_update_lock.release()
while_sleep = 0.005
else:
if( self.transaction_init ):
raise errors.TransactionError('Cannot start new transaction last '
'transaction not committed or '
'rolled-back.')
if( self.connection is not None ):
try:
self.cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)
self.cursor_execute('DO 0') # NOOP to test connection
except MySQLdb.OperationalError:
self.connection = None
if( self.connection is None ):
if( self.ssl ):
self.connection = MySQLdb.connect(
host=self.db_host, user=self.db_user, passwd=self.db_passwd,
db=self.db_name, use_unicode=True, charset='utf8',
ssl=self.ssl_settings)
else:
self.connection = MySQLd
|
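The StartTransaction method above serializes concurrent callers by handing each one a uuid ticket through a Queue and serving tickets strictly in arrival order. A minimal standalone sketch of that ticketing pattern, with illustrative names and no MySQL involved:
# Sketch of the uuid-ticket serialization used by StartTransaction above.
# Names are illustrative; nothing here talks to MySQL.
import threading
import time
import uuid
try:
    import Queue as queue_mod   # Python 2, as in the module above
except ImportError:
    import queue as queue_mod   # Python 3
class TicketSerializer(object):
    def __init__(self):
        self.queue = queue_mod.Queue()
        self.now_serving = None
        self.update_lock = threading.Lock()
    def acquire(self):
        ticket = uuid.uuid4()
        self.queue.put(ticket)
        while_sleep = 0
        while ticket != self.now_serving:
            time.sleep(while_sleep)
            with self.update_lock:
                if self.now_serving is None:
                    # The slot is free: promote the oldest waiting ticket.
                    self.now_serving = self.queue.get()
            while_sleep = 0.005
    def release(self):
        # Free the slot so the next queued ticket can be promoted.
        self.now_serving = None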
adybbroe/pypps_reader
|
test/test_ppsread.py
|
Python
|
gpl-3.0
| 1,228
| 0.001629
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Adam.Dybbroe
# Author(s):
# Adam.Dybbroe <a000680@c14526.ad.smhi.se>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GN
|
U General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Unittests reading various PPS data files
"""
import os
impor
|
t unittest
from pypps_reader import NwcSafPpsData
# The test data cases:
# FIXME!
class Test(unittest.TestCase):
"""Unit testing the pps reading"""
def setUp(self):
"""Set up"""
return
def test_read(self):
"""Test that it is possible to read pps products"""
return
def tearDown(self):
"""Clean up"""
return
|
radomd92/botjagwar
|
api/importer/rakibolanamalagasy.py
|
Python
|
mit
| 1,609
| 0
|
from threading import Lock
import requests
from api.decorator import critical_section
from api.importer import AdditionalDataImporter
from api.importer import AdditionalDataImporterError
from api.importer.wiktionary import dyn_backend
rmi_lock = Lock()
class DictionaryImporter(AdditionalDataImporter):
def populate_cache(self, language):
rq_params = {
'language': 'eq.' + language
}
response = requests.get(dyn_backend.backend + '/word', rq_params)
query = response.json()
for json in query:
self.word_id_cache[(json['word'], json['language'])] = json['id']
class TenyMalagasyImporter(DictionaryImporter):
data_type = 'tenymalagasy/definition'
class RakibolanaMalagasyImporter(DictionaryImporter):
data_type = 'rakibolana/definition'
@critical_section(rmi_lock)
def write_tif(self, title, language, additional_data):
temp = self.data_type
self.data_type = 'rakibolana/derived'
try:
self.write_additional_data(title, language, additional_data)
except AdditionalDataImporterError as exc:
pass
self.data_type = temp
@critical_section(rmi_lock)
def write_raw(self, title, language, additional_data):
|
temp = self.data_type
self.data_type = 'rakibolana/raw'
tr
|
y:
self.write_additional_data(title, language, additional_data)
except AdditionalDataImporterError as exc:
pass
self.data_type = temp
def get_data(self, template_title: str, wikipage: str, language: str):
pass
|
pkimber/pay
|
pay/tests/test_management_command.py
|
Python
|
apache-2.0
| 280
| 0
|
# -*-
|
encoding: utf-8 -*-
from django.test import TestCase
from pay.management.commands import init_app_pay
class TestCommand(TestCase):
def test_init_app(self):
""" Test the management command """
command
|
= init_app_pay.Command()
command.handle()
|
ruchikd/Algorithms
|
Python/ShortestPalindrome/shortestPalindrome.py
|
Python
|
gpl-3.0
| 230
| 0.052174
|
def sh
|
ortestPalindrome(str):
if str is None:
return
strlen = len(str)
for x in xrange(strlen):
print str[x]
def main():
print "Hello, world"
str = 'aacecaaa'
shortestPalindrome(str)
if __name__ == '__main__':
ma
|
in()
|
amitskwalia/codesters
|
resources/migrations/0004_auto__add_featuredresource__add_unique_featuredresource_topic_resource.py
|
Python
|
mit
| 8,260
| 0.008354
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FeaturedResource'
db.create_table('resources_featuredresource', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('topic', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['resources.Topic'])),
('resource_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['resources.ResourceType'])),
('resource', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['resources.Resource'])),
))
db.send_create_signal('resources', ['FeaturedResource'])
# Adding unique constraint on 'FeaturedResource', fields ['topic', 'resource_type']
db.create_unique('resources_featuredresource', ['topic_id', 'resource_type_id'])
def backwards(self, orm):
# Removing unique constraint on 'FeaturedResource', fields ['topic', 'resourc
|
e_type']
db.delete_unique('resources_featuredresource', ['topic_id', 'resource_type_id'])
# Deleting model 'FeaturedResource'
db.delete_table('resources_featuredresource')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [],
|
{'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'resources.featuredresource': {
'Meta': {'unique_together': "(('topic', 'resource_type'),)", 'object_name': 'FeaturedResource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['resources.Resource']"}),
'resource_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['resources.ResourceType']"}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['resources.Topic']"})
},
'resources.resource': {
'Meta': {'object_name': 'Resource'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'resource_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['resources.ResourceType']"}),
'show': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['resources.Topic']", 'symmetrical': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'})
},
'resources.resourcetype': {
'Meta': {'object_name': 'ResourceType'},
'color': ('django.db.models.fields.CharField', [], {'default': "'purple'", 'unique': 'True', 'max_length': '20'}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
'resources.topic': {
'Meta': {'ordering': "['name']", 'object_name': 'Topic'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'official_website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'nul
|
colloquium/rhevm-api
|
python/pool-test.py
|
Python
|
lgpl-2.1
| 1,969
| 0.013713
|
#!/usr/bin/env python
# Copyright (C) 2010 Red Hat, Inc.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free
# Software Foundation, Inc
|
., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
import http
import xmlfmt
import yamlfmt
import jsonfmt
from testutils import *
opts = parseOptions()
(cluster, template) = (None, None)
if len(opts['oargs']) >= 2:
(cluster, template) = opts['oargs'][0:2]
links = http.HEAD_for_links(opts)
for fmt in [xmlfmt]:
t = TestUtils(opts, fmt)
print "=== ", fmt.MEDIA_TYPE,
|
" ==="
for pool in t.get(links['vmpools']):
t.get(pool.href)
if cluster is None:
continue
pool = fmt.VmPool()
pool.name = randomName('foo')
pool.size = "2"
pool.cluster = fmt.Cluster()
pool.cluster.id = t.find(links['clusters'], cluster).id
pool.template = fmt.Template()
pool.template.id = t.find(links['templates'], template).id
pool = t.create(links['vmpools'], pool)
vms_in_pool = []
for vm in t.get(links['vms']):
if not hasattr(vm, "vmpool"):
continue
if vm.vmpool.id == pool.id:
vms_in_pool.append(vm)
assert len(vms_in_pool) == 2, "Expected 2 VMs with pool ID '" + pool.id + "', got " + str(len(vms_in_pool))
for vm in vms_in_pool:
t.syncAction(vm.actions, "detach")
t.delete(vm.href)
t.delete(pool.href)
|
amacd31/bom_data_parser
|
tests/test_observations_json.py
|
Python
|
bsd-3-clause
| 1,454
| 0.008253
|
import os
import numpy as np
import pandas as pd
import six
import unittest
from datetime import datetime
from bom_data_parser import read_obs_json
class ObsJSONTest(unittest.TestCase):
def setUp(self):
self.test_obs_json_file = os.path.join(os.path.dirname(__file__), 'data', 'IDN60901.94767.json')
def test_obs_json(self):
        data, attributes = read_obs_json(open(self.test_obs_json_file))
six.assertCountEqual(
self,
attributes.keys(),
[
'station_id',
'name',
'history_product',
'wmo',
'lat',
'lon',
'header',
'notice'
]
)
self.assertEqual(data.index[0], datetime(2014,9,6,11,0,0))
self.assertEqual(data['air_temp'].iloc[0], 15.1)
self.assertEqual(data['press'].iloc[0], 1030.6)
self.assertEqual(data['press_msl'].iloc[0], 1030.6)
self.assertEqual(data['press_qnh'].iloc[0], 1030.6)
self.
|
assertE
|
qual(data['press_tend'].iloc[0], 'F')
self.assertEqual(data['rain_trace'].iloc[0], 3.4)
self.assertEqual(data['rel_hum'].iloc[0], 69)
self.assertEqual(data.index[-1], datetime(2014,9,9,10,30,0))
self.assertEqual(data['air_temp'].iloc[-1], 20.6)
self.assertEqual(data['dewpt'].iloc[-1], 12.0)
self.assertEqual(data['delta_t'].iloc[-1], 4.8)
|
DemocracyClub/UK-Polling-Stations
|
polling_stations/apps/pollingstations/tests/factories.py
|
Python
|
bsd-3-clause
| 591
| 0
|
import factory
from councils.tests.factories import CouncilFactory
from pollingstations.models import PollingS
|
tation, PollingDistrict
class PollingStationFactory(factory.django.DjangoModelFactory):
class Meta:
model = PollingStation
council = factory.SubFactory(CouncilFactory)
internal_council_id = factory.Sequence(la
|
mbda n: f"PS-{n}")
class PollingDistrictFactory(factory.django.DjangoModelFactory):
class Meta:
model = PollingDistrict
council = factory.SubFactory(CouncilFactory)
internal_council_id = factory.Sequence(lambda n: f"PD-{n}")
|
start-jsk/jsk_apc
|
demos/selective_dualarm_stowing/python/selective_dualarm_stowing/models/alex.py
|
Python
|
bsd-3-clause
| 4,007
| 0
|
import chainer
import chainer.functions as F
import chainer.links as L
class Alex(chainer.Chain):
"""Single-GPU AlexNet without partition toward the channel axis."""
def __init__(self, n_class=1000, threshold=0.5, pt_func=None):
self.threshold = threshold
self.pt_func = pt_func
self.n_class = n_class
super(Alex, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(3, 96, 11, stride=4, pad=4)
self.bn1 = L.BatchNormalization(96)
self.conv2 = L.Convolution2D(96, 256, 5, stride=1, pad=1)
self.bn2 = L.BatchNormalization(256)
self.conv3 = L.Convolution2D(256, 384, 3, stride=1, pad=1)
self.conv4 = L.Convolution2D(384, 384, 3, stride=1, pad=1)
self.conv5 = L.Convolution2D(384, 256, 3, stride=1, pad=1)
self.bn5 = L.BatchNormalization(256)
self.fc6 = L.Linear(33280, 4096)
self.fc7 = L.Linear(4096, 4096)
self.fc8 = L.Linear(4096, 2*n_class)
def __call__(self, x, t=None):
n_batch = len(x)
assert n_batch == len(t)
h = F.relu(self.bn1(self.conv1(x)))
h = F.max_pooling_2d(h, 3, stride=2)
h = F.relu(self.bn2(self.conv2(h)))
h = F.max_pooling_2d(h, 3, stride=2)
h = F.relu(self.conv3(h))
h = F.relu(self.conv4(h))
h = F.relu(self.bn5(self.conv5(h)))
h = F.max_pooling_2d(h, 3, stride=3)
if not self.train_conv:
h.unchain_backward()
h = F.dropout(F.relu(self.fc6(h)), ratio=0.5)
h = F.dropout(F.relu(self.fc7(h)), ratio=0.5)
h = self.fc8(h)
h = h.reshape((-1, 2, self.n_class))
h_prob = F.softmax(h, axis=1)[:, 1, :]
self.h_prob = h_prob
if t is None:
assert not chainer.config.train
return
        half_n = self.n_class // 2  # integer division: used below as a slice index
is_singlearm_mask = t[:, half_n] == -1
# loss for single arm
h_single = h[is_singlearm_mask][:, :, :half_n]
t_single = t[is_singlearm_mask][:, :half_n]
# Requires: https://github.com/chainer/chainer/pull/3310
if h_single.data.shape[0] > 0:
loss_single = F.softmax_cross_entropy(
h_single, t_single, normalize=False)
else:
loss_single = None
# loss for dual arm
h_dual = h[~is_singlearm_mask][:, :, half_n:]
t_dual = t[~is_singlearm_mask][:, half_n:]
# Requires: https://github.com/chainer/chainer/pull/3310
if h_dual.data.shape[0] > 0:
loss_dual = F.softmax_cross_entropy(
h_dual, t_dual, normalize=False)
else:
loss_dual = None
if loss_single is None:
self.loss = loss_dual
elif loss_dual is None:
self.loss = loss_single
else:
self.loss = loss_single + loss_dual
# calculate acc on CPU
h_prob_single = h_prob[is_singlearm_mask][:, :half_n]
h_prob_single = chainer.cuda.to_cpu(h_prob_single.data)
t_single = chainer.cuda.to_cpu(t_single)
h_prob_dual = h_prob[~is_singlearm_mask][:, half_n:]
h_prob_dual = chainer.cuda.to_cpu(h_prob_dual.data)
t_dual = chainer.cuda.to_cpu(t_dual)
label_single = (h_prob_single > self.threshold).astype(self.xp.int32)
label_dual = (h_prob_dual > self.threshold).astype(self.xp.int32)
acc_single = (t_single == label_single).all(axis=1)
acc_single = acc_single.astype(self.xp.int32).flatten()
acc_dual = (t_dual == label_dual).all(axis=1)
acc_dual = acc_dual.astype(self.xp.int32).flatten()
self.acc = self.xp.sum(acc_single) + self.xp.sum(acc_dual)
self.acc = self.a
|
cc / float(len(acc_single) + len(acc_dual))
chainer.re
|
porter.report({
'loss': self.loss,
'acc': self.acc,
}, self)
if chainer.config.train:
return self.loss
|
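The __call__ above splits each batch by checking whether the dual-arm half of the label vector is -1, then scores the two groups against different halves of the output. A numpy-only sketch of that mask split (toy shapes, no chainer required):
# Numpy-only sketch of the single-arm / dual-arm batch split used above.
import numpy as np
n_class = 6                     # toy value; the real model takes this from its constructor
half_n = n_class // 2
t = np.array([[1, 0, 1, -1, -1, -1],    # single-arm sample: dual-arm half is all -1
              [0, 1, 0,  1,  0,  1]])   # dual-arm sample
h = np.random.randn(2, 2, n_class)      # (batch, {neg, pos}, n_class) scores
is_singlearm_mask = t[:, half_n] == -1
h_single = h[is_singlearm_mask][:, :, :half_n]
t_single = t[is_singlearm_mask][:, :half_n]
h_dual = h[~is_singlearm_mask][:, :, half_n:]
t_dual = t[~is_singlearm_mask][:, half_n:]
print(h_single.shape, t_single.shape)   # (1, 2, 3) (1, 3)
print(h_dual.shape, t_dual.shape)       # (1, 2, 3) (1, 3)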
LowieHuyghe/script-core
|
scriptcore/encoding/encoding.py
|
Python
|
apache-2.0
| 2,861
| 0.00035
|
import sys
class Encoding(object):
@staticmethod
def normalize(value):
"""
Normalize value
:param value: The value
:return: The processed value
"""
# Python 2 vs Python 3
if sys.version_info < (3, 0):
return Encoding.to_ascii(value)
else:
return Encoding.to_unicode(value)
@staticmethod
def to_ascii(value):
"""
To ascii
:param value: The value
:return: The processed value
"""
# Dict
if isinstance(value, dict):
processed_value = {}
for key in value:
if Encoding._is_unicode(key):
processed_key = key.encode('ascii')
else:
processed_key = key
processed_value[processed_key] = Encoding.to_ascii(value[key])
# List
elif isinstance(value, list):
processed_value = []
            for item in value:
                processed_value.append(Encoding.to_ascii(item))
# Unicode
elif Encoding._is_unicode(value):
processed_value = value.encode('ascii')
else:
processed_value = value
return processed_value
@staticmethod
def to_unicode(value):
"""
To unicode
:param value: The value
:return: The processed value
"""
# Dict
if isinstance(value, dict):
processed_value = {}
for key in value:
if Encoding._is_ascii(key):
processed_key = key.decode('utf-8')
else:
processed_key = key
processed_value[processed_key] = Encoding.to_unicode(value[key])
# List
elif isinstance(value, list):
processed_value = []
            for item in value:
                processed_value.append(Encoding.to_unicode(item))
# Unicode
elif Encoding._is_ascii(value):
processed_value = value.decode('utf-8')
else:
processed_value = value
return processed_value
@staticmethod
def _is_ascii(value):
"""
Check if ascii
:param value: The value
:return: Ascii or not
"""
# Python 2 vs Python 3
if sys.version_info < (3, 0):
return isinstance(value, str)
else:
return isinstance(value, bytes)
@staticmethod
def _is_unicode(value):
"""
Check i
|
f unicode
:param val
|
ue: The value
:return: Ascii or not
"""
# Python 2 vs Python 3
if sys.version_info < (3, 0):
return isinstance(value, unicode)
else:
return isinstance(value, str)
|
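A short usage sketch for the Encoding helper above; the values are deliberately ASCII-safe because to_ascii() uses the strict ascii codec, and the import path simply mirrors the file location shown above.
# Usage sketch for Encoding.normalize (ASCII-safe values on purpose).
from scriptcore.encoding.encoding import Encoding
data = {u'name': u'frog', u'tags': [u'core', u'cli']}
normalized = Encoding.normalize(data)
# Python 2: unicode keys/values come back as byte strings ('name', 'frog', ...).
# Python 3: the dict is returned unchanged, since str is already unicode there.
print(normalized)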
pizzapanther/GAE-Bulk-Mailer
|
bulkmail/middleware.py
|
Python
|
bsd-2-clause
| 2,043
| 0.029369
|
from django import http
from django.conf import settings
from google.appengine.api import users
from .exceptions import ParameterRequired, ApiException
class User (object):
def __init__ (self):
self._email = None
self._is_super = None
self._is_auth = None
    self._userid = None
    self._is_staff = None
    self.user = None
def is_authorized (self):
if self._is_auth is None:
user = users.get_current_user()
if user:
self.user = user
self._email = self.user.email()
self._userid = self.user.user_id()
self._is_super = False
self._is_auth = True
self._is_staff = False
if self._email.lower() in settings.SUPER_ADMINS:
self._is_super = True
self._is_staff = True
elif self._ema
|
il.lower() in settings.STAFF_USERS:
self._is_staff = True
else:
for d in settings.STAFF_DOMAINS:
if self._email.lower().endswith(d):
self._is_staff = True
break
else:
self._is_auth = False
return self._is_auth
@property
def email (self):
if self._is_auth is None:
self.is_authorized()
return self._email
@property
def is_super (self):
if
|
self._is_auth is None:
self.is_authorized()
return self._is_super
@property
def is_staff (self):
if self._is_auth is None:
self.is_authorized()
return self._is_staff
@property
def userid (self):
if self._is_auth is None:
self.is_authorized()
return self._userid
class Session (object):
def process_request (self, request):
request.user = User()
return None
class ApiExceptions (object):
def process_exception (self, request, exception):
if isinstance(exception, ParameterRequired) or isinstance(exception, ApiException):
return http.HttpResponseServerError(exception.message, mimetype="text/plain")
return None
|
ChimeraCoder/GOctober
|
july/game/urls.py
|
Python
|
mit
| 1,757
| 0
|
from django.conf.urls import patterns, url
from july.game import views
urlpatterns = patterns(
'july.game.views',
url(r'^people/$',
views.PlayerList.as_view(),
name='leaderboard'),
url(r'^people/(?P<year>\d{4})/(?P<month>\d{1,2})/((?P<day>\d{1,2})/)?$',
views.PlayerList.as_view(),
name='leaderboard'),
url(r'^teams/$',
views.TeamCollection.as_view(),
name='teams'),
url(r'^teams/(?P<year>\d{4})/(?P<month>\d{1,2})/((?P<day>\d{1,2})/)?$',
views.TeamCollection.as_view(),
name='teams'),
url(r'^teams/(?P<slug>[a-zA-Z0-9\-]+)/$',
views.TeamView.as_view(),
name='team-details'),
url(r'^location/$',
views.LocationCollection.as_view(),
name='locations'),
url(r'^location/(?P<year>\d{4})/(?P<month>\d{1,2})/((?P<day>\d{1,2})/)?$',
views.LocationCollection.as_view(),
name='locations'),
url(r'^location/(?P<slug>[a-zA-Z0-9\-]+)/$',
views.LocationView.as_view(),
name='location-detail'),
url(r'^projects/$',
views.BoardList.as_view(),
name='projects'),
url(r'^projects/(?P<year>\d{4})/(?P<month>\d{1,2})/((?P<
|
day>\d{1,2})/)?$',
views.BoardList.as_view(),
name='projects'),
url(r'^projects/(?P<slug>.+)/$',
views.ProjectView.as_view(),
name='project-details'),
url(r'^languages/$',
views.LanguageBoardList.as_view(),
name='languages'),
url(r'^languages/(?P<year>\d{4})/(?P<month>\d{1,2})/((?P<day>\d{1,2})/)?$',
views.LanguageBoardList.as_view(),
name='languages'),
|
# for local only debug purposes
url(r'^events/(?P<action>pub|sub|ws)/(?P<channel>.*)$',
'events', name='events'),
)
|
Furzoom/learnpython
|
usefull_scripts/multiprocessing/multiprocessing5_1.py
|
Python
|
mit
| 709
| 0.005642
|
import multiprocessing
import sys
import time
def worker_with(f):
fs = open(f, 'a+')
n = 10
while n > 1:
fs.write("Locked acquired via with\n")
time.sleep(1)
n -= 1
fs.close()
def worker_no_with(f):
fs = open(f, 'a+')
n = 10
while n > 1:
fs.write("Locked acquired via direc
|
tly\n")
n -= 1
time.sleep(1)
fs.close()
if __name__ == "__main__":
lock = multiprocessing.Lock()
f = 'file.txt'
w = multiprocessing.Process(target=worker_with, args=(f,))
nw = multiprocessing.Process(target=worker_no_with,
|
args=(f,))
w.start()
nw.start()
print('end')
|
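Note that the script above creates lock but never hands it to the workers, so the two processes interleave their writes freely. A variant that actually guards the file with the multiprocessing.Lock, shown both with the context manager and with explicit acquire/release (illustrative, not part of the original script):
import multiprocessing
import time
def worker_with(lock, f):
    # 'with lock:' acquires the lock for the whole write loop and releases it on exit
    with lock:
        with open(f, 'a+') as fs:
            for _ in range(5):
                fs.write("Lock acquired via with\n")
                time.sleep(0.1)
def worker_no_with(lock, f):
    # same thing with an explicit acquire()/release() pair
    lock.acquire()
    try:
        with open(f, 'a+') as fs:
            for _ in range(5):
                fs.write("Lock acquired directly\n")
                time.sleep(0.1)
    finally:
        lock.release()
if __name__ == "__main__":
    lock = multiprocessing.Lock()
    f = 'file.txt'
    w = multiprocessing.Process(target=worker_with, args=(lock, f))
    nw = multiprocessing.Process(target=worker_no_with, args=(lock, f))
    w.start()
    nw.start()
    w.join()
    nw.join()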
arannasousa/pagseguro_xml
|
exemplos/testes_notificacao.py
|
Python
|
gpl-2.0
| 1,090
| 0.00276
|
# coding=utf-8
# ---------------------------------------------------------------
# Developer: Arannã Sousa Santos
# Month: 12
# Year: 2015
# Projeto: pagseguro_xml
# e-mail: asousas@live.com
# ---------------------------------------------------------------
import logging
from pagseguro_xml.notificacao import ApiPagSeguroNotificacao_v3, CONST_v3
logger = logging.basicConfig(level=logging.DEBUG)
PAGSEGURO_API_AMB
|
IENTE = u'sandbox'
PAGSEGURO_API_EMAIL = u'seu@email.com'
PAGSEGURO_API_TOKEN_PRODUCAO = u''
PAGSEGURO_API_TOKEN_SANDBOX = u''
CHAVE_NOTIFICACAO = u'AA0000-AA00A0A0AA00-AA00AA000000-AA0000'  # this key is from production
api = ApiPagSeguroNotificacao_v3(ambiente=CONST_v3.AMBIENTE.SANDBOX)
PAGSEGURO_API_TOKEN = PAGSEGURO_API_TOKEN_PRODUCAO
ok, retorno = api.consulta_notificacao_transacao_v3(PAGSEGURO_API_EMAIL, PAGSEGURO_API_TOKEN, CHAVE_NOTIFICACAO)
if ok:
print u'
|
-' * 50
print retorno.xml
print u'-' * 50
for a in retorno.alertas:
print a
else:
print u'Motivo do erro:', retorno
|
fastcoinproject/fastcoin
|
qa/rpc-tests/rest.py
|
Python
|
mit
| 3,264
| 0.010723
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework import BitcoinTestFramework
from util import *
import json
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
def http_get_call(host, port, path, response_object = 0):
conn = httplib.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (BitcoinTestFramework):
FORMAT_S
|
EPARATOR = "."
def run_test(self):
url = urlparse.urlparse(self.nodes[0].url)
bb_h
|
ash = self.nodes[0].getbestblockhash()
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check json format
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['hash'], bb_hash)
# do tx test
        tx_hash = json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# now mine the transactions
newblockhash = self.nodes[1].setgenerate(True, 1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
if __name__ == '__main__':
RESTTest ().main ()
|
lantianlz/zx
|
www/stock/sync_stock_data_client.py
|
Python
|
gpl-2.0
| 1,935
| 0.0052
|
# -*- coding: utf-8 -*-
import requests, re, json, time, datetime, traceback, random
from pyquery import PyQuery as pq
# host = "www.a.com:80
|
00"
host = "www.zhixuan.com"
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36"}
def get_stocks():
url = "http://%s/stock/get_stock_json" % host
req = requests.get(url)
return json.loads(req.text)
def sync():
stocks = get_stocks()
for stock in stocks:
time.sleep(4)
url = "http://hq.sinajs.cn/list=%s%s" % (stock['market']
|
, stock['code'])
resp = None
for j in range(3):
try:
resp = requests.get(url, timeout=10, headers=headers)
break
except Exception, e:
                print stock, stock['id']
print e
if not resp or resp.status_code != 200:
continue
text = resp.text
datas = text.split('="')[1].split('"')[0].split(',')
if len(datas) < 3:
continue
date = datetime.datetime.strptime(datas[-3], "%Y-%m-%d").date()
open_price = datas[1]
high_price = datas[4]
low_price = datas[5]
close_price = datas[3]
volume = datas[8]
turnover = datas[9]
if date < datetime.datetime.now().date() or float(turnover) < 100:
continue
# 提交到服务器
response = requests.post(
"http://%s/stock/sync_stock_data" % host,
data = {
'stock_id': stock['id'],
'date': date,
'open_price': open_price,
'high_price': high_price,
'low_price': low_price,
'close_price': close_price,
'volume': volume,
'turnover': turnover
}
)
if __name__ == "__main__":
sync()
|
tuffery/Frog2
|
frowns/Depict/.happydoc.TkMoleculeDrawer.py
|
Python
|
gpl-3.0
| 4,720
| 0.134322
|
(S'7f2210613c44962221805a1b28aa76d6'
p1
(ihappydoclib.parseinfo.moduleinfo
ModuleInfo
p2
(dp3
S'_namespaces'
p4
((dp5
S'TkDrawer'
p6
(ihappydoclib.parseinfo.classinfo
ClassInfo
p7
(dp8
g4
((dp9
(dp10
tp11
sS'_filename'
p12
S'../python/frowns/Depict/TkMoleculeDrawer.py'
p13
sS'_docstring'
p14
S''
sS'_class_member_info'
p15
(lp16
sS'_name'
p17
g6
sS'_parent'
p18
g2
sS'_comment_info'
p19
(dp20
sS'_base_class_info'
p21
(lp22
S'DrawMolHarness'
p23
aS'TkMixin'
p24
asS'_configuration_values'
p25
(dp26
sS'_class_info'
p27
g9
sS'_function_info'
p28
g10
sS'_comments'
p29
S''
sbsS'TkMixin'
p30
(ihappydoclib.parseinfo.classinfo
ClassInfo
p31
(dp32
g4
((dp33
(dp34
S'pack_forget'
p35
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p36
(dp37
g4
((dp38
(dp39
tp40
sS'_exception_info'
p41
(dp42
sS'_parameter_names'
p43
(S'self'
p44
tp45
sS'_parameter_info'
p46
(dp47
g44
(NNNtp48
ssg12
g13
sg14
S''
sg17
g35
sg18
g31
sg19
g20
sg25
(dp49
sg2
|
7
g38
sg28
g39
sg29
S''
sbsS'_resize'
p50
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p51
(dp52
g4
((dp53
(dp54
tp55
sg41
(dp56
sg43
(S'self'
p57
S'event'
p58
tp59
sg46
(dp60
g57
(NNNtp61
sg58
(NNNtp62
ssg12
g13
sg14
S'(event) -> resive the drawing to eve
|
nt.height, event.width'
p63
sg17
g50
sg18
g31
sg19
g20
sg25
(dp64
sg27
g53
sg28
g54
sg29
S''
sbsS'_clear'
p65
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p66
(dp67
g4
((dp68
(dp69
tp70
sg41
(dp71
sg43
(S'self'
p72
tp73
sg46
(dp74
g72
(NNNtp75
ssg12
g13
sg14
S''
sg17
g65
sg18
g31
sg19
g20
sg25
(dp76
sg27
g68
sg28
g69
sg29
S''
sbsS'_init'
p77
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p78
(dp79
g4
((dp80
(dp81
tp82
sg41
(dp83
sg43
(S'self'
p84
S'master'
p85
S'height'
p86
S'width'
p87
tp88
sg46
(dp89
g87
(NNNtp90
sg84
(NNNtp91
sg85
(NNNtp92
sg86
(NNNtp93
ssg12
g13
sg14
S''
sg17
g77
sg18
g31
sg19
g20
sg25
(dp94
sg27
g80
sg28
g81
sg29
S''
sbsS'postscript'
p95
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p96
(dp97
g4
((dp98
(dp99
tp100
sg41
(dp101
sg43
(S'self'
p102
S'*a'
p103
S'*kw'
p104
tp105
sg46
(dp106
g102
(NNNtp107
sg104
(NNNtp108
sg103
(NNNtp109
ssg12
g13
sg14
S'return a postscript image of the current molecule arguments\n are sent to the Tkinter canvas postscript method'
p110
sg17
g95
sg18
g31
sg19
g20
sg25
(dp111
sg27
g98
sg28
g99
sg29
S''
sbsS'_drawLine'
p112
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p113
(dp114
g4
((dp115
(dp116
tp117
sg41
(dp118
sg43
(S'self'
p119
S'x1'
p120
S'y1'
p121
S'x2'
p122
S'y2'
p123
S'color'
p124
tp125
sg46
(dp126
g123
(NNNtp127
sg124
(NNNtp128
sg119
(NNNtp129
sg122
(NNNtp130
sg121
(NNNtp131
sg120
(NNNtp132
ssg12
g13
sg14
S''
sg17
g112
sg18
g31
sg19
g20
sg25
(dp133
sg27
g115
sg28
g116
sg29
S''
sbsS'grid'
p134
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p135
(dp136
g4
((dp137
(dp138
tp139
sg41
(dp140
sg43
(S'self'
p141
S'*a'
p142
S'*kw'
p143
tp144
sg46
(dp145
g141
(NNNtp146
sg143
(NNNtp147
sg142
(NNNtp148
ssg12
g13
sg14
S''
sg17
g134
sg18
g31
sg19
g20
sg25
(dp149
sg27
g137
sg28
g138
sg29
S''
sbsS'_drawOval'
p150
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p151
(dp152
g4
((dp153
(dp154
tp155
sg41
(dp156
sg43
(S'self'
p157
S'x'
S'y'
S'xh'
p158
S'yh'
p159
tp160
sg46
(dp161
S'y'
(NNNtp162
sS'x'
(NNNtp163
sg157
(NNNtp164
sg158
(NNNtp165
sg159
(NNNtp166
ssg12
g13
sg14
S''
sg17
g150
sg18
g31
sg19
g20
sg25
(dp167
sg27
g153
sg28
g154
sg29
S''
sbsS'_drawText'
p168
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p169
(dp170
g4
((dp171
(dp172
tp173
sg41
(dp174
sg43
(S'self'
p175
S'text'
p176
S'font'
p177
S'fontsize'
p178
S'x'
S'y'
S'color'
p179
S'bg'
p180
tp181
sg46
(dp182
g180
(I1
S'"white"'
Ntp183
sg179
(NNNtp184
sg176
(NNNtp185
sg175
(NNNtp186
sg178
(NNNtp187
sS'y'
(NNNtp188
sS'x'
(NNNtp189
sg177
(NNNtp190
ssg12
g13
sg14
S''
sg17
g168
sg18
g31
sg19
g20
sg25
(dp191
sg27
g171
sg28
g172
sg29
S''
sbsS'pack'
p192
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p193
(dp194
g4
((dp195
(dp196
tp197
sg41
(dp198
sg43
(S'self'
p199
S'*a'
p200
S'*kw'
p201
tp202
sg46
(dp203
g199
(NNNtp204
sg201
(NNNtp205
sg200
(NNNtp206
ssg12
g13
sg14
S''
sg17
g192
sg18
g31
sg19
g20
sg25
(dp207
sg27
g195
sg28
g196
sg29
S''
sbstp208
sg12
g13
sg14
S''
sg15
(lp209
sg17
g30
sg18
g2
sg19
g20
sg21
(lp210
sg25
(dp211
sg27
g33
sg28
g34
sg29
S''
sbs(dp212
tp213
sS'_import_info'
p214
(ihappydoclib.parseinfo.imports
ImportInfo
p215
(dp216
S'_named_imports'
p217
(dp218
S'Tkinter'
p219
(lp220
S'*'
asS'MoleculeDrawer'
p221
(lp222
S'DrawMolHarness'
p223
assS'_straight_imports'
p224
(lp225
sbsg12
g13
sg14
S''
sg17
S'TkMoleculeDrawer'
p226
sg18
Nsg19
g20
sg25
(dp227
S'include_comments'
p228
I1
sS'cacheFilePrefix'
p229
S'.happydoc.'
p230
sS'useCache'
p231
I1
sS'docStringFormat'
p232
S'StructuredText'
p233
ssg27
g5
sg28
g212
sg29
S''
sbt.
|
djsilenceboy/LearnTest
|
Python_Test/PyFinanceApiSample/com/djs/learn/test/TestCheckYahooFinanceCurrencyData.py
|
Python
|
apache-2.0
| 709
| 0.00141
|
'''
Check Yahoo finance currency data helper.
Update log: (date / version / author : comments)
2017-12-10 / 1.0.0 / Du Jiang : Creation
2017-12-13 / 2.0.0 / Du Jiang : Use new API
'''
from com.djs.learn.financeapi import CheckFinanceDataRequests
__data_type = 1
__inventory_info_file_path = "../../../../etc/CurrencyInfo.csv"
__result_output_file_path = "../../../../Temp/CurrencyDataY.json"
argv = ["-d", __data_type, "-i", __inventory_info_file_path,
"-o", __result_outpu
|
t_file_path]
CheckFinanceDataRequests.main(argv)
'''
Or run:
python CheckFinanceDataRequests.py -d 1 -i "../../../../etc/CurrencyData.csv" -o "../../../../Temp/CurrencyDataY.json"
'''
if
|
__name__ == '__main__':
pass
|
frerepoulet/ZeroNet
|
src/lib/pyasn1/type/useful.py
|
Python
|
gpl-2.0
| 1,159
| 0
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
from pyasn1.type import univ, char, tag
__all__ = ['ObjectDescriptor', 'GeneralizedTime', 'UTCTime']
NoValue = univ.NoValue
noValue = univ.noValue
class ObjectDescriptor(char.GraphicString):
__doc__ = char.GraphicString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.GraphicString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 7)
)
class GeneralizedTime
|
(char.VisibleString):
__doc__ = char.GraphicString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.VisibleString.ta
|
gSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 24)
)
class UTCTime(char.VisibleString):
__doc__ = char.GraphicString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.VisibleString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 23)
)
|
byakatat/selenium-training
|
test_going_through_all_admin_sections.py
|
Python
|
apache-2.0
| 1,705
| 0.00176
|
import pytest
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
request.addfinalizer(wd.quit)
return wd
def is_element_present(driver, *args):
try:
driver.find_element(*args)
return True
except NoSuchElementException:
return False
def test_going_through_all_admin_sections(driver):
# Login to admin
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_xpath("//input[@name='username']").send_keys("admin")
driver.find_element_by_xpath("//input[@name='password']").send_keys("admin")
driver.find_element_by_xpath("//button[@name='login']").click()
# Getting all links from main menu
all_main_links = driver.find_elements_by_css_se
|
lector("#app->a")
# Find length of list with links and starting a loop
for i in range(len(all_main_l
|
inks)):
# Every loop find list with links
all_main_links = driver.find_elements_by_css_selector("#app->a")
# Click on link
link = all_main_links[i]
link.click()
# Asserting presence of header
is_element_present(driver, By.CSS_SELECTOR, "#content>h1")
# Getting all links from submenu
submenu_list = driver.find_elements_by_css_selector("#app->ul>li>a")
if len(submenu_list) > 0:
for j in range(len(submenu_list)):
submenu_list = driver.find_elements_by_css_selector("#app->ul>li>a")
submenu = submenu_list[j]
submenu.click()
is_element_present(driver, By.CSS_SELECTOR, "#content>h1")
|
cmsc421/mobility_base_tools
|
scripts/urdf_remove_pedestal.py
|
Python
|
bsd-3-clause
| 3,158
| 0.0057
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2014-2015, Dataspeed Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Dataspeed Inc. nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
import re
match_link = "(.*)<link[^>]*name\s*=\s*\"pedestal\"[^>]*>.*?[^<]<\/link>(.*)"
match_joint = "(.*)<joint[^>]*name\s*=\s*\"pedestal_fixed\"[^>]*>.*?[^<]<\/joint>(.*)"
if __name__ == '__main__':
try:
rospy.init_node('urdf_remove_pedestal', anonymous=True)
param_src = rospy.get_param('~param_src', "/robot_description")
param_dest = rospy.get_param('~param_dest', "/robot_description_mod")
urdf = rospy.get_param(param_src, "")
changed = False
if urdf:
obj = re.match(match_link, urdf, re.S)
if obj:
urdf = obj.group(1) + obj.group(2)
changed = True
rospy.loginfo("Removed link 'pedestal'")
else:
rospy.logwarn("Failed to find link 'pedestal'")
obj = re.match(match_joint, urdf, re.S)
if obj:
urdf = obj.group(1) + obj.group(2)
changed = True
rospy.loginfo("Removed joint 'pedestal_fixed'")
else:
rospy.logwarn("Failed to find joint 'pedestal_f
|
ixed'")
rospy.set_param(param_dest, urdf)
if changed:
rospy.loginfo("Updated parameter '%s'", param_dest)
else:
|
rospy.loginfo("Copied parameter '%s' to '%s'", param_src, param_dest)
else:
rospy.logwarn("Parameter '%s' not found", param_src)
except rospy.ROSInterruptException: pass
|
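The two regular expressions above cut a named <link> and <joint> element out of the URDF string; a standalone check of the link pattern on a toy URDF (raw-string form of the same expression, toy XML):
# Standalone check of the pedestal link-stripping regex above on a toy URDF string.
import re
match_link = r'(.*)<link[^>]*name\s*=\s*"pedestal"[^>]*>.*?[^<]<\/link>(.*)'
urdf = ('<robot name="demo">'
        '<link name="pedestal"><visual/></link>'
        '<link name="base_link"><visual/></link>'
        '</robot>')
obj = re.match(match_link, urdf, re.S)
if obj:
    print(obj.group(1) + obj.group(2))
    # -> <robot name="demo"><link name="base_link"><visual/></link></robot>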
AnishShah/tensorflow
|
tensorflow/tools/api/tests/api_compatibility_test.py
|
Python
|
apache-2.0
| 12,593
| 0.005559
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""TensorFlow API compatibility tests.
This test ensures all changes to the public API of TensorFlow are intended.
If this test fails, it means a change has been made to the public API. Backwards
incompatible changes are not allowed. You can run the test with
"--update_goldens" flag set to "True" to update goldens when making changes to
the public TF python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import unittest
import tensorflow as tf
from tensorflow._api import v2 as tf_v2
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# FLAGS defined at the bottom:
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""
# DEFINE_boolean, verbose_diffs, default True:
_VERBOSE_DIFFS_HELP = """
If set to true, print line by line diffs on all libraries. If set to
false, only print which libraries have differences.
"""
_API_GOLDEN_FOLDER_V1 = 'tensorflow/tools/api/golden/v1'
_API_GOLDEN_FOLDER_V2 = 'tensorflow/tools/api/golden/v2'
_TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt'
_UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt'
def _KeyToFilePath(key, api_version):
"""From a given key, construct a filepath.
Filepath will be inside golden folder for api_version.
"""
def _ReplaceCapsWithDash(matchobj):
match = matchobj.group(0)
return '-%s' % (match.lower())
case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash, key)
api_folder = (
_API_GOLDEN_FOLDER_V2 if api_version == 2 else _API_GOLDEN_FOLDER_V1)
return os.path.join(api_folder, '%s.pbtxt' % case_insensitive_key)
def _FileNameToKey(filename):
"""From a given filename, construct a key we use for api objects."""
def _ReplaceDashWithCaps(matchobj):
match = matchobj.group(0)
return match[1].upper()
base_filename = os.path.basename(filename)
base_filename_without_ext = os.path.splitext(base_filename)[0]
api_object_key = re.sub(
'((-[a-z]){1})', _ReplaceDashWithCaps, base_filename_without_ext)
return api_object_key
def _VerifyNoSubclassOfMessageVisitor(path, parent, unused_children):
"""A Visitor that crashes on subclasses of generated proto classes."""
# If the traversed object is a proto Message class
if not (isinstance(parent, type) and
issubclass(parent, message.Message)):
return
if parent is message.Message:
return
# Check that it is a direct subclass of Message.
if message.Message not in parent.__bases__:
raise NotImplementedError(
'Object tf.%s is a subclass of a generated proto Message. '
'They are not yet supported by the API tools.' % path)
class ApiCompatibilityTest(test.TestCase):
def __init__(self, *args, **kwargs):
super(ApiCompatibilityTest, self).__init__(*args, **kwargs)
golden_update_warning_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_UPDATE_WARNING_FILE)
self._update_golden_warning = file_io.read_file_to_string(
golden_update_warning_filename)
test_readme_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_TEST_README_FILE)
self._test_readme_message = file_io.read_file_to_string(
test_readme_filename)
def _AssertProtoDictEquals(self,
expected_dict,
actual_dict,
verbose=False,
update_goldens=False,
additional_missing_object_message='',
api_version=2):
"""Diff given dicts of protobufs and report differences a readable way.
Args:
expected_dict: a dict of TFAPIObject protos constructed
|
from golden
files.
      actual_dict: a dict of TFAPIObject protos constructed by reading from the
TF package linked to the test.
verbose: Whether to log the full diffs, or simply report which files were
different.
|
update_goldens: Whether to update goldens when there are diffs found.
additional_missing_object_message: Message to print when a symbol is
missing.
api_version: TensorFlow API version to test.
"""
diffs = []
verbose_diffs = []
expected_keys = set(expected_dict.keys())
actual_keys = set(actual_dict.keys())
only_in_expected = expected_keys - actual_keys
only_in_actual = actual_keys - expected_keys
all_keys = expected_keys | actual_keys
# This will be populated below.
updated_keys = []
for key in all_keys:
diff_message = ''
verbose_diff_message = ''
# First check if the key is not found in one or the other.
if key in only_in_expected:
diff_message = 'Object %s expected but not found (removed). %s' % (
key, additional_missing_object_message)
verbose_diff_message = diff_message
elif key in only_in_actual:
diff_message = 'New object %s found (added).' % key
verbose_diff_message = diff_message
else:
# Do not truncate diff
        self.maxDiff = None  # pylint: disable=invalid-name
# Now we can run an actual proto diff.
try:
self.assertProtoEquals(expected_dict[key], actual_dict[key])
except AssertionError as e:
updated_keys.append(key)
diff_message = 'Change detected in python object: %s.' % key
verbose_diff_message = str(e)
# All difference cases covered above. If any difference found, add to the
# list.
if diff_message:
diffs.append(diff_message)
verbose_diffs.append(verbose_diff_message)
# If diffs are found, handle them based on flags.
if diffs:
diff_count = len(diffs)
logging.error(self._test_readme_message)
logging.error('%d differences found between API and golden.', diff_count)
messages = verbose_diffs if verbose else diffs
for i in range(diff_count):
print('Issue %d\t: %s' % (i + 1, messages[i]), file=sys.stderr)
if update_goldens:
# Write files if requested.
logging.warning(self._update_golden_warning)
# If the keys are only in expected, some objects are deleted.
# Remove files.
for key in only_in_expected:
filepath = _KeyToFilePath(key, api_version)
file_io.delete_file(filepath)
# If the files are only in actual (current library), these are new
# modules. Write them to files. Also record all updates in files.
for key in only_in_actual | set(updated_keys):
filepath = _KeyToFilePath(key, api_version)
file_io.write_string_to_file(
filepath, text_format.MessageToString(actual_dict[key]))
else:
# Fail if we cannot fix the te
|
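The golden files referenced above are looked up through a reversible CamelCase-to-dashed-filename transform (_KeyToFilePath / _FileNameToKey). A standalone sketch of that round trip, with the regexes copied from the helpers and an illustrative folder name:
# Standalone sketch of the CamelCase key <-> dashed golden-filename round trip above.
import os
import re
def key_to_filepath(key, folder='golden/v2'):          # folder name is illustrative
    dashed = re.sub('([A-Z]{1})', lambda m: '-%s' % m.group(0).lower(), key)
    return os.path.join(folder, '%s.pbtxt' % dashed)
def filepath_to_key(filename):
    base = os.path.splitext(os.path.basename(filename))[0]
    return re.sub('((-[a-z]){1})', lambda m: m.group(0)[1].upper(), base)
path = key_to_filepath('tensorflow.DType')
print(path)                    # golden/v2/tensorflow.-d-type.pbtxt
print(filepath_to_key(path))   # tensorflow.DType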
tomka/CATMAID
|
django/applications/catmaid/tests/apis/test_stacks.py
|
Python
|
gpl-3.0
| 2,238
| 0.001787
|
# -*- coding: utf-8 -*-
import json
from .common import CatmaidApiTestCase
class StacksApiTests(CatmaidApiTestCase):
def test_stack_info(self):
self
|
.fake_authentication()
test_stack_id = 3
response = self.client.get('/%d/stack/%d/info' % (self.test_project_id, test_stack_id))
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = {
"attribution": None,
"broken_slices": {},
"canary_location": {
"
|
x": 0,
"y": 0,
"z": 0
},
"dimension": {
"x": 2048,
"y": 1536,
"z": 460
},
"comment": '<p>©2009 <a href="http://people.epfl.ch/graham.knott">Graham '
'Knott</a>.</p> <p>Public INCF data set available at the <a '
'href="http://www.incf.org/about/nodes/switzerland/data">Swiss '
'INCF Node</a>.</p>',
"description": "",
"metadata": None,
"num_zoom_levels": -1,
"orientation": 0,
"mirrors": [{
"id": 3,
"title": "",
"image_base": "http://incf.ini.uzh.ch/image-stack-fib/",
"file_extension": "jpg",
"tile_height": 256,
"tile_source_type": 1,
"tile_width": 256,
"position": 0
}],
"pid": self.test_project_id,
"ptitle": "Focussed Ion Beam (FIB)",
"placeholder_color": {
"a": 1.0,
"b": 0.0,
"g": 0.0,
"r": 0.0
},
"resolution": {
"x": 5.0,
"y": 5.0,
"z": 9.0
},
"sid": test_stack_id,
"stitle": "Focussed Ion Beam (FIB) stack of Rat Striatum\t",
"translation": {
"x": 0.0,
"y": 0.0,
"z": 0.0
},
"downsample_factors": None,
}
self.assertEqual(expected_result, parsed_response)
|
mrquim/mrquimrepo
|
script.module.myconnpy/lib/examples/engines.py
|
Python
|
gpl-2.0
| 1,881
| 0.002127
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
import sys, os
import mysql.connector
"""
Example using MySQL Connector/Python showing:
* that SHOW ENGINES works.
"""
def main(config):
output = []
db = mysql.connector.Connect(**config)
cursor = db.cursor()
# Select it again and show it
stmt_select = "SHOW ENGINES"
cursor.execute(stmt_select)
rows = cursor.fetchall()
for row in rows:
output.append(repr(row))
db.close()
return output
if __name__ == '__main__':
#
# Configure MySQL login and database to use in config.py
#
from config im
|
port Config
config = Config.dbinfo().copy()
|
out = main(config)
print('\n'.join(out))
|
geolinkedata/geolod-api
|
api/tests/test_triplestores.py
|
Python
|
gpl-2.0
| 1,415
| 0.001413
|
from rest_framewor
|
k.test import APITestCase
from rest_framework.test import APIRequestFactory, APIClient, \
force_authenticate
from api.views import TripleStoreList
from api.tests.common import setup_user
class TestTripleStoresView(APITestCase):
def setUp(self):
self.factory = APIRequ
|
estFactory()
self.client = APIClient()
self.user = setup_user()
self.view = TripleStoreList.as_view()
self.uri = '/v1/geo/data/triple-stores/'
def test_not_authenticated_uri(self):
"""
        unauthenticated requests must be rejected (401)
"""
request = self.factory.get(self.uri)
response = self.view(request)
response.render()
self.assertEqual(response.status_code, 401,
'Expected Response Code 401, received {0} instead.'
.format(response.status_code))
def test_authenticated_uri(self):
"""
        ensure the uri is accessible when authenticated
"""
request = self.factory.get(self.uri)
force_authenticate(request, self.user)
response = self.view(request)
response.render()
self.assertEqual(response.status_code, 200,
'Expected Response Code 200, received {0} instead.'
.format(response.status_code))
def test_post(self):
"""
        test uploading a triple store file
"""
pass
|
samuelcolvin/pydantic
|
tests/test_networks_ipaddress.py
|
Python
|
mit
| 17,339
| 0.002653
|
from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network
import pytest
from pydantic import BaseModel, IPvAnyAddress, IPvAnyInterface, IPvAnyNetwork, ValidationError
#
# ipaddress.IPv4Address
# ipaddress.IPv6Address
# pydantic.IPvAnyAddress
#
@pytest.mark.parametrize(
'value,cls',
[
('0.0.0.0', IPv4Address),
('1.1.1.1', IPv4Address),
('10.10.10.10', IPv4Address),
('192.168.0.1', IPv4Address),
('255.255.255.255', IPv4Address),
('::1:0:1', IPv6Address),
('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff', IPv6Address),
(b'\x00\x00\x00\x00', IPv4Address),
(b'\x01\x01\x01\x01', IPv4Address),
(b'\n\n\n\n', IPv4Address),
(b'\xc0\xa8\x00\x01', IPv4Address),
(b'\xff\xff\xff\xff', IPv4Address),
(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01', IPv6Address),
(b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff', IPv6Address),
(0, IPv4Address),
(16_843_009, IPv4Address),
(168_430_090, IPv4Address),
(3_232_235_521, IPv4Address),
(4_294_967_295, IPv4Address),
(4_294_967_297, IPv6Address),
(340_282_366_920_938_463_463_374_607_431_768_211_455, IPv6Address),
(IPv4Address('192.168.0.1'), IPv4Address),
(IPv6Address('::1:0:1'), IPv6Address),
],
)
def test_ipaddress_success(value, cls):
class Model(BaseModel):
ip: IPvAnyAddress
assert Model(ip=value).ip == cls(value)
@pytest.mark.parametrize(
'value',
[
'0.0.0.0',
'1.1.1.1',
'10.10.10.10',
'192.168.0.1',
'255.255.255.255',
b'\x00\x00\x00\x00',
b'\x01\x01\x01\x01',
b'\n\n\n\n',
b'\xc0\xa8\x00\x01',
b'\xff\xff\xff\xff',
0,
16_843_009,
168_430_090,
3_232_235_521,
4_294_967_295,
IPv4Address('0.0.0.0'),
IPv4Address('1.1.1.1'),
IPv4Address('10.10.10.10'),
IPv4Address('192.168.0.1'),
IPv4Address('255.255.255.255'),
],
)
def test_ipv4address_success(value):
class Model(BaseModel):
ipv4: IPv4Address
assert Model(ipv4=value).ipv4 == IPv4Address(value)
@pytest.mark.parametrize(
'value',
[
'::1:0:1',
'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01',
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff',
4_294_967_297,
340_282_366_920_938_463_463_374_607_431_768_211_455,
IPv6Address('::1:0:1'),
IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'),
],
)
def test_ipv6address_success(value):
class Model(BaseModel):
ipv6: IPv6Address
assert Model(ipv6=value).ipv6 == IPv6Address(value)
@pytest.mark.parametrize(
'value,errors',
[
(
'hello,world',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 address', 'type': 'value_error.ipvanyaddress'}],
),
(
'192.168.0.1.1.1',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 address', 'type': 'value_error.ipvanyaddress'}],
),
(
-1,
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 address', 'type': 'value_error.ipvanyaddress'}],
),
(
2 ** 128 + 1,
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 address', 'type': 'value_error.ipvanyaddress'}],
),
],
)
def test_ipaddress_fails(value, errors):
class Model(BaseModel):
ip: IPvAnyAddress
with pytest.raises(ValidationError) as exc_info:
Model(ip=value)
assert exc_info.value.errors() == errors
@pytest.mark.parametrize(
'value,errors',
[
(
'hello,world',
[{'loc': ('ipv4',), 'msg': 'value is not a valid IPv4 address', 'type': 'value_error.ipv4address'}],
),
(
'192.168.0.1.1.1',
[{'loc': ('ipv4',), 'msg': 'value is not a valid IPv4
|
address', 'type': 'value_error.ipv4address'}],
),
(-1, [{'loc': ('ipv4',), 'msg': 'value is
|
not a valid IPv4 address', 'type': 'value_error.ipv4address'}]),
(
2 ** 32 + 1,
[{'loc': ('ipv4',), 'msg': 'value is not a valid IPv4 address', 'type': 'value_error.ipv4address'}],
),
(
IPv6Address('::0:1:0'),
[{'loc': ('ipv4',), 'msg': 'value is not a valid IPv4 address', 'type': 'value_error.ipv4address'}],
),
],
)
def test_ipv4address_fails(value, errors):
class Model(BaseModel):
ipv4: IPv4Address
with pytest.raises(ValidationError) as exc_info:
Model(ipv4=value)
assert exc_info.value.errors() == errors
@pytest.mark.parametrize(
'value,errors',
[
(
'hello,world',
[{'loc': ('ipv6',), 'msg': 'value is not a valid IPv6 address', 'type': 'value_error.ipv6address'}],
),
(
'192.168.0.1.1.1',
[{'loc': ('ipv6',), 'msg': 'value is not a valid IPv6 address', 'type': 'value_error.ipv6address'}],
),
(-1, [{'loc': ('ipv6',), 'msg': 'value is not a valid IPv6 address', 'type': 'value_error.ipv6address'}]),
(
2 ** 128 + 1,
[{'loc': ('ipv6',), 'msg': 'value is not a valid IPv6 address', 'type': 'value_error.ipv6address'}],
),
(
IPv4Address('192.168.0.1'),
[{'loc': ('ipv6',), 'msg': 'value is not a valid IPv6 address', 'type': 'value_error.ipv6address'}],
),
],
)
def test_ipv6address_fails(value, errors):
class Model(BaseModel):
ipv6: IPv6Address
with pytest.raises(ValidationError) as exc_info:
Model(ipv6=value)
assert exc_info.value.errors() == errors
#
# ipaddress.IPv4Network
# ipaddress.IPv6Network
# pydantic.IPvAnyNetwork
#
@pytest.mark.parametrize(
'value,cls',
[
('192.168.0.0/24', IPv4Network),
('192.168.128.0/30', IPv4Network),
('2001:db00::0/120', IPv6Network),
        (2 ** 32 - 1, IPv4Network),  # no mask is equivalent to a /32 mask
(20_282_409_603_651_670_423_947_251_286_015, IPv6Network), # /128
(b'\xff\xff\xff\xff', IPv4Network), # /32
(b'\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff', IPv6Network),
(('192.168.0.0', 24), IPv4Network),
(('2001:db00::0', 120), IPv6Network),
(IPv4Network('192.168.0.0/24'), IPv4Network),
],
)
def test_ipnetwork_success(value, cls):
class Model(BaseModel):
ip: IPvAnyNetwork = None
assert Model(ip=value).ip == cls(value)
@pytest.mark.parametrize(
'value,cls',
[
('192.168.0.0/24', IPv4Network),
('192.168.128.0/30', IPv4Network),
        (2 ** 32 - 1, IPv4Network),  # no mask is equivalent to a /32 mask
(b'\xff\xff\xff\xff', IPv4Network), # /32
(('192.168.0.0', 24), IPv4Network),
(IPv4Network('192.168.0.0/24'), IPv4Network),
],
)
def test_ip_v4_network_success(value, cls):
class Model(BaseModel):
ip: IPv4Network = None
assert Model(ip=value).ip == cls(value)
@pytest.mark.parametrize(
'value,cls',
[
('2001:db00::0/120', IPv6Network),
(20_282_409_603_651_670_423_947_251_286_015, IPv6Network), # /128
(b'\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff', IPv6Network),
(('2001:db00::0', 120), IPv6Network),
(IPv6Network('2001:db00::0/120'), IPv6Network),
],
)
def test_ip_v6_network_success(value, cls):
class Model(BaseModel):
ip: IPv6Network = None
assert Model(ip=value).ip == cls(value)
@pytest.mark.parametrize(
'value,errors',
[
(
'hello,world',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 network', 'type': 'value_error.ipvanynetwork'}],
),
(
'192.168.0.1.1.1/24',
[{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 network', 'type':
|
philba/myblog
|
blog/migrations/0008_blogpage_categories.py
|
Python
|
mit
| 513
| 0.001949
|
# -*- coding:
|
utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-10 18:08
from __future__ import unicode_literals
from django.db import migrations
import modelcluster.fields
class Migration(migrations.Migration):
de
|
pendencies = [
('blog', '0007_blogcategory'),
]
operations = [
migrations.AddField(
model_name='blogpage',
name='categories',
field=modelcluster.fields.ParentalManyToManyField(blank=True, to='blog.BlogCategory'),
),
]
|
ohsu-qin/qirest
|
qirest/test/unit/test_seed.py
|
Python
|
bsd-2-clause
| 19,873
| 0.002063
|
from nose.tools import (assert_is_none, assert_is_instance, assert_in,
assert_is_not_none, assert_true, assert_false,
assert_equal)
from datetime import datetime
from mongoengine import connect
from qirest_client.model.subject import Subject
from qirest_client.model.uom import Weight
from qirest_client.model.clinical import (Biopsy, Surgery, Drug)
from qirest.test.helpers import seed
MODELING_RESULT_PARAMS = ['fxl_k_trans', 'fxr_k_trans', 'delta_k_trans', 'v_e', 'tau_i']
"""The test seed modeling result parameters."""
class TestSeed(object):
"""
This TestSeed class tests the seed helper utility.
Note: this test drops the ``qiprofile-test`` Mongo database
at the beginning and end of execution.
"""
def setup(self):
self._connection = connect(db='qiprofile_test')
self._connection.drop_database('qiprofile_test')
self._subjects = seed.seed()
|
def tearDown(self):
self._connection.drop_database('qiprofile_test')
def test_serialization(self):
for saved_sbj in self._subjects:
query = dict(project=saved_sbj.project,
collection=saved_sbj.collection,
number=saved_sbj.number)
fetched_sbj = Subject.objects.get(**query)
self._validate_subject(fe
|
tched_sbj)
SESSION_CNT = dict(
Breast=4,
Sarcoma=3
)
def test_reseed(self):
subjects = seed.seed()
expected = set(str(sbj) for sbj in self._subjects)
actual = set(str(sbj) for sbj in subjects)
assert_equal(actual, expected, "Reseed result is incorrect -"
"\nexpected:\n%s\nfound:\n%s" %
(expected, actual))
def _validate_subject(self, subject):
collections = ((coll.name for coll in seed.COLLECTION_BUILDERS))
assert_in(subject.collection, collections,
"Collection is invalid: %s" % subject.collection)
self._validate_demographics(subject)
        self._validate_clinical_data(subject)
self._validate_sessions(subject)
def _validate_demographics(self, subject):
assert_is_not_none(subject.gender, "%s is missing gender" % subject)
    def _validate_clinical_data(self, subject):
# There are three treatments.
self._validate_treatments(subject)
# Validate the clinical encounters.
self._validate_clinical_encounters(subject)
def _validate_treatments(self, subject):
# There are three treatments.
treatments = subject.treatments
assert_is_not_none(treatments, "%s has no treatments" % subject)
assert_equal(len(treatments), 3,
"%s Subject %d treatments count is incorrect: %d" %
(subject.collection, subject.number, len(treatments)))
# Breast has neoadjuvant drugs.
if subject.collection == 'Breast':
self._validate_breast_treatments(subject, treatments)
def _validate_breast_treatments(self, subject, treatments):
# Breast has neoadjuvant drugs.
neo_rx = next(((trt for trt in treatments if trt.treatment_type == 'Neoadjuvant')),
None)
        assert_is_not_none(neo_rx, ("%s Subject %d is missing a neoadjuvant" +
" treatment") % (subject.collection, subject.number))
dosages = neo_rx.dosages
assert_equal(len(dosages), 2,
(("%s session %d neoadjuvant treatment dosage count is" +
" incorrect: %d") % (subject.collection, subject.number, len(dosages))))
# Validate the agent type and dosage unit.
for dosage in dosages:
agent = dosage.agent
assert_is_instance(agent, Drug,
"%s Subject %d neoadjuvant agent is not a drug" %
(subject.collection, subject.number))
amount = dosage.amount
            assert_is_not_none(amount, ("%s Subject %d is missing a neoadjuvant drug" +
" dosage amount") % (subject.collection, subject.number))
def _validate_clinical_encounters(self, subject):
# There are two clinical encounters.
cln_encs = list(subject.clinical_encounters)
assert_is_not_none(cln_encs, "%s has no encounters" % subject)
assert_equal(len(cln_encs), 2,
"%s Subject %d encounter count is incorrect: %d" %
(subject.collection, subject.number, len(cln_encs)))
# Each encounter has a subject weight.
for enc in cln_encs:
assert_is_not_none(enc.weight, "%s encounter %s is missing the"
" subject weight" % (subject, enc))
assert_is_instance(enc.weight, int,
"%s encounter %s weight type is incorrect: %s" %
(subject, enc, enc.weight.__class__))
# There is a biopsy with a pathology report.
biopsy = next((enc for enc in cln_encs if isinstance(enc, Biopsy)),
None)
assert_is_not_none(biopsy, "%s Subject %d is missing a biopsy" %
(subject.collection, subject.number))
self._validate_pathology(subject, biopsy.pathology)
# Breast pre-neoadjuvant biopsy does not have a RCB.
if subject.collection == 'Breast':
tumor_pathology = biopsy.pathology.tumors[0]
assert_is_none(tumor_pathology.rcb,
"%s biopsy pathology report incorrectly has a RCB"
" status" % subject)
# There is a surgery with a pathology report.
surgery = next((enc for enc in cln_encs if isinstance(enc, Surgery)),
None)
assert_is_not_none(surgery, "%s Subject %d is missing a surgery" %
(subject.collection, subject.number))
assert_is_not_none(surgery.pathology,
"%s surgery is missing a pathology report" % subject)
self._validate_pathology(subject, surgery.pathology)
# Surgery has a RCB.
if subject.collection == 'Breast':
tumor_pathology = surgery.pathology.tumors[0]
assert_is_not_none(tumor_pathology.rcb,
"%s surgery pathology report is missing a"
" RCB status" % subject)
def _validate_pathology(self, subject, pathology_report):
assert_is_not_none(pathology_report, "%s is missing a pathology"
" report" % subject)
assert_false(len(pathology_report.tumors) == 0,
"%s has no pathology tumor report")
for tumor_pathology in pathology_report.tumors:
self._validate_tnm(subject, tumor_pathology.tnm)
# The tumor-specific tests.
if subject.collection == 'Breast':
self._validate_breast_pathology(subject, tumor_pathology)
elif subject.collection == 'Sarcoma':
self._validate_sarcoma_pathology(subject, tumor_pathology)
def _validate_tnm(self, subject, tnm):
assert_is_not_none(tnm, "%s is missing a TNM" % subject)
assert_is_not_none(tnm.tumor_type,
"%s TNM is missing the tumor type" % subject)
assert_is_not_none(tnm.grade,
"%s TNM is missing the grade" % subject)
assert_is_not_none(tnm.size,
"%s TNM is missing the composite size object" %
subject)
assert_is_not_none(tnm.size.tumor_size,
"%s TNM is missing the size score" % subject)
assert_is_not_none(tnm.lymph_status,
"%s TNM is missing the lymph status" % subject)
assert_is_not_none(tnm.lymphatic_vessel_invasion,
"%s TNM is missing the lymphati vessel invasion"
% subject)
|
theopak/glassface
|
server/manage.py
|
Python
|
mit
| 252
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.
|
environ.setdefault("DJANGO_SETTINGS_MODULE", "glassface.settings")
from django.core.management import execute_from_command_line
execu
|
te_from_command_line(sys.argv)
|
mauricioabreu/speakerfight
|
deck/migrations/0014_event_anonymous_voting.py
|
Python
|
mit
| 485
| 0.002062
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Mig
|
ration(migrations.Migration):
dependencies = [
('deck', '0013_proposal_more_information'),
]
operations = [
migrations.AddField(
model_name='event',
name='anonymous_voting',
field=models.BooleanField(default=False, verbose_name='Anonymous Voting?'),
|
preserve_default=True,
),
]
|
daknuett/BeeKeeper
|
pythons/objs_graphics.py
|
Python
|
agpl-3.0
| 1,259
| 0.039714
|
from gi.repository import Gtk
import os
class ExportDialog(Gtk.Dialog):
def __init__(self,parent,*args):
Gtk.Dialog.__init__(self, "Exportieren", parent, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
self.set_default_size(150,150)
self.contentarea=self.get_content_area()
self.selection_type=""
self.selection_folder=""
self.combo_store=Gtk.ListStore(str)
self.combo_store.append(["CSV"])
self.combo_store.append(["XML"])
self.combo=Gtk.ComboBox.new_with_mode
|
l_and_entry(self.combo_store)
self.combo.connect("changed",self.update_select_type)
self.combo.set_entry_text_column(0)
self.contentarea.add(self.combo)
self.filechooser = Gtk.FileChooserButton(Gtk.FileChooserAction.CREATE_FOLDER)
self.filechooser.set_create_folders(True)
self.filechooser.set_action(Gtk.FileChooserAction.SELECT_FOLDER)
self.filechooser.connect("file-set",self.update_select_folder)
self.contentarea.add(self.filechooser)
self.show_all()
|
def update_select_type(self,combo,*args):
treit=combo.get_active_iter()
if(treit == None):
return
self.selection_type=combo.get_model()[treit][0]
return
def update_select_folder(self,chooser,*args):
self.selection_folder=chooser.get_filename()
|
jkonecny12/blivet
|
blivet/tasks/fsmkfs.py
|
Python
|
lgpl-2.1
| 10,426
| 0.000575
|
# fsmkfs.py
# Filesystem formatting classes.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
import abc
import shlex
from six import add_metaclass
from ..errors import FSError, FSWriteLabelError, FSWriteUUIDError
from .. import util
from . import availability
from . import fstask
from . import task
@add_metaclass(abc.ABCMeta)
class FSMkfsTask(fstask.FSTask):
can_label = abc.abstractproperty(doc="whether this task labels")
can_set_uuid = abc.abstractproperty(doc="whether this task can set UUID")
can_nodiscard = abc.abstractproperty(doc="whether this task can set nodiscard option")
@add_metaclass(abc.ABCMeta)
class FSMkfs(task.BasicApplication, FSMkfsTask):
"""An abstract class that represents filesystem creation actions. """
description = "mkfs"
label_option = abc.abstractproperty(
doc="Option for setting a filesystem label.")
nodiscard_option = abc.abstractproperty(
doc="Option for setting nodiscrad option for mkfs.")
args = abc.abstractproperty(doc="options for creating filesystem")
@abc.abstractmethod
def get_uuid_args(self, uuid):
"""Return a list of arguments for setting a filesystem UUID.
:param uuid: the UUID to set
:type uuid: str
:rtype: list of str
"""
raise NotImplementedError
# IMPLEMENTATION methods
@property
def can_label(self):
""" Whether this task can label the filesystem.
:returns: True if this task can label the filesystem
:rtype: bool
"""
return self.label_option is not None
@property
def can_set_uuid(self):
"""Whether this task can set the UUID of a filesystem.
:returns: True if UUID can be set
:rtype: bool
"""
return self.get_uuid_args is not None
@property
def can_nodiscard(self):
"""Whether this task can set nodiscard option for a filesystem.
:returns: True if nodiscard can be set
:rtype: bool
"""
return self.nodiscard_option is not None
@property
def _label_options(self):
""" Any labeling options that a particular filesystem may use.
:returns: labeling options
:rtype: list of str
"""
# Do not know how to set label while formatting.
if self.label_option is None:
return []
# No label to set
if self.fs.label is None:
return []
if self.fs.label_format_ok(self.fs.label):
return [self.label_option, self.fs.label]
else:
raise FSWriteLabelError("Choosing not to apply label (%s) during creation of filesystem %s. Label format is unacceptable for this filesystem." % (self.fs.label, self.fs.type))
@property
def _nodiscard_option(self):
""" Any nodiscard options that a particular filesystem may use.
:returns: nodiscard options
:rtype: list of str
"""
# Do not know how to set nodiscard while formatting.
if self.nodiscard_option is None:
return []
# nodiscard option not requested
if not self.fs._mkfs_nodiscard:
return []
return self.nodiscard_option
@property
def _uuid_options(self):
"""Any UUID options that a particular f
|
ilesystem may use.
:returns: UUID options
:rtype: list of str
:raises: FSWriteUUIDError
"""
if self.get_uuid_args is None or self.fs.uuid is None:
return []
if self.fs.uuid_for
|
mat_ok(self.fs.uuid):
return self.get_uuid_args(self.fs.uuid)
else:
raise FSWriteUUIDError("Choosing not to apply UUID (%s) during"
" creation of filesystem %s. UUID format"
" is unacceptable for this filesystem."
% (self.fs.uuid, self.fs.type))
def _format_options(self, options=None, label=False, set_uuid=False, nodiscard=False):
"""Get a list of format options to be used when creating the
filesystem.
:param options: any special options
:type options: list of str or None
:param bool label: if True, label if possible, default is False
:param bool set_uuid: whether set UUID if possible, default is False
"""
options = options or []
if not isinstance(options, list):
raise FSError("options parameter must be a list.")
label_options = self._label_options if label else []
uuid_options = self._uuid_options if set_uuid else []
nodiscard_option = self._nodiscard_option if nodiscard else []
create_options = shlex.split(self.fs.create_options or "")
return (options + self.args + label_options + uuid_options +
nodiscard_option + create_options + [self.fs.device])
def _mkfs_command(self, options, label, set_uuid, nodiscard):
"""Return the command to make the filesystem.
:param options: any special options
:type options: list of str or None
:param label: whether to set a label
:type label: bool
:param set_uuid: whether to set an UUID
:type set_uuid: bool
:param nodiscard: whether to run mkfs with nodiscard option
:type nodiscard: bool
:returns: the mkfs command
:rtype: list of str
"""
return [str(self.ext)] + self._format_options(options, label, set_uuid, nodiscard)
def do_task(self, options=None, label=False, set_uuid=False, nodiscard=False):
"""Create the format on the device and label if possible and desired.
:param options: any special options, may be None
:type options: list of str or NoneType
:param bool label: whether to label while creating, default is False
:param bool set_uuid: whether to set an UUID while creating, default
is False
"""
# pylint: disable=arguments-differ
error_msgs = self.availability_errors
if error_msgs:
raise FSError("\n".join(error_msgs))
options = options or []
cmd = self._mkfs_command(options, label, set_uuid, nodiscard)
try:
ret = util.run_program(cmd)
except OSError as e:
raise FSError(e)
if ret:
raise FSError("format failed: %s" % ret)
class BTRFSMkfs(FSMkfs):
ext = availability.MKFS_BTRFS_APP
label_option = None
nodiscard_option = ["--nodiscard"]
def get_uuid_args(self, uuid):
return ["-U", uuid]
@property
def args(self):
return []
class Ext2FSMkfs(FSMkfs):
ext = availability.MKE2FS_APP
label_option = "-L"
nodiscard_option = ["-E", "nodiscard"]
_opts = []
def get_uuid_args(self, uuid):
return ["-U", uuid]
@property
def args(self):
return self._opts + (["-T", self.fs.fsprofile] if self.fs.fsprofile else [])
class Ext3FSMkfs(Ext2FSMkfs):
_opts = ["-t", "ext3"]
class Ext4FSMkfs(E
|
git-albertomarin/pinger
|
report/parser/writer.py
|
Python
|
mit
| 332
| 0.006024
|
from database.db import Database
from report.db.models import PingerReport
class Writer:
def __init__(self):
self.db = D
|
atabase()
de
|
f report_insert(self, item):
"""
Save Pinger Reports to the database
:param item:
"""
self.db.get_or_create(PingerReport, **item)
|
FlorisTurkenburg/ManyMan
|
b.L_frontend/widgets.py
|
Python
|
gpl-2.0
| 7,562
| 0.000264
|
"""
ManyMan - A Many-core Visualization and Management System
Copyright (C) 2015
University of Amsterdam - Computer Systems Architecture
Jimi van der Woning and Roy Bakker
Extended for big.LITTLE by: Floris Turkenburg
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from kivy.config import Config
from kivy.graphics import Color, Rectangle
from kivy.logger import Logger
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.vkeyboard import VKeyboard
from kivy.uix.widget import Widget
class ImageButton(Button):
"""Button consisting of an image instead of a label."""
def __init__(self, image_url, **kwargs):
if 'text' in kwargs:
del kwargs['text']
kwargs.update({'color': [1., 0., 0., 1.]})
self.image_url = image_url
self.layout = None
self._image = None
super(ImageButton, self).__init__(**kwargs)
self.build()
def build(self):
"""Render the button."""
self.layout = BoxLayout(spacing=5, padding=5)
self._image = Image(
source=self.image_url,
color=(.8, .8, .8, 1),
size_hint_y=None,
height=40
)
self.layout.add_widget(self._image)
self.add_widget(self.layout)
self.bind(pos=self.update_graphics_pos, size=self.update_graphics_size)
def update_graphics_pos(self, instance, value):
"""Handler when the button moves. Move its contents along."""
self.layout.pos = value
def update_graphics_size(self, instance, value):
"""Handler when the button resizes. Resize its contents along."""
self.layout.size = value
def get_image(self):
"""Getter for the button's image."""
return self._image.source
def set_image(self, value):
"""Setter for the button's image."""
self._image.source = value
# Define getters and setters
image = property(get_image, set_image)
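# --- Editor's hedged usage sketch (not part of the original ManyMan module) ---
# ImageButton is constructed with the image URL plus the usual Kivy Button
# keyword arguments; the file names below are illustrative only.
def _example_image_button():
    btn = ImageButton('icons/play.png', size_hint=(None, None), size=(120, 48))
    btn.image = 'icons/pause.png'  # the 'image' property swaps the displayed source
    return btn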
class IconButton(Button):
"""Button with an icon on the left."""
def __init__(self, icon_url, **kwargs):
self.text = kwargs.get('text', '')
if self.text:
del kwargs['text']
self.icon_url = icon_url
self.layout = None
self.icon = None
super(IconButton, self).__init__(**kwargs)
self.build()
def build(self):
"""Render the button."""
self.layout = BoxLayout(spacing=5, padding=5)
self.icon = Image(
source=self.icon_url,
color=(.8, .8, .8, 1),
size_hint=(None, None),
size=(40, 40)
)
self.layout.add_widget(self.icon)
self.label = Label(
text=self.text,
size_hint_y=None,
height=40
)
self.layout.add_widget(self.label)
self.add_widget(self.layout)
self.bind(pos=self.update_graphics_pos, size=self.update_graphics_size)
def update_graphics_pos(self, instance, value):
"""Handler when the button moves. Move its contents along."""
self.layout.pos = value
def update_graphics_size(self, instance, value):
"""Handler when the button resizes. Resize its contents along."""
self.layout.size = value
def get_text(self):
"""Getter for the button's text."""
return self.label.text
def set_text(self, value):
"""Setter for the button's text."""
self.label.text = value
def get_image(self):
"""Getter for the button's image."""
return self.icon.source
def set_image(self, value):
"""Setter for the button's image."""
self.icon.source = value
# Define getters and setters
txt = property(get_text, set_text)
image = property(get_image, set_image)
class MyVKeyboard(VKeyboard):
"""
    Extended virtual keyboard class whose keyboards folder can be changed.
"""
def __init__(self, **kwargs):
self.layout_path = Config.get('settings', 'keyboards_folder')
super(MyVKeyboard, self).__init__(**kwargs)
class MyTextInput(TextInput):
"""
Extended text input class which allows for automatic resize and readonly
text fields.
NOTE: This class is no longer needed as kivy-1.3.0 added the readonly
property. Also as of version 1.9.0, this class crashes, as FocusBehaviour
has been changed.
"""
def __init__(self, **kwargs):
self.readonly = kwargs.get('readonly', False)
self.auto_resize = kwargs.get('auto_resize', False)
super(MyTextInput, self).__init__(**kwargs)
def insert_text(self, substring):
"""Insert the given substring when not readonly."""
if self.readonly:
return
super(MyTextInput, self).insert_text(substring)
def do_backspace(self):
"""Insert a backspace when not readonly."""
if self.readonly:
return
super(MyTextInput, self).do_backspace()
def delete_selection(self):
"""Delete the selection when not readonly."""
if self.readonly:
return
super(MyTextInput, self).delete_selection()
def on_touch_down(self, touch):
"""Handle the touch events when not readonly."""
if self.readonly:
return False
super(MyTextInput, self).on_touch_down(touch)
def on_focus(self, instance, value, *largs):
"""Handle the focus events when not readonly."""
if self.readonly:
return
super(MyTextInput, self).on_focus(instance, value, *largs)
def append_text(self, text):
"""Append given text to the textfield."""
self.set_text(self.text + text)
def set_text(self, text):
"""Set the contents of the textfield to the given text."""
Logger.debug("MyTextInput: Setting text to %s" % text)
self.text = text
if self.auto_resize:
# Resize the textfield when needed
self.height = len(self._lines) * (self.line_height +
self._line_spacing) + self.padding_y * 2
class WRectangle(Widget):
"""Widget version of the Rectangle graphic."""
def __init__(self, **kwargs):
self.color = kwargs.get('color', [1., 1., 1., 1.])
|
super(WRectangle, self).__init__(**kwargs)
self.build()
def build(self):
"""Render the
|
rectangle."""
with self.canvas:
self.c = Color()
self.c.rgba = self.color
self.r = Rectangle(pos=self.pos, size=self.size)
self.bind(pos=self.update_pos, size=self.update_size)
def update_pos(self, instance, value):
"""Handler when the rectangle's position changes."""
self.r.pos = value
def update_size(self, instance, value):
"""Handler when the rectangle's size changes."""
self.r.size = value
|
haroldo-ok/really-old-stuff
|
mastersystem/zxb-sms-2012-02-23/zxb-sms/wip/zxb2wla.py
|
Python
|
apache-2.0
| 11,822
| 0.015564
|
from version import VERSION
import sys, os, getpass
import re
import optparse
def coalesce( *values ):
for f in values:
if f is not None:
return f
class ConversionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
###
WLA_INCLUDE_TEMPLATE = """
.section "ZX Basic code" free
__START_PROGRAM:
{code}
ret
.ends
.section "ZXB variable init values" free
ZXBASIC_USER_DATA_VALUES:
{var_inits}
ZXBASIC_USER_DATA_VALUES_END:
.ends
.ramsection "ZXBasic Variables" slot 3
ZXBASIC_USER_DATA ds 0
{vars}
ZXBASIC_USER_DATA_END ds 0
.ends
"""
###
WLA_MAIN_TEMPLATE = """
;==============================================================
; WLA-DX banking setup
;==============================================================
.memorymap
defaultslot 0
; rom area
slotsize $4000
slot 0 $0000
slot 1 $4000
slot 2 $8000
; ram area
slotsize $2000
slot 3 $C000
slot 4 $E000
.endme
.rombankmap
bankstotal {bank_count}
banksize $4000
banks {bank_count}
.endro
;==============================================================
; SDSC tag and SMS rom header
;==============================================================
.sdsctag {prg_version},"{prg_name}","{prg_notes}","{prg_author}"
.smstag
.include "{wla_lib_dir}boo
|
t.inc"
;==============================================================
; Includes
;==============================================================
.include "{wla_lib_dir}Useful functions.inc"
.include "{wla_lib_dir}BBC Micro font.inc"
.include "{wla_lib_dir}sprite.inc"
.include "{wla_lib_dir}resource.inc"
{code}
{resource}
"""
###
class WlaConverter(object):
"""Converts an ZXBasic .asm file into an WLA-DX .
|
inc file.
"""
def __init__(self, input = None):
self.setInput(input)
return
# Accessors
def setInput(self, input):
if input == None:
self._input = None
else:
self._input = [x.rstrip() for x in input]
return self
# The actual methods
def convert(self, input = None):
"""Performs the conversion.
Returns a converted list of strings.
"""
if input != None:
self.setInput(input)
self._cleanup_source()
self._separate_sections()
self._generate_variables()
#return self._cleaned
#return self._data
#return self._vars
s = WLA_INCLUDE_TEMPLATE.format(
code = str.join('\n', self._code),
var_inits = str.join('\n', self._var_inits),
vars = str.join('\n', self._vars)
)
# Converts _xxx to zb_xxx
s = re.sub(r'(\W)(_\w+)', r'\1zb\2', s)
return s.split('\n')
def _cleanup_source(self):
"""Removes all of the undesired statements
The result is stored on self._cleaned
"""
# (start-pattern, end-pattern, start-delta, end-delta)
patterns = [
(r'(?im)^\s*org\s+\d+\s*$', None, 0, 1), # org nnnn
(r'(?im)^__START_PROGRAM:\s*$', r'(?im)^\s*ei\s*$', 0, 1), # Start of the program
(r'(?im)^__END_PROGRAM:\s*$', r'(?im)^__CALL_BACK__:\s*$', -3, 2), # End of the program
(r'(?im)^ZXBASIC_USER_DATA_END\s.*$', None, -1, 100) # Junk at end of file
]
outp = self._input[:]
for pat in patterns:
sti = self._find_list_pattern(outp, pat[0])
edi = coalesce(
self._find_list_pattern(outp, pat[1], sti + 1),
sti)
outp[sti + pat[2] : edi + pat[3]] = []
pat = re.compile(r'(?im)^#line\s+\d+\s*.*$')
for i, v in enumerate(outp):
if pat.match(v) != None:
outp[i] = ';' + v
self._cleaned = outp
def _separate_sections(self):
"""Separates data from code
Data is stored on self._data;
Code is stored on self._code.
"""
divider = self._find_list_pattern(self._cleaned, r'(?im)^ZXBASIC_USER_DATA:\s*$')
self._code = self._cleaned[:divider]
self._data = self._cleaned[divider + 1:]
def _find_list_pattern(self, list, pat, start = 0):
"""Scans for the patterns on the list
Returns the index of the first match, or None if not found.
"""
if pat == None:
return None;
p = re.compile(pat)
for i, v in enumerate(list[start:]):
if p.match(v) != None:
return start + i
return None
def _generate_variables(self):
"""Generate the variable declarations and initializations"""
# TODO: This code does require some major cleanup
self._vars = []
self._var_inits = []
var_dic = {} # Holds var size
var_name = ''
var_names = [] # To ensure correct order
def create_var(name):
global var_name
var_name = name
var_names.append(var_name)
var_dic[var_name] = 0
def inc_var_size(sz):
global var_name
var_dic[var_name] = var_dic[var_name] + sz
# (regex, var-template, init-template)
actions = [
#labels
(re.compile(r'(?im)^(\w+):\s*$'),
lambda m : create_var(m.group(1)),
lambda m : '; %s' % m.group(1)),
#defb
(re.compile(r'(?im)^\s*DEFB\s+(.*)$'),
lambda m : inc_var_size(len(m.group(1).strip().split(','))),
lambda m : '\t.db %s' % m.group(1)),
#defw
(re.compile(r'(?im)^\s*DEFW\s+(.*)$'),
lambda m : inc_var_size(len(m.group(1).strip().split(',')) * 2),
lambda m : '\t.dw %s' % m.group(1))
]
for s in self._data:
for rx, vfx, ifx in actions:
m = rx.match(s)
if m != None:
vfx(m)
self._var_inits.append(ifx(m))
for name in var_names:
self._vars.append('\t{0} ds {1}'.format(name, var_dic[name]))
###
class WlaProgram(object):
"""Turns an .inc generated by WlaConverter into a compilable .asm
"""
def __init__(self,
input = None,
bank_count = 4,
prg_version = 0,
prg_name = "ZX Basic program",
prg_notes = "Generated by zxb2wla " + VERSION,
prg_author = "??",
wla_lib_dir = "./",
extra_includes = []):
self._input = input
self._bank_count = bank_count
self._prg_version = prg_version
self._prg_name = prg_name
self._prg_notes = prg_notes
self._prg_author = prg_author
self._wla_lib_dir = wla_lib_dir
self._extra_includes = extra_includes
def convert(self, input=None):
"""Performs the conversion"""
if input != None:
self._input = input
self._code = str.join('\n', self._input)
outp = WLA_MAIN_TEMPLATE.format(
bank_count = self._bank_count,
prg_version = self._prg_version,
prg_name = self._prg_name,
prg_notes = self._prg_notes,
prg_author = self._prg_author,
wla_lib_dir = self._wla_lib_dir,
code = self._code,
resource = str.join('\n', ['.include "%s"' % x for x in self._extra_includes])
)
return outp.split('\n')
###
class Main(object):
"""Reads the commandline and executes the program functions accordingly.
"""
def __init__(self, args):
self._args = args
def execute(self):
err = self._read_arguments()
if err > 0:
return err
|
astone282/vactrack
|
vactrack/Expedia.py
|
Python
|
apache-2.0
| 6,312
| 0.00206
|
import re
import urllib.request
import logging
import math
from bs4 import BeautifulSoup
from Vacation import VacationPackage, Departures, Destinations, Duration
ALL_PAGES = True
DEPARTURE_MAPPING = {
Departures.OTTAWA: "YOW"
}
DEPARTURE_CITY_MAPPING = {
Departures.OTTAWA: "Ottawa"
}
DESTINATION_MAPPING = {
Destinations.MAYAN_RIVIERA: "24%7C3%2C4%2C5%2C7%2C8%2C9%2C10%2C11%2C12%2C14%7C3%2C6%2C130%2C163%2C216%2C273%2C275%2C411%2C422%2C483%2C577%2C603%2C666%2C766%2C775%2C804%2C822%2C836%2C871%2C872%2C873%2C896%2C897%2C908%2C940%2C942%2C974%2C980%2C1001%2C1003%2C1004%2C1006%2C1104%2C1189%2C1350%2C1351%2C1352%2C1373%2C1566%2C1595%2C1607%2C1616%2C1692%2C1695%2C1703%2C1705%2C1708%2C1716%2C1840%2C1900%2C1928%2C2064%2C2082%2C2093%2C2098%2C2114%2C2118%2C2120%2C2172%2C2185%2C2371%2C2565%2C2718%2C2719%2C2739%2C2823%2C3012%2C3062%2C3088%2C3105%2C7700%2C7742%2C8721%2C8791%2C9267%2C9343%2C9422%2C9557%2C9558%2C9575%2C9576%2C10196%2C10314%2C10368%2C10453%2C10647%2C10652%2C10659%2C10663%2C10698%2C10837%2C10849%2C10895%2C10904%2C10971%2C11040%2C11063%2C11116%2C11174%2C11202",
Destinations.ARUBA: "29%7C7%2C14%7C177%2C179%2C958%2C1026%2C1027%2C1028%2C1124%2C1364%2C1564%2C1680%2C1681%2C1733%2C2054%2C10351%2C10650"
}
DESTINATION_CITY_MAPPING = {
Destinations.MAYAN_RIVIERA: "Riviera+Maya",
Destinations.ARUBA: "Aruba"
}
DESTINATION_COUNTRY_MAPPING = {
Destinations.MAYAN_RIVIERA: "Mexico",
Destinations.ARUBA: "Aruba"
}
DURATION_MAPPING = {
Duration.DAYS_7: "7DAYS",
Duration.DAYS_10: "10DAYS"
}
class ExpediaScraper:
def fetch_vacation_packages(self, vacation_request):
expedia_vacation_request = ExpediaVacation_request(vacation_request)
return ExpediaVacationScraper(expedia_vacation_request).fetch_vacation_packages()
class ExpediaVacation_request:
def __init__(self, vacation_request):
self.vacation_request = vacation_request
self.from_code = DEPARTURE_MAPPING[vacation_request.departure_city]
self.origin_city = DEPARTURE_CITY_MAPPING[vacation_request.departure_city]
self.to = DESTINATION_MAPPING[vacation_request.destination]
self.to_city = DESTINATION_CITY_MAPPING[vacation_request.destination]
self.to_country = DESTINATION_COUNTRY_MAPPING[vacation_request.destination]
self.date = vacation_request.date
self.duration = DURATION_MAPPING[vacation_request.duration]
self.occupancy = "D"
self.adults = str(vacation_request.adults)
class ExpediaVacationScraper:
def __init__(self, expedia_vacation_request):
self.expedia_vacation_request = expedia_vacation_request
self.from_code = expedia_vacation_request.from_code
self.origin_city = expedia_vacation_request.origin_city
self.to = expedia_vacation_request.to
self.to_city = expedia_vacation_request.to_city
self.to_country = expedia_vacation_request.to_country
self.date = expedia_vacation_request.date
self.duration = expedia_vacation_request.duration
self.occupancy = expedia_vacation_request.occupancy
self.adults = expedia_vacation_request.adults
self.original_duration = expedia_vacation_request.vacation_request.duration
def fetch_vacation_packages(self):
results = []
page = 0
fetch_page = True
while fetch_page:
fetch_page = False
page += 1
url = "https://www.expedia.ca/all-inclusive-search?origin=" + self.from_code + "&destination=" + self.to + "&fromDate=" + self.date + "&duration=" + self.duration + "&pagingFlag=Y&pageIndex=" + str(
page) + "&occupancy=" + self.occupancy + "&originCityName=" + self.origin_city + "&destinationCityName=" + self.to_city + "&country=" + self.to_country + "&sortBy=&langid=4105&numAdults=" + self.adults + "&numChildren=0&numRooms=1"
logging.debug("Fetching URL " + url)
f = urllib.request.urlopen(url)
html = f.read()
logging.info("Done, parsing results")
soup = Beaut
|
ifulSoup(html, "html.parser")
for tag in soup.find_all("div", class_="flex-card"):
# all stop information
primaryBlock = tag.find_all('div', class_='flex-area-primary')
name = primaryBlock[0].find_all('h5')[0].find_all('a')[0].get_text().strip()
primary_items = primaryBlock[0].find_all('div', class_='sec
|
ondary')
city = primary_items[0].get_text().strip()
descr = primary_items[1].get_text().strip()
dates = primary_items[2].get_text().strip()
oper = primary_items[3].find(text=True).strip()
match = re.search('Operated by (.*),', oper)
oper = match.group(1).strip()
match = re.search('Depart:(.*)Return:(.*)', dates)
depart = match.group(1).strip()
retr = match.group(2).strip()
secondaryBlock = tag.find_all('div', class_='flex-area-secondary')
secondaryItems = secondaryBlock[0].find_all('div', class_='h1')
children = secondaryItems[0].findChildren()
if (len(children) > 1):
cost = children[1].get_text().strip()
else:
cost = children[0].get_text().strip()
finalCost = int(cost.replace(',', '').replace('C$', ''))
package = VacationPackage(name, oper, city, depart, retr, self.original_duration.name, finalCost)
results.append(package)
nav = soup.find_all("nav", class_="pagination")
if len(nav) > 0:
nav = nav[0]
data_per_page = int(nav.attrs['data-per-page'])
total_data = int(nav.attrs['data-total-results'])
number_of_pages = math.ceil(total_data / data_per_page)
if page < number_of_pages and ALL_PAGES:
logging.info("Completed " + str(page) + "/" + str(number_of_pages) + " pages from " + str(
total_data) + " results")
logging.info("Reading next page")
fetch_page = True
logging.info("Parsing complete")
return results
|
PhillsProgrammingExperiments/dyleps
|
dyleps/tier1/exceptions.py
|
Python
|
mit
| 106
| 0.009434
|
from dy
|
leps
|
.utils.exceptions import Tier1Exception
class Tier1ParsingException(Tier1Exception):
pass
|
googleads/google-ads-python
|
google/ads/googleads/v9/services/services/geo_target_constant_service/transports/base.py
|
Python
|
apache-2.0
| 4,445
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v9.resources.types import geo_target_constant
from google.ads.googleads.v9.services.types import geo_target_constant_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class GeoTargetConstantServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for GeoTargetConstantService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a us
|
er-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're
|
developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_geo_target_constant: gapic_v1.method.wrap_method(
self.get_geo_target_constant,
default_timeout=None,
client_info=client_info,
),
self.suggest_geo_target_constants: gapic_v1.method.wrap_method(
self.suggest_geo_target_constants,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def get_geo_target_constant(
self,
) -> typing.Callable[
[geo_target_constant_service.GetGeoTargetConstantRequest],
geo_target_constant.GeoTargetConstant,
]:
raise NotImplementedError
@property
def suggest_geo_target_constants(
self,
) -> typing.Callable[
[geo_target_constant_service.SuggestGeoTargetConstantsRequest],
geo_target_constant_service.SuggestGeoTargetConstantsResponse,
]:
raise NotImplementedError
__all__ = ("GeoTargetConstantServiceTransport",)
|
benjimons/FIR
|
fir_artifacts/migrations/0002_create_artifacts.py
|
Python
|
gpl-3.0
| 1,876
| 0.002665
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import fir_artifacts.models
class Migration(migrations.Migration):
dependencies = [
('fir_artifacts', '0001_initial'),
('incid
|
ents', '0001_initial')
]
|
operations = [
migrations.CreateModel(
name='Artifact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=20)),
('value', models.CharField(max_length=200)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=256)),
('file', models.FileField(upload_to=fir_artifacts.models.upload_path)),
('date', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='File',
name='hashes',
field=models.ManyToManyField(to='fir_artifacts.Artifact', null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='file',
name='content_type',
field=models.ForeignKey(to='contenttypes.ContentType', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='file',
name='object_id',
field=models.PositiveIntegerField(null=True),
preserve_default=True,
),
]
|
itszero/onenet
|
emulab/onenet/onenet/inject/infect.py
|
Python
|
mit
| 867
| 0.019608
|
from mininet.net import Mininet
from ..util import findPyroObjectOrNone
def monkeypatch(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
def getOneNo
|
deByName(net, name):
if name in net.nameToNode:
return net.nameToNode[name]
else:
return findPyroObjectOrNone(name)
@monkeypatch(Mininet)
def intakeNodes(self, networks):
for netName in networks:
net = findPyroObjectOrNone(netName)
if self.name == ne
|
t.getName():
continue
print "-- Intake nodes from %s" % net.getName()
for h in net.getHosts():
self.nameToNode[h.getName()] = h
self.hosts.append(h)
for s in net.getSwitches():
self.nameToNode[s.getName()] = s
self.hosts.append(s)
@monkeypatch(Mininet)
def remoteExecute(self, code):
print "-- Executing: %s" % code
exec(code, {'net': self})
|
levilucio/SyVOLT
|
UMLRT2Kiltera_MM/ProcDef.py
|
Python
|
mit
| 3,435
| 0.029403
|
"""
__ProcDef.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: gehan
Modified: Sat Aug 30 18:23:40 2014
_________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3String import *
from graph_ProcDef import *
class ProcDef(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.superTypes = ['Def', 'MetaModelElement_T']
self.graphClass_ = graph_ProcDef
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.cardinality=ATOM3String('1', 20)
self.cardinality=ATOM3String('1', 20)
self.classtype=ATOM3String('t_', 20)
self.classtype=ATOM3String('t_', 20)
self.name=ATOM3String('s_', 20)
self.name=ATOM3String('s_', 20)
self.generatedAttributes = {'cardinality': ('ATOM3String', ),
'cardinality': ('ATOM3String', ),
'classtype': ('ATOM3String', ),
'classtype': ('ATOM3String', ),
'name': ('ATOM3String', ),
'name': ('ATOM3String', ) }
self.realOrder = ['cardinality','cardinality','classtype','classtype','name','name']
self.directEditing = [1,1,1,1,1,1]
def clone(self):
cloneObject = ProcDef( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST
|
/PRE action trigger
Constraint
|
s will be added/removed in a logical manner by other mechanisms.
"""
return # <---- Remove this to use QOCA
""" Get the high level constraint helper and solver """
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)
|
google/coursebuilder_xblock_module
|
src/modules/xblock_module/xblock_module.py
|
Python
|
apache-2.0
| 54,112
| 0.000185
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting creation and use of content using XBlocks.
Dependencies:
1. XBlock (https://github.com/edx/XBlock)
2. App Engine XBlock runtime
(https://github.com/google/appengine_xblock_runtime)
The appropriate versions of both of these libraries must be installed in the
lib/ folder. See README.rst for more details.
"""
__author__ = 'John Orr (jorr@google.com)'
import cgi
from cStringIO import StringIO
import logging
import mimetypes
import os
import re
import tarfile
import urllib
import uuid
from xml.etree import cElementTree
import appengine_config
from appengine_xblock_runtime import store
import appengine_xblock_runtime.runtime
from common import jinja_utils
from common import safe_dom
from common import schema_fields
from common import tags
from controllers import sites
from controllers import utils
import dbmodels
import django.conf
import django.template.loader
from lxml import etree
import messages
from models import courses
from models import custom_modules
from models import jobs
from models import transforms
import models.models as m_models
from modules.dashboard import filer
from modules.dashboard import unit_lesson_editor
import modules.dashboard.dashboard as dashboard
from modules.oeditor import oeditor
import webapp2
import workbench.runtime
import xblock.core
import xblock.exceptions
import xblock.field_data
import xblock.fields
import xblock.fragment
import xblock.plugin
import xblock.runtime
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import ndb
# URI routing for resources belonging to this module
RESOURCES_URI = '/modules/xblock_module/resources'
# Base URI routing used by Course Builder for XBlock static resources
XBLOCK_RESOURCES_URI = '/modules/xblock_module/xblock_resources'
# Base URI routing used by Course Builder for locally served XBlock static resources
XBLOCK_LOCAL_RESOURCES_URI = '/modules/xblock_module/xblock_local_resources'
# URI routing used by Course Builder for call-backs to server-side XBlock code
HANDLER_URI = '/modules/xblock_module/handler'
# URI routing for the MathJax package
MATHJAX_URI = '/modules/xblock_module/MathJax'
# Allow images of up to 5Mb
MAX_ASSET_UPLOAD_SIZE_K = 5 * 1024
# The location of the static workbench files used by the XBlocks
WORKBENCH_STATIC_PATH = os.path.normpath('lib/XBlock/workbench/static')
# The location of the Django templates used by XBlocks
XBLOCK_TEMPLATES_PATH = 'lib/XBlock/xblock/templates'
# XSRF protection token for handler callbacks
XBLOCK_XSRF_TOKEN_NAME = 'xblock_handler'
XBLOCK_EVENT_SOURCE_NAME = 'xblock-event'
XBLOCK_TAG_EVENT_SOURCE_NAME = 'tag-xblock-event'
XBLOCK_WHITELIST = [
'sequential = cb_xblocks_core.cb_xblocks_core:SequenceBlock',
'video = cb_xblocks_core.cb_xblocks_core:VideoBlock',
'cbquestion = cb_xblocks_core.cb_xblocks_core:QuestionBlock',
'html = cb_xblocks_core.cb_xblocks_core:HtmlBlock',
'vertical = cb_xblocks_core.cb_xblocks_core:VerticalBlock',
'problem = cb_xblocks_core.problem:ProblemBlock'
]
# XBlock runtime section
class StudentFieldData(xblock.field_data.SplitFieldData):
"""A field data manager for use in student (i.e., non-admin) context.
This field data manager prevents students from modifying a field which is
stored as UserScope.NONE, even if an XBlock includes code which sets it.
Thus it defends against poorly-written XBlocks which grant students too
wide permissions.
"""
def __init__(self, db_data):
authored_data = xblock.field_data.ReadOnlyFieldData(db_data)
student_data = db_data
super(StudentFieldData, self).__init__({
xblock.fields.Scope.content: authored_data,
xblock.fields.Scope.settings: authored_data,
xblock.fields.Scope.parent: authored_data,
xblock.fields.Scope.children: authored_data,
xblock
|
.fields.Scope.user_state_summary: student_data,
xblock.fields.Scope.user_state: student_data,
xblock.fields.Scope.user_info: student_
|
data,
xblock.fields.Scope.preferences: student_data})
class ForbiddenXBlockError(Exception):
"""Raised when a non-whitelisted XBlock is requested."""
def select_xblock(identifier, entry_points):
"""Hook called when loading XBlock classes, which enforces whitelist."""
entry_point = xblock.plugin.default_select(identifier, entry_points)
if str(entry_point) not in XBLOCK_WHITELIST:
raise ForbiddenXBlockError(
'Attempted to load forbidden XBlock: %s' % str(entry_point))
return entry_point
class MemoryIdManager(xblock.runtime.MemoryIdManager):
def create_usage(self, def_id, usage_id=None):
"""Extend the method definition to allow a specified usage_id."""
usage_id = usage_id or appengine_xblock_runtime.runtime.generate_id()
self._usages[usage_id] = def_id
return usage_id
def create_definition(self, block_type, def_id=None):
"""Extend the method definition to allow a specified def_id."""
def_id = def_id or appengine_xblock_runtime.runtime.generate_id()
self._definitions[def_id] = block_type
return def_id
class Runtime(appengine_xblock_runtime.runtime.Runtime):
"""A XBlock runtime which uses the App Engine datastore."""
def __init__(
self, handler, id_reader=None, field_data=None, student_id=None,
is_admin=False):
field_data = field_data or xblock.runtime.KvsFieldData(
store.KeyValueStore())
if is_admin:
pass
elif student_id:
field_data = StudentFieldData(field_data)
else:
field_data = xblock.field_data.ReadOnlyFieldData(field_data)
def get_jinja_template(template_name, dirs):
locale = handler.app_context.get_environ()['course']['locale']
return jinja_utils.get_template(template_name, dirs, locale=locale)
services = {'jinja': get_jinja_template}
super(Runtime, self).__init__(
id_reader=id_reader, field_data=field_data, student_id=student_id,
services=services, select=select_xblock)
self.handler = handler
def render_template(self, template_name, **kwargs):
"""Loads the django template for `template_name."""
template = django.template.loader.get_template(template_name)
return template.render(django.template.Context(kwargs))
def wrap_child(self, block, unused_view, frag, unused_context):
wrapped = xblock.fragment.Fragment()
wrapped.add_javascript_url(
self.resource_url('js/vendor/jquery.min.js'))
wrapped.add_javascript_url(
self.resource_url('js/vendor/jquery.cookie.js'))
data = {}
if frag.js_init_fn:
# Patch to accommodate jqueryui tabs (used by sequence XBlock)in a
# page with <base> tag set. See:
# http://stackoverflow.com/questions/13837304/jquery-ui-non-ajax-tab-loading-whole-website-into-itself
wrapped.add_javascript("""
$(function() {
$(".xblock .tabs ul li a").each(function() {
var href = $(this).attr("href");
if (href && href.charAt(0) == "#") {
$(this).attr("href", location.href.toString() + href);
}
});
});
""")
wrapped.add_javascript_url(
self.resource_url('js/runtime/%s.js' % frag.js_in
|
geekan/scrapy-live-portal
|
general_spider/general_spider/spiders/spider.py
|
Python
|
apache-2.0
| 3,707
| 0.002158
|
import re
import json
from urlparse import urlparse
import urllib
import pdb
from scrapy.selector import Selector
try:
from scrapy.spiders import Spider
except:
from scrapy.spiders import BaseSpider as Spider
from scrapy.utils.response import get_base_url
from scrapy.spiders import CrawlSpide
|
r, Rule
from scrapy.linkextractors import LinkExtractor as sle
from general_spider.items import *
from misc.log import *
from misc.spider import CommonSpider
import BasicSpiderConfig
class general_spiderSpider(Comm
|
onSpider):
name = 'general_spider'
list_css_rules = {}
def __init__(self, conf_module='TestSpiderConfig', *args, **kwargs):
if conf_module.endswith(".py"):
conf_module = conf_module[:-3]
cm = __import__(conf_module, globals=globals())
conf = cm.Config()
self.name = conf.name
self.allowed_domains = conf.allowed_domains
self.start_urls = conf.start_urls
self.list_css_rules = conf.list_css_rules
self.rules = [Rule(sle(allow=(c.allowed_rule_regex)), callback='parse_1', cb_kwargs=c.paras, follow=conf.follow) for c in conf.ex_rules]
self.conf = conf
info(self.start_urls)
info(self.rules)
info([[c.allowed_rule_regex, c.paras] for c in conf.ex_rules])
# import pdb; pdb.set_trace()
super(general_spiderSpider, self).__init__(*args, **kwargs)
def parse_1(self, response, list_css_rules):
info('---------------------')
info('Parse '+response.url)
info('list_css_rules:')
info(list_css_rules)
x = self.parse_with_rules(response, list_css_rules, dict)
# info(x)
preprocess_item = getattr(self.conf, "preprocess_item", None)
if callable(preprocess_item):
for item in x:
preprocess_item(item)
# x = self.parse_with_rules(response, self.content_css_rules, dict)
# print(json.dumps(x, ensure_ascii=False, indent=2))
# pp.pprint(x)
# return self.parse_with_rules(response, self.css_rules, general_spiderItem)
return x
def parse_start_url(self, response):
if type(self.list_css_rules) != dict:
return
return self.parse_1(response, self.list_css_rules)
class general_json_spiderSpider(CommonSpider):
name = 'general_json_spider'
list_css_rules = {}
def __init__(self, conf_module='TestSpiderConfig', *args, **kwargs):
if conf_module.endswith(".py"):
conf_module = conf_module[:-3]
cm = __import__(conf_module, globals=globals())
conf = cm.Config()
self.name = conf.name
self.allowed_domains = conf.allowed_domains
self.start_urls = conf.start_urls
self.rules = [Rule(sle(allow=(c.allowed_rule_regex)), callback='parse_1', cb_kwargs=c.paras, follow=conf.follow) for c in conf.ex_rules]
self.conf = conf
info(self.start_urls)
info(self.rules)
info([[c.allowed_rule_regex, c.paras] for c in conf.ex_rules])
# import pdb; pdb.set_trace()
super(general_json_spiderSpider, self).__init__(*args, **kwargs)
def parse_json(self, response):
json_resp = json.loads(response.body_as_unicode())
return json_resp
def parse_1(self, response):
info('---------------------')
info('Parse '+response.url)
x = self.parse_json(response)
y = None
preprocess_json_items = getattr(self.conf, "preprocess_json_items", None)
if callable(preprocess_json_items):
y = preprocess_json_items(x)
return y
def parse_start_url(self, response):
return self.parse_1(response)
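# Illustrative sketch only (not part of the original project): a minimal config
# module of the shape these spiders expect. In practice it would live in its
# own file and be selected via the conf_module spider argument. The ExRule
# helper, the example domain, regex and selectors below are assumptions; only
# the attribute names mirror what __init__ above actually reads, and the exact
# shape of list_css_rules depends on CommonSpider.parse_with_rules, which is
# not shown here.
class ExRule(object):
    def __init__(self, allowed_rule_regex, paras):
        self.allowed_rule_regex = allowed_rule_regex
        self.paras = paras


class Config(object):
    name = 'example_spider'
    allowed_domains = ['example.com']
    start_urls = ['http://example.com/articles']
    follow = True
    # assumed selector layout: one dict per repeating list element, mapping
    # field names to sub-selectors
    list_css_rules = {'.article': {'title': 'a::text', 'url': 'a::attr(href)'}}
    # each rule's paras is passed to parse_1 as cb_kwargs, so it must supply
    # the list_css_rules argument
    ex_rules = [ExRule(r'/article/\d+', {'list_css_rules': list_css_rules})]

    def preprocess_item(self, item):
        item['source'] = 'example.com'

# Assumed invocation, with the config module on the Python path:
#   scrapy crawl general_spider -a conf_module=ExampleConfig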
|
dodonator/ticTacToe
|
outdatet/ticTacToe.py
|
Python
|
gpl-3.0
| 3,105
| 0.055717
|
import random
import os
# TicTacToe
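# Board representation: a 3x3 list of lists; ' ' marks an empty cell,
# 'X' the user and 'O' the computer.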
def createNewField():
result = []
for i in range(3):
tmp = []
for i2 in range(3):
tmp.append(' ')
result.append(tmp)
return result
def printField(field):
print ''
for element in field:
print element
print ''
def isFieldFull(field):
occupiedPlaces = 0
for row in field:
for place in row:
if place != ' ':
occupiedPlaces += 1
elif place == ' ':
return False
if occupiedPlaces == 9:
return True
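# Computer move: keep sampling random cells until an empty one is found,
# then place an 'O' there.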
def KI_Turn(field):
fieldStatus = isFieldFull(field)
if fieldStatus == True:
return field
result = field
running = True
"It is the turn of the computer."
while running == True:
row = random.randint(0,2)
column = random.randint(0,2)
if field[row][column] == ' ':
result[row][column] = 'O'
running = False
else:
pass
return result
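# User move: prompt for a row and column (0-2) until an empty cell is chosen,
# then place an 'X' there.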
def USER_Turn(field):
fieldStatus = isFieldFull(field)
if fieldStatus == True:
return field
result = field
running = True
print "User it's your turn"
while running == True:
row = int(raw_input('Which row? '))
column = int(raw_input('Which column? '))
if field[row][column] == ' ':
result[row][column] = 'X'
running = False
else:
print 'This place is occupied!'
return result
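# Check the three rows, three columns and both diagonals; returns 'User',
# 'Computer', 'Nobody' (board full with no line) or '' (game still running).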
def Winner(field):
winner = ''
for row in field
|
:
if row == ['X','X','X']:
winner = 'User'
return winner
elif row == ['O','O','O']:
winner = 'Computer'
return winner
else:
winner = ''
columns = [[],[],[]]
for row in field:
columns[0].append(row[0])
co
|
lumns[1].append(row[1])
columns[2].append(row[2])
for col in columns:
if col == ['X','X','X']:
winner = 'User'
return winner
elif col == ['O','O','O']:
winner = 'Computer'
return winner
else:
winner = ''
dia1 = [field[0][0],field[1][1],field[2][2]]
dia2 = [field[0][2],field[1][1],field[2][0]]
if dia1 == ['X','X','X'] or dia2 == ['X','X','X']:
winner = 'User'
return winner
elif dia1 == ['O','O','O'] or dia2 == ['O','O','O']:
winner = 'Computer'
return winner
else:
winner = ''
fieldStatus = isFieldFull(field)
if fieldStatus == True:
return "Nobody"
return winner
# Time to play!
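# Main loop: user and computer alternate turns until someone wins or the board
# fills up; the winner scores 10 minus the number of turns played.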
userScore = 0
computerScore = 0
answer = ''
while answer != 'q':
print 'User: ' + str(userScore)
print 'Computer: ' + str(computerScore)
print 'Press q to exit or anything else to continue'
answer = raw_input(': ')
if answer == 'q':
break
os.system('clear')
field = createNewField()
win = Winner(field)
turn = 0
while win == '':
if win == 'Nobody':
print 'There is no winner.'
break
turn += 1
print 'Turn: ' + str(turn)
printField(field)
field = USER_Turn(field)
win = Winner(field)
if win == 'User':
break
os.system('clear')
turn += 1
print 'Turn: ' + str(turn)
printField(field)
field = KI_Turn(field)
win = Winner(field)
if win == 'Computer':
break
os.system('clear')
printField(field)
print 'The winner is: ' + win
if win == 'User':
userScore += (10-turn)
elif win == 'Computer':
computerScore += (10-turn)
print "User: " + str(userScore)
print "Computer: " + str(computerScore)
|