"""
Spatial lag operations.
"""
__author__ = "Sergio J. Rey <srey@asu.edu>, David C. Folch <david.folch@asu.edu>, Levi John Wolf <ljw2@asu.edu"
__all__ = ['lag_spatial', 'lag_categorical']
import numpy as np
def lag_spatial(w, y):
"""
Spatial lag operator.
    If w is row standardized, returns the average of each observation's
    neighbors' values; if not, returns the weighted sum of each
    observation's neighbors' values.
Parameters
----------
w : W
        PySAL spatial weights object
y : array
numpy array with dimensionality conforming to w (see examples)
Returns
-------
wy : array
array of numeric values for the spatial lag
Examples
--------
    Set up a 9x9 binary spatial weights matrix and vector of data; compute the
spatial lag of the vector.
>>> import pysal
>>> import numpy as np
>>> w = pysal.lat2W(3, 3)
>>> y = np.arange(9)
>>> yl = pysal.lag_spatial(w, y)
>>> yl
array([ 4., 6., 6., 10., 16., 14., 10., 18., 12.])
Row standardize the weights matrix and recompute the spatial lag
>>> w.transform = 'r'
>>> yl = pysal.lag_spatial(w, y)
>>> yl
array([ 2. , 2. , 3. , 3.33333333, 4. ,
4.66666667, 5. , 6. , 6. ])
Explicitly define data vector as 9x1 and recompute the spatial lag
>>> y.shape = (9, 1)
>>> yl = pysal.lag_spatial(w, y)
>>> yl
array([[ 2. ],
[ 2. ],
[ 3. ],
[ 3.33333333],
[ 4. ],
[ 4.66666667],
[ 5. ],
[ 6. ],
[ 6. ]])
Take the spatial lag of a 9x2 data matrix
>>> yr = np.arange(8, -1, -1)
>>> yr.shape = (9, 1)
>>> x = np.hstack((y, yr))
>>> yl = pysal.lag_spatial(w, x)
>>> yl
array([[ 2. , 6. ],
[ 2. , 6. ],
[ 3. , 5. ],
[ 3.33333333, 4.66666667],
[ 4. , 4. ],
[ 4.66666667, 3.33333333],
[ 5. , 3. ],
[ 6. , 2. ],
[ 6. , 2. ]])
"""
return w.sparse * y
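# Hedged sketch (not part of the original API): the spatial lag is just a
# sparse matrix-vector product, so a dense recomputation is a useful check
# on small problems. `w.full()` is assumed to return (dense_array, ids) as
# in PySAL's W class.
def _lag_spatial_dense_check(w, y):
    """Recompute the spatial lag densely and compare with lag_spatial.

    Illustrative only: materializes the full n x n weights matrix.
    """
    dense_w = w.full()[0]          # n x n array of weights
    expected = np.dot(dense_w, y)  # weighted sum over each row's neighbors
    return np.allclose(lag_spatial(w, y), expected)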
def lag_categorical(w, y, ties='tryself'):
"""
Spatial lag operator for categorical variables.
    Finds the most common category among each observation's neighbors,
    with each neighbor's vote weighted by its weight strength.
Parameters
----------
w : W
        PySAL spatial weights object
y : iterable
iterable collection of categories (either int or
string) with dimensionality conforming to w (see examples)
ties : str
string describing the method to use when resolving
ties. By default, the option is "tryself",
and the category of the focal observation
is included with its neighbors to try
and break a tie. If this does not resolve the tie,
a winner is chosen randomly. To just use random choice to
break ties, pass "random" instead.
Returns
-------
    an (n x k) array containing the most common category among each
    observation's neighbors (a column vector when k is 1)
Notes
-----
This works on any array where the number of unique elements along the column
axis is less than the number of elements in the array, for any dtype.
That means the routine should work on any dtype that np.unique() can
compare.
Examples
--------
Set up a 9x9 weights matrix describing a 3x3 regular lattice. Lag one list of
categorical variables with no ties.
>>> import pysal
>>> import numpy as np
>>> np.random.seed(12345)
>>> w = pysal.lat2W(3, 3)
>>> y = ['a','b','a','b','c','b','c','b','c']
>>> y_l = pysal.weights.spatial_lag.lag_categorical(w, y)
>>> np.array_equal(y_l, np.array(['b', 'a', 'b', 'c', 'b', 'c', 'b', 'c', 'b']))
True
Explicitly reshape y into a (9x1) array and calculate lag again
>>> yvect = np.array(y).reshape(9,1)
>>> yvect_l = pysal.weights.spatial_lag.lag_categorical(w,yvect)
>>> check = np.array( [ [i] for i in ['b', 'a', 'b', 'c', 'b', 'c', 'b', 'c', 'b']] )
>>> np.array_equal(yvect_l, check)
True
    Compute the lag of a 9x2 matrix of categories
>>> y2 = ['a', 'c', 'c', 'd', 'b', 'a', 'd', 'd', 'c']
>>> ym = np.vstack((y,y2)).T
>>> ym_lag = pysal.weights.spatial_lag.lag_categorical(w,ym)
>>> check = np.array([['b', 'b'], ['a', 'c'], ['b', 'c'], ['c', 'd'], ['b', 'd'], ['c', 'c'], ['b', 'd'], ['c', 'd'], ['b', 'b']])
>>> np.array_equal(check, ym_lag)
True
"""
if isinstance(y, list):
y = np.array(y)
orig_shape = y.shape
if len(orig_shape) > 1:
if orig_shape[1] > 1:
return np.vstack([lag_categorical(w,col) for col in y.T]).T
y = y.flatten()
output = np.zeros_like(y)
keys = np.unique(y)
    inty = np.zeros(y.shape, dtype=int)
for i,key in enumerate(keys):
inty[y == key] = i
for idx,neighbors in w:
vals = np.zeros(keys.shape)
for neighb, weight in neighbors.items():
vals[inty[w.id2i[neighb]]] += weight
outidx = _resolve_ties(idx,inty,vals,neighbors,ties, w)
output[w.id2i[int(idx)]] = keys[int(outidx)]
return output.reshape(orig_shape)
def _resolve_ties(i,inty,vals,neighbors,method,w):
"""
Helper function to resolve ties if lag is multimodal
    First, if this function is called when there is actually no tie, the
    correct value will be picked.
    If 'random' is selected as the method, a random tiebreaker is chosen.
    If 'tryself' is selected, the observation's own value will be used in
    an attempt to break the tie; if that fails, a random tiebreaker will be
    selected.
"""
if len(vals[vals==vals.max()]) <= 1:
return np.argmax(vals)
elif method.lower() == 'random':
        ties = np.where(vals == vals.max())[0]
        # return a random index among the tied categories
        return np.random.choice(ties)
elif method.lower() == 'tryself':
vals[inty[w.id2i[i]]] += np.mean(neighbors.values())
return _resolve_ties(i,inty,vals,neighbors,'random', w)
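# Hedged demo (not in the original module): exercise both tie-breaking
# strategies on a toy lattice; `pysal.lat2W` is assumed importable as in
# the doctests above.
if __name__ == "__main__":
    import pysal
    np.random.seed(12345)
    w_demo = pysal.lat2W(2, 2)        # 4 cells, rook contiguity
    y_demo = ['a', 'b', 'a', 'b']     # each cell's two neighbors split 1-1
    # 'tryself' lets each focal cell's own category vote before falling
    # back to a random draw; 'random' draws among the tied categories.
    print(lag_categorical(w_demo, y_demo, ties='tryself'))
    print(lag_categorical(w_demo, y_demo, ties='random'))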
| {
"content_hash": "04933665e050dc7f43aff90b7223153f",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 134,
"avg_line_length": 33.142131979695435,
"alnum_prop": 0.5107979782508807,
"repo_name": "sjsrey/pysal_core",
"id": "98c7e3ba9455f3bf683feadccf8406f9576d8761",
"size": "6529",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pysal_core/weights/spatial_lag.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8941"
},
{
"name": "Python",
"bytes": "831634"
},
{
"name": "Shell",
"bytes": "186"
}
],
"symlink_target": ""
} |
import sys
import time
import unittest
import nose
from lib.noseplugin import OptionParser, parser_option
from lib import base
from lib.base import (
BGP_FSM_IDLE,
BGP_FSM_ESTABLISHED,
BGP_ATTR_TYPE_COMMUNITIES,
local,
)
from lib.gobgp import GoBGPContainer
from lib.exabgp import ExaBGPContainer
def community_exists(path, com):
a, b = com.split(':')
com = (int(a) << 16) + int(b)
for a in path['attrs']:
if a['type'] == BGP_ATTR_TYPE_COMMUNITIES and com in a['communities']:
return True
return False
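# For example, the community '65100:10' used in the policy tests below
# encodes to (65100 << 16) + 10 == 4266393610, the integer form GoBGP
# reports in the COMMUNITIES attribute.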
class GoBGPTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
gobgp_ctn_image_name = parser_option.gobgp_image
base.TEST_PREFIX = parser_option.test_prefix
g1 = GoBGPContainer(name='g1', asn=65000, router_id='192.168.0.1',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level,
config_format=parser_option.config_format)
q1 = ExaBGPContainer(name='q1', asn=65001, router_id='192.168.0.2')
q2 = ExaBGPContainer(name='q2', asn=65002, router_id='192.168.0.3')
q3 = ExaBGPContainer(name='q3', asn=65003, router_id='192.168.0.4')
qs = [q1, q2, q3]
ctns = [g1, q1, q2, q3]
initial_wait_time = max(ctn.run() for ctn in ctns)
time.sleep(initial_wait_time)
g1.local('gobgp global policy export add default reject')
for q in qs:
g1.add_peer(q)
q.add_peer(g1)
# advertise a route from q1, q2, q3
for idx, q in enumerate(qs):
route = '10.0.{0}.0/24'.format(idx + 1)
q.add_route(route)
cls.gobgp = g1
cls.quaggas = {'q1': q1, 'q2': q2, 'q3': q3}
    # test that each neighbor's state transitions to established
def test_01_neighbor_established(self):
for q in self.quaggas.values():
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q)
def test_02_check_adj_rib_out(self):
for q in self.quaggas.values():
self.assertEqual(len(self.gobgp.get_adj_rib_out(q)), 0)
def test_03_add_peer(self):
q = ExaBGPContainer(name='q4', asn=65004, router_id='192.168.0.5')
time.sleep(q.run())
self.gobgp.add_peer(q)
q.add_peer(self.gobgp)
q.add_route('10.10.0.0/24')
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q)
self.quaggas['q4'] = q
for q in self.quaggas.values():
self.assertEqual(len(self.gobgp.get_adj_rib_out(q)), 0)
def test_04_disable_peer(self):
q3 = self.quaggas['q3']
self.gobgp.disable_peer(q3)
self.gobgp.wait_for(expected_state=BGP_FSM_IDLE, peer=q3)
for q in self.quaggas.values():
if q.name == 'q3':
continue
self.assertEqual(len(self.gobgp.get_adj_rib_out(q)), 0)
def test_05_enable_peer(self):
q3 = self.quaggas['q3']
self.gobgp.enable_peer(q3)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q3)
for q in self.quaggas.values():
self.assertEqual(len(self.gobgp.get_adj_rib_out(q)), 0)
def test_06_disable_peer2(self):
q3 = self.quaggas['q3']
# advertise a route which was also advertised by q1
        # this route will be best for g1 because q3's router-id is larger
        # than q1's
q3.add_route('10.0.1.0/24')
time.sleep(3)
# then disable q3
self.gobgp.disable_peer(q3)
self.gobgp.wait_for(expected_state=BGP_FSM_IDLE, peer=q3)
for q in self.quaggas.values():
if q.name == 'q3':
continue
self.assertEqual(len(self.gobgp.get_adj_rib_out(q)), 0)
def test_07_adv_to_one_peer(self):
self.gobgp.local('gobgp policy neighbor add ns0 {0}'.format(self.gobgp.peers[self.quaggas['q1']]['neigh_addr'].split('/')[0]))
self.gobgp.local('gobgp policy statement add st0')
self.gobgp.local('gobgp policy statement st0 add condition neighbor ns0')
self.gobgp.local('gobgp policy statement st0 add action accept')
self.gobgp.local('gobgp policy add p0 st0')
self.gobgp.local('gobgp global policy export add p0 default reject')
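        # an outbound soft reset is required for the new export policy to
        # be applied to routes that were already advertised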
for q in self.quaggas.values():
self.gobgp.softreset(q, type='out')
def test_08_check_adj_rib_out(self):
for q in self.quaggas.values():
if q.name == 'q3':
continue
paths = self.gobgp.get_adj_rib_out(q)
if q == self.quaggas['q1']:
self.assertEqual(len(paths), 2)
else:
self.assertEqual(len(paths), 0)
def test_09_change_global_policy(self):
self.gobgp.local('gobgp policy statement st0 add action community add 65100:10')
self.gobgp.local('gobgp global policy export set p0 default accept')
for q in self.quaggas.values():
self.gobgp.softreset(q, type='out')
def test_10_check_adj_rib_out(self):
for q in self.quaggas.values():
if q.name == 'q3':
continue
paths = self.gobgp.get_adj_rib_out(q)
if q != self.quaggas['q3']:
self.assertEqual(len(paths), 2)
for path in paths:
if q == self.quaggas['q1']:
self.assertTrue(community_exists(path, '65100:10'))
else:
self.assertFalse(community_exists(path, '65100:10'))
def test_11_add_ibgp_peer(self):
q = ExaBGPContainer(name='q5', asn=65000, router_id='192.168.0.6')
time.sleep(q.run())
self.quaggas['q5'] = q
self.gobgp.add_peer(q)
q.add_peer(self.gobgp)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q)
def test_12_add_local_pref_policy(self):
self.gobgp.local('gobgp policy statement st1 add action accept')
self.gobgp.local('gobgp policy statement st1 add action local-pref 300')
self.gobgp.local('gobgp policy add p1 st1')
self.gobgp.local('gobgp global policy export set p1 default reject')
for q in self.quaggas.values():
self.gobgp.softreset(q, type='out')
def test_13_check_adj_rib_out(self):
q1 = self.quaggas['q1']
for path in self.gobgp.get_adj_rib_out(q1):
self.assertTrue(path['local-pref'] is None)
q5 = self.quaggas['q5']
for path in self.gobgp.get_adj_rib_out(q5):
self.assertEqual(path['local-pref'], 300)
def test_14_route_type_condition_local(self):
self.gobgp.local('gobgp policy statement st2 add action accept')
self.gobgp.local('gobgp policy statement st2 add condition route-type local')
self.gobgp.local('gobgp policy add p2 st2')
self.gobgp.local('gobgp global policy export set p2 default reject')
for q in self.quaggas.values():
self.gobgp.softreset(q, type='out')
q1 = self.quaggas['q1']
self.assertEqual(len(self.gobgp.get_adj_rib_out(q1)), 0)
self.gobgp.add_route('10.20.0.0/24')
time.sleep(1)
self.assertEqual(len(self.gobgp.get_adj_rib_out(q1)), 1)
self.assertEqual(self.gobgp.get_adj_rib_out(q1)[0]['nlri']['prefix'], '10.20.0.0/24')
def test_15_route_type_condition_internal(self):
self.gobgp.local('gobgp policy statement st22 add action accept')
self.gobgp.local('gobgp policy statement st22 add condition route-type internal')
self.gobgp.local('gobgp policy add p2 st22')
self.gobgp.local('gobgp policy del p2 st2')
for q in self.quaggas.values():
self.gobgp.softreset(q, type='out')
q1 = self.quaggas['q1']
self.assertEqual(len(self.gobgp.get_adj_rib_out(q1)), 0)
q5 = self.quaggas['q5']
q5.add_route('10.30.0.0/24')
time.sleep(1)
self.assertEqual(len(self.gobgp.get_adj_rib_out(q1)), 1)
self.assertEqual(self.gobgp.get_adj_rib_out(q1)[0]['nlri']['prefix'], '10.30.0.0/24')
def test_16_route_type_condition_external(self):
self.gobgp.local('gobgp policy statement st222 add action accept')
self.gobgp.local('gobgp policy statement st222 add condition route-type external')
self.gobgp.local('gobgp policy add p2 st222')
self.gobgp.local('gobgp policy del p2 st22')
for q in self.quaggas.values():
self.gobgp.softreset(q, type='out')
q1 = self.quaggas['q1']
num1 = len(self.gobgp.get_adj_rib_out(q1))
self.gobgp.add_route('10.40.0.0/24')
time.sleep(1)
num2 = len(self.gobgp.get_adj_rib_out(q1))
self.assertEqual(num1, num2)
q5 = self.quaggas['q5']
q5.add_route('10.50.0.0/24')
time.sleep(1)
num3 = len(self.gobgp.get_adj_rib_out(q1))
self.assertEqual(num1, num3)
q2 = self.quaggas['q2']
q2.add_route('10.60.0.0/24')
time.sleep(1)
num4 = len(self.gobgp.get_adj_rib_out(q1))
self.assertEqual(num1 + 1, num4)
def test_17_multi_statement(self):
self.gobgp.local('gobgp policy statement st3 add action med set 100')
self.gobgp.local('gobgp policy statement st4 add action local-pref 100')
self.gobgp.local('gobgp policy add p3 st3 st4')
self.gobgp.local('gobgp global policy import set p3 default accept')
self.gobgp.add_route('10.70.0.0/24')
time.sleep(1)
rib = self.gobgp.get_global_rib('10.70.0.0/24')
self.assertEqual(len(rib), 1)
self.assertEqual(len(rib[0]['paths']), 1)
path = rib[0]['paths'][0]
self.assertEqual(path['med'], 100)
self.assertEqual(path['local-pref'], 100)
def test_18_reject_policy(self):
self.gobgp.local('gobgp global policy import set default reject')
self.gobgp.local('gobgp neighbor all softresetin')
time.sleep(1)
# self-generated routes remain since softresetin doesn't re-evaluate
# them
for v in self.gobgp.get_global_rib():
for p in v['paths']:
self.assertEqual(p['nexthop'], '0.0.0.0')
if __name__ == '__main__':
output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
if int(output) != 0:
print("docker not found")
sys.exit(1)
nose.main(argv=sys.argv, addplugins=[OptionParser()],
defaultTest=sys.argv[0])
| {
"content_hash": "af05c0a95ad6e41076e84780502b2cf1",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 134,
"avg_line_length": 37.329787234042556,
"alnum_prop": 0.5976061555998861,
"repo_name": "a16/gobgp",
"id": "a67cb327ab7bf11b6da21fe624a0b26db9c65107",
"size": "11141",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/scenario_test/global_policy_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2861"
},
{
"name": "Go",
"bytes": "1905653"
},
{
"name": "Java",
"bytes": "1698"
},
{
"name": "JavaScript",
"bytes": "1050"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Python",
"bytes": "584359"
},
{
"name": "Ruby",
"bytes": "701"
},
{
"name": "Shell",
"bytes": "90515"
}
],
"symlink_target": ""
} |
"""Test the Partially Signed Transaction RPCs.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, find_output
import json
import os
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
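# 0xfffffffd is the highest nSequence value that still signals BIP125
# replaceability: a transaction is replaceable when any of its inputs has
# a sequence number below 0xfffffffe.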
# Create one-input, one-output, no-fee transaction:
class PSBTTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 3
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Create and fund a raw tx for sending 10 BTC
psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
        # Node 1 should not be able to add anything to it, but should still return the psbt unchanged
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
# Sign the transaction and send
signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt']
final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
self.nodes[0].sendrawtransaction(final_tx)
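        # The canonical BIP174 workflow exercised above: a Creator/Updater
        # builds and funds the PSBT (walletcreatefundedpsbt), Signers add
        # signatures (walletprocesspsbt), and a Finalizer/Extractor
        # (finalizepsbt) yields network-ready hex for sendrawtransaction.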
# Create p2sh, p2wpkh, and p2wsh addresses
pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
p2sh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
p2sh_p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
p2wpkh = self.nodes[1].getnewaddress("", "bech32")
p2pkh = self.nodes[1].getnewaddress("", "legacy")
p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")
# fund those addresses
rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
txid = self.nodes[0].sendrawtransaction(signed_tx)
self.nodes[0].generate(6)
self.sync_all()
# Find the output pos
p2sh_pos = -1
p2wsh_pos = -1
p2wpkh_pos = -1
p2pkh_pos = -1
p2sh_p2wsh_pos = -1
p2sh_p2wpkh_pos = -1
decoded = self.nodes[0].decoderawtransaction(signed_tx)
for out in decoded['vout']:
if out['scriptPubKey']['addresses'][0] == p2sh:
p2sh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wsh:
p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wpkh:
p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wsh:
p2sh_p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wpkh:
p2sh_p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2pkh:
p2pkh_pos = out['n']
# spend single key from node 1
rawtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(rawtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# partially sign multisig things with node 1
psbtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
psbtx = walletprocesspsbt_out['psbt']
assert_equal(walletprocesspsbt_out['complete'], False)
# partially sign with node 2. This should be complete and sendable
walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# check that walletprocesspsbt fails to decode a non-psbt
rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
# Convert a non-psbt to psbt and make sure we can decode it
rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
rawtx = self.nodes[0].fundrawtransaction(rawtx)
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Make sure that a psbt with signatures cannot be converted
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].converttopsbt, signedtx['hex'])
# Explicitly allow converting non-empty txs
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Create outputs to nodes 1 and 2
node1_addr = self.nodes[1].getnewaddress()
node2_addr = self.nodes[2].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
        txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
self.nodes[0].generate(6)
self.sync_all()
vout1 = find_output(self.nodes[1], txid1, 13)
vout2 = find_output(self.nodes[2], txid2, 13)
# Create a psbt spending outputs from nodes 1 and 2
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
finalized = self.nodes[0].finalizepsbt(combined)['hex']
self.nodes[0].sendrawtransaction(finalized)
self.nodes[0].generate(6)
self.sync_all()
# Test additional args in walletcreatepsbt
# Make sure both pre-included and funded inputs
# have the correct sequence numbers based on
# replaceable arg
block_height = self.nodes[0].getblockcount()
unspent = self.nodes[0].listunspent()[0]
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable":True}, False)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" not in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)
# Same construction with only locktime set
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {}, True)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert tx_in["sequence"] > MAX_BIP125_RBF_SEQUENCE
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height)
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in in decoded_psbt["tx"]["vin"]:
assert tx_in["sequence"] > MAX_BIP125_RBF_SEQUENCE
assert_equal(decoded_psbt["tx"]["locktime"], 0)
# BIP 174 Test Vectors
# Check that unknown values are just passed through
unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
assert_equal(unknown_psbt, unknown_out)
# Open the data file
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
d = json.load(f)
invalids = d['invalid']
valids = d['valid']
creators = d['creator']
signers = d['signer']
combiners = d['combiner']
finalizers = d['finalizer']
extractors = d['extractor']
# Invalid PSBTs
for invalid in invalids:
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
# Valid PSBTs
for valid in valids:
self.nodes[0].decodepsbt(valid)
# Creator Tests
for creator in creators:
created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
assert_equal(created_tx, creator['result'])
# Signer tests
for i, signer in enumerate(signers):
self.nodes[2].createwallet("wallet{}".format(i))
wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
for key in signer['privkeys']:
wrpc.importprivkey(key)
signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt']
assert_equal(signed_tx, signer['result'])
# Combiner test
for combiner in combiners:
combined = self.nodes[2].combinepsbt(combiner['combine'])
assert_equal(combined, combiner['result'])
# Finalizer test
for finalizer in finalizers:
finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
assert_equal(finalized, finalizer['result'])
# Extractor test
for extractor in extractors:
extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
assert_equal(extracted, extractor['result'])
if __name__ == '__main__':
PSBTTest().main()
| {
"content_hash": "70359e353380aaaa60d4fe719fa34dc5",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 209,
"avg_line_length": 49.51111111111111,
"alnum_prop": 0.6262118491921005,
"repo_name": "ericshawlinux/bitcoin",
"id": "54dc87144881a94128cb76041b5b31a4cc97074d",
"size": "11349",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/functional/rpc_psbt.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "693106"
},
{
"name": "C++",
"bytes": "5054023"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "51512"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "189381"
},
{
"name": "Makefile",
"bytes": "111686"
},
{
"name": "Objective-C",
"bytes": "3892"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1150581"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "53022"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'django.conf.global_settings'
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django_block_comment'
copyright = u'2011, Gabriel Hurley'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django_block_commentdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django_block_comment.tex', u'django\\_block\\_comment Documentation',
u'Gabriel Hurley', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django_block_comment', u'django_block_comment Documentation',
[u'Gabriel Hurley'], 1)
]
| {
"content_hash": "21590b6b7afe7f62126af4ae5f8a6f35",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 82,
"avg_line_length": 33,
"alnum_prop": 0.7075163398692811,
"repo_name": "gabrielhurley/django-block-comment",
"id": "962e97477f617bd6cec3744d3d26c6a75b39e502",
"size": "7163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "10050"
},
{
"name": "Python",
"bytes": "104597"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server import util
class TapiCommonBandwidthProfileType(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
"""
allowed enum values
"""
MEF_10_X = "MEF_10.x"
RFC_2697 = "RFC_2697"
RFC_2698 = "RFC_2698"
RFC_4115 = "RFC_4115"
def __init__(self): # noqa: E501
"""TapiCommonBandwidthProfileType - a model defined in OpenAPI
"""
self.openapi_types = {
}
self.attribute_map = {
}
@classmethod
def from_dict(cls, dikt) -> 'TapiCommonBandwidthProfileType':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.common.BandwidthProfileType of this TapiCommonBandwidthProfileType. # noqa: E501
:rtype: TapiCommonBandwidthProfileType
"""
return util.deserialize_model(dikt, cls)
| {
"content_hash": "6fa5d3d64dd9855fe201e7344a837cab",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 107,
"avg_line_length": 26.558139534883722,
"alnum_prop": 0.637478108581436,
"repo_name": "OpenNetworkingFoundation/ONFOpenTransport",
"id": "5ee35ddb805a3318e752c862cd7992c7aef1fe07",
"size": "1159",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "RI/flask_server/tapi_server/models/tapi_common_bandwidth_profile_type.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "2562"
}
],
"symlink_target": ""
} |
import os
from django.apps import AppConfig
from django.conf import settings
class ContentConfig(AppConfig):
name = 'contentcuration'
def ready(self):
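        # Ensure the storage directory exists at app startup; os.error is
        # an alias of OSError, so an already-existing directory is ignored.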
try:
os.makedirs(settings.STORAGE_ROOT)
except os.error:
            pass
| {
"content_hash": "da821fe34181f540c734f6f55cba9603",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 40,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.7049180327868853,
"repo_name": "aronasorman/content-curation",
"id": "e19fecc894e1051cb96f4ae08a6c5471ba2291a6",
"size": "244",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contentcuration/contentcuration/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "85010"
},
{
"name": "HTML",
"bytes": "364417"
},
{
"name": "JavaScript",
"bytes": "285260"
},
{
"name": "Makefile",
"bytes": "579"
},
{
"name": "Python",
"bytes": "325384"
}
],
"symlink_target": ""
} |
import pysal
import os.path
import pysal.core.FileIO as FileIO
from pysal.weights import W
from warnings import warn
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>"
__all__ = ["GeoBUGSTextIO"]
class GeoBUGSTextIO(FileIO.FileIO):
"""
Opens, reads, and writes weights file objects in the text format
used in GeoBUGS. GeoBUGS generates a spatial weights matrix
as an R object and writes it out as an ASCII text representation of
the R object.
    An example GeoBUGS text file is structured as follows:
list([CARD],[ADJ],[WGT],[SUMNUMNEIGH])
where [CARD] and [ADJ] are required but the others are optional.
PySAL assumes [CARD] and [ADJ] always exist in an input text file.
It can read a GeoBUGS text file, even when its content is not written
in the order of [CARD], [ADJ], [WGT], and [SUMNUMNEIGH].
It always writes all of [CARD], [ADJ], [WGT], and [SUMNUMNEIGH].
PySAL does not apply text wrapping during file writing.
In the above example,
[CARD]: num=c([a list of comma-splitted neighbor cardinalities])
[ADJ]: adj=c([a list of comma-splitted neighbor IDs])
        if cardinality is zero, neighbor IDs are skipped.
The ordering of observations is the same in both [CARD] and
[ADJ].
Neighbor IDs are record numbers starting from one.
[WGT]: weights=c([a list of comma-splitted weights])
The restrictions for [ADJ] also apply to [WGT].
[SUMNUMNEIGH]: sumNumNeigh=[The total number of neighbor pairs]
the total number of neighbor pairs is an integer
value and the same as the sum of neighbor cardinalities.
Notes
-----
For the files generated from R spdep nb2WB and dput function,
it is assumed that the value for the control parameter of dput function
is NULL. Please refer to R spdep nb2WB function help file.
References
----------
Thomas, A., Best, N., Lunn, D., Arnold, R., and Spiegelhalter, D.
(2004) GeoBUGS User Manual.
R spdep nb2WB function help file.
"""
FORMATS = ['geobugs_text']
MODES = ['r', 'w']
def __init__(self, *args, **kwargs):
args = args[:2]
FileIO.FileIO.__init__(self, *args, **kwargs)
self.file = open(self.dataPath, self.mode)
def read(self, n=-1):
self._complain_ifclosed(self.closed)
return self._read()
def seek(self, pos):
if pos == 0:
self.file.seek(0)
self.pos = 0
def _read(self):
"""Reads GeoBUGS text file
Returns a pysal.weights.weights.W object
Examples
--------
Type 'dir(w)' at the interpreter to see what methods are supported.
Open a GeoBUGS text file and read it into a pysal weights object
>>> w = pysal.open(pysal.examples.get_path('geobugs_scot'),'r','geobugs_text').read()
Get the number of observations from the header
>>> w.n
56
Get the mean number of neighbors
>>> w.mean_neighbors
4.1785714285714288
Get neighbor distances for a single observation
>>> w[1]
{9: 1.0, 19: 1.0, 5: 1.0}
"""
if self.pos > 0:
raise StopIteration
fbody = self.file.read()
body_structure = {}
for i in ['num', 'adj', 'weights', 'sumNumNeigh']:
i_loc = fbody.find(i)
if i_loc != -1:
body_structure[i] = (i_loc, i)
body_sequence = sorted(body_structure.values())
body_sequence.append((-1, 'eof'))
for i in range(len(body_sequence) - 1):
part, next_part = body_sequence[i], body_sequence[i + 1]
start, end = part[0], next_part[0]
part_text = fbody[start:end]
part_length, start, end = len(part_text), 0, -1
for c in xrange(part_length):
if part_text[c].isdigit():
start = c
break
for c in xrange(part_length - 1, 0, -1):
if part_text[c].isdigit():
end = c + 1
break
part_text = part_text[start: end]
part_text = part_text.replace('\n', '')
value_type = int
if part[1] == 'weights':
value_type = float
body_structure[part[1]] = [value_type(v)
for v in part_text.split(',')]
cardinalities = body_structure['num']
adjacency = body_structure['adj']
raw_weights = [1.0] * int(sum(cardinalities))
if 'weights' in body_structure and isinstance(body_structure['weights'], list):
raw_weights = body_structure['weights']
no_obs = len(cardinalities)
neighbors = {}
weights = {}
pos = 0
for i in xrange(no_obs):
neighbors[i + 1] = []
weights[i + 1] = []
no_nghs = cardinalities[i]
if no_nghs > 0:
neighbors[i + 1] = adjacency[pos: pos + no_nghs]
weights[i + 1] = raw_weights[pos: pos + no_nghs]
pos += no_nghs
self.pos += 1
return W(neighbors, weights)
def write(self, obj):
"""
Parameters
----------
.write(weightsObject)
accepts a weights object
Returns
        -------
        a GeoBUGS text file
            writes the weights object to the opened text file.
Examples
--------
>>> import tempfile, pysal, os
>>> testfile = pysal.open(pysal.examples.get_path('geobugs_scot'),'r','geobugs_text')
>>> w = testfile.read()
Create a temporary file for this example
>>> f = tempfile.NamedTemporaryFile(suffix='')
Reassign to new var
>>> fname = f.name
Close the temporary named file
>>> f.close()
Open the new file in write mode
>>> o = pysal.open(fname,'w','geobugs_text')
Write the Weights object into the open file
>>> o.write(w)
>>> o.close()
Read in the newly created text file
>>> wnew = pysal.open(fname,'r','geobugs_text').read()
Compare values from old to new
>>> wnew.pct_nonzero == w.pct_nonzero
True
Clean up temporary file created for this example
>>> os.remove(fname)
"""
self._complain_ifclosed(self.closed)
if issubclass(type(obj), W):
cardinalities, neighbors, weights = [], [], []
for i in obj.id_order:
cardinalities.append(obj.cardinalities[i])
neighbors.extend(obj.neighbors[i])
weights.extend(obj.weights[i])
self.file.write('list(')
self.file.write('num=c(%s),' % ','.join(map(str, cardinalities)))
self.file.write('adj=c(%s),' % ','.join(map(str, neighbors)))
self.file.write('sumNumNeigh=%i)' % sum(cardinalities))
self.pos += 1
else:
raise TypeError("Expected a pysal weights object, got: %s" % (
type(obj)))
def close(self):
self.file.close()
FileIO.FileIO.close(self)
| {
"content_hash": "32b35ec9c9c1e6d2611b938be90d2287",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 93,
"avg_line_length": 30.876068376068375,
"alnum_prop": 0.5532179930795847,
"repo_name": "tectronics/pysal",
"id": "d1bfeb922893182d4fb3ab74c23329c1922223fc",
"size": "7225",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pysal/core/IOHandlers/geobugs_txt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "10152"
},
{
"name": "Makefile",
"bytes": "254"
},
{
"name": "Python",
"bytes": "2060153"
},
{
"name": "Shell",
"bytes": "11128"
},
{
"name": "XSLT",
"bytes": "17216"
}
],
"symlink_target": ""
} |
from sklearn.feature_extraction.text import TfidfVectorizer
import operator
import io
import random
import numpy as np
from numpy import arange
import nltk
from nltk import word_tokenize
def tweets2tags(text, hasht):
    tx = []
    for line in text:
        tokens = word_tokenize(line)
        tags = nltk.pos_tag(tokens)
        # keep only nouns (POS tags starting with 'NN')
        nouns = [s[0] for s in tags if s[1].startswith('NN')]
        tx.extend(nouns)
    vectorizer = TfidfVectorizer(stop_words="english", min_df=1)
    X = vectorizer.fit_transform(tx)
idf = vectorizer.idf_
    size = len(idf)
    # Quantize the idf scores into five positional buckets (values 2-7);
    # 6 is skipped because it is reserved for hashtags below.
    idf[:size/5] = 2
    idf[size/5:2*size/5] = 3
    idf[2*size/5:3*size/5] = 4
    idf[3*size/5:4*size/5] = 5
    idf[4*size/5:] = 7
tags = dict(zip(vectorizer.get_feature_names(), idf))
for i in hasht:
tags[i] = 6
return tags
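# Hedged usage sketch (inputs are illustrative, not from the original
# repo; assumes the NLTK 'punkt' tokenizer and POS tagger models are
# installed):
if __name__ == '__main__':
    tweets = ["Python makes data analysis fun",
              "The analysis pipeline runs every night"]
    hashtags = ["datascience"]
    # maps each extracted noun to a bucketed weight; hashtags pinned to 6
    print(tweets2tags(tweets, hashtags))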
| {
"content_hash": "cc56884030831904dd4b80c9978b2fd4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 67,
"avg_line_length": 29.2,
"alnum_prop": 0.5856164383561644,
"repo_name": "PawelPamula/who-are-you",
"id": "142cd0f0b0c1948708e5819975d45eb3bc6f7557",
"size": "876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/tst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1171"
},
{
"name": "HTML",
"bytes": "5494"
},
{
"name": "JavaScript",
"bytes": "874"
},
{
"name": "Python",
"bytes": "18798"
}
],
"symlink_target": ""
} |
import unittest
from conans.client.store.localdb import LocalDB
import os
from conans.test.utils.test_files import temp_folder
class LocalStoreTest(unittest.TestCase):
def localdb_test(self):
tmp_dir = temp_folder()
db_file = os.path.join(tmp_dir, "dbfile")
localdb = LocalDB(db_file)
# Test write and read login
localdb.init()
user, token = localdb.get_login("myurl1")
self.assertIsNone(user)
self.assertIsNone(token)
localdb.set_login(("pepe", "token"), "myurl1")
user, token = localdb.get_login("myurl1")
self.assertEquals("pepe", user)
self.assertEquals("token", token)
self.assertEquals("pepe", localdb.get_username("myurl1"))
| {
"content_hash": "3ecff250b8d2da074e09a603ee8a0ed5",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 65,
"avg_line_length": 31.041666666666668,
"alnum_prop": 0.6456375838926175,
"repo_name": "AversivePlusPlus/AversivePlusPlus",
"id": "84713c265f3aa38dfd7f11fd5a69af2d786057e4",
"size": "745",
"binary": false,
"copies": "8",
"ref": "refs/heads/v17.02-tag",
"path": "tools/conan/conans/test/local_db_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "1292"
},
{
"name": "C",
"bytes": "147"
},
{
"name": "C++",
"bytes": "761305"
},
{
"name": "CMake",
"bytes": "47072"
},
{
"name": "Makefile",
"bytes": "29429"
},
{
"name": "Python",
"bytes": "39934"
}
],
"symlink_target": ""
} |
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
class AssertElementShapeTest(test_base.DatasetTestBase):
def test_assert_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(5).map(create_dataset)
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
self.assertEqual(expected_shapes, dataset.output_shapes)
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes, result.output_shapes)
iterator = result.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(3).map(create_dataset)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 10)))
with self.assertRaises(ValueError):
dataset.apply(batching.assert_element_shape(wrong_shapes))
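  # Note the contrast with the unknown-shape tests below: when output
  # shapes are statically known, a mismatch fails at graph-construction
  # time (ValueError); py_func outputs with unknown shapes can only be
  # checked at run time (errors.InvalidArgumentError).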
def test_assert_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(5).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes, dataset.output_shapes)
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes, result.output_shapes)
iterator = result.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(3).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes, dataset.output_shapes)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 10)))
iterator = (
dataset.apply(batching.assert_element_shape(wrong_shapes))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
def test_assert_partial_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(5).map(create_dataset)
partial_expected_shape = (
tensor_shape.TensorShape(None), # Unknown shape
tensor_shape.TensorShape((None, 4))) # Partial shape
result = dataset.apply(
batching.assert_element_shape(partial_expected_shape))
# Partial shapes are merged with actual shapes:
actual_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
self.assertEqual(actual_shapes, result.output_shapes)
iterator = result.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_partial_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(3).map(create_dataset)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 10)))
with self.assertRaises(ValueError):
dataset.apply(batching.assert_element_shape(wrong_shapes))
def test_assert_partial_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(5).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes, dataset.output_shapes)
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 4)))
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes, result.output_shapes)
iterator = result.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_partial_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(3).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes, dataset.output_shapes)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 10)))
iterator = (
dataset.apply(batching.assert_element_shape(wrong_shapes))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
| {
"content_hash": "36fb55cdb8f314e077141347be714822",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 77,
"avg_line_length": 37.693396226415096,
"alnum_prop": 0.6597422099862346,
"repo_name": "dongjoon-hyun/tensorflow",
"id": "0456463a1928cf226010670b90a5d574579e0411",
"size": "8680",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/data/python/kernel_tests/assert_element_shape_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3301"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "446293"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50950243"
},
{
"name": "CMake",
"bytes": "198845"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285854"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "869263"
},
{
"name": "Jupyter Notebook",
"bytes": "2611125"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "62216"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40335927"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "487251"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from inspect import isclass
from datetime import datetime
__all__ = ['ConvertError', 'default_converters', 'Converter', 'String',
'Integer', 'Any', 'Date']
class ConvertError(Exception):
'''
Converter should raise ConvertError if the given value does not match
'''
@property
def converter(self):
return self.args[0]
@property
def value(self):
return self.args[1]
class Converter(object):
'''A base class for urlconverters'''
regex = '[.a-zA-Z0-9:@&+$,_%%-]+'
class NotSet(object): pass
default = NotSet
def __init__(self, default=NotSet, regex=None):
if not default is self.NotSet:
self.default = default
if regex is not None:
self.regex = regex
def to_python(self, value, env=None):
'''
Accepts unicode url part and returns python object.
Should be implemented in subclasses
'''
raise NotImplementedError() # pragma: no cover
def to_url(self, value):
'''
Accepts python object and returns unicode prepared to be used
in url building.
Should be implemented in subclasses
'''
raise NotImplementedError() # pragma: no cover
class String(Converter):
'''
    Unquotes urlencoded string.
'''
min = 1
max = None
def __init__(self, min=None, max=None, **kwargs):
Converter.__init__(self, **kwargs)
self.min = min if min is not None else self.min
self.max = max or self.max
def to_python(self, value, env=None):
self.check_len(value)
return value
def to_url(self, value):
return unicode(value)
def check_len(self, value):
length = len(value)
if length < self.min or self.max and length > self.max:
raise ConvertError(self, value)
class Integer(Converter):
'''
Extracts integer value from url part.
'''
regex = '(?:[1-9]\d*|0)'
def to_python(self, value, env=None):
try:
value = int(value)
except ValueError:
raise ConvertError(self, value)
else:
return value
def to_url(self, value):
if isinstance(value, basestring):
# sometimes it is useful to build fake urls with placeholders,
# to be replaced in JS to real values
# For example:
# root.item(id="REPLACEME")
return value
return str(int(value))
class Any(Converter):
'''
Checks if string value is in a list of allowed values and returns that value::
web.match('/<any(yes,no,"probably, no",maybe):answer>')
'''
def __init__(self, *values, **kwargs):
Converter.__init__(self, **kwargs)
self.values = values
def to_python(self, value, env=None):
if value in self.values:
return value
raise ConvertError(self, value)
def to_url(self, value):
return unicode(value)
class Date(Converter):
'''
Converts string to datetime by strptime using given format::
web.match('/<date(format="%Y.%m.%d"):date>')
'''
format = "%Y-%m-%d"
def __init__(self, format=None, **kwargs):
Converter.__init__(self, **kwargs)
if format is not None:
self.format = format
def to_python(self, value, env=None):
try:
return datetime.strptime(value, self.format).date()
except ValueError:
raise ConvertError(self, value)
def to_url(self, value):
if isinstance(value, basestring):
# sometimes it is useful to build fake urls with placeholders,
# to be replaced in JS to real values
# For example:
# root.item(id="REPLACEME")
return value
return value.strftime(self.format)
default_converters = {'string': String,
'int': Integer,
'any': Any,
'date': Date}
# assert all defined converters are registered
for item in globals().values():
if isclass(item) and \
issubclass(item, Converter) and \
not item is Converter:
assert item in default_converters.values(), item
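# Hedged usage sketch (not part of the original module): converters
# round-trip between URL parts and Python values. Python 2 syntax, to
# match the `unicode`/`basestring` usage above.
if __name__ == '__main__':
    conv = Date(format='%Y.%m.%d')
    value = conv.to_python(u'2011.05.30')   # -> datetime.date(2011, 5, 30)
    assert conv.to_url(value) == u'2011.05.30'
    try:
        Integer().to_python(u'not-a-number')
    except ConvertError as e:
        print e.value                        # u'not-a-number'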
| {
"content_hash": "718979f6ff7ccacccb3e1ca88eaa6e48",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 82,
"avg_line_length": 25.951219512195124,
"alnum_prop": 0.574483082706767,
"repo_name": "oas89/iktomi",
"id": "869f8f6747e7a55dee7efa2a7a88f7ba69dbad51",
"size": "4281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iktomi/web/url_converters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7846"
},
{
"name": "Makefile",
"bytes": "727"
},
{
"name": "Python",
"bytes": "569309"
},
{
"name": "Shell",
"bytes": "1321"
}
],
"symlink_target": ""
} |
import os, sys, traceback
import github3
import gspread
import io
import json
import logging
from logging.config import dictConfig
from oauth2client.client import SignedJwtAssertionCredentials
GITHUB_CONFIG = {
'TOKEN': os.environ['GITHUB_TOKEN'],
'REPO_OWNER': 'opennews',
'REPO_NAME': 'srccon-product',
'TARGET_FILE': 'schedule/sessions.json',
'TARGET_BRANCHES': ['staging','master',],# choose one or more branches
}
GITHUB_SRCCON_YAML_CONFIG = {
'TOKEN': os.environ['GITHUB_TOKEN'],
'REPO_OWNER': 'opennews',
'REPO_NAME': 'srccon-product',
'TARGET_FILE': '_data/schedule.yaml',
'TARGET_BRANCHES': ['staging','master',],
}
GOOGLE_API_CONFIG = {
'CLIENT_EMAIL': os.environ['GOOGLE_API_CLIENT_EMAIL'],
'PRIVATE_KEY': os.environ['GOOGLE_API_PRIVATE_KEY'].decode('unicode_escape'),
'SCOPE': ['https://spreadsheets.google.com/feeds'],
}
# the unique ID of the spreadsheet with your data can be stored
# as an environment variable or simply added here as a string
GOOGLE_SPREADSHEET_KEY = '1MQEvn-rL90no-weGT-QQx5kCCiGfBPGCB1XUZLfxy-g'
#GOOGLE_SPREADSHEET_KEY = os.environ['GOOGLE_SPREADSHEET_KEY']
# pull data from a named worksheet, or leave blank to assume first worksheet
GOOGLE_SPREADSHEET_SHEETNAME = 'schedule data'
# if data is spread across multiple worksheets, set to True
FETCH_MULTIPLE_WORKSHEETS = False
# if fetching multiple worksheets, name sheets to skip here
# EXAMPLE: WORKSHEETS_TO_SKIP = ['Sheet1', 'Sheet4',]
WORKSHEETS_TO_SKIP = []
# set to True to store local version of JSON
MAKE_LOCAL_JSON = False
# set to False for dry runs
COMMIT_JSON_TO_GITHUB = True
# TODO: Add method for storing JSON output in S3 bucket
# S3_CONFIG = {}
# SEND_JSON_TO_S3 = False
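# Illustrative dry-run setup (example values, not the defaults above):
#   MAKE_LOCAL_JSON = True         # also write the JSON next to this script
#   COMMIT_JSON_TO_GITHUB = False  # authenticate against GitHub, commit nothing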
def authenticate_with_google():
'''
Connect to Google Spreadsheet with gspread library.
'''
credentials = SignedJwtAssertionCredentials(
GOOGLE_API_CONFIG['CLIENT_EMAIL'], GOOGLE_API_CONFIG['PRIVATE_KEY'], GOOGLE_API_CONFIG['SCOPE']
)
google_api_conn = gspread.authorize(credentials)
return google_api_conn
def open_google_spreadsheet():
'''
Authenticate and return spreadsheet by `GOOGLE_SPREADSHEET_KEY`.
'''
google_api_conn = authenticate_with_google()
spreadsheet = google_api_conn.open_by_key(GOOGLE_SPREADSHEET_KEY)
return spreadsheet
def fetch_data(multiple_sheets=False, worksheets_to_skip=None):
spreadsheet = open_google_spreadsheet()
if not multiple_sheets:
# Return data from named worksheet if specified ...
if GOOGLE_SPREADSHEET_SHEETNAME:
worksheet = spreadsheet.worksheet(GOOGLE_SPREADSHEET_SHEETNAME)
# .. otherwise return data from the first worksheet
else:
worksheet = spreadsheet.get_worksheet(0)
data = worksheet.get_all_records(empty2zero=False)
else:
# Return data from all worksheets in the Google spreadsheet, optionally
# skipping sheets identified by title in `worksheets_to_skip`
data = []
worksheet_list = [
sheet for sheet in spreadsheet.worksheets()
if sheet.title not in (worksheets_to_skip or [])
]
for worksheet in worksheet_list:
data.extend(worksheet.get_all_records(empty2zero=False))
return data
def transform_data(data):
'''
Transforms data and filters/validates individual spreadsheet rows
for fields we want in the JSON output. Currently, this:
* ensures that all variables going into the JSON are strings
Additional filters should be added to _transform_response_item.
'''
def _transform_response_item(item, skip=False):
# make sure vars are strings
_transformed_item = {k: unicode(v) for k, v in item.iteritems() if k}
# EXAMPLE: get rid of data from column `rowNumber`
# if 'rowNumber' in _transformed_item:
# del _transformed_item['rowNumber']
# EXAMPLE: rename spreadsheet column `name` into JSON key `title`
# if 'name' in _transformed_item:
# _transformed_item['title'] = _transformed_item.pop('name', '')
# EXAMPLE: use `skip` flag to ignore rows without valid id
# if 'id' in _transformed_item:
# try:
# int(_transformed_item['id'])
# except:
# skip = True
# if we've triggered the skip flag anywhere, drop this record
if skip:
_transformed_item = None
return _transformed_item
# pass spreadsheet rows through the transformer
transformed_data = filter(None, [_transform_response_item(item) for item in data])
return transformed_data
def make_json(data, store_locally=False, filename=GITHUB_CONFIG['TARGET_FILE']):
'''
Turns data into nice JSON, and optionally stores to a local file.
'''
json_out = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)
if store_locally:
with io.open(filename, 'w', encoding='utf8') as outfile:
outfile.write(unicode(json_out))
return json_out.encode('utf-8')
def commit_json(data, target_config=GITHUB_CONFIG, commit=COMMIT_JSON_TO_GITHUB):
'''
Uses token to log into GitHub, then gets the appropriate repo based
on owner/name defined in GITHUB_CONFIG.
Creates data file if it does not exist in the repo, otherwise updates
existing data file.
If `COMMIT_JSON_TO_GITHUB` is False, this will operate in "dry run" mode,
authenticating against GitHub but not changing any files.
'''
# authenticate with GitHub
gh = github3.login(token=target_config['TOKEN'])
# get the right repo
repo = gh.repository(target_config['REPO_OWNER'], target_config['REPO_NAME'])
for branch in target_config['TARGET_BRANCHES']:
# check to see whether data file exists
try:
contents = repo.file_contents(
path=target_config['TARGET_FILE'],
ref=branch
)
except Exception:
# the data file does not exist yet on this branch
contents = None
if commit:
if not contents:
# create file that doesn't exist
repo.create_file(
path=target_config['TARGET_FILE'],
message='adding session data for schedule',
content=data,
branch=branch
)
logger.info('Created new data file in repo')
else:
# if data has changed, update existing file
if data.decode('utf-8') == contents.decoded.decode('utf-8'):
logger.info('Data has not changed, no commit created')
else:
contents.update(
message='updating schedule data',
content=data,
branch=branch
)
logger.info('Data updated, new commit to repo')
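# Usage sketch: `commit=False` forces a one-off dry run regardless of the
# module-level COMMIT_JSON_TO_GITHUB flag, e.g.
#   commit_json(make_json(data), target_config=GITHUB_CONFIG, commit=False)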
def update_srccon_product_schedule():
data = fetch_data(multiple_sheets=FETCH_MULTIPLE_WORKSHEETS, worksheets_to_skip=WORKSHEETS_TO_SKIP)
logger.debug('Fetched the data ...')
data = transform_data(data)
logger.debug('Prepped the data ...')
json_data = make_json(data, store_locally=MAKE_LOCAL_JSON)
logger.debug('Made some JSON!')
commit_json(json_data, target_config=GITHUB_SRCCON_YAML_CONFIG)
logger.debug('Sent the yaml data to GitHub!')
commit_json(json_data)
logger.debug('Sent the json data to GitHub!')
'''
Set up logging.
'''
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'log.txt',
'formatter': 'verbose'
},
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'schedule_loader': {
'handlers':['file','console'],
'propagate': False,
'level':'DEBUG',
}
}
}
dictConfig(LOGGING)
logger = logging.getLogger('schedule_loader')
if __name__ == "__main__":
try:
update_srccon_product_schedule()
except Exception:
sys.stderr.write('\n')
traceback.print_exc(file=sys.stderr)
sys.stderr.write('\n')
sys.exit(1)
| {
"content_hash": "4f81388fffc52edad8051d218803e249",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 103,
"avg_line_length": 32.28252788104089,
"alnum_prop": 0.6108935974205435,
"repo_name": "ryanpitts/membot",
"id": "c752b197bda6609e9c2b487447453a8d2938532b",
"size": "8684",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "membot/apps/membot/commands/update_srccon_product_schedule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Procfile",
"bytes": "38"
},
{
"name": "Python",
"bytes": "54588"
}
],
"symlink_target": ""
} |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/x509.h>
/*
* See the comment above Cryptography_STACK_OF_X509 in x509.py
*/
typedef STACK_OF(X509_NAME) Cryptography_STACK_OF_X509_NAME;
typedef STACK_OF(X509_NAME_ENTRY) Cryptography_STACK_OF_X509_NAME_ENTRY;
"""
TYPES = """
typedef ... Cryptography_STACK_OF_X509_NAME_ENTRY;
typedef ... X509_NAME;
typedef ... X509_NAME_ENTRY;
typedef ... Cryptography_STACK_OF_X509_NAME;
"""
FUNCTIONS = """
X509_NAME *X509_NAME_new(void);
void X509_NAME_free(X509_NAME *);
int X509_NAME_entry_count(X509_NAME *);
X509_NAME_ENTRY *X509_NAME_get_entry(X509_NAME *, int);
ASN1_OBJECT *X509_NAME_ENTRY_get_object(X509_NAME_ENTRY *);
ASN1_STRING *X509_NAME_ENTRY_get_data(X509_NAME_ENTRY *);
unsigned long X509_NAME_hash(X509_NAME *);
int i2d_X509_NAME(X509_NAME *, unsigned char **);
int X509_NAME_add_entry_by_txt(X509_NAME *, const char *, int,
const unsigned char *, int, int, int);
int X509_NAME_add_entry_by_OBJ(X509_NAME *, ASN1_OBJECT *, int,
unsigned char *, int, int, int);
int X509_NAME_add_entry_by_NID(X509_NAME *, int, int, unsigned char *,
int, int, int);
X509_NAME_ENTRY *X509_NAME_delete_entry(X509_NAME *, int);
void X509_NAME_ENTRY_free(X509_NAME_ENTRY *);
int X509_NAME_get_index_by_NID(X509_NAME *, int, int);
int X509_NAME_cmp(const X509_NAME *, const X509_NAME *);
char *X509_NAME_oneline(X509_NAME *, char *, int);
X509_NAME *X509_NAME_dup(X509_NAME *);
X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_OBJ(X509_NAME_ENTRY **,
ASN1_OBJECT *, int,
const unsigned char *, int);
int X509_NAME_add_entry(X509_NAME *, X509_NAME_ENTRY *, int, int);
"""
MACROS = """
Cryptography_STACK_OF_X509_NAME *sk_X509_NAME_new_null(void);
int sk_X509_NAME_num(Cryptography_STACK_OF_X509_NAME *);
int sk_X509_NAME_push(Cryptography_STACK_OF_X509_NAME *, X509_NAME *);
X509_NAME *sk_X509_NAME_value(Cryptography_STACK_OF_X509_NAME *, int);
void sk_X509_NAME_free(Cryptography_STACK_OF_X509_NAME *);
int sk_X509_NAME_ENTRY_num(Cryptography_STACK_OF_X509_NAME_ENTRY *);
Cryptography_STACK_OF_X509_NAME_ENTRY *sk_X509_NAME_ENTRY_new_null(void);
int sk_X509_NAME_ENTRY_push(Cryptography_STACK_OF_X509_NAME_ENTRY *,
X509_NAME_ENTRY *);
X509_NAME_ENTRY *sk_X509_NAME_ENTRY_value(
Cryptography_STACK_OF_X509_NAME_ENTRY *, int);
Cryptography_STACK_OF_X509_NAME_ENTRY *sk_X509_NAME_ENTRY_dup(
Cryptography_STACK_OF_X509_NAME_ENTRY *
);
"""
CUSTOMIZATIONS = """
"""
| {
"content_hash": "1b53f81596713fd262291cfd9bc9d897",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 40.12676056338028,
"alnum_prop": 0.6647946647946648,
"repo_name": "Ayrx/cryptography",
"id": "86d50bbd01c887e14302287161f1a77a0821b348",
"size": "2849",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "src/_cffi_src/openssl/x509name.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1407"
},
{
"name": "C",
"bytes": "3418"
},
{
"name": "C++",
"bytes": "323"
},
{
"name": "Python",
"bytes": "1605906"
},
{
"name": "Shell",
"bytes": "6729"
}
],
"symlink_target": ""
} |
"""Test the Partially Signed Transaction RPCs.
"""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes_bi, disconnect_nodes, find_output, sync_blocks
import json
import os
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
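# Per BIP 125, an input signals opt-in replace-by-fee when its nSequence is
# at most 0xfffffffd; 0xfffffffe and 0xffffffff do not signal replaceability.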
# Create one-input, one-output, no-fee transaction:
class PSBTTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 3
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_utxo_conversion(self):
mining_node = self.nodes[2]
offline_node = self.nodes[0]
online_node = self.nodes[1]
# Disconnect offline node from others
disconnect_nodes(offline_node, 1)
disconnect_nodes(online_node, 0)
disconnect_nodes(offline_node, 2)
disconnect_nodes(mining_node, 0)
# Mine a transaction that credits the offline address
offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit")
online_addr = online_node.getnewaddress(address_type="p2sh-segwit")
online_node.importaddress(offline_addr, "", False)
mining_node.sendtoaddress(address=offline_addr, amount=1.0)
mining_node.generate(nblocks=1)
sync_blocks([mining_node, online_node])
# Construct an unsigned PSBT on the online node (who doesn't know the output is Segwit, so will include a non-witness UTXO)
utxos = online_node.listunspent(addresses=[offline_addr])
raw = online_node.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}])
psbt = online_node.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"]
assert("non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0])
# Have the offline node sign the PSBT (which will update the UTXO to segwit)
signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"]
assert("witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0])
# Make sure we can mine the resulting transaction
txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"])
mining_node.generate(1)
sync_blocks([mining_node, online_node])
assert_equal(online_node.gettxout(txid,0)["confirmations"], 1)
# Reconnect
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
# Create and fund a raw tx for sending 10 BTC
psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
# Node 1 should not be able to add anything to it but still return the psbtx same as before
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
# Sign the transaction and send
signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt']
final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
self.nodes[0].sendrawtransaction(final_tx)
# Create p2sh, p2wpkh, and p2wsh addresses
pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
p2sh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
p2sh_p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
p2wpkh = self.nodes[1].getnewaddress("", "bech32")
p2pkh = self.nodes[1].getnewaddress("", "legacy")
p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")
# fund those addresses
rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
txid = self.nodes[0].sendrawtransaction(signed_tx)
self.nodes[0].generate(6)
self.sync_all()
# Find the output pos
p2sh_pos = -1
p2wsh_pos = -1
p2wpkh_pos = -1
p2pkh_pos = -1
p2sh_p2wsh_pos = -1
p2sh_p2wpkh_pos = -1
decoded = self.nodes[0].decoderawtransaction(signed_tx)
for out in decoded['vout']:
if out['scriptPubKey']['addresses'][0] == p2sh:
p2sh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wsh:
p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wpkh:
p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wsh:
p2sh_p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wpkh:
p2sh_p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2pkh:
p2pkh_pos = out['n']
# spend single key from node 1
rawtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(rawtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# partially sign multisig things with node 1
psbtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
psbtx = walletprocesspsbt_out['psbt']
assert_equal(walletprocesspsbt_out['complete'], False)
# partially sign with node 2. This should be complete and sendable
walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# check that walletprocesspsbt fails to decode a non-psbt
rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
# Convert a non-psbt to psbt and make sure we can decode it
rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
rawtx = self.nodes[0].fundrawtransaction(rawtx)
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Make sure that a psbt with signatures cannot be converted
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].converttopsbt, signedtx['hex'])
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].converttopsbt, signedtx['hex'], False)
# Unless we allow it to convert and strip signatures
self.nodes[0].converttopsbt(signedtx['hex'], True)
# Explicitly allow converting non-empty txs
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Create outputs to nodes 1 and 2
node1_addr = self.nodes[1].getnewaddress()
node2_addr = self.nodes[2].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout1 = find_output(self.nodes[1], txid1, 13, blockhash=blockhash)
vout2 = find_output(self.nodes[2], txid2, 13, blockhash=blockhash)
# Create a psbt spending outputs from nodes 1 and 2
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
finalized = self.nodes[0].finalizepsbt(combined)['hex']
self.nodes[0].sendrawtransaction(finalized)
self.nodes[0].generate(6)
self.sync_all()
# Test additional args in walletcreatepsbt
# Make sure both pre-included and funded inputs
# have the correct sequence numbers based on
# replaceable arg
block_height = self.nodes[0].getblockcount()
unspent = self.nodes[0].listunspent()[0]
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable":True}, False)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" not in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)
# Same construction with only locktime set
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {}, True)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert tx_in["sequence"] > MAX_BIP125_RBF_SEQUENCE
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height)
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in in decoded_psbt["tx"]["vin"]:
assert tx_in["sequence"] > MAX_BIP125_RBF_SEQUENCE
assert_equal(decoded_psbt["tx"]["locktime"], 0)
# Make sure a change address from a wallet without P2SH innerscript access
# still results in success when attempting BnB coin selection
self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False)
# Regression test for 14473 (mishandling of already-signed witness transaction):
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"])
double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"])
assert_equal(complete_psbt, double_processed_psbt)
# We don't care about the decode result, but decoding must succeed.
self.nodes[0].decodepsbt(double_processed_psbt["psbt"])
# BIP 174 Test Vectors
# Check that unknown values are just passed through
unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
assert_equal(unknown_psbt, unknown_out)
# Open the data file
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
d = json.load(f)
invalids = d['invalid']
valids = d['valid']
creators = d['creator']
signers = d['signer']
combiners = d['combiner']
finalizers = d['finalizer']
extractors = d['extractor']
# Invalid PSBTs
for invalid in invalids:
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
# Valid PSBTs
for valid in valids:
self.nodes[0].decodepsbt(valid)
# Creator Tests
for creator in creators:
created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
assert_equal(created_tx, creator['result'])
# Signer tests
for i, signer in enumerate(signers):
self.nodes[2].createwallet("wallet{}".format(i))
wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
for key in signer['privkeys']:
wrpc.importprivkey(key)
signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt']
assert_equal(signed_tx, signer['result'])
# Combiner test
for combiner in combiners:
combined = self.nodes[2].combinepsbt(combiner['combine'])
assert_equal(combined, combiner['result'])
# Empty combiner test
assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, [])
# Finalizer test
for finalizer in finalizers:
finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
assert_equal(finalized, finalizer['result'])
# Extractor test
for extractor in extractors:
extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
assert_equal(extracted, extractor['result'])
# Unload extra wallets
for i, signer in enumerate(signers):
self.nodes[2].unloadwallet("wallet{}".format(i))
self.test_utxo_conversion()
# Test that psbts with p2pkh outputs are created properly
p2pkh = self.nodes[0].getnewaddress(address_type='legacy')
psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True)
self.nodes[0].decodepsbt(psbt['psbt'])
# Test decoding error: invalid base64
assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;")
# Send to all types of addresses
addr1 = self.nodes[1].getnewaddress("", "bech32")
txid1 = self.nodes[0].sendtoaddress(addr1, 11)
vout1 = find_output(self.nodes[0], txid1, 11)
addr2 = self.nodes[1].getnewaddress("", "legacy")
txid2 = self.nodes[0].sendtoaddress(addr2, 11)
vout2 = find_output(self.nodes[0], txid2, 11)
addr3 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid3 = self.nodes[0].sendtoaddress(addr3, 11)
vout3 = find_output(self.nodes[0], txid3, 11)
self.sync_all()
# Update a PSBT with UTXOs from the node
# Bech32 inputs should be filled with witness UTXO. Other inputs should not be filled because they are non-witness
psbt = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1},{"txid":txid2, "vout":vout2},{"txid":txid3, "vout":vout3}], {self.nodes[0].getnewaddress():32.999})
decoded = self.nodes[1].decodepsbt(psbt)
assert "witness_utxo" not in decoded['inputs'][0] and "non_witness_utxo" not in decoded['inputs'][0]
assert "witness_utxo" not in decoded['inputs'][1] and "non_witness_utxo" not in decoded['inputs'][1]
assert "witness_utxo" not in decoded['inputs'][2] and "non_witness_utxo" not in decoded['inputs'][2]
updated = self.nodes[1].utxoupdatepsbt(psbt)
decoded = self.nodes[1].decodepsbt(updated)
assert "witness_utxo" in decoded['inputs'][0] and "non_witness_utxo" not in decoded['inputs'][0]
assert "witness_utxo" not in decoded['inputs'][1] and "non_witness_utxo" not in decoded['inputs'][1]
assert "witness_utxo" not in decoded['inputs'][2] and "non_witness_utxo" not in decoded['inputs'][2]
# Two PSBTs with a common input should not be joinable
psbt1 = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1}], {self.nodes[0].getnewaddress():Decimal('10.999')})
assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated])
# Join two distinct PSBTs
addr4 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid4 = self.nodes[0].sendtoaddress(addr4, 5)
vout4 = find_output(self.nodes[0], txid4, 5)
self.nodes[0].generate(6)
self.sync_all()
psbt2 = self.nodes[1].createpsbt([{"txid":txid4, "vout":vout4}], {self.nodes[0].getnewaddress():Decimal('4.999')})
psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert "final_scriptwitness" in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0]
joined = self.nodes[0].joinpsbts([psbt, psbt2])
joined_decoded = self.nodes[0].decodepsbt(joined)
assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]
# Newly created PSBT needs UTXOs and updating
addr = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid = self.nodes[0].sendtoaddress(addr, 7)
addrinfo = self.nodes[1].getaddressinfo(addr)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout = find_output(self.nodes[0], txid, 7, blockhash=blockhash)
psbt = self.nodes[1].createpsbt([{"txid":txid, "vout":vout}], {self.nodes[0].getnewaddress("", "p2sh-segwit"):Decimal('6.999')})
analyzed = self.nodes[0].analyzepsbt(psbt)
assert not analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'updater' and analyzed['next'] == 'updater'
# After update with wallet, only needs signing
updated = self.nodes[1].walletprocesspsbt(psbt, False, 'ALL', True)['psbt']
analyzed = self.nodes[0].analyzepsbt(updated)
assert analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'signer' and analyzed['next'] == 'signer' and analyzed['inputs'][0]['missing']['signatures'][0] == addrinfo['embedded']['witness_program']
# Check fee and size things
assert analyzed['fee'] == Decimal('0.001') and analyzed['estimated_vsize'] == 134 and analyzed['estimated_feerate'] == '0.00746268 BTC/kB'
# After signing and finalizing, needs extracting
signed = self.nodes[1].walletprocesspsbt(updated)['psbt']
analyzed = self.nodes[0].analyzepsbt(signed)
assert analyzed['inputs'][0]['has_utxo'] and analyzed['inputs'][0]['is_final'] and analyzed['next'] == 'extractor'
if __name__ == '__main__':
PSBTTest().main()
| {
"content_hash": "624abcb6957d70ea777fb160c83427f8",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 266,
"avg_line_length": 54.6243093922652,
"alnum_prop": 0.6389197936684535,
"repo_name": "ElementsProject/elements",
"id": "e9098e4f5aef1869118ea00d2d7a4eb3fe191ba0",
"size": "19983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/bitcoin_functional/functional/rpc_psbt.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1836312"
},
{
"name": "C++",
"bytes": "9659428"
},
{
"name": "CMake",
"bytes": "29132"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1093"
},
{
"name": "HTML",
"bytes": "22723"
},
{
"name": "Java",
"bytes": "695"
},
{
"name": "M4",
"bytes": "207197"
},
{
"name": "Makefile",
"bytes": "126788"
},
{
"name": "Objective-C++",
"bytes": "5496"
},
{
"name": "Python",
"bytes": "4336448"
},
{
"name": "QMake",
"bytes": "438"
},
{
"name": "Sage",
"bytes": "42757"
},
{
"name": "Scheme",
"bytes": "25953"
},
{
"name": "Shell",
"bytes": "212725"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import errno
import io
import os
import socket
import struct
import weakref
from .i18n import _
from .pycompat import getattr
from . import (
bundle2,
error,
httpconnection,
pycompat,
statichttprepo,
url as urlmod,
util,
wireprotoframing,
wireprototypes,
wireprotov1peer,
wireprotov2peer,
wireprotov2server,
)
from .interfaces import (
repository,
util as interfaceutil,
)
from .utils import (
cborutil,
stringutil,
urlutil,
)
httplib = util.httplib
urlerr = util.urlerr
urlreq = util.urlreq
def encodevalueinheaders(value, header, limit):
"""Encode a string value into multiple HTTP headers.
``value`` will be encoded into 1 or more HTTP headers with the names
``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
name + value will be at most ``limit`` bytes long.
Returns an iterable of 2-tuples consisting of header names and
values as native strings.
"""
# HTTP Headers are ASCII. Python 3 requires them to be unicodes,
# not bytes. This function always takes bytes in as arguments.
fmt = pycompat.strurl(header) + r'-%s'
# Note: it is *NOT* a bug that the last bit here is a bytestring
# and not a unicode: we're just getting the encoded length anyway,
# and using an r-string to make it portable between Python 2 and 3
# doesn't work because then the \r is a literal backslash-r
# instead of a carriage return.
valuelen = limit - len(fmt % '000') - len(b': \r\n')
result = []
n = 0
for i in pycompat.xrange(0, len(value), valuelen):
n += 1
result.append((fmt % str(n), pycompat.strurl(value[i : i + valuelen])))
return result
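# Illustrative sketch (hypothetical sizes): with header b'X-HgArg' and a limit
# that leaves room for 10 value bytes per line, a 25-byte value comes back
# roughly as
#   [('X-HgArg-1', <bytes 0-9>), ('X-HgArg-2', <bytes 10-19>),
#    ('X-HgArg-3', <bytes 20-24>)]
# so that every emitted 'name: value\r\n' line stays within ``limit``.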
class _multifile(object):
def __init__(self, *fileobjs):
for f in fileobjs:
if not util.safehasattr(f, b'length'):
raise ValueError(
b'_multifile only supports file objects that '
b'have a length but this one does not:',
type(f),
f,
)
self._fileobjs = fileobjs
self._index = 0
@property
def length(self):
return sum(f.length for f in self._fileobjs)
def read(self, amt=None):
if amt is None or amt <= 0:
return b''.join(f.read() for f in self._fileobjs)
parts = []
while amt and self._index < len(self._fileobjs):
parts.append(self._fileobjs[self._index].read(amt))
got = len(parts[-1])
if got < amt:
self._index += 1
amt -= got
return b''.join(parts)
def seek(self, offset, whence=os.SEEK_SET):
if whence != os.SEEK_SET:
raise NotImplementedError(
b'_multifile does not support anything other'
b' than os.SEEK_SET for whence on seek()'
)
if offset != 0:
raise NotImplementedError(
b'_multifile only supports seeking to start, but that '
b'could be fixed if you need it'
)
for f in self._fileobjs:
f.seek(0)
self._index = 0
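# Usage sketch (mirrors makev1commandrequest() below, which attaches a
# ``length`` attribute to its BytesIO parts before wrapping them):
#   a = io.BytesIO(b'foo'); a.length = 3
#   b = io.BytesIO(b'bar'); b.length = 3
#   mf = _multifile(a, b)   # mf.length == 6; mf.read() spans both parts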
def makev1commandrequest(
ui, requestbuilder, caps, capablefn, repobaseurl, cmd, args
):
"""Make an HTTP request to run a command for a version 1 client.
``caps`` is a set of known server capabilities. The value may be
None if capabilities are not yet known.
``capablefn`` is a function to evaluate a capability.
``cmd``, ``args``, and ``data`` define the command, its arguments, and
raw data to pass to it.
"""
if cmd == b'pushkey':
args[b'data'] = b''
data = args.pop(b'data', None)
headers = args.pop(b'headers', {})
ui.debug(b"sending %s command\n" % cmd)
q = [(b'cmd', cmd)]
headersize = 0
# Important: don't use self.capable() here or else you end up
# with infinite recursion when trying to look up capabilities
# for the first time.
postargsok = caps is not None and b'httppostargs' in caps
# Send arguments via POST.
if postargsok and args:
strargs = urlreq.urlencode(sorted(args.items()))
if not data:
data = strargs
else:
if isinstance(data, bytes):
i = io.BytesIO(data)
i.length = len(data)
data = i
argsio = io.BytesIO(strargs)
argsio.length = len(strargs)
data = _multifile(argsio, data)
headers['X-HgArgs-Post'] = len(strargs)
elif args:
# Calling self.capable() can infinite loop if we are calling
# "capabilities". But that command should never accept wire
# protocol arguments. So this should never happen.
assert cmd != b'capabilities'
httpheader = capablefn(b'httpheader')
if httpheader:
headersize = int(httpheader.split(b',', 1)[0])
# Send arguments via HTTP headers.
if headersize > 0:
# The headers can typically carry more data than the URL.
encoded_args = urlreq.urlencode(sorted(args.items()))
for header, value in encodevalueinheaders(
encoded_args, b'X-HgArg', headersize
):
headers[header] = value
# Send arguments via query string (Mercurial <1.9).
else:
q += sorted(args.items())
qs = b'?%s' % urlreq.urlencode(q)
cu = b"%s%s" % (repobaseurl, qs)
size = 0
if util.safehasattr(data, b'length'):
size = data.length
elif data is not None:
size = len(data)
if data is not None and 'Content-Type' not in headers:
headers['Content-Type'] = 'application/mercurial-0.1'
# Tell the server we accept application/mercurial-0.2 and multiple
# compression formats if the server is capable of emitting those
# payloads.
# Note: Keep this set empty by default, as client advertisement of
# protocol parameters should only occur after the handshake.
protoparams = set()
mediatypes = set()
if caps is not None:
mt = capablefn(b'httpmediatype')
if mt:
protoparams.add(b'0.1')
mediatypes = set(mt.split(b','))
protoparams.add(b'partial-pull')
if b'0.2tx' in mediatypes:
protoparams.add(b'0.2')
if b'0.2tx' in mediatypes and capablefn(b'compression'):
# We /could/ compare supported compression formats and prune
# non-mutually supported or error if nothing is mutually supported.
# For now, send the full list to the server and have it error.
comps = [
e.wireprotosupport().name
for e in util.compengines.supportedwireengines(util.CLIENTROLE)
]
protoparams.add(b'comp=%s' % b','.join(comps))
if protoparams:
protoheaders = encodevalueinheaders(
b' '.join(sorted(protoparams)), b'X-HgProto', headersize or 1024
)
for header, value in protoheaders:
headers[header] = value
varyheaders = []
for header in headers:
if header.lower().startswith('x-hg'):
varyheaders.append(header)
if varyheaders:
headers['Vary'] = ','.join(sorted(varyheaders))
req = requestbuilder(pycompat.strurl(cu), data, headers)
if data is not None:
ui.debug(b"sending %d bytes\n" % size)
req.add_unredirected_header('Content-Length', '%d' % size)
return req, cu, qs
def _reqdata(req):
"""Get request data, if any. If no data, returns None."""
if pycompat.ispy3:
return req.data
if not req.has_data():
return None
return req.get_data()
def sendrequest(ui, opener, req):
"""Send a prepared HTTP request.
Returns the response object.
"""
dbg = ui.debug
if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
line = b'devel-peer-request: %s\n'
dbg(
line
% b'%s %s'
% (
pycompat.bytesurl(req.get_method()),
pycompat.bytesurl(req.get_full_url()),
)
)
hgargssize = None
for header, value in sorted(req.header_items()):
header = pycompat.bytesurl(header)
value = pycompat.bytesurl(value)
if header.startswith(b'X-hgarg-'):
if hgargssize is None:
hgargssize = 0
hgargssize += len(value)
else:
dbg(line % b' %s %s' % (header, value))
if hgargssize is not None:
dbg(
line
% b' %d bytes of commands arguments in headers'
% hgargssize
)
data = _reqdata(req)
if data is not None:
length = getattr(data, 'length', None)
if length is None:
length = len(data)
dbg(line % b' %d bytes of data' % length)
start = util.timer()
res = None
try:
res = opener.open(req)
except urlerr.httperror as inst:
if inst.code == 401:
raise error.Abort(_(b'authorization failed'))
raise
except httplib.HTTPException as inst:
ui.debug(
b'http error requesting %s\n'
% urlutil.hidepassword(req.get_full_url())
)
ui.traceback()
raise IOError(None, inst)
finally:
if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
code = res.code if res else -1
dbg(
line
% b' finished in %.4f seconds (%d)'
% (util.timer() - start, code)
)
# Insert error handlers for common I/O failures.
urlmod.wrapresponse(res)
return res
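# The verbose tracing above is gated on both --debug and the
# ``devel.debug.peer-request`` config knob, so it can be enabled ad hoc with
# standard ``--config section.name=value`` syntax, e.g.
#   hg pull --debug --config devel.debug.peer-request=1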
class RedirectedRepoError(error.RepoError):
def __init__(self, msg, respurl):
super(RedirectedRepoError, self).__init__(msg)
self.respurl = respurl
def parsev1commandresponse(
ui, baseurl, requrl, qs, resp, compressible, allowcbor=False
):
# record the url we got redirected to
redirected = False
respurl = pycompat.bytesurl(resp.geturl())
if respurl.endswith(qs):
respurl = respurl[: -len(qs)]
qsdropped = False
else:
qsdropped = True
if baseurl.rstrip(b'/') != respurl.rstrip(b'/'):
redirected = True
if not ui.quiet:
ui.warn(_(b'real URL is %s\n') % respurl)
try:
proto = pycompat.bytesurl(resp.getheader('content-type', ''))
except AttributeError:
proto = pycompat.bytesurl(resp.headers.get('content-type', ''))
safeurl = urlutil.hidepassword(baseurl)
if proto.startswith(b'application/hg-error'):
raise error.OutOfBandError(resp.read())
# Pre 1.0 versions of Mercurial used text/plain and
# application/hg-changegroup. We don't support such old servers.
if not proto.startswith(b'application/mercurial-'):
ui.debug(b"requested URL: '%s'\n" % urlutil.hidepassword(requrl))
msg = _(
b"'%s' does not appear to be an hg repository:\n"
b"---%%<--- (%s)\n%s\n---%%<---\n"
) % (safeurl, proto or b'no content-type', resp.read(1024))
# Some servers may strip the query string from the redirect. We
# raise a special error type so callers can react to this specially.
if redirected and qsdropped:
raise RedirectedRepoError(msg, respurl)
else:
raise error.RepoError(msg)
try:
subtype = proto.split(b'-', 1)[1]
# Unless we end up supporting CBOR in the legacy wire protocol,
# this should ONLY be encountered for the initial capabilities
# request during handshake.
if subtype == b'cbor':
if allowcbor:
return respurl, proto, resp
else:
raise error.RepoError(
_(b'unexpected CBOR response from server')
)
version_info = tuple([int(n) for n in subtype.split(b'.')])
except ValueError:
raise error.RepoError(
_(b"'%s' sent a broken Content-Type header (%s)") % (safeurl, proto)
)
# TODO consider switching to a decompression reader that uses
# generators.
if version_info == (0, 1):
if compressible:
resp = util.compengines[b'zlib'].decompressorreader(resp)
elif version_info == (0, 2):
# application/mercurial-0.2 always identifies the compression
# engine in the payload header.
elen = struct.unpack(b'B', util.readexactly(resp, 1))[0]
ename = util.readexactly(resp, elen)
engine = util.compengines.forwiretype(ename)
resp = engine.decompressorreader(resp)
else:
raise error.RepoError(
_(b"'%s' uses newer protocol %s") % (safeurl, subtype)
)
return respurl, proto, resp
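# Sketch of the 0.2 framing handled above: the payload opens with one length
# byte followed by the compression engine's wire name, so a zlib-compressed
# body would begin with b'\x02ZL' (assuming zlib's wire name is b'ZL'),
# with the compressed data following immediately after.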
class httppeer(wireprotov1peer.wirepeer):
def __init__(self, ui, path, url, opener, requestbuilder, caps):
self.ui = ui
self._path = path
self._url = url
self._caps = caps
self.limitedarguments = caps is not None and b'httppostargs' not in caps
self._urlopener = opener
self._requestbuilder = requestbuilder
def __del__(self):
for h in self._urlopener.handlers:
h.close()
getattr(h, "close_all", lambda: None)()
# Begin of ipeerconnection interface.
def url(self):
return self._path
def local(self):
return None
def peer(self):
return self
def canpush(self):
return True
def close(self):
try:
reqs, sent, recv = (
self._urlopener.requestscount,
self._urlopener.sentbytescount,
self._urlopener.receivedbytescount,
)
except AttributeError:
return
self.ui.note(
_(
b'(sent %d HTTP requests and %d bytes; '
b'received %d bytes in responses)\n'
)
% (reqs, sent, recv)
)
# End of ipeerconnection interface.
# Begin of ipeercommands interface.
def capabilities(self):
return self._caps
# End of ipeercommands interface.
def _callstream(self, cmd, _compressible=False, **args):
args = pycompat.byteskwargs(args)
req, cu, qs = makev1commandrequest(
self.ui,
self._requestbuilder,
self._caps,
self.capable,
self._url,
cmd,
args,
)
resp = sendrequest(self.ui, self._urlopener, req)
self._url, ct, resp = parsev1commandresponse(
self.ui, self._url, cu, qs, resp, _compressible
)
return resp
def _call(self, cmd, **args):
fp = self._callstream(cmd, **args)
try:
return fp.read()
finally:
# if using keepalive, allow connection to be reused
fp.close()
def _callpush(self, cmd, cg, **args):
# have to stream bundle to a temp file because we do not have
# http 1.1 chunked transfer.
types = self.capable(b'unbundle')
try:
types = types.split(b',')
except AttributeError:
# servers older than d1b16a746db6 will send 'unbundle' as a
# boolean capability. They only support headerless/uncompressed
# bundles.
types = [b""]
for x in types:
if x in bundle2.bundletypes:
type = x
break
tempname = bundle2.writebundle(self.ui, cg, None, type)
fp = httpconnection.httpsendfile(self.ui, tempname, b"rb")
headers = {'Content-Type': 'application/mercurial-0.1'}
try:
r = self._call(cmd, data=fp, headers=headers, **args)
vals = r.split(b'\n', 1)
if len(vals) < 2:
raise error.ResponseError(_(b"unexpected response:"), r)
return vals
except urlerr.httperror:
# Catch and re-raise these so we don't try and treat them
# like generic socket errors. They lack any values in
# .args on Python 3 which breaks our socket.error block.
raise
except socket.error as err:
if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
raise error.Abort(_(b'push failed: %s') % err.args[1])
raise error.Abort(err.args[1])
finally:
fp.close()
os.unlink(tempname)
def _calltwowaystream(self, cmd, fp, **args):
filename = None
try:
# dump bundle to disk
fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
with os.fdopen(fd, "wb") as fh:
d = fp.read(4096)
while d:
fh.write(d)
d = fp.read(4096)
# start http push
with httpconnection.httpsendfile(self.ui, filename, b"rb") as fp_:
headers = {'Content-Type': 'application/mercurial-0.1'}
return self._callstream(cmd, data=fp_, headers=headers, **args)
finally:
if filename is not None:
os.unlink(filename)
def _callcompressable(self, cmd, **args):
return self._callstream(cmd, _compressible=True, **args)
def _abort(self, exception):
raise exception
def sendv2request(
ui, opener, requestbuilder, apiurl, permission, requests, redirect
):
wireprotoframing.populatestreamencoders()
uiencoders = ui.configlist(b'experimental', b'httppeer.v2-encoder-order')
if uiencoders:
encoders = []
for encoder in uiencoders:
if encoder not in wireprotoframing.STREAM_ENCODERS:
ui.warn(
_(
b'wire protocol version 2 encoder referenced in '
b'config (%s) is not known; ignoring\n'
)
% encoder
)
else:
encoders.append(encoder)
else:
encoders = wireprotoframing.STREAM_ENCODERS_ORDER
reactor = wireprotoframing.clientreactor(
ui,
hasmultiplesend=False,
buffersends=True,
clientcontentencoders=encoders,
)
handler = wireprotov2peer.clienthandler(
ui, reactor, opener=opener, requestbuilder=requestbuilder
)
url = b'%s/%s' % (apiurl, permission)
if len(requests) > 1:
url += b'/multirequest'
else:
url += b'/%s' % requests[0][0]
ui.debug(b'sending %d commands\n' % len(requests))
for command, args, f in requests:
ui.debug(
b'sending command %s: %s\n'
% (command, stringutil.pprint(args, indent=2))
)
assert not list(
handler.callcommand(command, args, f, redirect=redirect)
)
# TODO stream this.
body = b''.join(map(bytes, handler.flushcommands()))
# TODO modify user-agent to reflect v2
headers = {
'Accept': wireprotov2server.FRAMINGTYPE,
'Content-Type': wireprotov2server.FRAMINGTYPE,
}
req = requestbuilder(pycompat.strurl(url), body, headers)
req.add_unredirected_header('Content-Length', '%d' % len(body))
try:
res = opener.open(req)
except urlerr.httperror as e:
if e.code == 401:
raise error.Abort(_(b'authorization failed'))
raise
except httplib.HTTPException as e:
ui.traceback()
raise IOError(None, e)
return handler, res
class queuedcommandfuture(pycompat.futures.Future):
"""Wraps result() on command futures to trigger submission on call."""
def result(self, timeout=None):
if self.done():
return pycompat.futures.Future.result(self, timeout)
self._peerexecutor.sendcommands()
# sendcommands() will restore the original __class__ and self.result
# will resolve to Future.result.
return self.result(timeout)
@interfaceutil.implementer(repository.ipeercommandexecutor)
class httpv2executor(object):
def __init__(
self, ui, opener, requestbuilder, apiurl, descriptor, redirect
):
self._ui = ui
self._opener = opener
self._requestbuilder = requestbuilder
self._apiurl = apiurl
self._descriptor = descriptor
self._redirect = redirect
self._sent = False
self._closed = False
self._neededpermissions = set()
self._calls = []
self._futures = weakref.WeakSet()
self._responseexecutor = None
self._responsef = None
def __enter__(self):
return self
def __exit__(self, exctype, excvalue, exctb):
self.close()
def callcommand(self, command, args):
if self._sent:
raise error.ProgrammingError(
b'callcommand() cannot be used after commands are sent'
)
if self._closed:
raise error.ProgrammingError(
b'callcommand() cannot be used after close()'
)
# The service advertises which commands are available. So if we attempt
# to call an unknown command or pass an unknown argument, we can screen
# for this.
if command not in self._descriptor[b'commands']:
raise error.ProgrammingError(
b'wire protocol command %s is not available' % command
)
cmdinfo = self._descriptor[b'commands'][command]
unknownargs = set(args.keys()) - set(cmdinfo.get(b'args', {}))
if unknownargs:
raise error.ProgrammingError(
b'wire protocol command %s does not accept argument: %s'
% (command, b', '.join(sorted(unknownargs)))
)
self._neededpermissions |= set(cmdinfo[b'permissions'])
# TODO we /could/ also validate types here, since the API descriptor
# includes types...
f = pycompat.futures.Future()
# Monkeypatch it so result() triggers sendcommands(), otherwise result()
# could deadlock.
f.__class__ = queuedcommandfuture
f._peerexecutor = self
self._futures.add(f)
self._calls.append((command, args, f))
return f
def sendcommands(self):
if self._sent:
return
if not self._calls:
return
self._sent = True
# Unhack any future types so caller sees a clean type and so we
# break reference cycle.
for f in self._futures:
if isinstance(f, queuedcommandfuture):
f.__class__ = pycompat.futures.Future
f._peerexecutor = None
# Mark the future as running and filter out cancelled futures.
calls = [
(command, args, f)
for command, args, f in self._calls
if f.set_running_or_notify_cancel()
]
# Clear out references, prevent improper object usage.
self._calls = None
if not calls:
return
permissions = set(self._neededpermissions)
if b'push' in permissions and b'pull' in permissions:
permissions.remove(b'pull')
if len(permissions) > 1:
raise error.RepoError(
_(b'cannot make request requiring multiple permissions: %s')
% _(b', ').join(sorted(permissions))
)
permission = {
b'push': b'rw',
b'pull': b'ro',
}[permissions.pop()]
handler, resp = sendv2request(
self._ui,
self._opener,
self._requestbuilder,
self._apiurl,
permission,
calls,
self._redirect,
)
# TODO we probably want to validate the HTTP code, media type, etc.
self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
self._responsef = self._responseexecutor.submit(
self._handleresponse, handler, resp
)
def close(self):
if self._closed:
return
self.sendcommands()
self._closed = True
if not self._responsef:
return
# TODO ^C here may not result in immediate program termination.
try:
self._responsef.result()
finally:
self._responseexecutor.shutdown(wait=True)
self._responsef = None
self._responseexecutor = None
# If any of our futures are still in progress, mark them as
# errored, otherwise a result() could wait indefinitely.
for f in self._futures:
if not f.done():
f.set_exception(
error.ResponseError(_(b'unfulfilled command response'))
)
self._futures = None
def _handleresponse(self, handler, resp):
# Called in a thread to read the response.
while handler.readdata(resp):
pass
@interfaceutil.implementer(repository.ipeerv2)
class httpv2peer(object):
limitedarguments = False
def __init__(
self, ui, repourl, apipath, opener, requestbuilder, apidescriptor
):
self.ui = ui
self.apidescriptor = apidescriptor
if repourl.endswith(b'/'):
repourl = repourl[:-1]
self._url = repourl
self._apipath = apipath
self._apiurl = b'%s/%s' % (repourl, apipath)
self._opener = opener
self._requestbuilder = requestbuilder
self._redirect = wireprotov2peer.supportedredirects(ui, apidescriptor)
# Start of ipeerconnection.
def url(self):
return self._url
def local(self):
return None
def peer(self):
return self
def canpush(self):
# TODO change once implemented.
return False
def close(self):
self.ui.note(
_(
b'(sent %d HTTP requests and %d bytes; '
b'received %d bytes in responses)\n'
)
% (
self._opener.requestscount,
self._opener.sentbytescount,
self._opener.receivedbytescount,
)
)
# End of ipeerconnection.
# Start of ipeercapabilities.
def capable(self, name):
# The capabilities used internally historically map to capabilities
# advertised from the "capabilities" wire protocol command. However,
# version 2 of that command works differently.
# Maps to commands that are available.
if name in (
b'branchmap',
b'getbundle',
b'known',
b'lookup',
b'pushkey',
):
return True
# Other concepts.
if name in (b'bundle2',):
return True
# Alias command-* to presence of command of that name.
if name.startswith(b'command-'):
return name[len(b'command-') :] in self.apidescriptor[b'commands']
return False
def requirecap(self, name, purpose):
if self.capable(name):
return
raise error.CapabilityError(
_(
b'cannot %s; client or remote repository does not support the '
b'\'%s\' capability'
)
% (purpose, name)
)
# End of ipeercapabilities.
def _call(self, name, **args):
with self.commandexecutor() as e:
return e.callcommand(name, args).result()
def commandexecutor(self):
return httpv2executor(
self.ui,
self._opener,
self._requestbuilder,
self._apiurl,
self.apidescriptor,
self._redirect,
)
# Registry of API service names to metadata about peers that handle it.
#
# The following keys are meaningful:
#
# init
# Callable receiving (ui, repourl, servicepath, opener, requestbuilder,
# apidescriptor) to create a peer.
#
# priority
# Integer priority for the service. If we could choose from multiple
# services, we choose the one with the highest priority.
API_PEERS = {
wireprototypes.HTTP_WIREPROTO_V2: {
b'init': httpv2peer,
b'priority': 50,
},
}
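# Registration sketch (hypothetical service name, showing the shape the
# comment above describes; not a real protocol identifier):
#
#   API_PEERS[b'exp-http-v3-0001'] = {
#       b'init': httpv2peer,  # any callable with the documented signature
#       b'priority': 10,
#   }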
def performhandshake(ui, url, opener, requestbuilder):
# The handshake is a request to the capabilities command.
caps = None
def capable(x):
raise error.ProgrammingError(b'should not be called')
args = {}
# The client advertises support for newer protocols by adding an
# X-HgUpgrade-* header with a list of supported APIs and an
# X-HgProto-* header advertising which serializing formats it supports.
# We only support the HTTP version 2 transport and CBOR responses for
# now.
advertisev2 = ui.configbool(b'experimental', b'httppeer.advertise-v2')
if advertisev2:
args[b'headers'] = {
'X-HgProto-1': 'cbor',
}
args[b'headers'].update(
encodevalueinheaders(
b' '.join(sorted(API_PEERS)),
b'X-HgUpgrade',
# We don't know the header limit this early.
# So make it small.
1024,
)
)
req, requrl, qs = makev1commandrequest(
ui, requestbuilder, caps, capable, url, b'capabilities', args
)
resp = sendrequest(ui, opener, req)
# The server may redirect us to the repo root, stripping the
# ?cmd=capabilities query string from the URL. The server would likely
# return HTML in this case and ``parsev1commandresponse()`` would raise.
# We catch this special case and re-issue the capabilities request against
# the new URL.
#
# We should ideally not do this, as a redirect that drops the query
# string from the URL is arguably a server bug. (Garbage in, garbage out).
# However, Mercurial clients for several years appeared to handle this
# issue without behavior degradation. And according to issue 5860, it may
# be a longstanding bug in some server implementations. So we allow a
# redirect that drops the query string to "just work."
try:
respurl, ct, resp = parsev1commandresponse(
ui, url, requrl, qs, resp, compressible=False, allowcbor=advertisev2
)
except RedirectedRepoError as e:
req, requrl, qs = makev1commandrequest(
ui, requestbuilder, caps, capable, e.respurl, b'capabilities', args
)
resp = sendrequest(ui, opener, req)
respurl, ct, resp = parsev1commandresponse(
ui, url, requrl, qs, resp, compressible=False, allowcbor=advertisev2
)
try:
rawdata = resp.read()
finally:
resp.close()
if not ct.startswith(b'application/mercurial-'):
raise error.ProgrammingError(b'unexpected content-type: %s' % ct)
if advertisev2:
if ct == b'application/mercurial-cbor':
try:
info = cborutil.decodeall(rawdata)[0]
except cborutil.CBORDecodeError:
raise error.Abort(
_(b'error decoding CBOR from remote server'),
hint=_(
b'try again and consider contacting '
b'the server operator'
),
)
# We got a legacy response. That's fine.
elif ct in (b'application/mercurial-0.1', b'application/mercurial-0.2'):
info = {b'v1capabilities': set(rawdata.split())}
else:
raise error.RepoError(
_(b'unexpected response type from server: %s') % ct
)
else:
info = {b'v1capabilities': set(rawdata.split())}
return respurl, info
def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
"""Construct an appropriate HTTP peer instance.
``opener`` is an ``url.opener`` that should be used to establish
connections, perform HTTP requests.
``requestbuilder`` is the type used for constructing HTTP requests.
It exists as an argument so extensions can override the default.
"""
u = urlutil.url(path)
if u.query or u.fragment:
raise error.Abort(
_(b'unsupported URL component: "%s"') % (u.query or u.fragment)
)
# urllib cannot handle URLs with embedded user or passwd.
url, authinfo = u.authinfo()
ui.debug(b'using %s\n' % url)
opener = opener or urlmod.opener(ui, authinfo)
respurl, info = performhandshake(ui, url, opener, requestbuilder)
# Given the intersection of APIs that both we and the server support,
# sort by their advertised priority and pick the first one.
#
# TODO consider making this request-based and interface driven. For
# example, the caller could say "I want a peer that does X." It's quite
# possible that not all peers would do that. Since we know the service
# capabilities, we could filter out services not meeting the
# requirements. Possibly by consulting the interfaces defined by the
# peer type.
apipeerchoices = set(info.get(b'apis', {}).keys()) & set(API_PEERS.keys())
preferredchoices = sorted(
apipeerchoices, key=lambda x: API_PEERS[x][b'priority'], reverse=True
)
for service in preferredchoices:
apipath = b'%s/%s' % (info[b'apibase'].rstrip(b'/'), service)
return API_PEERS[service][b'init'](
ui, respurl, apipath, opener, requestbuilder, info[b'apis'][service]
)
# Failed to construct an API peer. Fall back to legacy.
return httppeer(
ui, path, respurl, opener, requestbuilder, info[b'v1capabilities']
)
def instance(ui, path, create, intents=None, createopts=None):
if create:
raise error.Abort(_(b'cannot create new http repository'))
try:
if path.startswith(b'https:') and not urlmod.has_https:
raise error.Abort(
_(b'Python support for SSL and HTTPS is not installed')
)
inst = makepeer(ui, path)
return inst
except error.RepoError as httpexception:
try:
r = statichttprepo.instance(ui, b"static-" + path, create)
ui.note(_(b'(falling back to static-http)\n'))
return r
except error.RepoError:
raise httpexception # use the original http RepoError instead
| {
"content_hash": "0b27991ac360fc5cb65b8940fc670ef2",
"timestamp": "",
"source": "github",
"line_count": 1114,
"max_line_length": 80,
"avg_line_length": 31.096947935368043,
"alnum_prop": 0.5777380058888055,
"repo_name": "mdaniel/intellij-community",
"id": "9a176146a4d68d4b5b5e794e6642fe96abb94722",
"size": "34956",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "plugins/hg4idea/testData/bin/mercurial/httppeer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
db_url_dst = 'postgresql://aagusti:a@localhost/gaji_pns'
| {
"content_hash": "af942736a918c14e50ce74411a66d9d0",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 56,
"avg_line_length": 57,
"alnum_prop": 0.7368421052631579,
"repo_name": "aagusti/o-sipkd",
"id": "c65e95dd5b14fe32583291b0648ed001ca7e5464",
"size": "57",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "docs/apbd/config.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "107334"
},
{
"name": "HTML",
"bytes": "1317001"
},
{
"name": "JavaScript",
"bytes": "983058"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "PLpgSQL",
"bytes": "2828"
},
{
"name": "Python",
"bytes": "615248"
},
{
"name": "Shell",
"bytes": "241"
},
{
"name": "Smarty",
"bytes": "2003"
}
],
"symlink_target": ""
} |
import sys
import os
# mask BFD warnings: https://bugs.launchpad.net/tarantool/+bug/1018356
sys.stdout.push_filter("unable to read unknown load command 0x2\d+", "")
server.test_option("--help")
server.test_option("-h")
sys.stdout.push_filter("(/\S+)+/tarantool", "tarantool")
server.test_option("-Z")
server.test_option("--no-such-option")
server.test_option("--version --no-such-option")
sys.stdout.push_filter("(\d)\.\d\.\d(-\d+-\w+)?", "\\1.minor.patch-<rev>-<commit>")
sys.stdout.push_filter("Target: .*", "Target: platform <build>")
sys.stdout.push_filter(".*Disable shared arena since.*\n", "")
sys.stdout.push_filter("Build options: .*", "Build options: flags")
sys.stdout.push_filter("C_FLAGS:.*", "C_FLAGS: flags")
sys.stdout.push_filter("CXX_FLAGS:.*", "CXX_FLAGS: flags")
sys.stdout.push_filter("Compiler: .*", "Compiler: cc")
server.test_option("--version")
server.test_option("-V ")
sys.stdout.clear_all_filters()
# Args filter cleanup
# vim: syntax=python
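# --- Illustrative sketch (not part of the original test) ---
# The push_filter calls above act as stacked regex rewrites over captured
# output; a minimal stand-in with plain re, assuming the same pattern
# semantics:
def _example_normalize(text):
    import re
    # Collapse a concrete version such as "1.7.2-153-gdeadbee" to a token.
    return re.sub(r"(\d)\.\d\.\d(-\d+-\w+)?",
                  r"\1.minor.patch-<rev>-<commit>", text)
# _example_normalize("tarantool 1.7.2-153-gdeadbee")
# -> "tarantool 1.minor.patch-<rev>-<commit>"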
| {
"content_hash": "bbd16f203a2cc0c31b85812a37069de9",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 83,
"avg_line_length": 39.28,
"alnum_prop": 0.6802443991853361,
"repo_name": "mejedi/tarantool",
"id": "e5fd0d641e5ccc861c9319dc9020e073a9f271a1",
"size": "982",
"binary": false,
"copies": "11",
"ref": "refs/heads/1.7",
"path": "test/box-py/args.test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1972858"
},
{
"name": "C++",
"bytes": "913498"
},
{
"name": "CMake",
"bytes": "94972"
},
{
"name": "GDB",
"bytes": "55"
},
{
"name": "Lua",
"bytes": "928941"
},
{
"name": "Makefile",
"bytes": "2122"
},
{
"name": "Objective-C",
"bytes": "2072"
},
{
"name": "Python",
"bytes": "63048"
},
{
"name": "Ragel",
"bytes": "7313"
},
{
"name": "Shell",
"bytes": "1938"
}
],
"symlink_target": ""
} |
""" Class definition for the VIC model interface
.. module:: vic
:synopsis: Definition of the VIC model class
.. moduleauthor:: Kostas Andreadis <kandread@jpl.nasa.gov>
"""
from __future__ import division
import output as vicoutput
from osgeo import ogr, gdal, osr
import decimal
import sys
import subprocess
import os
import string
from datetime import date, datetime, timedelta
import multiprocessing as mp
import shutil
import numpy as np
from collections import OrderedDict
import drought
import pandas
import dbio
import rpath
import random
from raster import TileReader
import logging
class VIC:
def __init__(self, path, dbname, resolution, startyear, startmonth, startday,
endyear, endmonth, endday, name="", savestate="", nlayer=3):
log = logging.getLogger(__name__)
self.model_path = path
self.nodata = -9999.
if bool(name):
self.name = name
else:
self.name = None
self.startyear = startyear
self.startmonth = startmonth
self.startday = startday
self.startdate = datetime(startyear, startmonth, startday)
self.endyear = endyear
self.endmonth = endmonth
self.endday = endday
self.enddate = datetime(endyear, endmonth, endday)
self.nlayers = nlayer
self.dbname = dbname
db = dbio.connect(dbname)
cur = db.cursor()
cur.execute(
"select resolution from vic.input order by abs(resolution - {0})".format(resolution))
if not bool(cur.rowcount):
log.error("No appropriate VIC input files found in the database. Exiting!")
sys.exit()
self.res = cur.fetchone()[0]
cur.close()
self.grid_decimal = -(decimal.Decimal(str(self.res)).as_tuple().exponent - 1)
self.lat = []
self.lon = []
self.gid = OrderedDict()
self.lgid = OrderedDict()
self.depths = OrderedDict()
self.skipyear = 0
self.elev = OrderedDict()
self.statefile = ""
def paramFromDB(self):
"""Retrieve file parameters from database."""
db = dbio.connect(self.dbname)
cur = db.cursor()
# cur = self.db.cursor()
cur.execute(
'select veglib,vegparam,snowbandfile from vic.input where resolution=%f;' % self.res)
veglib, vegparam, snowbands = cur.fetchone()
cur.close()
db.close()
return veglib, vegparam, snowbands
def _getSnowbands(self, snowbands):
"""Find number of snow bands from file."""
filename = "{0}/{1}".format(rpath.data, snowbands)
with open(filename) as f:
line = f.readline()
return int((len(line.split()) - 1) / 3)
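    # --- Illustrative sketch (not part of the original class) ---
    # The snow band file is assumed to hold a cell id followed by three
    # values per band (area fraction, elevation, precipitation fraction),
    # hence the (columns - 1) / 3 arithmetic above. A made-up two-band row:
    def _exampleSnowbandCount(self):
        line = "42  0.6 1200. 0.5  0.4 1800. 0.5"
        return int((len(line.split()) - 1) / 3)  # -> 2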
def writeSoilFile(self, shapefile):
"""Write soil parameter file for current simulation based on basin shapefile."""
ds = ogr.Open(shapefile)
lyr = ds.GetLayer()
db = dbio.connect(self.dbname)
cur = db.cursor()
cur.execute(
"select * from information_schema.tables where table_name='basin' and table_schema=%s", (self.name,))
if not bool(cur.rowcount):
temptable = ''.join(random.SystemRandom().choice(
string.ascii_letters) for _ in range(8))
cur.execute(
"create table {0}(gid serial primary key, geom geometry)".format(temptable))
for i in range(lyr.GetFeatureCount()):
f = lyr.GetNextFeature()
g = f.GetGeometryRef()
cur.execute("insert into {0}(geom) values(st_geomfromtext('{1}',4326))".format(
temptable, g.ExportToWkt()))
sql = "select updategeometrysrid('{0}','geom',4326)".format(
temptable)
db.commit()
cur.execute(
"create index {0}_s on {0} using gist(geom)".format(temptable))
ds = None
cur.execute(
"select schema_name from information_schema.schemata where schema_name='{0}'".format(self.name))
if not bool(cur.rowcount):
cur.execute("create schema {0}".format(self.name))
sql = "create table {0}.basin (gid integer, elev real, depths real[], geom geometry(Point, 4326), line text, constraint {0}_gidkey primary key(gid), CONSTRAINT enforce_dims_geom CHECK (st_ndims(geom) = 2), CONSTRAINT enforce_geotype_geom CHECK (geometrytype(geom) = 'POINT'::text OR geom IS NULL))".format(self.name)
cur.execute(sql)
sql = "insert into {0}.basin (gid, elev, depths, geom, line) select v.id,v.elev,v.depths,v.geom,v.line from vic.soils as v,{1} as t where st_intersects(v.geom,t.geom) and resolution={2}".format(
self.name, temptable, self.res)
cur.execute(sql)
cur.execute("drop table {0}".format(temptable))
db.commit()
cur.execute(
"create index basin_s on {0}.basin using gist(geom)".format(self.name))
db.commit()
sql = "select line,gid,st_y(geom),st_x(geom),elev,depths from {0}.basin order by gid".format(
self.name)
cur.execute(sql)
lines = cur.fetchall()
with open(self.model_path + '/soil.txt', 'w') as fout:
for line in lines:
gid, lat, lon, elev, depths = line[1:]
fout.write("{0}\n".format(line[0]))
self.lat.append(lat)
self.lon.append(lon)
self.gid[gid] = (lat, lon)
self.lgid[(lat, lon)] = gid
self.depths[gid] = depths
self.elev[gid] = elev
cur.execute("alter table {0}.basin drop column line".format(self.name))
cur.close()
db.close()
def stateFile(self):
"""Retrieve state file path from database."""
log = logging.getLogger(__name__)
db = dbio.connect(self.dbname)
cur = db.cursor()
sql = "select filename from {0}.state where fdate = date '{1}-{2}-{3}'".format(
self.name, self.startyear, self.startmonth, self.startday)
try:
cur.execute(sql)
result = cur.fetchone()
        except Exception:
            result = False
if bool(result):
filename = result[0]
else:
log.warning("No state file found for requested date, initializing from default.")
filename = None
cur.close()
db.close()
return filename
def _stateToDb(self, statefilepath):
"""Add path to state file into database."""
db = dbio.connect(self.dbname)
cur = db.cursor()
cur.execute(
"select schema_name from information_schema.schemata where schema_name='{0}'".format(self.name))
if not bool(cur.rowcount):
cur.execute("create schema {0}".format(self.name))
db.commit()
cur.execute(
"select table_name from information_schema.tables where table_schema='{0}' and table_name='state'".format(self.name))
if not bool(cur.rowcount):
sql = "create table {0}.state (filename text, fdate date)".format(
self.name)
cur.execute(sql)
db.commit()
statefile = "{0}/vic.state_{1:04d}{2:02d}{3:02d}".format(
statefilepath, self.endyear, self.endmonth, self.endday)
statedate = "{0}-{1}-{2}".format(self.endyear,
self.endmonth, self.endday)
cur.execute(
"select * from {0}.state where fdate=date '{1}'".format(self.name, statedate))
if bool(cur.rowcount):
sql = "update {0}.state set filename='{1}' where fdate=date '{2}'".format(
self.name, statefile, statedate)
else:
sql = "insert into {0}.state values ('{1}', date '{2}')".format(
self.name, statefile, statedate)
cur.execute(sql)
db.commit()
cur.close()
db.close()
def writeParamFile(self, nodes=3, time_step=24, save_state="", init_state=False, state_file="", save_state_to_db=False):
"""Write VIC global parameter file for current simulation."""
db = dbio.connect(self.dbname)
cur = db.cursor()
cur.execute(
'select rootzones from vic.input where resolution=%f;' % self.res)
root_zones = cur.fetchone()[0]
cur.close()
db.close()
# self.nlayers = nlayer
fout = open(self.model_path + '/global.txt', 'w')
fout.write("NLAYER\t{0:d}\n".format(self.nlayers))
fout.write("NODES\t{0:d}\n".format(nodes))
if time_step < 24:
fout.write("TIME_STEP\t{0:d}\nSNOW_STEP\t{1:d}\n".format(
time_step, time_step))
fout.write("FULL_ENERGY\tTRUE\nFROZEN_SOIL\tTRUE\n")
else:
fout.write("TIME_STEP\t24\nSNOW_STEP\t3\n")
fout.write("FULL_ENERGY\tFALSE\nFROZEN_SOIL\tFALSE\n")
fout.write("STARTYEAR\t{0:04d}\n".format(self.startyear))
fout.write("STARTMONTH\t{0:02d}\n".format(self.startmonth))
fout.write("STARTDAY\t{0:02d}\n".format(self.startday))
fout.write("ENDYEAR\t{0:04d}\n".format(self.endyear))
fout.write("ENDMONTH\t{0:02d}\n".format(self.endmonth))
fout.write("ENDDAY\t{0:02d}\n".format(self.endday))
fout.write("IMPLICIT\tFALSE\nTFALLBACK\tTRUE\n")
fout.write("SNOW_ALBEDO\tUSACE\nSNOW_DENSITY\tDENS_SNTHRM\n")
fout.write("BLOWING\tFALSE\nCOMPUTE_TREELINE\tFALSE\n")
fout.write("DIST_PRCP\tFALSE\nPREC_EXPT\t0.6\nCORRPREC\tFALSE\n")
fout.write("MAX_SNOW_TEMP\t0.5\nMIN_RAIN_TEMP\t-0.5\n")
fout.write("MIN_WIND_SPEED\t0.1\nAERO_RESIST_CANSNOW\tAR_406_FULL\n")
if bool(state_file):
statefile = state_file
elif init_state:
statefile = self.stateFile()
else:
statefile = None
if statefile:
fout.write("INIT_STATE\t{0:s}\n".format(statefile))
if bool(save_state):
if isinstance(save_state, str):
if not os.path.isdir(save_state):
os.mkdir(save_state)
fout.write("STATENAME\t{0}/vic.state\n".format(save_state))
else:
fout.write(
"STATENAME\t{0}/vic.state\n".format(self.model_path))
fout.write("STATEYEAR\t{0:04d}\n".format(self.endyear))
fout.write("STATEMONTH\t{0:02d}\n".format(self.endmonth))
fout.write("STATEDAY\t{0:02d}\n".format(self.endday))
self.statefile = "vic.state_{0:04d}{1:02d}{2:02d}".format(
self.endyear, self.endmonth, self.endday)
if save_state_to_db:
self._stateToDb(save_state)
fout.write("BINARY_STATE_FILE\tFALSE\n")
fout.write(
"FORCING1\t{0:s}/data_\n".format(self.model_path + "/forcings"))
fout.write("FORCE_FORMAT\tASCII\nFORCE_ENDIAN\tLITTLE\nN_TYPES\t4\n")
fout.write("FORCE_TYPE\tPREC\n")
fout.write("FORCE_TYPE\tTMAX\n")
fout.write("FORCE_TYPE\tTMIN\n")
fout.write("FORCE_TYPE\tWIND\n")
fout.write("FORCE_DT\t24\n")
fout.write("FORCEYEAR\t{0:04d}\n".format(self.startyear))
fout.write("FORCEMONTH\t{0:02d}\n".format(self.startmonth))
fout.write("FORCEDAY\t{0:02d}\n".format(self.startday))
fout.write("FORCEHOUR\t0\n")
fout.write("GRID_DECIMAL\t{0:d}\n".format(self.grid_decimal))
fout.write("WIND_H\t10.0\nMEASURE_H\t2.0\nALMA_INPUT\tFALSE\n")
fout.write("SOIL\t{0:s}\n".format(self.model_path + "/soil.txt"))
veglib, vegparam, snowbands = self.paramFromDB()
fout.write("VEGLIB\t{0}/{1}\n".format(rpath.data, veglib))
fout.write("VEGPARAM\t{0}/{1}\n".format(rpath.data, vegparam))
fout.write("VEGPARAM_LAI\tTRUE\n")
fout.write("ROOT_ZONES\t{0:d}\n".format(root_zones))
fout.write("LAI_SRC\tLAI_FROM_VEGPARAM\n")
nbands = self._getSnowbands(snowbands)
fout.write(
"SNOW_BAND\t{0:d}\t{1}/{2}\n".format(nbands, rpath.data, snowbands))
fout.write("RESULT_DIR\t{0}/output\n".format(self.model_path))
fout.write("OUT_STEP\t24\n")
fout.write("BINARY_OUTPUT\tFALSE\n")
fout.write("MOISTFRACT\tFALSE\n")
fout.write(
"COMPRESS\tFALSE\nALMA_OUTPUT\tFALSE\nPTR_HEADER\tFALSE\nPRT_SNOW_BAND\tFALSE\n")
fout.write(vicoutput.template(["eb", "wb", "sub", "sur", "csp", "eva"]))
fout.close()
def createIndexTable(self, dataset):
"""Creates index table from raster row, column, and tile for each grid cell."""
db = dbio.connect(self.dbname)
cur = db.cursor()
sname, tname = dataset.split(".")
cur.execute(
"select * from raster_resampled where sname='{0}' and tname like '{1}_%' and resolution={2}".format(sname, tname, self.res))
rtable = ".".join(cur.fetchone()[:2])
cur.execute("select * from information_schema.tables where table_name='{0}_xy' and table_schema='public'".format(sname))
if bool(cur.rowcount):
cur.execute("drop table {0}_xy".format(sname))
db.commit()
sql = "create table {0}_xy as (select gid,st_worldtorastercoordx(rast,geom) as x,st_worldtorastercoordy(rast,geom) as y,rid as tile from {4},{5}.basin where fdate=date'{1}-{2}-{3}' and st_intersects(rast,geom))".format(
sname, self.startyear, self.startmonth, self.startday, rtable, self.name)
cur.execute(sql)
cur.execute("create index {0}_xy_r on {0}_xy(tile)".format(sname))
db.commit()
cur.close()
db.close()
return rtable
def _getTiles(self, itable):
"""Get raster tile IDs for the domain."""
db = dbio.connect(self.dbname)
cur = db.cursor()
cur.execute("select distinct(tile) from {0}".format(itable))
tiles = [int(r[0]) for r in cur.fetchall()]
cur.close()
db.close()
return tiles
def _dropIndexTable(self, sname):
"""Deletes index table."""
db = dbio.connect(self.dbname)
cur = db.cursor()
cur.execute("drop table {0}_xy".format(sname))
db.commit()
cur.close()
db.close()
def _getTileData(self, rtable, t):
"""Retrieve data from *rtable* for specific tile *t*."""
db = dbio.connect(self.dbname)
cur = db.cursor()
var = rtable.split(".")[0]
sql = "select gid,fdate,st_value(rast,x,y) from {0},{1}_xy where rid=tile and tile={8} and fdate>=date'{2}-{3}-{4}' and fdate<=date'{5}-{6}-{7}' order by gid,fdate".format(
rtable, var, self.startyear, self.startmonth, self.startday, self.endyear, self.endmonth, self.endday, t)
cur.execute(sql)
data = cur.fetchall()
return data
def getForcings(self, options):
"""Get meteorological forcings from database."""
log = logging.getLogger(__name__)
if not ('precip' in options and 'temperature' in options and 'wind' in options):
log.error("No data source provided for VIC forcings")
sys.exit()
datasets = ["precip." + options["precip"], "tmax." + options["temperature"],
"tmin." + options["temperature"], "wind." + options["wind"]]
if 'lai' in options:
datasets.append("lai" + options["lai"])
self.lai = options['lai']
options['tmax'] = options['temperature']
options['tmin'] = options['temperature']
rtables = {}
for v in ['precip', 'tmax', 'tmin', 'wind']:
rtables[v] = self.createIndexTable("{0}.{1}".format(v, options[v]))
tiles = {v: self._getTiles("{0}_xy".format(v))
for v in ['precip', 'tmax', 'tmin', 'wind']}
data = {}
nprocs = mp.cpu_count()
p = mp.Pool(nprocs)
for v in tiles:
reader = TileReader(self.dbname, rtables[
v], self.startyear, self.startmonth, self.startday, self.endyear, self.endmonth, self.endday)
data[v] = p.map_async(reader, tiles[v])
data = {v: [i for s in data[v].get() for i in s if i[2] is not None] for v in data}
p.close()
p.join()
for s in data:
self._dropIndexTable(s)
self.precip = options['precip']
self.temp = options['temperature']
self.wind = options['wind']
return data['precip'], data['tmax'], data['tmin'], data['wind']
def writeForcings(self, prec, tmax, tmin, wind, lai=None):
"""Write VIC meteorological forcing data files."""
log = logging.getLogger(__name__)
if not os.path.exists(self.model_path + '/forcings'):
os.mkdir(self.model_path + '/forcings')
ndays = (date(self.endyear, self.endmonth, self.endday) -
date(self.startyear, self.startmonth, self.startday)).days + 1
try:
assert len(prec) == len(self.lat) * ndays and len(tmax) == len(self.lat) * ndays and len(tmin) == len(self.lat) * ndays and len(wind) == len(self.lat) * ndays
except AssertionError:
log.error("Missing meteorological data in database for VIC simulation. Exiting...")
sys.exit()
cgid = None
fout = None
for i in range(len(prec)):
gid = prec[i][0]
if cgid is None or gid != cgid:
if cgid is not None:
fout.close()
cgid = gid
filename = "data_{0:.{2}f}_{1:.{2}f}".format(
self.gid[gid][0], self.gid[gid][1], self.grid_decimal)
fout = open(
"{0}/forcings/{1}".format(self.model_path, filename), 'w')
log.info("writing " + filename)
fout.write("{0:f} {1:.2f} {2:.2f} {3:.1f}\n".format(
prec[i][2], tmax[i][2], tmin[i][2], wind[i][2]))
else:
fout.write("{0:f} {1:.2f} {2:.2f} {3:.1f}\n".format(
prec[i][2], tmax[i][2], tmin[i][2], wind[i][2]))
def run(self, vicexec):
"""Run VIC model."""
log = logging.getLogger(__name__)
log.info("Running VIC...")
if not os.path.exists(self.model_path + '/output'):
os.mkdir(self.model_path + '/output')
proc = subprocess.Popen([vicexec, "-g", "{0}/global.txt".format(self.model_path)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(proc.stdout.readline, ''):
log.debug(line.strip())
def getOutputStruct(self, globalfile):
"""Creates a dictionary with output variable-file pairs."""
fin = open(globalfile)
prefix = None
skipyear = 0
c = 3 # Assumes daily output
out = {}
for line in fin:
if line.find("OUTFILE") == 0:
prefix = line.split()[1]
c = 3
elif line.find("SKIPYEAR") == 0:
skipyear = int(line.split()[1])
else:
if len(line) > 1 and line[0] != "#" and prefix:
varname = line.split()[1].replace("OUT_", "").lower()
out[varname] = ("output/" + prefix, c)
if varname in ["soil_moist", "soil_temp", "smliqfrac", "smfrozfrac"]:
c += self.nlayers
else:
c += 1
fin.close()
out['tmax'] = ("forcings/data", 1)
out['tmin'] = ("forcings/data", 2)
out['rainf'] = ("forcings/data", 0)
self.skipyear = skipyear
return out
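    # --- Illustrative sketch (not part of the original class) ---
    # Expected shape of the mapping built above for a hypothetical global
    # file declaring "OUTFILE wb" followed by OUT_EVAP and OUT_SOIL_MOIST:
    # columns 0-2 hold the date, variables start at column 3, and a layered
    # variable such as soil_moist reserves nlayers consecutive columns.
    def _exampleOutputStruct(self):
        return {'evap': ('output/wb', 3),
                'soil_moist': ('output/wb', 4),
                'tmax': ('forcings/data', 1),
                'tmin': ('forcings/data', 2),
                'rainf': ('forcings/data', 0)}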
def saveToDB(self, args, initialize=True, skipsave=0):
"""Reads VIC output for selected variables."""
log = logging.getLogger(__name__)
droughtvars = ["spi1", "spi3", "spi6", "spi12", "sri1", "sri3", "sri6", "sri12", "severity", "dryspells", "smdi", "cdi"]
layervars = ["soil_moist", "soil_temp", "smliqfrac", "smfrozfrac"]
outvars = self.getOutputStruct(self.model_path + "/global.txt")
outdata = {}
if len(self.lat) > 0 and len(self.lon) > 0:
nrows = int(np.round((max(self.lat) - min(self.lat)) / self.res) + 1)
ncols = int(np.round((max(self.lon) - min(self.lon)) / self.res) + 1)
mask = np.zeros((nrows, ncols), dtype='bool')
nt = (date(self.endyear, self.endmonth, self.endday) -
date(self.startyear + self.skipyear, self.startmonth, self.startday)).days + 1
args = vicoutput.variableGroup(args)
if len(args) > 0:
for var in args:
if var in outvars or var in droughtvars:
if var in layervars:
outdata[var] = np.zeros((nt, self.nlayers, nrows, ncols)) + self.nodata
else:
outdata[var] = np.zeros((nt, 1, nrows, ncols)) + self.nodata
else:
log.warning("Variable {0} not found in output files. Skipping import.".format(var))
prefix = set([outvars[v][0] for v in outdata.keys() if v not in droughtvars])
startdate = "{0}-{1}-{2}".format(self.startyear, self.startmonth, self.startday)
enddate = "{0}-{1}-{2}".format(self.endyear, self.endmonth, self.endday)
dates = pandas.date_range(startdate, enddate).values
for c in range(len(self.lat)):
pdata = {}
for p in prefix:
filename = "{0}/{1}_{2:.{4}f}_{3:.{4}f}".format(self.model_path, p, self.lat[c], self.lon[c], self.grid_decimal)
pdata[p] = pandas.read_csv(filename, delim_whitespace=True, header=None).values
i = int((max(self.lat) + self.res / 2.0 - self.lat[c]) / self.res)
j = int((self.lon[c] - min(self.lon) + self.res / 2.0) / self.res)
mask[i, j] = True
for v in [v for v in outdata if v not in droughtvars]:
if v in layervars:
for lyr in range(self.nlayers):
outdata[v][:, lyr, i, j] = pdata[outvars[v][0]][:, outvars[v][1] + lyr]
else:
outdata[v][:, 0, i, j] = pdata[outvars[v][0]][:, outvars[v][1]]
log.info("Read output for {0}|{1}".format(self.lat[c], self.lon[c]))
for var in args:
if var in droughtvars:
dout = drought.calc(var, self)
if dout is not None:
mi, mj = np.where(mask)
outdata[var][:, 0, mi, mj] = dout
else:
outdata[var] = None
if outdata[var] is not None:
self.writeToDB(outdata[var], dates, "{0}".format(var), initialize, skipsave=skipsave)
else:
log.info("No pixels simulated, not saving any output!")
return outdata
def _writeRaster(self, data, filename):
"""Writes GeoTIFF raster temporarily so that it can be imported into the database."""
nrows, ncols = data.shape
driver = gdal.GetDriverByName("GTiff")
ods = driver.Create(filename, ncols, nrows, 1, gdal.GDT_Float32)
ods.SetGeoTransform([min(self.lon) - self.res / 2.0, self.res,
0, max(self.lat) + self.res / 2.0, 0, -self.res])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS("WGS84")
ods.SetProjection(srs.ExportToWkt())
ods.GetRasterBand(1).WriteArray(data)
ods.GetRasterBand(1).SetNoDataValue(self.nodata)
ods = None
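    # --- Illustrative sketch (not part of the original class) ---
    # The geotransform above treats lat/lon as cell centers, so the raster
    # origin sits half a cell outside the outermost centers. For a made-up
    # 2x2 grid at 1-degree resolution:
    def _exampleGeotransform(self):
        res = 1.0
        lon, lat = [-100.0, -99.0], [40.0, 41.0]
        return [min(lon) - res / 2.0, res, 0, max(lat) + res / 2.0, 0, -res]
        # -> [-100.5, 1.0, 0, 41.5, 0, -1.0]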
def writeToDB(self, data, dates, tablename, initialize, ensemble=False, skipsave=0):
"""Writes output data into database."""
log = logging.getLogger(__name__)
db = dbio.connect(self.dbname)
cur = db.cursor()
if dbio.tableExists(self.dbname, self.name, tablename) and ensemble and not dbio.columnExists(self.dbname, self.name, tablename, "ensemble"):
log.warning("Table {0} exists but does not contain ensemble information. Overwriting entire table!".format(tablename))
cur.execute("drop table {0}.{1}".format(self.name, tablename))
db.commit()
if dbio.tableExists(self.dbname, self.name, tablename):
if initialize:
for dt in [self.startdate + timedelta(t) for t in range((self.enddate - self.startdate).days+1)]:
dbio.deleteRasters(self.dbname, "{0}.{1}".format(self.name, tablename), dt)
else:
sql = "create table {0}.{1} (id serial not null primary key, rid int not null, fdate date not null, rast raster)".format(
self.name, tablename)
cur.execute(sql)
if data.shape[1] > 1:
cur.execute("alter table {0}.{1} add column layer int".format(self.name, tablename))
if ensemble:
cur.execute("alter table {0}.{1} add column ensemble int".format(self.name, tablename))
db.commit()
startyear, startmonth, startday = self.startyear, self.startmonth, self.startday
if skipsave > 0:
ts = date(self.startyear, self.startmonth,
self.startday) + timedelta(skipsave)
data = data[skipsave:]
startyear, startmonth, startday = ts.year, ts.month, ts.day
tiffiles = []
for t in range(data.shape[0]):
dt = date(startyear, startmonth, startday) + timedelta(t)
for lyr in range(data.shape[1]):
filename = "{0}/{1}_{2}{3:02d}{4:02d}_{5:02d}.tif".format(
self.model_path, tablename, dt.year, dt.month, dt.day, lyr + 1)
self._writeRaster(data[t, lyr, :, :], filename)
tiffiles.append(filename)
cmd = " ".join(["{0}/raster2pgsql".format(rpath.bins), "-s", "4326", "-F", "-d", "-t", "auto"] + tiffiles + ["temp", "|", "{0}/psql".format(rpath.bins), "-d", self.dbname])
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
sout, err = proc.communicate()
log.debug(sout)
cur.execute("alter table temp add column fdate date")
cur.execute("update temp set fdate = date (concat_ws('-',substring(filename from {0} for 4),substring(filename from {1} for 2),substring(filename from {2} for 2)))".format(
len(tablename) + 2, len(tablename) + 6, len(tablename) + 8))
if data.shape[1] > 1:
cur.execute("alter table temp add column layer int")
cur.execute("update temp set layer=(substring(filename from {0} for 2))::int".format(
len(tablename) + 11))
cur.execute("select count(*) from temp")
n = int(cur.fetchone()[0])
        # Integer division: true division is in effect via __future__ above.
        ntiles = n // data.shape[0]
if data.shape[1] > 1:
cur.execute(
"insert into {0}.{1} (rid,fdate,layer,rast) select ((rid+{2}) % {2})+1,fdate,layer,rast from temp".format(self.name, tablename, ntiles))
else:
cur.execute(
"insert into {0}.{1} (rid,fdate,rast) select ((rid+{2}) % {2})+1,fdate,rast from temp".format(self.name, tablename, ntiles))
if bool(ensemble):
sql = "update {0}.{1} set ensemble = {2} where ensemble is null".format(
self.name, tablename, int(ensemble))
cur.execute(sql)
cur.execute("drop index if exists {0}.{1}_dtidx".format(
self.name, tablename))
cur.execute("create index {1}_dtidx on {0}.{1}(fdate)".format(
self.name, tablename))
cur.execute("drop index if exists {0}.{1}_spidx".format(
self.name, tablename))
cur.execute("create index {1}_spidx on {0}.{1} using gist(st_convexhull(rast))".format(
self.name, tablename))
db.commit()
cur.close()
db.close()
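    # --- Illustrative sketch (not part of the original class) ---
    # raster2pgsql numbers the imported tiles sequentially across all dates;
    # the ((rid + ntiles) % ntiles) + 1 expression above wraps that running
    # id into the range 1..ntiles. The arithmetic, for 3 tiles per date:
    def _exampleTileIndex(self):
        ntiles = 3
        return [((rid + ntiles) % ntiles) + 1 for rid in range(1, 7)]
        # -> [2, 3, 1, 2, 3, 1]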
def save(self, saveto, args, initialize=True, skipsave=0):
"""Reads and saves selected output data variables into the database or a user-defined directory."""
if saveto == "db":
self.saveToDB(args, initialize=initialize, skipsave=skipsave)
else:
if initialize:
if os.path.isdir(saveto):
shutil.rmtree(saveto)
# elif os.path.isfile(saveto):
# os.remove(saveto)
# os.makedirs(saveto)
# shutil.move(self.model_path+"/output", saveto)
# shutil.move(self.model_path+"/forcings", saveto)
shutil.copytree(self.model_path, saveto,
ignore=shutil.ignore_patterns("*.txt"))
| {
"content_hash": "33379cce1bb45d045b7e06e99272d5f6",
"timestamp": "",
"source": "github",
"line_count": 601,
"max_line_length": 329,
"avg_line_length": 48.287853577371045,
"alnum_prop": 0.5568381516832639,
"repo_name": "nasa/RHEAS",
"id": "9ce40ebf9c7c69561d0658df2ccb2b777bf91524",
"size": "29021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/vic/vic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "412"
},
{
"name": "Python",
"bytes": "356616"
}
],
"symlink_target": ""
} |
"""
Load datasets from the internet.
**CRUST2.0**
Load and convert the `CRUST2.0 global crustal model
<http://igppweb.ucsd.edu/~gabi/rem.html>`_ (Bassin et al., 2000).
* :func:`~fatiando.datasets.fetch_crust2`: Download the .tar.gz archive with
the model from the website
* :func:`~fatiando.datasets.crust2_to_tesseroids`: Convert the CRUST2.0 model
to tesseroids
**Sample data**
Download a `Bouguer anomaly map of Alps (EGM 2008 model)
<https://gist.github.com/leouieda/6023922>`_ in Surfer ASCII grid file format.
* :func:`~fatiando.io.fetch_bouguer_alps_egm`: Download the .grd archive with
the Bouguer anomaly of Alps (EGM 2008 model) from the website
**References**
Bassin, C., Laske, G. and Masters, G., The Current Limits of Resolution for
Surface Wave Tomography in North America, EOS Trans AGU, 81, F897, 2000.
----
"""
import urllib
import tarfile
import numpy
from . import gridder
from .mesher import Tesseroid
def fetch_crust2(fname='crust2.tar.gz'):
"""
Download the CRUST2.0 model from http://igppweb.ucsd.edu/~gabi/crust2.html
Parameters:
* fname : str
The name that the archive file will be saved when downloaded
Returns:
* fname : str
The downloaded file name
"""
urllib.urlretrieve('http://igpppublic.ucsd.edu/~gabi/ftp/crust2.tar.gz',
filename=fname)
return fname
def crust2_to_tesseroids(fname):
"""
Convert the CRUST2.0 model to tesseroids.
Opens the .tar.gz archive and converts the model to
:class:`fatiando.mesher.Tesseroid`.
    Each tesseroid will have its ``props`` set to the appropriate Vp, Vs and
density.
The CRUST2.0 model includes 7 layers: ice, water, soft sediments, hard
    sediments, upper crust, middle crust and lower crust. It also includes the
mantle below the Moho. The mantle portion is not included in this
conversion because there is no way to place a bottom on it.
Parameters:
* fname : str
Name of the model .tar.gz archive (see
:func:`~fatiando.io.fetch_crust2`)
Returns:
* model : list of :class:`fatiando.mesher.Tesseroid`
The converted model
"""
archive = tarfile.open(fname, 'r:gz')
# First get the topography and bathymetry information
topogrd = _crust2_get_topo(archive)
# Now make a dict with the codec for each type code
codec = _crust2_get_codec(archive)
# Get the type codes with the actual model
types = _crust2_get_types(archive)
# Convert to tesseroids
size = 2
lons = numpy.arange(-180, 180, size)
lats = numpy.arange(90, -90, -size) # This is how lats are in the file
model = []
for i in xrange(len(lats)):
for j in xrange(len(lons)):
t = types[i][j]
top = topogrd[i][j]
for layer in xrange(7):
if codec[t]['thickness'][layer] == 0:
continue
w, e, s, n = lons[j], lons[j] + size, lats[i] - size, lats[i]
bottom = top - codec[t]['thickness'][layer]
props = {'density': codec[t]['density'][layer],
'vp': codec[t]['vp'][layer],
'vs': codec[t]['vs'][layer]}
model.append(Tesseroid(w, e, s, n, top, bottom, props))
top = bottom
return model
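# --- Illustrative sketch (not part of the original module) ---
# Layers are stacked downward from the surface: each tesseroid's bottom is
# the current top minus the layer thickness, and that bottom becomes the top
# of the next layer. With hypothetical thicknesses of 1000 m and 2000 m
# below a 500 m topography:
def _example_layer_stacking():
    top = 500.0
    bounds = []
    for thickness in [1000.0, 2000.0]:
        bottom = top - thickness
        bounds.append((top, bottom))
        top = bottom
    return bounds  # -> [(500.0, -500.0), (-500.0, -2500.0)]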
def _crust2_get_topo(archive):
"""
Fetch the matrix of topography and bathymetry from the CRUST2.0 archive.
"""
f = archive.extractfile('./CNelevatio2.txt')
topogrd = numpy.loadtxt(f, skiprows=1)[:, 1:]
return topogrd
def _crust2_get_types(archive):
"""
Fetch a matrix with the type code for each 2x2 degree cell.
"""
f = archive.extractfile('./CNtype2.txt')
typegrd = numpy.loadtxt(f, dtype=numpy.str, skiprows=1)[:, 1:]
return typegrd
def _crust2_get_codec(archive):
"""
    Fetch the type code translation codec from the archive and convert it to a
dict.
"""
f = archive.extractfile('./CNtype2_key.txt')
# Skip the first 5 lines which are the header
lines = [l.strip() for l in f.readlines()[5:] if l.strip()]
# Each type code is 5 lines: code, vp, vs, density, thickness
codec = {}
for i in xrange(len(lines) / 5):
code = lines[i * 5][:2]
# Get the values and convert them to SI units
vp = [float(v) * 1000 for v in lines[i * 5 + 1].split()]
vs = [float(v) * 1000 for v in lines[i * 5 + 2].split()]
density = [float(v) * 1000 for v in lines[i * 5 + 3].split()]
# Skip the last thickness because it is an inf indicating the mantle
thickness = [float(v) * 1000 for v in lines[i * 5 + 4].split()[:7]]
codec[code] = {'vp': vp, 'vs': vs, 'density': density,
'thickness': thickness}
return codec
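# --- Illustrative sketch (not part of the original module) ---
# Each type code occupies 5 non-blank lines (code, vp, vs, density,
# thickness); the parser above walks the flat line list in steps of 5 and
# scales the km-based values to SI. A toy record with made-up numbers:
def _example_codec_record():
    lines = ['A0 example crust type',
             '3.8 2.0 5.0',       # vp (km/s)
             '1.9 1.0 2.9',       # vs (km/s)
             '0.92 2.3 2.6',      # density (g/cm^3)
             '2.0 1.5 10.0 inf']  # thickness (km); trailing inf = mantle
    code = lines[0][:2]
    vp = [float(v) * 1000 for v in lines[1].split()]
    return code, vp  # -> ('A0', [3800.0, 2000.0, 5000.0])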
def fetch_bouguer_alps_egm(fname='bouguer_alps_egm08.grd'):
"""
Download the Bouguer anomaly of Alps (EGM 2008 model) in Surfer ASCII grid
file format from https://gist.github.com/leouieda/6023922
Parameters:
* fname : str
The name that the archive file will be saved when downloaded
Returns:
* fname : str
The downloaded file name
"""
urllib.urlretrieve('https://gist.github.com/leouieda/6023922/raw/'
'948b0acbadb18e6ad49efe2092d9d9518b247780/'
'bouguer_alps_egm08.grd', filename=fname)
return fname
| {
"content_hash": "7093db04e95a4ea257c8852789cf098b",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 78,
"avg_line_length": 31.421965317919074,
"alnum_prop": 0.6219646799116998,
"repo_name": "eusoubrasileiro/fatiando",
"id": "a1341c16fbdb69af1052b616ee92aec634debdb3",
"size": "5436",
"binary": false,
"copies": "3",
"ref": "refs/heads/sim-class-improvements",
"path": "fatiando/datasets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5213509"
},
{
"name": "Makefile",
"bytes": "7884"
},
{
"name": "Python",
"bytes": "946895"
},
{
"name": "Shell",
"bytes": "5112"
}
],
"symlink_target": ""
} |
import mock
from heat.common import exception
from heat.common import template_format
from heat.engine import resource
from heat.engine import resources
from heat.engine.resources.openstack.heat import cloud_watch
from heat.engine import scheduler
from heat.engine import watchrule
from heat.tests import common
from heat.tests import utils
AWS_CloudWatch_Alarm = '''
HeatTemplateFormatVersion: '2012-12-12'
Description: Template which tests alarms
Resources:
test_me:
Type: AWS::CloudWatch::Alarm
Properties:
MetricName: cpu_util
Namespace: AWS/EC2
Statistic: Average
Period: '60'
EvaluationPeriods: '1'
Threshold: '50'
ComparisonOperator: GreaterThanThreshold
'''
class CloudWatchAlarmTest(common.HeatTestCase):
def setUp(self):
super(CloudWatchAlarmTest, self).setUp()
def clear_register_class():
env = resources.global_env()
env.registry._registry.pop('CWLiteAlarmForTest')
self.ctx = utils.dummy_context()
resource._register_class('CWLiteAlarmForTest',
cloud_watch.CloudWatchAlarm)
self.addCleanup(clear_register_class)
def parse_stack(self):
t = template_format.parse(AWS_CloudWatch_Alarm)
env = {'resource_registry': {
'AWS::CloudWatch::Alarm': 'CWLiteAlarmForTest'
}}
self.stack = utils.parse_stack(t, params=env)
return self.stack
def test_resource_create_good(self):
s = self.parse_stack()
self.assertIsNone(scheduler.TaskRunner(s['test_me'].create)())
def test_resource_create_failed(self):
s = self.parse_stack()
with mock.patch.object(watchrule.WatchRule, 'store') as bad_store:
bad_store.side_effect = KeyError('any random failure')
task_func = scheduler.TaskRunner(s['test_me'].create)
self.assertRaises(exception.ResourceFailure, task_func)
def test_resource_delete_good(self):
s = self.parse_stack()
self.assertIsNone(scheduler.TaskRunner(s['test_me'].create)())
self.assertIsNone(scheduler.TaskRunner(s['test_me'].delete)())
def test_resource_delete_notfound(self):
# if a resource is not found, handle_delete() should not raise
# an exception.
s = self.parse_stack()
self.assertIsNone(scheduler.TaskRunner(s['test_me'].create)())
res_name = self.stack['test_me'].physical_resource_name()
self.wr = watchrule.WatchRule.load(self.ctx,
watch_name=res_name)
with mock.patch.object(watchrule.WatchRule, 'destroy') as bad_destroy:
watch_exc = exception.WatchRuleNotFound(watch_name='test')
bad_destroy.side_effect = watch_exc
self.assertIsNone(scheduler.TaskRunner(s['test_me'].delete)())
def _get_watch_rule(self):
stack = self.parse_stack()
return stack['test_me']
@mock.patch.object(cloud_watch.watchrule.WatchRule, 'load')
    def test_check(self, mock_load):
res = self._get_watch_rule()
scheduler.TaskRunner(res.check)()
self.assertEqual((res.CHECK, res.COMPLETE), res.state)
@mock.patch.object(cloud_watch.watchrule.WatchRule, 'load')
def test_check_fail(self, mock_load):
res = self._get_watch_rule()
exc = cloud_watch.exception.WatchRuleNotFound(watch_name='Boom')
mock_load.side_effect = exc
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.check))
self.assertEqual((res.CHECK, res.FAILED), res.state)
self.assertIn('Boom', res.status_reason)
| {
"content_hash": "96b56ceb2894f60d03f31651fb0abe09",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 78,
"avg_line_length": 35.80582524271845,
"alnum_prop": 0.6502169197396963,
"repo_name": "takeshineshiro/heat",
"id": "875c0aaff7af35e66b48848c3a6aaf4ca0ae1c10",
"size": "4263",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "heat/tests/test_cloudwatch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6735948"
},
{
"name": "Shell",
"bytes": "33316"
}
],
"symlink_target": ""
} |
from pymongo import MongoClient
import pandas as pd
path = '.'
consensus_level = 0.50
if consensus_level == 0.:
suffix = '_full'
else:
suffix = ''
def load_data():
client = MongoClient('localhost', 27017)
db = client['radio']
catalog = db['catalog']
return catalog
def run_static(catalog,full=False):
filename = '%s/rgz-analysis/csv/static_catalog%s.csv' % (path,suffix)
wise_default_dict = catalog.find_one({'AllWISE':{'$exists':True}})['AllWISE']
for k in wise_default_dict:
wise_default_dict[k] = -99.
wise_default_dict['designation'] = 'no_wise_match'
wise_default_dict['numberMatches'] = 0
sdss_default_dict = catalog.find_one({'SDSS':{'$exists':True},'SDSS.redshift':{'$exists':False}})['SDSS']
for k in sdss_default_dict:
sdss_default_dict[k] = -99.
sdss_default_dict['objID'] = 'no_sdss_match'
sdss_default_dict['numberMatches'] = 0
with open(filename,'w') as f:
# Header
print >> f,'source_id zooniverse_id peak1_ra peak1_dec peak1_flux peak2_ra peak2_dec peak2_flux max_angular_extent total_solid_angle wise_designation wise_ra wise_dec wise_w1mag redshift redshift_err redshift_type sdss_id sdss_ra sdss_dec consensus_level'
# Data requested by Larry for double-peaked sources
#for c in catalog.find({'radio.numberComponents':2}):
bad_entry = 0
for c in catalog.find({'consensus.level':{"$gte":consensus_level}}):
wiseval = c.setdefault('AllWISE',wise_default_dict)
sdssval = c.setdefault('SDSS',sdss_default_dict)
sdssredshift = c['SDSS'].setdefault('redshift',-99.)
sdssredshifterr = c['SDSS'].setdefault('redshift_err',-99.)
sdssredshifttype = c['SDSS'].setdefault('redshift_type',-99)
try:
print >> f,'RGZ_{0:} {14:} {1:.5f} {2:.5f} {3:.2f} {4:.5f} {5:.5f} {6:.2f} {17:.3f} {18:.3f} {10:} {7:.5f} {8:.5f} {9:.2f} {11:.4f} {12:.4f} {13:d} {16:} {19:.5f} {20:.5f} {15:.2f}'.format(\
c['catalog_id'],
c['radio']['peaks'][0]['ra'],
c['radio']['peaks'][0]['dec'],
c['radio']['peaks'][0]['flux'],
c['radio']['peaks'][1]['ra'],
c['radio']['peaks'][1]['dec'],
c['radio']['peaks'][1]['flux'],
c['AllWISE']['ra'],
c['AllWISE']['dec'],
c['AllWISE']['w1mpro'],
c['AllWISE']['designation'],
c['SDSS']['redshift'],
c['SDSS']['redshift_err'],
c['SDSS']['redshift_type'],
c['Zooniverse_id'],
c['consensus']['level'],
c['SDSS']['objID'],
c['radio']['maxAngularExtent'],
c['radio']['totalSolidAngle'],
c['SDSS']['ra'],
c['SDSS']['dec'])
except IndexError:
bad_entry += 1
print "{0:d}/{1:d} had no results for radio, SDSS, or WISE".format(bad_entry,catalog.find().count())
return None
def match_clusters():
df1 = pd.read_csv('%s/radiogalaxyzoo/cluster_matching/MATCHED_PAIRS.tsv' % path,delim_whitespace=True)
df2 = pd.read_csv('%s/rgz-analysis/csv/static_catalog%s.csv' % (path,suffix),delim_whitespace=True)
# Keep only columns that Larry is interested in
allcols = set(df1.columns)
keep = set((u'cluster_id', u'cluster_ra', u'cluster_dec', u'rgz_id',u'cluster_best_z', u'cluster_rl', u'Separation'))
df1.drop(list(allcols.difference(keep)),axis=1,inplace=True)
df1.rename(columns={'Separation':'projected_sep'},inplace=True)
# Rename the columns so I can match them against each other
df2.rename(columns={'source_id':'rgz_id'},inplace=True)
df2.rgz_id.replace("_","",regex=True,inplace=True)
dfm = df2.merge(df1,on='rgz_id')
# Saves file as hard copy, but not directly used
dfm.to_csv('%s/rgz-analysis/csv/static_catalog2%s.csv' % (path,suffix),sep = ' ', index=False)
return dfm
def load_angles(filename):
df = pd.read_csv('%s/rgz-analysis/bending_angles/%s.csv' % (path,filename))
df['angle_type'] = pd.Series((filename[7:],)*len(df), index=df.index)
return df
def match_bending_angle(dfm):
df1 = load_angles('angles_double_pixradio')
#df2 = load_angles('angles_triple_pixradio')
df3 = load_angles('angles_multipeaked_singles')
#df4 = load_angles('angles_multipeaked_singles_no_optical')
for x in (df1,df3):
print len(x),x['angle_type'][0]
dfall = pd.concat([df1,df3],ignore_index=True)
dfba = dfm.merge(dfall,on='zooniverse_id')
dfba.to_csv('%s/rgz-analysis/csv/static_catalog3%s.csv' % (path,suffix),sep = ' ', index=False)
print "\n%i sources in static catalog" % len(dfba)
return None
if __name__ == "__main__":
catalog = load_data()
run_static(catalog)
dfm = match_clusters()
match_bending_angle(dfm)
| {
"content_hash": "8eca76835b98f1ecf717d23cf3b5ecfd",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 263,
"avg_line_length": 37.156716417910445,
"alnum_prop": 0.5792327776661981,
"repo_name": "willettk/rgz-analysis",
"id": "96f08004e5d6598c082b85c18fc81021bab1c72c",
"size": "4979",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/catalog_larry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "147317"
},
{
"name": "Python",
"bytes": "691021"
},
{
"name": "Ruby",
"bytes": "3598"
},
{
"name": "Shell",
"bytes": "6723"
},
{
"name": "TeX",
"bytes": "40897"
}
],
"symlink_target": ""
} |
"""
Command line tool to export documents from Sveetchies-documents.
DEPRECATED
"""
import re, StringIO
from optparse import OptionValueError, make_option
from django.conf import settings
from django.core.cache import cache
from django.core.management.base import CommandError, BaseCommand
from sveedocuments.models import Page, Insert
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option("--get_pages", dest="get_pages", default=None, help="Get pages from given slug. Each slug separated by comma, order does matter. Special slug 'ALL' means all pages in their sitemap order.", metavar="SLUGS"),
make_option("--follows", dest="followed_pages", default=None, help="Specify each page slug to recursively follow for their children in select. Each slug separated by comma. Special slug 'ALL' means all pages.", metavar="SLUGS"),
make_option("--excludes", dest="excluded_pages", default=None, help="Specify each page slug to excludes (but not their children) in select. Each slug separated by comma.", metavar="SLUGS"),
make_option("--print", dest="print_docs", action="store_true", default=False, help="Print out the selected documents."),
make_option("--resume", dest="resume_docs", action="store_true", default=False, help="Print a title list of selected documents."),
make_option("--to_file", dest="export_to_file", default=None, help="Export selected documents in a file.", metavar="FILEPATH"),
make_option("--to_github", dest="export_to_github", default=None, help="Export selected documents in a file but with some changes to be suitable with the Github ReSructuredText parser.", metavar="FILEPATH"),
)
help = "[DEPRECATED] Command to export documents from Sveetchies-documents"
def handle(self, *args, **options):
if len(args) != 0:
raise CommandError("Command doesn't accept any arguments")
self.export_to_file = options.get('export_to_file')
self.export_to_github = options.get('export_to_github')
self.print_docs = options.get('print_docs')
self.resume_docs = options.get('resume_docs')
self.get_pages = options.get('get_pages')
self.followed_pages = options.get('followed_pages')
self.excluded_pages = options.get('excluded_pages')
self.verbosity = int(options.get('verbosity'))
self.compile_hack_regex()
if self.print_docs or self.resume_docs:
output = StringIO.StringIO()
self.output_documents(output, resumed=self.resume_docs)
print output.getvalue()
output.close()
if self.export_to_file or self.export_to_github:
output = open(self.export_to_file or self.export_to_github, "w")
self.output_documents(output)
output.close()
def compile_hack_regex(self):
"""
Compile all regex used for syntax hacks
``_page_role_regex`` to match : ::
:page:`VALUE`
``_sourcecode_directive_regex`` to match : ::
.. sourcecode:: python
:linenos:
:hl_lines: 1,2,3
"""
self._page_role_regex = re.compile(r"(?:\:page\:`)(?P<name>.*?)(?:`)")
simple = r"(?:\s)"
with_linenos = r"(?:\s[\ ]+\:linenos\:)"
with_hl_lines = r"(?:\s[\ ]+\:linenos\:\s[\ ]+\:hl_lines\:[\ ][1-9,]+)"
self._sourcecode_directive_regex = re.compile(r"(?:..[\ ]+sourcecode\:\:[\ ]+)(?P<language>.*?)(?:" + with_hl_lines + r"|" + with_linenos + r"|" + simple + r")")
def get_documents(self):
"""
Get all documents matched by given options
"""
instances = []
if self.get_pages:
followed = [v for v in (self.followed_pages or '').split(',') if v]
excluded = [v for v in (self.excluded_pages or '').split(',') if v]
if self.verbosity:
print "Followed slugs:", followed
print "Excluded slugs:", excluded
            # Find the children of the pages to exclude
if excluded:
tmp = []
for item in excluded:
try:
p = Page.objects.get(slug=item).get_descendants(include_self=True).values_list('slug', flat=True)
except Page.DoesNotExist:
raise CommandError("Excludes: slug '{0}' does not exist".format(item))
else:
tmp.extend(p)
excluded = tmp
if self.verbosity:
print "Excluded pages:", excluded
            # Slugs of the pages to extract
if self.get_pages == 'ALL':
page_slugs = Page.objects.root_nodes().exclude(visible=False, slug__in=excluded).values_list('slug', flat=True)
else:
page_slugs = [s for s in self.get_pages.split(',') if s not in excluded]
            # Fetch each page
for slug in page_slugs:
                # If following all children, or if this page's slug is explicitly listed to follow
if followed and ('ALL' in followed or slug in followed):
try:
queryset = Page.objects.get(slug=slug).get_descendants(include_self=True).exclude(id__in=[i.id for i in instances]).exclude(visible=False, slug__in=excluded)
except Page.DoesNotExist:
raise CommandError("Get many: slug '{0}' does not exist".format(slug))
else:
instances.extend(queryset)
                # No following: fetch only the page itself
else:
try:
page = Page.objects.get(slug=slug, visible=True)
except Page.DoesNotExist:
raise CommandError("Get single: slug '{0}' does not exist".format(slug))
else:
instances.append(page)
if self.verbosity:
print "Pages instances:", instances
if not instances:
raise CommandError("No document finded")
return instances
def output_documents(self, output, resumed=False):
"""
Export all matched documents in a file
Documents are ordered and their titles are added at top of each them as the
highest title
"""
loaded_docs = self.get_documents()
from_multiple_docs = len(loaded_docs)>1
for document in loaded_docs:
content = document.content
if not resumed:
# Apply hacks on content if needeed
if self.export_to_github:
content = self._page_role_regex.sub(self._page_role_sub_replacement, content)
content = self._sourcecode_directive_regex.sub(self._sourcecode_directive_sub_replacement, content)
# Output the document title + content
if from_multiple_docs:
output.write( "\n" )
output.write( self._to_rest_title(document.title.encode('UTF8').strip(), character="=")+"\n\n" )
output.write( content.encode('UTF8')+"\n" )
else:
# Output the document title only
output.write( "* /%s/ : %s\n" % (document.slug.encode('UTF8'), document.title.encode('UTF8')) )
return output
def _to_rest_title(self, title, character="="):
"""Transform the document title to a ReST TOC title"""
width = max([len(line) for line in title.splitlines()])
ascii_line = character*width
return ascii_line+"\n"+title+"\n"+ascii_line
def _page_role_sub_replacement(self, matchobj):
"""Transform page roles to strong emphasis"""
return u"**{name}**".format(name=matchobj.group(1))
def _sourcecode_directive_sub_replacement(self, matchobj):
"""Transform sourcecode directives to pre"""
return u"::"
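    # --- Illustrative sketch (not part of the original command) ---
    # What the two Github-compatibility hacks above do to a document:
    def _example_hacks(self):
        self.compile_hack_regex()
        src = u"See :page:`intro`.\n\n.. sourcecode:: python\n"
        out = self._page_role_regex.sub(self._page_role_sub_replacement, src)
        out = self._sourcecode_directive_regex.sub(
            self._sourcecode_directive_sub_replacement, out)
        return out  # -> u"See **intro**.\n\n::"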
| {
"content_hash": "4812bcc27e5575f57290ca6beef3e323",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 236,
"avg_line_length": 46.61931818181818,
"alnum_prop": 0.5674588665447897,
"repo_name": "sveetch/sveedocuments",
"id": "4a68886a47b580cd481ce54b12611f18fcf3fe9c",
"size": "8239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sveedocuments/management/commands/documents_export.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "394407"
},
{
"name": "HTML",
"bytes": "27149"
},
{
"name": "JavaScript",
"bytes": "105924"
},
{
"name": "Python",
"bytes": "145236"
},
{
"name": "Ruby",
"bytes": "1005"
}
],
"symlink_target": ""
} |
import radio
from microbit import *
radio.config(group=0)
radio.on()
while True:
message = radio.receive()
if message:
display.show(Image.HAPPY)
sleep(100)
display.scroll(message)
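# --- Illustrative sketch (not part of the original script) ---
# A matching sender for this receiver, assuming the same radio group; run
# it on a second micro:bit:
#
#   import radio
#   from microbit import *
#   radio.config(group=0)
#   radio.on()
#   while True:
#       if button_a.was_pressed():
#           radio.send("hello")
#       sleep(100)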
| {
"content_hash": "5b903a094edb5a315b89dadb46dc3e5a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 33,
"avg_line_length": 15.428571428571429,
"alnum_prop": 0.6435185185185185,
"repo_name": "romilly/pi-towers-workshop",
"id": "7f27e1bc0440975779dd3a4cc27d0277f0ebf002",
"size": "216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/radio2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "426342"
},
{
"name": "Python",
"bytes": "1832"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
} |
""" Tools for feature selection.
"""
from collections import OrderedDict
from numpy import sort
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import RFE
from sklearn.svm import LinearSVC
from sklearn.cluster import KMeans
from unstyle.features.featregister import featregistry
# TODO: Grid search with k-fold cross-validation on classifier in order to see
# if better ranking results can be achieved.
def rank_features_dt(X, y, featureset):
"""Rank features by their importance with a decision tree.
This does not seem to agree with rank_features_rbe, although it is
considerably faster. Using this with SVM classifiers is likely a bad idea.
:param X: A training set of features.
:param y: A target set (aka class labels for the training set)
:param featureset: An instance of a featureset (such as Basic9Extractor())
:rtype: An OrderedDict of the form {K : V}, with K being the feature name
and V being its importance. This dictionary will be sorted by importance.
"""
classifier = ExtraTreesClassifier(n_estimators=500, n_jobs=-1)
classifier.fit(X, y)
importances = classifier.feature_importances_
feat_importance = OrderedDict()
# Get the names of the feature columns.
for index, func in enumerate(featureset.features):
feat_importance[func] = importances[index]
print(feat_importance)
# Sort the dictionary by value and return it.
return sorted(feat_importance.items(), key=lambda x: x[1], reverse=True)
def rank_features_rfe(X, y, featureset):
"""Rank features by their importance using recursive feature elimination.
:param X: A training set of features.
:param y: A target set (aka class labels for the training set)
:param featureset: An instance of a featureset (such as Basic9Extractor())
:rtype: An OrderedDict of the form {K : V}, with K being the feature name
and V being its importance. This dictionary will be sorted by importance.
"""
# FIXME: Use an RBF SVC to rank features. It is likely that the "importance"
# rankings derived from a LinearSVC are similar as an RBF kernel SVM, but,
# for safety's sake, it is best to assume they are not.
classifier = LinearSVC()
classifier.fit(X, y)
ranker = RFE(classifier, 1, step=1)
ranker = ranker.fit(X, y)
# Get the names of the feature columns.
    # FIXME: Duplicate code from rank_features_dt. Make this its own function.
feat_importance = OrderedDict()
for index, func in enumerate(featureset.features):
feat_importance[func] = ranker.ranking_[index]
return sorted(feat_importance.items(), key=lambda x: x[1])
def ak_means_cluster(X, numAuthors):
"""Given a set of feature values, cluster them into k groups. If, after
convergence, there are less than 3 points in any given cluster, recurse with
ak_means_cluster(featureVec, numAuthors - 1).
:param X: Values for a given feature across a set of authors.
:rtype: A tuple containing (a trained k-means cluster, numAuthors)
"""
if numAuthors < 1:
raise ValueError("ak-means initialized with less than 1 cluster.")
km = KMeans(n_clusters=numAuthors, init='k-means++', n_jobs=-1)
km.fit(X)
# Check number of features found in each cluster. Restart with k - 1 if
# there are less than 3 members of any cluster.
# First, we count the number of members in each cluster:
labelTable = {}
for label in km.labels_:
if label in labelTable:
labelTable[label] += 1
else:
labelTable[label] = 1
# Now we check if any clusters have less than three members:
for label in labelTable.keys():
if labelTable[label] < 3:
print("Reinitializing k means with ", numAuthors - 1, " clusters.")
return ak_means_cluster(X, numAuthors - 1)
return (km, numAuthors)
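# --- Illustrative sketch (not part of the original module) ---
# Example use of ak_means_cluster on synthetic per-author feature values;
# with nine well-separated points and three requested clusters, every
# cluster can keep at least three members, so no recursion is expected
# (the data below are made up):
def _example_ak_means():
    import numpy as np
    X = np.array([[0.0], [0.1], [0.2],
                  [5.0], [5.1], [5.2],
                  [9.0], [9.1], [9.2]])
    km, k = ak_means_cluster(X, 3)
    return k  # -> 3, unless some cluster ended up with fewer than 3 members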
| {
"content_hash": "d1a7001b69ff5fb06f477bd878505d5f",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 80,
"avg_line_length": 37.980582524271846,
"alnum_prop": 0.6988752556237219,
"repo_name": "pagea/unstyle",
"id": "d54adbda3a8b56d0014e0034679580260c1a0fbe",
"size": "3929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unstyle/feat_select.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "133"
},
{
"name": "Python",
"bytes": "186053"
},
{
"name": "Shell",
"bytes": "169"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_bolle_bol_calf.iff"
result.attribute_template_id = 9
result.stfName("monster_name","bolle_bol")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| {
"content_hash": "8120fedc5383bf85a9f5ba7d105252d6",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 60,
"avg_line_length": 22.46153846153846,
"alnum_prop": 0.6883561643835616,
"repo_name": "anhstudios/swganh",
"id": "36e5cf2b6078db5b67429b02b592636fab832fb5",
"size": "437",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_bolle_bol_calf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import six
import logging
logger = logging.getLogger(__name__)
try:
from .net_cdf_io import load_netCDF
except ImportError:
def load_netCDF(*args, **kwargs):
# Die at call time so as not to ruin entire io package.
raise ImportError("This function requires netCDF4.")
from .binary import read_binary
from .avizo_io import load_amiramesh
from .save_powder_output import save_output
from .gsas_file_reader import gsas_reader
from .save_powder_output import gsas_writer
__all__ = ['load_netCDF', 'read_binary', 'load_amiramesh', 'save_output',
'gsas_reader', 'gsas_writer']
| {
"content_hash": "d230de0114be82bb3ac139b50dd6f8d7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 27.12,
"alnum_prop": 0.7109144542772862,
"repo_name": "licode/scikit-beam",
"id": "ea980d79eca0d1d85a321621b5844b2be4a3df56",
"size": "3160",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "skbeam/io/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "28"
},
{
"name": "C",
"bytes": "18904"
},
{
"name": "Python",
"bytes": "653950"
},
{
"name": "Shell",
"bytes": "38"
}
],
"symlink_target": ""
} |
from app import db
tag_table = db.Table('post_tags',
db.Column('post_id', db.Integer, db.ForeignKey('post.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id'))
)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
full_name = db.Column(db.String(50))
username = db.Column(db.String(20))
email = db.Column(db.String(50))
mini_profile = db.Column(db.Text)
posts = db.relationship('Post',backref='user',lazy='dynamic')
jempols = db.relationship('Jempol',backref='user',lazy='dynamic')
comments = db.relationship('Comment',backref='user',lazy='dynamic')
# data transfer object to form JSON
def dto(self):
return dict(
id = self.id,
full_name = self.full_name,
username = self.username,
email = self.email,
mini_profile = self.mini_profile)
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100))
content = db.Column(db.Text)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
tags = db.relationship('Tag',secondary=tag_table,backref=db.backref('post_tags'),lazy='dynamic')
jempols = db.relationship('Jempol',backref='post',lazy='dynamic')
    comments = db.relationship('Comment',backref='post',lazy='dynamic')
# data transfer object to form JSON
def dto(self):
return dict(
id = self.id,
title = self.title,
content = self.content,
user_id = self.user_id)
class Jempol(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
post_id = db.Column(db.Integer, db.ForeignKey('post.id'))
jempol_time = db.Column(db.DateTime)
# data transfer object to form JSON
def dto(self):
return dict(
id = self.id,
user_id = self.user_id,
post_id = self.post_id,
jempol_time = self.jempol_time)
class Tag(db.Model):
id = db.Column(db.Integer, primary_key=True)
desc = db.Column(db.String(100))
    # Posts for a tag are reachable through the 'post_tags' backref created by Post.tags.
# data transfer object to form JSON
def dto(self):
return dict(
id = self.id,
desc = self.desc)
class Comment(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
post_id = db.Column(db.Integer, db.ForeignKey('post.id'))
comment = db.Column(db.Text)
comment_time = db.Column(db.DateTime)
# data transfer object to form JSON
def dto(self):
return dict(
id = self.id,
user_id = self.user_id,
post_id = self.post_id,
comment = self.comment,
comment_time = self.comment_time)
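# --- Illustrative sketch (not part of the original module) ---
# The dto() methods above exist so instances can be handed to a JSON
# serializer without exposing SQLAlchemy internals:
def _example_serialize(models):
    """Illustrative only: JSON-ready dicts from a list of model instances."""
    return [m.dto() for m in models]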
| {
"content_hash": "cc3c19e7b788ef140a4998b011b7f67f",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 101,
"avg_line_length": 34.66265060240964,
"alnum_prop": 0.6124435175530066,
"repo_name": "femmerling/nembok",
"id": "1526775c0eba7c6d4a66656f2eb1a63617ca0671",
"size": "2877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "682"
},
{
"name": "Python",
"bytes": "161340"
}
],
"symlink_target": ""
} |
"""
The most suitable default reactor for the current platform.
Depending on a specific application's needs, some other reactor may in
fact be better.
"""
from __future__ import division, absolute_import
__all__ = ["install"]
from twisted.python.runtime import platform
def _getInstallFunction(platform):
"""
Return a function to install the reactor most suited for the given platform.
@param platform: The platform for which to select a reactor.
@type platform: L{twisted.python.runtime.Platform}
@return: A zero-argument callable which will install the selected
reactor.
"""
# Linux: epoll(7) is the default, since it scales well.
#
# OS X: poll(2) is not exposed by Python because it doesn't support all
# file descriptors (in particular, lack of PTY support is a problem) --
# see <http://bugs.python.org/issue5154>. kqueue has the same restrictions
    # as poll(2) as far as PTY support goes.
#
# Windows: IOCP should eventually be default, but still has some serious
# bugs, e.g. <http://twistedmatrix.com/trac/ticket/4667>.
#
# We therefore choose epoll(7) on Linux, poll(2) on other non-OS X POSIX
# platforms, and select(2) everywhere else.
try:
if platform.isLinux():
try:
from twisted.internet.epollreactor import install
except ImportError:
from twisted.internet.pollreactor import install
elif platform.getType() == 'posix' and not platform.isMacOSX():
from twisted.internet.pollreactor import install
else:
from twisted.internet.selectreactor import install
except ImportError:
from twisted.internet.selectreactor import install
return install
install = _getInstallFunction(platform)
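As a usage sketch (standard Twisted practice, not specific to this file):
install the default reactor before anything imports twisted.internet.reactor,
then drive the event loop.
from twisted.internet import default
default.install()  # must run before twisted.internet.reactor is imported
from twisted.internet import reactor
# Schedule a call and run the loop; reactor.stop() ends it.
reactor.callLater(1.0, reactor.stop)
reactor.run()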
| {
"content_hash": "d1ec46eff4fde496c6b374d3fb5686e5",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 80,
"avg_line_length": 35.094339622641506,
"alnum_prop": 0.6634408602150538,
"repo_name": "hlzz/dotfiles",
"id": "f1743b9ca10d62963ffbbca99a9dc341d644e688",
"size": "1996",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/internet/default.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "1240"
},
{
"name": "Arc",
"bytes": "38"
},
{
"name": "Assembly",
"bytes": "449468"
},
{
"name": "Batchfile",
"bytes": "16152"
},
{
"name": "C",
"bytes": "102303195"
},
{
"name": "C++",
"bytes": "155056606"
},
{
"name": "CMake",
"bytes": "7200627"
},
{
"name": "CSS",
"bytes": "179330"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "Emacs Lisp",
"bytes": "14892"
},
{
"name": "FORTRAN",
"bytes": "5276"
},
{
"name": "Forth",
"bytes": "3637"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "438205"
},
{
"name": "Gnuplot",
"bytes": "327"
},
{
"name": "Groff",
"bytes": "518260"
},
{
"name": "HLSL",
"bytes": "965"
},
{
"name": "HTML",
"bytes": "2003175"
},
{
"name": "Haskell",
"bytes": "10370"
},
{
"name": "IDL",
"bytes": "2466"
},
{
"name": "Java",
"bytes": "219109"
},
{
"name": "JavaScript",
"bytes": "1618007"
},
{
"name": "Lex",
"bytes": "119058"
},
{
"name": "Lua",
"bytes": "23167"
},
{
"name": "M",
"bytes": "1080"
},
{
"name": "M4",
"bytes": "292475"
},
{
"name": "Makefile",
"bytes": "7112810"
},
{
"name": "Matlab",
"bytes": "1582"
},
{
"name": "NSIS",
"bytes": "34176"
},
{
"name": "Objective-C",
"bytes": "65312"
},
{
"name": "Objective-C++",
"bytes": "269995"
},
{
"name": "PAWN",
"bytes": "4107117"
},
{
"name": "PHP",
"bytes": "2690"
},
{
"name": "Pascal",
"bytes": "5054"
},
{
"name": "Perl",
"bytes": "485508"
},
{
"name": "Pike",
"bytes": "1338"
},
{
"name": "Prolog",
"bytes": "5284"
},
{
"name": "Python",
"bytes": "16799659"
},
{
"name": "QMake",
"bytes": "89858"
},
{
"name": "Rebol",
"bytes": "291"
},
{
"name": "Ruby",
"bytes": "21590"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "2266191"
},
{
"name": "Slash",
"bytes": "1536"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Swift",
"bytes": "331"
},
{
"name": "Tcl",
"bytes": "1911873"
},
{
"name": "TeX",
"bytes": "11981"
},
{
"name": "Verilog",
"bytes": "3893"
},
{
"name": "VimL",
"bytes": "595114"
},
{
"name": "XSLT",
"bytes": "62675"
},
{
"name": "Yacc",
"bytes": "307000"
},
{
"name": "eC",
"bytes": "366863"
}
],
"symlink_target": ""
} |
from sqlalchemy.testing import assert_raises
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, Text
from sqlalchemy.sql.sqltypes import ARRAY, JSON
from sqlalchemy.testing.schema import Column
from sqlalchemy.orm import Session
from sqlalchemy.testing import fixtures
from sqlalchemy.ext.indexable import index_property
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.testing import eq_, ne_, is_, in_, not_in_
from sqlalchemy import inspect
class IndexPropertyTest(fixtures.TestBase):
def test_array(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column('id', Integer, primary_key=True)
array = Column('_array', ARRAY(Integer),
default=[])
first = index_property('array', 0)
tenth = index_property('array', 9)
a = A(array=[1, 2, 3])
eq_(a.first, 1)
assert_raises(AttributeError, lambda: a.tenth)
a.first = 100
eq_(a.first, 100)
eq_(a.array, [100, 2, 3])
del a.first
eq_(a.first, 2)
a2 = A(first=5)
eq_(a2.first, 5)
eq_(a2.array, [5])
def test_array_longinit(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column('id', Integer, primary_key=True)
array = Column('_array', ARRAY(Integer),
default=[])
first = index_property('array', 0)
fifth = index_property('array', 4)
a1 = A(fifth=10)
a2 = A(first=5)
eq_(a1.array, [None, None, None, None, 10])
eq_(a2.array, [5])
assert_raises(IndexError, setattr, a2, "fifth", 10)
def test_json(self):
Base = declarative_base()
class J(Base):
__tablename__ = 'j'
id = Column('id', Integer, primary_key=True)
json = Column('_json', JSON, default={})
field = index_property('json', 'field')
j = J(json={'a': 1, 'b': 2})
assert_raises(AttributeError, lambda: j.field)
j.field = 'test'
eq_(j.field, 'test')
eq_(j.json, {'a': 1, 'b': 2, 'field': 'test'})
j2 = J(field='test')
eq_(j2.json, {"field": "test"})
eq_(j2.field, "test")
def test_value_is_none_attributeerror(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column('id', Integer, primary_key=True)
array = Column('_array', ARRAY(Integer))
first = index_property('array', 1)
a = A()
assert_raises(AttributeError, getattr, a, "first")
assert_raises(AttributeError, delattr, a, "first")
def test_get_attribute_error(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column('id', Integer, primary_key=True)
array = Column('_array', ARRAY(Integer))
first = index_property('array', 1)
a = A(array=[])
assert_raises(AttributeError, lambda: a.first)
def test_set_immutable(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True)
array = Column(ARRAY(Integer))
first = index_property('array', 1, mutable=False)
a = A()
def set_():
a.first = 10
assert_raises(AttributeError, set_)
def test_set_mutable_dict(self):
Base = declarative_base()
class J(Base):
__tablename__ = 'j'
id = Column(Integer, primary_key=True)
json = Column(JSON, default={})
field = index_property('json', 'field')
j = J()
j.field = 10
j.json = {}
assert_raises(AttributeError, lambda: j.field)
assert_raises(AttributeError, delattr, j, "field")
j.field = 10
eq_(j.field, 10)
def test_get_default_value(self):
Base = declarative_base()
class J(Base):
__tablename__ = 'j'
id = Column(Integer, primary_key=True)
json = Column(JSON, default={})
default = index_property('json', 'field', default='default')
none = index_property('json', 'field', default=None)
j = J()
assert j.json is None
assert j.default == 'default'
assert j.none is None
j.json = {}
assert j.default == 'default'
assert j.none is None
j.default = None
assert j.default is None
assert j.none is None
j.none = 10
        assert j.default == 10
assert j.none == 10
class IndexPropertyArrayTest(fixtures.DeclarativeMappedTest):
__requires__ = ('array_type',)
__backend__ = True
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Array(fixtures.ComparableEntity, Base):
__tablename__ = "array"
id = Column(sa.Integer, primary_key=True,
test_needs_autoincrement=True)
array = Column(ARRAY(Integer), default=[])
array0 = Column(ARRAY(Integer, zero_indexes=True), default=[])
first = index_property('array', 0)
first0 = index_property('array0', 0, onebased=False)
def test_query(self):
Array = self.classes.Array
s = Session(testing.db)
s.add_all([
Array(),
Array(array=[1, 2, 3], array0=[1, 2, 3]),
Array(array=[4, 5, 6], array0=[4, 5, 6])])
s.commit()
a1 = s.query(Array).filter(Array.array == [1, 2, 3]).one()
a2 = s.query(Array).filter(Array.first == 1).one()
eq_(a1.id, a2.id)
a3 = s.query(Array).filter(Array.first == 4).one()
ne_(a1.id, a3.id)
a4 = s.query(Array).filter(Array.first0 == 1).one()
eq_(a1.id, a4.id)
a5 = s.query(Array).filter(Array.first0 == 4).one()
ne_(a1.id, a5.id)
def test_mutable(self):
Array = self.classes.Array
s = Session(testing.db)
a = Array(array=[1, 2, 3])
s.add(a)
s.commit()
a.first = 42
eq_(a.first, 42)
s.commit()
eq_(a.first, 42)
del a.first
eq_(a.first, 2)
s.commit()
eq_(a.first, 2)
def test_modified(self):
from sqlalchemy import inspect
Array = self.classes.Array
s = Session(testing.db)
a = Array(array=[1, 2, 3])
s.add(a)
s.commit()
i = inspect(a)
is_(i.modified, False)
in_('array', i.unmodified)
a.first = 10
is_(i.modified, True)
not_in_('array', i.unmodified)
class IndexPropertyJsonTest(fixtures.DeclarativeMappedTest):
# TODO: remove reliance on "astext" for these tests
__requires__ = ('json_type',)
__only_on__ = 'postgresql'
__backend__ = True
@classmethod
def setup_classes(cls):
from sqlalchemy.dialects.postgresql import JSON
Base = cls.DeclarativeBasic
class json_property(index_property):
def __init__(self, attr_name, index, cast_type):
super(json_property, self).__init__(attr_name, index)
self.cast_type = cast_type
def expr(self, model):
expr = super(json_property, self).expr(model)
return expr.astext.cast(self.cast_type)
class Json(fixtures.ComparableEntity, Base):
__tablename__ = "json"
id = Column(sa.Integer, primary_key=True,
test_needs_autoincrement=True)
json = Column(JSON, default={})
field = index_property('json', 'field')
json_field = index_property('json', 'field')
int_field = json_property('json', 'field', Integer)
text_field = json_property('json', 'field', Text)
other = index_property('json', 'other')
subfield = json_property('other', 'field', Text)
def test_query(self):
Json = self.classes.Json
s = Session(testing.db)
s.add_all([
Json(),
Json(json={'field': 10}),
Json(json={'field': 20})])
s.commit()
a1 = s.query(Json).filter(Json.json['field'].astext.cast(Integer) == 10)\
.one()
a2 = s.query(Json).filter(Json.field.astext == '10').one()
eq_(a1.id, a2.id)
a3 = s.query(Json).filter(Json.field.astext == '20').one()
ne_(a1.id, a3.id)
a4 = s.query(Json).filter(Json.json_field.astext == '10').one()
eq_(a2.id, a4.id)
a5 = s.query(Json).filter(Json.int_field == 10).one()
eq_(a2.id, a5.id)
a6 = s.query(Json).filter(Json.text_field == '10').one()
eq_(a2.id, a6.id)
def test_mutable(self):
Json = self.classes.Json
s = Session(testing.db)
j = Json(json={})
s.add(j)
s.commit()
j.other = 42
eq_(j.other, 42)
s.commit()
eq_(j.other, 42)
def test_modified(self):
Json = self.classes.Json
s = Session(testing.db)
j = Json(json={})
s.add(j)
s.commit()
i = inspect(j)
is_(i.modified, False)
in_('json', i.unmodified)
j.other = 42
is_(i.modified, True)
not_in_('json', i.unmodified)
def test_cast_type(self):
Json = self.classes.Json
s = Session(testing.db)
j = Json(json={'field': 10})
s.add(j)
s.commit()
jq = s.query(Json).filter(Json.int_field == 10).one()
eq_(j.id, jq.id)
jq = s.query(Json).filter(Json.text_field == '10').one()
eq_(j.id, jq.id)
jq = s.query(Json).filter(Json.json_field.astext == '10').one()
eq_(j.id, jq.id)
jq = s.query(Json).filter(Json.text_field == 'wrong').first()
is_(jq, None)
j.json = {'field': True}
s.commit()
jq = s.query(Json).filter(Json.text_field == 'true').one()
eq_(j.id, jq.id)
def test_multi_dimension(self):
Json = self.classes.Json
s = Session(testing.db)
j = Json(json={'other': {'field': 'multi'}})
s.add(j)
s.commit()
eq_(j.other, {'field': 'multi'})
eq_(j.subfield, 'multi')
jq = s.query(Json).filter(Json.subfield == 'multi').first()
eq_(j.id, jq.id)
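The pattern exercised above reduces to a few lines. A minimal in-memory
sketch, assuming a JSON column as in the tests; the Document model and the
'title' key are illustrative only:
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.indexable import index_property
from sqlalchemy.sql.sqltypes import JSON
Base = declarative_base()
class Document(Base):
    __tablename__ = 'document'
    id = Column(Integer, primary_key=True)
    body = Column(JSON, default={})
    # Expose body['title'] as a plain attribute.
    title = index_property('body', 'title')
doc = Document(body={'title': 'draft'})
assert doc.title == 'draft'
doc.title = 'final'  # writes through to body['title']
assert doc.body['title'] == 'final'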
| {
"content_hash": "70f3dd27c123b5d41ff049791a043953",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 81,
"avg_line_length": 28.302139037433154,
"alnum_prop": 0.5250826641473784,
"repo_name": "robin900/sqlalchemy",
"id": "3df49cf86a9b6d391723ddb2223f38f56c31d8d8",
"size": "10585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/ext/test_indexable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46256"
},
{
"name": "Python",
"bytes": "9080563"
}
],
"symlink_target": ""
} |
from django.conf import settings
from openstack_dashboard import api
from django.contrib import messages
from openstackx.api import exceptions as api_exceptions
def tenants(request):
if not request.user or not request.user.is_authenticated():
return {}
try:
return {'tenants': api.token_list_tenants(request, request.user.token)}
    except api_exceptions.BadRequest, e:
        messages.error(request, "Unable to retrieve tenant list from "
                                "keystone: %s" % e.message)
return {'tenants': []}
def swift(request):
return {'swift_configured': settings.SWIFT_ENABLED}
def quantum(request):
return {'quantum_configured': settings.QUANTUM_ENABLED}
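These processors only take effect once registered in Django settings. A
minimal sketch for a Django of this vintage; the first two entries are an
assumption, while the three dotted paths to this module follow from the
repository layout shown in the metadata:
# settings.py (excerpt)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.contrib.messages.context_processors.messages',
    'openstack_dashboard.context_processors.tenants',
    'openstack_dashboard.context_processors.swift',
    'openstack_dashboard.context_processors.quantum',
)
SWIFT_ENABLED = False
QUANTUM_ENABLED = False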
| {
"content_hash": "499e38e5605ff5fcd4a5b313f295e073",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 31.347826086956523,
"alnum_prop": 0.6851595006934813,
"repo_name": "griddynamics/osc-robot-openstack-dashboard",
"id": "a21addf3eef053a5cb188bad77078ad65e040e61",
"size": "1551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/context_processors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "89587"
},
{
"name": "Python",
"bytes": "280204"
},
{
"name": "Shell",
"bytes": "416"
}
],
"symlink_target": ""
} |
"""Creates a zip file that can be used by manual testers.
python chrome/test/mini_installer/create_zip.py
This will drop <OUTPUT_DIR>\mini_installer_tests.zip onto the disk.
To generate an easy-to-distribute package for manual testers, add the current
and previous installers as mini_installer.exe and
previous_version_mini_installer.exe, as well as chromedriver.exe.
The resulting zip can be handed out, extracted into c:\mini_installer_tests,
and run by following the README.
chromedriver.exe can be obtained in one of two ways: download it from
http://chromedriver.chromium.org/downloads, or build it locally. If you build
it locally, make sure you are not building as components, else you will
have to copy some dlls to the chromedriver.exe dir.
Note: This does not zip the executables by default. However paths to the
current, previous, and chromedriver binaries can be passed to be zipped.
The easiest way to package everything is to run:
python chrome\test\mini_installer\create_zip.py ^
-o <ZIP_FILE_OUTPUT_PATH> ^
-i <CURRENT_INSTALLER_PATH> ^
-p <PREVIOUS_INSTALLER_PATH> ^
-c <CHROMEDRIVER_PATH>
This will drop a zip file, making it simple to distribute everything the tests need.
When the runner batch script is run it will install the python packages
required by the tests to further reduce the overhead of running the tests.
The directory structure is also preserved, so when the tests are run from
run_tests.bat all of the import paths are correct. __init__.py files
are dropped in any empty folders to make them importable.
"""
import argparse
import logging
import os
import re
import sys
import zipfile
THIS_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
SRC_DIR = os.path.join(THIS_DIR, '..', '..', '..')
SELENIUM_PATH = os.path.abspath(os.path.join(
SRC_DIR, r'third_party', 'webdriver', 'pylib'))
BLACKLIST = ['', '.pyc', '.gn', '.gni', '.txt', '.bat']
def ArchiveDirectory(path, zipf):
"""Archive an entire directory and subdirectories.
This will skip files that have an extension in BLACKLIST.
Args:
path: The path to the current directory.
zipf: A handle to a ZipFile instance.
"""
logging.debug('Archiving %s', path)
for c_path in [os.path.join(path, name) for name in os.listdir(path)]:
if os.path.isfile(c_path):
if os.path.splitext(c_path)[-1] in BLACKLIST:
continue
logging.debug('Adding %s', os.path.relpath(c_path, SRC_DIR))
zipf.write(c_path, os.path.relpath(c_path, SRC_DIR))
elif os.path.isdir(c_path):
ArchiveDirectory(c_path, zipf)
def main():
logging.basicConfig(
format='[%(asctime)s:%(filename)s(%(lineno)d)] %(message)s',
datefmt='%m%d/%H%M%S', level=logging.INFO)
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--output-path', default='installer_tests.zip',
help='The path to write the zip file to')
parser.add_argument('--installer-path', default='',
help='The path to the current installer. This is '
'optional. If passed it will be zipped as '
'mini_installer.exe')
parser.add_argument('--previous-version-installer-path', default='',
help='The path to the previous installer. This is '
'optional. If passed it will be zipped as '
'previous_version_mini_installer.exe')
parser.add_argument('--chromedriver-path', default='',
help='The path to chromedriver.exe. This is '
'optional.')
args = parser.parse_args()
with zipfile.ZipFile(args.output_path, 'w') as zipf:
# Setup chrome\test\mini_installer as importable in Python
zipf.writestr(os.path.join('chrome', '__init__.py'), '')
zipf.writestr(os.path.join('chrome', 'test', '__init__.py'), '')
zipf.writestr(
os.path.join('chrome', 'test', 'mini_installer', '__init__.py'), '')
run_args = []
# Add any of the executables
if args.installer_path:
installer_name = os.path.split(args.installer_path)[-1]
run_args.append('--installer-path=' + installer_name)
logging.debug('Archiving: %s', installer_name)
zipf.write(args.installer_path, installer_name)
if args.previous_version_installer_path:
previous_version_installer_name = os.path.split(
args.previous_version_installer_path)[-1]
run_args.append(
'--previous-version-installer-path=' + previous_version_installer_name)
logging.debug('Archiving: %s', previous_version_installer_name)
zipf.write(
args.previous_version_installer_path, previous_version_installer_name)
if args.chromedriver_path:
chromedriver_name = os.path.split(args.chromedriver_path)[-1]
run_args.append('--chromedriver-path=' + chromedriver_name)
logging.debug('Archiving: %s', chromedriver_name)
zipf.write(
args.chromedriver_path, chromedriver_name)
# Add the top level files
with open(os.path.join(THIS_DIR, 'zip_test_runner.bat')) as rh:
text = rh.read().format(run_args=' '.join(run_args))
text = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", text)
zipf.writestr('zip_test_runner.bat', text)
    zipf.write(os.path.join(THIS_DIR, 'ZIP_README.txt'), 'README.txt')
# Archive this and the chromedriver code directories
logging.debug('Zipping chrome/test/mini_installer')
ArchiveDirectory(THIS_DIR, zipf)
logging.debug('Zipping third_party/webdriver/pylib')
ArchiveDirectory(SELENIUM_PATH, zipf)
logging.debug('Wrote zip to %s', args.output_path)
return 0
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "104548bf85bc0df3ca2cb860ce58f0d1",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 80,
"avg_line_length": 40.478873239436616,
"alnum_prop": 0.674669450243563,
"repo_name": "endlessm/chromium-browser",
"id": "e6c7fae418320cd1d904472365881c3e3c2327ae",
"size": "5911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chrome/test/mini_installer/create_zip.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
streams module contains just a Stream class. Basically you want to use only
this class and nothing else from the module.
"""
###############################################################################
from .stream import Stream
| {
"content_hash": "e86f4c9637baa6376ef0cad2a7aaf6e7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 79,
"avg_line_length": 24,
"alnum_prop": 0.5,
"repo_name": "9seconds/streams",
"id": "350422c156b59de8323c468448a3008b29d169f5",
"size": "264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "streams/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84904"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
} |
"""
Actions with musical files
"""
import click
import lyricstagger.log as log
import lyricstagger.misc as misc
def tag(logger: log.CliLogger, filepath: str, overwrite: bool = False) -> None:
"""Try to tag lyrics for given file"""
audio = misc.get_audio(filepath)
data = misc.get_tags(audio)
# we cannot find lyrics if we don't have tags
if data:
if overwrite or "lyrics" not in data:
lyrics = misc.fetch(data["artist"],
data["title"],
data["album"])
if lyrics:
logger.log_writing(filepath)
audio = misc.write_lyrics(audio, lyrics)
audio.save()
else:
logger.log_not_found(filepath)
def tag_force(logger: log.CliLogger, filepath: str) -> None:
"""Wrapper for tag action setting overwrite to true"""
tag(logger, filepath, overwrite=True)
def remove(logger: log.CliLogger, filepath: str) -> None:
"""Remove given file"""
audio = misc.get_audio(filepath)
logger.log_removing(filepath)
misc.remove_lyrics(audio)
audio.save()
def edit(logger: log.CliLogger, filepath: str) -> None:
"""Edit given file's lyrics with EDITOR"""
audio = misc.get_audio(filepath)
lyrics = misc.edit_lyrics(audio)
if lyrics:
logger.log_writing(filepath)
audio = misc.write_lyrics(audio, lyrics)
audio.save()
else:
logger.log_no_lyrics_saved(filepath)
def show(logger: log.CliLogger, filepath: str) -> None:
"""Pretty print lyrics from given file"""
audio = misc.get_audio(filepath)
data = misc.get_tags(audio)
if data and "lyrics" in data:
click.secho("%s" % click.format_filename(filepath), fg="blue")
click.secho("Artist: %s, Title: %s" % (data["artist"],
data["title"]),
fg="blue")
click.echo()
click.echo(data["lyrics"])
click.echo()
else:
logger.log_not_found(filepath)
click.secho("No lyrics in file '%s'" % filepath, fg="red")
def report(logger: log.CliLogger, filepath: str) -> None:
"""Show lyrics presence in given file"""
audio = misc.get_audio(filepath)
data = misc.get_tags(audio)
if data and 'lyrics' not in data:
logger.log_not_found(filepath)
click.secho("no lyrics: ", nl=False, fg="red")
else:
click.secho("lyrics found: ", nl=False, fg="green")
click.echo("%s" % click.format_filename(filepath))
| {
"content_hash": "f93ee243bcd4ea0606e562b05c52e691",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 32.46835443037975,
"alnum_prop": 0.5883040935672514,
"repo_name": "abulimov/lyricstagger",
"id": "17d7124935daec339a6e9572a08fbc6be6c19731",
"size": "2565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lyricstagger/actions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "60"
},
{
"name": "Python",
"bytes": "53380"
}
],
"symlink_target": ""
} |
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v12.enums",
marshal="google.ads.googleads.v12",
manifest={"NegativeGeoTargetTypeEnum",},
)
class NegativeGeoTargetTypeEnum(proto.Message):
r"""Container for enum describing possible negative geo target
types.
"""
class NegativeGeoTargetType(proto.Enum):
r"""The possible negative geo target types."""
UNSPECIFIED = 0
UNKNOWN = 1
PRESENCE_OR_INTEREST = 4
PRESENCE = 5
__all__ = tuple(sorted(__protobuf__.manifest))
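Since proto.Enum subclasses behave like standard Python int enums, the
values above can be read symbolically or by wire value; a small illustrative
sketch:
target = NegativeGeoTargetTypeEnum.NegativeGeoTargetType.PRESENCE_OR_INTEREST
assert target.name == "PRESENCE_OR_INTEREST"
assert target.value == 4
# Round-trip from the wire value back to the symbolic name.
assert NegativeGeoTargetTypeEnum.NegativeGeoTargetType(5).name == "PRESENCE"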
| {
"content_hash": "d8ac7fa0ff9539aed16c91e632114ba3",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 66,
"avg_line_length": 23.12,
"alnum_prop": 0.657439446366782,
"repo_name": "googleads/google-ads-python",
"id": "e3aa0d55722902e248ad3f9f9d43341c172f857c",
"size": "1178",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/enums/types/negative_geo_target_type.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
from . import web
from flask import (render_template,
abort, request, redirect,
url_for, flash, session)
from aslo.persistence.activity import Activity
from aslo.service import activity as activity_service
from flask_babel import gettext
@web.route('/', defaults={'page': 1})
@web.route('/page/<int:page>')
def index(page=1, items_per_page=9):
    # If ignore_lang is in the query parameters, show all apps, including non-translated ones
lang_code = session['lang_code']
ignore_lang = request.args.get('ignore_lang', False, type=bool)
if ignore_lang:
activities = activity_service.get_all(page=page)
else:
activities = activity_service.filter_by_lang_code(lang_code, page=page)
return render_template('index.html', activities=activities,
lang_code=lang_code, ignore_lang=ignore_lang)
@web.route('/<bundle_id>/<activity_version>', strict_slashes=False)
def activity_detail(bundle_id, activity_version):
lang_code = session['lang_code']
activity_version = float(activity_version)
activity = Activity.get_by_bundle_id(bundle_id)
if activity is None:
abort(404)
release = activity_service.find_release(activity, activity_version)
if release is None:
abort(404)
else:
return render_template('detail.html', activity=activity,
current_release=release, lang_code=lang_code)
@web.route('/search', methods=['GET', 'POST'])
@web.route('/search/page/<int:page>', methods=['GET', 'POST'])
def search(page=1, items_per_page=10):
if request.method == 'POST':
name = request.form['name']
else:
name = request.args.get('name')
if not name:
return redirect(url_for('web.index'))
lang_code = session['lang_code']
activities = activity_service.search_by_activity_name(
lang_code=lang_code, activity_name=name, page=page
)
flash(gettext("Search Results for {}").format(name), 'success')
return render_template('index.html', activities=activities,
search_query=name, lang_code=lang_code)
| {
"content_hash": "fa3095a538cca6c5d14e2960e3203a91",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 79,
"avg_line_length": 36.758620689655174,
"alnum_prop": 0.650093808630394,
"repo_name": "jatindhankhar/aslo-v3",
"id": "6ce29253b102e80c796b0f310e8230e83d5f675c",
"size": "2132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aslo/web/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "84716"
},
{
"name": "Dockerfile",
"bytes": "186"
},
{
"name": "HTML",
"bytes": "29940"
},
{
"name": "JavaScript",
"bytes": "198"
},
{
"name": "Python",
"bytes": "37199"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
} |
import datetime
import time
import zlib
from nova import context
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.tests import fake_network
from nova.tests.integrated.api import client
from nova.tests.integrated import integrated_helpers
import nova.virt.fake
LOG = logging.getLogger(__name__)
class ServersTest(integrated_helpers._IntegratedTestBase):
def setUp(self):
super(ServersTest, self).setUp()
self.conductor = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
def _wait_for_state_change(self, server, from_status):
for i in xrange(0, 50):
server = self.api.get_server(server['id'])
if server['status'] != from_status:
break
time.sleep(.1)
return server
def _restart_compute_service(self, *args, **kwargs):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
self.compute = self.start_service('compute', *args, **kwargs)
def test_get_servers(self):
# Simple check that listing servers works.
servers = self.api.get_servers()
for server in servers:
LOG.debug("server: %s" % server)
def test_create_server_with_error(self):
# Create a server which will enter error state.
fake_network.set_stub_network_methods(self.stubs)
def throw_error(*_):
raise Exception()
self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ERROR', found_server['status'])
self._delete_server(created_server_id)
def test_create_and_delete_server(self):
# Creates and deletes a server.
fake_network.set_stub_network_methods(self.stubs)
# Create server
# Build the server data gradually, checking errors along the way
server = {}
good_server = self._build_minimal_create_server_request()
post = {'server': server}
# Without an imageRef, this throws 500.
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# With an invalid imageRef, this throws 500.
server['imageRef'] = self.get_invalid_image()
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Add a valid imageRef
server['imageRef'] = good_server.get('imageRef')
# Without flavorRef, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
server['flavorRef'] = good_server.get('flavorRef')
# Without a name, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Set a valid server name
server['name'] = good_server['name']
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
# It should also be in the all-servers list
servers = self.api.get_servers()
server_ids = [s['id'] for s in servers]
self.assertTrue(created_server_id in server_ids)
found_server = self._wait_for_state_change(found_server, 'BUILD')
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
self.assertEqual('ACTIVE', found_server['status'])
servers = self.api.get_servers(detail=True)
for server in servers:
self.assertTrue("image" in server)
self.assertTrue("flavor" in server)
self._delete_server(created_server_id)
def _force_reclaim(self):
# Make sure that compute manager thinks the instance is
# old enough to be expired
the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
timeutils.set_time_override(override_time=the_past)
ctxt = context.get_admin_context()
self.compute._reclaim_queued_deletes(ctxt)
def test_deferred_delete(self):
# Creates, deletes and waits for server to be reclaimed.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Cannot restore unless instance is deleted
self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, created_server_id,
{'restore': {}})
# Cannot forceDelete unless instance is deleted
self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, created_server_id,
{'forceDelete': {}})
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('DELETED', found_server['status'])
self._force_reclaim()
# Wait for real deletion
self._wait_for_deletion(created_server_id)
def test_deferred_delete_restore(self):
# Creates, deletes and restores a server.
self.flags(reclaim_instance_interval=3600)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('DELETED', found_server['status'])
# Restore server
self.api.post_server_action(created_server_id, {'restore': {}})
# Wait for server to become active again
found_server = self._wait_for_state_change(found_server, 'DELETED')
self.assertEqual('ACTIVE', found_server['status'])
def test_deferred_delete_force(self):
# Creates, deletes and force deletes a server.
self.flags(reclaim_instance_interval=3600)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('DELETED', found_server['status'])
# Force delete server
self.api.post_server_action(created_server_id, {'forceDelete': {}})
# Wait for real deletion
self._wait_for_deletion(created_server_id)
def _wait_for_deletion(self, server_id):
# Wait (briefly) for deletion
for _retries in range(50):
try:
found_server = self.api.get_server(server_id)
except client.OpenStackApiNotFoundException:
found_server = None
LOG.debug("Got 404, proceeding")
break
LOG.debug("Found_server=%s" % found_server)
# TODO(justinsb): Mock doesn't yet do accurate state changes
#if found_server['status'] != 'deleting':
# break
time.sleep(.1)
# Should be gone
self.assertFalse(found_server)
def _delete_server(self, server_id):
# Delete the server
self.api.delete_server(server_id)
self._wait_for_deletion(server_id)
def test_create_server_with_metadata(self):
# Creates a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# Build the server data gradually, checking errors along the way
server = self._build_minimal_create_server_request()
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server['metadata'] = metadata
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers details list
servers = self.api.get_servers(detail=True)
server_map = dict((server['id'], server) for server in servers)
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Details do include metadata
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers summary list
servers = self.api.get_servers(detail=False)
server_map = dict((server['id'], server) for server in servers)
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Summary should not include metadata
self.assertFalse(found_server.get('metadata'))
# Cleanup
self._delete_server(created_server_id)
def test_create_and_rebuild_server(self):
# Rebuild a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
server_post = {'server': server}
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server_post['server']['metadata'] = metadata
created_server = self.api.post_server(server_post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata and other server attributes
post = {}
post['rebuild'] = {
"imageRef": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"name": "blah",
"accessIPv4": "172.19.0.2",
"accessIPv6": "fe80::2",
"metadata": {'some': 'thing'},
}
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild']['imageRef'],
found_server.get('image')['id'])
self.assertEqual('172.19.0.2', found_server['accessIPv4'])
self.assertEqual('fe80::2', found_server['accessIPv6'])
# rebuild the server with empty metadata and nothing else
post = {}
post['rebuild'] = {
"imageRef": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"metadata": {},
}
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild']['imageRef'],
found_server.get('image')['id'])
self.assertEqual('172.19.0.2', found_server['accessIPv4'])
self.assertEqual('fe80::2', found_server['accessIPv6'])
# Cleanup
self._delete_server(created_server_id)
def test_rename_server(self):
# Test building and renaming a server.
fake_network.set_stub_network_methods(self.stubs)
# Create a server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
server_id = created_server['id']
self.assertTrue(server_id)
# Rename the server to 'new-name'
self.api.put_server(server_id, {'server': {'name': 'new-name'}})
# Check the name of the server
created_server = self.api.get_server(server_id)
self.assertEqual(created_server['name'], 'new-name')
# Cleanup
self._delete_server(server_id)
def test_create_multiple_servers(self):
# Creates multiple servers and checks for reservation_id.
        # Create 2 servers, setting 'return_reservation_id', which should
# return a reservation_id
server = self._build_minimal_create_server_request()
server['min_count'] = 2
server['return_reservation_id'] = True
post = {'server': server}
response = self.api.post_server(post)
self.assertIn('reservation_id', response)
reservation_id = response['reservation_id']
self.assertNotIn(reservation_id, ['', None])
# Create 1 more server, which should not return a reservation_id
server = self._build_minimal_create_server_request()
post = {'server': server}
created_server = self.api.post_server(post)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# lookup servers created by the first request.
servers = self.api.get_servers(detail=True,
search_opts={'reservation_id': reservation_id})
server_map = dict((server['id'], server) for server in servers)
found_server = server_map.get(created_server_id)
# The server from the 2nd request should not be there.
self.assertEqual(found_server, None)
# Should have found 2 servers.
self.assertEqual(len(server_map), 2)
# Cleanup
self._delete_server(created_server_id)
for server_id in server_map.iterkeys():
self._delete_server(server_id)
def test_create_server_with_injected_files(self):
# Creates a server with injected_files.
fake_network.set_stub_network_methods(self.stubs)
personality = []
# Inject a text file
data = 'Hello, World!'
personality.append({
'path': '/helloworld.txt',
'contents': data.encode('base64'),
})
# Inject a binary file
data = zlib.compress('Hello, World!')
personality.append({
'path': '/helloworld.zip',
'contents': data.encode('base64'),
})
# Create server
server = self._build_minimal_create_server_request()
server['personality'] = personality
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ACTIVE', found_server['status'])
# Cleanup
self._delete_server(created_server_id)
| {
"content_hash": "0af78aaaf82ea1698ae2c4a7cb21f977",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 79,
"avg_line_length": 37.82203389830509,
"alnum_prop": 0.6185861528120099,
"repo_name": "plumgrid/plumgrid-nova",
"id": "ff20eccc1c72820c40e06d77bfa00fd51b374de9",
"size": "18533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/integrated/test_servers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11944269"
},
{
"name": "Shell",
"bytes": "17148"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from wechatpy.client.api.base import BaseWeChatAPI
class MerchantStock(BaseWeChatAPI):
def add(self, product_id, quantity, sku_info=''):
return self._post(
'merchant/stock/add',
data={
'product_id': product_id,
'quantity': quantity,
'sku_info': sku_info
}
)
def reduce(self, product_id, quantity, sku_info=''):
return self._post(
'merchant/stock/reduce',
data={
'product_id': product_id,
'quantity': quantity,
'sku_info': sku_info
}
)
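A hedged usage sketch: wechatpy attaches API objects to WeChatClient, and
the client.merchant.stock attribute path is an assumption inferred from the
module layout (wechatpy/client/api/merchant/stock.py); the ids are
placeholders:
from wechatpy import WeChatClient
client = WeChatClient('app_id', 'app_secret')  # placeholder credentials
# Add 5 units of stock for one SKU of a product.
client.merchant.stock.add(
    product_id='product-id',
    quantity=5,
    sku_info='sku-id:value-id',
)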
| {
"content_hash": "a429d88e7cb8ec01c826736b5ab7d38e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 56,
"avg_line_length": 28.16,
"alnum_prop": 0.5085227272727273,
"repo_name": "mruse/wechatpy",
"id": "bfe09de6cb48201c65422eb10319d9faf0d8ed95",
"size": "728",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "wechatpy/client/api/merchant/stock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "344983"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import contextlib
import copy
import weakref
from collections.abc import Callable, Iterable, Sequence
from dataclasses import dataclass
import os
import pprint
import textwrap
from typing import Any, Optional, TextIO, Union
import ruamel.yaml
from mitmproxy import exceptions
from mitmproxy.utils import signals, typecheck
"""
The base implementation for Options.
"""
unset = object()
class _Option:
__slots__ = ("name", "typespec", "value", "_default", "choices", "help")
def __init__(
self,
name: str,
typespec: Union[type, object], # object for Optional[x], which is not a type.
default: Any,
help: str,
choices: Optional[Sequence[str]],
) -> None:
typecheck.check_option_type(name, default, typespec)
self.name = name
self.typespec = typespec
self._default = default
self.value = unset
self.help = textwrap.dedent(help).strip().replace("\n", " ")
self.choices = choices
def __repr__(self):
return f"{self.current()} [{self.typespec}]"
@property
def default(self):
return copy.deepcopy(self._default)
def current(self) -> Any:
if self.value is unset:
v = self.default
else:
v = self.value
return copy.deepcopy(v)
def set(self, value: Any) -> None:
typecheck.check_option_type(self.name, value, self.typespec)
self.value = value
def reset(self) -> None:
self.value = unset
def has_changed(self) -> bool:
return self.current() != self.default
def __eq__(self, other) -> bool:
for i in self.__slots__:
if getattr(self, i) != getattr(other, i):
return False
return True
def __deepcopy__(self, _):
o = _Option(self.name, self.typespec, self.default, self.help, self.choices)
if self.has_changed():
o.value = self.current()
return o
@dataclass
class _UnconvertedStrings:
val: list[str]
def _sig_changed_spec(updated: set[str]) -> None: # pragma: no cover
... # expected function signature for OptManager.changed receivers.
def _sig_errored_spec(exc: Exception) -> None: # pragma: no cover
... # expected function signature for OptManager.errored receivers.
class OptManager:
"""
OptManager is the base class from which Options objects are derived.
.changed is a Signal that triggers whenever options are
updated. If any handler in the chain raises an exceptions.OptionsError
exception, all changes are rolled back, the exception is suppressed,
and the .errored signal is notified.
Optmanager always returns a deep copy of options to ensure that
mutation doesn't change the option state inadvertently.
"""
def __init__(self):
self.deferred: dict[str, Any] = {}
self.changed = signals.SyncSignal(_sig_changed_spec)
self.changed.connect(self._notify_subscribers)
self.errored = signals.SyncSignal(_sig_errored_spec)
self._subscriptions: list[tuple[weakref.ref[Callable], set[str]]] = []
# Options must be the last attribute here - after that, we raise an
# error for attribute assignment to unknown options.
self._options: dict[str, Any] = {}
def add_option(
self,
name: str,
typespec: Union[type, object],
default: Any,
help: str,
choices: Optional[Sequence[str]] = None,
) -> None:
self._options[name] = _Option(name, typespec, default, help, choices)
self.changed.send(updated={name})
@contextlib.contextmanager
def rollback(self, updated, reraise=False):
old = copy.deepcopy(self._options)
try:
yield
except exceptions.OptionsError as e:
# Notify error handlers
self.errored.send(exc=e)
# Rollback
self.__dict__["_options"] = old
self.changed.send(updated=updated)
if reraise:
raise e
def subscribe(self, func, opts):
"""
Subscribe a callable to the .changed signal, but only for a
specified list of options. The callable should accept arguments
(options, updated), and may raise an OptionsError.
The event will automatically be unsubscribed if the callable goes out of scope.
"""
for i in opts:
if i not in self._options:
raise exceptions.OptionsError("No such option: %s" % i)
self._subscriptions.append(
(signals.make_weak_ref(func), set(opts))
)
def _notify_subscribers(self, updated) -> None:
cleanup = False
for (ref, opts) in self._subscriptions:
callback = ref()
if callback is not None:
if opts & updated:
callback(self, updated)
else:
cleanup = True
if cleanup:
self.__dict__["_subscriptions"] = [
(ref, opts) for (ref, opts) in self._subscriptions if ref() is not None
]
def __eq__(self, other):
if isinstance(other, OptManager):
return self._options == other._options
return False
def __deepcopy__(self, memodict=None):
o = OptManager()
o.__dict__["_options"] = copy.deepcopy(self._options, memodict)
return o
__copy__ = __deepcopy__
def __getattr__(self, attr):
if attr in self._options:
return self._options[attr].current()
else:
raise AttributeError("No such option: %s" % attr)
def __setattr__(self, attr, value):
# This is slightly tricky. We allow attributes to be set on the instance
# until we have an _options attribute. After that, assignment is sent to
# the update function, and will raise an error for unknown options.
opts = self.__dict__.get("_options")
if not opts:
super().__setattr__(attr, value)
else:
self.update(**{attr: value})
def keys(self):
return set(self._options.keys())
def items(self):
return self._options.items()
def __contains__(self, k):
return k in self._options
def reset(self):
"""
Restore defaults for all options.
"""
for o in self._options.values():
o.reset()
self.changed.send(updated=set(self._options.keys()))
def update_known(self, **kwargs):
"""
Update and set all known options from kwargs. Returns a dictionary
of unknown options.
"""
known, unknown = {}, {}
for k, v in kwargs.items():
if k in self._options:
known[k] = v
else:
unknown[k] = v
updated = set(known.keys())
if updated:
with self.rollback(updated, reraise=True):
for k, v in known.items():
self._options[k].set(v)
self.changed.send(updated=updated)
return unknown
def update_defer(self, **kwargs):
unknown = self.update_known(**kwargs)
self.deferred.update(unknown)
def update(self, **kwargs):
u = self.update_known(**kwargs)
if u:
raise KeyError("Unknown options: %s" % ", ".join(u.keys()))
def setter(self, attr):
"""
Generate a setter for a given attribute. This returns a callable
taking a single argument.
"""
if attr not in self._options:
raise KeyError("No such option: %s" % attr)
def setter(x):
setattr(self, attr, x)
return setter
def toggler(self, attr):
"""
Generate a toggler for a boolean attribute. This returns a callable
that takes no arguments.
"""
if attr not in self._options:
raise KeyError("No such option: %s" % attr)
o = self._options[attr]
if o.typespec != bool:
raise ValueError("Toggler can only be used with boolean options")
def toggle():
setattr(self, attr, not getattr(self, attr))
return toggle
def default(self, option: str) -> Any:
return self._options[option].default
def has_changed(self, option):
"""
Has the option changed from the default?
"""
return self._options[option].has_changed()
def merge(self, opts):
"""
Merge a dict of options into this object. Options that have None
value are ignored. Lists and tuples are appended to the current
option value.
"""
toset = {}
for k, v in opts.items():
if v is not None:
if isinstance(v, (list, tuple)):
toset[k] = getattr(self, k) + v
else:
toset[k] = v
self.update(**toset)
def __repr__(self):
options = pprint.pformat(self._options, indent=4).strip(" {}")
if "\n" in options:
options = "\n " + options + "\n"
return "{mod}.{cls}({{{options}}})".format(
mod=type(self).__module__, cls=type(self).__name__, options=options
)
def set(self, *specs: str, defer: bool = False) -> None:
"""
Takes a list of set specification in standard form (option=value).
Options that are known are updated immediately. If defer is true,
options that are not known are deferred, and will be set once they
are added.
May raise an `OptionsError` if a value is malformed or an option is unknown and defer is False.
"""
# First, group specs by option name.
unprocessed: dict[str, list[str]] = {}
for spec in specs:
if "=" in spec:
name, value = spec.split("=", maxsplit=1)
unprocessed.setdefault(name, []).append(value)
else:
unprocessed.setdefault(spec, [])
# Second, convert values to the correct type.
processed: dict[str, Any] = {}
for name in list(unprocessed.keys()):
if name in self._options:
processed[name] = self._parse_setval(
self._options[name], unprocessed.pop(name)
)
# Third, stash away unrecognized options or complain about them.
if defer:
self.deferred.update(
{k: _UnconvertedStrings(v) for k, v in unprocessed.items()}
)
elif unprocessed:
raise exceptions.OptionsError(
f"Unknown option(s): {', '.join(unprocessed)}"
)
# Finally, apply updated options.
self.update(**processed)
def process_deferred(self) -> None:
"""
Processes options that were deferred in previous calls to set, and
have since been added.
"""
update: dict[str, Any] = {}
for optname, value in self.deferred.items():
if optname in self._options:
if isinstance(value, _UnconvertedStrings):
value = self._parse_setval(self._options[optname], value.val)
update[optname] = value
self.update(**update)
for k in update.keys():
del self.deferred[k]
def _parse_setval(self, o: _Option, values: list[str]) -> Any:
"""
Convert a string to a value appropriate for the option type.
"""
if o.typespec == Sequence[str]:
return values
if len(values) > 1:
raise exceptions.OptionsError(
f"Received multiple values for {o.name}: {values}"
)
optstr: Optional[str]
if values:
optstr = values[0]
else:
optstr = None
if o.typespec in (str, Optional[str]):
if o.typespec == str and optstr is None:
raise exceptions.OptionsError(f"Option is required: {o.name}")
return optstr
elif o.typespec in (int, Optional[int]):
if optstr:
try:
return int(optstr)
except ValueError:
raise exceptions.OptionsError(f"Not an integer: {optstr}")
elif o.typespec == int:
raise exceptions.OptionsError(f"Option is required: {o.name}")
else:
return None
elif o.typespec == bool:
if optstr == "toggle":
return not o.current()
if not optstr or optstr == "true":
return True
elif optstr == "false":
return False
else:
raise exceptions.OptionsError(
'Boolean must be "true", "false", or have the value omitted (a synonym for "true").'
)
raise NotImplementedError(f"Unsupported option type: {o.typespec}")
def make_parser(self, parser, optname, metavar=None, short=None):
"""
Auto-Create a command-line parser entry for a named option. If the
option does not exist, it is ignored.
"""
if optname not in self._options:
return
o = self._options[optname]
def mkf(l, s):
l = l.replace("_", "-")
f = ["--%s" % l]
if s:
f.append("-" + s)
return f
flags = mkf(optname, short)
if o.typespec == bool:
g = parser.add_mutually_exclusive_group(required=False)
onf = mkf(optname, None)
offf = mkf("no-" + optname, None)
# The short option for a bool goes to whatever is NOT the default
if short:
if o.default:
offf = mkf("no-" + optname, short)
else:
onf = mkf(optname, short)
g.add_argument(
*offf,
action="store_false",
dest=optname,
)
g.add_argument(*onf, action="store_true", dest=optname, help=o.help)
parser.set_defaults(**{optname: None})
elif o.typespec in (int, Optional[int]):
parser.add_argument(
*flags,
action="store",
type=int,
dest=optname,
help=o.help,
metavar=metavar,
)
elif o.typespec in (str, Optional[str]):
parser.add_argument(
*flags,
action="store",
type=str,
dest=optname,
help=o.help,
metavar=metavar,
choices=o.choices,
)
elif o.typespec == Sequence[str]:
parser.add_argument(
*flags,
action="append",
type=str,
dest=optname,
help=o.help + " May be passed multiple times.",
metavar=metavar,
choices=o.choices,
)
else:
raise ValueError("Unsupported option type: %s", o.typespec)
def dump_defaults(opts, out: TextIO):
"""
Dumps an annotated file with all options.
"""
# Sort data
s = ruamel.yaml.comments.CommentedMap()
for k in sorted(opts.keys()):
o = opts._options[k]
s[k] = o.default
txt = o.help.strip()
if o.choices:
txt += " Valid values are %s." % ", ".join(repr(c) for c in o.choices)
else:
t = typecheck.typespec_to_str(o.typespec)
txt += " Type %s." % t
txt = "\n".join(textwrap.wrap(txt))
s.yaml_set_comment_before_after_key(k, before="\n" + txt)
return ruamel.yaml.YAML().dump(s, out)
def dump_dicts(opts, keys: Iterable[str] | None = None) -> dict:
"""
    Dumps the options into a dict of dicts.
    Return: A dict like: { "anticache": { type: "bool", default: false, value: true, help: "help text"} }
"""
options_dict = {}
if keys is None:
keys = opts.keys()
for k in sorted(keys):
o = opts._options[k]
t = typecheck.typespec_to_str(o.typespec)
option = {
"type": t,
"default": o.default,
"value": o.current(),
"help": o.help,
"choices": o.choices,
}
options_dict[k] = option
return options_dict
def parse(text):
if not text:
return {}
try:
yaml = ruamel.yaml.YAML(typ="unsafe", pure=True)
data = yaml.load(text)
except ruamel.yaml.error.YAMLError as v:
if hasattr(v, "problem_mark"):
snip = v.problem_mark.get_snippet()
raise exceptions.OptionsError(
"Config error at line %s:\n%s\n%s"
% (v.problem_mark.line + 1, snip, v.problem)
)
else:
raise exceptions.OptionsError("Could not parse options.")
if isinstance(data, str):
raise exceptions.OptionsError("Config error - no keys found.")
elif data is None:
return {}
return data
def load(opts: OptManager, text: str) -> None:
"""
Load configuration from text, over-writing options already set in
this object. May raise OptionsError if the config file is invalid.
"""
data = parse(text)
opts.update_defer(**data)
def load_paths(opts: OptManager, *paths: str) -> None:
"""
Load paths in order. Each path takes precedence over the previous
path. Paths that don't exist are ignored, errors raise an
OptionsError.
"""
for p in paths:
p = os.path.expanduser(p)
if os.path.exists(p) and os.path.isfile(p):
with open(p, encoding="utf8") as f:
try:
txt = f.read()
except UnicodeDecodeError as e:
raise exceptions.OptionsError(f"Error reading {p}: {e}")
try:
load(opts, txt)
except exceptions.OptionsError as e:
raise exceptions.OptionsError(f"Error reading {p}: {e}")
def serialize(
opts: OptManager, file: TextIO, text: str, defaults: bool = False
) -> None:
"""
Performs a round-trip serialization. If text is not None, it is
treated as a previous serialization that should be modified
in-place.
- If "defaults" is False, only options with non-default values are
serialized. Default values in text are preserved.
- Unknown options in text are removed.
- Raises OptionsError if text is invalid.
"""
data = parse(text)
for k in opts.keys():
if defaults or opts.has_changed(k):
data[k] = getattr(opts, k)
for k in list(data.keys()):
if k not in opts._options:
del data[k]
ruamel.yaml.YAML().dump(data, file)
def save(opts: OptManager, path: str, defaults: bool = False) -> None:
"""
Save to path. If the destination file exists, modify it in-place.
Raises OptionsError if the existing data is corrupt.
"""
path = os.path.expanduser(path)
if os.path.exists(path) and os.path.isfile(path):
with open(path, encoding="utf8") as f:
try:
data = f.read()
except UnicodeDecodeError as e:
raise exceptions.OptionsError(f"Error trying to modify {path}: {e}")
else:
data = ""
with open(path, "wt", encoding="utf8") as f:
serialize(opts, f, data, defaults)
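# --- Illustrative round trip (added sketch, not part of the original module) ---
# The OptManager instance and the path are hypothetical:
#
#     save(opts, "~/.example/config.yaml", defaults=False)   # write non-defaults
#     load_paths(opts, "~/.example/config.yaml")             # read them back
#
# Because serialize() re-parses the existing file, default-valued keys already
# present in it are preserved while unknown options are dropped.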
| {
"content_hash": "abaa9be1f563fe15c85089bfb3ab6a7e",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 105,
"avg_line_length": 32.19016393442623,
"alnum_prop": 0.5495518435526584,
"repo_name": "mhils/mitmproxy",
"id": "033819be7c4608eb8017d37862060401cbe5dfff",
"size": "19636",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mitmproxy/optmanager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3618"
},
{
"name": "Dockerfile",
"bytes": "618"
},
{
"name": "HTML",
"bytes": "10672"
},
{
"name": "JavaScript",
"bytes": "134086"
},
{
"name": "Kaitai Struct",
"bytes": "3670"
},
{
"name": "Less",
"bytes": "21203"
},
{
"name": "PowerShell",
"bytes": "258"
},
{
"name": "Python",
"bytes": "2367991"
},
{
"name": "Shell",
"bytes": "3055"
},
{
"name": "TypeScript",
"bytes": "279053"
}
],
"symlink_target": ""
} |
from pprint import pprint
from datetime import datetime, date
from itertools import product
from django import forms
from django.db.models import Exists, OuterRef
from django.contrib.auth.mixins import LoginRequiredMixin
from fo2.connections import db_cursor_so
from base.views import O2BaseGetPostView
from geral.functions import has_permission
from utils.functions import dec_month, dec_months, safe_cast
import produto.queries
import lotes.views
from produto.forms import ModeloBugForm
import comercial.models as models
import comercial.queries as queries
class AnaliseModeloOld(LoginRequiredMixin, O2BaseGetPostView):
def __init__(self, *args, **kwargs):
super(AnaliseModeloOld, self).__init__(*args, **kwargs)
# self.Form_class = ModeloForm2
self.Form_class = ModeloBugForm
self.template_name = 'comercial/analise_modelo.html'
self.title_name = 'Define meta de estoque'
self.get_args = ['modelo', 'rotina_calculo']
def mount_context_modelo(self, modelo, zerados):
self.context.update({
'modelo': modelo,
})
refs = produto.queries.modelo_inform(self.cursor, modelo)
if len(refs) == 0:
self.context.update({
'msg_erro': 'Modelo não encontrado',
})
return
# references automatically considered
data_ref = queries.pa_de_modelo(self.cursor, modelo)
if len(data_ref) > 0:
self.context['referencias'] = {
'headers': ['Referência'],
'fields': ['ref'],
'data': data_ref,
}
# references to include
ref_incl = models.MetaModeloReferencia.objects.filter(
modelo=modelo,
# incl_excl='i',
).values('referencia', 'multiplicador')
if len(ref_incl) == 0:
refs_incl = None
mult_incl = None
else:
self.context['adicionadas'] = {
'headers': ['Referência', 'Multiplicador'],
'fields': ['referencia', 'multiplicador'],
'data': ref_incl,
}
refs_incl = tuple([r['referencia'] for r in ref_incl])
mult_incl = tuple([r['multiplicador'] for r in ref_incl])
# sales for the model
data = []
zero_data_row = {p['range']: 0 for p in self.periodos}
zero_data_row['qtd'] = 0
for periodo in self.periodos:
data_periodo = queries.get_vendas(
self.cursor, ref=None, periodo=periodo['range'],
colecao=None, cliente=None, por='modelo', modelo=modelo,
zerados=zerados
) # refs_incl=refs_incl, mult_incl=mult_incl)
if len(data_periodo) == 0:
data_periodo = [{'modelo': modelo, 'qtd': 0}]
for row in data_periodo:
data_row = next(
(dr for dr in data if dr['modelo'] == row['modelo']),
False)
if not data_row:
data_row = {
'modelo': row['modelo'],
**zero_data_row
}
data.append(data_row)
data_row[periodo['range']] = round(
row['qtd'] / periodo['meses'])
data_row['qtd'] += round(
row['qtd'] * periodo['peso'] / self.tot_peso)
self.context['modelo_ponderado'] = {
'headers': ['Modelo', 'Venda ponderada',
*['{} (P:{})'.format(
p['descr'], p['peso']
) for p in self.periodos]],
'fields': ['modelo', 'qtd',
*[p['range'] for p in self.periodos]],
'data': data,
'style': self.style_pond_meses,
}
venda_ponderada = data[0]['qtd']
# sales by size
data = []
zero_data_row = {p['range']: 0 for p in self.periodos}
zero_data_row['qtd'] = 0
zero_data_row['grade'] = 0
total_qtd = 0
for periodo in self.periodos:
if venda_ponderada == 0:
data_tam = queries.get_modelo_dims(
self.cursor,
modelo=modelo,
get='tam',
)
data_periodo = []
for row_tam in data_tam:
data_periodo.append(
{'tam': row_tam['TAM'], 'qtd': 0}
)
else:
data_periodo = queries.get_vendas(
self.cursor, ref=None, periodo=periodo['range'],
colecao=None, cliente=None, por='tam', modelo=modelo,
order_qtd=False, zerados=zerados
) # , refs_incl=refs_incl, mult_incl=mult_incl)
for row in data_periodo:
data_row = next(
(dr for dr in data if dr['tam'] == row['tam']),
False)
if not data_row:
data_row = {
'tam': row['tam'],
**zero_data_row
}
data.append(data_row)
data_row[periodo['range']] = round(
row['qtd'] / periodo['meses'])
qtd = round(row['qtd'] * periodo['peso'] / self.tot_peso)
data_row['qtd'] += qtd
total_qtd += qtd
if len(data) == 1 or total_qtd == 0:
if total_qtd == 0:
for row in data:
row['grade'] = 1
self.context['tamanho_ponderado'] = {
'headers': ['Tamanho', 'Venda ponderada',
*['{} (P:{})'.format(
p['descr'], p['peso']
) for p in self.periodos]],
'fields': ['tam', 'qtd',
*[p['range'] for p in self.periodos]],
'data': data,
'style': self.style_pond_meses,
}
else:
qtds = [row['qtd'] for row in data]
def grade_minima(qtds, max_erro):
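    # Added descriptive comments: brute-force search for the smallest
    # integer grade vector approximating the proportions in qtds. For each
    # cap max_value (1..9), every combination attaining that maximum is
    # scored by its normalized absolute deviation from qtds; the search
    # stops at the first cap whose best grade reaches an error of at most
    # max_erro.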
total = sum(qtds)
max_value = 1
while max_value <= 9:
grades = product(
range(max_value+1), repeat=len(qtds))
best = {'grade': [], 'erro': 1}
for grade in grades:
if max(grade) < max_value:
continue
tot_grade = sum(grade)
diff = 0
for i in range(len(qtds)):
qtd_grade = total / tot_grade * grade[i]
diff += abs(qtd_grade - qtds[i])
if best['erro'] > (diff / total):
best['erro'] = diff / total
best['grade'] = grade
if best['erro'] <= max_erro:
break
max_value += 1
return best['grade'], best['erro']
grade_tam, grade_erro = grade_minima(qtds, 0.05)
for i in range(len(data)):
if grade_tam is None:
data[i]['grade'] = 0
else:
data[i]['grade'] = grade_tam[i]
self.context['tamanho_ponderado'] = {
'headers': ['Tamanho',
'Grade (E:{:.0f}%)'.format(grade_erro * 100),
'Venda ponderada',
*['{} (P:{})'.format(
p['descr'], p['peso']
) for p in self.periodos]],
'fields': ['tam', 'grade', 'qtd',
*[p['range'] for p in self.periodos]],
'data': data,
'style': {
** self.style_pond_meses,
len(self.periodos)+3: 'text-align: right;',
}
}
# sales by color
data = []
zero_data_row = {p['range']: 0 for p in self.periodos}
zero_data_row['qtd'] = 0
zero_data_row['distr'] = 0
total_qtd = 0
for periodo in self.periodos:
if venda_ponderada == 0:
data_tam = queries.get_modelo_dims(
self.cursor,
modelo=modelo,
get='cor',
)
data_periodo = []
for row_tam in data_tam:
data_periodo.append(
{'cor': row_tam['COR'], 'qtd': 0}
)
else:
data_periodo = queries.get_vendas(
self.cursor, ref=None, periodo=periodo['range'],
colecao=None, cliente=None, por='cor', modelo=modelo,
zerados=zerados
) # refs_incl=refs_incl, mult_incl=mult_incl)
for row in data_periodo:
data_row = next(
(dr for dr in data if dr['cor'] == row['cor']),
False)
if not data_row:
data_row = {
'cor': row['cor'],
**zero_data_row
}
data.append(data_row)
data_row[periodo['range']] = round(
row['qtd'] / periodo['meses'])
qtd = round(row['qtd'] * periodo['peso'] / self.tot_peso)
data_row['qtd'] += qtd
total_qtd += qtd
if len(data) == 1 or total_qtd == 0:
if total_qtd == 0:
for row in data:
row['distr'] = 1
self.context['cor_ponderada'] = {
'headers': ['Cor', 'Venda ponderada',
*['{} (P:{})'.format(
p['descr'], p['peso']
) for p in self.periodos]],
'fields': ['cor', 'qtd',
*[p['range'] for p in self.periodos]],
'data': data,
'style': self.style_pond_meses,
}
else:
tot_distr = 0
max_distr_row = {'distr': 0}
for row in data:
row['distr'] = round(row['qtd'] / total_qtd * 100)
if max_distr_row['distr'] < row['distr']:
max_distr_row = row
tot_distr += row['distr']
if tot_distr < 100:
max_distr_row['distr'] += (100 - tot_distr)
self.context['cor_ponderada'] = {
'headers': ['Cor', 'Distribuição', 'Venda ponderada',
*['{} (P:{})'.format(
p['descr'], p['peso']
) for p in self.periodos]],
'fields': ['cor', 'distr', 'qtd',
*[p['range'] for p in self.periodos]],
'data': data,
'style': {
** self.style_pond_meses,
len(self.periodos)+3: 'text-align: right;',
}
}
# sales by reference
data = []
zero_data_row = {p['range']: 0 for p in self.periodos}
zero_data_row['qtd'] = 0
for periodo in self.periodos:
if venda_ponderada == 0:
data_tam = queries.get_modelo_dims(
self.cursor,
modelo=modelo,
get='ref',
)
data_periodo = []
for row_tam in data_tam:
data_periodo.append(
{'ref': row_tam['REF'], 'qtd': 0}
)
else:
data_periodo = queries.get_vendas(
self.cursor, ref=None, periodo=periodo['range'],
colecao=None, cliente=None, por='ref', modelo=modelo,
zerados=zerados
) # refs_incl=refs_incl, mult_incl=mult_incl)
for row in data_periodo:
data_row = next(
(dr for dr in data if dr['ref'] == row['ref']),
False)
if not data_row:
data_row = {
'ref': row['ref'],
**zero_data_row
}
data.append(data_row)
data_row[periodo['range']] = round(
row['qtd'] / periodo['meses'])
data_row['qtd'] += round(
row['qtd'] * periodo['peso'] / self.tot_peso)
self.context['por_ref'] = {
'headers': ['Referência', 'Venda ponderada',
*['{} (P:{})'.format(
p['descr'], p['peso']
) for p in self.periodos]],
'fields': ['ref', 'qtd',
*[p['range'] for p in self.periodos]],
'data': data,
'style': self.style_pond_meses,
}
# latest goal
meta = models.MetaEstoque.objects.filter(modelo=modelo)
meta = meta.annotate(antiga=Exists(
models.MetaEstoque.objects.filter(
modelo=OuterRef('modelo'),
data__gt=OuterRef('data')
)
))
meta = meta.filter(antiga=False)
meta_list = list(meta.values())
tem_meta = len(meta_list) == 1
if tem_meta:
meta_list = meta_list[0]
meta_tamanhos = models.MetaEstoqueTamanho.objects.filter(meta=meta)
meta_grade_tamanhos = {}
for tamanho in meta_tamanhos:
meta_grade_tamanhos[
'tam_{}'.format(tamanho.tamanho)] = tamanho.quantidade
meta_cores = models.MetaEstoqueCor.objects.filter(meta=meta)
meta_grade_cores = {}
for cor in meta_cores:
meta_grade_cores['cor_{}'.format(cor.cor)] = cor.quantidade
self.context.update({
'meta_venda_mensal': meta_list['venda_mensal'],
'meta_multiplicador': meta_list['multiplicador'],
'meta_meta_estoque': meta_list['meta_estoque'],
'meta_grade_tamanhos': meta_grade_tamanhos,
'meta_grade_cores': meta_grade_cores,
})
# Form
self.context.update({
'pode_gravar': has_permission(
self.request, 'comercial.can_define_goal'),
})
venda_mensal = self.context['modelo_ponderado']['data'][0]['qtd']
multiplicador = 2
meta_form = forms.Form()
meta_form.fields['modelo'] = forms.CharField(
initial=modelo, widget=forms.HiddenInput())
meta_form.fields['meta_estoque'] = forms.IntegerField(
initial=0, widget=forms.HiddenInput())
if tem_meta:
val_inicial = meta_list['venda_mensal']
else:
val_inicial = venda_mensal
meta_form.fields['venda'] = forms.IntegerField(
required=True, initial=val_inicial,
label='Venda mensal')
if tem_meta:
val_inicial = meta_list['multiplicador']
else:
val_inicial = multiplicador
meta_form.fields['multiplicador'] = forms.FloatField(
required=True, initial=val_inicial,
label='Multiplicador')
str_tamanhos = ''
pond_grade_tamanhos = {}
tam_form = forms.Form()
for row in self.context['tamanho_ponderado']['data']:
str_tamanhos += '{} '.format(row['tam'])
field_name = 'tam_{}'.format(row['tam'])
if len(self.context['tamanho_ponderado']['data']) == 1:
val_inicial = 1
else:
val_inicial = row['grade']
pond_grade_tamanhos[field_name] = val_inicial
if tem_meta:
if field_name in meta_grade_tamanhos:
val_inicial = meta_grade_tamanhos[field_name]
else:
val_inicial = 0
tam_form.fields[field_name] = forms.IntegerField(
required=True, initial=val_inicial,
label=row['tam'])
self.context.update({
'tam_form': tam_form,
})
meta_form.fields['str_tamanhos'] = forms.CharField(
initial=str_tamanhos, widget=forms.HiddenInput())
pond_grade_cores = {}
cor_form = forms.Form()
for row in self.context['cor_ponderada']['data']:
field_name = 'cor_{}'.format(row['cor'])
if len(self.context['cor_ponderada']['data']) == 1:
val_inicial = 1
else:
val_inicial = row['distr']
pond_grade_cores[field_name] = val_inicial
if tem_meta:
if field_name in meta_grade_cores:
val_inicial = meta_grade_cores[field_name]
else:
val_inicial = 0
cor_form.fields[field_name] = forms.IntegerField(
required=True, initial=val_inicial,
label=row['cor'])
self.context.update({
'cor_form': cor_form,
})
self.context.update({
'pond_venda_mensal': venda_mensal,
'pond_multiplicador': multiplicador,
'pond_grade_tamanhos': pond_grade_tamanhos,
'pond_grade_cores': pond_grade_cores,
'meta_form': meta_form,
'venda_mensal': venda_mensal,
'multiplicador': multiplicador,
})
def grava_meta(self):
if not has_permission(self.request, 'comercial.can_define_goal'):
return
modelo = safe_cast(self.request.POST['modelo'], str, '')
venda = safe_cast(self.request.POST['venda'], int, 0)
multiplicador = safe_cast(
self.request.POST['multiplicador'], float, 0)
meta_estoque = safe_cast(self.request.POST['meta_estoque'], int, 0)
str_tamanhos = safe_cast(self.request.POST['str_tamanhos'], str, '')
str_tamanhos = str_tamanhos.strip()
ordem_tamanhos = str_tamanhos.split(' ')
tamanhos = {}
for vari in [key for key in self.request.POST
if key.startswith('tam_')]:
tamanhos[vari[4:]] = safe_cast(self.request.POST[vari], int, 0)
cores = {}
for vari in [key for key in self.request.POST
if key.startswith('cor_')]:
cores[vari[4:]] = safe_cast(self.request.POST[vari], int, 0)
try:
meta = models.MetaEstoque.objects.get(
modelo=modelo, data=date.today())
except models.MetaEstoque.DoesNotExist:
meta = models.MetaEstoque()
meta.modelo = modelo
meta.venda_mensal = venda
meta.multiplicador = multiplicador
meta.data = date.today()
meta.meta_estoque = meta_estoque
meta.save()
for tamanho in tamanhos:
try:
meta_tamanho = models.MetaEstoqueTamanho.objects.get(
meta=meta, tamanho=tamanho)
except models.MetaEstoqueTamanho.DoesNotExist:
meta_tamanho = models.MetaEstoqueTamanho()
meta_tamanho.meta = meta
meta_tamanho.tamanho = tamanho
meta_tamanho.quantidade = tamanhos[tamanho]
meta_tamanho.ordem = ordem_tamanhos.index(tamanho)
meta_tamanho.save()
for cor in cores:
try:
meta_cor = models.MetaEstoqueCor.objects.get(
meta=meta, cor=cor)
except models.MetaEstoqueCor.DoesNotExist:
meta_cor = models.MetaEstoqueCor()
meta_cor.meta = meta
meta_cor.cor = cor
meta_cor.quantidade = cores[cor]
meta_cor.save()
metas = models.getMetaEstoqueAtual().filter(modelo=modelo)
if len(metas) != 0:
lotes.views.calculaMetaGiroMetas(self.cursor, metas)
def mount_context(self):
self.cursor = db_cursor_so(self.request)
modelo = self.form.cleaned_data['modelo']
rotina_calculo = self.form.cleaned_data['rotina_calculo']
if 'grava' in self.request.POST:
self.grava_meta()
nfs = list(models.ModeloPassadoPeriodo.objects.filter(
modelo_id=1).order_by('ordem').values())
if len(nfs) == 0:
self.context.update({
'msg_erro': 'Nenhum período definido',
})
return
self.data_nfs = list(nfs)
self.periodos = []
self.tot_peso = 0
n_mes = 0
hoje = datetime.today()
mes = dec_month(hoje, 1)
self.style = {}
for i, row in enumerate(self.data_nfs):
periodo = {
'range': '{}:{}'.format(
n_mes+row['meses'], n_mes),
'meses': row['meses'],
'peso': row['peso'],
}
n_mes += row['meses']
self.tot_peso += row['meses'] * row['peso']
mes_fim = mes.strftime("%m/%Y")
mes = dec_months(mes, row['meses']-1)
mes_ini = mes.strftime("%m/%Y")
mes = dec_month(mes)
if row['meses'] == 1:
periodo['descr'] = mes_ini
else:
if mes_ini[-4:] == mes_fim[-4:]:
periodo['descr'] = '{} - {}'.format(mes_fim[:2], mes_ini)
else:
periodo['descr'] = '{} - {}'.format(mes_fim, mes_ini)
self.style[i+2] = 'text-align: right;'
self.periodos.append(periodo)
self.style_pond_meses = {
** self.style,
len(self.periodos)+2: 'text-align: right;',
}
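# Illustrative (added comment): with periods of, e.g., 3, 3 and 6 months,
# the ranges built above are "3:0", "6:3" and "12:6", i.e.
# "<months back at start>:<months back at end>" windows passed to
# queries.get_vendas.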
self.mount_context_modelo(modelo, zerados=(rotina_calculo=="bug"))
| {
"content_hash": "eb4d29f1437f9ff08fc8a94f88d05c67",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 79,
"avg_line_length": 38.43402777777778,
"alnum_prop": 0.4607462282048966,
"repo_name": "anselmobd/fo2",
"id": "b89070a57c9fac3c8e3ad8bb64e7e883e5cec3c4",
"size": "22148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/comercial/views/analise_modelo_old.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from django.views.generic.base import RedirectView
from pages import views
urlpatterns = patterns('',
url(r'^favicon\.ico/$', RedirectView.as_view(url='/static/favicon.ico')),
url(r'^robots\.txt$', views.robots, name='robots'),
url(r'^(?P<url>.*)/$', views.handle, name='handle'),
url(r'^$', views.index, name='index'),
)
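# Illustrative (added comment): patterns are matched in order, so
# "/robots.txt" is answered by views.robots while any other trailing-slash
# path (a hypothetical "/foo/", say) falls through to the catch-all
# views.handle.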
| {
"content_hash": "7e88cfb4a5363be6bfa22344912f9275",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 77,
"avg_line_length": 25.733333333333334,
"alnum_prop": 0.6606217616580311,
"repo_name": "rsomji/crawlbin",
"id": "5ddd401ff28a19ed650a0f022982ee8e198b7a64",
"size": "386",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pages/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "848"
},
{
"name": "HTML",
"bytes": "23418"
},
{
"name": "Python",
"bytes": "30800"
}
],
"symlink_target": ""
} |
import unittest
from gcc_facade import GCCFacade
#
# TestGCCFacade
#
class TestGCCFacade( unittest.TestCase ):
def test_negative( self ):
gccFacade = GCCFacade()
self.assertRaises( Exception, gccFacade.parseLine, "line" )
self.assertRaises( Exception, gccFacade.parseLine, ". " )
self.assertRaises( Exception, gccFacade.parseLine, "...line" )
self.assertRaises( Exception, gccFacade.parseLine, "......." )
def test_positive( self ):
gccFacade = GCCFacade()
self.assertEqual( gccFacade.parseLine( ". f" ), ( 1, "f" ) )
self.assertEqual( gccFacade.parseLine( ".. directory" ), ( 2, "directory" ) )
self.assertEqual( gccFacade.parseLine( "... path" ), ( 3, "path" ) )
#
# main
#
if __name__ == "__main__":
unittest.main() | {
"content_hash": "074e487b82e9e66c2aa0f9442a2fd49e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 28.03448275862069,
"alnum_prop": 0.6137761377613776,
"repo_name": "wo3kie/pchGenerator",
"id": "36336d32fe91ae22925e136f06c11b587de19535",
"size": "813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_gcc_facade.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44587"
}
],
"symlink_target": ""
} |
"""Tests for the ChooseFastestBranchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ChooseFastestBranchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testCore(self):
def build_ds(size):
dataset = dataset_ops.Dataset.range(size)
def branch_0(dataset):
return dataset.map(lambda x: x).batch(10)
def branch_1(dataset):
return dataset.batch(10).map(lambda x: x)
return optimization._ChooseFastestBranchDataset( # pylint: disable=protected-access
dataset, [branch_0, branch_1],
ratio_numerator=10)
for size in [100, 1000]:
self.run_core_tests(lambda: build_ds(size), None, size // 10) # pylint: disable=cell-var-from-loop
def testWithCapture(self):
def build_ds():
dataset = dataset_ops.Dataset.range(10)
const_64 = constant_op.constant(1, dtypes.int64)
const_32 = constant_op.constant(1, dtypes.int32)
def branch_0(dataset):
return dataset.map(lambda x: x + const_64)
def branch_1(dataset):
return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))
return optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1], num_elements_per_branch=3)
self.run_core_tests(build_ds, None, 10)
def testWithPrefetch(self):
def build_ds():
dataset = dataset_ops.Dataset.range(10)
const_64 = constant_op.constant(1, dtypes.int64)
const_32 = constant_op.constant(1, dtypes.int32)
def branch_0(dataset):
return dataset.map(lambda x: x + const_64)
def branch_1(dataset):
return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))
return optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1], num_elements_per_branch=3)
self.run_core_tests(build_ds, None, 10)
def testWithMoreOutputThanInput(self):
def build_ds():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(1000).batch(100)
def branch(dataset):
return dataset.apply(batching.unbatch())
return optimization._ChooseFastestBranchDataset(
dataset, [branch, branch],
ratio_denominator=10,
num_elements_per_branch=100)
self.run_core_tests(build_ds, None, 1000)
if __name__ == "__main__":
test.main()
| {
"content_hash": "90063de47c2d1d9af5e07cccaf094a82",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 106,
"avg_line_length": 32.46666666666667,
"alnum_prop": 0.6978097193702943,
"repo_name": "ghchinoy/tensorflow",
"id": "eaedcae421014ab52a2c56740e91c669594c579a",
"size": "3611",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/kernel_tests/serialization/choose_fastest_branch_dataset_serialization_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
} |
'''
Harvester for the New Prairie Press for the SHARE project
Example API call: http://newprairiepress.org/do/oai/?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class Npp_ksuHarvester(OAIHarvester):
short_name = 'npp_ksu'
long_name = 'New Prairie Press at Kansas State University'
url = 'http://newprairiepress.org'
base_url = 'http://newprairiepress.org/do/oai/'
property_list = ['identifier', 'source', 'date', 'type', 'format', 'setSpec']
timezone_granularity = True
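# Illustrative usage (added sketch): assuming scrapi's OAIHarvester base
# class exposes a harvest(days_back=...) method that issues ListRecords
# requests against base_url:
#
#     harvester = Npp_ksuHarvester()
#     raw_docs = harvester.harvest(days_back=1)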
| {
"content_hash": "848f6aef9f4b20ebd10ec2407d36ecf2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 91,
"avg_line_length": 31.833333333333332,
"alnum_prop": 0.7155322862129145,
"repo_name": "CenterForOpenScience/scrapi",
"id": "b7b5ea83a47ff296bdafcc0abdd3022f1a53829d",
"size": "573",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "scrapi/harvesters/npp_ksu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "964"
},
{
"name": "HTML",
"bytes": "2300"
},
{
"name": "Python",
"bytes": "460450"
}
],
"symlink_target": ""
} |
import hashlib
import binascii
import sys
import evernote.edam.userstore.constants as UserStoreConstants
from evernote.edam.notestore.ttypes import NoteFilter
import evernote.edam.type.ttypes as Types
from evernote.api.client import EvernoteClient
# Sandbox: https://sandbox.evernote.com/api/DeveloperToken.action
# Production: https://www.evernote.com/api/DeveloperToken.action
auth_token = "S=s1:U=8f65c:E=14fa7113473:C=1484f600570:P=1cd:A=en-devtoken:V=2:H=484744fd0ffa906797416ae02ce5cd9c"
client = EvernoteClient(token=auth_token, sandbox=True)
note_store = client.get_note_store()
# GET CONTENT
def get_tip_notes(start_date, end_date):
tip_notes = []
noteFilter = NoteFilter()
noteFilter.words = "tag:tip created:%s -created:%s" % (start_date, end_date) # notes with tag #tip created between 2012-01-01 and 2014-09-08 (flight dates)
note_list = note_store.findNotes(auth_token, noteFilter, 0, 10)
for note in note_list.notes:
guid = note.guid
title = note.title
url = "evernote:///view/8f65c/s1/%s/%s/" % (guid, guid)
tip_notes.append( '<div><en-todo/> %s (<a href="%s">view full note</a>)</div>' % (title, url) )
return tip_notes
tip_notes = get_tip_notes("20120101", "20140908")
for tip_note in tip_notes:
# note.content += tip_note
print tip_note
sys.exit()
# List all notebooks:
notebooks = note_store.listNotebooks()
print "Found ", len(notebooks), " notebooks:"
for notebook in notebooks:
    print " * ", notebook.name
# Create a new note:
print "\nCreating a new note in the default notebook\n"
note = Types.Note()
note.title = "Evernote API Workshop @ Campus Party! Python!"
# Attaching an image:
image = open('enlogo.png', 'rb').read()
md5 = hashlib.md5()
md5.update(image)
hash = md5.digest()
data = Types.Data()
data.size = len(image)
data.bodyHash = hash
data.body = image
resource = Types.Resource()
resource.mime = 'image/png'
resource.data = data
# Add the new Resource to this note's list of resources
note.resources = [resource]
# To display the image inside the note, we only need its MD5 hash
hash_hex = binascii.hexlify(hash)
# ENML = Evernote Markup Language. It is a subset of HTML, with a few extra tags
note.content = '<?xml version="1.0" encoding="UTF-8"?>'
note.content += '<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">'
note.content += '<en-note>'
note.content += 'This is a new note, inserted directly into the default notebook :)<br/>'
note.content += 'Here is the Evernote logo:<br/>'
note.content += '<en-media type="image/png" hash="' + hash_hex + '"/>'
note.content += '</en-note>'
# Finally, send the note
created_note = note_store.createNote(note)
print "Note created successfully! Its GUID is: ", created_note.guid | {
"content_hash": "e430572ed87aa2c765e31c36b8d573dd",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 156,
"avg_line_length": 28.821052631578947,
"alnum_prop": 0.7176771365960555,
"repo_name": "Doingcast/bizmem",
"id": "8d313f229dbdb048f59337c3e905e8450a5140fc",
"size": "2785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "27195"
},
{
"name": "HTML",
"bytes": "4181"
},
{
"name": "Python",
"bytes": "1442818"
}
],
"symlink_target": ""
} |
import os
import distutils.core
import distutils.dir_util
import sqlite3
import jam
from base64 import b64encode
project_dir = os.getcwd()
jam_project_dir = os.path.join(os.path.dirname(jam.__file__), 'project')
distutils.dir_util.copy_tree(jam_project_dir, project_dir, preserve_mode=0)
os.chmod(os.path.join(project_dir, 'server.py'), 0o777)
dirs = ['js', 'reports', os.path.join('static', 'reports')]
for dir in dirs:
path = os.path.join(project_dir, dir)
if not os.path.isdir(path):
os.makedirs(path)
key = b64encode(os.urandom(20)).decode('utf-8')
con = sqlite3.connect(os.path.join(project_dir, 'admin.sqlite'))
cursor = con.cursor()
cursor.execute("UPDATE SYS_PARAMS SET F_LANGUAGE=NULL, F_SECRET_KEY='%s'" % key)
cursor.execute("UPDATE SYS_TASKS SET F_NAME=NULL, F_ITEM_NAME=NULL, \
F_MANUAL_UPDATE=NULL, F_DB_TYPE=NULL, F_ALIAS=NULL, F_LOGIN=NULL, \
F_PASSWORD=NULL, F_HOST=NULL, F_PORT=NULL, F_ENCODING=NULL")
con.commit()
con.close()
| {
"content_hash": "a1142ef217bc91fa1d418c4222a0e80c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 35.074074074074076,
"alnum_prop": 0.7032734952481521,
"repo_name": "jam-py/jam-py",
"id": "d5fc6b431374ebfcccdd5911de9f004345ee8d96",
"size": "994",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jam/bin/jam-project.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "37683"
},
{
"name": "HTML",
"bytes": "67579"
},
{
"name": "JavaScript",
"bytes": "2789171"
},
{
"name": "Python",
"bytes": "432048"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from optparse import OptionParser
import os.path
import re
import sys
parser = OptionParser()
opts, args = parser.parse_args()
if not len(args):
parser.error('not enough arguments')
elif len(args) > 1:
parser.error('too many arguments')
DEST_PATTERN = r'\g<1>%s\g<3>' % args[0]
def replace_in_file(filename, src_pattern):
try:
f = open(filename, "r")
s = f.read()
f.close()
s = re.sub(src_pattern, DEST_PATTERN, s)
f = open(filename, "w")
f.write(s)
f.close()
except IOError as err:
print('error updating %s: %s' % (filename, err), file=sys.stderr)
src_pattern = re.compile(r'^(\s*<version>)([-.\w]+)(</version>\s*)$',
re.MULTILINE)
for project in ['gerrit-acceptance-framework', 'gerrit-extension-api',
'gerrit-plugin-api', 'gerrit-plugin-archetype',
'gerrit-plugin-gwt-archetype', 'gerrit-plugin-gwtui',
'gerrit-plugin-js-archetype', 'gerrit-war']:
pom = os.path.join(project, 'pom.xml')
replace_in_file(pom, src_pattern)
src_pattern = re.compile(r"^(GERRIT_VERSION = ')([-.\w]+)(')$", re.MULTILINE)
replace_in_file('VERSION', src_pattern)
src_pattern = re.compile(r'^(\s*-DarchetypeVersion=)([-.\w]+)(\s*\\)$',
re.MULTILINE)
replace_in_file(os.path.join('Documentation', 'dev-plugins.txt'), src_pattern)
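# Illustrative (added comment): with args[0] == "2.12", the pom.xml pattern
# defined above rewrites a version line like so:
#
#     re.sub(pom_pattern, DEST_PATTERN, "  <version>2.11</version>")
#     -> "  <version>2.12</version>"
#
# ("pom_pattern" names the first src_pattern; "2.12"/"2.11" are hypothetical
# version strings.)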
| {
"content_hash": "810399a994ef2ad8c3a49a04526af8fa",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 31.022222222222222,
"alnum_prop": 0.6160458452722063,
"repo_name": "joshuawilson/merrit",
"id": "9f03a5972a9175e1633fa180ad3fa263621963f9",
"size": "2014",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/version.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "52838"
},
{
"name": "GAP",
"bytes": "4119"
},
{
"name": "Go",
"bytes": "6200"
},
{
"name": "Groff",
"bytes": "28221"
},
{
"name": "HTML",
"bytes": "380099"
},
{
"name": "Java",
"bytes": "10070223"
},
{
"name": "JavaScript",
"bytes": "197056"
},
{
"name": "Makefile",
"bytes": "1313"
},
{
"name": "PLpgSQL",
"bytes": "4202"
},
{
"name": "Perl",
"bytes": "9943"
},
{
"name": "Prolog",
"bytes": "17904"
},
{
"name": "Python",
"bytes": "18218"
},
{
"name": "Shell",
"bytes": "48919"
},
{
"name": "TypeScript",
"bytes": "1882"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 30, transform = "Logit", sigma = 0.0, exog_count = 20, ar_order = 0); | {
"content_hash": "aeac5c24065a74b46c8a1c90e8531f6c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 166,
"avg_line_length": 38,
"alnum_prop": 0.706766917293233,
"repo_name": "antoinecarme/pyaf",
"id": "6f7989d37ca40fa202fa18cb114ffc3cab0f7586",
"size": "266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Logit/trend_ConstantTrend/cycle_30/ar_/test_artificial_1024_Logit_ConstantTrend_30__20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""
This module defines the principal constants used during the OMiSCID runtime
"""
import os
import xsd
import logging
logger = logging.getLogger(__name__)
OMISCID_DOMAIN = os.environ.get("OMISCID_WORKING_DOMAIN", "_bip._tcp")
if not OMISCID_DOMAIN.startswith("_bip"):
if logger.isEnabledFor(logging.WARNING):
logger.warning("OMiSCID Domain must begin with a _bip")
OMISCID_DOMAIN = "_bip._tcp"
#
# TXT Record
#
CONSTANT_PREFIX = 'c'
READ_PREFIX = 'r'
READ_WRITE_PREFIX = 'w'
TXT_SEPARATOR = '/'
OUTPUT_CONNECTOR_PREFIX = 'o'
INPUT_CONNECTOR_PREFIX = 'i'
IO_CONNECTOR_PREFIX = 'd'
WRITE_VARIABLE_PREFIX = 'w'
READ_VARIABLE_PREFIX = 'r'
XML_IO_CONNECTOR_TAG = 'inoutput'
XML_I_CONNECTOR_TAG = 'input'
XML_O_CONNECTOR_TAG = 'output'
XML_CONSTANT_TAG = 'constant'
XML_READ_TAG = 'read'
XML_READ_WRITE_TAG = 'readWrite'
DESCRIPTION_VARIABLE_NAME = 'desc'
FULL_DESCRIPTION_VALUE = 'full'
PREFIX_LEN = 2
#
# Control Protocol
#
CONTROL_QUERY = '<?xml version="1.0" standalone="yes"?><controlQuery id="%08.x">%s</controlQuery>'
CONTROL_EVENT = '<?xml version="1.0" standalone="yes"?><controlEvent>%s</controlEvent>'
CONTROL_ANSWER = '<?xml version="1.0" standalone="yes"?><controlAnswer id="%08.x">%s</controlAnswer>'
CONTROL_QUERY_TAG = 'controlQuery'
CONTROL_EVENT_TAG = 'controlEvent'
CONTROL_ANSWER_TAG = 'controlAnswer'
#
# Control MSG
#
REQUEST_CONTROL_QUERY = '<%s name="%s"/>'
VARIABLE_EVENT_MSG = '<variable name="%s"><value><![CDATA[%s]]></value></variable>'
VARIABLE_SUBSCRIBE = '<subscribe name="%s"/>'
VARIABLE_UNSUBSCRIBE = '<unsubscribe name="%s"/>'
XML_VARIABLE_TYPE = 'variable'
SERVICE_FULL_DESCRIPTION = '<fullDescription/>'
#
# XSD
#
CONTROL_ANSWER_XSD = xsd.control_answer
CONTROL_QUERY_XSD = xsd.control_query
SERVICE_XSD = xsd.service
#
# General
#
UNBOUNDED_SERVICE_NAME = "unbound service"
UNBOUNDED_CONNECTOR_NAME = "unbound connector"
UNBOUNDED_VARIABLE_NAME = "unbound variable"
PEERID = "%08.x"
QUERY_TIMEOUT = 5 #seconds
CONNECTION_TIMEOUT = 5 #seconds
PROXY_DISCONNECT_TIMEOUT = 5
OUTPUT_CONNECTOR_TYPE = OUTPUT_CONNECTOR_PREFIX[0]
INPUT_CONNECTOR_TYPE = INPUT_CONNECTOR_PREFIX[0]
IO_CONNECTOR_TYPE = IO_CONNECTOR_PREFIX[0]
#
# Variables
#
LOCK_DESCRIPTION = "LOCK DESCRIPTION"
LOCK_TYPE = "bool"
#
# QUERY tags
#
VARIABLE_TAG = 'variable'
FULLDESC_TAG = 'fullDescription'
SUBSCRIBE_TAG = 'subscribe'
UNSUBSCRIBE_TAG = 'unsubscribe'
INPUT_CONNECTOR_TAG = 'input'
OUTPUT_CONNECTOR_TAG = 'output'
IO_CONNECTOR_TAG = 'inoutput'
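# Illustrative (added comment): a subscription control query for a variable
# "x" sent by peer 0x1234 would be assembled roughly as
#     CONTROL_QUERY % (0x1234, VARIABLE_SUBSCRIBE % "x")
# (the peer id and variable name are hypothetical).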
| {
"content_hash": "a8ae91a010704f5c5919c6840756adc6",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 101,
"avg_line_length": 23.65714285714286,
"alnum_prop": 0.712157809983897,
"repo_name": "AmibisLabs/amibis-py",
"id": "b6ea7ba6f10046851048a2a63e6b4d4a64598322",
"size": "2486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymiscid/cstes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "110900"
},
{
"name": "Shell",
"bytes": "595"
}
],
"symlink_target": ""
} |
"""ORCHSET Dataset Loader
.. admonition:: Dataset Info
:class: dropdown
Orchset is intended to be used as a dataset for the development and
evaluation of melody extraction algorithms. This collection contains
64 audio excerpts focused on symphonic music with their corresponding
annotation of the melody.
For more details, please visit: https://zenodo.org/record/1289786#.XREpzaeZPx6
"""
import csv
import os
from typing import BinaryIO, Optional, TextIO, Tuple
from deprecated.sphinx import deprecated
import librosa
import numpy as np
from smart_open import open
from mirdata import annotations, core, download_utils, io, jams_utils
BIBTEX = """@article{bosch2016evaluation,
title={Evaluation and combination of pitch estimation methods for melody extraction in symphonic classical music},
author={Bosch, Juan J and Marxer, Ricard and G{\'o}mez, Emilia},
journal={Journal of New Music Research},
volume={45},
number={2},
pages={101--117},
year={2016},
publisher={Taylor \\& Francis}
}"""
INDEXES = {
"default": "1.0",
"test": "1.0",
"1.0": core.Index(filename="orchset_index_1.0.json"),
}
REMOTES = {
"all": download_utils.RemoteFileMetadata(
filename="Orchset_dataset_0.zip",
url="https://zenodo.org/record/1289786/files/Orchset_dataset_0.zip?download=1",
checksum="cf6fe52d64624f61ee116c752fb318ca",
unpack_directories=["Orchset"],
)
}
LICENSE_INFO = (
"Creative Commons Attribution Non Commercial Share Alike 4.0 International."
)
class Track(core.Track):
"""orchset Track class
Args:
track_id (str): track id of the track
Attributes:
alternating_melody (bool): True if the melody alternates between instruments
audio_path_mono (str): path to the mono audio file
audio_path_stereo (str): path to the stereo audio file
composer (str): the work's composer
contains_brass (bool): True if the track contains any brass instrument
contains_strings (bool): True if the track contains any string instrument
contains_winds (bool): True if the track contains any wind instrument
excerpt (str): True if the track is an excerpt
melody_path (str): path to the melody annotation file
only_brass (bool): True if the track contains brass instruments only
only_strings (bool): True if the track contains string instruments only
only_winds (bool): True if the track contains wind instruments only
predominant_melodic_instruments (list): List of instruments which play the melody
track_id (str): track id
work (str): The musical work
Cached Properties:
melody (F0Data): melody annotation
"""
def __init__(
self,
track_id,
data_home,
dataset_name,
index,
metadata,
):
super().__init__(
track_id,
data_home,
dataset_name,
index,
metadata,
)
self.melody_path = self.get_path("melody")
self.audio_path_mono = self.get_path("audio_mono")
self.audio_path_stereo = self.get_path("audio_stereo")
@property
def composer(self):
return self._track_metadata.get("composer")
@property
def work(self):
return self._track_metadata.get("work")
@property
def excerpt(self):
return self._track_metadata.get("excerpt")
@property
def predominant_melodic_instruments(self):
return self._track_metadata.get("predominant_melodic_instruments-normalized")
@property
def alternating_melody(self):
return self._track_metadata.get("alternating_melody")
@property
def contains_winds(self):
return self._track_metadata.get("contains_winds")
@property
def contains_strings(self):
return self._track_metadata.get("contains_strings")
@property
def contains_brass(self):
return self._track_metadata.get("contains_brass")
@property
def only_strings(self):
return self._track_metadata.get("only_strings")
@property
def only_winds(self):
return self._track_metadata.get("only_winds")
@property
def only_brass(self):
return self._track_metadata.get("only_brass")
@core.cached_property
def melody(self) -> Optional[annotations.F0Data]:
return load_melody(self.melody_path)
@property
def audio_mono(self) -> Optional[Tuple[np.ndarray, float]]:
"""the track's audio (mono)
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
return load_audio_mono(self.audio_path_mono)
@property
def audio_stereo(self) -> Optional[Tuple[np.ndarray, float]]:
"""the track's audio (stereo)
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
return load_audio_stereo(self.audio_path_stereo)
def to_jams(self):
"""Get the track's data in jams format
Returns:
jams.JAMS: the track's data in jams format
"""
return jams_utils.jams_converter(
audio_path=self.audio_path_mono,
f0_data=[(self.melody, "annotated melody")],
metadata=self._track_metadata,
)
@io.coerce_to_bytes_io
def load_audio_mono(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:
"""Load an Orchset audio file.
Args:
fhandle (str or file-like): File-like object or path to audio file
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
return librosa.load(fhandle, sr=None, mono=True)
@io.coerce_to_bytes_io
def load_audio_stereo(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:
"""Load an Orchset audio file.
Args:
fhandle (str or file-like): File-like object or path to audio file
Returns:
* np.ndarray - the stereo audio signal
* float - The sample rate of the audio file
"""
return librosa.load(fhandle, sr=None, mono=False)
@io.coerce_to_string_io
def load_melody(fhandle: TextIO) -> annotations.F0Data:
"""Load an Orchset melody annotation file
Args:
fhandle (str or file-like): File-like object or path to melody annotation file
Raises:
IOError: if melody_path doesn't exist
Returns:
F0Data: melody annotation data
"""
times = []
freqs = []
voicing = []
reader = csv.reader(fhandle, delimiter="\t")
for line in reader:
times.append(float(line[0]))
freqs.append(float(line[1]))
voicing.append(0.0 if line[1] == "0" else 1.0)
melody_data = annotations.F0Data(
np.array(times), "s", np.array(freqs), "hz", np.array(voicing), "binary"
)
return melody_data
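# Added descriptive comment: melody files are tab-separated
# "<time>\t<frequency>" rows; a frequency of 0 is marked unvoiced
# (voicing 0.0) and any other value voiced (1.0) by load_melody above.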
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
"""
The orchset dataset
"""
def __init__(self, data_home=None, version="default"):
super().__init__(
data_home,
version,
name="orchset",
track_class=Track,
bibtex=BIBTEX,
indexes=INDEXES,
remotes=REMOTES,
license_info=LICENSE_INFO,
)
@core.cached_property
def _metadata(self):
predominant_inst_path = os.path.join(
self.data_home, "Orchset - Predominant Melodic Instruments.csv"
)
try:
with open(predominant_inst_path, "r") as fhandle:
reader = csv.reader(fhandle, delimiter=",")
raw_data = []
for line in reader:
if line[0] == "excerpt":
continue
raw_data.append(line)
except FileNotFoundError:
raise FileNotFoundError("Metadata not found. Did you run .download()?")
tf_dict = {"TRUE": True, "FALSE": False}
metadata_index = {}
for line in raw_data:
track_id = line[0].split(".")[0]
id_split = track_id.split(".")[0].split("-")
if id_split[0] == "Musorgski" or id_split[0] == "Rimski":
id_split[0] = "-".join(id_split[:2])
id_split.pop(1)
melodic_instruments = [s.split(",") for s in line[1].split("+")]
melodic_instruments = [
item.lower() for sublist in melodic_instruments for item in sublist
]
for i, inst in enumerate(melodic_instruments):
if inst == "string":
melodic_instruments[i] = "strings"
elif inst == "winds (solo)":
melodic_instruments[i] = "winds"
melodic_instruments = sorted(list(set(melodic_instruments)))
metadata_index[track_id] = {
"predominant_melodic_instruments-raw": line[1],
"predominant_melodic_instruments-normalized": melodic_instruments,
"alternating_melody": tf_dict[line[2]],
"contains_winds": tf_dict[line[3]],
"contains_strings": tf_dict[line[4]],
"contains_brass": tf_dict[line[5]],
"only_strings": tf_dict[line[6]],
"only_winds": tf_dict[line[7]],
"only_brass": tf_dict[line[8]],
"composer": id_split[0],
"work": "-".join(id_split[1:-1]),
"excerpt": id_split[-1][2:],
}
return metadata_index
@deprecated(
reason="Use mirdata.datasets.orchset.load_audio_mono",
version="0.3.4",
)
def load_audio_mono(self, *args, **kwargs):
return load_audio_mono(*args, **kwargs)
@deprecated(
reason="Use mirdata.datasets.orchset.load_audio_stereo",
version="0.3.4",
)
def load_audio_stereo(self, *args, **kwargs):
return load_audio_stereo(*args, **kwargs)
@deprecated(
reason="Use mirdata.datasets.orchset.load_melody",
version="0.3.4",
)
def load_melody(self, *args, **kwargs):
return load_melody(*args, **kwargs)
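# --- Illustrative usage (added sketch, not part of the original module) ---
# Assuming mirdata's core.Dataset API (download(), track_ids, track()):
#
#     dataset = Dataset(data_home="/tmp/orchset")   # hypothetical path
#     dataset.download()
#     track = dataset.track(dataset.track_ids[0])
#     melody = track.melody                          # annotations.F0Data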
| {
"content_hash": "e67314f46004164b492785ad91183eb2",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 118,
"avg_line_length": 29.93586005830904,
"alnum_prop": 0.5974873393065836,
"repo_name": "mir-dataset-loaders/mirdata",
"id": "8bfeab17867c3a8f0796a0288db581d474174287",
"size": "10268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mirdata/datasets/orchset.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1374647"
}
],
"symlink_target": ""
} |
from argparse import ArgumentParser
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
import django
from django import contrib
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TransactionTestCase, TestCase
from django.test.utils import get_runner
from django.utils.deprecation import RemovedInDjango19Warning, RemovedInDjango20Warning
from django.utils._os import upath
from django.utils import six
warnings.simplefilter("error", RemovedInDjango19Warning)
warnings.simplefilter("error", RemovedInDjango20Warning)
CONTRIB_MODULE_PATH = 'django.contrib'
CONTRIB_DIR = os.path.dirname(upath(contrib.__file__))
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
TEMP_DIR = tempfile.mkdtemp(prefix='django_')
os.environ['DJANGO_TEST_TEMP_DIR'] = TEMP_DIR
SUBDIRS_TO_SKIP = [
'data',
'test_discovery_sample',
'test_discovery_sample2',
'test_runner_deprecation_app',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
def get_test_modules():
modules = []
discovery_paths = [
(None, RUNTESTS_DIR),
(CONTRIB_MODULE_PATH, CONTRIB_DIR)
]
if connection.features.gis_enabled:
discovery_paths.append(
('django.contrib.gis.tests', os.path.join(CONTRIB_DIR, 'gis', 'tests'))
)
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' in f or
f.startswith('sql') or
os.path.basename(f) in SUBDIRS_TO_SKIP or
os.path.isfile(f) or
not os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
continue
if (connection.vendor != 'postgresql' and f == 'postgres_tests') or f == 'postgres':
continue
modules.append((modpath, f))
return modules
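# Added descriptive comment: get_test_modules() returns (modpath, name)
# pairs, e.g. (None, "basic") for top-level test apps and
# ("django.contrib", "admin") for contrib apps, which setup() later turns
# into dotted labels for INSTALLED_APPS.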
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels):
print("Testing against Django installed in '%s'" % os.path.dirname(django.__file__))
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
# Remove the following line in Django 2.0.
'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TEMP_DIR, 'static')
# Remove the following line in Django 2.0.
settings.TEMPLATE_DIRS = (TEMPLATE_DIR,)
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE_CLASSES = ALWAYS_MIDDLEWARE_CLASSES
# Ensure the middleware classes are seen as overridden otherwise we get a compatibility warning.
settings._explicit_settings.add('MIDDLEWARE_CLASSES')
settings.MIGRATION_MODULES = {
# these 'tests.migrations' modules don't actually exist, but this lets
# us skip creating migrations for the test models.
'auth': 'django.contrib.auth.tests.migrations',
'contenttypes': 'django.contrib.contenttypes.tests.migrations',
}
if verbosity > 0:
# Ensure any warnings captured to logging are piped through a verbose
# logging handler. If any -W options were passed explicitly on command
# line, warnings are not captured, and this has no effect.
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
warnings.filterwarnings(
'ignore',
'django.contrib.webdesign will be removed in Django 2.0.',
RemovedInDjango20Warning
)
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# Load all the test model apps.
test_modules = get_test_modules()
# Reduce given test labels to just the app module path
test_labels_set = set()
for label in test_labels:
bits = label.split('.')
if bits[:2] == ['django', 'contrib']:
bits = bits[:3]
else:
bits = bits[:1]
test_labels_set.add('.'.join(bits))
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = '.'.join([modpath, module_name])
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
if not test_labels:
module_found_in_labels = True
else:
module_found_in_labels = any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set)
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
try:
# Removing the temporary TEMP_DIR. Ensure we pass in unicode
# so that it will successfully remove temp trees containing
# non-ASCII filenames on Windows. (We're assuming the temp dir
# name itself does not contain non-ASCII characters.)
shutil.rmtree(six.text_type(TEMP_DIR))
except OSError:
print('Failed to remove temp directory: %s' % TEMP_DIR)
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, keepdb, reverse, test_labels, debug_sql):
state = setup(verbosity, test_labels)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
)
# Catch warnings thrown in test DB setup -- remove in Django 1.9
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
"Custom SQL location '<app_label>/models/sql' is deprecated, "
"use '<app_label>/sql' instead.",
RemovedInDjango19Warning
)
warnings.filterwarnings(
'ignore',
'initial_data fixtures are deprecated. Use data migrations instead.',
RemovedInDjango19Warning
)
failures = test_runner.run_tests(
test_labels or get_installed(), extra_tests=extra_tests)
teardown(state)
return failures
def bisect_tests(bisection_label, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
parser = ArgumentParser(description="Run the Django test suite.")
parser.add_argument('modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".')
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
parser.add_argument(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_argument(
'-k', '--keepdb', action='store_true', dest='keepdb', default=False,
help='Tells Django to preserve the test database between runs.')
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.')
parser.add_argument('--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.')
parser.add_argument('--pair',
help='Run the test suite in pairs with the named test to find problem '
'pairs.')
parser.add_argument('--reverse', action='store_true', default=False,
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.')
parser.add_argument('--liveserver',
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
'is localhost:8081.')
parser.add_argument(
'--selenium', action='store_true', dest='selenium', default=False,
help='Run the Selenium tests as well (if Selenium is installed)')
parser.add_argument(
'--debug-sql', action='store_true', dest='debug_sql', default=False,
help='Turn on the SQL query logger within tests')
options = parser.parse_args()
# mock is a required dependency
try:
from django.test import mock # NOQA
except ImportError:
print(
"Please install test dependencies first: \n"
"$ pip install -r requirements/py%s.txt" % sys.version_info.major
)
sys.exit(1)
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
if options.selenium:
os.environ['DJANGO_SELENIUM_TESTS'] = '1'
if options.bisect:
bisect_tests(options.bisect, options, options.modules)
elif options.pair:
paired_tests(options.pair, options, options.modules)
else:
failures = django_tests(options.verbosity, options.interactive,
options.failfast, options.keepdb,
options.reverse, options.modules,
options.debug_sql)
if failures:
sys.exit(bool(failures))
| {
"content_hash": "2c49bae284a176200291c52c36391906",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 100,
"avg_line_length": 37.8056206088993,
"alnum_prop": 0.6331536889054079,
"repo_name": "runekaagaard/django-contrib-locking",
"id": "f3ec6407c9f60f7ed9c2e0b1448190ae6e17309d",
"size": "16165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/runtests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53566"
},
{
"name": "JavaScript",
"bytes": "106009"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10638047"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
} |
from niprov.dependencies import Dependencies
def inspect(location, dependencies=Dependencies()):
fileFactory = dependencies.getFileFactory()
return fileFactory.locatedAt(location).inspect()
| {
"content_hash": "d593dd8c5291996ddbcc8a6d318a0f9e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 52,
"avg_line_length": 25.25,
"alnum_prop": 0.7920792079207921,
"repo_name": "ilogue/niprov",
"id": "42874b5401f608a2439954c54b5cc4b2d895297e",
"size": "244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "niprov/inspection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1187"
},
{
"name": "HTML",
"bytes": "2160"
},
{
"name": "JavaScript",
"bytes": "16060"
},
{
"name": "Mako",
"bytes": "6445"
},
{
"name": "Python",
"bytes": "327793"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
} |
import re
from sfa.util.xrn import Xrn, get_authority
# temporary helper functions to use this module instead of namespace
def hostname_to_hrn (auth, login_base, hostname):
return PlXrn(auth=auth+'.'+login_base,hostname=hostname).get_hrn()
def hostname_to_urn(auth, login_base, hostname):
return PlXrn(auth=auth+'.'+login_base,hostname=hostname).get_urn()
def slicename_to_hrn (auth_hrn, slicename):
return PlXrn(auth=auth_hrn,slicename=slicename).get_hrn()
def email_to_hrn (auth_hrn, email):
return PlXrn(auth=auth_hrn, email=email).get_hrn()
def hrn_to_pl_slicename (hrn):
return PlXrn(xrn=hrn,type='slice').pl_slicename()
# removed-dangerous - was used for non-slice objects
#def hrn_to_pl_login_base (hrn):
# return PlXrn(xrn=hrn,type='slice').pl_login_base()
def hrn_to_pl_authname (hrn):
return PlXrn(xrn=hrn,type='any').pl_authname()
def xrn_to_hostname(hrn):
return Xrn.unescape(PlXrn(xrn=hrn, type='node').get_leaf())
# helpers to handle external objects created via federation
def top_auth (hrn):
return hrn.split('.')[0]
def hash_loginbase(site_hrn):
if len(site_hrn) <= 12:
return site_hrn.replace('.','8')
ratio = float(12) / len(site_hrn)
auths_tab = site_hrn.split('.')
auths_tab2 = []
for auth in auths_tab:
auth2 = auth[:int(len(auth)*ratio)]
auths_tab2.append(auth2)
return '8'.join(auths_tab2)
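# Hedged usage sketch (not part of the original module): hash_loginbase() keeps
# a 12-character budget by trimming each dotted component proportionally and
# joining the pieces with '8'. The sample hrns are made-up examples.
def _demo_hash_loginbase():
    assert hash_loginbase('plc.site') == 'plc8site'                 # short: '.' -> '8'
    assert hash_loginbase('plc.princeton.codeen') == 'p8princ8cod'  # trimmed to fit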
class PlXrn (Xrn):
@staticmethod
def site_hrn (auth, login_base):
return '.'.join([auth,login_base])
def __init__ (self, auth=None, hostname=None, slicename=None, email=None, interface=None, **kwargs):
#def hostname_to_hrn(auth_hrn, login_base, hostname):
if hostname is not None:
self.type='node'
# keep only the first part of the DNS name
#self.hrn='.'.join( [auth,hostname.split(".")[0] ] )
# escape the '.' in the hostname
self.hrn='.'.join( [auth,Xrn.escape(hostname)] )
self.hrn_to_urn()
#def slicename_to_hrn(auth_hrn, slicename):
elif slicename is not None:
self.type='slice'
# split at the first _
parts = slicename.split("_",1)
self.hrn = ".".join([auth] + parts )
self.hrn_to_urn()
#def email_to_hrn(auth_hrn, email):
elif email is not None:
self.type='person'
# keep only the part before '@' and replace special chars with _
self.hrn='.'.join([auth,email.split('@')[0].replace(".", "_").replace("+", "_")])
self.hrn_to_urn()
elif interface is not None:
self.type = 'interface'
self.hrn = auth + '.' + interface
self.hrn_to_urn()
else:
Xrn.__init__ (self,**kwargs)
#def hrn_to_pl_slicename(hrn):
def pl_slicename (self):
self._normalize()
leaf = self.leaf
sliver_id_parts = leaf.split(':')
name = sliver_id_parts[0]
name = re.sub('[^a-zA-Z0-9_]', '', name)
return self.pl_login_base() + '_' + name
#def hrn_to_pl_authname(hrn):
def pl_authname (self):
self._normalize()
return self.authority[-1]
def interface_name(self):
self._normalize()
return self.leaf
def pl_login_base (self):
self._normalize()
if self.type and self.type.startswith('authority'):
base = self.leaf
else:
base = self.authority[-1]
# Fix up names of GENI Federates
base = base.lower()
base = re.sub('[\\\\]*[^a-zA-Z0-9]', '', base)
return base
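# Hedged usage sketch (not part of the original module): round-trips a slice
# name through the helpers above. 'plc' and 'plc_myslice' are made-up values,
# and the expected strings assume the usual Xrn hrn/authority behaviour.
def _demo_plxrn_roundtrip():
    hrn = slicename_to_hrn('plc', 'plc_myslice')
    # expected hrn: 'plc.plc.myslice' (auth plus the two halves of the slicename)
    return hrn_to_pl_slicename(hrn)  # expected: 'plc_myslice'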
| {
"content_hash": "aee023f7c60cf82ff114061bbdd54da8",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 104,
"avg_line_length": 35.37864077669903,
"alnum_prop": 0.5826015367727772,
"repo_name": "onelab-eu/sfa",
"id": "1b0e47455ced61f983b1992647be2ac79aea3a03",
"size": "3682",
"binary": false,
"copies": "2",
"ref": "refs/heads/geni-v3",
"path": "sfa/planetlab/plxrn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "424"
},
{
"name": "Makefile",
"bytes": "14208"
},
{
"name": "Python",
"bytes": "1439281"
},
{
"name": "Shell",
"bytes": "19422"
},
{
"name": "XSLT",
"bytes": "15293"
}
],
"symlink_target": ""
} |
from objects.resources.ConfigReader import ConfigReader
class GameScreenLocalization(ConfigReader):
def __init__(self, config, section_name):
ConfigReader.__init__(self, config, section_name)
@property
def fuel_label(self):
return self.get_config_property('play_screen_fuel')
@property
def shield_label(self):
return self.get_config_property('play_screen_shield')
| {
"content_hash": "200626f9ce97691903f8a3665c5bc837",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 61,
"avg_line_length": 29.428571428571427,
"alnum_prop": 0.6990291262135923,
"repo_name": "Superzer0/pyRiverRaid",
"id": "d0af21de9a5cc8c09eef47250ea77a3a59830e95",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "objects/resources/i18n/game_screen_localization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104662"
}
],
"symlink_target": ""
} |
from protocols.forms import forms
from core.utils import VESSELS
class GrowForm(forms.VerbForm):
name = "Grow"
slug = "grow"
edit_to_what = forms.CharField(required = False, help_text = 'sample, mastermix, tube, etc')
duration = forms.IntegerField(help_text='this is the minimal time this should take', initial = 'sec')
vessel_type = forms.ChoiceField(required = False, choices = VESSELS)
remarks = forms.CharField(required = False)
describe_where = forms.CharField(required = False, help_text = 'bench, desktop, rotator, etc')
comment_why = forms.CharField(required = False)
| {
"content_hash": "76260adfa91114fe525291952a6e1373",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 105,
"avg_line_length": 43.785714285714285,
"alnum_prop": 0.7096247960848288,
"repo_name": "Bionetbook/bionetbook",
"id": "7431b4e4bc942227e1d29afcb6847599917a84fb",
"size": "613",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bnbapp/bionetbook/protocols/forms/verbs/grow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1584"
},
{
"name": "CSS",
"bytes": "955489"
},
{
"name": "HTML",
"bytes": "1662331"
},
{
"name": "JavaScript",
"bytes": "8620958"
},
{
"name": "Makefile",
"bytes": "1215"
},
{
"name": "Python",
"bytes": "1238882"
}
],
"symlink_target": ""
} |
"""
Pythonifier for IntOpticDiag Table/View
"""
from jnpr.junos.factory import loadyaml
from os.path import splitext
_YAML_ = splitext(__file__)[0] + '.yml'
globals().update(loadyaml(_YAML_))
| {
"content_hash": "a9fa0da85a7de018c5b77021c17af652",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 27.428571428571427,
"alnum_prop": 0.71875,
"repo_name": "mith1979/ansible_automation",
"id": "a61c9bcc60308596a156a14d1802b97b5f5f3b5d",
"size": "192",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib/python2.7/site-packages/jnpr/junos/op/intopticdiag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
"""Application configuration."""
import os
class Config(object):
"""Base configuration."""
SECRET_KEY = os.environ.get('PP_CALCULATOR_SECRET', 'secret-key') # TODO: Change me
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
ASSETS_DEBUG = False
DEBUG_TB_ENABLED = False # Disable Debug toolbar
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
DEBUG_TB_ENABLED = False # Disable Debug toolbar
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
# Put the db file in project root
DEBUG_TB_ENABLED = True
ASSETS_DEBUG = True # Don't bundle/minify static assets
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class TestConfig(Config):
"""Test configuration."""
TESTING = True
DEBUG = True
BCRYPT_LOG_ROUNDS = 4 # For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds"
WTF_CSRF_ENABLED = False # Allows form testing
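# Hedged usage sketch (not part of the original settings module): a typical
# Flask app factory would pick one of the classes above by name and pass it to
# app.config.from_object(). The 'env' keys below are an assumption, not an API
# this project necessarily exposes.
def _demo_select_config(env='dev'):
    configs = {'prod': ProdConfig, 'dev': DevConfig, 'test': TestConfig}
    return configs[env]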
| {
"content_hash": "36ba5ede0375deba6508464b4250b3ea",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 101,
"avg_line_length": 28.651162790697676,
"alnum_prop": 0.6566558441558441,
"repo_name": "oanise93/purchasing-power-calculator",
"id": "ee17b7100a33f0deee43f2495b8d90cda6cafdff",
"size": "1256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pp_calculator/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "596543"
},
{
"name": "HTML",
"bytes": "143240"
},
{
"name": "JavaScript",
"bytes": "941863"
},
{
"name": "Python",
"bytes": "11956"
}
],
"symlink_target": ""
} |
"""
Example showing use of unicode and UTF-8 encoding.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import pygraphviz as pgv
# specify UTF-8 encoding (it is the default)
A=pgv.AGraph(encoding='UTF-8')
# nodes, attributes, etc can be strings or unicode
A.add_node(1,label='plain string')
A.add_node(2,label='unicode')
# you can enter unicode text as
hello='Здравствуйте!'
A.add_node(3,label=hello)
# or using unicode code points
hello='\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435!'
A.add_node(hello) # unicode node label
goodbye="До свидания"
A.add_edge(1,hello,key=goodbye)
A.add_edge("שלום",hello)
#A.add_edge(1,3,hello="こんにちは / コンニチハ")
A.add_edge(1,"こんにちは")
print(A) # print to screen
A.write('utf8.dot') # write to utf8.dot
| {
"content_hash": "bd64b82ac212cae89dc0908635559b96",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 81,
"avg_line_length": 25.705882352941178,
"alnum_prop": 0.7311212814645309,
"repo_name": "vagdevik/SE2017",
"id": "604bd80008dcaba92655769e982de9e6dd592355",
"size": "954",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "se2017/share/doc/pygraphviz-1.3.1/examples/utf8_encoding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "469412"
},
{
"name": "HTML",
"bytes": "1033241"
},
{
"name": "JavaScript",
"bytes": "742343"
},
{
"name": "PowerShell",
"bytes": "471"
},
{
"name": "Python",
"bytes": "111976"
},
{
"name": "Ruby",
"bytes": "2373"
},
{
"name": "Shell",
"bytes": "3230"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('skaba', '0011_auto_20160831_1516'),
]
operations = [
migrations.CreateModel(
name='Guildpoints',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('points', models.IntegerField(default=1)),
('guild', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='skaba.Guild')),
],
),
]
| {
"content_hash": "b18a2f7982c9ee68119f3a7def7dd64a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 131,
"avg_line_length": 30.636363636363637,
"alnum_prop": 0.6038575667655787,
"repo_name": "Jonneitapuro/isoskaba2",
"id": "efaf3e10b66773f330625e5f7f0b41f0f031148f",
"size": "746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skaba/migrations/0012_guildpoints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5011"
},
{
"name": "HTML",
"bytes": "24843"
},
{
"name": "JavaScript",
"bytes": "1510"
},
{
"name": "Python",
"bytes": "50453"
},
{
"name": "Shell",
"bytes": "525"
}
],
"symlink_target": ""
} |
import re
import os
import sys
import types
from copy import copy
from distutils.ccompiler import *
from distutils import ccompiler
from distutils.errors import DistutilsExecError, DistutilsModuleError, \
DistutilsPlatformError
from distutils.sysconfig import customize_compiler
from distutils.version import LooseVersion
from numpy.distutils import log
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
quote_args, msvc_on_amd64
from numpy.distutils.compat import get_exception
# hack to set compiler optimizing options. Needs to be integrated with something.
import distutils.sysconfig
_old_init_posix = distutils.sysconfig._init_posix
def _new_init_posix():
_old_init_posix()
distutils.sysconfig._config_vars['OPT'] = '-Wall -g -O0'
#distutils.sysconfig._init_posix = _new_init_posix
def replace_method(klass, method_name, func):
if sys.version_info[0] < 3:
m = types.MethodType(func, None, klass)
else:
# Py3k no longer has unbound methods, so MethodType does not work here
m = lambda self, *args, **kw: func(self, *args, **kw)
setattr(klass, method_name, m)
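# Hedged standalone sketch (not part of the original module): replace_method()
# monkey-patches a callable onto an existing class so that every instance,
# present or future, sees it as a bound method on both Python 2 and 3.
def _demo_replace_method():
    class _Greeter(object):
        pass
    replace_method(_Greeter, 'greet', lambda self: 'hi')
    return _Greeter().greet()  # -> 'hi'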
# Using customized CCompiler.spawn.
def CCompiler_spawn(self, cmd, display=None):
"""
Execute a command in a sub-process.
Parameters
----------
cmd : str
The command to execute.
display : str or sequence of str, optional
The text to add to the log file kept by `numpy.distutils`.
If not given, `display` is equal to `cmd`.
Returns
-------
None
Raises
------
DistutilsExecError
If the command failed, i.e. the exit status was not 0.
"""
if display is None:
display = cmd
if is_sequence(display):
display = ' '.join(list(display))
log.info(display)
s,o = exec_command(cmd)
if s:
if is_sequence(cmd):
cmd = ' '.join(list(cmd))
print(o)
if re.search('Too many open files', o):
msg = '\nTry rerunning setup command until build succeeds.'
else:
msg = ''
raise DistutilsExecError('Command "%s" failed with exit status %d%s' % (cmd, s, msg))
replace_method(CCompiler, 'spawn', CCompiler_spawn)
def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
"""
Return the name of the object files for the given source files.
Parameters
----------
source_filenames : list of str
The list of paths to source files. Paths can be either relative or
absolute, this is handled transparently.
strip_dir : bool, optional
Whether to strip the directory from the returned paths. If True,
the file name prepended by `output_dir` is returned. Default is False.
output_dir : str, optional
If given, this path is prepended to the returned paths to the
object files.
Returns
-------
obj_names : list of str
The list of paths to the object files corresponding to the source
files in `source_filenames`.
"""
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(os.path.normpath(src_name))
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if base.startswith('..'):
# Resolve starting relative path components, middle ones
# (if any) have been handled by os.path.normpath above.
i = base.rfind('..')+2
d = base[:i]
d = os.path.basename(os.path.abspath(d))
base = d + base[i:]
if ext not in self.src_extensions:
raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name))
if strip_dir:
base = os.path.basename(base)
obj_name = os.path.join(output_dir,base + self.obj_extension)
obj_names.append(obj_name)
return obj_names
replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)
def CCompiler_compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""
Compile one or more source files.
Please refer to the Python distutils API reference for more details.
Parameters
----------
sources : list of str
A list of filenames
output_dir : str, optional
Path to the output directory.
macros : list of tuples
A list of macro definitions.
include_dirs : list of str, optional
The directories to add to the default include file search path for
this compilation only.
debug : bool, optional
Whether or not to output debug symbols in or alongside the object
file(s).
extra_preargs, extra_postargs : ?
Extra pre- and post-arguments.
depends : list of str, optional
A list of file names that all targets depend on.
Returns
-------
objects : list of str
A list of object file names, one per source file `sources`.
Raises
------
CompileError
If compilation fails.
"""
# This method is effective only with Python >=2.3 distutils.
# Any changes here should be applied also to fcompiler.compile
# method to support pre Python 2.3 distutils.
if not sources:
return []
# FIXME:RELATIVE_IMPORT
if sys.version_info[0] < 3:
from fcompiler import FCompiler
else:
from numpy.distutils.fcompiler import FCompiler
if isinstance(self, FCompiler):
display = []
for fc in ['f77','f90','fix']:
fcomp = getattr(self,'compiler_'+fc)
if fcomp is None:
continue
display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp)))
display = '\n'.join(display)
else:
ccomp = self.compiler_so
display = "C compiler: %s\n" % (' '.join(ccomp),)
log.info(display)
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
display = "compile options: '%s'" % (' '.join(cc_args))
if extra_postargs:
display += "\nextra options: '%s'" % (' '.join(extra_postargs))
log.info(display)
# build any sources in same order as they were originally specified
# especially important for fortran .f90 files using modules
if isinstance(self, FCompiler):
objects_to_build = build.keys()
for obj in objects:
if obj in objects_to_build:
src, ext = build[obj]
if self.compiler_type=='absoft':
obj = cyg2win32(obj)
src = cyg2win32(src)
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
else:
for obj, (src, ext) in build.items():
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# Return *all* object filenames, not just the ones we just built.
return objects
replace_method(CCompiler, 'compile', CCompiler_compile)
def CCompiler_customize_cmd(self, cmd, ignore=()):
"""
Customize compiler using distutils command.
Parameters
----------
cmd : class instance
An instance inheriting from `distutils.cmd.Command`.
ignore : sequence of str, optional
List of `CCompiler` commands (without ``'set_'``) that should not be
altered. Strings that are checked for are:
``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',
'rpath', 'link_objects')``.
Returns
-------
None
"""
log.info('customize %s using %s' % (self.__class__.__name__,
cmd.__class__.__name__))
def allow(attr):
return getattr(cmd, attr, None) is not None and attr not in ignore
if allow('include_dirs'):
self.set_include_dirs(cmd.include_dirs)
if allow('define'):
for (name,value) in cmd.define:
self.define_macro(name, value)
if allow('undef'):
for macro in cmd.undef:
self.undefine_macro(macro)
if allow('libraries'):
self.set_libraries(self.libraries + cmd.libraries)
if allow('library_dirs'):
self.set_library_dirs(self.library_dirs + cmd.library_dirs)
if allow('rpath'):
self.set_runtime_library_dirs(cmd.rpath)
if allow('link_objects'):
self.set_link_objects(cmd.link_objects)
replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)
def _compiler_to_string(compiler):
props = []
mx = 0
keys = compiler.executables.keys()
for key in ['version','libraries','library_dirs',
'object_switch','compile_switch',
'include_dirs','define','undef','rpath','link_objects']:
if key not in keys:
keys.append(key)
for key in keys:
if hasattr(compiler,key):
v = getattr(compiler, key)
mx = max(mx,len(key))
props.append((key,repr(v)))
lines = []
format = '%-' + repr(mx+1) + 's = %s'
for prop in props:
lines.append(format % prop)
return '\n'.join(lines)
def CCompiler_show_customization(self):
"""
Print the compiler customizations to stdout.
Parameters
----------
None
Returns
-------
None
Notes
-----
Printing is only done if the distutils log threshold is < 2.
"""
if 0:
for attrname in ['include_dirs','define','undef',
'libraries','library_dirs',
'rpath','link_objects']:
attr = getattr(self,attrname,None)
if not attr:
continue
log.info("compiler '%s' is set to %s" % (attrname,attr))
try:
self.get_version()
except:
pass
if log._global_log.threshold<2:
print('*'*80)
print(self.__class__)
print(_compiler_to_string(self))
print('*'*80)
replace_method(CCompiler, 'show_customization', CCompiler_show_customization)
def CCompiler_customize(self, dist, need_cxx=0):
"""
Do any platform-specific customization of a compiler instance.
This method calls `distutils.sysconfig.customize_compiler` for
platform-specific customization, as well as optionally remove a flag
to suppress spurious warnings in case C++ code is being compiled.
Parameters
----------
dist : object
This parameter is not used for anything.
need_cxx : bool, optional
Whether or not C++ has to be compiled. If so (True), the
``"-Wstrict-prototypes"`` option is removed to prevent spurious
warnings. Default is False.
Returns
-------
None
Notes
-----
All the default options used by distutils can be extracted with::
from distutils import sysconfig
sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
'CCSHARED', 'LDSHARED', 'SO')
"""
# See FCompiler.customize for suggested usage.
log.info('customize %s' % (self.__class__.__name__))
customize_compiler(self)
if need_cxx:
# In general, distutils uses -Wstrict-prototypes, but this option is
# not valid for C++ code, only for C. Remove it if it's there to
# avoid a spurious warning on every compilation. All the default
# options used by distutils can be extracted with:
# from distutils import sysconfig
# sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
# 'CCSHARED', 'LDSHARED', 'SO')
try:
self.compiler_so.remove('-Wstrict-prototypes')
except (AttributeError, ValueError):
pass
if hasattr(self,'compiler') and 'cc' in self.compiler[0]:
if not self.compiler_cxx:
if self.compiler[0].startswith('gcc'):
a, b = 'gcc', 'g++'
else:
a, b = 'cc', 'c++'
self.compiler_cxx = [self.compiler[0].replace(a,b)]\
+ self.compiler[1:]
else:
if hasattr(self,'compiler'):
log.warn("#### %s #######" % (self.compiler,))
log.warn('Missing compiler_cxx fix for '+self.__class__.__name__)
return
replace_method(CCompiler, 'customize', CCompiler_customize)
def simple_version_match(pat=r'[-.\d]+', ignore='', start=''):
"""
Simple matching of version numbers, for use in CCompiler and FCompiler.
Parameters
----------
pat : str, optional
A regular expression matching version numbers.
Default is ``r'[-.\\d]+'``.
ignore : str, optional
A regular expression matching patterns to skip.
Default is ``''``, in which case nothing is skipped.
start : str, optional
A regular expression matching the start of where to start looking
for version numbers.
Default is ``''``, in which case searching is started at the
beginning of the version string given to `matcher`.
Returns
-------
matcher : callable
A function that is appropriate to use as the ``.version_match``
attribute of a `CCompiler` class. `matcher` takes a single parameter,
a version string.
"""
def matcher(self, version_string):
# version string may appear in the second line, so getting rid
# of new lines:
version_string = version_string.replace('\n',' ')
pos = 0
if start:
m = re.match(start, version_string)
if not m:
return None
pos = m.end()
while 1:
m = re.search(pat, version_string[pos:])
if not m:
return None
if ignore and re.match(ignore, m.group(0)):
pos = m.end()
continue
break
return m.group(0)
return matcher
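# Hedged usage sketch (not part of the original module): a matcher produced by
# simple_version_match() is normally installed as a CCompiler method; called
# directly, pass None for self. The compiler banner is a made-up example.
def _demo_simple_version_match():
    matcher = simple_version_match(start='gcc')
    return matcher(None, 'gcc (GCC) 4.8.5 20150623')  # expected: '4.8.5'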
def CCompiler_get_version(self, force=False, ok_status=[0]):
"""
Return compiler version, or None if compiler is not available.
Parameters
----------
force : bool, optional
If True, force a new determination of the version, even if the
compiler already has a version attribute. Default is False.
ok_status : list of int, optional
The list of status values returned by the version look-up process
for which a version string is returned. If the status value is not
in `ok_status`, None is returned. Default is ``[0]``.
Returns
-------
version : str or None
Version string, in the format of `distutils.version.LooseVersion`.
"""
if not force and hasattr(self,'version'):
return self.version
self.find_executables()
try:
version_cmd = self.version_cmd
except AttributeError:
return None
if not version_cmd or not version_cmd[0]:
return None
try:
matcher = self.version_match
except AttributeError:
try:
pat = self.version_pattern
except AttributeError:
return None
def matcher(version_string):
m = re.match(pat, version_string)
if not m:
return None
version = m.group('version')
return version
status, output = exec_command(version_cmd,use_tee=0)
version = None
if status in ok_status:
version = matcher(output)
if version:
version = LooseVersion(version)
self.version = version
return version
replace_method(CCompiler, 'get_version', CCompiler_get_version)
def CCompiler_cxx_compiler(self):
"""
Return the C++ compiler.
Parameters
----------
None
Returns
-------
cxx : class instance
The C++ compiler, as a `CCompiler` instance.
"""
if self.compiler_type=='msvc': return self
cxx = copy(self)
cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:]
if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]:
# AIX needs the ld_so_aix script included with Python
cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \
+ cxx.linker_so[2:]
else:
cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]
return cxx
replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)
compiler_class['intel'] = ('intelccompiler','IntelCCompiler',
"Intel C Compiler for 32-bit applications")
compiler_class['intele'] = ('intelccompiler','IntelItaniumCCompiler',
"Intel C Itanium Compiler for Itanium-based applications")
ccompiler._default_compilers += (('linux.*','intel'),('linux.*','intele'))
if sys.platform == 'win32':
compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"\
"(for MSC built Python)")
if mingw32():
# On windows platforms, we want to default to mingw32 (gcc)
# because msvc can't build blitz stuff.
log.info('Setting mingw32 as default compiler for nt.')
ccompiler._default_compilers = (('nt', 'mingw32'),) \
+ ccompiler._default_compilers
_distutils_new_compiler = new_compiler
def new_compiler (plat=None,
compiler=None,
verbose=0,
dry_run=0,
force=0):
# Try C compilers from numpy.distutils first.
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError(msg)
module_name = "numpy.distutils." + module_name
try:
__import__ (module_name)
except ImportError:
msg = str(get_exception())
log.info('%s in numpy.distutils; trying from distutils',
str(msg))
module_name = module_name[6:]
try:
__import__(module_name)
except ImportError:
msg = str(get_exception())
raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
module_name)
try:
module = sys.modules[module_name]
klass = vars(module)[class_name]
except KeyError:
raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name))
compiler = klass(None, dry_run, force)
log.debug('new_compiler returns %s' % (klass))
return compiler
ccompiler.new_compiler = new_compiler
_distutils_gen_lib_options = gen_lib_options
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
library_dirs = quote_args(library_dirs)
runtime_library_dirs = quote_args(runtime_library_dirs)
r = _distutils_gen_lib_options(compiler, library_dirs,
runtime_library_dirs, libraries)
lib_opts = []
for i in r:
if is_sequence(i):
lib_opts.extend(list(i))
else:
lib_opts.append(i)
return lib_opts
ccompiler.gen_lib_options = gen_lib_options
# Also fix up the various compiler modules, which do
# from distutils.ccompiler import gen_lib_options
# Don't bother with mwerks, as we don't support Classic Mac.
for _cc in ['msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
_m = sys.modules.get('distutils.'+_cc+'compiler')
if _m is not None:
setattr(_m, 'gen_lib_options', gen_lib_options)
_distutils_gen_preprocess_options = gen_preprocess_options
def gen_preprocess_options (macros, include_dirs):
include_dirs = quote_args(include_dirs)
return _distutils_gen_preprocess_options(macros, include_dirs)
ccompiler.gen_preprocess_options = gen_preprocess_options
##Fix distutils.util.split_quoted:
# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears
# that removing this fix causes f2py problems on Windows XP (see ticket #723).
# Specifically, on WinXP when gfortran is installed in a directory path, which
# contains spaces, then f2py is unable to find it.
import re
import string
_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
_has_white_re = re.compile(r'\s')
def split_quoted(s):
s = s.strip()
words = []
pos = 0
while s:
m = _wordchars_re.match(s, pos)
end = m.end()
if end == len(s):
words.append(s[:end])
break
if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
words.append(s[:end]) # we definitely have a word delimiter
s = s[end:].lstrip()
pos = 0
elif s[end] == '\\': # preserve whatever is being escaped;
# will become part of the current word
s = s[:end] + s[end+1:]
pos = end+1
else:
if s[end] == "'": # slurp singly-quoted string
m = _squote_re.match(s, end)
elif s[end] == '"': # slurp doubly-quoted string
m = _dquote_re.match(s, end)
else:
raise RuntimeError("this can't happen (bad char '%c')" % s[end])
if m is None:
raise ValueError("bad string (mismatched %s quotes?)" % s[end])
(beg, end) = m.span()
if _has_white_re.search(s[beg+1:end-1]):
s = s[:beg] + s[beg+1:end-1] + s[end:]
pos = m.end() - 2
else:
# Keeping quotes when a quoted word does not contain
# white-space. XXX: send a patch to distutils
pos = m.end()
if pos >= len(s):
words.append(s)
break
return words
ccompiler.split_quoted = split_quoted
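# Hedged behaviour sketch (not part of the original module): unlike the stock
# distutils split_quoted(), the version above removes the quotes only when the
# quoted word contains whitespace, so a path with spaces survives as one token.
def _demo_split_quoted():
    return split_quoted('gfortran "C:\\Program Files\\gfortran" -O2')
    # expected: ['gfortran', 'C:\\Program Files\\gfortran', '-O2']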
##Fix distutils.util.split_quoted:
# define DISTUTILS_USE_SDK when necessary to workaround distutils/msvccompiler.py bug
msvc_on_amd64()
| {
"content_hash": "a5b59f7b878a976a761595f759960e18",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 97,
"avg_line_length": 34.610942249240125,
"alnum_prop": 0.5891367348731009,
"repo_name": "Ademan/NumPy-GSoC",
"id": "0889d11a035a81a899a358abfb83d88b73219e65",
"size": "22774",
"binary": false,
"copies": "58",
"ref": "refs/heads/master",
"path": "numpy/distutils/ccompiler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5895913"
},
{
"name": "C++",
"bytes": "22396"
},
{
"name": "FORTRAN",
"bytes": "11637"
},
{
"name": "Python",
"bytes": "4391595"
},
{
"name": "Shell",
"bytes": "2483"
}
],
"symlink_target": ""
} |
import re
from setuptools import setup, Command
import subprocess
def read_module_contents():
with open('src/skynetd/__init__.py') as skynet_init:
return skynet_init.read()
module_file = read_module_contents()
metadata = dict(re.findall(r"__([a-z]+)__\s*=\s*'([^']+)'", module_file))
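# Hedged sketch (not part of the original setup.py): the pattern above turns
# dunder assignments in src/skynetd/__init__.py into a metadata dict.
def _demo_metadata_regex():
    sample = "__version__ = '1.2.3'\n__author__ = 'someone'"
    return dict(re.findall(r"__([a-z]+)__\s*=\s*'([^']+)'", sample))
    # expected: {'version': '1.2.3', 'author': 'someone'}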
class BumpCommand(Command):
""" Bump the __version__ number and commit all changes. """
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
version = metadata['version'].split('.')
version[-1] = str(int(version[-1]) + 1) # Bump the final part
try:
print('old version: %s new version: %s' %
(metadata['version'], '.'.join(version)))
raw_input('Press enter to confirm, or ctrl-c to exit >')
except KeyboardInterrupt:
raise SystemExit("\nNot proceeding")
old = "__version__ = '%s'" % metadata['version']
new = "__version__ = '%s'" % '.'.join(version)
module_file = read_module_contents()
with open('src/skynetd/__init__.py', 'w') as fileh:
fileh.write(module_file.replace(old, new))
# Commit everything with a standard commit message
cmd = ['git', 'commit', '-a', '-s', '-m',
'version %s' % '.'.join(version)]
print(' '.join(cmd))
subprocess.check_call(cmd)
class ReleaseCommand(Command):
""" Tag and push a new release. """
user_options = [('sign', 's', 'GPG-sign the Git tag and release files')]
def initialize_options(self):
self.sign = False
def finalize_options(self):
pass
def run(self):
# Create Git tag
version = metadata['version']
tag_name = 'v%s' % version
cmd = ['git', 'tag', '-a', tag_name, '-m', 'version %s' % version]
if self.sign:
cmd.append('-s')
print(' '.join(cmd))
subprocess.check_call(cmd)
# Push Git tag to origin remote
cmd = ['git', 'push', 'origin', tag_name]
print(' '.join(cmd))
subprocess.check_call(cmd)
# Push package to pypi
# TODO: package is not yet on pypi.
# cmd = ['python', 'setup.py', 'sdist', 'upload']
# if self.sign:
# cmd.append('--sign')
# print(' '.join(cmd))
# subprocess.check_call(cmd)
# Push master to the remote
cmd = ['git', 'push', 'origin', 'master']
print(' '.join(cmd))
subprocess.check_call(cmd)
setup(
name='skynet',
version=metadata['version'],
description='Skyring Node Eventing Agent',
long_description="skynet is the node eventing agent for Skyring."
" Each storage node managed by Skyring will have this agent running on"
" them. It is a daemon which listens to dbus signals, filters it, "
"processes it and pushes the filtered signals to Skyring using"
" saltstack's eventing framework. Currently this daemon has capability"
" to send basic storage related, few node process related and"
" network related events.",
# The project's main homepage.
url='https://github.com/skyrings/skynet',
# Author details
author='Darshan N',
author_email='darshan.n.2024@gmail.com',
license='Apache License, Version 2.0',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 2.7',
],
keywords='Skyring node Eventing Agent',
packages=["skynetd"],
package_dir={
'skynetd': 'src/skynetd'
},
entry_points={
'console_scripts': [
'skynetd=skynetd.skynetd:main',
],
},
cmdclass={'bump': BumpCommand, 'release': ReleaseCommand},
)
| {
"content_hash": "d3c59ea1911b7db46a35b200dd4ad376",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 76,
"avg_line_length": 29.134328358208954,
"alnum_prop": 0.5717213114754098,
"repo_name": "skyrings/skynet",
"id": "ed25561f7e309b595d4fb18752c2f9d7180c8ca2",
"size": "4484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "735"
},
{
"name": "Makefile",
"bytes": "1256"
},
{
"name": "Python",
"bytes": "43994"
},
{
"name": "Shell",
"bytes": "4522"
}
],
"symlink_target": ""
} |
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1beta1_data_volume_source_registry import V1beta1DataVolumeSourceRegistry
class TestV1beta1DataVolumeSourceRegistry(unittest.TestCase):
""" V1beta1DataVolumeSourceRegistry unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1DataVolumeSourceRegistry(self):
"""
Test V1beta1DataVolumeSourceRegistry
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.v1beta1_data_volume_source_registry.V1beta1DataVolumeSourceRegistry()
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "487deb6b3909be645277916c4f82c721",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 102,
"avg_line_length": 24.19047619047619,
"alnum_prop": 0.7165354330708661,
"repo_name": "kubevirt/client-python",
"id": "040c7bc209c8d8ba946f7b46751b7d32cb5b1410",
"size": "1033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_v1beta1_data_volume_source_registry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4224980"
},
{
"name": "Shell",
"bytes": "2209"
}
],
"symlink_target": ""
} |
import os
from robot import utils
from robot.errors import DataError, FrameworkError
from robot.output import LOGGER, loggerhelper
class _BaseSettings(object):
_cli_opts = {'Name' : ('name', None),
'Doc' : ('doc', None),
'Metadata' : ('metadata', []),
'TestNames' : ('test', []),
'SuiteNames' : ('suite', []),
'SetTag' : ('settag', []),
'Include' : ('include', []),
'Exclude' : ('exclude', []),
'Critical' : ('critical', None),
'NonCritical' : ('noncritical', None),
'OutputDir' : ('outputdir', utils.abspath('.')),
'Log' : ('log', 'log.html'),
'Report' : ('report', 'report.html'),
'XUnitFile' : ('xunitfile', 'NONE'),
'SplitLog' : ('splitlog', False),
'TimestampOutputs' : ('timestampoutputs', False),
'LogTitle' : ('logtitle', None),
'ReportTitle' : ('reporttitle', None),
'ReportBackground' : ('reportbackground',
('#99FF66', '#99FF66', '#FF3333')),
'SuiteStatLevel' : ('suitestatlevel', -1),
'TagStatInclude' : ('tagstatinclude', []),
'TagStatExclude' : ('tagstatexclude', []),
'TagStatCombine' : ('tagstatcombine', []),
'TagDoc' : ('tagdoc', []),
'TagStatLink' : ('tagstatlink', []),
'RemoveKeywords' : ('removekeywords', []),
'NoStatusRC' : ('nostatusrc', False),
'MonitorWidth' : ('monitorwidth', 78),
'MonitorColors' : ('monitorcolors', 'AUTO'),
'StdOut' : ('stdout', None),
'StdErr' : ('stderr', None)}
_output_opts = ['Output', 'Log', 'Report', 'DebugFile', 'XUnitFile']
def __init__(self, options=None, log=True):
self._opts = {}
self._cli_opts = self._cli_opts.copy()
self._cli_opts.update(self._extra_cli_opts)
self._process_cli_opts(options or {}, log)
if log: LOGGER.info('Settings:\n%s' % unicode(self))
def _process_cli_opts(self, opts, log):
for name, (cli_name, default) in self._cli_opts.items():
value = opts.get(cli_name, default)
if value in [None, []]:
value = default
elif default == [] and isinstance(value, basestring):
value = [value]
self[name] = self._process_value(name, value, log)
def __setitem__(self, name, value):
if name not in self._cli_opts:
raise KeyError("Non-existing settings '%s'" % name)
self._opts[name] = value
def _process_value(self, name, value, log):
if name == 'LogLevel':
return self._process_log_level(value)
if value == self._get_default_value(name):
return value
if name in ['Name', 'Doc', 'LogTitle', 'ReportTitle']:
if name == 'Doc': value = self._escape(value)
return value.replace('_', ' ')
if name in ['Metadata', 'TagDoc']:
if name == 'Metadata': value = [self._escape(v) for v in value]
return [self._process_metadata_or_tagdoc(v) for v in value]
if name in ['Include', 'Exclude']:
return [v.replace('AND', '&').replace('_', ' ') for v in value]
if name in self._output_opts and utils.eq(value, 'NONE'):
return 'NONE'
if name == 'OutputDir':
return utils.abspath(value)
if name in ['SuiteStatLevel', 'MonitorWidth']:
return self._convert_to_positive_integer_or_default(name, value)
if name in ['Listeners', 'VariableFiles']:
return [self._split_args_from_name_or_path(item) for item in value]
if name == 'ReportBackground':
return self._process_report_background(value)
if name == 'TagStatCombine':
return [self._process_tag_stat_combine(v) for v in value]
if name == 'TagStatLink':
return [v for v in [self._process_tag_stat_link(v) for v in value] if v]
if name == 'RemoveKeywords':
return [v.upper() for v in value]
return value
def _process_log_level(self, level):
level, visible_level = self._split_log_level(level.upper())
self._opts['VisibleLogLevel'] = visible_level
return level
def _split_log_level(self, level):
if ':' in level:
level, visible_level = level.split(':', 1)
else:
visible_level = level
self._validate_log_level_and_default(level, visible_level)
return level, visible_level
def _validate_log_level_and_default(self, log_level, default):
if log_level not in loggerhelper.LEVELS:
raise DataError("Invalid log level '%s'" % log_level)
if default not in loggerhelper.LEVELS:
raise DataError("Invalid log level '%s'" % default)
if not loggerhelper.IsLogged(log_level)(default):
raise DataError("Default visible log level '%s' is lower than "
"log level '%s'" % (default, log_level))
def __getitem__(self, name):
if name not in self._opts:
raise KeyError("Non-existing setting '%s'" % name)
if name in self._output_opts:
return self._get_output_file(name)
return self._opts[name]
def _get_output_file(self, type_):
"""Returns path of the requested output file and creates needed dirs.
`type_` can be 'Output', 'Log', 'Report', 'DebugFile' or 'XUnitFile'.
"""
name = self._opts[type_]
if self._outputfile_disabled(type_, name):
return 'NONE'
name = self._process_output_name(name, type_)
path = utils.abspath(os.path.join(self['OutputDir'], name))
self._create_output_dir(os.path.dirname(path), type_)
return path
def _process_output_name(self, name, type_):
base, ext = os.path.splitext(name)
if self['TimestampOutputs']:
base = '%s-%s' % (base, utils.get_start_timestamp('', '-', ''))
ext = self._get_output_extension(ext, type_)
return base + ext
def _get_output_extension(self, ext, type_):
if ext != '':
return ext
if type_ in ['Output', 'XUnitFile']:
return '.xml'
if type_ in ['Log', 'Report']:
return '.html'
if type_ == 'DebugFile':
return '.txt'
raise FrameworkError("Invalid output file type: %s" % type_)
def _create_output_dir(self, path, type_):
try:
if not os.path.exists(path):
os.makedirs(path)
except EnvironmentError, err:
raise DataError("Creating %s file directory '%s' failed: %s"
% (type_.lower(), path, err.strerror))
def _process_metadata_or_tagdoc(self, value):
value = value.replace('_', ' ')
if ':' in value:
return value.split(':', 1)
return value, ''
def _process_report_background(self, colors):
if colors.count(':') not in [1, 2]:
LOGGER.error("Invalid report background colors '%s'." % colors)
return self._get_default_value('ReportBackground')
colors = colors.split(':')
if len(colors) == 2:
return colors[0], colors[0], colors[1]
return tuple(colors)
def _process_tag_stat_combine(self, value):
for replwhat, replwith in [('AND', '&'), ('&', ' & '), ('NOT', ' NOT ')]:
value = value.replace(replwhat, replwith)
if ':' not in value:
return value, ''
pattern, title = value.rsplit(':', 1)
return pattern, title.replace('_', ' ')
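def _demo_process_tag_stat_combine(self):
    # Hedged sketch (not part of the original class): 'tagANDanother:My_Title'
    # becomes a pattern/title pair with '&' and spaces restored.
    return self._process_tag_stat_combine('tagANDanother:My_Title')
    # expected: ('tag & another', 'My Title')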
def _process_tag_stat_link(self, value):
tokens = value.split(':')
if len(tokens) >= 3:
return tokens[0], ':'.join(tokens[1:-1]), tokens[-1]
LOGGER.error("Invalid format for option '--tagstatlink'. "
"Expected 'tag:link:title' but got '%s'." % value)
return None
def _convert_to_positive_integer_or_default(self, name, value):
value = self._convert_to_integer(name, value)
return value if value > 0 else self._get_default_value(name)
def _convert_to_integer(self, name, value):
try:
return int(value)
except ValueError:
LOGGER.error("Option '--%s' expected integer value but got '%s'. "
"Default value used instead." % (name.lower(), value))
return self._get_default_value(name)
def _get_default_value(self, name):
return self._cli_opts[name][1]
def _split_args_from_name_or_path(self, name):
if ':' not in name or os.path.exists(name):
args = []
else:
args = name.split(':')
name = args.pop(0)
# Handle absolute Windows paths with arguments
if len(name) == 1 and args[0].startswith(('/', '\\')):
name = name + ':' + args.pop(0)
if os.path.exists(name):
name = os.path.abspath(name)
return name, args
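def _demo_split_args_from_name_or_path(self):
    # Hedged sketch (not part of the original class): listener/variable-file
    # specs split on ':' unless the name is an existing path, and a one-letter
    # "name" followed by an argument starting with '/' or '\' is re-joined as
    # a Windows drive path. Both inputs are made-up, non-existing paths.
    assert self._split_args_from_name_or_path('MyListener:arg1:arg2') \
        == ('MyListener', ['arg1', 'arg2'])
    assert self._split_args_from_name_or_path('C:\\temp\\listener.py:arg') \
        == ('C:\\temp\\listener.py', ['arg'])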
def __contains__(self, setting):
return setting in self._cli_opts
def __unicode__(self):
return '\n'.join('%s: %s' % (name, self._opts[name])
for name in sorted(self._opts))
@property
def output(self):
return self._get_file('Output')
@property
def log(self):
return self._get_file('Log')
@property
def report(self):
return self._get_file('Report')
@property
def xunit(self):
return self._get_file('XUnitFile')
def _get_file(self, name):
value = self[name]
return value if value != 'NONE' else None
@property
def split_log(self):
return self['SplitLog']
@property
def status_rc(self):
return not self['NoStatusRC']
class RobotSettings(_BaseSettings):
_extra_cli_opts = {'Output' : ('output', 'output.xml'),
'LogLevel' : ('loglevel', 'INFO'),
'RunMode' : ('runmode', []),
'RunEmptySuite' : ('runemptysuite', False),
'WarnOnSkipped' : ('warnonskippedfiles', False),
'Variables' : ('variable', []),
'VariableFiles' : ('variablefile', []),
'Listeners' : ('listener', []),
'DebugFile' : ('debugfile', 'NONE')}
def is_rebot_needed(self):
return not ('NONE' == self['Log'] == self['Report'] == self['XUnitFile'])
def get_rebot_datasource_and_settings(self):
datasource = self['Output']
settings = RebotSettings(log=False)
settings._opts.update(self._opts)
for name in ['Variables', 'VariableFiles', 'Listeners']:
del(settings._opts[name])
for name in ['Include', 'Exclude', 'TestNames', 'SuiteNames', 'Metadata']:
settings._opts[name] = []
for name in ['Name', 'Doc']:
settings._opts[name] = None
settings._opts['Output'] = 'NONE'
settings._opts['LogLevel'] = 'TRACE'
settings._opts['ProcessEmptySuite'] = self['RunEmptySuite']
return datasource, settings
def _outputfile_disabled(self, type_, name):
if name == 'NONE':
return True
return self._opts['Output'] == 'NONE' and type_ != 'DebugFile'
def _escape(self, value):
return utils.escape(value)
class RebotSettings(_BaseSettings):
_extra_cli_opts = {'Output' : ('output', 'NONE'),
'LogLevel' : ('loglevel', 'TRACE'),
'ProcessEmptySuite' : ('processemptysuite', False),
'StartTime' : ('starttime', None),
'EndTime' : ('endtime', None)}
def _outputfile_disabled(self, type_, name):
return name == 'NONE'
def _escape(self, value):
return value
@property
def suite_config(self):
return {
'name': self['Name'],
'doc': self['Doc'],
'metadata': dict(self['Metadata']),
'set_tags': self['SetTag'],
'include_tags': self['Include'],
'exclude_tags': self['Exclude'],
'include_suites': self['SuiteNames'],
'include_tests': self['TestNames'],
'process_empty_suite': self['ProcessEmptySuite'],
'remove_keywords': self['RemoveKeywords'],
'log_level': self['LogLevel'],
'critical': self['Critical'],
'noncritical': self['NonCritical'],
'starttime': self['StartTime'],
'endtime': self['EndTime']
}
@property
def statistics_config(self):
return {
'suite_stat_level': self['SuiteStatLevel'],
'tag_stat_include': self['TagStatInclude'],
'tag_stat_exclude': self['TagStatExclude'],
'tag_stat_combine': self['TagStatCombine'],
'tag_stat_link': self['TagStatLink'],
'tag_doc': self['TagDoc'],
}
@property
def log_config(self):
if not self.log:
return {}
return {
'title': self['LogTitle'],
'reportURL': self._url_from_path(self.log, self.report),
'splitLogBase': os.path.basename(os.path.splitext(self.log)[0]),
'defaultLevel': self['VisibleLogLevel']
}
@property
def report_config(self):
if not self.report:
return {}
return {
'title': self['ReportTitle'],
'logURL': self._url_from_path(self.report, self.log),
'background' : self._resolve_background_colors(),
}
def _url_from_path(self, source, destination):
if not destination:
return None
return utils.get_link_path(destination, os.path.dirname(source))
def _resolve_background_colors(self):
colors = self['ReportBackground']
return {'pass': colors[0], 'nonCriticalFail': colors[1], 'fail': colors[2]}
| {
"content_hash": "ad21056d9abb8cce15819a608ee4faea",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 84,
"avg_line_length": 39.57065217391305,
"alnum_prop": 0.5190907842329351,
"repo_name": "Senseg/robotframework",
"id": "3c79ffe393431bc4f73841e6e5c192c3456a34ce",
"size": "15168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot/conf/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "716"
},
{
"name": "Java",
"bytes": "48873"
},
{
"name": "JavaScript",
"bytes": "149654"
},
{
"name": "Python",
"bytes": "1637427"
},
{
"name": "Shell",
"bytes": "1323"
}
],
"symlink_target": ""
} |
import re
import os
from PyQt5.QtWidgets import QLabel, QDoubleSpinBox, QHBoxLayout, QVBoxLayout, QDialog, QTabWidget, QWidget, \
QCheckBox, QStatusBar, QHeaderView, QTableWidgetItem, QSpinBox, QLineEdit, QComboBox, QCompleter, QPlainTextEdit
from PyQt5.QtCore import QTimer, Qt
from logging import getLogger
import yaml
from .. import make_icon_button, get_icon, BasicTable, get_monospace_font, show_error, CommitableComboBoxWithHistory, \
request_confirmation
logger = getLogger(__name__)
class StateTable(BasicTable):
COLUMNS = [
BasicTable.Column('Parameter',
lambda e: e[0]),
BasicTable.Column('Value',
lambda e: e[1],
resize_mode=QHeaderView.Stretch),
]
def __init__(self, parent):
super(StateTable, self).__init__(parent, self.COLUMNS, font=get_monospace_font())
def update_state(self, key_value_list):
self.setUpdatesEnabled(False)
existing_keys = [self.item(x, 0).text() for x in range(self.rowCount())]
new_keys = [str(k) for k, _ in key_value_list]
if existing_keys == new_keys:
for row in range(self.rowCount()):
self.setItem(row, 1, QTableWidgetItem(str(key_value_list[row][1])))
else:
self.clear()
self.setRowCount(len(key_value_list))
for i, kv in enumerate(key_value_list):
self.set_row(i, kv)
self.setUpdatesEnabled(True)
class StateWidget(QWidget):
def __init__(self, parent, cli_iface):
super(StateWidget, self).__init__(parent)
self._cli_iface = cli_iface
self._table = StateTable(self)
self._reload_button = make_icon_button('refresh', 'Reload state information from the adapter', self,
on_clicked=self._do_reload, text='Reload')
self._auto_reload_checkbox = QCheckBox('Auto reload every [sec]:', self)
self._auto_reload_checkbox.stateChanged.connect(self._update_auto_reload)
self._auto_reload_spinbox = QDoubleSpinBox(self)
self._auto_reload_spinbox.setDecimals(1)
self._auto_reload_spinbox.setMinimum(0.5)
self._auto_reload_spinbox.setMaximum(10)
self._auto_reload_spinbox.setValue(1)
self._auto_reload_spinbox.setSingleStep(0.5)
self._auto_reload_spinbox.setToolTip('Auto reload interval, in seconds')
self._auto_reload_spinbox.valueChanged.connect(self._update_auto_reload)
self._auto_reload_timer = QTimer(self)
self._auto_reload_timer.setSingleShot(False)
self._auto_reload_timer.timeout.connect(self._do_reload)
layout = QVBoxLayout(self)
buttons_layout = QHBoxLayout(self)
buttons_layout.addWidget(self._reload_button, 1)
buttons_layout.addWidget(self._auto_reload_checkbox)
buttons_layout.addWidget(self._auto_reload_spinbox)
layout.addLayout(buttons_layout)
layout.addWidget(self._table, 1)
self.setLayout(layout)
# noinspection PyCallByClass,PyTypeChecker
QTimer.singleShot(100, self._do_reload)
def _update_auto_reload(self):
enabled = self._auto_reload_checkbox.isChecked()
if enabled:
interval = float(self._auto_reload_spinbox.value())
self._auto_reload_timer.start(int(interval * 1e3 + 0.5))
self.window().show_message('Auto reload interval %0.1f seconds', interval)
else:
self._auto_reload_timer.stop()
self.window().show_message('Auto reload stopped')
def _do_reload(self):
logger.debug('Reloading state...')
self.window().show_message('State requested...')
def proxy(kv):
if isinstance(kv, Exception):
self.window().show_message('State request failed: %r', kv)
elif kv is None:
self.window().show_message('State request timed out')
else:
self.window().show_message('State request succeeded')
self._table.update_state(kv)
self._cli_iface.request_state(proxy)
class ConfigParam:
def __init__(self, name, value, default, minimum, maximum):
self.name = name
self.value = value
self.default = default
self.minimum = minimum
self.maximum = maximum
def cast(what, to):
return to(what) if what is not None else None
# noinspection PyChainedComparisons
if isinstance(self.value, int) and 0 <= self.value <= 1 and self.minimum == 0 and self.maximum == 1:
self.type = bool
self.value = bool(self.value)
elif isinstance(self.value, int):
self.type = int
elif isinstance(self.value, float):
self.type = float
else:
raise ValueError('Invalid value type')
self.default = cast(self.default, self.type)
self.minimum = cast(self.minimum, self.type)
self.maximum = cast(self.maximum, self.type)
def __str__(self):
s = '%s = ' % self.name
s += ('%d' if self.type in (bool, int) else '%s') % self.value
if self.minimum is not None:
s += (' [%d, %d]' if self.type in (bool, int) else ' [%s, %s]') % (self.minimum, self.maximum)
if self.default is not None:
s += (' (%d)' if self.type in (bool, int) else ' (%s)') % self.default
return s
__repr__ = __str__
@staticmethod
def parse_cli_response_line(line):
# Examples:
# uart.baudrate = 115200 [2400, 3000000] (115200)
# uart.baudrate = 115200 [2400, 3000000]
# uart.baudrate = 115200
# uart.baudrate = 115200 (115200)
# Q: Why couldn't Chris try out the regular expressions he created until he left home?
# A: His mom wouldn't let him play with matches.
pattern = r'(?m)^\s*(\S+)\s*=\s*([^\s\[\(]+)\s*(?:\[(\S+),\s*(\S+)\])?\s*(?:\((\S+)\))?'
(name, value, minimum, maximum, default), = re.findall(pattern, line)
if not name or not value:
raise ValueError('Invalid parameter string %r: name or value could not be parsed' % line)
try:
value = eval(value)
minimum, maximum, default = [(eval(x) if x else None) for x in (minimum, maximum, default)]
except Exception as ex:
raise ValueError('Could not parse parameter string %r' % line) from ex
if (minimum is None) != (maximum is None):
raise ValueError('Invalid parameter string %r: minimum or maximum cannot be set separately' % line)
return ConfigParam(name=name, value=value, default=default, minimum=minimum, maximum=maximum)
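# Illustrative round-trip sketch (not part of the original source). Note that
# ConfigParam.__str__ above emits the same "name = value [min, max] (default)"
# layout that parse_cli_response_line() consumes:
#
#   p = ConfigParam.parse_cli_response_line('uart.baudrate = 115200 [2400, 3000000] (115200)')
#   assert p.type is int
#   assert str(p) == 'uart.baudrate = 115200 [2400, 3000000] (115200)'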
class ConfigParamEditWindow(QDialog):
def __init__(self, parent, model, cli_iface, store_callback):
super(ConfigParamEditWindow, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setWindowTitle('Edit Parameter')
self.setModal(True)
self._model = model
self._cli_iface = cli_iface
self._store_callback = store_callback
name_label = QLabel(model.name, self)
name_label.setFont(get_monospace_font())
if model.type is bool:
self._value = QCheckBox(self)
self._value.setChecked(model.value)
elif model.type is int:
self._value = QSpinBox(self)
if model.minimum is not None:
self._value.setRange(model.minimum,
model.maximum)
else:
self._value.setRange(-0x80000000,
+0x7FFFFFFF)
self._value.setValue(model.value)
elif model.type is float:
self._value = QDoubleSpinBox(self)
if model.minimum is not None:
self._value.setRange(model.minimum,
model.maximum)
else:
self._value.setRange(-3.4028235e+38,
+3.4028235e+38)
self._value.setValue(model.value)
elif model.type is str:
self._value = QLineEdit(self)
self._value.setText(model.value)
else:
raise ValueError('Unsupported value type %r' % model.type)
self._ok_button = make_icon_button('check', 'Send changes to the device', self,
text='OK', on_clicked=self._do_ok)
self._cancel_button = make_icon_button('remove', 'Discard changes and close this window', self,
text='Cancel', on_clicked=self.close)
layout = QVBoxLayout(self)
value_layout = QHBoxLayout(self)
value_layout.addWidget(name_label)
value_layout.addWidget(self._value, 1)
controls_layout = QHBoxLayout(self)
controls_layout.addWidget(self._cancel_button)
controls_layout.addWidget(self._ok_button)
layout.addLayout(value_layout)
layout.addLayout(controls_layout)
self.setLayout(layout)
def _do_ok(self):
if self._model.type is bool:
value = self._value.isChecked()
elif self._model.type is int or self._model.type is float:
value = self._value.value()
else:
value = self._value.text()
self._store_callback(value)
self.close()
class ConfigWidget(QWidget):
COLUMNS = [
BasicTable.Column('Name',
lambda e: e.name),
BasicTable.Column('Value',
lambda e: e.value,
resize_mode=QHeaderView.Stretch),
BasicTable.Column('Default',
lambda e: e.default),
BasicTable.Column('Min',
lambda e: e.minimum if e.type is not bool else ''),
BasicTable.Column('Max',
lambda e: e.maximum if e.type is not bool else ''),
]
def __init__(self, parent, cli_iface):
super(ConfigWidget, self).__init__(parent)
self._cli_iface = cli_iface
self._table = BasicTable(self, self.COLUMNS, font=get_monospace_font())
self._table.cellDoubleClicked.connect(lambda row, col: self._do_edit_param(row))
self._parameters = []
self._have_unsaved_changes = False
self._fetch_button = make_icon_button('refresh',
'Fetch configuration from the adapter',
self, on_clicked=self._do_fetch, text='Fetch')
self._store_button = make_icon_button('database',
'Store the current configuration into non-volatile memory on the adapter',
self, on_clicked=self._do_store, text='Store')
self._erase_button = make_icon_button('eraser',
'Erase configuration from the non-volatile memory',
self, on_clicked=self._do_erase, text='Erase')
layout = QVBoxLayout(self)
buttons_layout = QHBoxLayout(self)
buttons_layout.addWidget(self._fetch_button)
buttons_layout.addWidget(self._store_button)
buttons_layout.addWidget(self._erase_button)
layout.addWidget(QLabel('Double click to change parameter value.', self))
layout.addLayout(buttons_layout)
layout.addWidget(self._table, 1)
self.setLayout(layout)
# noinspection PyCallByClass,PyTypeChecker
QTimer.singleShot(100, self._do_fetch)
@property
def have_unsaved_changes(self):
return self._have_unsaved_changes
def _do_edit_param(self, index):
def callback(value):
try:
self._cli_iface.set_config_param(self._parameters[index].name, value, self._show_callback_result)
# noinspection PyCallByClass,PyTypeChecker
QTimer.singleShot(10, self._do_fetch)
except Exception as ex:
                show_error('Parameter Change Error', 'Could not request parameter change.', ex, self)
else:
self._have_unsaved_changes = True
# noinspection PyCallByClass,PyTypeChecker
QTimer.singleShot(2000, lambda:
self.window().show_message('Click "Store" to make your configuration changes persistent'))
try:
win = ConfigParamEditWindow(self, self._parameters[index], self._cli_iface, callback)
win.show()
except Exception as ex:
show_error('Parameter Dialog Error', 'Could not open parameter edit dialog.', ex, self)
def _show_callback_result(self, result):
if isinstance(result, Exception):
self.window().show_message('Operation failed: %r', result)
elif not result:
self.window().show_message('Operation timed out')
else:
self.window().show_message('Success')
def _do_fetch(self):
def callback(params):
self._table.setUpdatesEnabled(False)
self._table.clear()
self._parameters = []
if params is None:
self.window().show_message('Configuration parameters request timed out')
elif isinstance(params, Exception):
self.window().show_message('Configuration parameters request failed: %r', params)
else:
self.window().show_message('Configuration parameters request succeeded')
self._parameters = params
self._table.setRowCount(len(params))
for row, par in enumerate(params):
self._table.set_row(row, par)
self._table.setUpdatesEnabled(True)
self._table.clear()
self._cli_iface.request_all_config_params(callback)
def _do_store(self):
self._cli_iface.store_all_config_params(self._show_callback_result)
self._have_unsaved_changes = False
def _do_erase(self):
self._cli_iface.erase_all_config_params(self._show_callback_result)
self._have_unsaved_changes = False
class CLIWidget(QWidget):
def __init__(self, parent, cli_iface):
super(CLIWidget, self).__init__(parent)
self._cli_iface = cli_iface
self._command_line = CommitableComboBoxWithHistory(self)
self._command_line.setToolTip('Enter the command here')
self._command_line.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self._command_line.setFont(get_monospace_font())
self._command_line.on_commit = self._do_execute
self._command_line_completer = QCompleter()
self._command_line_completer.setCaseSensitivity(Qt.CaseSensitive)
self._command_line_completer.setModel(self._command_line.model())
self._command_line.setCompleter(self._command_line_completer)
self._execute_button = make_icon_button('flash', 'Execute command', self, on_clicked=self._do_execute)
self._response_box = QPlainTextEdit(self)
self._response_box.setToolTip('Command output will be printed here')
self._response_box.setReadOnly(True)
self._response_box.setLineWrapMode(QPlainTextEdit.NoWrap)
self._response_box.setFont(get_monospace_font())
self._response_box.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
try:
            self._response_box.setPlaceholderText('Command output will be printed here')
except AttributeError: # Old PyQt
pass
layout = QVBoxLayout(self)
controls_layout = QHBoxLayout(self)
controls_layout.addWidget(self._command_line, 1)
controls_layout.addWidget(self._execute_button)
layout.addLayout(controls_layout)
layout.addWidget(self._response_box, 1)
self.setLayout(layout)
def _do_execute(self):
self._response_box.clear()
command = self._command_line.currentText()
if not command.strip():
return
self._command_line.add_current_text_to_history()
def callback(lines):
self.setEnabled(True)
if lines is None:
self.window().show_message('Command response timed out')
self._response_box.setPlainText('<RESPONSE TIMED OUT>')
else:
self.window().show_message('Command response received')
self._response_box.setPlainText(lines)
self.setEnabled(False)
self._cli_iface.execute_raw_command(command, callback)
class ControlPanelWindow(QDialog):
def __init__(self, parent, cli_iface, iface_name):
super(ControlPanelWindow, self).__init__(parent)
self.setWindowTitle('SLCAN Adapter Control Panel')
self.setAttribute(Qt.WA_DeleteOnClose) # This is required to stop background timers!
self._cli_iface = cli_iface
self._iface_name = iface_name
self._state_widget = StateWidget(self, self._cli_iface)
self._config_widget = ConfigWidget(self, self._cli_iface)
self._cli_widget = CLIWidget(self, self._cli_iface)
self._tab_widget = QTabWidget(self)
self._tab_widget.addTab(self._state_widget, get_icon('dashboard'), 'Adapter State')
self._tab_widget.addTab(self._config_widget, get_icon('wrench'), 'Configuration')
self._tab_widget.addTab(self._cli_widget, get_icon('terminal'), 'Command Line')
self._status_bar = QStatusBar(self)
self._status_bar.setSizeGripEnabled(False)
iface_name_label = QLabel(iface_name.split('/')[-1], self)
iface_name_label.setFont(get_monospace_font())
layout = QVBoxLayout(self)
layout.addWidget(iface_name_label)
layout.addWidget(self._tab_widget)
layout.addWidget(self._status_bar)
left, top, right, bottom = layout.getContentsMargins()
bottom = 0
layout.setContentsMargins(left, top, right, bottom)
self.setLayout(layout)
self.resize(400, 400)
def closeEvent(self, close_event):
if self._config_widget.have_unsaved_changes:
if request_confirmation('Save changes?',
'You made changes to the adapter configuration that were not saved. '
'Do you want to go back and save them?',
parent=self):
close_event.ignore()
self._tab_widget.setCurrentWidget(self._config_widget)
return
super(ControlPanelWindow, self).closeEvent(close_event)
def show_message(self, text, *fmt, duration=0):
self._status_bar.showMessage(text % fmt, duration * 1000)
class CLIInterface:
def __init__(self, driver):
self._driver = driver
def check_is_interface_supported(self, callback):
def proxy(resp):
logger.info('CLIInterface.check_is_interface_supported() response: %r', resp)
callback(not resp.expired)
self._driver.execute_cli_command('stat', proxy)
def request_state(self, callback):
def proxy(resp):
if resp.expired:
callback(None)
else:
try:
                    values = [yaml.safe_load(x) for x in resp.lines]
output = []
for kv in values:
for k, v in kv.items():
output.append((k, v))
callback(output)
except Exception as ex:
callback(ex)
self._driver.execute_cli_command('stat', proxy)
def request_all_config_params(self, callback):
def proxy(resp):
if resp.expired:
callback(None)
else:
try:
output = [ConfigParam.parse_cli_response_line(x) for x in resp.lines]
logger.info('Adapter config params: %r', output)
callback(output)
except Exception as ex:
callback(ex)
self._driver.execute_cli_command('cfg list', proxy)
@staticmethod
def _make_binary_proxy(callback):
def proxy(resp):
if resp.expired:
callback(None)
else:
if len(resp.lines) > 0:
callback(Exception('Unexpected response: %r' % resp.lines))
else:
callback(True)
return proxy
def store_all_config_params(self, callback):
self._driver.execute_cli_command('cfg save', self._make_binary_proxy(callback))
def erase_all_config_params(self, callback):
self._driver.execute_cli_command('cfg erase', self._make_binary_proxy(callback))
def set_config_param(self, name, value, callback):
if isinstance(value, (bool, int)):
value = '%d' % value
elif isinstance(value, float):
value = '%.9f' % value
elif isinstance(value, str):
pass
else:
raise ValueError('Unexpected value type: %r' % type(value))
line = 'cfg set %s %s' % (name, value)
self._driver.execute_cli_command(line, self._make_binary_proxy(callback))
def execute_raw_command(self, command, callback):
def proxy(resp):
if resp.expired:
callback(None)
else:
lines = os.linesep.join(resp.lines)
callback(lines)
self._driver.execute_cli_command(command, proxy)
@staticmethod
def is_backend_supported(driver):
return hasattr(driver, 'execute_cli_command')
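# Minimal usage sketch (illustrative, not part of the original source). It only
# assumes a `driver` exposing execute_cli_command(command, callback), which is
# exactly what is_backend_supported() checks for; the parameter name is made up:
#
#   if CLIInterface.is_backend_supported(driver):
#       iface = CLIInterface(driver)
#       iface.request_state(print)            # list of (key, value), None, or Exception
#       iface.set_config_param('uart.baudrate', 115200, print)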
| {
"content_hash": "25771ac1c9c67af33a1f998d81f58bd6",
"timestamp": "",
"source": "github",
"line_count": 568,
"max_line_length": 120,
"avg_line_length": 38.434859154929576,
"alnum_prop": 0.5848563968668408,
"repo_name": "UAVCAN/gui_tool",
"id": "b0de2250303d71391ca04002958b016551b102fd",
"size": "22019",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "uavcan_gui_tool/widgets/can_adapter_control_panel/slcan_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "265419"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SetAnnotationAccessResult:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'annotation': 'AnnotationInfo'
}
self.annotation = None # AnnotationInfo
| {
"content_hash": "936f0164240aaa394930db09643b4279",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 29.903225806451612,
"alnum_prop": 0.686084142394822,
"repo_name": "liosha2007/temporary-groupdocs-python3-sdk",
"id": "eadb833bd421b949829ac043ae88b5f7e067beb7",
"size": "949",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "groupdocs/models/SetAnnotationAccessResult.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "992590"
}
],
"symlink_target": ""
} |
import imp
import re
from glob import glob
test_types = {}
test_type_folders = glob("/FrameworkBenchmarks/toolset/test_types/*/")
# Loads all the test_types from the folders in this directory
for folder in test_type_folders:
# regex that grabs the characters between "toolset/test_types/"
# and the final "/" in the folder string to get the name
test_type_name = re.findall(r'.+\/(.+)\/$', folder, re.M)[0]
test_type = imp.load_source("TestType", "%s%s.py" % (folder, test_type_name))
test_types[test_type_name] = test_type.TestType
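# e.g. (hypothetical folder name) "/FrameworkBenchmarks/toolset/test_types/json/"
# loads json/json.py and registers its TestType class as test_types["json"].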
| {
"content_hash": "efa2069207704387a50bb5c75da7ba79",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 37.06666666666667,
"alnum_prop": 0.6888489208633094,
"repo_name": "martin-g/FrameworkBenchmarks",
"id": "2670a3d309d2909acaf0e004bc41e78fd864db32",
"size": "556",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "toolset/test_types/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "838"
},
{
"name": "ApacheConf",
"bytes": "11042"
},
{
"name": "Batchfile",
"bytes": "3478"
},
{
"name": "C",
"bytes": "155530"
},
{
"name": "C#",
"bytes": "242420"
},
{
"name": "C++",
"bytes": "69687"
},
{
"name": "CMake",
"bytes": "6387"
},
{
"name": "CSS",
"bytes": "186916"
},
{
"name": "Clojure",
"bytes": "60896"
},
{
"name": "Crystal",
"bytes": "5428"
},
{
"name": "D",
"bytes": "3939"
},
{
"name": "Dart",
"bytes": "36881"
},
{
"name": "Elixir",
"bytes": "11738"
},
{
"name": "Erlang",
"bytes": "38290"
},
{
"name": "Go",
"bytes": "70117"
},
{
"name": "Groovy",
"bytes": "20515"
},
{
"name": "HTML",
"bytes": "150582"
},
{
"name": "Haskell",
"bytes": "35546"
},
{
"name": "Java",
"bytes": "503616"
},
{
"name": "JavaScript",
"bytes": "423424"
},
{
"name": "Kotlin",
"bytes": "10209"
},
{
"name": "Lua",
"bytes": "14599"
},
{
"name": "Makefile",
"bytes": "3479"
},
{
"name": "MoonScript",
"bytes": "2398"
},
{
"name": "Nginx",
"bytes": "117549"
},
{
"name": "Nim",
"bytes": "253"
},
{
"name": "PHP",
"bytes": "776014"
},
{
"name": "Perl",
"bytes": "11179"
},
{
"name": "PowerShell",
"bytes": "36536"
},
{
"name": "Python",
"bytes": "284982"
},
{
"name": "QMake",
"bytes": "2301"
},
{
"name": "Ruby",
"bytes": "93235"
},
{
"name": "Rust",
"bytes": "16372"
},
{
"name": "Scala",
"bytes": "84538"
},
{
"name": "Shell",
"bytes": "221487"
},
{
"name": "Smarty",
"bytes": "1338"
},
{
"name": "Swift",
"bytes": "15615"
},
{
"name": "UrWeb",
"bytes": "65535"
},
{
"name": "Volt",
"bytes": "769"
}
],
"symlink_target": ""
} |
import argparse
import glob
import json
import logging
import os
import re
import time
import tweepy
from . import FMK, classify_user, user_url
log = logging.getLogger(__name__)
DEFAULT_BLOCK_TIMEOUT = 120
def add_user_args(parser):
g = parser.add_mutually_exclusive_group(required=True)
g.add_argument('--user-id', type=int)
g.add_argument('--screen-name', type=str)
def get_user_kwargs(args):
if args.user_id is not None:
return {'user_id': args.user_id}
elif args.screen_name is not None:
return {'screen_name': args.screen_name}
else:
raise ValueError(args)
def save_user(user, directory):
path = os.path.join(directory, 'user.{}.json'.format(user.id_str))
with open(path, 'w') as f:
json.dump(obj=user._json, fp=f)
def fetch_followers(api, args):
'''Fetches all followers' JSON and saves them to the given directory.
Files will have names of the form 'user.<numeric id>.json'.'''
os.makedirs(args.directory, exist_ok=True)
n = api.me().followers_count
g = tweepy.Cursor(api.followers, count=200).items()
for i, follower in enumerate(g, 1):
log.info('[%d/%d] %s', i, n, follower.screen_name)
save_user(follower, args.directory)
def fetch_mutuals(api, args):
'''Intersects a directory of user.*.json (as populated with fetch-followers) with users
following USER_ID/SCREEN_NAME.'''
mine = {
        int(re.match(r'user\.(\d+)\.json', os.path.basename(f)).group(1))
for f in glob.glob(os.path.join(args.directory, 'user.*.json'))
}
mutuals = set()
kwargs = get_user_kwargs(args)
kwargs['count'] = 5000
g = tweepy.Cursor(api.followers_ids, **kwargs).pages()
for i, page in enumerate(g, 1):
m = (mine & set(page))
log.info('Page %d: %d mutuals', i, len(m))
print('\n'.join(map(str, m)), flush=True)
mutuals |= m
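        # Pace requests: followers_ids pages are heavily rate-limited by Twitter.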
time.sleep(60)
log.info('Done; %d mutuals total', len(mutuals))
def classify(api, args):
classes = {e: set() for e in FMK}
for dirpath, _, filenames in os.walk(args.directory):
for filename in filenames:
            if not re.match(r'user\.\d+\.json', filename):
continue
with open(os.path.join(dirpath, filename), 'rb') as f:
j = json.load(f)
user = tweepy.models.User.parse(api, j)
c = classify_user(api, user, fetch_statuses=False)
classes[c].add(user)
for user in classes[FMK.BLOCK]:
args.block_file.write('{}\n'.format(user.id))
already_following = {u for u in classes[FMK.FOLLOW_BACK] if u.following}
classes[FMK.FOLLOW_BACK] -= already_following
results = {'already following': len(already_following)}
for e, us in classes.items():
results[e.name.lower().replace('_', ' ')] = len(us)
w = max(map(len, results))
v = max(len(str(n)) for n in results.values())
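    # w = widest label, v = widest count; used to align the summary columns.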
for label, n in results.items():
print('{:>{w}}: {:{v}} users'.format(label, n, w=w, v=v))
def report_spam(api, *args, **kwargs):
sleep_time = 15 * 60
for i in reversed(range(5)):
try:
return api.report_spam(*args, **kwargs)
except tweepy.TweepError as e:
if e.api_code == 205 and i > 0:
# “You are over the limit for spam reports. The account limit
# for reporting spam has been reached. Try again later.”
#
# Annoyingly, this is a different error code to the normal
# “rate-limited“ error code so tweepy's built-in rate limiting
# doesn't apply.
log.info("Over the spam-report limit; sleeping for %ds",
sleep_time, exc_info=True)
time.sleep(sleep_time)
sleep_time *= 1.5
else:
raise
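# Retry schedule used above (illustrative arithmetic): up to 5 attempts, with
# sleeps of 15, 22.5, ~34 and ~51 minutes between them, because API error 205
# is distinct from the rate-limit code that tweepy throttles on automatically.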
def _block_many(api, to_block_ids, timeout, report):
n = len(to_block_ids)
for i, to_block_id in enumerate(to_block_ids, 1):
try:
if report:
log.info('[%d/%d] reporting #%d', i, n, to_block_id)
u = report_spam(api, user_id=to_block_id, perform_block=True)
log.info('reported and blocked %s (#%d)', user_url(u), to_block_id)
else:
log.info('[%d/%d] blocking #%d', i, n, to_block_id)
u = api.create_block(user_id=to_block_id,
include_entities=False,
skip_status=True)
log.info('blocked %s (#%d)', user_url(u), to_block_id)
api.destroy_friendship(user_id=to_block_id)
log.info('Unfollowed #%d', to_block_id)
except tweepy.TweepError as e:
if e.api_code in (
34, # reported by report_spam
50, # reported by create_block
):
log.info('#%d no longer exists', to_block_id)
else:
raise
if i < n:
time.sleep(timeout)
def block(api, args):
'''Unfollow, block, and optionally report as spam many user IDs.'''
to_block_ids = {int(line) for line in args.block_file if line.strip()}
    log.info('would like to unfollow & block %d ids', len(to_block_ids))
existing_block_ids = set(tweepy.Cursor(api.blocks_ids).items())
log.info('%d existing blocks', len(existing_block_ids))
to_block_ids.difference_update(existing_block_ids)
_block_many(api, to_block_ids, timeout=args.timeout, report=args.report)
def block_one(api, args):
'''Block and unfollow a user, and (optionally) our friends who follow them.'''
kwargs = get_user_kwargs(args)
if args.mutuals:
log.info('Fetching our friends')
my_friends = set(tweepy.Cursor(api.friends_ids).items())
log.info('Fetched %d friends', len(my_friends))
time.sleep(args.timeout)
mutuals = set()
log.info('Intersecting friends with users following %s', kwargs)
g = tweepy.Cursor(api.followers_ids, **kwargs).pages()
for i, page in enumerate(g, 1):
m = my_friends & set(page)
log.info('Page %d: %d mutuals', i, len(m))
mutuals |= m
time.sleep(args.timeout)
_block_many(api, mutuals, timeout=args.timeout, report=False)
u = api.create_block(include_entities=False,
skip_status=True,
**kwargs)
log.info('Blocked %s', user_url(u))
api.destroy_friendship(**kwargs)
log.info('Unfollowed %s', user_url(u))
def ℕ(value):
'''Vim really deals badly with this function name.'''
try:
i = int(value)
if i < 0:
raise ValueError
return i
except ValueError:
raise argparse.ArgumentTypeError('{!r} ∉ ℕ'.format(value))
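# e.g. '--timeout 30' parses to 30, while '--timeout -5' raises
# argparse.ArgumentTypeError("'-5' ∉ ℕ").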
def add_subcommands(subparsers, var):
# fetch-followers
fetch_p = subparsers.add_parser('fetch-followers', help='fetch some tweeps',
description=fetch_followers.__doc__)
fetch_p.set_defaults(func=fetch_followers)
default_fetch_directory = os.path.join(var, 'followers')
fetch_p.add_argument('directory', default=default_fetch_directory,
help='(default: {})'.format(default_fetch_directory))
# fetch-mutuals
fetch_m = subparsers.add_parser('fetch-mutuals', help='intersect some tweeps',
description=fetch_mutuals.__doc__)
fetch_m.set_defaults(func=fetch_mutuals)
fetch_m.add_argument('directory')
add_user_args(fetch_m)
# classify
classify_p = subparsers.add_parser('classify', help='group some tweeps')
classify_p.set_defaults(func=classify)
classify_p.add_argument('directory', default=default_fetch_directory,
help='(default: {})'.format(default_fetch_directory))
classify_p.add_argument('block_file', type=argparse.FileType('w'),
help='file to store one numeric user id per line, '
'as used by "block" command')
    # block-one
    block_one_p = subparsers.add_parser('block-one', help='block one tweep',
description=block_one.__doc__)
block_one_p.set_defaults(func=block_one)
add_user_args(block_one_p)
block_one_p.add_argument('--mutuals', action='store_true',
help='Also block friends who follow them')
block_one_p.add_argument('--timeout', type=ℕ, default=DEFAULT_BLOCK_TIMEOUT,
help='delay in seconds between each API call')
# block
block_p = subparsers.add_parser('block', help='block some tweeps',
description=block.__doc__)
block_p.set_defaults(func=block)
block_p.add_argument('block_file', type=argparse.FileType('r'),
help='file with one numeric user id per line')
block_p.add_argument('--report', action='store_true',
help='with --block, also report for spam')
block_p.add_argument('--timeout', type=ℕ, default=DEFAULT_BLOCK_TIMEOUT,
help='delay in seconds between each API call')
__all__ = ['add_subcommands']
| {
"content_hash": "649dde3be94566ce23a1c11d766227e7",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 91,
"avg_line_length": 35.66538461538462,
"alnum_prop": 0.5781300549983824,
"repo_name": "wjt/fewerror",
"id": "7392fccff7f00f6ac204e3a2d76da03190fedc12",
"size": "9291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fewerror/twitter/batch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "162"
},
{
"name": "Python",
"bytes": "63927"
},
{
"name": "Shell",
"bytes": "403"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.contrib.auth.models import User
from models import *
import random
import datetime
from decimal import *
alphabet = [chr(i) for i in range(97,123)]
def random_name():
    return ''.join([random.choice(alphabet) for i in range(10)])
def random_date():
random_second = random.randrange(364 * 24 * 60 * 60)
return datetime.datetime.now() + datetime.timedelta(seconds=random_second)
class SimpleTest(TestCase):
def setUp(self):
for i in range(10):
User.objects.create_user(random_name(), random_name() + '@example.com', random_name())
for i in range(10):
Subject.objects.create(
name=random_name(),
email=random_name() + '@example.com',
phone='',
birthday=datetime.date.today(),
gender=random.choice(GENDER_CHOICES)[0],
field=random.choice(FIELD_CHOICES)[0])
for i in range(2):
experimenter = random.choice(User.objects.all())
Experiment.objects.create(
public_name=random_name(),
private_name=random_name(),
experimenter=experimenter)
for i in range(5):
Session.objects.create(
time=random_date(),
duration=Decimal(random.randint(1, 3) + random.random()).quantize(Decimal('0.1')),
required_subjects=random.randint(6, 12),
extra_subjects=random.randint(1, 5),
experiment=random.choice(Experiment.objects.all()))
def test_all(self):
        self.assertEqual(len(Subject.objects.all()), 10)
| {
"content_hash": "586a5cc376bfccf38a40626a2e3fbf97",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 89,
"avg_line_length": 32.15909090909091,
"alnum_prop": 0.6975265017667844,
"repo_name": "RedwoodAdmin/RedwoodFramework",
"id": "894dece864be8f97d7ab4e4f2127a2dbe420b732",
"size": "1415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expecon/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "7923"
},
{
"name": "Go",
"bytes": "109387"
},
{
"name": "HTML",
"bytes": "64184"
},
{
"name": "Java",
"bytes": "1306375"
},
{
"name": "JavaScript",
"bytes": "3751414"
},
{
"name": "Python",
"bytes": "32267"
},
{
"name": "Shell",
"bytes": "1756"
}
],
"symlink_target": ""
} |
import yaml
import sys
from .dom_case import DomCase
class DomCaseHandler(object):
def __init__(self, case_file, config):
self._api_cases = []
self._case_file = case_file
self._config = config
def load_cases(self):
        # TODO: duplicated code, needs refactoring
try:
with open(self._case_file) as _case_file:
case_files = yaml.safe_load(_case_file)
except IOError:
print("[Error] case file not exists: %s" % self._case_file)
sys.exit(-1)
for case in case_files:
self._api_cases.append(DomCase(case))
return self._api_cases
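# Minimal usage sketch (illustrative; 'cases.yaml' and `config` are placeholders):
#
#   handler = DomCaseHandler('cases.yaml', config)
#   cases = handler.load_cases()   # -> [DomCase, ...]; exits if the file is missing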
| {
"content_hash": "2af8ecb2dbb66d5bfe2cbff433a25a32",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 27.434782608695652,
"alnum_prop": 0.5625990491283677,
"repo_name": "LoveOrange/paradise",
"id": "14389f7e791619b5000067354b567d7d7da3267a",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dom_case_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12994"
},
{
"name": "Shell",
"bytes": "374"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="token", parent_name="funnel.stream", **kwargs):
super(TokenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "info"),
strict=kwargs.pop("strict", True),
**kwargs
)
| {
"content_hash": "43220bf97f1fc49f06b41dc1716e1720",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 83,
"avg_line_length": 38.5,
"alnum_prop": 0.5918367346938775,
"repo_name": "plotly/python-api",
"id": "bbe051cf089f517d6b91c75bd5699855d7dd5ec6",
"size": "539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnel/stream/_token.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import mock
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.services import action as action_service
from st2tests.fixturesloader import FixturesLoader
from tests import FunctionalTest
FIXTURES_PACK = 'aliases'
TEST_MODELS = {
'aliases': ['alias1.yaml', 'alias2.yaml'],
'actions': ['action1.yaml'],
'runners': ['runner1.yaml']
}
TEST_LOAD_MODELS = {
'aliases': ['alias3.yaml']
}
class DummyActionExecution(object):
def __init__(self, id_=None, status=LIVEACTION_STATUS_SUCCEEDED, result=''):
self.id = id_
self.status = status
self.result = result
class TestAliasExecution(FunctionalTest):
models = None
alias1 = None
alias2 = None
@classmethod
def setUpClass(cls):
super(TestAliasExecution, cls).setUpClass()
cls.models = FixturesLoader().save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_MODELS)
cls.alias1 = cls.models['aliases']['alias1.yaml']
cls.alias2 = cls.models['aliases']['alias2.yaml']
@mock.patch.object(action_service, 'request',
return_value=(None, DummyActionExecution(id_=1)))
def testBasicExecution(self, request):
command = 'Lorem ipsum value1 dolor sit "value2 value3" amet.'
post_resp = self._do_post(alias_execution=self.alias1, command=command)
self.assertEqual(post_resp.status_int, 200)
expected_parameters = {'param1': 'value1', 'param2': 'value2 value3'}
self.assertEquals(request.call_args[0][0].parameters, expected_parameters)
@mock.patch.object(action_service, 'request',
return_value=(None, DummyActionExecution(id_=1)))
def testExecutionWithArrayTypeSingleValue(self, request):
command = 'Lorem ipsum value1 dolor sit value2 amet.'
post_resp = self._do_post(alias_execution=self.alias2, command=command)
self.assertEqual(post_resp.status_int, 200)
expected_parameters = {'param1': 'value1', 'param3': ['value2']}
self.assertEquals(request.call_args[0][0].parameters, expected_parameters)
@mock.patch.object(action_service, 'request',
return_value=(None, DummyActionExecution(id_=1)))
def testExecutionWithArrayTypeMultiValue(self, request):
command = 'Lorem ipsum value1 dolor sit "value2, value3" amet.'
post_resp = self._do_post(alias_execution=self.alias2, command=command)
self.assertEqual(post_resp.status_int, 200)
expected_parameters = {'param1': 'value1', 'param3': ['value2', 'value3']}
self.assertEquals(request.call_args[0][0].parameters, expected_parameters)
def _do_post(self, alias_execution, command, expect_errors=False):
execution = {'name': alias_execution.name,
'format': alias_execution.formats[0],
'command': command,
'user': 'stanley',
'source_channel': 'test',
'notification_route': 'test'}
return self.app.post_json('/v1/aliasexecution', execution,
expect_errors=expect_errors)
| {
"content_hash": "bd81886c6792b040ca6e580853e10ef9",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 86,
"avg_line_length": 41.90909090909091,
"alnum_prop": 0.6349550666253486,
"repo_name": "alfasin/st2",
"id": "94fa8d44a64194c360c2984d66750f8ce48643d4",
"size": "4007",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "st2api/tests/unit/controllers/v1/test_alias_execution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "36110"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "2907491"
},
{
"name": "Shell",
"bytes": "16363"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import json
from django.test import SimpleTestCase
from django.utils import six, text
from django.utils.functional import lazystr
from django.utils.text import format_lazy
from django.utils.translation import override, ugettext_lazy
IS_WIDE_BUILD = (len('\U0001F4A9') == 1)
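# True on "wide" (UCS-4) builds, where a non-BMP character such as U+1F4A9 is a
# single code unit; "narrow" (UTF-16) Python 2 builds report a length of 2.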
class TestUtilsText(SimpleTestCase):
def test_get_text_list(self):
self.assertEqual(text.get_text_list(['a', 'b', 'c', 'd']), 'a, b, c or d')
self.assertEqual(text.get_text_list(['a', 'b', 'c'], 'and'), 'a, b and c')
self.assertEqual(text.get_text_list(['a', 'b'], 'and'), 'a and b')
self.assertEqual(text.get_text_list(['a']), 'a')
self.assertEqual(text.get_text_list([]), '')
with override('ar'):
self.assertEqual(text.get_text_list(['a', 'b', 'c']), "a، b أو c")
def test_smart_split(self):
testdata = [
('This is "a person" test.',
['This', 'is', '"a person"', 'test.']),
('This is "a person\'s" test.',
['This', 'is', '"a person\'s"', 'test.']),
('This is "a person\\"s" test.',
['This', 'is', '"a person\\"s"', 'test.']),
('"a \'one',
['"a', "'one"]),
('all friends\' tests',
['all', 'friends\'', 'tests']),
('url search_page words="something else"',
['url', 'search_page', 'words="something else"']),
("url search_page words='something else'",
['url', 'search_page', "words='something else'"]),
('url search_page words "something else"',
['url', 'search_page', 'words', '"something else"']),
('url search_page words-"something else"',
['url', 'search_page', 'words-"something else"']),
('url search_page words=hello',
['url', 'search_page', 'words=hello']),
('url search_page words="something else',
['url', 'search_page', 'words="something', 'else']),
("cut:','|cut:' '",
["cut:','|cut:' '"]),
(lazystr("a b c d"), # Test for #20231
['a', 'b', 'c', 'd']),
]
for test, expected in testdata:
self.assertEqual(list(text.smart_split(test)), expected)
def test_truncate_chars(self):
truncator = text.Truncator(
'The quick brown fox jumped over the lazy dog.'
)
self.assertEqual('The quick brown fox jumped over the lazy dog.', truncator.chars(100)),
self.assertEqual('The quick brown fox ...', truncator.chars(23)),
self.assertEqual('The quick brown fo.....', truncator.chars(23, '.....')),
# Ensure that we normalize our unicode data first
nfc = text.Truncator('o\xfco\xfco\xfco\xfc')
nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308')
self.assertEqual('oüoüoüoü', nfc.chars(8))
self.assertEqual('oüoüoüoü', nfd.chars(8))
self.assertEqual('oü...', nfc.chars(5))
self.assertEqual('oü...', nfd.chars(5))
# Ensure the final length is calculated correctly when there are
# combining characters with no precomposed form, and that combining
# characters are not split up.
truncator = text.Truncator('-B\u030AB\u030A----8')
self.assertEqual('-B\u030A...', truncator.chars(5))
self.assertEqual('-B\u030AB\u030A-...', truncator.chars(7))
self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8))
# Ensure the length of the end text is correctly calculated when it
# contains combining characters with no precomposed form.
truncator = text.Truncator('-----')
self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A'))
self.assertEqual('-----', truncator.chars(5, 'B\u030A'))
# Make a best effort to shorten to the desired length, but requesting
# a length shorter than the ellipsis shouldn't break
self.assertEqual('...', text.Truncator('asdf').chars(1))
# Ensure that lazy strings are handled correctly
self.assertEqual(text.Truncator(lazystr('The quick brown fox')).chars(12), 'The quick...')
def test_truncate_words(self):
truncator = text.Truncator('The quick brown fox jumped over the lazy dog.')
self.assertEqual('The quick brown fox jumped over the lazy dog.', truncator.words(10))
self.assertEqual('The quick brown fox...', truncator.words(4))
self.assertEqual('The quick brown fox[snip]', truncator.words(4, '[snip]'))
# Ensure that lazy strings are handled correctly
truncator = text.Truncator(lazystr('The quick brown fox jumped over the lazy dog.'))
self.assertEqual('The quick brown fox...', truncator.words(4))
def test_truncate_html_words(self):
truncator = text.Truncator(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>'
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>',
truncator.words(10, html=True)
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox...</em></strong></p>',
truncator.words(4, html=True)
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox....</em></strong></p>',
truncator.words(4, '....', html=True)
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox</em></strong></p>',
truncator.words(4, '', html=True)
)
# Test with new line inside tag
truncator = text.Truncator(
'<p>The quick <a href="xyz.html"\n id="mylink">brown fox</a> jumped over the lazy dog.</p>'
)
self.assertEqual(
'<p>The quick <a href="xyz.html"\n id="mylink">brown...</a></p>',
truncator.words(3, '...', html=True)
)
# Test self-closing tags
truncator = text.Truncator('<br/>The <hr />quick brown fox jumped over the lazy dog.')
self.assertEqual('<br/>The <hr />quick brown...', truncator.words(3, '...', html=True))
truncator = text.Truncator('<br>The <hr/>quick <em>brown fox</em> jumped over the lazy dog.')
self.assertEqual('<br>The <hr/>quick <em>brown...</em>', truncator.words(3, '...', html=True))
# Test html entities
truncator = text.Truncator('<i>Buenos días! ¿Cómo está?</i>')
self.assertEqual('<i>Buenos días! ¿Cómo...</i>', truncator.words(3, '...', html=True))
truncator = text.Truncator('<p>I <3 python, what about you?</p>')
self.assertEqual('<p>I <3 python...</p>', truncator.words(3, '...', html=True))
def test_wrap(self):
digits = '1234 67 9'
self.assertEqual(text.wrap(digits, 100), '1234 67 9')
self.assertEqual(text.wrap(digits, 9), '1234 67 9')
self.assertEqual(text.wrap(digits, 8), '1234 67\n9')
self.assertEqual(text.wrap('short\na long line', 7), 'short\na long\nline')
self.assertEqual(text.wrap('do-not-break-long-words please? ok', 8), 'do-not-break-long-words\nplease?\nok')
long_word = 'l%sng' % ('o' * 20)
self.assertEqual(text.wrap(long_word, 20), long_word)
self.assertEqual(text.wrap('a %s word' % long_word, 10), 'a\n%s\nword' % long_word)
self.assertEqual(text.wrap(lazystr(digits), 100), '1234 67 9')
def test_normalize_newlines(self):
self.assertEqual(text.normalize_newlines("abc\ndef\rghi\r\n"), "abc\ndef\nghi\n")
self.assertEqual(text.normalize_newlines("\n\r\r\n\r"), "\n\n\n\n")
self.assertEqual(text.normalize_newlines("abcdefghi"), "abcdefghi")
self.assertEqual(text.normalize_newlines(""), "")
self.assertEqual(text.normalize_newlines(lazystr("abc\ndef\rghi\r\n")), "abc\ndef\nghi\n")
def test_normalize_newlines_bytes(self):
"""normalize_newlines should be able to handle bytes too"""
normalized = text.normalize_newlines(b"abc\ndef\rghi\r\n")
self.assertEqual(normalized, "abc\ndef\nghi\n")
self.assertIsInstance(normalized, six.text_type)
def test_phone2numeric(self):
numeric = text.phone2numeric('0800 flowers')
self.assertEqual(numeric, '0800 3569377')
lazy_numeric = lazystr(text.phone2numeric('0800 flowers'))
self.assertEqual(lazy_numeric, '0800 3569377')
def test_slugify(self):
items = (
# given - expected - unicode?
('Hello, World!', 'hello-world', False),
('spam & eggs', 'spam-eggs', False),
('spam & ıçüş', 'spam-ıçüş', True),
('foo ıç bar', 'foo-ıç-bar', True),
(' foo ıç bar', 'foo-ıç-bar', True),
('你好', '你好', True),
)
for value, output, is_unicode in items:
self.assertEqual(text.slugify(value, allow_unicode=is_unicode), output)
def test_unescape_entities(self):
items = [
('', ''),
('foo', 'foo'),
('&', '&'),
('&', '&'),
('&', '&'),
('foo & bar', 'foo & bar'),
('foo & bar', 'foo & bar'),
]
for value, output in items:
self.assertEqual(text.unescape_entities(value), output)
self.assertEqual(text.unescape_entities(lazystr(value)), output)
def test_unescape_string_literal(self):
items = [
('"abc"', 'abc'),
("'abc'", 'abc'),
('"a \"bc\""', 'a "bc"'),
("'\'ab\' c'", "'ab' c"),
]
for value, output in items:
self.assertEqual(text.unescape_string_literal(value), output)
self.assertEqual(text.unescape_string_literal(lazystr(value)), output)
def test_get_valid_filename(self):
filename = "^&'@{}[],$=!-#()%+~_123.txt"
self.assertEqual(text.get_valid_filename(filename), "-_123.txt")
self.assertEqual(text.get_valid_filename(lazystr(filename)), "-_123.txt")
def test_compress_sequence(self):
data = [{'key': i} for i in range(10)]
seq = list(json.JSONEncoder().iterencode(data))
seq = [s.encode('utf-8') for s in seq]
actual_length = len(b''.join(seq))
out = text.compress_sequence(seq)
compressed_length = len(b''.join(out))
self.assertTrue(compressed_length < actual_length)
def test_format_lazy(self):
self.assertEqual('django/test', format_lazy('{}/{}', 'django', lazystr('test')))
self.assertEqual('django/test', format_lazy('{0}/{1}', *('django', 'test')))
self.assertEqual('django/test', format_lazy('{a}/{b}', **{'a': 'django', 'b': 'test'}))
self.assertEqual('django/test', format_lazy('{a[0]}/{a[1]}', a=('django', 'test')))
t = {}
s = format_lazy('{0[a]}-{p[a]}', t, p=t)
t['a'] = lazystr('django')
self.assertEqual('django-django', s)
t['a'] = 'update'
self.assertEqual('update-update', s)
# The format string can be lazy. (string comes from contrib.admin)
s = format_lazy(
ugettext_lazy("Added {name} \"{object}\"."),
name='article', object='My first try',
)
with override('fr'):
self.assertEqual('article «\xa0My first try\xa0» ajouté.', s)
| {
"content_hash": "2355e605f1408edfd6627e15c3c9fb97",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 116,
"avg_line_length": 46.471774193548384,
"alnum_prop": 0.5642516268980478,
"repo_name": "Leila20/django",
"id": "1ce993bdb2b45d3b0f397e0f7795091e8557b517",
"size": "11589",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tests/utils_tests/test_text.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53138"
},
{
"name": "HTML",
"bytes": "172977"
},
{
"name": "JavaScript",
"bytes": "448151"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12145773"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""Unittest for the base checker."""
import re
import sys
import unittest
import astroid
from pylint.checkers import base
from pylint.testutils import CheckerTestCase, Message, set_config
class TestDocstring(CheckerTestCase):
CHECKER_CLASS = base.DocStringChecker
def test_missing_docstring_module(self):
module = astroid.parse("something")
message = Message('missing-docstring', node=module, args=('module',))
with self.assertAddsMessages(message):
self.checker.visit_module(module)
def test_missing_docstring_empty_module(self):
module = astroid.parse("")
with self.assertNoMessages():
self.checker.visit_module(module)
def test_empty_docstring_module(self):
module = astroid.parse("''''''")
message = Message('empty-docstring', node=module, args=('module',))
with self.assertAddsMessages(message):
self.checker.visit_module(module)
def test_empty_docstring_function(self):
func = astroid.extract_node("""
def func(tion):
pass""")
message = Message('missing-docstring', node=func, args=('function',))
with self.assertAddsMessages(message):
self.checker.visit_functiondef(func)
@set_config(docstring_min_length=2)
def test_short_function_no_docstring(self):
func = astroid.extract_node("""
def func(tion):
pass""")
with self.assertNoMessages():
self.checker.visit_functiondef(func)
@set_config(docstring_min_length=2)
def test_long_function_no_docstring(self):
func = astroid.extract_node("""
def func(tion):
pass
pass
""")
message = Message('missing-docstring', node=func, args=('function',))
with self.assertAddsMessages(message):
self.checker.visit_functiondef(func)
@set_config(docstring_min_length=2)
def test_long_function_nested_statements_no_docstring(self):
func = astroid.extract_node("""
def func(tion):
try:
pass
except:
pass
""")
message = Message('missing-docstring', node=func, args=('function',))
with self.assertAddsMessages(message):
self.checker.visit_functiondef(func)
@set_config(docstring_min_length=2)
def test_function_no_docstring_by_name(self):
func = astroid.extract_node("""
def __fun__(tion):
pass""")
with self.assertNoMessages():
self.checker.visit_functiondef(func)
def test_class_no_docstring(self):
klass = astroid.extract_node("""
class Klass(object):
pass""")
message = Message('missing-docstring', node=klass, args=('class',))
with self.assertAddsMessages(message):
self.checker.visit_classdef(klass)
class TestNameChecker(CheckerTestCase):
CHECKER_CLASS = base.NameChecker
CONFIG = {
'bad_names': set(),
}
@set_config(attr_rgx=re.compile('[A-Z]+'),
property_classes=('abc.abstractproperty', '.custom_prop'))
def test_property_names(self):
# If a method is annotated with @property, it's name should
# match the attr regex. Since by default the attribute regex is the same
# as the method regex, we override it here.
methods = astroid.extract_node("""
import abc
def custom_prop(f):
return property(f)
class FooClass(object):
@property
def FOO(self): #@
pass
@property
def bar(self): #@
pass
@abc.abstractproperty
def BAZ(self): #@
pass
@custom_prop
def QUX(self): #@
pass
""")
with self.assertNoMessages():
self.checker.visit_functiondef(methods[0])
self.checker.visit_functiondef(methods[2])
self.checker.visit_functiondef(methods[3])
with self.assertAddsMessages(Message('invalid-name', node=methods[1],
args=('Attribute', 'bar',
"'[A-Z]+' pattern"))):
self.checker.visit_functiondef(methods[1])
@set_config(attr_rgx=re.compile('[A-Z]+'))
def test_property_setters(self):
method = astroid.extract_node("""
class FooClass(object):
@property
def foo(self): pass
@foo.setter
def FOOSETTER(self): #@
pass
""")
with self.assertNoMessages():
self.checker.visit_functiondef(method)
def test_module_level_names(self):
assign = astroid.extract_node("""
import collections
Class = collections.namedtuple("a", ("b", "c")) #@
""")
with self.assertNoMessages():
self.checker.visit_assignname(assign.targets[0])
assign = astroid.extract_node("""
class ClassA(object):
pass
ClassB = ClassA
""")
with self.assertNoMessages():
self.checker.visit_assignname(assign.targets[0])
module = astroid.parse("""
def A():
return 1, 2, 3
CONSTA, CONSTB, CONSTC = A()
CONSTD = A()""")
with self.assertNoMessages():
self.checker.visit_assignname(module.body[1].targets[0].elts[0])
self.checker.visit_assignname(module.body[2].targets[0])
assign = astroid.extract_node("""
CONST = "12 34 ".rstrip().split()""")
with self.assertNoMessages():
self.checker.visit_assignname(assign.targets[0])
@unittest.skipIf(sys.version_info >= (3, 0), reason="Needs Python 2.x")
@set_config(const_rgx=re.compile(".+"))
@set_config(function_rgx=re.compile(".+"))
@set_config(class_rgx=re.compile(".+"))
def test_assign_to_new_keyword_py2(self):
ast = astroid.extract_node("""
True = 0 #@
False = 1 #@
def True(): #@
pass
class True: #@
pass
""")
with self.assertAddsMessages(
Message(msg_id='assign-to-new-keyword', node=ast[0].targets[0], args=('True', '3.0'))
):
self.checker.visit_assignname(ast[0].targets[0])
with self.assertAddsMessages(
Message(msg_id='assign-to-new-keyword', node=ast[1].targets[0], args=('False', '3.0'))
):
self.checker.visit_assignname(ast[1].targets[0])
with self.assertAddsMessages(
Message(msg_id='assign-to-new-keyword', node=ast[2], args=('True', '3.0'))
):
self.checker.visit_functiondef(ast[2])
with self.assertAddsMessages(
Message(msg_id='assign-to-new-keyword', node=ast[3], args=('True', '3.0'))
):
self.checker.visit_classdef(ast[3])
@unittest.skipIf(sys.version_info >= (3, 7), reason="Needs Python 3.6 or earlier")
@set_config(const_rgx=re.compile(".+"))
@set_config(function_rgx=re.compile(".+"))
@set_config(class_rgx=re.compile(".+"))
def test_assign_to_new_keyword_py3(self):
ast = astroid.extract_node("""
async = "foo" #@
await = "bar" #@
def async(): #@
pass
class async: #@
pass
""")
with self.assertAddsMessages(
Message(msg_id='assign-to-new-keyword', node=ast[0].targets[0], args=('async', '3.7'))
):
self.checker.visit_assignname(ast[0].targets[0])
with self.assertAddsMessages(
Message(msg_id='assign-to-new-keyword', node=ast[1].targets[0], args=('await', '3.7'))
):
self.checker.visit_assignname(ast[1].targets[0])
with self.assertAddsMessages(
Message(msg_id='assign-to-new-keyword', node=ast[2], args=('async', '3.7'))
):
self.checker.visit_functiondef(ast[2])
with self.assertAddsMessages(
Message(msg_id='assign-to-new-keyword', node=ast[3], args=('async', '3.7'))
):
self.checker.visit_classdef(ast[3])
class TestMultiNamingStyle(CheckerTestCase):
CHECKER_CLASS = base.NameChecker
MULTI_STYLE_RE = re.compile('(?:(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$')
@set_config(class_rgx=MULTI_STYLE_RE)
def test_multi_name_detection_majority(self):
classes = astroid.extract_node("""
class classb(object): #@
pass
class CLASSA(object): #@
pass
class CLASSC(object): #@
pass
""")
message = Message('invalid-name',
node=classes[0],
args=('Class', 'classb',
"'(?:(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$' pattern"))
with self.assertAddsMessages(message):
for cls in classes:
self.checker.visit_classdef(cls)
self.checker.leave_module(cls.root)
@set_config(class_rgx=MULTI_STYLE_RE)
def test_multi_name_detection_first_invalid(self):
classes = astroid.extract_node("""
class class_a(object): #@
pass
class classb(object): #@
pass
class CLASSC(object): #@
pass
""")
messages = [
Message('invalid-name', node=classes[0],
args=('Class', 'class_a', "'(?:(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$' pattern")),
Message('invalid-name', node=classes[2],
args=('Class', 'CLASSC', "'(?:(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$' pattern"))
]
with self.assertAddsMessages(*messages):
for cls in classes:
self.checker.visit_classdef(cls)
self.checker.leave_module(cls.root)
@set_config(method_rgx=MULTI_STYLE_RE,
function_rgx=MULTI_STYLE_RE,
name_group=('function:method',))
def test_multi_name_detection_group(self):
function_defs = astroid.extract_node("""
class First(object):
def func(self): #@
pass
def FUNC(): #@
pass
""", module_name='test')
message = Message('invalid-name', node=function_defs[1],
args=('Function', 'FUNC',
"'(?:(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$' pattern"))
with self.assertAddsMessages(message):
for func in function_defs:
self.checker.visit_functiondef(func)
self.checker.leave_module(func.root)
@set_config(function_rgx=re.compile('(?:(?P<ignore>FOO)|(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$'))
def test_multi_name_detection_exempt(self):
function_defs = astroid.extract_node("""
def FOO(): #@
pass
def lower(): #@
pass
def FOO(): #@
pass
def UPPER(): #@
pass
""")
message = Message('invalid-name', node=function_defs[3],
args=('Function', 'UPPER',
"'(?:(?P<ignore>FOO)|(?P<UP>[A-Z]+)|(?P<down>[a-z]+))$' pattern"))
with self.assertAddsMessages(message):
for func in function_defs:
self.checker.visit_functiondef(func)
self.checker.leave_module(func.root)
class TestComparison(CheckerTestCase):
CHECKER_CLASS = base.ComparisonChecker
def test_comparison(self):
node = astroid.extract_node("foo == True")
message = Message('singleton-comparison',
node=node,
args=(True, "just 'expr' or 'expr is True'"))
with self.assertAddsMessages(message):
self.checker.visit_compare(node)
node = astroid.extract_node("foo == False")
message = Message('singleton-comparison',
node=node,
args=(False, "'not expr' or 'expr is False'"))
with self.assertAddsMessages(message):
self.checker.visit_compare(node)
node = astroid.extract_node("foo == None")
message = Message('singleton-comparison',
node=node,
args=(None, "'expr is None'"))
with self.assertAddsMessages(message):
self.checker.visit_compare(node)
node = astroid.extract_node("True == foo")
messages = (Message('misplaced-comparison-constant',
node=node,
args=('foo == True',)),
Message('singleton-comparison',
node=node,
args=(True, "just 'expr' or 'expr is True'")))
with self.assertAddsMessages(*messages):
self.checker.visit_compare(node)
node = astroid.extract_node("False == foo")
messages = (Message('misplaced-comparison-constant',
node=node,
args=('foo == False',)),
Message('singleton-comparison',
node=node,
args=(False, "'not expr' or 'expr is False'")))
with self.assertAddsMessages(*messages):
self.checker.visit_compare(node)
node = astroid.extract_node("None == foo")
messages = (Message('misplaced-comparison-constant',
node=node,
args=('foo == None',)),
Message('singleton-comparison',
node=node,
args=(None, "'expr is None'")))
with self.assertAddsMessages(*messages):
self.checker.visit_compare(node)
class TestNamePresets(unittest.TestCase):
SNAKE_CASE_NAMES = {'test_snake_case', 'test_snake_case11', 'test_https_200'}
CAMEL_CASE_NAMES = {'testCamelCase', 'testCamelCase11', 'testHTTP200'}
UPPER_CASE_NAMES = {'TEST_UPPER_CASE', 'TEST_UPPER_CASE11', 'TEST_HTTP_200'}
PASCAL_CASE_NAMES = {'TestPascalCase', 'TestPascalCase11', 'TestHTTP200'}
ALL_NAMES = SNAKE_CASE_NAMES | CAMEL_CASE_NAMES | UPPER_CASE_NAMES | PASCAL_CASE_NAMES
def _test_name_is_correct_for_all_name_types(self, naming_style, name):
for name_type in base.KNOWN_NAME_TYPES:
self._test_is_correct(naming_style, name, name_type)
def _test_name_is_incorrect_for_all_name_types(self, naming_style, name):
for name_type in base.KNOWN_NAME_TYPES:
self._test_is_incorrect(naming_style, name, name_type)
def _test_should_always_pass(self, naming_style):
always_pass_data = [
('__add__', 'method'),
('__set_name__', 'method'),
('__version__', 'const'),
('__author__', 'const')
]
for name, name_type in always_pass_data:
self._test_is_correct(naming_style, name, name_type)
def _test_is_correct(self, naming_style, name, name_type):
rgx = naming_style.get_regex(name_type)
self.assertTrue(rgx.match(name),
"{!r} does not match pattern {!r} (style: {}, type: {})".
format(name, rgx, naming_style, name_type))
def _test_is_incorrect(self, naming_style, name, name_type):
rgx = naming_style.get_regex(name_type)
self.assertFalse(rgx.match(name),
"{!r} match pattern {!r} but shouldn't (style: {}, type: {})".
format(name, rgx, naming_style, name_type))
def test_snake_case(self):
naming_style = base.SnakeCaseStyle
for name in self.SNAKE_CASE_NAMES:
self._test_name_is_correct_for_all_name_types(naming_style, name)
for name in self.ALL_NAMES - self.SNAKE_CASE_NAMES:
self._test_name_is_incorrect_for_all_name_types(naming_style, name)
self._test_should_always_pass(naming_style)
def test_camel_case(self):
naming_style = base.CamelCaseStyle
for name in self.CAMEL_CASE_NAMES:
self._test_name_is_correct_for_all_name_types(naming_style, name)
for name in self.ALL_NAMES - self.CAMEL_CASE_NAMES:
self._test_name_is_incorrect_for_all_name_types(naming_style, name)
self._test_should_always_pass(naming_style)
def test_upper_case(self):
naming_style = base.UpperCaseStyle
for name in self.UPPER_CASE_NAMES:
self._test_name_is_correct_for_all_name_types(naming_style, name)
for name in self.ALL_NAMES - self.UPPER_CASE_NAMES:
self._test_name_is_incorrect_for_all_name_types(naming_style, name)
self._test_should_always_pass(naming_style)
def test_pascal_case(self):
naming_style = base.PascalCaseStyle
for name in self.PASCAL_CASE_NAMES:
self._test_name_is_correct_for_all_name_types(naming_style, name)
for name in self.ALL_NAMES - self.PASCAL_CASE_NAMES:
self._test_name_is_incorrect_for_all_name_types(naming_style, name)
self._test_should_always_pass(naming_style)
| {
"content_hash": "5733d1fb3de52f252e8bfe7c4baecf65",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 98,
"avg_line_length": 37.8215859030837,
"alnum_prop": 0.5531419253392348,
"repo_name": "lucidmotifs/auto-aoc",
"id": "cd3d5cf7af7ff4e9e821ed24c3974bd588bf6b8f",
"size": "17941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".venv/lib/python3.5/site-packages/pylint/test/unittest_checker_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "74"
},
{
"name": "C",
"bytes": "41695"
},
{
"name": "C++",
"bytes": "35306"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "48431"
},
{
"name": "JavaScript",
"bytes": "2043"
},
{
"name": "Python",
"bytes": "4850280"
},
{
"name": "Shell",
"bytes": "3778"
},
{
"name": "Visual Basic",
"bytes": "820"
},
{
"name": "XSLT",
"bytes": "2058"
}
],
"symlink_target": ""
} |
"""distutils.ccompiler
Contains CCompiler, an abstract base class that defines the interface
for the Distutils compiler abstraction model."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: ccompiler.py,v 1.61 2004/11/10 22:23:13 loewis Exp $"
import sys, os, re
from types import *
from copy import copy
from distutils.errors import *
from distutils.spawn import spawn
from distutils.file_util import move_file
from distutils.dir_util import mkpath
from distutils.dep_util import newer_pairwise, newer_group
from distutils.sysconfig import python_build
from distutils.util import split_quoted, execute
from distutils import log
class CCompiler:
"""Abstract base class to define the interface that must be implemented
by real compiler classes. Also has some utility methods used by
several compiler classes.
The basic idea behind a compiler abstraction class is that each
instance can be used for all the compile/link steps in building a
single project. Thus, attributes common to all of those compile and
link steps -- include directories, macros to define, libraries to link
against, etc. -- are attributes of the compiler instance. To allow for
variability in how individual files are treated, most of those
attributes may be varied on a per-compilation or per-link basis.
"""
# 'compiler_type' is a class attribute that identifies this class. It
# keeps code that wants to know what kind of compiler it's dealing with
# from having to import all possible compiler classes just to do an
# 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
# should really, really be one of the keys of the 'compiler_class'
# dictionary (see below -- used by the 'new_compiler()' factory
# function) -- authors of new compiler interface classes are
# responsible for updating 'compiler_class'!
compiler_type = None
# XXX things not handled by this compiler abstraction model:
# * client can't provide additional options for a compiler,
# e.g. warning, optimization, debugging flags. Perhaps this
# should be the domain of concrete compiler abstraction classes
# (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
# class should have methods for the common ones.
# * can't completely override the include or library search
# path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
# I'm not sure how widely supported this is even by Unix
# compilers, much less on other platforms. And I'm even less
# sure how useful it is; maybe for cross-compiling, but
# support for that is a ways off. (And anyways, cross
# compilers probably have a dedicated binary with the
# right paths compiled in. I hope.)
# * can't do really freaky things with the library list/library
# dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
# different versions of libfoo.a in different locations. I
# think this is useless without the ability to null out the
# library search path anyways.
# Subclasses that rely on the standard filename generation methods
# implemented below should override these; see the comment near
# those methods ('object_filenames()' et al.) for details:
src_extensions = None # list of strings
obj_extension = None # string
static_lib_extension = None
shared_lib_extension = None # string
static_lib_format = None # format string
shared_lib_format = None # prob. same as static_lib_format
exe_extension = None # string
# Default language settings. language_map is used to detect a source
# file or Extension target language, checking source filenames.
# language_order is used to detect the language precedence, when deciding
# what language to use when mixing source types. For example, if some
# extension has two files with ".c" extension, and one with ".cpp", it
# is still linked as c++.
language_map = {".c" : "c",
".cc" : "c++",
".cpp" : "c++",
".cxx" : "c++",
".m" : "objc",
}
language_order = ["c++", "objc", "c"]
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
self.dry_run = dry_run
self.force = force
self.verbose = verbose
# 'output_dir': a common output directory for object, library,
# shared object, and shared library files
self.output_dir = None
# 'macros': a list of macro definitions (or undefinitions). A
# macro definition is a 2-tuple (name, value), where the value is
# either a string or None (no explicit value). A macro
# undefinition is a 1-tuple (name,).
self.macros = []
# 'include_dirs': a list of directories to search for include files
self.include_dirs = []
# 'libraries': a list of libraries to include in any link
# (library names, not filenames: eg. "foo" not "libfoo.a")
self.libraries = []
# 'library_dirs': a list of directories to search for libraries
self.library_dirs = []
# 'runtime_library_dirs': a list of directories to search for
# shared libraries/objects at runtime
self.runtime_library_dirs = []
# 'objects': a list of object files (or similar, such as explicitly
# named library files) to include on any link
self.objects = []
for key in self.executables.keys():
self.set_executable(key, self.executables[key])
# __init__ ()
def set_executables (self, **args):
"""Define the executables (and options for them) that will be run
to perform the various stages of compilation. The exact set of
executables that may be specified here depends on the compiler
class (via the 'executables' class attribute), but most will have:
compiler the C/C++ compiler
linker_so linker used to create shared objects and libraries
linker_exe linker used to create binary executables
archiver static library creator
On platforms with a command-line (Unix, DOS/Windows), each of these
is a string that will be split into executable name and (optional)
list of arguments. (Splitting the string is done similarly to how
Unix shells operate: words are delimited by spaces, but quotes and
backslashes can override this. See
'distutils.util.split_quoted()'.)
"""
# Note that some CCompiler implementation classes will define class
# attributes 'cpp', 'cc', etc. with hard-coded executable names;
# this is appropriate when a compiler class is for exactly one
# compiler/OS combination (eg. MSVCCompiler). Other compiler
# classes (UnixCCompiler, in particular) are driven by information
# discovered at run-time, since there are many different ways to do
# basically the same things with Unix C compilers.
for key in args.keys():
if not self.executables.has_key(key):
raise ValueError, \
"unknown executable '%s' for class %s" % \
(key, self.__class__.__name__)
self.set_executable(key, args[key])
# set_executables ()
def set_executable(self, key, value):
if type(value) is StringType:
setattr(self, key, split_quoted(value))
else:
setattr(self, key, value)
def _find_macro (self, name):
i = 0
for defn in self.macros:
if defn[0] == name:
return i
i = i + 1
return None
def _check_macro_definitions (self, definitions):
"""Ensures that every element of 'definitions' is a valid macro
definition, ie. either a (name,value) 2-tuple or a (name,) tuple. Do
nothing if all definitions are OK, raise TypeError otherwise.
"""
for defn in definitions:
if not (type (defn) is TupleType and
(len (defn) == 1 or
(len (defn) == 2 and
(type (defn[1]) is StringType or defn[1] is None))) and
type (defn[0]) is StringType):
raise TypeError, \
("invalid macro definition '%s': " % defn) + \
"must be tuple (string,), (string, string), or " + \
"(string, None)"
# -- Bookkeeping methods -------------------------------------------
def define_macro (self, name, value=None):
"""Define a preprocessor macro for all compilations driven by this
compiler object. The optional parameter 'value' should be a
string; if it is not supplied, then the macro will be defined
without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?)
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
defn = (name, value)
self.macros.append (defn)
def undefine_macro (self, name):
"""Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
undefn = (name,)
self.macros.append (undefn)
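# Illustrative sketch: the most recent call wins, so after
#     cc.define_macro('FOO', '1')
#     cc.undefine_macro('FOO')
# self.macros ends up as [('FOO',)] and '-UFOO' reaches the command line.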
def add_include_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
header files. The compiler is instructed to search directories in
the order in which they are supplied by successive calls to
'add_include_dir()'.
"""
self.include_dirs.append (dir)
def set_include_dirs (self, dirs):
"""Set the list of directories that will be searched to 'dirs' (a
list of strings). Overrides any preceding calls to
'add_include_dir()'; subsequent calls to 'add_include_dir()' add
to the list passed to 'set_include_dirs()'. This does not affect
any list of standard include directories that the compiler may
search by default.
"""
self.include_dirs = copy (dirs)
def add_library (self, libname):
"""Add 'libname' to the list of libraries that will be included in
all links driven by this compiler object. Note that 'libname'
should *not* be the name of a file containing a library, but the
name of the library itself: the actual filename will be inferred by
the linker, the compiler, or the compiler class (depending on the
platform).
The linker will be instructed to link against libraries in the
order they were supplied to 'add_library()' and/or
'set_libraries()'. It is perfectly valid to duplicate library
names; the linker will be instructed to link against libraries as
many times as they are mentioned.
"""
self.libraries.append (libname)
def set_libraries (self, libnames):
"""Set the list of libraries to be included in all links driven by
this compiler object to 'libnames' (a list of strings). This does
not affect any standard system libraries that the linker may
include by default.
"""
self.libraries = copy (libnames)
def add_library_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
libraries specified to 'add_library()' and 'set_libraries()'. The
linker will be instructed to search for libraries in the order they
are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
"""
self.library_dirs.append (dir)
def set_library_dirs (self, dirs):
"""Set the list of library search directories to 'dirs' (a list of
strings). This does not affect any standard library search path
that the linker may search by default.
"""
self.library_dirs = copy (dirs)
def add_runtime_library_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
shared libraries at runtime.
"""
self.runtime_library_dirs.append (dir)
def set_runtime_library_dirs (self, dirs):
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = copy (dirs)
def add_link_object (self, object):
"""Add 'object' to the list of object files (or analogues, such as
explicitly named library files or the output of "resource
compilers") to be included in every link driven by this compiler
object.
"""
self.objects.append (object)
def set_link_objects (self, objects):
"""Set the list of object files (or analogues) to be included in
every link to 'objects'. This does not affect any standard object
files that the linker may include by default (such as system
libraries).
"""
self.objects = copy (objects)
# -- Private utility methods --------------------------------------
# (here for the convenience of subclasses)
# Helper method to prep compiler in subclass compile() methods
def _setup_compile(self, outdir, macros, incdirs, sources, depends,
extra):
"""Process arguments and decide which source files to compile.
Merges _fix_compile_args() and _prep_compile().
"""
if outdir is None:
outdir = self.output_dir
elif type(outdir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
if macros is None:
macros = self.macros
elif type(macros) is ListType:
macros = macros + (self.macros or [])
else:
raise TypeError, "'macros' (if supplied) must be a list of tuples"
if incdirs is None:
incdirs = self.include_dirs
elif type(incdirs) in (ListType, TupleType):
incdirs = list(incdirs) + (self.include_dirs or [])
else:
raise TypeError, \
"'include_dirs' (if supplied) must be a list of strings"
if extra is None:
extra = []
# Get the list of expected output (object) files
objects = self.object_filenames(sources,
strip_dir=python_build,
output_dir=outdir)
assert len(objects) == len(sources)
# XXX should redo this code to eliminate skip_source entirely.
# XXX instead create build and issue skip messages inline
if self.force:
skip_source = {} # rebuild everything
for source in sources:
skip_source[source] = 0
elif depends is None:
# If depends is None, figure out which source files we
# have to recompile according to a simplistic check. We
# just compare the source and object file, no deep
# dependency checking involving header files.
skip_source = {} # rebuild everything
for source in sources: # no wait, rebuild nothing
skip_source[source] = 1
n_sources, n_objects = newer_pairwise(sources, objects)
for source in n_sources: # no really, only rebuild what's
skip_source[source] = 0 # out-of-date
else:
# If depends is a list of files, then do a different
# simplistic check. Assume that each object depends on
# its source and all files in the depends list.
skip_source = {}
# L contains all the depends plus a spot at the end for a
# particular source file
L = depends[:] + [None]
for i in range(len(objects)):
source = sources[i]
L[-1] = source
if newer_group(L, objects[i]):
skip_source[source] = 0
else:
skip_source[source] = 1
pp_opts = gen_preprocess_options(macros, incdirs)
build = {}
for i in range(len(sources)):
src = sources[i]
obj = objects[i]
ext = os.path.splitext(src)[1]
self.mkpath(os.path.dirname(obj))
if skip_source[src]:
log.debug("skipping %s (%s up-to-date)", src, obj)
else:
build[obj] = src, ext
return macros, objects, extra, pp_opts, build
def _get_cc_args(self, pp_opts, debug, before):
# works for unixccompiler, emxccompiler, cygwinccompiler
cc_args = pp_opts + ['-c']
if debug:
cc_args[:0] = ['-g']
if before:
cc_args[:0] = before
return cc_args
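# For example (hypothetical inputs): pp_opts=['-DNDEBUG', '-Iinclude'],
# debug=1, before=['-O2'] yields ['-O2', '-g', '-DNDEBUG', '-Iinclude', '-c'].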
def _fix_compile_args (self, output_dir, macros, include_dirs):
"""Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None.
"""
if output_dir is None:
output_dir = self.output_dir
elif type (output_dir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
if macros is None:
macros = self.macros
elif type (macros) is ListType:
macros = macros + (self.macros or [])
else:
raise TypeError, "'macros' (if supplied) must be a list of tuples"
if include_dirs is None:
include_dirs = self.include_dirs
elif type (include_dirs) in (ListType, TupleType):
include_dirs = list (include_dirs) + (self.include_dirs or [])
else:
raise TypeError, \
"'include_dirs' (if supplied) must be a list of strings"
return output_dir, macros, include_dirs
# _fix_compile_args ()
def _prep_compile(self, sources, output_dir, depends=None):
"""Decide which souce files must be recompiled.
Determine the list of object files corresponding to 'sources',
and figure out which ones really need to be recompiled.
Return a list of all object files and a dictionary telling
which source files can be skipped.
"""
# Get the list of expected output (object) files
objects = self.object_filenames(sources, strip_dir=python_build,
output_dir=output_dir)
assert len(objects) == len(sources)
if self.force:
skip_source = {} # rebuild everything
for source in sources:
skip_source[source] = 0
elif depends is None:
# If depends is None, figure out which source files we
# have to recompile according to a simplistic check. We
# just compare the source and object file, no deep
# dependency checking involving header files.
skip_source = {} # rebuild everything
for source in sources: # no wait, rebuild nothing
skip_source[source] = 1
n_sources, n_objects = newer_pairwise(sources, objects)
for source in n_sources: # no really, only rebuild what's
skip_source[source] = 0 # out-of-date
else:
# If depends is a list of files, then do a different
# simplistic check. Assume that each object depends on
# its source and all files in the depends list.
skip_source = {}
# L contains all the depends plus a spot at the end for a
# particular source file
L = depends[:] + [None]
for i in range(len(objects)):
source = sources[i]
L[-1] = source
if newer_group(L, objects[i]):
skip_source[source] = 0
else:
skip_source[source] = 1
return objects, skip_source
# _prep_compile ()
def _fix_object_args (self, objects, output_dir):
"""Typecheck and fix up some arguments supplied to various methods.
Specifically: ensure that 'objects' is a list; if output_dir is
None, replace with self.output_dir. Return fixed versions of
'objects' and 'output_dir'.
"""
if type (objects) not in (ListType, TupleType):
raise TypeError, \
"'objects' must be a list or tuple of strings"
objects = list (objects)
if output_dir is None:
output_dir = self.output_dir
elif type (output_dir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
return (objects, output_dir)
def _fix_lib_args (self, libraries, library_dirs, runtime_library_dirs):
"""Typecheck and fix up some of the arguments supplied to the
'link_*' methods. Specifically: ensure that all arguments are
lists, and augment them with their permanent versions
(eg. 'self.libraries' augments 'libraries'). Return a tuple with
fixed versions of all arguments.
"""
if libraries is None:
libraries = self.libraries
elif type (libraries) in (ListType, TupleType):
libraries = list (libraries) + (self.libraries or [])
else:
raise TypeError, \
"'libraries' (if supplied) must be a list of strings"
if library_dirs is None:
library_dirs = self.library_dirs
elif type (library_dirs) in (ListType, TupleType):
library_dirs = list (library_dirs) + (self.library_dirs or [])
else:
raise TypeError, \
"'library_dirs' (if supplied) must be a list of strings"
if runtime_library_dirs is None:
runtime_library_dirs = self.runtime_library_dirs
elif type (runtime_library_dirs) in (ListType, TupleType):
runtime_library_dirs = (list (runtime_library_dirs) +
(self.runtime_library_dirs or []))
else:
raise TypeError, \
"'runtime_library_dirs' (if supplied) " + \
"must be a list of strings"
return (libraries, library_dirs, runtime_library_dirs)
# _fix_lib_args ()
def _need_link (self, objects, output_file):
"""Return true if we need to relink the files listed in 'objects'
to recreate 'output_file'.
"""
if self.force:
return 1
else:
if self.dry_run:
newer = newer_group (objects, output_file, missing='newer')
else:
newer = newer_group (objects, output_file)
return newer
# _need_link ()
def detect_language (self, sources):
"""Detect the language of a given file, or list of files. Uses
language_map, and language_order to do the job.
"""
if type(sources) is not ListType:
sources = [sources]
lang = None
index = len(self.language_order)
for source in sources:
base, ext = os.path.splitext(source)
extlang = self.language_map.get(ext)
try:
extindex = self.language_order.index(extlang)
if extindex < index:
lang = extlang
index = extindex
except ValueError:
pass
return lang
# detect_language ()
# -- Worker methods ------------------------------------------------
# (must be implemented by subclasses)
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
"""Preprocess a single C/C++ source file, named in 'source'.
Output will be written to file named 'output_file', or stdout if
'output_file' not supplied. 'macros' is a list of macro
definitions as for 'compile()', which will augment the macros set
with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
list of directory names that will be added to the default list.
Raises PreprocessError on failure.
"""
pass
def compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""Compile one or more source files.
'sources' must be a list of filenames, most likely C/C++
files, but in reality anything that can be handled by a
particular compiler and compiler class (eg. MSVCCompiler can
handle resource files in 'sources'). Return a list of object
filenames, one per source filename in 'sources'. Depending on
the implementation, not all source files will necessarily be
compiled, but all corresponding object filenames will be
returned.
If 'output_dir' is given, object files will be put under it, while
retaining their original path component. That is, "foo/bar.c"
normally compiles to "foo/bar.o" (for a Unix implementation); if
'output_dir' is "build", then it would compile to
"build/foo/bar.o".
'macros', if given, must be a list of macro definitions. A macro
definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
The former defines a macro; if the value is None, the macro is
defined without an explicit value. The 1-tuple case undefines a
macro. Later definitions/redefinitions/undefinitions take
precedence.
'include_dirs', if given, must be a list of strings, the
directories to add to the default include file search path for this
compilation only.
'debug' is a boolean; if true, the compiler will be instructed to
output debug symbols in (or alongside) the object file(s).
'extra_preargs' and 'extra_postargs' are implementation-dependent.
On platforms that have the notion of a command-line (e.g. Unix,
DOS/Windows), they are most likely lists of strings: extra
command-line arguments to prepend/append to the compiler command
line. On other platforms, consult the implementation class
documentation. In any event, they are intended as an escape hatch
for those occasions when the abstract compiler framework doesn't
cut the mustard.
'depends', if given, is a list of filenames that all targets
depend on. If a source file is older than any file in
depends, then the source file will be recompiled. This
supports dependency tracking, but only at a coarse
granularity.
Raises CompileError on failure.
"""
# A concrete compiler class can either override this method
# entirely or implement _compile().
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# Return *all* object filenames, not just the ones we just built.
return objects
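# Illustrative usage (hypothetical subclass instance and filenames):
#     objs = cc.compile(['src/foo.c'], output_dir='build',
#                       macros=[('NDEBUG', None)], include_dirs=['include'])
#     # objs == ['build/src/foo.o'] for a Unix-style implementation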
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile 'src' to product 'obj'."""
# A concrete compiler class that does not override compile()
# should implement _compile().
pass
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
"""Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
'output_libname' should be a library name, not a filename; the
filename will be inferred from the library name. 'output_dir' is
the directory where the library file will be put.
'debug' is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the 'debug' flag is included here
just for consistency).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LibError on failure.
"""
pass
# values for target_desc parameter in link()
SHARED_OBJECT = "shared_object"
SHARED_LIBRARY = "shared_library"
EXECUTABLE = "executable"
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
"""Link a bunch of stuff together to create an executable or
shared library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects'. 'output_filename' should be a filename. If
'output_dir' is supplied, 'output_filename' is relative to it
(i.e. 'output_filename' can provide directory components if
needed).
'libraries' is a list of libraries to link against. These are
library names, not filenames, since they're translated into
filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
on Unix and "foo.lib" on DOS/Windows). However, they can include a
directory component, which means the linker will look in that
specific directory rather than searching all the normal locations.
'library_dirs', if supplied, should be a list of directories to
search for libraries that were specified as bare library names
(ie. no directory component). These are on top of the system
default and those supplied to 'add_library_dir()' and/or
'set_library_dirs()'. 'runtime_library_dirs' is a list of
directories that will be embedded into the shared library and used
to search for other shared libraries that *it* depends on at
run-time. (This may only be relevant on Unix.)
'export_symbols' is a list of symbols that the shared library will
export. (This appears to be relevant only on Windows.)
'debug' is as for 'compile()' and 'create_static_lib()', with the
slight distinction that it actually matters on most platforms (as
opposed to 'create_static_lib()', which includes a 'debug' flag
mostly for form's sake).
'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
of course that they supply command-line arguments for the
particular linker being used).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LinkError on failure.
"""
raise NotImplementedError
# Old 'link_*()' methods, rewritten to use the new 'link()' method.
def link_shared_lib (self,
objects,
output_libname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_LIBRARY, objects,
self.library_filename(output_libname, lib_type='shared'),
output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_shared_object (self,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_OBJECT, objects,
output_filename, output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_executable (self,
objects,
output_progname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
target_lang=None):
self.link(CCompiler.EXECUTABLE, objects,
self.executable_filename(output_progname), output_dir,
libraries, library_dirs, runtime_library_dirs, None,
debug, extra_preargs, extra_postargs, None, target_lang)
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options()' function; there is
# no appropriate default implementation so subclasses should
# implement all of these.
def library_dir_option (self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for libraries.
"""
raise NotImplementedError
def runtime_library_dir_option (self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
"""
raise NotImplementedError
def library_option (self, lib):
"""Return the compiler option to add 'dir' to the list of libraries
linked into the shared library or executable.
"""
raise NotImplementedError
def has_function(self, funcname,
includes=None,
include_dirs=None,
libraries=None,
library_dirs=None):
"""Return a boolean indicating whether funcname is supported on
the current platform. The optional arguments can be used to
augment the compilation environment.
"""
# this can't be included at module scope because it tries to
# import math which might not be available at that point - maybe
# the necessary logic should just be inlined?
import tempfile
if includes is None:
includes = []
if include_dirs is None:
include_dirs = []
if libraries is None:
libraries = []
if library_dirs is None:
library_dirs = []
fd, fname = tempfile.mkstemp(".c", funcname, text=True)
f = os.fdopen(fd, "w")
for incl in includes:
f.write("""#include "%s"\n""" % incl)
f.write("""\
main (int argc, char **argv) {
%s();
}
""" % funcname)
f.close()
try:
objects = self.compile([fname], include_dirs=include_dirs)
except CompileError:
return False
try:
self.link_executable(objects, "a.out",
libraries=libraries,
library_dirs=library_dirs)
except (LinkError, TypeError):
return False
return True
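# Illustrative usage (hypothetical compiler instance):
#     cc.has_function('printf', includes=['stdio.h'])  # -> True on most platforms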
def find_library_file (self, dirs, lib, debug=0):
"""Search the specified list of directories for a static or shared
library file 'lib' and return the full path to that file. If
'debug' true, look for a debugging version (if that makes sense on
the current platform). Return None if 'lib' wasn't found in any of
the specified directories.
"""
raise NotImplementedError
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
# * library files (shared or static) are named by plugging the
# library name and extension into a format string, eg.
# "lib%s.%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extension (possibly
# empty) to the program name: eg. progname + ".exe" for
# Windows
#
# To reduce redundant code, these methods expect to find
# several attributes in the current object (presumably defined
# as class attributes):
# * src_extensions -
# list of C/C++ source file extensions, eg. ['.c', '.cpp']
# * obj_extension -
# object file extension, eg. '.o' or '.obj'
# * static_lib_extension -
# extension for static library files, eg. '.a' or '.lib'
# * shared_lib_extension -
# extension for shared library/object files, eg. '.so', '.dll'
# * static_lib_format -
# format string for generating static library filenames,
# eg. 'lib%s.%s' or '%s.%s'
# * shared_lib_format
# format string for generating shared library filenames
# (probably same as static_lib_format, since the extension
# is one of the intended parameters to the format string)
# * exe_extension -
# extension for executable files, eg. '' or '.exe'
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % (ext, src_name)
if strip_dir:
base = os.path.basename(base)
obj_names.append(os.path.join(output_dir,
base + self.obj_extension))
return obj_names
def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename (basename)
return os.path.join(output_dir, basename + self.shared_lib_extension)
def executable_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename (basename)
return os.path.join(output_dir, basename + (self.exe_extension or ''))
def library_filename(self, libname, lib_type='static', # or 'shared'
strip_dir=0, output_dir=''):
assert output_dir is not None
if lib_type not in ("static", "shared", "dylib"):
raise ValueError, "'lib_type' must be \"static\", \"shared\" or \"dylib\""
fmt = getattr(self, lib_type + "_lib_format")
ext = getattr(self, lib_type + "_lib_extension")
dir, base = os.path.split (libname)
filename = fmt % (base, ext)
if strip_dir:
dir = ''
return os.path.join(output_dir, dir, filename)
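# Illustrative, assuming UnixCCompiler-style attributes where the static
# library format/extension produce 'libfoo.a':
#     cc.library_filename('foo')                   # -> 'libfoo.a'
#     cc.library_filename('sub/foo', strip_dir=1)  # -> 'libfoo.a'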
# -- Utility methods -----------------------------------------------
def announce (self, msg, level=1):
log.debug(msg)
def debug_print (self, msg):
from distutils.debug import DEBUG
if DEBUG:
print msg
def warn (self, msg):
sys.stderr.write ("warning: %s\n" % msg)
def execute (self, func, args, msg=None, level=1):
execute(func, args, msg, self.dry_run)
def spawn (self, cmd):
spawn (cmd, dry_run=self.dry_run)
def move_file (self, src, dst):
return move_file (src, dst, dry_run=self.dry_run)
def mkpath (self, name, mode=0777):
mkpath (name, mode, self.dry_run)
# class CCompiler
# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
# type for that platform. Keys are interpreted as re match
# patterns. Order is important; platform mappings are preferred over
# OS names.
_default_compilers = (
# Platform string mappings
# on a cygwin built python we can use gcc like an ordinary UNIXish
# compiler
('cygwin.*', 'unix'),
('os2emx', 'emx'),
# OS name mappings
('posix', 'unix'),
('nt', 'msvc'),
('mac', 'mwerks'),
)
def get_default_compiler(osname=None, platform=None):
""" Determine the default compiler to use for the given platform.
osname should be one of the standard Python OS names (i.e. the
ones returned by os.name) and platform the common value
returned by sys.platform for the platform in question.
The default values are os.name and sys.platform in case the
parameters are not given.
"""
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
for pattern, compiler in _default_compilers:
if re.match(pattern, platform) is not None or \
re.match(pattern, osname) is not None:
return compiler
# Default to Unix compiler
return 'unix'
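# Illustrative: on a typical Linux build os.name == 'posix', so
# get_default_compiler() returns 'unix'; on native Windows os.name == 'nt'
# and it returns 'msvc'.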
# Map compiler types to (module_name, class_name) pairs -- ie. where to
# find the code that implements an interface to this compiler. (The module
# is assumed to be in the 'distutils' package.)
compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
"standard UNIX-style compiler"),
'msvc': ('msvccompiler', 'MSVCCompiler',
"Microsoft Visual C++"),
'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
"Cygwin port of GNU C Compiler for Win32"),
'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"),
'bcpp': ('bcppcompiler', 'BCPPCompiler',
"Borland C++ Compiler"),
'mwerks': ('mwerkscompiler', 'MWerksCompiler',
"MetroWerks CodeWarrior"),
'emx': ('emxccompiler', 'EMXCCompiler',
"EMX port of GNU C Compiler for OS/2"),
}
def show_compilers():
"""Print list of available compilers (used by the "--help-compiler"
options to "build", "build_ext", "build_clib").
"""
# XXX this "knows" that the compiler option it's describing is
# "--compiler", which just happens to be the case for the three
# commands that use it.
from distutils.fancy_getopt import FancyGetopt
compilers = []
for compiler in compiler_class.keys():
compilers.append(("compiler="+compiler, None,
compiler_class[compiler][2]))
compilers.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("List of available compilers:")
def new_compiler (plat=None,
compiler=None,
verbose=0,
dry_run=0,
force=0):
"""Generate an instance of some CCompiler subclass for the supplied
platform/compiler combination. 'plat' defaults to 'os.name'
(eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
for that platform. Currently only 'posix' and 'nt' are supported, and
the default compilers are "traditional Unix interface" (UnixCCompiler
class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
possible to ask for a Unix compiler object under Windows, and a
Microsoft compiler object under Unix -- if you supply a value for
'compiler', 'plat' is ignored.
"""
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError, msg
try:
module_name = "distutils." + module_name
__import__ (module_name)
module = sys.modules[module_name]
klass = vars(module)[class_name]
except ImportError:
raise DistutilsModuleError, \
"can't compile C/C++ code: unable to load module '%s'" % \
module_name
except KeyError:
raise DistutilsModuleError, \
("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name)
# XXX The None is necessary to preserve backwards compatibility
# with classes that expect verbose to be the first positional
# argument.
return klass (None, dry_run, force)
def gen_preprocess_options (macros, include_dirs):
"""Generate C pre-processor options (-D, -U, -I) as used by at least
two types of compilers: the typical Unix compiler and Visual C++.
'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
means undefine (-U) macro 'name', and (name,value) means define (-D)
macro 'name' to 'value'. 'include_dirs' is just a list of directory
names to be added to the header file search path (-I). Returns a list
of command-line options suitable for either Unix compilers or Visual
C++.
"""
# XXX it would be nice (mainly aesthetic, and so we don't generate
# stupid-looking command lines) to go over 'macros' and eliminate
# redundant definitions/undefinitions (ie. ensure that only the
# latest mention of a particular macro winds up on the command
# line). I don't think it's essential, though, since most (all?)
# Unix C compilers only pay attention to the latest -D or -U
# mention of a macro on their command line. Similar situation for
# 'include_dirs'. I'm punting on both for now. Anyways, weeding out
# redundancies like this should probably be the province of
# CCompiler, since the data structures used are inherited from it
# and therefore common to all CCompiler classes.
pp_opts = []
for macro in macros:
if not (type (macro) is TupleType and
1 <= len (macro) <= 2):
raise TypeError, \
("bad macro definition '%s': " +
"each element of 'macros' list must be a 1- or 2-tuple") % \
macro
if len (macro) == 1: # undefine this macro
pp_opts.append ("-U%s" % macro[0])
elif len (macro) == 2:
if macro[1] is None: # define with no explicit value
pp_opts.append ("-D%s" % macro[0])
else:
# XXX *don't* need to be clever about quoting the
# macro value here, because we're going to avoid the
# shell at all costs when we spawn the command!
pp_opts.append ("-D%s=%s" % macro)
for dir in include_dirs:
pp_opts.append ("-I%s" % dir)
return pp_opts
# gen_preprocess_options ()
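# For example (illustrative inputs):
#     gen_preprocess_options([('NDEBUG', None), ('FOO', '1'), ('OLD',)], ['include'])
#     # -> ['-DNDEBUG', '-DFOO=1', '-UOLD', '-Iinclude']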
def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries):
"""Generate linker options for searching library directories and
linking with specific libraries. 'libraries' and 'library_dirs' are,
respectively, lists of library names (not filenames!) and search
directories. Returns a list of command-line options suitable for use
with some compiler (depending on the two format strings passed in).
"""
lib_opts = []
for dir in library_dirs:
lib_opts.append (compiler.library_dir_option (dir))
for dir in runtime_library_dirs:
opt = compiler.runtime_library_dir_option (dir)
if type(opt) is ListType:
lib_opts = lib_opts + opt
else:
lib_opts.append (opt)
# XXX it's important that we *not* remove redundant library mentions!
# sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
# resolve all symbols. I just hope we never have to say "-lfoo obj.o
# -lbar" to get things to work -- that's certainly a possibility, but a
# pretty nasty way to arrange your C code.
for lib in libraries:
(lib_dir, lib_name) = os.path.split (lib)
if lib_dir:
lib_file = compiler.find_library_file ([lib_dir], lib_name)
if lib_file:
lib_opts.append (lib_file)
else:
compiler.warn ("no library file corresponding to "
"'%s' found (skipping)" % lib)
else:
lib_opts.append (compiler.library_option (lib))
return lib_opts
# gen_lib_options ()
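# Illustrative (assuming a compiler object with UnixCCompiler-style option
# methods, so library_dir_option -> '-L...' and library_option -> '-l...'):
#     gen_lib_options(cc, ['/opt/lib'], [], ['foo'])
#     # -> ['-L/opt/lib', '-lfoo']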
| {
"content_hash": "afec5795158fb22a671a4d33f5f62354",
"timestamp": "",
"source": "github",
"line_count": 1270,
"max_line_length": 86,
"avg_line_length": 41.04015748031496,
"alnum_prop": 0.5898006561654612,
"repo_name": "MalloyPower/parsing-python",
"id": "2194501901f60c9b4f2d059a9bc31ad8bd55df9a",
"size": "52121",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.4/Lib/distutils/ccompiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
"""Google Cloud Pipeline Dataproc Batch components."""
import os
try:
from kfp.v2.components import load_component_from_file
except ImportError:
from kfp.components import load_component_from_file
__all__ = [
'DataprocPySparkBatchOp',
'DataprocSparkBatchOp',
'DataprocSparkRBatchOp',
'DataprocSparkSqlBatchOp'
]
DataprocPySparkBatchOp = load_component_from_file(
os.path.join(os.path.dirname(__file__), 'create_pyspark_batch/component.yaml'))
DataprocSparkBatchOp = load_component_from_file(
os.path.join(os.path.dirname(__file__), 'create_spark_batch/component.yaml'))
DataprocSparkRBatchOp = load_component_from_file(
os.path.join(os.path.dirname(__file__), 'create_spark_r_batch/component.yaml'))
DataprocSparkSqlBatchOp = load_component_from_file(
os.path.join(os.path.dirname(__file__), 'create_spark_sql_batch/component.yaml'))
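# A minimal usage sketch (hypothetical pipeline and parameter values; the
# accepted inputs are defined by each component's component.yaml):
#
#     from kfp.v2 import dsl
#     from google_cloud_pipeline_components.v1 import dataproc
#
#     @dsl.pipeline(name='dataproc-batch-demo')
#     def demo_pipeline():
#         dataproc.DataprocPySparkBatchOp(
#             project='my-project',
#             location='us-central1',
#             main_python_file_uri='gs://my-bucket/job.py')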
| {
"content_hash": "8c8c70de192bf0fb5aa72f593d63ee1d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 89,
"avg_line_length": 33.074074074074076,
"alnum_prop": 0.7267637178051511,
"repo_name": "kubeflow/pipelines",
"id": "26d2436887a5fd0f160f29f7119d8e9ce9c24494",
"size": "1499",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "components/google-cloud/google_cloud_pipeline_components/v1/dataproc/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "CSS",
"bytes": "2171"
},
{
"name": "Dockerfile",
"bytes": "49331"
},
{
"name": "Go",
"bytes": "1903937"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "JavaScript",
"bytes": "544297"
},
{
"name": "Jinja",
"bytes": "938"
},
{
"name": "Jupyter Notebook",
"bytes": "359548"
},
{
"name": "Makefile",
"bytes": "22164"
},
{
"name": "Mustache",
"bytes": "23652"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "5684887"
},
{
"name": "Shell",
"bytes": "264595"
},
{
"name": "Smarty",
"bytes": "8295"
},
{
"name": "Starlark",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "4294958"
}
],
"symlink_target": ""
} |
import os
from contextlib import contextmanager
from storlets.gateway.common.exceptions import StorletLoggerError
class StorletLogger(object):
def __init__(self, path):
self.log_path = path
self._file = None
def open(self):
if self._file is not None:
raise StorletLoggerError('StorletLogger is already open')
# Create the log directory on demand, then open the file for append.
log_dir_path = os.path.dirname(self.log_path)
if not os.path.exists(log_dir_path):
os.makedirs(log_dir_path)
self._file = open(self.log_path, 'a')
def getfd(self):
if self._file is None:
# TODO(kota_): Is it safe to return None?
return None
return self._file.fileno()
def getsize(self):
statinfo = os.stat(self.log_path)
return statinfo.st_size
def close(self):
if self._file is None:
raise StorletLoggerError('StorletLogger is not open')
self._file.close()
self._file = None
@contextmanager
def activate(self):
self.open()
try:
yield
finally:
self.close()
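# A minimal usage sketch (hypothetical log path):
#
#     logger = StorletLogger('/var/log/storlets/app.log')
#     with logger.activate():
#         fd = logger.getfd()   # descriptor handed to the storlet runtime
#     size = logger.getsize()   # size on disk after the file is closed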
| {
"content_hash": "aa0248976afdb7a87e4217cb8d2da299",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 69,
"avg_line_length": 24.862745098039216,
"alnum_prop": 0.555993690851735,
"repo_name": "openstack/storlets",
"id": "3450ec80db89395b9988dbdf4004a6b0721935ed",
"size": "1916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storlets/gateway/common/logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "31430"
},
{
"name": "Java",
"bytes": "184917"
},
{
"name": "Jupyter Notebook",
"bytes": "7689"
},
{
"name": "Makefile",
"bytes": "347"
},
{
"name": "Python",
"bytes": "579917"
},
{
"name": "Shell",
"bytes": "20127"
}
],
"symlink_target": ""
} |
"""wsgi implement behaviour that provides service control as wsgi
middleware.
It provides the :class:`Middleware`, which is a WSGI middleware implementation
that wraps another WSGI application to use a provided
:class:`endpoints_management.control.client.Client` to provide service control.
"""
# pylint: disable=too-many-arguments
from __future__ import absolute_import
from datetime import datetime
import httplib
import logging
import os
import socket
import uuid
import urllib2
import urlparse
import wsgiref.util
from webob.exc import HTTPServiceUnavailable, status_map as exc_status_map
from ..auth import suppliers, tokens
from ..config.service_config import ServiceConfigException
from . import check_request, quota_request, report_request, service, sm_messages
_logger = logging.getLogger(__name__)
_CONTENT_LENGTH = u'content-length'
_DEFAULT_LOCATION = u'global'
_METADATA_SERVER_URL = u'http://metadata.google.internal'
def _running_on_gce():
headers = {u'Metadata-Flavor': u'Google'}
try:
request = urllib2.Request(_METADATA_SERVER_URL, headers=headers)
response = urllib2.urlopen(request)
if response.info().getheader(u'Metadata-Flavor') == u'Google':
return True
except (urllib2.URLError, socket.error):
pass
return False
def _get_platform():
server_software = os.environ.get(u'SERVER_SOFTWARE', u'')
if server_software.startswith(u'Development'):
return report_request.ReportedPlatforms.DEVELOPMENT
elif os.environ.get(u'KUBERNETES_SERVICE_HOST'):
return report_request.ReportedPlatforms.GKE
elif _running_on_gce():
# We're either in GAE Flex or GCE
if os.environ.get(u'GAE_MODULE_NAME'):
return report_request.ReportedPlatforms.GAE_FLEX
else:
return report_request.ReportedPlatforms.GCE
elif server_software.startswith(u'Google App Engine'):
return report_request.ReportedPlatforms.GAE_STANDARD
return report_request.ReportedPlatforms.UNKNOWN
platform = _get_platform()
def running_on_devserver():
return platform == report_request.ReportedPlatforms.DEVELOPMENT
def add_all(application, project_id, control_client,
loader=service.Loaders.FROM_SERVICE_MANAGEMENT):
"""Adds all endpoints middleware to a wsgi application.
Sets up application to use all default endpoints middleware.
Example:
>>> application = MyWsgiApp() # an existing WSGI application
>>>
>>> # the name of the controlled service
>>> service_name = 'my-service-name'
>>>
>>> # A GCP project with service control enabled
>>> project_id = 'my-project-id'
>>>
>>> # wrap the app for service control
>>> from endpoints_management.control import wsgi
>>> control_client = client.Loaders.DEFAULT.load(service_name)
>>> control_client.start()
>>> wrapped_app = add_all(application, project_id, control_client)
>>>
>>> # now use wrapped_app in place of app
Args:
application: the wrapped wsgi application
project_id: the project_id that's providing service control support
control_client: the service control client instance
loader (:class:`endpoints_management.control.service.Loader`): loads the service
instance that configures this instance's behaviour
"""
try:
a_service = loader.load()
if not a_service:
raise ValueError(u'No service config loaded.')
except (ServiceConfigException, ValueError):
_logger.exception(u'Failed to load service config, installing server error handler.')
# This will answer all requests with HTTP 503 Service Unavailable
return HTTPServiceUnavailable()
authenticator = _create_authenticator(a_service)
wrapped_app = Middleware(application, project_id, control_client)
if authenticator:
wrapped_app = AuthenticationMiddleware(wrapped_app, authenticator)
return EnvironmentMiddleware(wrapped_app, a_service)
def _next_operation_uuid():
return uuid.uuid4().hex
def _request_method(environ):
return environ.get(u'HTTP_X_HTTP_METHOD_OVERRIDE', environ[u'REQUEST_METHOD'])
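# Illustrative: a POST carrying an override header is reported as the
# overridden method, e.g. (hypothetical environ)
#     {'REQUEST_METHOD': 'POST', 'HTTP_X_HTTP_METHOD_OVERRIDE': 'DELETE'}
# yields 'DELETE'.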
class EnvironmentMiddleware(object):
"""A WSGI middleware that sets related variables in the environment.
It attempts to add the following vars:
- google.api.config.service
- google.api.config.service_name
- google.api.config.method_registry
- google.api.config.reporting_rules
- google.api.config.method_info
"""
# pylint: disable=too-few-public-methods
SERVICE = u'google.api.config.service'
SERVICE_NAME = u'google.api.config.service_name'
METHOD_REGISTRY = u'google.api.config.method_registry'
METHOD_INFO = u'google.api.config.method_info'
REPORTING_RULES = u'google.api.config.reporting_rules'
def __init__(self, application, a_service):
"""Initializes a new Middleware instance.
Args:
application: the wrapped wsgi application
a_service (:class:`endpoints_management.gen.servicemanagement_v1_messages.Service`):
a service instance
"""
if not isinstance(a_service, sm_messages.Service):
raise ValueError(u"service is None or not an instance of Service")
self._application = application
self._service = a_service
method_registry, reporting_rules = self._configure()
self._method_registry = method_registry
self._reporting_rules = reporting_rules
def _configure(self):
registry = service.MethodRegistry(self._service)
logs, metric_names, label_names = service.extract_report_spec(self._service)
reporting_rules = report_request.ReportingRules.from_known_inputs(
logs=logs,
metric_names=metric_names,
label_names=label_names)
return registry, reporting_rules
def __call__(self, environ, start_response):
environ[self.SERVICE] = self._service
environ[self.SERVICE_NAME] = self._service.name
environ[self.METHOD_REGISTRY] = self._method_registry
environ[self.REPORTING_RULES] = self._reporting_rules
parsed_uri = urlparse.urlparse(wsgiref.util.request_uri(environ))
http_method = _request_method(environ)
method_info = self._method_registry.lookup(http_method, parsed_uri.path)
if method_info:
environ[self.METHOD_INFO] = method_info
return self._application(environ, start_response)
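# Illustrative sketch of a downstream WSGI app reading the injected keys
# (hypothetical application code, not part of this module):
#
#     def downstream_app(environ, start_response):
#         method_info = environ.get(EnvironmentMiddleware.METHOD_INFO)
#         body = b'matched' if method_info else b'no API method matched'
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return [body]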
class Middleware(object):
"""A WSGI middleware implementation that provides service control.
Example:
>>> app = MyWsgiApp() # an existing WSGI application
>>>
>>> # the name of the controlled service
>>> service_name = 'my-service-name'
>>>
>>> # A GCP project with service control enabled
>>> project_id = 'my-project-id'
>>>
>>> # wrap the app for service control
>>> from endpoints_management.control import client, wsgi, service
>>> control_client = client.Loaders.DEFAULT.load(service_name)
>>> control_client.start()
>>> wrapped_app = wsgi.Middleware(app, control_client, project_id)
>>> env_app = wsgi.EnvironmentMiddleware(wrapped,app)
>>>
>>> # now use env_app in place of app
"""
# pylint: disable=too-few-public-methods, fixme
_NO_API_KEY_MSG = (
u'Method does not allow callers without established identity.'
u' Please use an API key or other form of API consumer identity'
u' to call this API.')
def __init__(self,
application,
project_id,
control_client,
next_operation_id=_next_operation_uuid,
timer=datetime.utcnow):
"""Initializes a new Middleware instance.
Args:
application: the wrapped wsgi application
project_id: the project_id that's providing service control support
control_client: the service control client instance
next_operation_id (func): produces the next operation
timer (func[[datetime.datetime]]): a func that obtains the current time
"""
self._application = application
self._project_id = project_id
self._control_client = control_client
self._next_operation_id = next_operation_id
self._timer = timer
def __call__(self, environ, start_response):
# pylint: disable=too-many-locals
method_info = environ.get(EnvironmentMiddleware.METHOD_INFO)
if not method_info:
# just allow the wrapped application to handle the request
_logger.debug(u'method_info not present in the wsgi environment'
u', no service control')
return self._application(environ, start_response)
latency_timer = _LatencyTimer(self._timer)
latency_timer.start()
# Determine if the request can proceed
http_method = _request_method(environ)
parsed_uri = urlparse.urlparse(wsgiref.util.request_uri(environ))
app_info = _AppInfo()
# TODO: determine if any of the more complex ways of getting the request size
# (e.g.) buffering and counting the wsgi input stream is more appropriate here
try:
app_info.request_size = int(environ.get(u'CONTENT_LENGTH',
report_request.NOT_SET))
except ValueError:
_logger.warn(u'ignored bad content-length: %s', environ.get(u'CONTENT_LENGTH'))
app_info.http_method = http_method
app_info.url = parsed_uri
# Default to 0 for consumer project number to disable per-consumer
# metric reporting if the check request doesn't return one.
consumer_project_number = 0
check_info = self._create_check_info(method_info, parsed_uri, environ)
if not check_info.api_key and not method_info.allow_unregistered_calls:
_logger.debug(u"skipping %s, no api key was provided", parsed_uri)
error_msg = self._handle_missing_api_key(app_info, start_response)
else:
check_req = check_info.as_check_request()
_logger.debug(u'checking %s with %s', method_info, check_req)
check_resp = self._control_client.check(check_req)
error_msg = self._handle_check_response(app_info, check_resp, start_response)
if (check_resp and check_resp.checkInfo and
check_resp.checkInfo.consumerInfo):
consumer_project_number = (
check_resp.checkInfo.consumerInfo.projectNumber)
if error_msg is None:
quota_info = self._create_quota_info(method_info, parsed_uri, environ)
if not quota_info.quota_info:
_logger.debug(u'no metric costs for this method')
else:
quota_req = quota_info.as_allocate_quota_request()
quota_response = self._control_client.allocate_quota(quota_req)
error_msg = self._handle_quota_response(
app_info, quota_response, start_response)
if error_msg:
# send a report request that indicates that the request failed
rules = environ.get(EnvironmentMiddleware.REPORTING_RULES)
latency_timer.end()
report_req = self._create_report_request(method_info,
check_info,
app_info,
latency_timer,
rules,
consumer_project_number)
_logger.debug(u'scheduling report_request %s', report_req)
self._control_client.report(report_req)
return error_msg
# update the client with the response
latency_timer.app_start()
# run the application request in an inner handler that sets the status
# and response code on app_info
def inner_start_response(status, response_headers, exc_info=None):
app_info.response_code = int(status.partition(u' ')[0])
for name, value in response_headers:
if name.lower() == _CONTENT_LENGTH:
app_info.response_size = int(value)
break
return start_response(status, response_headers, exc_info)
result = self._application(environ, inner_start_response)
# perform reporting, result must be joined otherwise the latency record
# is incorrect
result = b''.join(result)
latency_timer.end()
app_info.response_size = len(result)
rules = environ.get(EnvironmentMiddleware.REPORTING_RULES)
report_req = self._create_report_request(method_info,
check_info,
app_info,
latency_timer,
rules,
consumer_project_number)
_logger.debug(u'scheduling report_request %s', report_req)
self._control_client.report(report_req)
return result
def _create_report_request(self,
method_info,
check_info,
app_info,
latency_timer,
reporting_rules,
consumer_project_number):
# TODO: determine how to obtain the consumer_project_id and the location
# correctly
report_info = report_request.Info(
api_key=check_info.api_key,
api_key_valid=app_info.api_key_valid,
api_method=method_info.selector,
consumer_project_id=self._project_id, # TODO: see above
consumer_project_number=consumer_project_number,
location=_DEFAULT_LOCATION, # TODO: see above
method=app_info.http_method,
operation_id=check_info.operation_id,
operation_name=check_info.operation_name,
backend_time=latency_timer.backend_time,
overhead_time=latency_timer.overhead_time,
platform=platform,
producer_project_id=self._project_id,
protocol=report_request.ReportedProtocols.HTTP,
request_size=app_info.request_size,
request_time=latency_timer.request_time,
response_code=app_info.response_code,
response_size=app_info.response_size,
referer=check_info.referer,
service_name=check_info.service_name,
url=app_info.url
)
return report_info.as_report_request(reporting_rules, timer=self._timer)
def _get_api_key_info(self, method_info, parsed_uri, environ):
api_key = _find_api_key_param(method_info, parsed_uri)
if not api_key:
api_key = _find_api_key_header(method_info, environ)
if not api_key:
api_key = _find_default_api_key_param(parsed_uri)
return api_key
def _create_check_info(self, method_info, parsed_uri, environ):
service_name = environ.get(EnvironmentMiddleware.SERVICE_NAME)
operation_id = self._next_operation_id()
api_key = self._get_api_key_info(method_info, parsed_uri, environ)
check_info = check_request.Info(
android_cert_fingerprint=environ.get('HTTP_X_ANDROID_CERT', ''),
android_package_name=environ.get('HTTP_X_ANDROID_PACKAGE', ''),
api_key=api_key,
api_key_valid=api_key is not None,
client_ip=environ.get(u'REMOTE_ADDR', u''),
consumer_project_id=self._project_id, # TODO: switch this to producer_project_id
ios_bundle_id=environ.get('HTTP_X_IOS_BUNDLE_IDENTIFIER', ''),
operation_id=operation_id,
operation_name=method_info.selector,
referer=environ.get(u'HTTP_REFERER', u''),
service_name=service_name
)
return check_info
def _create_quota_info(self, method_info, parsed_uri, environ):
service_name = environ.get(EnvironmentMiddleware.SERVICE_NAME)
operation_id = self._next_operation_id()
api_key = self._get_api_key_info(method_info, parsed_uri, environ)
service = environ.get(EnvironmentMiddleware.SERVICE)
return quota_request.Info(
api_key=api_key,
api_key_valid=api_key is not None,
referer=environ.get(u'HTTP_REFERER', u''),
consumer_project_id=self._project_id,
operation_id=operation_id,
operation_name=method_info.selector,
service_name=service_name,
quota_info=method_info.quota_info,
config_id=service.id,
client_ip=environ.get(u'REMOTE_ADDR', u''),
)
def _handle_check_response(self, app_info, check_resp, start_response):
code, detail, api_key_valid = check_request.convert_response(
check_resp, self._project_id)
if code == httplib.OK:
return None # the check was OK
# there was a problem; the request cannot proceed
_logger.warn(u'Check failed %d, %s', code, detail)
app_info.response_code = code
app_info.api_key_valid = api_key_valid
return self._return_simple_http_response(start_response, code, detail)
def _handle_quota_response(self, app_info, quota_resp, start_response):
code, detail = quota_request.convert_response(
quota_resp, self._project_id)
if code == httplib.OK:
return None # the quota was OK
# there was a problem; the request cannot proceed
_logger.warn(u'Quota failed %d, %s', code, detail)
app_info.response_code = code
return self._return_simple_http_response(start_response, code, detail)
def _handle_missing_api_key(self, app_info, start_response):
code = httplib.UNAUTHORIZED
detail = self._NO_API_KEY_MSG
_logger.warn(u'Check not performed %d, %s', code, detail)
app_info.response_code = code
app_info.api_key_valid = False
return self._return_simple_http_response(start_response, code, detail)
def _return_simple_http_response(self, start_response, code, detail):
resp = exc_status_map[code](
detail=detail, body_template='''${explanation}\n\n${detail}\n''')
# The resp (response) object is actually a very specialized
# WSGI application, which means it accepts an environment
# dictionary and a start_response callable. We don't have
# access to the real request's WSGI environment at this point,
# and even if we did, a lot of actual requests will simply
# assume an application/json content type instead of
# specifying it with the Accept header. So we just make up a
# minimalistic WSGI environment; REQUEST_METHOD POST so that
# it's fine to return a body in the response and Accept
# application/json so the error response app will generate
# JSON instead of HTML or text.
return resp({'REQUEST_METHOD': 'POST', 'HTTP_ACCEPT': 'application/json'}, start_response)
class _AppInfo(object):
# pylint: disable=too-few-public-methods
def __init__(self):
self.api_key_valid = True
self.response_code = httplib.INTERNAL_SERVER_ERROR
self.response_size = report_request.NOT_SET
self.request_size = report_request.NOT_SET
self.http_method = None
self.url = None
class _LatencyTimer(object):
def __init__(self, timer):
self._timer = timer
self._start = None
self._app_start = None
self._end = None
def start(self):
self._start = self._timer()
def app_start(self):
self._app_start = self._timer()
def end(self):
self._end = self._timer()
if self._app_start is None:
self._app_start = self._end
@property
def request_time(self):
if self._start and self._end:
return self._end - self._start
return None
@property
def overhead_time(self):
if self._start and self._app_start:
return self._app_start - self._start
return None
@property
def backend_time(self):
if self._end and self._app_start:
return self._end - self._app_start
return None
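# A minimal sketch (illustrative timestamps, not produced by this module) of
# the properties above: with start() at t0, app_start() at t1 and end() at
# t2, overhead_time == t1 - t0, backend_time == t2 - t1 and
# request_time == t2 - t0.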
def _find_api_key_param(info, parsed_uri):
params = info.api_key_url_query_params
if not params:
return None
param_dict = urlparse.parse_qs(parsed_uri.query)
if not param_dict:
return None
for q in params:
value = param_dict.get(q)
if value:
# param's values are lists, assume the first value
# is what's needed
return value[0]
return None
_DEFAULT_API_KEYS = (u'key', u'api_key')
def _find_default_api_key_param(parsed_uri):
param_dict = urlparse.parse_qs(parsed_uri.query)
if not param_dict:
return None
for q in _DEFAULT_API_KEYS:
value = param_dict.get(q)
if value:
# param's values are lists, assume the first value
# is what's needed
return value[0]
return None
def _find_api_key_header(info, environ):
headers = info.api_key_http_header
if not headers:
return None
for h in headers:
value = environ.get(u'HTTP_' + h.upper())
if value:
return value # headers have single values
return None
def _create_authenticator(a_service):
"""Create an instance of :class:`google.auth.tokens.Authenticator`.
Args:
a_service (:class:`endpoints_management.gen.servicemanagement_v1_messages.Service`): a
service instance
"""
if not isinstance(a_service, sm_messages.Service):
raise ValueError(u"service is None or not an instance of Service")
authentication = a_service.authentication
if not authentication:
_logger.info(u"authentication is not configured in service, "
u"authentication checks will be disabled")
return
issuers_to_provider_ids = {}
issuer_uri_configs = {}
for provider in authentication.providers:
issuer = provider.issuer
jwks_uri = provider.jwksUri
# Enable OpenID discovery if jwks_uri is unset
open_id = jwks_uri is None
issuer_uri_configs[issuer] = suppliers.IssuerUriConfig(open_id, jwks_uri)
issuers_to_provider_ids[issuer] = provider.id
key_uri_supplier = suppliers.KeyUriSupplier(issuer_uri_configs)
jwks_supplier = suppliers.JwksSupplier(key_uri_supplier)
authenticator = tokens.Authenticator(issuers_to_provider_ids, jwks_supplier)
return authenticator
class AuthenticationMiddleware(object):
"""A WSGI middleware that does authentication checks for incoming
requests.
In environments where os.environ is replaced with a request-local and
thread-independent copy (e.g. Google App Engine), the authentication
result is added to os.environ so that the wrapped application can make
use of it.
"""
# pylint: disable=too-few-public-methods
USER_INFO = u"google.api.auth.user_info"
def __init__(self, application, authenticator):
"""Initializes an authentication middleware instance.
Args:
application: a WSGI application to be wrapped
authenticator (:class:`google.auth.tokens.Authenticator`): an
authenticator that authenticates incoming requests
"""
if not isinstance(authenticator, tokens.Authenticator):
raise ValueError(u"Invalid authenticator")
self._application = application
self._authenticator = authenticator
def __call__(self, environ, start_response):
method_info = environ.get(EnvironmentMiddleware.METHOD_INFO)
if not method_info or not method_info.auth_info:
# No authentication configuration for this method
_logger.debug(u"authentication is not configured")
return self._application(environ, start_response)
auth_token = _extract_auth_token(environ)
user_info = None
if not auth_token:
_logger.debug(u"No auth token is attached to the request")
else:
try:
service_name = environ.get(EnvironmentMiddleware.SERVICE_NAME)
user_info = self._authenticator.authenticate(auth_token,
method_info.auth_info,
service_name)
except Exception: # pylint: disable=broad-except
_logger.debug(u"Cannot decode and verify the auth token. The backend "
u"will not be able to retrieve user info", exc_info=True)
environ[self.USER_INFO] = user_info
# pylint: disable=protected-access
if user_info and not isinstance(os.environ, os._Environ):
# Set user info into os.environ only if os.environ is replaced
# with a request-local copy
os.environ[self.USER_INFO] = user_info
response = self._application(environ, start_response)
# Erase user info from os.environ for safety and sanity.
if self.USER_INFO in os.environ:
del os.environ[self.USER_INFO]
return response
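# A minimal sketch (hypothetical wrapped application, not part of this
# module) of consuming the authentication result stored above:
#
#   def app(environ, start_response):
#       user_info = environ.get(AuthenticationMiddleware.USER_INFO)
#       # user_info is None when the request was not authenticated
#       ...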
_ACCESS_TOKEN_PARAM_NAME = u"access_token"
_BEARER_TOKEN_PREFIX = u"Bearer "
_BEARER_TOKEN_PREFIX_LEN = len(_BEARER_TOKEN_PREFIX)
def _extract_auth_token(environ):
# First try to extract auth token from HTTP authorization header.
auth_header = environ.get(u"HTTP_AUTHORIZATION")
if auth_header:
if auth_header.startswith(_BEARER_TOKEN_PREFIX):
return auth_header[_BEARER_TOKEN_PREFIX_LEN:]
return
# Then try to read auth token from query.
parameters = urlparse.parse_qs(environ.get(u"QUERY_STRING", u""))
if _ACCESS_TOKEN_PARAM_NAME in parameters:
auth_token, = parameters[_ACCESS_TOKEN_PARAM_NAME]
return auth_token
| {
"content_hash": "7ede4e3ac79feb02852a91009c3352f6",
"timestamp": "",
"source": "github",
"line_count": 686,
"max_line_length": 98,
"avg_line_length": 38.74052478134111,
"alnum_prop": 0.6231562311860325,
"repo_name": "nparley/mylatitude",
"id": "479ca68f237f13a39ffc35f6515f24bcd6b6dc58",
"size": "27169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/endpoints_management/control/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10569"
},
{
"name": "HTML",
"bytes": "14612"
},
{
"name": "JavaScript",
"bytes": "46484"
},
{
"name": "Python",
"bytes": "9183716"
}
],
"symlink_target": ""
} |
"""
Collect icmp round trip times
Currently only valid for IPv4 hosts
#### Dependencies
* ping
#### Configuration
Configuration is done by creating a file named PingCollector.conf in the
collectors_config_path, with entries such as:
* enabled = true
* interval = 60
* target_1 = example.org
* target_fw = 192.168.0.1
* target_localhost = localhost
Test your configuration using the following command:
diamond-setup --print -C PingCollector
You should get back a response that indicates 'enabled': True and shows
entries for your targets in pairs like:
'target_1': 'example.org'
The graphite nodes pushed are derived from the pinged hostnames by replacing all
dots with underscores, i.e. 'www.example.org' becomes 'www_example_org'.
"""
import diamond.collector
class PingCollector(diamond.collector.ProcessCollector):
def get_default_config_help(self):
config_help = super(PingCollector, self).get_default_config_help()
config_help.update({
'bin': 'The path to the ping binary',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(PingCollector, self).get_default_config()
config.update({
'path': 'ping',
'bin': '/bin/ping',
})
return config
def collect(self):
for key in self.config.keys():
if key.startswith("target_"):
host = self.config[key]
metric_name = host.replace('.', '_')
ping = self.run_command(['-nq', '-c 1', host])
ping = ping[0].strip().split("\n")[-1]
# Linux
if ping.startswith('rtt'):
ping = ping.split()[3].split('/')[0]
metric_value = float(ping)
# OS X
elif ping.startswith('round-trip '):
ping = ping.split()[3].split('/')[0]
metric_value = float(ping)
# Unknown
else:
metric_value = 10000
self.publish(metric_name, metric_value, precision=3)
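# A minimal sketch (sample Linux ping output, assumed for illustration) of
# the rtt parsing performed in collect() above:
#
#   line = 'rtt min/avg/max/mdev = 0.043/0.051/0.062/0.008 ms'
#   line.split()[3]                  # -> '0.043/0.051/0.062/0.008'
#   line.split()[3].split('/')[0]    # -> '0.043', the minimum rtt in ms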
| {
"content_hash": "f5810201dd8cd316c1b67f0e17981598",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 80,
"avg_line_length": 27.72151898734177,
"alnum_prop": 0.5616438356164384,
"repo_name": "MichaelDoyle/Diamond",
"id": "8d28816db7b20937f37f33a546fb79473a6d5c80",
"size": "2206",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/collectors/ping/ping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21681"
},
{
"name": "Makefile",
"bytes": "4465"
},
{
"name": "Python",
"bytes": "1595996"
},
{
"name": "Roff",
"bytes": "23868"
},
{
"name": "Ruby",
"bytes": "230"
},
{
"name": "Shell",
"bytes": "12795"
}
],
"symlink_target": ""
} |
"""Test asyncio support"""
# Copyright (c) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import sys
import pytest
from pytest import mark
import zmq
from zmq.utils.strtypes import u
try:
import asyncio
import zmq.asyncio as zaio
from zmq.auth.asyncio import AsyncioAuthenticator
except ImportError:
if sys.version_info >= (3, 4):
raise
asyncio = None
from concurrent.futures import CancelledError
from zmq.tests import BaseZMQTestCase, SkipTest
from zmq.tests.test_auth import TestThreadAuthentication
class TestAsyncIOSocket(BaseZMQTestCase):
if asyncio is not None:
Context = zaio.Context
def setUp(self):
if asyncio is None:
raise SkipTest()
self.loop = zaio.ZMQEventLoop()
asyncio.set_event_loop(self.loop)
super(TestAsyncIOSocket, self).setUp()
def tearDown(self):
self.loop.close()
super(TestAsyncIOSocket, self).tearDown()
def test_socket_class(self):
s = self.context.socket(zmq.PUSH)
assert isinstance(s, zaio.Socket)
s.close()
def test_recv_multipart(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_multipart()
assert not f.done()
yield from a.send(b'hi')
recvd = yield from f
self.assertEqual(recvd, [b'hi'])
self.loop.run_until_complete(test())
def test_recv(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f1 = b.recv()
f2 = b.recv()
assert not f1.done()
assert not f2.done()
yield from a.send_multipart([b'hi', b'there'])
recvd = yield from f2
assert f1.done()
self.assertEqual(f1.result(), b'hi')
self.assertEqual(recvd, b'there')
self.loop.run_until_complete(test())
@mark.skipif(not hasattr(zmq, 'RCVTIMEO'), reason="requires RCVTIMEO")
def test_recv_timeout(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
b.rcvtimeo = 100
f1 = b.recv()
b.rcvtimeo = 1000
f2 = b.recv_multipart()
with self.assertRaises(zmq.Again):
yield from f1
yield from a.send_multipart([b'hi', b'there'])
recvd = yield from f2
assert f2.done()
self.assertEqual(recvd, [b'hi', b'there'])
self.loop.run_until_complete(test())
@mark.skipif(not hasattr(zmq, 'SNDTIMEO'), reason="requires SNDTIMEO")
def test_send_timeout(self):
@asyncio.coroutine
def test():
s = self.socket(zmq.PUSH)
s.sndtimeo = 100
with self.assertRaises(zmq.Again):
yield from s.send(b'not going anywhere')
self.loop.run_until_complete(test())
def test_recv_string(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_string()
assert not f.done()
msg = u('πøøπ')
yield from a.send_string(msg)
recvd = yield from f
assert f.done()
self.assertEqual(f.result(), msg)
self.assertEqual(recvd, msg)
self.loop.run_until_complete(test())
def test_recv_json(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_json()
assert not f.done()
obj = dict(a=5)
yield from a.send_json(obj)
recvd = yield from f
assert f.done()
self.assertEqual(f.result(), obj)
self.assertEqual(recvd, obj)
self.loop.run_until_complete(test())
def test_recv_json_cancelled(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_json()
assert not f.done()
f.cancel()
# cycle eventloop to allow cancel events to fire
yield from asyncio.sleep(0)
obj = dict(a=5)
yield from a.send_json(obj)
with pytest.raises(CancelledError):
recvd = yield from f
assert f.done()
# give it a chance to incorrectly consume the event
events = yield from b.poll(timeout=5)
assert events
yield from asyncio.sleep(0)
# make sure cancelled recv didn't eat up event
f = b.recv_json()
recvd = yield from asyncio.wait_for(f, timeout=5)
assert recvd == obj
self.loop.run_until_complete(test())
def test_recv_pyobj(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_pyobj()
assert not f.done()
obj = dict(a=5)
yield from a.send_pyobj(obj)
recvd = yield from f
assert f.done()
self.assertEqual(f.result(), obj)
self.assertEqual(recvd, obj)
self.loop.run_until_complete(test())
def test_recv_dontwait(self):
@asyncio.coroutine
def test():
push, pull = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = pull.recv(zmq.DONTWAIT)
with self.assertRaises(zmq.Again):
yield from f
yield from push.send(b'ping')
yield from pull.poll() # ensure message will be waiting
f = pull.recv(zmq.DONTWAIT)
assert f.done()
msg = yield from f
self.assertEqual(msg, b'ping')
self.loop.run_until_complete(test())
def test_recv_cancel(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f1 = b.recv()
f2 = b.recv_multipart()
assert f1.cancel()
assert f1.done()
assert not f2.done()
yield from a.send_multipart([b'hi', b'there'])
recvd = yield from f2
assert f1.cancelled()
assert f2.done()
self.assertEqual(recvd, [b'hi', b'there'])
self.loop.run_until_complete(test())
def test_poll(self):
@asyncio.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.poll(timeout=0)
yield from asyncio.sleep(0)
self.assertEqual(f.result(), 0)
f = b.poll(timeout=1)
assert not f.done()
evt = yield from f
self.assertEqual(evt, 0)
f = b.poll(timeout=1000)
assert not f.done()
yield from a.send_multipart([b'hi', b'there'])
evt = yield from f
self.assertEqual(evt, zmq.POLLIN)
recvd = yield from b.recv_multipart()
self.assertEqual(recvd, [b'hi', b'there'])
self.loop.run_until_complete(test())
def test_aiohttp(self):
try:
import aiohttp
except ImportError:
raise SkipTest("Requires aiohttp")
from aiohttp import web
zmq.asyncio.install()
@asyncio.coroutine
def echo(request):
print(request.path)
return web.Response(body=str(request).encode('utf8'))
@asyncio.coroutine
def server(loop):
app = web.Application(loop=loop)
app.router.add_route('GET', '/', echo)
srv = yield from loop.create_server(app.make_handler(),
'127.0.0.1', 8080)
print("Server started at http://127.0.0.1:8080")
return srv
@asyncio.coroutine
def client():
push, pull = self.create_bound_pair(zmq.PUSH, zmq.PULL)
res = yield from aiohttp.request('GET', 'http://127.0.0.1:8080/')
text = yield from res.text()
yield from push.send(text.encode('utf8'))
rcvd = yield from pull.recv()
self.assertEqual(rcvd.decode('utf8'), text)
loop = asyncio.get_event_loop()
loop.run_until_complete(server(loop))
print("servered")
loop.run_until_complete(client())
def test_poll_raw(self):
@asyncio.coroutine
def test():
p = zaio.Poller()
# make a pipe
r, w = os.pipe()
r = os.fdopen(r, 'rb')
w = os.fdopen(w, 'wb')
# POLLOUT
p.register(r, zmq.POLLIN)
p.register(w, zmq.POLLOUT)
evts = yield from p.poll(timeout=1)
evts = dict(evts)
assert r.fileno() not in evts
assert w.fileno() in evts
assert evts[w.fileno()] == zmq.POLLOUT
# POLLIN
p.unregister(w)
w.write(b'x')
w.flush()
evts = yield from p.poll(timeout=1000)
evts = dict(evts)
assert r.fileno() in evts
assert evts[r.fileno()] == zmq.POLLIN
assert r.read(1) == b'x'
r.close()
w.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(test())
class TestAsyncioAuthentication(TestThreadAuthentication):
"""Test authentication running in a asyncio task"""
if asyncio is not None:
Context = zaio.Context
def shortDescription(self):
"""Rewrite doc strings from TestThreadAuthentication from
'threaded' to 'asyncio'.
"""
doc = self._testMethodDoc
if doc:
doc = doc.split("\n")[0].strip()
if doc.startswith('threaded auth'):
doc = doc.replace('threaded auth', 'asyncio auth')
return doc
def setUp(self):
if asyncio is None:
raise SkipTest()
self.loop = zaio.ZMQEventLoop()
asyncio.set_event_loop(self.loop)
super().setUp()
def tearDown(self):
super().tearDown()
self.loop.close()
def make_auth(self):
return AsyncioAuthenticator(self.context)
def can_connect(self, server, client):
"""Check if client can connect to server using tcp transport"""
@asyncio.coroutine
def go():
result = False
iface = 'tcp://127.0.0.1'
port = server.bind_to_random_port(iface)
client.connect("%s:%i" % (iface, port))
msg = [b"Hello World"]
yield from server.send_multipart(msg)
if (yield from client.poll(1000)):
rcvd_msg = yield from client.recv_multipart()
self.assertEqual(rcvd_msg, msg)
result = True
return result
return self.loop.run_until_complete(go())
| {
"content_hash": "f4f08ff53d52caa6dffc6bef74e30ab3",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 77,
"avg_line_length": 32.582352941176474,
"alnum_prop": 0.5350243726304387,
"repo_name": "nitin-cherian/LifeLongLearning",
"id": "195d534fc60020011803594ad0a781f53b3a5aec",
"size": "11082",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/zmq/tests/asyncio/_test_asyncio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32365"
},
{
"name": "CSS",
"bytes": "10259"
},
{
"name": "HTML",
"bytes": "55977"
},
{
"name": "JavaScript",
"bytes": "7368910"
},
{
"name": "Jupyter Notebook",
"bytes": "768879"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "17502534"
},
{
"name": "Shell",
"bytes": "7751"
},
{
"name": "Smarty",
"bytes": "30663"
}
],
"symlink_target": ""
} |
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-create-backup'
POLICY_ROOT = 'os_compute_api:os-create-backup:%s'
create_backup_policies = [
policy.RuleDefault(
name=POLICY_ROOT % 'discoverable',
check_str=base.RULE_ANY),
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ADMIN_OR_OWNER),
]
def list_rules():
return create_backup_policies
| {
"content_hash": "5028b524c53cfb958b8e60b5a60e92fc",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 52,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.6857142857142857,
"repo_name": "alaski/nova",
"id": "539e0220e546a60ea3fd39ce441ee9e45a27b8d7",
"size": "1094",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nova/policies/create_backup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16744610"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "351433"
}
],
"symlink_target": ""
} |
import simplejson as json
with open('packages.txt', 'r') as f:
lines = f.readlines()
items = []
current_item = {}
def parse_line(s):
return s.lstrip().rstrip("\n ").split("|")[::-1]
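# e.g. parse_line('  foo|bar\n') -> ['bar', 'foo']: leading whitespace and
# the trailing newline are stripped, and the '|'-separated fields reversed.
# (hypothetical input, for illustration only)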
for i in range(len(lines)):
if lines[i].startswith(r"\item"):
current_item = {}
current_item['title'] = parse_line(lines[i+1])
current_item['href'] = parse_line(lines[i+2])
current_item['description'] = parse_line(lines[i+3])
if i+4 < len(lines) and not lines[i+4].startswith(r"\item"):
current_item['additional_hrefs'] = parse_line(lines[i+4])[::-1]
else:
current_item['additional_hrefs'] = []
items.append(current_item)
with open('packages.json', 'w', encoding='utf8') as f:
json.dump(items, f, indent=4 * ' ', ensure_ascii=False)
| {
"content_hash": "0d04b20ee21794eb08e6bea3db07b33f",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 31.346153846153847,
"alnum_prop": 0.5852760736196319,
"repo_name": "benmaier/benmaier.github.io",
"id": "dbeef6f21b3893a9601b88df64c4c1756ff42950",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CV/data/parse_packages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18101"
},
{
"name": "HTML",
"bytes": "77799"
},
{
"name": "JavaScript",
"bytes": "60672"
},
{
"name": "Python",
"bytes": "5390"
}
],
"symlink_target": ""
} |
import datetime
import os
import requests
import sys
import time
import ConfigParser
OPTIONS = ['username', 'password', 'wowpath', 'interval']
UPLOAD_URI = 'https://www.wowthing.org/api/upload/'
def main():
# Make sure the config file exists
config_file = os.path.join(os.path.dirname(__file__), 'sync.conf')
if not os.path.exists(config_file):
error('config file does not exist: %s' % config_file)
# Try to load config file
config = ConfigParser.ConfigParser()
config.read(config_file)
# Sanity check on variables
for option in OPTIONS:
try:
value = config.get('sync', option)
except ConfigParser.Error:
error('config file "%s" option is missing!' % option)
if not value:
error('config file "%s" option is blank!' % option)
# Make sure the WoW path exists
wtf_path = os.path.join(config.get('sync', 'wowpath'), 'WTF', 'Account')
if not os.path.isdir(wtf_path):
error('path does not exist or is not a directory: %s' % wtf_path)
# I guess we can get started now
loop(config, wtf_path)
def error(message):
print 'ERROR: %s' % message
sys.exit(1)
def log(message):
print '%s %s' % (datetime.datetime.now(), message)
def loop(config, wtf_path):
last_mtime = {}
# Find all account paths
for filename in os.listdir(wtf_path):
if filename == 'SavedVariables':
continue
filepath = os.path.join(wtf_path, filename, 'SavedVariables', 'WoWthing_Collector.lua')
if os.path.exists(filepath):
last_mtime[filepath] = os.path.getmtime(filepath)
log('wowthing_pysync started')
# Loop forever and upload files if data changes
interval = int(config.get('sync', 'interval'))
session = requests.Session()
session.headers.update({
'User-Agent': 'wowthing_pysync',
})
while True:
for filepath, old_mtime in last_mtime.items():
new_mtime = os.path.getmtime(filepath)
if new_mtime > old_mtime:
last_mtime[filepath] = new_mtime
upload(config, filepath, session)
time.sleep(interval)
def upload(config, filepath, session):
log('uploading %s' % filepath)
data = dict(username=config.get('sync', 'username'), password=config.get('sync', 'password'))
files = dict(lua_file=open(filepath, 'rb'))
r = session.post(UPLOAD_URI, data, files=files)
if r.status_code == requests.codes.ok:
log('upload complete')
else:
error('upload failed: %s %r' % (r.status_code, r.content))
if __name__ == '__main__':
main()
| {
"content_hash": "4b60878c8084faa0363b507ee8040f37",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 97,
"avg_line_length": 28.597826086956523,
"alnum_prop": 0.6164956290383884,
"repo_name": "madcowfred/wowthing_pysync",
"id": "9ec99f259c0b22452edc1a54dca0c591610d2cd7",
"size": "2654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wowthing_pysync.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "228"
},
{
"name": "Python",
"bytes": "2654"
}
],
"symlink_target": ""
} |
BOT_NAME = 'psicrawler'
SPIDER_MODULES = ['psicrawler.spiders']
# NEWSPIDER_MODULE = 'jobs.wikinews_en.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (X11; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'wikinews_en.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'wikinews_en.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'psicrawler.pipelines.PsicrawlerPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| {
"content_hash": "fb18ae84130a92892b7640928e15986c",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 109,
"avg_line_length": 35.063291139240505,
"alnum_prop": 0.7678700361010831,
"repo_name": "psiopic2/psicrawler",
"id": "0efe21867a739f1a02b6453af49e6a7a7804a492",
"size": "3206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psicrawler/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21963"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
} |
from pypnm.dummy_numpy import DummyNumpy
try:
import numpy
except ImportError:
numpy = DummyNumpy
class InvalidFormat(Exception): pass
class TemplateMode(object):
is_binary_input = False
pixel_function = lambda self, i: 255 * i / self.max_pixel_value
data_bit_size = 8 # if binary mode
def __init__(self, width, height):
self.width = width
self.height = height
self.max_pixel_value = 255
def _gen_iter(self, data):
def gen_iter_split_space(data):
while data:
while data and data[0] == ' ':
data = data[1:]
res = ''
while data and data[0] != ' ':
res += data[0]
data = data[1:]
if res.isdigit():
value = int(res) % (self.max_pixel_value + 1)
if self.pixel_function is not None:
value = self.pixel_function(value)
yield value
def gen_iter_one_bytes(data):
ONE_BYTE = 8
if self.max_pixel_value == 8:
for x in data:
yield ord(x)
elif self.max_pixel_value == 2:
for x in data:
for y in bin(ord(x))[2:].rjust(ONE_BYTE, '0'):
yield int(y)
if self.is_binary_input:
return gen_iter_one_bytes(data)
else:
return gen_iter_split_space(data)
def _parse_rect(self, iterator, color_num=1):
if color_num == 1:
array = numpy.empty(( self.height, self.width))
else:
array = numpy.empty(( color_num, self.height, self.width))
for y in range(self.height):
for x in range(self.width):
if color_num == 1:
try:
array[y][x] = next(iterator)
except StopIteration:
raise InvalidFormat('insufficient data at (%d, %d)' % (x, y))
else:
for c in range(color_num):
try:
array[y][x][c] = next(iterator)
except StopIteration:
raise InvalidFormat('insufficient data at (%d, %d)' % (x, y))
return array
def _parse(self, it):
# overridden by subclasses
pass
def parse(self, data):
it = self._gen_iter(data)
try:
return self._parse(it)
except StopIteration:
raise InvalidFormat('insufficient data')
class ParserP1(TemplateMode):
is_binary_input = False
pixel_function = lambda self, i: 255 * i # input data is {0, 1}; rescale to the 8-bit range
def _parse(self, it):
self.max_pixel_value = 1
return self._parse_rect(it)
class ParserP2(TemplateMode):
is_binary_input = False
def _parse(self, it):
self.max_pixel_value = next(it)
return self._parse_rect(it)
class ParserP3(TemplateMode):
is_binary_input = False
def _parse(self, it):
self.max_pixel_value = next(it)
return self._parse_rect(it, 3)
class ParserP4(ParserP1):
is_binary_input = True
class ParserP5(ParserP2):
is_binary_input = True
class ParserP6(ParserP3):
is_binary_input = True
supported_format = {
'P1': ParserP1,
'P2': ParserP2,
'P3': ParserP3,
# 'P4': ParserP4,
# 'P5': ParserP5,
# 'P6': ParserP6,
}
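# A minimal usage sketch (tiny hand-written P1 bitmap, not part of the
# module) of parse2array() defined below:
#
#   arr = parse2array("P1 2 2\n0 1\n1 0")
#   # -> a 2x2 array whose entries are 0 or 255 (P1 input is rescaled by
#   #    pixel_function)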
def _gen_iter(idata):
"""
:type data:str
"""
data = idata
for _ in range(3):
data = data.lstrip(' ')
idx = data.index(' ')
yield data[:idx]
data = data[idx:]
yield data
def parse2array(string):
data = ' '.join([x[:x.find('#')] if '#' in x else x for x in string.splitlines()])
i = _gen_iter(data)
try:
format_type = next(i)
width = next(i)
height = next(i)
except IndexError:
raise InvalidFormat('format, width, or height is missing')
if width and width.isdigit() and height and height.isdigit():
width = int(width)
height = int(height)
else:
raise InvalidFormat('width and height must be integers: w:%s h:%s' % (width, height))
if width <= 0 or height <= 0:
raise InvalidFormat('width or height is invalid')
if format_type not in supported_format:
raise InvalidFormat('the format is not supported: %s' % format_type)
data = next(i)
parser = supported_format[format_type](width, height)
res = parser.parse(data)
return res
| {
"content_hash": "7170ad2040787ec1bd0a48d14194c6f0",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 91,
"avg_line_length": 26.626436781609197,
"alnum_prop": 0.5240664796028491,
"repo_name": "cocuh/pypnm",
"id": "d410e130cb122e230b22aefd7b3c54632bcc9762",
"size": "4633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypnm/parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9814"
}
],
"symlink_target": ""
} |
from twisted.python import log
from buildbot.process.buildstep import LoggingBuildStep
from buildbot.status.builder import SKIPPED, FAILURE
from buildbot.steps.slave import CompositeStepMixin
class Source(LoggingBuildStep, CompositeStepMixin):
"""This is a base class to generate a source tree in the buildslave.
Each version control system has a specialized subclass, and is expected
to override __init__ and implement computeSourceRevision() and
startVC(). The class as a whole builds up the self.args dictionary, then
starts a RemoteCommand with those arguments.
"""
renderables = LoggingBuildStep.renderables + [
'description', 'descriptionDone', 'descriptionSuffix',
'workdir' ]
description = None # set this to a list of short strings to override
descriptionDone = None # alternate description when the step is complete
descriptionSuffix = None # extra information to append to suffix
# if the checkout fails, there's no point in doing anything else
haltOnFailure = True
flunkOnFailure = True
notReally = False
branch = None # the default branch, should be set in __init__
def __init__(self, workdir=None, mode='update', alwaysUseLatest=False,
timeout=20*60, retry=None, env=None, logEnviron=True,
description=None, descriptionDone=None, descriptionSuffix=None,
codebase='', **kwargs):
"""
@type workdir: string
@param workdir: local directory (relative to the Builder's root)
where the tree should be placed
@type alwaysUseLatest: boolean
@param alwaysUseLatest: whether to always update to the most
recent available sources for this build.
Normally the Source step asks its Build for a list of all
Changes that are supposed to go into the build, then computes a
'source stamp' (revision number or timestamp) that will cause
exactly that set of changes to be present in the checked out
tree. This is turned into, e.g., 'cvs update -D timestamp', or
'svn update -r revnum'. If alwaysUseLatest=True, bypass this
computation and always update to the latest available sources
for each build.
The source stamp helps avoid a race condition in which someone
commits a change after the master has decided to start a build
but before the slave finishes checking out the sources. At best
this results in a build which contains more changes than the
buildmaster thinks it has (possibly resulting in the wrong
person taking the blame for any problems that result), at worst
is can result in an incoherent set of sources (splitting a
non-atomic commit) which may not build at all.
@type logEnviron: boolean
@param logEnviron: If this option is true (the default), then the
step's logfile will describe the environment
variables on the slave. In situations where the
environment is not relevant and is long, it may
be easier to set logEnviron=False.
@type codebase: string
@param codebase: Specifies which changes in a build are processed by
the step. The default codebase value is ''. The codebase must correspond
to a codebase assigned by the codebaseGenerator. If no codebaseGenerator
is defined in the master then codebase doesn't need to be set, the
default value will then match all changes.
"""
LoggingBuildStep.__init__(self, **kwargs)
# This will get added to args later, after properties are rendered
self.workdir = workdir
self.sourcestamp = None
self.codebase = codebase
if self.codebase:
self.name = ' '.join((self.name, self.codebase))
self.alwaysUseLatest = alwaysUseLatest
self.logEnviron = logEnviron
self.env = env
self.timeout = timeout
descriptions_for_mode = {
"clobber": "checkout",
"export": "exporting"}
descriptionDones_for_mode = {
"clobber": "checkout",
"export": "export"}
if description:
self.description = description
else:
self.description = [
descriptions_for_mode.get(mode, "updating")]
if isinstance(self.description, str):
self.description = [self.description]
if descriptionDone:
self.descriptionDone = descriptionDone
else:
self.descriptionDone = [
descriptionDones_for_mode.get(mode, "update")]
if isinstance(self.descriptionDone, str):
self.descriptionDone = [self.descriptionDone]
if descriptionSuffix:
self.descriptionSuffix = descriptionSuffix
else:
self.descriptionSuffix = self.codebase or None # want None in lieu of ''
if isinstance(self.descriptionSuffix, str):
self.descriptionSuffix = [self.descriptionSuffix]
def updateSourceProperty(self, name, value, source=''):
"""
Update a property, indexing the property by codebase if codebase is not
''. Source steps should generally use this instead of setProperty.
"""
# pick a decent source name
if source == '':
source = self.__class__.__name__
if self.codebase != '':
assert not isinstance(self.getProperty(name, None), str), \
"Sourcestep %s has a codebase, other sourcesteps don't" \
% self.name
property_dict = self.getProperty(name, {})
property_dict[self.codebase] = value
LoggingBuildStep.setProperty(self, name, property_dict, source)
else:
assert not isinstance(self.getProperty(name, None), dict), \
"Sourcestep %s does not have a codebase, other sourcesteps do" \
% self.name
LoggingBuildStep.setProperty(self, name, value, source)
def setStepStatus(self, step_status):
LoggingBuildStep.setStepStatus(self, step_status)
def setDefaultWorkdir(self, workdir):
self.workdir = self.workdir or workdir
def describe(self, done=False):
desc = self.descriptionDone if done else self.description
if self.descriptionSuffix:
desc = desc[:]
desc.extend(self.descriptionSuffix)
return desc
def computeSourceRevision(self, changes):
"""Each subclass must implement this method to do something more
precise than -rHEAD every time. For version control systems that use
repository-wide change numbers (SVN, P4), this can simply take the
maximum such number from all the changes involved in this build. For
systems that do not (CVS), it needs to create a timestamp based upon
the latest Change, the Build's treeStableTimer, and an optional
self.checkoutDelay value."""
return None
def start(self):
if self.notReally:
log.msg("faking %s checkout/update" % self.name)
self.step_status.setText(["fake", self.name, "successful"])
self.addCompleteLog("log",
"Faked %s checkout/update 'successful'\n" \
% self.name)
return SKIPPED
if not self.alwaysUseLatest:
# what source stamp would this step like to use?
s = self.build.getSourceStamp(self.codebase)
self.sourcestamp = s
if self.sourcestamp:
# if branch is None, then use the Step's "default" branch
branch = s.branch or self.branch
# if revision is None, use the latest sources (-rHEAD)
revision = s.revision
if not revision:
revision = self.computeSourceRevision(s.changes)
# the revision property is currently None, so set it to something
# more interesting
if revision is not None:
self.updateSourceProperty('revision', str(revision))
# if patch is None, then do not patch the tree after checkout
# 'patch' is None or a tuple of (patchlevel, diff, root)
# root is optional.
patch = s.patch
if patch:
self.addCompleteLog("patch", patch[1])
else:
log.msg("No sourcestamp found in build for codebase '%s'" % self.codebase)
self.step_status.setText(["Codebase", '%s' % self.codebase ,"not", "in", "build" ])
self.addCompleteLog("log",
"No sourcestamp found in build for codebase '%s'" \
% self.codebase)
self.finished(FAILURE)
return FAILURE
else:
revision = None
branch = self.branch
patch = None
self.startVC(branch, revision, patch)
| {
"content_hash": "1ee0a98f18982dbe81e7a23f7d05bbd0",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 99,
"avg_line_length": 43.154205607476634,
"alnum_prop": 0.6120194910665945,
"repo_name": "denny820909/builder",
"id": "13da81c30d383dce7ad6fa675a87bc0d85ffffd7",
"size": "9942",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/steps/source/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
"""Class implementing a multi-worker parameter server tf.distribute strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import device_setter
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
_LOCAL_CPU = "/device:CPU:0"
# TODO(yuefengz): maybe cache variables on local CPU.
@tf_export("distribute.experimental.ParameterServerStrategy", v1=[])
class ParameterServerStrategy(distribute_lib.Strategy):
"""An asynchronous multi-worker parameter server tf.distribute strategy.
This strategy requires two jobs: workers and parameter servers. Variables and
updates to those variables will be assigned to parameter servers and other
operations are assigned to workers.
When each worker has more than one GPU, operations will be replicated on all
GPUs. Even though operations may be replicated, variables are not and each
worker shares a common view for which parameter server a variable is assigned
to.
By default it uses `TFConfigClusterResolver` to detect configurations for
multi-worker training. This requires a 'TF_CONFIG' environment variable and
the 'TF_CONFIG' must have a cluster spec.
This class assumes each worker is running the same code independently, but
parameter servers are running a standard server. This means that while each
worker will synchronously compute a single gradient update across all GPUs,
updates between workers proceed asynchronously. Operations that occur only on
the first replica (such as incrementing the global step), will occur on the
first replica *of every worker*.
It is expected to call `call_for_each_replica(fn, ...)` for any
operations which potentially can be replicated across replicas (i.e. multiple
GPUs) even if there is only CPU or one GPU. When defining the `fn`, extra
caution needs to be taken:
1) It is generally not recommended to open a device scope under the strategy's
scope. A device scope (i.e. calling `tf.device`) will be merged with or
override the device for operations but will not change the device for
variables.
2) It is also not recommended to open a colocation scope (i.e. calling
`tf.compat.v1.colocate_with`) under the strategy's scope. For colocating
variables, use `strategy.extended.colocate_vars_with` instead. Colocation of
ops will possibly create device assignment conflicts.
Note: This strategy only works with the Estimator API. Pass an instance of
this strategy to the `train_distribute` argument when you create the
`RunConfig`. This instance of `RunConfig` should then be passed to the
`Estimator` instance on which `train_and_evaluate` is called.
For Example:
```
strategy = tf.distribute.experimental.ParameterServerStrategy()
run_config = tf.estimator.RunConfig(
train_distribute=strategy)
estimator = tf.estimator.Estimator(config=run_config)
tf.estimator.train_and_evaluate(estimator, ...)
"""
def __init__(self, cluster_resolver=None):
"""Initializes this strategy with an optional `cluster_resolver`.
Args:
cluster_resolver: Optional
`tf.distribute.cluster_resolver.ClusterResolver` object. Defaults to a
`tf.distribute.cluster_resolver.TFConfigClusterResolver`.
"""
if cluster_resolver is None:
cluster_resolver = TFConfigClusterResolver()
if not cluster_resolver.cluster_spec():
raise ValueError("Cluster spec must be non-empty in `cluster_resolver`.")
extended = ParameterServerStrategyExtended(
self, cluster_resolver=cluster_resolver)
super(ParameterServerStrategy, self).__init__(extended)
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"ParameterServerStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell("num_ps").set(
len(self.extended.parameter_devices))
@tf_export(v1=["distribute.experimental.ParameterServerStrategy"]) # pylint: disable=missing-docstring
class ParameterServerStrategyV1(distribute_lib.StrategyV1):
__doc__ = ParameterServerStrategy.__doc__
def __init__(self, cluster_resolver=None):
"""Initializes this strategy."""
super(ParameterServerStrategyV1, self).__init__(
ParameterServerStrategyExtended(
self, cluster_resolver=cluster_resolver))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"ParameterServerStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell("num_ps").set(
len(self.extended.parameter_devices))
__init__.__doc__ = ParameterServerStrategy.__init__.__doc__
# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.
class ParameterServerStrategyExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of ParameterServerStrategy and CentralStorageStrategy."""
def __init__(self,
container_strategy,
cluster_resolver=None,
compute_devices=None,
parameter_device=None):
super(ParameterServerStrategyExtended, self).__init__(container_strategy)
self._initialize_strategy(
cluster_resolver=cluster_resolver,
compute_devices=compute_devices,
parameter_device=parameter_device)
# We typically don't need to do all-reduce in this strategy.
self._cross_device_ops = (
cross_device_ops_lib.ReductionToOneDevice(reduce_to_device=_LOCAL_CPU))
def _initialize_strategy(self,
cluster_resolver=None,
compute_devices=None,
parameter_device=None):
if cluster_resolver and cluster_resolver.cluster_spec():
self._initialize_multi_worker(cluster_resolver)
else:
self._initialize_local(
compute_devices, parameter_device, cluster_resolver=cluster_resolver)
def _initialize_multi_worker(self, cluster_resolver):
"""Initialize devices for multiple workers.
It creates variable devices and compute devices. Variables and operations
will be assigned to them respectively. We have one compute device per
replica. The variable device is a device function or device string. The
default variable device assigns variables to parameter servers in a
round-robin fashion.
Args:
cluster_resolver: a descendant of `ClusterResolver` object.
Raises:
ValueError: if the cluster doesn't have ps jobs.
"""
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
# Save the num_gpus_per_worker for configure method.
self._num_gpus_per_worker = num_gpus
cluster_spec = cluster_resolver.cluster_spec()
task_type = cluster_resolver.task_type
task_id = cluster_resolver.task_id
if not task_type or task_id is None:
raise ValueError("When `cluster_spec` is given, you must also specify "
"`task_type` and `task_id`")
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
assert cluster_spec.as_dict()
worker_device = "/job:%s/task:%d" % (task_type, task_id)
self._input_host_device = numpy_dataset.SingleDevice(worker_device)
# Define compute devices which is a list of device strings and one for each
# replica. When there are GPUs, replicate operations on these GPUs.
# Otherwise, place operations on CPU.
if num_gpus > 0:
compute_devices = tuple(
"%s/device:GPU:%d" % (worker_device, i) for i in range(num_gpus))
else:
compute_devices = (worker_device,)
self._device_map = values.ReplicaDeviceMap(compute_devices)
self._input_workers = input_lib.InputWorkers(
self._device_map, [(worker_device, compute_devices)])
# In distributed mode, place variables on ps jobs in a round-robin fashion.
# Note that devices returned from `replica_device_setter` are not
# canonical and therefore we don't canonicalize all variable devices to
# make them consistent.
# TODO(yuefengz): support passing a strategy object to control variable
# assignment.
# TODO(yuefengz): merge the logic of replica_device_setter into this
# class.
num_ps_replicas = len(cluster_spec.as_dict().get("ps", []))
if num_ps_replicas == 0:
raise ValueError("The cluster spec needs to have `ps` jobs.")
self._variable_device = device_setter.replica_device_setter(
ps_tasks=num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
cluster=cluster_spec)
# The `_parameter_devices` is needed for the `parameter_devices` property
# and is a list of all variable devices. Here parameter devices are all
# tasks of the "ps" job.
self._parameter_devices = tuple(map("/job:ps/task:{}".format,
range(num_ps_replicas)))
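# e.g. num_ps_replicas == 2 yields ("/job:ps/task:0", "/job:ps/task:1");
# variables created under self._variable_device are then assigned to these
# tasks in a round-robin fashion.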
# Add a default device so that ops without specified devices will not end up
# on other workers.
self._default_device = worker_device
self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
task_id)
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
logging.info(
"Multi-worker ParameterServerStrategy with "
"cluster_spec = %r, task_type = %r, task_id = %r, "
"num_ps_replicas = %r, is_chief = %r, device_map = %r, "
"variable_device = %r", cluster_spec.as_dict(), task_type, task_id,
num_ps_replicas, self._is_chief, self._device_map,
self._variable_device)
# TODO(yuefengz): get rid of cluster_resolver argument when contrib's
# version no longer depends on this class.
def _initialize_local(self,
compute_devices,
parameter_device,
cluster_resolver=None):
"""Initialize local devices for training."""
worker_device = device_util.canonicalize("/device:CPU:0")
self._input_host_device = numpy_dataset.SingleDevice(worker_device)
if compute_devices is None:
if not cluster_resolver:
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
# Save the num_gpus_per_worker for configure method which is used by the
# contrib version.
self._num_gpus_per_worker = num_gpus
compute_devices = device_util.local_devices_from_num_gpus(num_gpus)
if parameter_device is None:
# If there is only one GPU, put everything on that GPU. Otherwise, place
# variables on CPU.
if len(compute_devices) == 1:
parameter_device = compute_devices[0]
else:
parameter_device = _LOCAL_CPU
self._device_map = values.ReplicaDeviceMap(compute_devices)
self._input_workers = input_lib.InputWorkers(
self._device_map, [(worker_device, compute_devices)])
self._variable_device = parameter_device
self._parameter_devices = (parameter_device,)
self._is_chief = True
self._cluster_spec = None
self._task_type = None
self._task_id = None
logging.info(
"ParameterServerStrategy with compute_devices = %r, "
"variable_device = %r", compute_devices, self._variable_device)
def _validate_colocate_with_variable(self, colocate_with_variable):
values.validate_colocate(colocate_with_variable, self)
def _experimental_distribute_dataset(self, dataset):
return input_lib.get_distributed_dataset(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _make_dataset_iterator(self, dataset):
return input_lib.DatasetIterator(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
"""Distributes the dataset to each local GPU."""
if self._cluster_spec:
input_pipeline_id = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
num_input_pipelines = multi_worker_util.worker_count(
self._cluster_spec, self._task_type)
else:
input_pipeline_id = 0
num_input_pipelines = 1
input_context = distribute_lib.InputContext(
num_input_pipelines=num_input_pipelines,
input_pipeline_id=input_pipeline_id,
num_replicas_in_sync=self._num_replicas_in_sync)
return input_lib.InputFunctionIterator(input_fn, self._input_workers,
[input_context],
self._container_strategy())
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, self._input_host_device, session)
def _experimental_distribute_datasets_from_function(self, dataset_fn):
if self._cluster_spec:
input_pipeline_id = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
num_input_pipelines = multi_worker_util.worker_count(
self._cluster_spec, self._task_type)
else:
input_pipeline_id = 0
num_input_pipelines = 1
input_context = distribute_lib.InputContext(
num_input_pipelines=num_input_pipelines,
input_pipeline_id=input_pipeline_id,
num_replicas_in_sync=self._num_replicas_in_sync)
return input_lib.get_distributed_datasets_from_function(
dataset_fn,
self._input_workers,
[input_context],
self._container_strategy())
def _broadcast_to(self, tensor, destinations):
# This is both a fast path for Python constants, and a way to delay
# converting Python values to a tensor until we know what type it
# should be converted to. Otherwise we have trouble with:
# global_step.assign_add(1)
# since the `1` gets broadcast as an int32 but global_step is int64.
if isinstance(tensor, (float, int)):
return tensor
if not cross_device_ops_lib.check_destinations(destinations):
# TODO(josh11b): Use current logical device instead of 0 here.
destinations = values.LogicalDeviceSpec(
device_map=self._device_map, logical_device=0)
return self._cross_device_ops.broadcast(tensor, destinations)
def _allow_variable_partition(self):
return not context.executing_eagerly()
# TODO(yuefengz): Not all ops in device_setter.STANDARD_PS_OPS will go through
# this creator, such as "MutableHashTable".
def _create_variable(self, next_creator, *args, **kwargs):
if self._num_replicas_in_sync > 1:
aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
if aggregation not in (
vs.VariableAggregation.NONE,
vs.VariableAggregation.SUM,
vs.VariableAggregation.MEAN,
vs.VariableAggregation.ONLY_FIRST_REPLICA
):
raise ValueError("Invalid variable aggregation mode: " + aggregation +
" for variable: " + kwargs["name"])
def var_creator(*args, **kwargs):
"""Create an AggregatingVariable and fix up collections."""
# Record what collections this variable should be added to.
collections = kwargs.pop("collections", None)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
kwargs["collections"] = []
# Create and wrap the variable.
v = next_creator(*args, **kwargs)
wrapped = values.AggregatingVariable(
self._container_strategy(), v, aggregation)
# Add the wrapped variable to the requested collections.
# The handling of eager mode and the global step matches
# ResourceVariable._init_from_args().
if not context.executing_eagerly():
g = ops.get_default_graph()
# If "trainable" is True, next_creator() will add the contained
# variable to the TRAINABLE_VARIABLES collection, so we manually
# remove it and replace with the wrapper. We can't set "trainable"
# to False for next_creator() since that causes functions like
# implicit_gradients to skip those variables.
if kwargs.get("trainable", True):
collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
if v in l:
l.remove(v)
g.add_to_collections(collections, wrapped)
elif ops.GraphKeys.GLOBAL_STEP in collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, wrapped)
return wrapped
else:
var_creator = next_creator
if "colocate_with" in kwargs:
colocate_with = kwargs["colocate_with"]
if isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return var_creator(*args, **kwargs)
with ops.device(None):
with ops.colocate_with(colocate_with):
return var_creator(*args, **kwargs)
with ops.colocate_with(None, ignore_existing=True):
with ops.device(self._variable_device):
return var_creator(*args, **kwargs)
def _call_for_each_replica(self, fn, args, kwargs):
# pylint: disable=protected-access
return mirrored_strategy._call_for_each_replica(
self._container_strategy(), self._device_map, fn, args, kwargs)
def _verify_destinations_not_different_worker(self, destinations):
if not self._cluster_spec:
return
if destinations is None:
return
for d in cross_device_ops_lib.get_devices_from(destinations):
d_spec = tf_device.DeviceSpec.from_string(d)
if d_spec.job == self._task_type and d_spec.task != self._task_id:
raise ValueError(
"Cannot reduce to another worker: %r, current worker is %r" %
(d, self._input_workers.worker_devices[0]))
def _reduce_to(self, reduce_op, value, destinations):
self._verify_destinations_not_different_worker(destinations)
if not isinstance(value, values.DistributedValues):
# pylint: disable=protected-access
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, self._device_map, value, destinations)
return self._cross_device_ops.reduce(
reduce_op, value, destinations=destinations)
def _batch_reduce_to(self, reduce_op, value_destination_pairs):
for _, destinations in value_destination_pairs:
self._verify_destinations_not_different_worker(destinations)
return self._cross_device_ops.batch_reduce(reduce_op,
value_destination_pairs)
def _select_single_value(self, structured):
"""Select any single value in `structured`."""
def _select_fn(x): # pylint: disable=g-missing-docstring
if isinstance(x, values.Mirrored):
if len(x.devices) == 1:
return x.primary
else:
raise ValueError(
"You cannot update variable with a Mirrored object with multiple "
"components %r when using ParameterServerStrategy. You must "
"specify a single value or a Mirrored with a single value." % x)
elif isinstance(x, values.PerReplica):
raise ValueError(
"You cannot update variable with a PerReplica object %r when using "
"ParameterServerStrategy. You must specify a single value or a "
"Mirrored with a single value" % x)
else:
return x
return nest.map_structure(_select_fn, structured)
def _update(self, var, fn, args, kwargs, group):
if isinstance(var, values.AggregatingVariable):
var = var.get()
if not isinstance(var, resource_variable_ops.BaseResourceVariable):
raise ValueError(
"You can not update `var` %r. It must be a Variable." % var)
with ops.colocate_with(var), distribute_lib.UpdateContext(var.device):
result = fn(var, *self._select_single_value(args),
**self._select_single_value(kwargs))
if group:
return result
else:
return nest.map_structure(self._local_results, result)
# TODO(yuefengz): does it need to call _select_single_value?
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
with ops.device(
colocate_with.device), distribute_lib.UpdateContext(colocate_with):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._local_results, result)
def _local_results(self, val):
if isinstance(val, values.DistributedValues):
return val.values
return (val,)
def value_container(self, val):
if (hasattr(val, "_aggregating_container") and
not isinstance(val, values.AggregatingVariable)):
wrapper = val._aggregating_container() # pylint: disable=protected-access
if wrapper is not None:
return wrapper
return val
def read_var(self, var):
# No need to distinguish between normal variables and replica-local
# variables.
return array_ops.identity(var)
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
"""Configures the strategy class with `cluser_spec`.
The strategy object will be re-initialized if `cluster_spec` is passed to
`configure` but was not passed when instantiating the strategy.
Args:
session_config: Session config object.
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type.
task_id: the current task id.
Raises:
ValueError: if `cluster_spec` is given but `task_type` or `task_id` is
not.
"""
if cluster_spec:
# Use the num_gpus_per_worker recorded in constructor since _configure
# doesn't take num_gpus.
cluster_resolver = SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={"GPU": self._num_gpus_per_worker})
self._initialize_multi_worker(cluster_resolver)
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
if not self._cluster_spec:
updated_config.isolate_session_state = True
return updated_config
updated_config.isolate_session_state = False
assert self._task_type
assert self._task_id is not None
# The device filters prevent communication between workers.
del updated_config.device_filters[:]
if self._task_type in ["chief", "worker"]:
updated_config.device_filters.extend(
["/job:%s/task:%d" % (self._task_type, self._task_id), "/job:ps"])
elif self._task_type == "evaluator":
updated_config.device_filters.append(
"/job:%s/task:%d" % (self._task_type, self._task_id))
return updated_config
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
# With a PS job, PS strategy should always be considered as in multi
# worker mode.
return True
@property
def _num_replicas_in_sync(self):
return self._device_map.num_replicas_in_graph
@property
def worker_devices(self):
return self._device_map.all_devices
@property
def worker_devices_by_replica(self):
return self._device_map.devices_by_replica
@property
def parameter_devices(self):
return self._parameter_devices
def non_slot_devices(self, var_list):
return min(var_list, key=lambda x: x.name)
@property
def experimental_between_graph(self):
# TODO(yuefengz): Should this return False in the local case?
return True
@property
def experimental_should_init(self):
return self._is_chief
@property
def should_checkpoint(self):
return self._is_chief
@property
def should_save_summary(self):
return self._is_chief
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
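# Usage sketch (an illustrative addition, not part of this module). The
# strategy is normally driven through the public tf.distribute API; the
# variable name below is a made-up placeholder.
#
#   strategy = tf.distribute.experimental.ParameterServerStrategy()
#   with strategy.scope():
#     # Variables created under the scope go through _create_variable above,
#     # landing on the parameter device(s) and, with multiple replicas,
#     # being wrapped in an AggregatingVariable.
#     v = tf.Variable(1.0, name="v")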
| {
"content_hash": "201fc33d5580c7b53d9b64e34b8cf32a",
"timestamp": "",
"source": "github",
"line_count": 637,
"max_line_length": 103,
"avg_line_length": 40.50392464678179,
"alnum_prop": 0.6833843649470951,
"repo_name": "DavidNorman/tensorflow",
"id": "2d3800e4d8f2e33ccf6e18c9b4e2da8021f36bd5",
"size": "26490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/parameter_server_strategy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "15272"
},
{
"name": "C",
"bytes": "774469"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "74659044"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "79827"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "827737"
},
{
"name": "Jupyter Notebook",
"bytes": "540800"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1004638"
},
{
"name": "Makefile",
"bytes": "66660"
},
{
"name": "Objective-C",
"bytes": "105247"
},
{
"name": "Objective-C++",
"bytes": "297569"
},
{
"name": "PHP",
"bytes": "23553"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "14529"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37406546"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "452517"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
} |
import math
from builtins import range
from ase.data import vdw_radii
import numpy as np
__doc__ = """
Calculate the accessible-surface area of atoms.
Uses the simple Shrake-Rupley algorithm, that generates a
relatively uniform density of dots over every atoms and
eliminates those within the sphere of another atom. The remaining
dots is used to calculate the area.
Reference: A. Shrake & J. A. Rupley. "Environment and Exposure to
Solvent of Protein Atoms. Lysozyme and Insulin." J Mol Biol. 79
(1973) 351- 371. """
def get_neighbor_list(cut, atoms):
r = atoms.get_all_distances() # type: np.ndarray
n_list = []
for i in range(len(atoms)):
        # Indices of atoms within the cutoff, excluding self (distance 0).
        sub = np.where((r[i, :] > 0.0) & (r[i, :] < cut))[0]
n_list.append(sub)
return n_list
def get_coordination(cut, atoms):
nl = get_neighbor_list(cut, atoms)
cl = np.asarray([len(i) for i in nl])
return cl
def generate_sphere_points(n):
"""
Returns list of coordinates on a sphere using the Golden-
Section Spiral algorithm.
Parameters
----------
n: int
Number of points
Returns
-------
sphere point coordinates
"""
points = np.zeros((n, 3))
inc = math.pi * (3 - math.sqrt(5))
offset = 2 / float(n)
for k in range(int(n)):
y = k * offset - 1 + (offset / 2)
r = math.sqrt(1 - y * y)
phi = k * inc
points[k, :] = [math.cos(phi) * r, y, math.sin(phi) * r]
return points
def calculate_asa(atoms, probe, cutoff=None, tag=1, n_sphere_point=960):
"""
Returns the accessible-surface areas of the atoms, by rolling a
ball with probe radius over the atoms with their radius
defined.
Parameters
----------
atoms: ase.atoms object
The atomic configuration
probe: float
The size of the probe molecule
cutoff: float
The bond length cutoff
tag: int
The number to tag the surface atoms with
n_sphere_point: int
Number of points per sphere
"""
if cutoff is None:
elements = list(set(atoms.numbers))
cutoff = np.min(vdw_radii[elements]) * 2
sphere_points = generate_sphere_points(n_sphere_point)
const = 4.0 * math.pi / len(sphere_points)
areas = []
surface = []
n_list = list(get_neighbor_list(cutoff, atoms))
for i, atom_i in enumerate(atoms):
neighbor_indices = n_list[i]
n_neighbor = len(neighbor_indices)
j_closest_neighbor = 0
radius = probe + vdw_radii[atom_i.number]
n_accessible_point = 0
for k in range(n_sphere_point):
is_accessible = True
test_point = sphere_points[k, :] / np.linalg.norm(
sphere_points[k, :]) * radius + atom_i.position
cycled_indices = list(range(j_closest_neighbor, n_neighbor))
cycled_indices.extend(range(j_closest_neighbor))
for j in cycled_indices:
atom_j = atoms[int(neighbor_indices[j])]
r = vdw_radii[atom_j.number] + probe
diff = atom_j.position - test_point
if np.dot(diff, diff) < r * r:
j_closest_neighbor = j
is_accessible = False
break
if is_accessible:
n_accessible_point += 1
surface.append(test_point)
area = const * n_accessible_point * radius * radius
if area > 0:
atoms[i].tag = tag
areas.append(area)
return areas, surface
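if __name__ == "__main__":
    # Usage sketch (an illustrative addition, not part of the original
    # module): roll a water-sized probe (1.4 A) over a small, made-up
    # CO2-like configuration and print the per-atom accessible areas.
    from ase import Atoms
    co2 = Atoms('CO2', positions=[(0.0, 0.0, 0.0),
                                  (1.16, 0.0, 0.0),
                                  (-1.16, 0.0, 0.0)])
    areas, surface = calculate_asa(co2, probe=1.4, n_sphere_point=100)
    print('per-atom accessible surface areas: %s' % areas)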
| {
"content_hash": "4bb15d9fa94132938345ee73d649d7c7",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 77,
"avg_line_length": 29.891666666666666,
"alnum_prop": 0.5818232506272651,
"repo_name": "CJ-Wright/pyIID",
"id": "7c0e09a29b271dadb5477e1ef5aacd1ddd7256a5",
"size": "3588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyiid/asa.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "244316"
}
],
"symlink_target": ""
} |
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
distribution = Normal(4)
size = 10
myPlane = LHSExperiment(distribution, size)
print "myPlane = ", myPlane
sample, weights = myPlane.generateWithWeights()
print "sample = ", repr(sample)
print "weights = ", repr(weights)
except:
import sys
print "t_LHSExperiment_std.py", sys.exc_type, sys.exc_value
| {
"content_hash": "3dbdbbe37a8d9869d986d60ab8197a14",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 63,
"avg_line_length": 25.5625,
"alnum_prop": 0.684596577017115,
"repo_name": "sofianehaddad/ot-svn",
"id": "138a3eb2a79a7367138455744777c2435d523171",
"size": "433",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/test/t_LHSExperiment_std.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6498"
},
{
"name": "C",
"bytes": "455749"
},
{
"name": "C++",
"bytes": "10021345"
},
{
"name": "CMake",
"bytes": "240050"
},
{
"name": "FORTRAN",
"bytes": "299"
},
{
"name": "Makefile",
"bytes": "12372"
},
{
"name": "NSIS",
"bytes": "26263"
},
{
"name": "Python",
"bytes": "1221927"
},
{
"name": "R",
"bytes": "11141"
},
{
"name": "Scilab",
"bytes": "2612"
},
{
"name": "Shell",
"bytes": "20403"
},
{
"name": "TeX",
"bytes": "4250"
},
{
"name": "Visual Basic",
"bytes": "3294"
}
],
"symlink_target": ""
} |
"""Top level component of a sandbox.
A sandlet is an abstraction to be used by applications to organize a sandbox
into discrete groupings. A user can start an entire sandbox, which will start
all sandlets, or selectively choose which ones to start.
Sandlets are made up of components, which are the actual subprocesses or jobs
to run.
"""
import logging
import time
class DependencyError(Exception):
"""Raised when the configuration has an incorrect set of dependencies."""
class BaseAction(object):
@classmethod
def able_to_act(cls, component, components, remaining):
pass
@classmethod
def do_action(cls, component):
pass
@classmethod
  def get_unfinished(cls, available):
pass
class StartAction(BaseAction):
"""Starts components."""
@classmethod
def able_to_act(cls, component, components, remaining):
# A component can start if none of its dependencies are remaining
return not set(component.dependencies).intersection(remaining)
@classmethod
def do_action(cls, component):
component.start()
@classmethod
def get_unfinished(cls, available):
return [x.name for x in available if not x.is_up()]
class StopAction(BaseAction):
"""Stops components."""
@classmethod
def able_to_act(cls, component, components, remaining):
    # A component can stop if no remaining components have it as a
    # dependency
return not [a.name for a in components
if component.name in a.dependencies and a.name in remaining]
@classmethod
def do_action(cls, component):
component.stop()
@classmethod
def get_unfinished(cls, available):
return [x.name for x in available if not x.is_down()]
class ComponentGroup(object):
"""A grouping of components with dependencies that can be executed."""
def __init__(self):
self.components = []
def add_component(self, component):
self.components.append(component)
def execute(self, action, subcomponents=None):
remaining = subcomponents or [x.name for x in self.components]
while remaining:
available = [x for x in self.components if x.name in remaining
and action.able_to_act(x, self.components, remaining)]
if not available:
# This is a cycle, we have remaining tasks but none can run
raise DependencyError(
'Cycle detected: remaining components: %s.' % remaining)
for component in available:
action.do_action(component)
remaining.remove(component.name)
while True:
unfinished_components = action.get_unfinished(available)
if not unfinished_components:
break
logging.info(
'Waiting to be finished: %s.', ', '.join(unfinished_components))
time.sleep(10)
class Sandlet(object):
"""Top-level component of a sandbox.
Sandlets should be defined in a way to split applications in a logical way.
"""
def __init__(self, name):
self.name = name
self.dependencies = []
self.components = ComponentGroup()
def start(self):
logging.info('Starting sandlet %s.', self.name)
self.components.execute(StartAction)
def stop(self):
logging.info('Stopping sandlet %s.', self.name)
self.components.execute(StopAction)
def is_up(self):
"""Whether the component has finished being started."""
return True
def is_down(self):
"""Whether the component has finished being stopped."""
return True
class SandletComponent(object):
"""Entity of a sandlet that encapsulates a process or job."""
def __init__(self, name, sandbox_name):
self.name = name
self.dependencies = []
self.sandbox_name = sandbox_name
def start(self):
logging.info('Starting component %s.', self.name)
def stop(self):
logging.info('Stopping component %s.', self.name)
def is_up(self):
"""Whether the component has finished being started."""
return True
def is_down(self):
"""Whether the component has finished being stopped."""
return True
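if __name__ == '__main__':
  # Usage sketch (an illustrative addition, not part of the original module):
  # a sandlet with two components where 'app' depends on 'db', so 'db' starts
  # first and stops last. The names are made-up placeholders.
  logging.basicConfig(level=logging.INFO)
  sandlet = Sandlet('example')
  db = SandletComponent('db', 'demo_sandbox')
  app = SandletComponent('app', 'demo_sandbox')
  app.dependencies.append('db')
  sandlet.components.add_component(db)
  sandlet.components.add_component(app)
  sandlet.start()
  sandlet.stop()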
| {
"content_hash": "d3e0410bb783558d6c5938b5d4e3cbe9",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 77,
"avg_line_length": 27.033557046979865,
"alnum_prop": 0.6884309831181727,
"repo_name": "theskyinflames/bpulse-go-client",
"id": "02f932e8a5aa4c2b9ba4fbbb596146be53815140",
"size": "4028",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vendor/github.com/youtube/vitess/test/cluster/sandbox/sandlet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "133726"
},
{
"name": "Shell",
"bytes": "2415"
}
],
"symlink_target": ""
} |
"""
Tests for pbcopy/pbpaste commands.
"""
import os
import tempfile
from io import open
from stash.tests.stashtest import StashTestCase
class CopyPasteTests(StashTestCase):
"""
Test class for the 'pbcopy' and 'pbpaste' commands.
"""
def test_pbcopy_help(self):
"""
test 'pbcopy --help'.
"""
output_1 = self.run_command("pbcopy -h", exitcode=0)
output_2 = self.run_command("pbcopy --help", exitcode=0)
self.assertEqual(output_1, output_2)
self.assertIn("-h", output_1)
self.assertIn("--help", output_1)
self.assertIn("file", output_1)
self.assertIn("pbcopy", output_1)
self.assertIn("...", output_1)
def test_pbpaste_help(self):
"""
test 'pbpaste --help'.
"""
output_1 = self.run_command("pbpaste -h", exitcode=0)
output_2 = self.run_command("pbpaste --help", exitcode=0)
self.assertEqual(output_1, output_2)
self.assertIn("-h", output_1)
self.assertIn("--help", output_1)
self.assertIn("file", output_1)
self.assertIn("pbpaste", output_1)
def test_copy_paste_stdin(self):
"""
Test copy of stdin & paste
"""
self.run_command("echo teststring | pbcopy", exitcode=0)
output = self.run_command("pbpaste", exitcode=0)
self.assertEqual("teststring\n", output)
def test_copy_paste_file(self):
"""
Test copy of a file & paste
"""
p = os.path.join(self.get_data_path(), "testfile.txt")
self.run_command("pbcopy " + p, exitcode=0)
output = self.run_command("pbpaste", exitcode=0)
with open(p, "r", encoding="utf-8") as fin:
content = fin.read()
self.assertEqual(output, content)
def test_paste_into_file(self):
"""
Test copy of a file & paste into a file.
        Comparison is done using 'md5sum'.
"""
pin = os.path.join(self.get_data_path(), "testfile.txt")
pout = os.path.join(tempfile.gettempdir(), "testpastefile.txt")
if os.path.exists(pout):
os.remove(pout)
self.run_command("pbcopy " + pin, exitcode=0)
self.run_command("pbpaste " + pout, exitcode=0)
org_hash = self.run_command("md5sum " + pin, exitcode=0).split()[0]
paste_hash = self.run_command("md5sum " + pout, exitcode=0).split()[0]
self.assertEqual(org_hash, paste_hash)
| {
"content_hash": "ac6d705bc864db72a41e49a43330e145",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 78,
"avg_line_length": 33.567567567567565,
"alnum_prop": 0.5744766505636071,
"repo_name": "ywangd/stash",
"id": "21416eaa9515a70f3977ae9c70e2b7b546f78fa9",
"size": "2484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pbcopy_pbpaste/test_pbcopy_pbpaste.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "939583"
},
{
"name": "Shell",
"bytes": "1648"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/base/shared_poi_all_lair_insecthill_large_evil_fire_red.iff"
result.attribute_template_id = -1
result.stfName("lair_n","insecthill")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "1145800158f57658b749699394b30802",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 101,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.7021276595744681,
"repo_name": "obi-two/Rebelion",
"id": "6aa8440f29e3f8f190102a7491f89a49a28e31ee",
"size": "474",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_insecthill_large_evil_fire_red.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
__author__ = 'RongShun'
from django import forms
from django.db import models
from django.forms import ModelForm
from lib.common.commands.adb import Adb
from lib.common.constant import SCRIPTED_PROFILE_INTERACTION, RANDOM_INTERACTION
from lib.core.managers.session import get_current_device_serial
adb = Adb()
class ConfigForm(forms.Form):
"""
    This class handles the creation of the django form on the configuration interface.
For more information on django form, visit https://docs.djangoproject.com/en/1.7/ref/forms/fields/
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
NOTE: get_current_device_serial() method is useful to get the device's serial number that you are interested in.
You might need it to extract information from the particular device for form manipulation
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
"""
#EXAMPLE FORM
enable_profile = forms.BooleanField()
choices = (
(SCRIPTED_PROFILE_INTERACTION, "run profile simulation script"),
(RANDOM_INTERACTION, "random simulation")
)
simulation_option = forms.ChoiceField(choices)
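# Usage sketch (an illustrative addition, not part of the original module):
# binding and validating the form works as for any django form; the POST
# payload below is a made-up placeholder.
#
#   form = ConfigForm({'enable_profile': 'on',
#                      'simulation_option': RANDOM_INTERACTION})
#   if form.is_valid():
#       option = form.cleaned_data['simulation_option']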
| {
"content_hash": "8b6eecbab591e69bff7070a44a228e9f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 121,
"avg_line_length": 46.333333333333336,
"alnum_prop": 0.6027178257394085,
"repo_name": "WhySoGeeky/DroidPot",
"id": "125037af63b0119f5e1066cc3672042bddb0252f",
"size": "1251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/templates/profile/view_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "874"
},
{
"name": "C",
"bytes": "31005"
},
{
"name": "CSS",
"bytes": "791857"
},
{
"name": "HTML",
"bytes": "1896759"
},
{
"name": "JavaScript",
"bytes": "2509094"
},
{
"name": "Makefile",
"bytes": "2057"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "13513559"
},
{
"name": "Shell",
"bytes": "3886"
},
{
"name": "TeX",
"bytes": "57070"
}
],
"symlink_target": ""
} |
import sys, os
sys.path.insert(0, os.path.abspath('..'))
settings_dir = os.path.dirname(__file__)
PROJECT_ROOT = os.path.abspath(os.path.dirname(settings_dir))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'muebj=&qpdj_q7bm2naoi4bw7rnt)0_rr9s1f@0lrh!*e)eclj'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'demo.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'demo.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'bits',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| {
"content_hash": "939082bed21231727b83f389d85b32cf",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 127,
"avg_line_length": 34.04347826086956,
"alnum_prop": 0.6856413063309615,
"repo_name": "scdoshi/django-bits",
"id": "81d2c9177b6791611a4ac1108780c743a897b7fc",
"size": "5518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/demo/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "32545"
},
{
"name": "Shell",
"bytes": "6711"
}
],
"symlink_target": ""
} |
"""
Package: AutoItLibrary
Module: Loggger
Purpose: Defines a Logger class from which other classes can inherit the ability to log messages to
stdout. The Logger class' _log method does this in the style required by Robot Framework, but
this method could be overridden in the class that uses it in order to log in some other style
or to some other output path.
Copyright (c) 2009 Texas Instruments
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "Martin Taylor <cmtaylor@ti.com>"
__version__ = "1.0.1"
class Logger :
def _log(self, message, level='INFO') :
print '*%s* %s' % (level, message)
def _info(self, message) :
self._log(message)
def _debug(self, message) :
self._log(message, 'DEBUG')
def _warn(self, message) :
self._log(message, "WARN")
def _html(self, message) :
self._log(message, 'HTML')
def _infoKW(self, KW, *args, **kwargs) :
"""
Print a generic log message for the entry point of a given keyword, KW at *INFO* level.
"""
self._info("%s.%s(%s)" % (KW.im_class.__name__, KW.func_name, self._FormatArgs(KW, *args, **kwargs)))
def _debugKW(self, KW, *args, **kwargs) :
"""
Print a generic log message for the entry point of a given keyword, KW at *DEBUG* level.
"""
self._debug("%s.%s(%s)" % (KW.im_class.__name__, KW.func_name, self._FormatArgs(KW, *args, **kwargs)))
def _FormatASCII(self, pyObj) :
"""
Format the given pyObj as an ASCII string. This is first attempted by doing str(pyObj).
If that fails then if pyObj is a Unicode string it replaces each non-ASCII character with its
repr encoding. For any other pyObj it simply returns the complete repr encoding of pyObj.
"""
try :
aString = str(pyObj)
except UnicodeEncodeError :
if isinstance(pyObj, type(u'')) :
aString = ""
for c in pyObj :
if ord(c) > 128 :
aString += repr(c)[2:-1]
else :
aString += c
else :
aString = repr(pyObj)
        return aString
def _FormatArg(self, fmtLine, argName, argVal) :
"""
Format the given argName and argVal and add them to the current given fmtLine.
If fmtLine is non-empty then ", " is appended to it before appending the formatted argName=argVal.
"""
if len(fmtLine) > 0 :
fmtLine += ", "
if isinstance(argVal, type(1)) :
fmtLine += "%s=%d" % (argName, argVal)
elif isinstance(argVal, type(1.1)) :
fmtLine += "%s=%g" % (argName, argVal)
else :
fmtLine += "%s='%s'" % (argName, self._FormatASCII(argVal))
return fmtLine
def _FormatArgs(self, func, *args, **kwargs) :
"""
Format an arbitrary list of args and kwargs for function func for printing in a log line.
TBD: Add any defaulted args not present in args or kwargs
"""
fmtLine = ""
funcArgs = None
#
# If we got some positional args then format those, adding the
# argument name from the tuple of co_varnames obtained above.
#
if len(args) > 0 :
funcArgs = func.func_code.co_varnames
#
# If func is a method of a class then it will have "self" as the first argument.
# We don't want to print that, and it won't be in args or kwargs anyway, so remove it.
#
if funcArgs[0] == "self" :
funcArgs = funcArgs[1:]
ai = 0
for arg in args :
fmtLine = self._FormatArg(fmtLine, funcArgs[ai], arg)
ai += 1
#
# If we got some kwargs, then format those.
#
if len(kwargs.keys()) > 0 :
            if funcArgs is None :
funcArgs = func.func_code.co_varnames
#
# If func is a method of a class then it will have "self" as the first argument.
# We don't want to print that, and it won't be in args or kwargs anyway, so remove it.
#
if funcArgs[0] == "self" :
funcArgs = funcArgs[1:]
ai = 0
for i in range(ai, len(funcArgs)) :
key = funcArgs[i]
if key not in kwargs.keys() :
continue
fmtLine = self._FormatArg(fmtLine, key, kwargs[key])
del kwargs[key]
#
# TBD: Add any defaulted args not present in args or kwargs
#
#
# Add any additional args passed but not explicitly expected
#
if len(kwargs.keys()) > 0 :
for key in kwargs :
fmtLine = self._FormatArg(fmtLine, key, kwargs[key])
return fmtLine
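#
# Usage sketch (an illustrative addition, not part of the original module):
# a keyword class gains Robot Framework style logging by inheriting from
# Logger; the class and keyword names below are made up.
#
#   class WindowKeywords(Logger) :
#       def WinActivate(self, title, timeout=10) :
#           self._infoKW(self.WinActivate, title, timeout=timeout)
#
#   WindowKeywords().WinActivate("Notepad")
#   # prints: *INFO* WindowKeywords.WinActivate(title='Notepad', timeout=10)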
#
# -------------------------------- End of file --------------------------------
| {
"content_hash": "62623e1f881d7533aecad414d8039406",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 110,
"avg_line_length": 38.24342105263158,
"alnum_prop": 0.531223120591777,
"repo_name": "zheli/robotframework-autoitlibrary-forked",
"id": "22d8c2ce4ec30357279eed73c90861d3a1e06eff",
"size": "5813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/AutoItLibrary/Logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "38952"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(name='gapmaps',
version='0.1',
description='ScienceDirect gapmap generator',
author='Fabian Latorre',
author_email='latorrefabian@gmail.com',
url='',
packages=['gapmaps'],
)
| {
"content_hash": "5d2d869f670e2ade64a42b7fd4aee3e9",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 51,
"avg_line_length": 25.5,
"alnum_prop": 0.6313725490196078,
"repo_name": "latorrefabian/gapmaps",
"id": "61aa448a1518a78e728dde98a1fb8dbd2cd5fa07",
"size": "278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1781193"
},
{
"name": "Python",
"bytes": "49940"
}
],
"symlink_target": ""
} |
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.get_classes_request import GetClassesRequest # noqa: E501
from swagger_client.rest import ApiException
class TestGetClassesRequest(unittest.TestCase):
"""GetClassesRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetClassesRequest(self):
"""Test GetClassesRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.get_classes_request.GetClassesRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "5abfd980e7a2ea13678754fd6f66b5e1",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 119,
"avg_line_length": 24.92105263157895,
"alnum_prop": 0.6990496304118268,
"repo_name": "mindbody/API-Examples",
"id": "2231436f3529e2479b1148cc5847553a3718e6d7",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SDKs/Python/test/test_get_classes_request.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PHP",
"bytes": "3610259"
},
{
"name": "Python",
"bytes": "2338642"
},
{
"name": "Ruby",
"bytes": "2284441"
},
{
"name": "Shell",
"bytes": "5058"
}
],
"symlink_target": ""
} |
from ._monitor_query_client import MonitorQueryClient
__all__ = ['MonitorQueryClient']
| {
"content_hash": "eceebd05ede45856493d5a5a92fcc402",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 53,
"avg_line_length": 43.5,
"alnum_prop": 0.7701149425287356,
"repo_name": "Azure/azure-sdk-for-python",
"id": "9682cfa946552b35eba88ea2c0e68a54b492b018",
"size": "555",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/monitor/azure-monitor-query/azure/monitor/query/_generated/aio/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.utils.data as data_utils
import os, glob, platform, datetime, random
from PIL import Image
def default_loader(path):
return Image.open(path).convert('RGB')
def make_dataset(dir, phase, test_scene=None):
images_paths = glob.glob(os.path.join(dir, 'clean', '*', '*.png'))
albedo_paths = images_paths[:]
shading_paths = images_paths[:]
pathes = []
for img_path in images_paths:
        sp = img_path.split('/')
        # Keep only training scenes in the 'train' phase; only the held-out
        # test scene otherwise.
        if phase == 'train':
            if sp[-2] == test_scene: continue
        else:
            if sp[-2] != test_scene: continue
        # Derive the albedo path by swapping the 'clean' directory for 'albedo'.
        sp[-3] = 'albedo'
        sp = ['/'] + sp
        albedo_path = os.path.join(*sp)
        # Derive the shading path likewise, also renaming 'frame*' to 'out*'.
        sp = img_path.split('/')
        sp[-3] = 'shading'
        sp[-1] = sp[-1].replace('frame', 'out')
        sp = ['/'] + sp
        shading_path = os.path.join(*sp)
        pathes.append((img_path, albedo_path, shading_path))
return pathes
class MyImageFolder(data_utils.Dataset):
def __init__(self, root, phase='train', transform=None, target_transform=None, random_crop=True, loader=default_loader, img_extentions=None, test_scene=None, image_h=None, image_w=None):
imgs = make_dataset(root, phase, test_scene=test_scene)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
"Supported image extensions are: " + ",".join(img_extentions)))
self.root = root
self.imgs = imgs
self.transform = transform
self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
self.target_transform = target_transform
self.loader = loader
self.random_crop = random_crop
self.image_h = image_h
self.image_w = image_w
def __getitem__(self, index):
img_path, albedo_path, shading_path = self.imgs[index]
img = self.loader(img_path)
albedo = self.loader(albedo_path)
shading = self.loader(shading_path)
        if self.random_crop:
i, j, h, w = self.get_params(img, (int(self.image_h), int(self.image_w)))
img = img.crop((j, i, j + w, i + h))
albedo = albedo.crop((j, i, j + w, i + h))
shading = shading.crop((j, i, j + w, i + h))
# print(img.size)
# print((i, j, h, w))
if self.transform is not None: img = self.transform(img)
if self.transform is not None: albedo = self.transform(albedo)
if self.transform is not None: shading = self.transform(shading)
if self.normalize is not None: img = self.normalize(img)
scene = img_path.split('/')[-2]
return img, albedo, shading, scene, img_path
def __len__(self):
return len(self.imgs)
def get_params(self, img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = img.size
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
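if __name__ == '__main__':
    # Usage sketch (an illustrative addition, not part of the original
    # module); the dataset root and held-out scene name are made-up
    # placeholders for a Sintel-style directory layout.
    dataset = MyImageFolder(root='/data/sintel', phase='train',
                            transform=transforms.ToTensor(),
                            test_scene='alley_1', image_h=256, image_w=256)
    loader = data_utils.DataLoader(dataset, batch_size=4, shuffle=True)
    for img, albedo, shading, scene, path in loader:
        print(img.shape, albedo.shape, shading.shape)
        break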
| {
"content_hash": "4d633a351e2dced0ac7fa1f8deb34dd9",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 190,
"avg_line_length": 36.72631578947368,
"alnum_prop": 0.5614789337919175,
"repo_name": "albertxavier001/graduation-project",
"id": "4b00e7ea95054afb282d153cb849591f5c0dac0f",
"size": "3489",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pytorch/snapshot_store/final/script/myimagefolder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "55210"
},
{
"name": "Jupyter Notebook",
"bytes": "6086649"
},
{
"name": "Matlab",
"bytes": "240"
},
{
"name": "Python",
"bytes": "459143"
},
{
"name": "Shell",
"bytes": "565"
}
],
"symlink_target": ""
} |
from celery import Celery
from lib.settings import Settings
celery_client = Celery(broker=Settings.mongo_connection_string)
celery_client.conf.CELERY_TASK_SERIALIZER = "json"
#@celery_client.task(name='tasks.process_import')
def queue_vcf_import(file_id):
celery_client.send_task('tasks.process_import', [file_id], serializer='json')
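# Worker-side sketch (an illustrative addition, not part of the original
# module): a hypothetical worker task matching the name queued above.
#
#   @celery_client.task(name='tasks.process_import')
#   def process_import(file_id):
#       ...  # load the VCF file identified by file_id and import it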
| {
"content_hash": "5aa43202db37cc95004e5628b6e5ea94",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 81,
"avg_line_length": 34.1,
"alnum_prop": 0.7712609970674487,
"repo_name": "ClinGen/ildb",
"id": "aee09c9fb46ec11810d29bca3dcf321a4f463c1f",
"size": "341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vault/src/api/task_queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "58994"
},
{
"name": "HTML",
"bytes": "39707"
},
{
"name": "Nginx",
"bytes": "1879"
},
{
"name": "Python",
"bytes": "47332"
},
{
"name": "Shell",
"bytes": "2045"
},
{
"name": "TypeScript",
"bytes": "38539"
}
],
"symlink_target": ""
} |
"""
Textbox text highlighter.
Takes a string or regex expression as input.
All matches that are found get highlighted.
Created by Jack Ackermann
"""
import re
import tkinter
from tkinter import Tk, Frame, END, INSERT
class Highlighter(Frame):
"""
    Class takes in 3 arguments: regex_pattern, data_textbox, output_textbox
Highlights all matches for re.finditer
Inserts all found re.findall matches into a Text box
"""
def __init__(self, regex_pattern, data_textbox, output_textbox,
*args, **kwargs):
self.regex_pattern = regex_pattern
self.data_textbox = data_textbox
self.output_textbox = output_textbox
# Setup a tag to highlight the found word in red
self.data_textbox.tag_configure("highlight", background="light blue",
foreground="red")
def find_matches(self):
"""
        Makes sure all tags and found data are cleared before the search begins
Highlights all re.findinter matches found in the data_textbox
Inserts all re.findall matches found inside the output_textbox
"""
# Clears tags and data before every search
self.data_textbox.tag_remove("highlight", 1.0, END)
self.output_textbox.delete(1.0, END)
line_no = 0
keyword = self.regex_pattern.get()
data = self.data_textbox.get(1.0, END)
if len(keyword) == 0:
pass
# messagebox.showwarning('No Regex Pattern to find',
# 'Please enter a regex expression to match')
else:
for lines in data.split('\n'):
line_no += 1
                for m in re.finditer(keyword, lines):
#print('{0}.{1}, {0}.{2}'.format(line_no, m.start(), m.end()))
self.data_textbox.tag_add("highlight",
'{0}.{1}'.format(line_no,
m.start()),
'{0}.{1}'.format(line_no,
m.end()))
find_text = re.findall(keyword, data)
for items in find_text:
self.output_textbox.insert(INSERT, items + '\n')
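if __name__ == '__main__':
    # Usage sketch (an illustrative addition, not part of the original
    # module): wire the highlighter to an Entry and two Text widgets.
    root = Tk()
    pattern = tkinter.StringVar()
    tkinter.Entry(root, textvariable=pattern).pack()
    data_box = tkinter.Text(root, height=10)
    data_box.pack()
    data_box.insert(END, "cat hat bat\nmat sat\n")
    output_box = tkinter.Text(root, height=5)
    output_box.pack()
    highlighter = Highlighter(pattern, data_box, output_box)
    tkinter.Button(root, text="Find",
                   command=highlighter.find_matches).pack()
    root.mainloop()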
| {
"content_hash": "e6189e082b0802edf969a132adc29139",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 82,
"avg_line_length": 36.63636363636363,
"alnum_prop": 0.5194375516956162,
"repo_name": "HawkeyeZAR/PyRegexEvaluator",
"id": "be950cad6c0c98350c8e0c1b62efc8ea8066fd52",
"size": "2418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyRegexEvaluator/libs/highlighter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12537"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class WinRMListener(Model):
"""Describes Protocol and thumbprint of Windows Remote Management listener.
:param protocol: The Protocol used by the WinRM listener. Http and Https
are supported. Possible values include: 'Http', 'Https'
:type protocol: str or :class:`ProtocolTypes
<azure.mgmt.compute.compute.v2015_06_15.models.ProtocolTypes>`
:param certificate_url: The Certificate URL in KMS for Https listeners.
Should be null for Http listeners.
:type certificate_url: str
"""
_attribute_map = {
'protocol': {'key': 'protocol', 'type': 'ProtocolTypes'},
'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
}
def __init__(self, protocol=None, certificate_url=None):
self.protocol = protocol
self.certificate_url = certificate_url
| {
"content_hash": "6801b32a897473969d7f74f08255c5cc",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 37.69565217391305,
"alnum_prop": 0.6828143021914648,
"repo_name": "v-iam/azure-sdk-for-python",
"id": "24294170c5930ffa52c27f49de15306ee3167cc8",
"size": "1341",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/compute/v2015_06_15/models/win_rm_listener.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19856874"
}
],
"symlink_target": ""
} |