repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
UCSBarchlab/PyRTL
|
tests/test_helperfuncs.py
|
Python
|
bsd-3-clause
| 47,619
| 0.001638
|
import random
import unittest
import six
import os
import sys
import pyrtl
import pyrtl.corecircuits
import pyrtl.helperfuncs
from pyrtl.rtllib import testingutils as utils
# ---------------------------------------------------------------
class TestWireVectorList(unittest.TestCase):
def setUp(self):
pass
def test_input_list_type(self):
inputs = pyrtl.helperfuncs.input_list('one, two, three')
self.assertTrue(all(isinstance(inp, pyrtl.Input) for inp in inputs))
def test_output_list_type(self):
outputs = pyrtl.helperfuncs.output_list('one, two, three')
self.assertTrue(all(isinstance(outp, pyrtl.Output) for outp in outputs))
def test_register_list_type(self):
registers = pyrtl.helperfuncs.register_list('one, two, three')
self.assertTrue(all(isinstance(reg, pyrtl.Register) for reg in registers))
def test_wirevector_list_type(self):
# Single string of names
wirevectors = pyrtl.helperfuncs.wirevector_list('one, two, three')
self.assertTrue(all(isinstance(wire, pyrtl.WireVector) for wire in wirevectors))
self.assertListEqual([wire.bitwidth for wire in wirevectors], [1, 1, 1])
# List of names
wirevectors = pyrtl.helperfuncs.wirevector_list('one, two, three')
self.assertTrue(all(isinstance(wire, pyrtl.WireVector) for wire in wirevectors))
self.assertListEqual([wire.bitwidth for wire in wirevectors], [1, 1, 1])
def test_wirevector_list_bitwidth(self):
wirevectors = pyrtl.helperfuncs.wirevector_list('one, two, three')
self.assertListEqual([wire.bitwidth for wire in wirevectors], [1, 1, 1])
wirevectors = pyrtl.helperfuncs.wirevector_list('one, two, three', 8)
self.assertListEqual([wire.bitwidth for wire in wirevectors], [8, 8, 8])
def test_wirevector_list_per_wire_width(self):
wirevectors = pyrtl.helperfuncs.wirevector_list('one/2, two/4, three/8')
self.assertListEqual([wire.bitwidth for wire in wirevectors], [2, 4, 8])
wirevectors = pyrtl.helperfuncs.wirevector_list(['one', 'two', 'three'], [2, 4, 8])
self.assertListEqual([wire.bitwidth for wire in wirevectors], [2, 4, 8])
def test_wirevector_list_raise_errors(self):
with self.assert
|
Raises(ValueError):
pyrtl.helperfuncs.wirevector_list(['one', 'two', 'three'], [2, 4])
with self.assertRaises(pyrtl.PyrtlError):
pyrtl.helperfuncs.wirevector_list('one/2, two/4, three/8', 16)
with self.
|
assertRaises(pyrtl.PyrtlError):
pyrtl.helperfuncs.wirevector_list(['one/2', 'two/4', 'three/8'], [8, 4, 2])
class TestNonCoreHelpers(unittest.TestCase):
def setUp(self):
pass
def test_log2(self):
self.assertEqual(pyrtl.log2(1), 0)
self.assertEqual(pyrtl.log2(2), 1)
self.assertEqual(pyrtl.log2(8), 3)
self.assertEqual(pyrtl.log2(16), 4)
with self.assertRaises(pyrtl.PyrtlError):
pyrtl.log2(-1)
with self.assertRaises(pyrtl.PyrtlError):
pyrtl.log2(1.5)
with self.assertRaises(pyrtl.PyrtlError):
pyrtl.log2(0)
with self.assertRaises(pyrtl.PyrtlError):
pyrtl.log2(7)
with self.assertRaises(pyrtl.PyrtlError):
pyrtl.log2(9)
def test_truncate_function(self):
self.assertEqual(pyrtl.truncate(5, 3), 5)
self.assertEqual(pyrtl.truncate(9, 3), 1)
self.assertEqual(pyrtl.truncate(-1, 3), 7)
with self.assertRaises(pyrtl.PyrtlError):
pyrtl.truncate(5, -1)
with self.assertRaises(pyrtl.PyrtlError):
pyrtl.truncate(29, 0)
def test_val_to_signed_integer(self):
self.assertEqual(pyrtl.val_to_signed_integer(0b000, 3), 0)
self.assertEqual(pyrtl.val_to_signed_integer(0b001, 3), 1)
self.assertEqual(pyrtl.val_to_signed_integer(0b010, 3), 2)
self.assertEqual(pyrtl.val_to_signed_integer(0b011, 3), 3)
self.assertEqual(pyrtl.val_to_signed_integer(0b100, 3), -4)
self.assertEqual(pyrtl.val_to_signed_integer(0b101, 3), -3)
self.assertEqual(pyrtl.val_to_signed_integer(0b110, 3), -2)
self.assertEqual(pyrtl.val_to_signed_integer(0b111, 3), -1)
def test_infer_val_and_bitwidth(self):
self.assertEqual(pyrtl.infer_val_and_bitwidth(2, bitwidth=5), (2, 5))
self.assertEqual(pyrtl.infer_val_and_bitwidth(3), (3, 2))
self.assertEqual(pyrtl.infer_val_and_bitwidth(True), (1, 1))
self.assertEqual(pyrtl.infer_val_and_bitwidth(False), (0, 1))
self.assertEqual(pyrtl.infer_val_and_bitwidth("5'd12"), (12, 5))
self.assertEqual(pyrtl.infer_val_and_bitwidth("5'b10"), (2, 5))
self.assertEqual(pyrtl.infer_val_and_bitwidth("5'b10"), (2, 5))
self.assertEqual(pyrtl.infer_val_and_bitwidth("8'B 0110_1100"), (108, 8))
self.assertEqual(pyrtl.infer_val_and_bitwidth(-3, bitwidth=5), (0b11101, 5))
self.assertEqual(pyrtl.infer_val_and_bitwidth(3, signed=True), (3, 3))
self.assertEqual(pyrtl.infer_val_and_bitwidth(-3, signed=True), (5, 3))
self.assertEqual(pyrtl.infer_val_and_bitwidth(-4, signed=True), (4, 3))
self.assertEqual(pyrtl.infer_val_and_bitwidth(-3, bitwidth=5, signed=True), (29, 5))
self.assertEqual(pyrtl.infer_val_and_bitwidth(3, bitwidth=2), (3, 2))
self.assertEqual(pyrtl.infer_val_and_bitwidth(0), (0, 1))
self.assertEqual(pyrtl.infer_val_and_bitwidth(1), (1, 1))
self.assertEqual(pyrtl.infer_val_and_bitwidth(2), (2, 2))
self.assertEqual(pyrtl.infer_val_and_bitwidth(3), (3, 2))
self.assertEqual(pyrtl.infer_val_and_bitwidth(4), (4, 3))
self.assertEqual(pyrtl.infer_val_and_bitwidth(0, signed=True), (0, 1))
self.assertEqual(pyrtl.infer_val_and_bitwidth(1, signed=True), (1, 2))
self.assertEqual(pyrtl.infer_val_and_bitwidth(2, signed=True), (2, 3))
self.assertEqual(pyrtl.infer_val_and_bitwidth(3, signed=True), (3, 3))
self.assertEqual(pyrtl.infer_val_and_bitwidth(4, signed=True), (4, 4))
self.assertEqual(pyrtl.infer_val_and_bitwidth(-1, signed=True), (1, 1))
self.assertEqual(pyrtl.infer_val_and_bitwidth(-2, signed=True), (2, 2))
self.assertEqual(pyrtl.infer_val_and_bitwidth(-3, signed=True), (5, 3))
self.assertEqual(pyrtl.infer_val_and_bitwidth(-4, signed=True), (4, 3))
self.assertEqual(pyrtl.infer_val_and_bitwidth(-5, signed=True), (11, 4))
with self.assertRaises(pyrtl.PyrtlError):
pyrtl.infer_val_and_bitwidth(-3)
with self.assertRaises(pyrtl.PyrtlError):
pyrtl.infer_val_and_bitwidth(True, signed=True)
with self.assertRaises(pyrtl.PyrtlError):
pyrtl.infer_val_and_bitwidth(3, bitwidth=2, signed=True)
class TestMatchBitpattern(unittest.TestCase):
def setUp(self):
random.seed(8492049)
pyrtl.reset_working_block()
def check_trace(self, correct_string):
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
for i in range(8):
sim.step({})
output = six.StringIO()
sim_trace.print_trace(output, compact=True)
self.assertEqual(output.getvalue(), correct_string)
def test_pattern_type_or_length_mismatch(self):
instr = pyrtl.WireVector(name='instr', bitwidth=8)
with self.assertRaises(pyrtl.PyrtlError):
o, _ = pyrtl.match_bitpattern(instr, '000100010')
with self.assertRaises(pyrtl.PyrtlError):
o, _ = pyrtl.match_bitpattern(instr, '0001000')
with self.assertRaises(pyrtl.PyrtlError):
o, _ = pyrtl.match_bitpattern(instr, '0b00010001')
with self.assertRaises(pyrtl.PyrtlError):
o, _ = pyrtl.match_bitpattern(instr, 0b000100010)
with self.assertRaises(pyrtl.PyrtlError):
o, _ = pyrtl.match_bitpattern(instr, '')
with self.assertRaises(pyrtl.PyrtlError):
o, _ = pyrtl.match_bitpattern(instr, None)
with self.assertRaises(pyrtl.PyrtlError):
o, _ = pyrtl.match_bitpattern(instr, instr)
|
oriordan/yubistack
|
yubistack/exceptions.py
|
Python
|
bsd-2-clause
| 1,873
| 0.003737
|
"""
yubistack.exceptions
~~~~~~~~~~~~~~~~~~~~
List all custom exceptions here
"""
STATUS_CODES = {
# YKAuth
'BAD_PASSWORD': 'Invalid password',
'DISABLED_TOKEN': 'Token is disabled',
'UNKNOWN_USER': 'Unknown user',
'INVALID_TOKEN': 'Token is not associated with user',
# YKVal
'BACKEND_ERROR': 'Backend error',
'BAD_OTP': 'Invalid OTP',
'BAD_SIGNATURE': 'The HMAC signature verification failed',
'DELAYED_OTP': 'Expired OTP',
'INVALID_PARAMETER': 'The request has invalid parameter',
'MISSING_PARAMETER': 'The request missing parameter',
'NO_SUCH_CLIENT': 'The request id does not exist',
'NOT_ENOUGH_ANSWERS': 'Server could not get requested number of syncs before timeout',
'OPERATION_NOT_ALLOWED': 'The request is now allowed',
'REPLAYED_OTP': 'Replayed OTP',
'REPLAYED_REQUEST': 'Server has seen the OTP/Nonce combination before',
# YKKSM
'CORRUPT_OTP': 'Corrupt OTP',
'MISSING_OTP': 'No OTP provided',
'UNKNOWN_TOKEN': 'Unknown yubikey',
}
class YubistackE
|
rror(Exception):
""" Yubistack Exception """
NAME = 'Yubistack error'
def __init__(self, *args):
super(YubistackError, self).__init__(*args)
self.error_code = self.args[0]
def __str__(self):
message = STATUS_CODES[self.error_code]
if len(self.args) == 2:
|
message += ': %s' % self.args[1]
return message
class YKAuthError(YubistackError):
""" Error returned by the Client class """
NAME = 'Authentication error'
class YKValError(YubistackError):
""" Error returned by the Validator class """
NAME = 'Validation error'
class YKSyncError(YubistackError):
""" Error returned by the Sync class """
NAME = 'Sync error'
class YKKSMError(YubistackError):
""" Error returned by the Decryptor class """
NAME = 'Decryption error'
|
CAB-LAB/cablab-core
|
esdl/dat.py
|
Python
|
gpl-3.0
| 3,600
| 0.003611
|
"""
.. _xarray.Dataset: http://xarray.pydata.org/en/stable/data-structures.html#dataset
.. _xarray.DataArray: http://
|
xarray.pydata.org/en/stable/data-structures.html#dataarray
.. _Numpy: http://www.numpy.org/
The following functions provide the hi
|
gh-level API of the ESDC Python DAT.
It provides additional analytical utility functions which work for `xarray.Dataset`_ objects
which are used to represent the ESDC data.
"""
import xarray as xr
def corrcf(ds, var1=None, var2=None, dim='time'):
'''
Function calculating the correlation coefficient of two variables **var1** and **var2** in one `xarray.Dataset`_
**ds**.
:param ds: an `xarray.Dataset`_
:param var1: Variable 1
:param var2: Variable 2, both have to be of identical size
:param dim: dimension for aggregation, default is time.
In the default case, the result is an image
:return:
'''
if not isinstance(ds, xr.Dataset):
print('Input object ', ds, ' is no xarray Dataset!')
var1 = None
if var1 is not None:
if var2 is None:
var2 = var1
ds_tmean = ds.mean(skipna=True, dim=dim)
ds_tstd = ds.std(skipna=True, dim=dim)
covar_1 = (ds[var1] - ds_tmean[var1]) * (ds[var2] - ds_tmean[var2])
res = covar_1.mean(dim='time', skipna=True) / (ds_tstd[var1] * ds_tstd[var2])
else:
res = None
return res
def map_plot(ds, var=None, time=0, title_str='No title', projection='kav7', lon_0=0, resolution=None, **kwargs):
'''
Function plotting a projected map for a variable **var** in `xarray.Dataset`_ **ds**.
:param ds: an `xarray.Dataset`_
:param var: variable to plot
:param time: time step or datetime date to plot
:param title_str: Title string
:param projection: for Basemap
:param lon_0: longitude 0 for central
:param resolution: resolution for Basemap object
:param kwargs: Any other **kwargs** accepted by the pcolormap function of Basemap
:return:
'''
if isinstance(time, int):
res = ds[var].isel(time=time)
elif time is None:
res = ds[var]
time = None
else:
try:
res = ds[var].sel(time=time, method='nearest')
except:
print("Wrong date format, should be YYYY-MM-DD")
raise
lons, lats = np.meshgrid(np.array(res.lon), np.array(res.lat))
ma_res = np.ma.array(res, mask=np.isnan(res))
if "vmin" in kwargs:
vmin = kwargs["vmin"]
else:
vmin = None
if "vmax" in kwargs:
vmax = kwargs["vmax"]
else:
vmax = None
if title_str == "No title":
title_str = var + ' ' + str(time)
else:
title_str = title_str + ' ' + str(res.time.values)[0:10]
fig = plt.figure()
ax = fig.add_axes([0.05, 0.05, 0.9, 0.9])
m = Basemap(projection, lon_0, resolution)
m.drawmapboundary(fill_color='0.3')
ccmap = plt.cm.jet
ccmap.set_bad("gray", 1.)
im = m.pcolormesh(lons, lats, ma_res, shading='flat', cmap=ccmap,
latlon=True, vmin=vmin, vmax=vmax, **kwargs)
# lay-out
m.drawparallels(np.arange(-90., 99., 30.))
m.drawmeridians(np.arange(-180., 180., 60.))
cb = m.colorbar(im, "bottom", size="5%", pad="2%")
cb.set_label(ds[var].attrs['standard_name'] + ' (' + ds[var].attrs['units'] + ')')
ax.set_title(title_str)
# write to disk if specified
if "plot_me" in kwargs:
if kwargs["plot_me"] == True:
plt.savefig(title_str[0:15] + '.png', dpi=600)
fig.set_size_inches(8, 12)
return fig, ax, m
|
fy0/my-leetcode
|
990.Satisfiability Of Equality Equations/main.py
|
Python
|
apache-2.0
| 674
| 0.001484
|
class Solution:
|
def translateNum(self, num: int) -> int:
if num < 10:
return 1
table = {}
for i in range(26):
table[str(i)] = chr(ord('a') + i)
num_str = str(num)
arr = []
def solve(eated, s, last):
if not s:
arr.append(eated)
return
i = s[0]
|
if last == '1' or (last == '2' and i in ('0', '1', '2', '3', '4', '5')):
# if ord(eated[-1]) < ord('k'):
solve(table[last + i], s[1:], last + i)
solve(eated + table[i], s[1:], i)
solve('', num_str, None)
return len(arr)
|
free-free/pyblog
|
pyblog/cache/redis_cache.py
|
Python
|
mit
| 13,227
| 0.001285
|
#-*- coding:utf-8 -*-
import logging
logging.basicConfig(level=logging.ERROR)
from cache_abstract import CacheAbstractDriver
import asyncio
try:
import redis
except ImportError:
logging.error("Can't import 'redis' module")
exit(-1)
try:
import aioredis
except ImportError:
logging.error("Can't import 'aioredis' module")
exit(-1)
class AsyncRedisCacheClient(object):
def __init__(self, host, port, db, *args, **kwargs):
assert isinstance(host, str)
assert isinstance(port, int)
assert isinstance(db, int) and 0 <= db < 16
self.__host = host
self.__port = port
self.__db = db
self.__connection = None
self.__key_type_map = {
"1": "string",
"2": "hash",
"3": "list"
}
self.__key_type_hash = "_key_type"
@asyncio.coroutine
def get_connection(self, loop=None):
self.__connection = yield from aioredis.create_redis((self.__host, self.__port), db=self.__db, loop=loop)
return self.__connection
@asyncio.coroutine
def set(self, key, value, expires, key_prefix):
if not self.__connection:
yield from self.get_connection()
if not value and isinstance(key, dict):
pipe = self.__connection.pipeline()
for k, item in keys.items():
pipe.hset(self.__key_type_hash, key_prefix + k, 1)
pipe.set(key_prefix + k, item)
if expires > 0:
pipe.expire(key_prefix + k, expires)
return (yield from pipe.execute())
else:
key = key_prefix + key
if isinstance(value, str):
pipe = self.__connection.pipeline()
pipe.hset(self.__key_type_hash, key, 1)
pipe.set(key, value)
if expires > 0:
pipe.expire(key, expires)
return (yield from pipe.execute())
elif isinstance(value, dict):
pipe = self.__connection.pipeline()
pipe.hset(self.__key_type_hash, key, 2)
for field, item_v in value.items():
pipe.hset(key, field, item_v)
if expires > 0:
pipe.expire(key, expires)
return (yield from pipe.execute())
elif isinstance(value, (list, tuple)):
pipe = self.__connection.pipeline()
pipe.hset(self.__key_type_hash, key, 3)
value = list(value)
pipe.lpush(key, *value)
if expires > 0:
pipe.expire(key, expires)
return (yield from pipe.execute())
@asyncio.coroutine
def get(self, key, key_prefix):
if not self.__connection:
yield from self.get_connection()
key_type = yield from self.exists(key, key_prefix)
key = key_prefix + key
if key_type == "string":
return (yield from self.__connection.get(key)) or ""
elif key_type == "hash":
return (yield from self.__connection.hgetall(key)) or {}
elif key_type == "list":
return (yield from self.__connection.lrange(key, 0, -1)) or []
else:
return None
@asyncio.coroutine
def exists(self, key, key_prefix):
key = key_prefix + key
if not self.__connection:
yield from self.get_connection()
key_type = yield from self.__connection.hget(self.__key_type_hash, key)
if key_type:
return self.__key_type_map.get(key_type.decode("utf-8"))
return None
@asyncio.c
|
oroutine
d
|
ef delete_key(self, key, key_prefix):
if not self.__connection:
yield from self.get_connection()
key = key_prefix + key
return (yield from self.__connection.hdel(self.__key_type_hash, key))
@asyncio.coroutine
def delete(self, key, key_prefix):
if not self.__connection:
yield from self.get_connection()
key_type = yield from self.exists(key, key_prefix)
result = ""
if key_type == "string":
result = yield from self.__connection.delete(key_prefix + key)
elif key_type == "hash":
result = yield from self.__connection.hdel(key, *(yield from self.__connection.hkeys(key_prefix + key)))
elif key_type == "list":
length = yield from self.__connection.llen(key_prefix + key)
if length:
result = yield from self.__connection.ltrim(key_prefix + key, length + 1, length + 1)
else:
result = None
yield from self.delete_key(key, key_prefix)
return result
@asyncio.coroutine
def inc(self, key, delta, key_prefix):
if not self.__connection:
yield from self.get_connection()
key_type = yield from self.exists(key, key_prefix)
key = key_prefix + key
if key_type == "string":
return (yield from self.__connection.incrby(key, delta))
else:
raise TypeError("can't increment '%s'" % key)
@asyncio.coroutine
def dec(self, key, delta, key_prefix):
if not self.__connection:
yield from self.get_connection()
key_type = yield from self.exists(key, key_prefix)
key = key_prefix + key
if key_type == "string":
return (yield from self.__connection.decrby(key, delta))
else:
raise TypeError("can't decrement '%s'" % key)
@asyncio.coroutine
def close(self):
if self.__connection:
self.__connection.close()
yield from self.__connection.wait_closed()
class RedisCacheClient(object):
def __init__(self, host, port, db, *args, **kwargs):
assert isinstance(host, str)
assert isinstance(port, int)
assert isinstance(db, int) and 0 <= self.__db < 16
self.__host = host
self.__port = port
self.__db = db
self._connection = redis.StrictRedis(
self.__host, self.__port, self.__db)
self.__key_type_map = {
"1": "string",
"2": "hash",
"3": "list"
}
self.__key_type_hash = "_key_type"
def set(self, key, value, expires, key_prefix):
if not value and isinstance(key, dict):
pipe = self._connection.pipeline()
for k, item in key.items():
pipe.hset(self.__key_type_hash, key_prefix + k, 1)
pipe.set(key_prefix + k, item)
if expires > 0:
pipe.expire(key_prefix + k, expires)
return pipe.execute()
else:
key = key_prefix + key
if isinstance(value, str):
pipe = self._connection.pipeline()
pipe.hset(self.__key_type_hash, key, 1)
pipe.set(key, value)
if expires > 0:
pipe.expire(key, expires)
return pipe.execute()
elif isinstance(value, dict):
pipe = self._connection.pipeline()
pipe.hset(self.__key_type_hash, key, 2)
pipe.hmset(key, value)
if expires > 0:
pipe.expire(key, expires)
return pipe.execute()
elif isinstance(value, (list, tuple)):
pipe = self._connection.pipeline()
pipe.hset(self.__key_type_hash, key, 3)
pipe.lpush(key, *value)
if expires > 0:
pipe.expire(key, expires)
return pipe.execute()
def get(self, key, key_prefix):
key_type = self.exists(key, key_prefix)
key = key_prefix + key
if key_type == "string":
return self._connection.get(key) or ""
elif key_type == "hash":
return self._connection.hgetall(key) or {}
elif key_type == "list":
return self._connection.lrange(key, 0, -1) or []
else:
return None
def delete(self, key, key_prefix):
key_type = self.exists(key, key_prefix)
|
felipenaselva/repo.felipe
|
plugin.video.salts/salts_lib/worker_pool.py
|
Python
|
gpl-2.0
| 4,999
| 0.006001
|
"""
SALTS XBMC Addon
Copyright (C) 2016 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import Queue
import threading
import log_utils
Empty = Queue.Empty
COMPONENT = __name__
class WorkerPool(object):
def __init__(self, max_workers=None):
self.max_workers = max_workers
self.workers = []
self.out_q = Queue.Queue()
self.in_q = Queue.Queue()
self.new_job = threading.Event()
self.manager = None
self.closing = False
self.__start_manager()
def request(self, func, args=None, kwargs=None):
if args is None: args = []
if kwargs is None: kwargs = {}
self.in_q.put({'func': func, 'args': args, 'kwargs': kwargs})
self.new_job.set()
def receive(self, timeout):
return self.out_q.get(True, timeout)
def close(self):
self.closing = True
self.new_job.set()
# tell all consumers to die
self.in_q.put(None)
if self.manager is not None:
self.manager.join()
return reap_workers(self.workers)
def __start_manager(self):
self.manager = threading.Thread(target=self.__manage_consumers)
self.manager.daemon = True
self.manager.start()
log_utils.log('Pool Manager(%s): started.' % (self), log_utils.LOGDEBUG, COMPONENT)
def __manage_consumers(self):
while not self.closing:
self.new_job.wait()
self.new_job.clear()
if self.closing:
break
new_workers = self.in_q.qsize() # create a worker for each job waiting (up to max_workers)
if new_workers > 0:
if self.max_workers is None:
max_new = new_workers
else:
max_new = self.max_workers - len(self.workers)
log_utils.log('Pool Manager: Requested: %s Allowed: %s - Pool Size: (%s / %s)' % (new_workers, max_new, len(self.workers), self.max_workers), log_utils.LOGDEBUG, COMPONENT)
if max_new > 0:
|
if new_workers > max_new:
new_workers = max_new
for _ in xrange(new_workers):
try:
worker = threading.Thread(target=self.consumer)
worker.daemon = True
worker.start()
self.workers.append(worker)
log_utils.log('Pool Manager: %s thrown in Pool: (%s/%s)'
|
% (worker.name, len(self.workers), self.max_workers), log_utils.LOGDEBUG, COMPONENT)
except RuntimeError as e:
try: log_utils.log('Pool Manager: %s missed Pool: %s - (%s/%s)' % (worker.name, e, len(self.workers), self.max_workers), log_utils.LOGWARNING)
except UnboundLocalError: pass # worker may not have gotten assigned
log_utils.log('Pool Manager(%s): quitting.' % (self), log_utils.LOGDEBUG, COMPONENT)
def consumer(self):
me = threading.current_thread()
while True:
job = self.in_q.get()
if job is None:
log_utils.log('Worker: %s committing suicide.' % (me.name), log_utils.LOGDEBUG, COMPONENT)
self.in_q.put(job)
break
log_utils.log('Worker: %s handling job: |%s| with args: |%s| and kwargs: |%s|' % (me.name, job['func'], job['args'], job['kwargs']), log_utils.LOGDEBUG, COMPONENT)
result = job['func'](*job['args'], **job['kwargs'])
self.out_q.put(result)
def reap_workers(workers, timeout=0):
"""
Reap thread/process workers; don't block by default; return un-reaped workers
"""
log_utils.log('In Reap: Total Workers: %s' % (len(workers)), log_utils.LOGDEBUG, COMPONENT)
living_workers = []
for worker in workers:
if worker:
log_utils.log('Reaping: %s' % (worker.name), log_utils.LOGDEBUG)
worker.join(timeout)
if worker.is_alive():
log_utils.log('Worker %s still running' % (worker.name), log_utils.LOGDEBUG, COMPONENT)
living_workers.append(worker)
return living_workers
|
fahadadeel/Aspose.Cells-for-Java
|
Plugins/Aspose-Cells-Java-for-Python/setup.py
|
Python
|
mit
| 705
| 0.014184
|
__author__ = 'fahadadeel'
from setuptools import setup, find_packages
setup(
name = 'aspose-cells-java-for-python',
packages
|
= find_packages(),
version = '1.0',
description = 'Aspose.cells Java for Python is a project that demonstrates / provides the Aspose.Cells for Java API usage examples in Python.',
author='Fahad Adeel',
author_email='cells@aspose.com',
url='https://github.com/asposecells/Aspose_Cells_Java/tree/master/Plugins/Aspose-Cells-Java-for-Python',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'License
|
:: OSI Approved :: MIT License',
'Operating System :: OS Independent'
]
)
|
lucashmorais/x-Bench
|
mozmill-env/python/Lib/site-packages/jsbridge/__init__.py
|
Python
|
mit
| 1,601
| 0
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import asyncore
from datetime import datetime, timedelta
import socket
import os
import sys
from time import sleep
from jsobjects import JSObject
from network import Bridge, BackChannel, create_network
parent = os.path.abspath(os.path.dirname(__file__))
extension_path = os.path.join(parent, 'extension')
wait_to_create_timeout = 60
def find_port():
free_socket = socket.socket(socke
|
t.AF_INET, socket.SOCK_STREAM)
free_socket.bind(('127.0.0.1', 0))
port = free_socket.getsockname()[1]
free_socket.close()
return port
def wait_and_create_network(host, port, timeout=wait_to_create_timeout):
deadline = datetime.utcnow() + timedelta(seconds=timeout)
connected = False
w
|
hile datetime.utcnow() < deadline:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.close()
connected = True
break
except socket.error:
pass
sleep(.25)
if not connected:
raise Exception("Cannot connect to jsbridge extension, port %s" % port)
back_channel, bridge = create_network(host, port)
sleep(.5)
while back_channel.registered is False:
back_channel.close()
bridge.close()
asyncore.socket_map = {}
sleep(1)
back_channel, bridge = create_network(host, port)
return back_channel, bridge
|
kooksee/TIOT
|
test/project/src/app/proto/controller/RFIDController.py
|
Python
|
gpl-2.0
| 4,692
| 0.002081
|
#encoding=utf-8
from app.proto.common.data_parse import hex_to_dec, dec_to_hex
from app.proto.common.hachi.c
|
ore import XBee
from app.proto.frames.RFIDFrames import rfid_frame
class RFIDController:
def __init__(self, escaped=True):
# self.xbee = XBee()
pass
def parse_pkgs(self, bytestream):
'''
未处理leftovers数据 todo
'''
container = rfid_frame.parse(bytestream)
return container.packets, container.leftovers
def make_packet(packet, *a
|
rgs, **kwargs):
pass
# rfid = RFIDController()
# rfid.parse_pkgs("FF FF F1 07 0E 01 00 13 8E 88 00 04 00 47 ").pkgs
# frame = bytearray.fromhex("FF FF F1 07 0E 01 00 13 8E 88 00 04 00 47 ")
# frame = bytearray.fromhex("ff ff f1 07 44 0a 80 13 8e 00003f80139400003f 00138d00003f 00138f00003f00139a00003f00139c00003f00138a00003d00138c00003d00139900003d00139b0000066e32")
# FF FF F1 07 26 05 00 13 9A 00 00 1D 80 13 8E 00 00 1D 00 13 8C 00 00 1D 00 13 8D 00 00 15 80 13 94 00 00 13 77 F9
# pkgs,leftovers = rfid.parse_pkgs(frame)
# print pkgs
#
# data = ""
# for pkg in pkgs[0].data.block:
# for cardID in pkg.CardID:
# data += dec_to_hex(cardID)
# print hex_to_dec(data)
# print dec_to_hex(pkg.triggerID) == ""
# data = ""
# print pkgs
# print pkgs[0].data.block[0].CardID
# print pkgs[0].data.block[0].triggerID
# print pkgs[0].data.block[1].CardID
# print len(leftovers)
# data = ""
# for ii in pkgs[0].data.block[0].CardID:
# data += dec_to_hex(ii)
# print data
# sent_getReaderID = None
# sent_scanReader = None
# sent_getDoorForbValue = None
# sent_setDoorForbValue = None
#
# #设置得到阅读器id的指令
# def set_getReaderID(self,getReaderID_str_hex):
# self.sent_getReaderID = getReaderID_str_hex
# return self
#
# # get_readerID
# # 得到阅读器id:get_readerID(receive_ReaderID)
# def get_readerID(receive_ReaderID):
# return receive_ReaderID[8:10]
#
# #设置扫描电子标签的指令
# def set_scanReader(self,scanReader_str_hex):
# self.sent_scanReader = scanReader_str_hex
# return self
#
# #设置得到门禁值的指令
# def set_getDoorForbValue(self,getDoorForbValue_str_hex):
# self.sent_getDoorForbValue = getDoorForbValue_str_hex
# return self
#
# #设置设置门禁值的指令
# def set_setDoorForbValue(self,setDoorForbValue_str_hex):
# self.sent_setDoorForbValue = setDoorForbValue_str_hex
# return self
#
# # get_reader_status
# #得到阅读器的状态
# def get_reader_status(receive_scanReaderValue):
# return receive_scanReaderValue[10:12]
#
# # get_three_values
# # 解析扫描阅读器后得到的,电子标签,低频触发器,卡片的相对时间
# #返回的格式为[[str1,str2,str3],[],[],[]],其中str1是电子标签的ID,str2是低频触发器ID,str3是相对时间
# def get_three_values(
# receive_scanReaderValue): # FF FF F1 07 0E 01 00 13 8E 00 00 F2 1D 65
# scan_values = receive_scanReaderValue[12:-4]
# values_list = []
# for x in range(0, len(scan_values), 12):
# values_list.append(scan_values[0 + x:12 + x])
# print values_list[x]
# return [[hex_to_dec(y[0:6]), hex_to_dec(y[6:8]), hex_to_dec(y[8:12])] for y in values_list]
#
# print get_three_values("FFFFF1070E0100138E0000F21D65")
#
# # 得到门限值
# def get_DoorForbValue(receive_DoorForbValue):
# return receive_DoorForbValue[8:10]
#
# # set_DoorForbValue
# def set_DoorForbValue(str_intDoorForbValue):
# return "FFFFF306" + str_intDoorForbValue + "校验结果"
#
# # parse_setDoorForbResult
# #解析设置门限的返回结果信息,如果结果得到的是AA,那么就是设置成功,如果是55,那么就是设置失败
# def parse_setDoorForbResult(receive_setDoorForbResult):
# return receive_setDoorForbResult[8:10]
#
# # get_instruct_code
# # 得到指令码:get_instruct_code("FFFFF205F7")
# def get_instruct_code(str_instruct):
# return str_instruct[4:6]
#
# # get_instruct_length
# #得到指令长度:get_instruct_length("FFFFF205F7")
# # def get_instruct_length(str_instruct):
# # if (0 == cmp(str_instruct, receive_scanReaderValue)):
# # return str_instruct[8:10]
# # return str_instruct[6:8]
|
lexibrent/certificate-transparency
|
python/ct/client/async_log_client.py
|
Python
|
apache-2.0
| 18,435
| 0.000868
|
"""RFC 6962 client API."""
from ct.client import log_client
from ct.client.db import database
import gflags
import logging
import random
from twisted.internet import defer
from twisted.internet import error
from twisted.internet import protocol
from twisted.internet import reactor as ireactor
from twisted.internet import task
from twisted.internet import threads
from twisted.python import failure
from twisted.web import client
from twisted.web import http
from twisted.web import iweb
from Queue import Queue
from zope.interface import implements
logging = logging.getLogger('async_log_client.py')
FLAGS = gflags.FLAGS
gflags.DEFINE_integer("max_fetchers_in_parallel", 100, "Maximum number of "
"concurrent fetches.")
gflags.DEFINE_integer("get_entries_retry_delay", 1, "Number of seconds after "
"which get-entries will be retried if it encountered "
"an error.")
gflags.DEFINE_integer("entries_buffer", 100000, "Size of buffer which stores "
"fetched entries before async log client is able to "
"return them. 100000 entries shouldn't take more "
"than 600 Mb of memory.")
gflags.DEFINE_integer("response_buffer_size_bytes", 50 * 1000 * 1000, "Maximum "
"size of a single response buffer. Should be set such "
"that a get_entries response comfortably fits in the "
"the buffer. A typical log entry is expected to be < "
|
"10kB.")
gflags.DEFINE_bool("persist_entries", True, "Cache entries on disk.")
class HTTPConnectionError(log_client.HTTPError):
"""Connection failed."""
pass
class HTTPResponseSizeExceededError(log_client.HTTPError):
"""HTTP response exceeded maximum permitted size."""
pass
###############################################################################
# The
|
asynchronous twisted log client. #
###############################################################################
class ResponseBodyHandler(protocol.Protocol):
    """Response handler for HTTP requests.

    Accumulates the body in memory and fires the supplied deferred when
    the connection closes: with the full body on success, or with an
    HTTPConnectionError / HTTPResponseSizeExceededError on failure.
    """
    def __init__(self, finished):
        """Initialize the one-off response handler.
        Args:
            finished: a deferred that will be fired with the body when the
            complete response has been received; or with an error when the
            connection is lost.
        """
        self._finished = finished
    def connectionMade(self):
        # Fresh per-connection state; the handler is one-shot.
        self._buffer = []
        self._len = 0
        self._overflow = False
    def dataReceived(self, data):
        self._len += len(data)
        if self._len > FLAGS.response_buffer_size_bytes:
            # Note this flag has to be set *before* calling loseConnection()
            # to ensure connectionLost gets called with the flag set.
            self._overflow = True
            self.transport.loseConnection()
        else:
            self._buffer.append(data)
    def connectionLost(self, reason):
        # Overflow takes precedence; otherwise any close reason other than
        # a clean/complete response is treated as a connection error.
        if self._overflow:
            self._finished.errback(HTTPResponseSizeExceededError(
                "Connection aborted: response size exceeded %d bytes" %
                FLAGS.response_buffer_size_bytes))
        elif not reason.check(*(error.ConnectionDone, client.ResponseDone,
                                http.PotentialDataLoss)):
            self._finished.errback(HTTPConnectionError(
                "Connection lost (received %d bytes)" % self._len))
        else:
            body = "".join(self._buffer)
            self._finished.callback(body)
class AsyncRequestHandler(object):
    """A helper for asynchronous response body delivery."""
    def __init__(self, agent):
        # agent: a twisted.web.client.Agent (or compatible) used to issue
        # the HTTP requests.
        self._agent = agent
    @staticmethod
    def _response_cb(response):
        # Validate the status line and headers first; convert an HTTPError
        # into a Failure so it propagates through the errback chain.
        try:
            log_client.RequestHandler.check_response_status(
                response.code, response.phrase,
                list(response.headers.getAllRawHeaders()))
        except log_client.HTTPError as e:
            return failure.Failure(e)
        # Body arrives asynchronously; the deferred fires with the complete
        # body (or an error) via ResponseBodyHandler.
        finished = defer.Deferred()
        response.deliverBody(ResponseBodyHandler(finished))
        return finished
    @staticmethod
    def _make_request(path, params):
        # Build "path?k=v&..." from params.
        # NOTE(review): keys/values are not URL-encoded and (Python 2) dict
        # order is arbitrary -- callers must pass URL-safe values.
        if not params:
            return path
        return path + "?" + "&".join(["%s=%s" % (key, value)
                                      for key, value in params.iteritems()])
    def get(self, path, params=None):
        # Issue an asynchronous GET; returns a deferred firing with the body.
        d = self._agent.request("GET", self._make_request(path, params))
        d.addCallback(self._response_cb)
        return d
class EntryProducer(object):
"""A push producer for log entries."""
implements(iweb.IBodyProducer)
    def __init__(self, handler, reactor, uri, start, end,
                 batch_size, entries_db=None):
        """Set up a producer for entries [start, end] (inclusive).

        Args:
            handler: AsyncRequestHandler used for get-entries fetches.
            reactor: twisted reactor used for scheduling retries.
            uri: base log server URI.
            start, end: inclusive range of entry indices to produce.
            batch_size: number of entries fetched per request.
            entries_db: optional on-disk store for fetched entries.
        """
        self._handler = handler
        self._reactor = reactor
        self._uri = uri
        self._entries_db = entries_db
        self._consumer = None
        # The requested range must be valid and non-empty.
        assert 0 <= start <= end
        self._start = start
        self._end = end
        self._current = self._start
        self._batch_size = batch_size
        self._batches = Queue()
        self._currently_fetching = 0
        self._currently_stored = 0
        self._last_fetching = self._current
        self._max_currently_fetching = (FLAGS.max_fetchers_in_parallel *
                                        self._batch_size)
        # Required attribute of the interface.
        self.length = iweb.UNKNOWN_LENGTH
        self.min_delay = FLAGS.get_entries_retry_delay
    @property
    def finished(self):
        """True once every entry in [start, end] has been produced."""
        return self._current > self._end
    def __fail(self, failure):
        # NOTE(review): _stopped and _done are not set in __init__; they are
        # presumably initialized by the produce/stop methods outside this
        # chunk -- confirm before relying on this early in the lifecycle.
        if not self._stopped:
            self.stopProducing()
            self._done.errback(failure)
    @staticmethod
    def _calculate_retry_delay(retries):
        """Calculates delay based on number of retries which already happened.
        Random is there, so we won't attack server lots of requests exactly
        at the same time, and 1.4 is nice constant for exponential back-off."""
        return ((0.4 + random.uniform(0.3, 0.6)) * FLAGS.get_entries_retry_delay
                * 1.4**retries)
    def _response_eb(self, failure, first, last, retries):
        """Error back for HTTP errors"""
        # NOTE(review): FLAGS.get_entries_max_retries is not among the flags
        # defined in this file's visible section -- presumably defined
        # elsewhere; confirm.
        if not self._paused:
            # if it's not last retry and failure wasn't our fault we retry
            if (retries < FLAGS.get_entries_max_retries and
                not failure.check(log_client.HTTPClientError)):
                logging.info("Error (%s): %s" % (self._uri, failure))
                logging.info("Retrying get-entries for range <%d, %d> retry: %d"
                             % (first, last, retries))
                d = task.deferLater(self._reactor,
                                    self._calculate_retry_delay(retries),
                                    self._fetch_parsed_entries,
                                    first, last)
                d.addErrback(self._response_eb, first, last, retries + 1)
                return d
            else:
                self.__fail(failure)
    def _fetch_eb(self, failure):
        """Error back for errors after getting result of a request
        (InvalidResponse)"""
        self.__fail(failure)
    def _write_pending(self):
        # Flush entries buffered in self._pending to the consumer, advancing
        # the produced counter; always returns a deferred.
        d = defer.Deferred()
        d.callback(None)
        if self._pending:
            self._current += len(self._pending)
            self._currently_stored -= len(self._pending)
            d = self._consumer.consume(self._pending)
            self._pending = None
        return d
    def _batch_completed(self, result):
        # Bookkeeping: a fetch finished, its entries are now held locally.
        self._currently_fetching -= len(result)
        self._currently_stored += len(result)
        return result
    def _store_batch(self, entry_batch, start_index):
        # Persist the batch on a worker thread; the deferred resolves with
        # the batch itself so callbacks can keep chaining on the entries.
        assert self._entries_db
        d = threads.deferToThread(self._entries_db.store_entries,
                                  enumerate(entry_batch, start_index))
        d.addCallback(lambda _: entry_batch)
        return d
def _get_entries_from_db(self, first, last):
if FLAGS.persist_entries and self._entries_db:
d = t
|
brainwane/carmen
|
carmen.py
|
Python
|
gpl-3.0
| 6,748
| 0.0123
|
#!/usr/bin/python
# Copyright 2013 Sumana Harihareswara
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A silly game in Python. See the README.
"""
import sys
import random
import textwrap
|
import mock
def anykey():
    """Pause the game until the player presses Return.

    The original bound the input (plus a pointless ``+ "a"``) to an unused
    local; the value was never read, so it is simply discarded now.
    """
    raw_input("Press Return to continue. ")
class City(object):
    """A stop on the trail.

    Attributes:
        name:  display name of the city.
        clue:  hint revealed when the villain was last seen here.
        dests: cities reachable in one step (populated after construction).
    """
    def __init__(self, n, c):
        self.name = n
        self.clue = c
        self.dests = []
class Villain(object):
    """The fugitive: random name, random starting city."""
    def __init__(self):
        self.name = random.choice(["Carmen", "Waldo", "Edward Snowden", "Lyra"])
        # Possible starting cities are module-level City globals.
        self.location = random.choice([cbl, chmr, ftl])
class Hero(object):
    """The player: always starts in Independence (module global ind)."""
    def __init__(self):
        self.location = ind
        self.name = raw_input("Detective at keyboard, identify yourself: ")
class Game():
    """One play session: a Hero chasing a Villain around the map."""
    def __init__(self):
        self.player = Hero()
        self.nemesis = Villain()
    def __repr__(self):
        return """A session of the game, with a hero and villain set up with names and locations."""
    def wincondition(self):
        """The player wins when s/he is in the same City as the nemesis."""
        return self.player.location == self.nemesis.location
    def playturn(self):
        # One round: show destinations, reveal a clue, take a move, recheck.
        print "%s, you are now in %s and you can head to:" % (self.player.name, self.player.location.name)
        self.where2go()
        print "You ask around about %s and learn that %s" % (self.nemesis.name, self.nemesis.location.clue)
        choice = raw_input('OK, now which way will you go? Choose a number. ')
        # NOTE(review): choose() returns status strings that are discarded
        # here, so "You follow ..." messages are never shown -- confirm.
        self.choose(choice)
        self.wincondition()
    def wingame(self):
        # NOTE(review): reads the module-level currentsession instead of
        # self -- only works for the global session; confirm intent.
        print "You found her in %s so you win!" % currentsession.nemesis.location.name
        playagain=raw_input('Would you like to play again? Y/N: ')
        if (playagain == "N") or (playagain == "n"):
            sys.exit()
        else:
            # Reset for a fresh chase: player back to start, new villain.
            self.player.location = ind
            self.nemesis = Villain()
            print "Get ready for a new game!"
            anykey()
    def where2go(self):
        # List the 1-based choices reachable from the current city.
        for i,x in enumerate(self.player.location.dests):
            print "%d. %s" % (i+1, x.name)
    def choose(self, path):
        # Validate input, move the player, then move the villain one step.
        try:
            path = int(path)
        except ValueError:
            print "That doesn't make sense, %s, because it's not the number for one of your possible destinations." % self.player.name
            print "So you stay in %s." % self.player.location.name
            return
        if path < 1 or path > (len(self.player.location.dests)):
            return "That doesn't make sense, %s, so you stay in %s." % (self.player.name, self.player.location.name)
        else:
            self.player.location = self.player.location.dests[path-1]
            if self.wincondition(): self.wingame()
            self.nemesis.location = random.choice(self.nemesis.location.dests)
            return "You follow %s to %s." % (self.nemesis.name, self.player.location.name)
# The map: every City(name, clue), then its one-step destinations below.
ind = City("Independence", "she thought she'd stock up for a journey -- bullets, yokes of oxen, and whatnot.")
sjo = City("Saint Joseph", "she had a headache and needed to find some baby aspirin.")
cbl = City("Council Bluffs", "she knew that you can't beat City Hall, but thought another municipal body might back down more easily.")
fkn = City("Fort Kearney", "she wanted to visit the easternmost point of the Platte River Valley's natural roadway.")
chmr = City("Chimney Rock", "the tow-headed woman was tired of spelunking and wanted to try climbing.")
ftl = City("Fort Laramie", "she had a lot of questions about the American Fur Company.")
vc = City("Virginia City", "she wanted to see the birthplace of Calamity Jane.")
sp = City("South Pass", "she said she was fixin' to cross the Continental Divide!")
slc = City("Salt Lake City", "she said she was planning on having coffee with the Prophet... they didn't have the heart to tell her.")
fh = City("Fort Hall", "she asked about the Snake River country.")
pdx = City("Portland", "she said she longed to see the future home of Open Source Bridge, the yearly conference by the Stumptown Syndicate.")
# Clue wit by Leonard. Thank you @leonardr.
# Edges; test_bidirectionalpaths() below patches any one-way links.
ind.dests = [fkn]
sjo.dests = [fkn]
cbl.dests = [fkn]
fkn.dests = [cbl, ind, ftl, sjo, vc, chmr]
chmr.dests = [fkn]
ftl.dests = [vc, sp, fkn]
vc.dests = [ftl, fkn]
sp.dests = [fh, ftl, slc]
slc.dests = [sp, fh]
fh.dests = [sp, pdx, slc]
pdx.dests = [fh]
def test_bidirectionalpaths():
    """Ensure every edge is two-way; report and patch any one-way edges."""
    for city in [ind, sjo, cbl, fkn, chmr, ftl, vc, sp, slc, fh, pdx]:
        for dest in city.dests:
            try:
                assert city in dest.dests
            except AssertionError:
                print "bidirectional fail! City" , city.name , "was not in" , dest.name , "destinations."
                dest.dests.append(city)
                print "fixed it!" , city.name , "now in destinations for" , dest.name , "in this list:", map(lambda x: x.name,dest.dests)
# Run at import time so the map is symmetric before play begins.
test_bidirectionalpaths()
def test_pathfinding():
# try to get ind-fkn-ftl-sp-slc-fh-pdx
# FIXME: does not work yet due to syntax error
try:
map(lambda x,y:assert y in x.dests,[[ind,fkn], [fkn,ftl], [ftl,sp], [sp,slc], [slc,fh], [fh,pdx]])
except AssertionError:
print "whoops!",y,"not in the destination list for",x
# would be good to do pathfinding
# Startup banner required by the GPL notice.
gpl = """You are now playing:
Where On The Oregon Trail is Carmen Sandiego?
Copyright (C) 2013 Sumana Harihareswara and licensed under the GNU Public License.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it under certain conditions; see https://www.gnu.org/licenses/gpl.txt for details."""
if __name__=="__main__":
    print gpl
    currentsession = Game()
    currentrank = "Okay, %s, your current rank is: Carpenter. Welcome to %s." % (currentsession.player.name, currentsession.player.location.name)
    print textwrap.fill(currentrank,70,replace_whitespace=False)
    # A fresh Hero and a fresh Villain are distinct objects, so this holds.
    assert currentsession.nemesis != currentsession.player
    anykey()
    print "%s has stolen a wagon tongue and Interpol has assigned you to catch her! Get ready for a chase!" % currentsession.nemesis.name
    while True:
        currentsession.playturn()
|
SEMAFORInformatik/femagtools
|
examples/model-creation/createall.py
|
Python
|
bsd-2-clause
| 886
| 0.001129
|
import femagtools
import importlib
import os
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
# FSL model scripts to generate; each name is also the module that builds it.
# (Two entries had been garbled by stray separator lines mid-literal.)
models = ['statorBG-magnetSector',
          'stator1-magnetIron3',
          'stator1-magnetIron4',
          'stator1-magnetIron5',
          'stator1-magnetIronV',
          'stator2-magnetSector',
          'stator4-magnetSector',
          'statorRotor3-magnetIron']
logger = logging.getLogger("fslcreator")
# All generated .fsl files are written under ~/femag.
workdir = os.path.join(os.path.expanduser('~'), 'femag')
try:
    os.mkdir(workdir)
except FileExistsError:  # reusing an existing directory is fine
    pass
logger.info("Femagtools Version %s Working Dir %s",
            femagtools.__version__, workdir)
for m in models:
    # Each model module is expected to expose create_fsl() returning a
    # list of FSL source lines.
    mod = importlib.import_module(m)
    logger.info("--> %s <--", m)
    with open(os.path.join(workdir, m+'.fsl'), 'w') as f:
        f.write('\n'.join(getattr(mod, 'create_fsl')()))
|
VitalPet/c2c-rd-addons
|
c2c_reporting_tools_chricar/core/__init__.py
|
Python
|
agpl-3.0
| 1,404
| 0.000713
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) Camptocamp SA
# Author: Arnaud WÃŒst
#
#
# This file is part of the c2c_report_tools module.
#
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import table_elements
|
tuttleofx/sconsProject
|
autoconf/fontconfig.py
|
Python
|
mit
| 105
| 0
|
from _external import *

# Checker that verifies availability of the fontconfig library together
# with its public header, linked as C code. (Rejoined the import line and
# header path that had been split by stray separator lines.)
fontconfig = LibWithHeaderChecker('fontconfig', 'fontconfig/fontconfig.h', 'c')
|
shitolepriya/Saloon_erp
|
erpnext/hr/doctype/attendance_status/test_attendance_status.py
|
Python
|
agpl-3.0
| 299
| 0.006689
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Attendance Status')
class TestAttendanceStatus(unittest.TestCase):
	"""Placeholder test case for the Attendance Status doctype.

	A stray separator line had split the class body; the class is
	otherwise an intentional stub.
	"""
	pass
|
stdweird/aquilon
|
lib/python2.6/aquilon/worker/commands/update_interface_machine.py
|
Python
|
apache-2.0
| 7,493
| 0.000801
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq update interface --machine`."""
from aquilon.exceptions_ import ArgumentError, AquilonError
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.dbwrappers.interface import (verify_port_group,
choose_port_group,
assign_address,
rename_interface)
from aquilon.worker.locks import lock_queue
from aquilon.worker.templates.machine import PlenaryMachineInfo
from aquilon.worker.processes import DSDBRunner
from aquilon.aqdb.model import Machine, Interface, Model
from aquilon.utils import first_of
class CommandUpdateInterfaceMachine(BrokerCommand):
    """Broker command backing `aq update interface --machine`."""

    # Rejoined: this attribute had been garbled by stray separator lines.
    required_parameters = ["interface", "machine"]
    def render(self, session, logger, interface, machine, mac, model, vendor,
               boot, pg, autopg, comments, master, clear_master, default_route,
               rename_to, **arguments):
        """This command expects to locate an interface based only on name
        and machine - all other fields, if specified, are meant as updates.
        If the machine has a host, dsdb may need to be updated.
        The boot flag can *only* be set to true. This is mostly technical,
        as at this point in the interface it is difficult to tell if the
        flag was unset or set to false. However, it also vastly simplifies
        the dsdb logic - we never have to worry about a user trying to
        remove the boot flag from a host in dsdb.
        """
        audit_results = []
        dbhw_ent = Machine.get_unique(session, machine, compel=True)
        dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                           name=interface, compel=True)
        # Snapshot taken before any change so DSDB can be diffed later.
        oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)
        if arguments.get('hostname', None):
            # Hack to set an intial interface for an aurora host...
            dbhost = dbhw_ent.host
            if dbhost.archetype.name == 'aurora' and \
               dbhw_ent.primary_ip and not dbinterface.addresses:
                assign_address(dbinterface, dbhw_ent.primary_ip,
                               dbhw_ent.primary_name.network)
        # We may need extra IP verification (or an autoip option)...
        # This may also throw spurious errors if attempting to set the
        # port_group to a value it already has.
        if pg is not None and dbinterface.port_group != pg.lower().strip():
            dbinterface.port_group = verify_port_group(
                dbinterface.hardware_entity, pg)
        elif autopg:
            dbinterface.port_group = choose_port_group(
                session, logger, dbinterface.hardware_entity)
            audit_results.append(('pg', dbinterface.port_group))
        if master:
            if dbinterface.addresses:
                # FIXME: as a special case, if the only address is the
                # primary IP, then we could just move it to the master
                # interface. However this can be worked around by bonding
                # the interface before calling "add host", so don't bother
                # for now.
                raise ArgumentError("Can not enslave {0:l} because it has "
                                    "addresses.".format(dbinterface))
            dbmaster = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                            name=master, compel=True)
            if dbmaster in dbinterface.all_slaves():
                raise ArgumentError("Enslaving {0:l} would create a circle, "
                                    "which is not allowed.".format(dbinterface))
            dbinterface.master = dbmaster
        if clear_master:
            if not dbinterface.master:
                raise ArgumentError("{0} is not a slave.".format(dbinterface))
            dbinterface.master = None
        if comments:
            dbinterface.comments = comments
        if boot:
            # Should we also transfer the primary IP to the new boot interface?
            # That could get tricky if the new interface already has an IP
            # address...
            # Exactly one interface may be bootable/default-route at a time.
            for i in dbhw_ent.interfaces:
                if i == dbinterface:
                    i.bootable = True
                    i.default_route = True
                else:
                    i.bootable = False
                    i.default_route = False
        if default_route is not None:
            dbinterface.default_route = default_route
            if not first_of(dbhw_ent.interfaces, lambda x: x.default_route):
                logger.client_info("Warning: {0:l} has no default route, hope "
                                   "that's ok.".format(dbhw_ent))
        #Set this mac address last so that you can update to a bootable
        #interface *before* adding a mac address. This is so the validation
        #that takes place in the interface class doesn't have to be worried
        #about the order of update to bootable=True and mac address
        if mac:
            q = session.query(Interface).filter_by(mac=mac)
            other = q.first()
            if other and other != dbinterface:
                raise ArgumentError("MAC address {0} is already in use by "
                                    "{1:l}.".format(mac, other))
            dbinterface.mac = mac
        if model or vendor:
            if not dbinterface.model_allowed:
                raise ArgumentError("Model/vendor can not be set for a {0:lc}."
                                    .format(dbinterface))
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       machine_type='nic', compel=True)
            dbinterface.model = dbmodel
        if rename_to:
            rename_interface(session, dbinterface, rename_to)
        session.flush()
        session.refresh(dbhw_ent)
        # Regenerate the plenary under its write lock; on any failure the
        # stash is restored so the on-disk template stays consistent.
        plenary_info = PlenaryMachineInfo(dbhw_ent, logger=logger)
        key = plenary_info.get_write_key()
        try:
            lock_queue.acquire(key)
            plenary_info.write(locked=True)
            if dbhw_ent.host and dbhw_ent.host.archetype.name != "aurora":
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbhw_ent, oldinfo)
                dsdb_runner.commit_or_rollback()
        except AquilonError, err:
            plenary_info.restore_stash()
            raise ArgumentError(err)
        except:
            plenary_info.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        for name, value in audit_results:
            self.audit_result(session, name, value, **arguments)
        return
|
astrieanna/haiku-dropbox-client
|
db_ls.py
|
Python
|
mit
| 476
| 0.004202
|
import sys
from cli_client import AP
|
P_KEY, APP_SECRET, DropboxTerm
def main(path):
if APP_KEY == '' or APP_SECRET == '':
exit("You need to set your APP_KEY and APP_SECRET!")
term = DropboxTerm(APP_KEY, APP_SECRET)
if path != "":
term.do_cd(path)
term.do_ls()
if __name__ == '__main__':
if len(sys.argv) == 1:
main("")
elif len(sys.argv) != 2:
|
print "usage: python db_ls.py <path>"
else:
main(sys.argv[1])
|
indico/indico
|
indico/web/flask/wrappers.py
|
Python
|
mit
| 9,269
| 0.001834
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
import re
from contextlib import contextmanager
from uuid import uuid4
from flask import Blueprint, Flask, current_app, g, request
from flask.blueprints import BlueprintSetupState
from flask.helpers import locked_cached_property
from flask.testing import FlaskClient
from flask.wrappers import Request
from flask_pluginengine import PluginFlaskMixin
from flask_webpackext import current_webpack
from jinja2 import FileSystemLoader, TemplateNotFound
from jinja2.runtime import StrictUndefined
from ua_parser import user_agent_parser
from werkzeug.datastructures import ImmutableOrderedMultiDict
from werkzeug.user_agent import UserAgent
from werkzeug.utils import cached_property
from indico.core.config import config
from indico.util.json import IndicoJSONEncoder
from indico.web.flask.session import IndicoSessionInterface
from indico.web.flask.templating import CustomizationLoader, IndicoEnvironment
from indico.web.flask.util import make_view_func
AUTH_BEARER_RE = re.compile(r'^Bearer (.+)$')
class ParsedUserAgent(UserAgent):
    """User agent wrapper backed by ``ua_parser``.

    Parses the raw UA string lazily (once) and exposes the OS family,
    browser family and dotted browser version.
    """

    @cached_property
    def _details(self):
        # Parse once per instance; all properties read from this dict.
        return user_agent_parser.Parse(self.string)

    @property
    def platform(self):
        return self._details['os']['family']

    @property
    def browser(self):
        return self._details['user_agent']['family']

    @property
    def version(self):
        ua = self._details['user_agent']
        parts = []
        for key in ('major', 'minor', 'patch'):
            value = ua[key]
            if value is not None:
                parts.append(value)
        return '.'.join(parts)
class IndicoRequest(Request):
    """Flask request subclass with Indico-specific conveniences."""
    # Preserve query/form parameter order when iterating.
    parameter_storage_class = ImmutableOrderedMultiDict
    user_agent_class = ParsedUserAgent
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.remote_addr is not None and self.remote_addr.startswith('::ffff:'):
            # convert ipv6-style ipv4 to the regular ipv4 notation
            self.remote_addr = self.remote_addr[7:]
    @cached_property
    def id(self):
        # Short random id, e.g. for correlating log entries of one request.
        return uuid4().hex[:16]
    @cached_property
    def relative_url(self):
        """The request's path including its query string if applicable."""
        return self.script_root + self.full_path.rstrip('?')
    @cached_property
    def bearer_token(self):
        """Bearer token included in the request, if any."""
        auth_header = request.headers.get('Authorization')
        if not auth_header:
            return None
        m = AUTH_BEARER_RE.match(auth_header)
        return m.group(1) if m else None
    @property
    def is_xhr(self):
        # XXX: avoid using this in new code; this header is non-standard and only set
        # by default in jquery, but not by anything else. check if the request accepts
        # json as an alternative.
        return self.headers.get('X-Requested-With', '').lower() == 'xmlhttprequest'
class IndicoFlaskClient(FlaskClient):
    """Test client that keeps ``g`` isolated between requests."""
    def open(self, *args, **kwargs):
        # our tests always push an app context, but we do not want to leak `g` between
        # test client calls, so we always use a throwaway app context for the requests
        with current_app.app_context():
            return super().open(*args, **kwargs)
class IndicoFlask(PluginFlaskMixin, Flask):
    """Flask app subclass wiring in Indico's JSON, session, request and
    template machinery."""
    json_encoder = IndicoJSONEncoder
    request_class = IndicoRequest
    session_interface = IndicoSessionInterface()
    test_client_class = IndicoFlaskClient
    jinja_environment = IndicoEnvironment
    jinja_options = dict(Flask.jinja_options, undefined=StrictUndefined)
    @property
    def session_cookie_name(self):
        # Use a separate cookie over plain HTTP so the secure cookie is
        # never sent on an insecure channel.
        name = super().session_cookie_name
        if not request.is_secure:
            name += '_http'
        return name
    def create_global_jinja_loader(self):
        default_loader = super().create_global_jinja_loader()
        # use an empty list if there's no global customization dir so we can
        # add directories of plugins later once they are available
        customization_dir = os.path.join(config.CUSTOMIZATION_DIR, 'templates') if config.CUSTOMIZATION_DIR else []
        return CustomizationLoader(default_loader, customization_dir, config.CUSTOMIZATION_DEBUG)
    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        from indico.web.rh import RHSimple
        # Endpoints from Flask-Multipass need to be wrapped in the RH
        # logic to get the autocommit logic and error handling for code
        # running inside the identity handler.
        if endpoint is not None and endpoint.startswith('_flaskmultipass'):
            view_func = RHSimple.wrap_function(view_func, disable_csrf_check=True)
        return super().add_url_rule(rule, endpoint=endpoint, view_func=view_func, **options)
    @property
    def has_static_folder(self):
        # Assets are served via the webpack manifest, not a static folder.
        return False
    @property
    def manifest(self):
        if 'custom_manifests' in g:
            return g.custom_manifests[None]
        return current_webpack.manifest
class IndicoBlueprintSetupState(BlueprintSetupState):
    """Setup state that understands '!'-prefixed (unprefixed) rules."""
    @contextmanager
    def _unprefixed(self):
        # Temporarily drop the blueprint's url_prefix.
        prefix = self.url_prefix
        self.url_prefix = None
        yield
        self.url_prefix = prefix
    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        if rule.startswith('!/'):
            with self._unprefixed():
                super().add_url_rule(rule[1:], endpoint, view_func, **options)
        else:
            super().add_url_rule(rule, endpoint, view_func, **options)
class IndicoBlueprint(Blueprint):
    """A Blueprint implementation that allows prefixing URLs with `!` to
    ignore the url_prefix of the blueprint.
    It also supports automatically creating rules in two versions - with and
    without a prefix.
    :param event_feature: If set, this blueprint will raise `NotFound`
                          for all its endpoints unless the event referenced
                          by the `event_id` URL argument has the specified
                          feature.
    """
    def __init__(self, *args, **kwargs):
        self.__prefix = None
        self.__default_prefix = ''
        self.__virtual_template_folder = kwargs.pop('virtual_template_folder', None)
        event_feature = kwargs.pop('event_feature', None)
        super().__init__(*args, **kwargs)
        if event_feature:
            @self.before_request
            def _check_event_feature():
                from indico.modules.events.features.util import require_feature
                event_id = request.view_args.get('event_id')
                if event_id is not None:
                    require_feature(event_id, event_feature)
    @locked_cached_property
    def jinja_loader(self):
        # NOTE(review): IndicoFileSystemLoader is not among this file's
        # visible imports -- presumably defined/imported elsewhere; confirm.
        if self.template_folder is not None:
            return IndicoFileSystemLoader(os.path.join(self.root_path, self.template_folder),
                                          virtual_path=self.__virtual_template_folder)
    def make_setup_state(self, app, options, first_registration=False):
        return IndicoBlueprintSetupState(self, app, options, first_registration)
    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        if view_func is not None:
            # We might have a RH class here - convert it to a callable suitable as a view func.
            view_func = make_view_func(view_func)
        super().add_url_rule(self.__default_prefix + rule, endpoint, view_func, **options)
        if self.__prefix:
            super().add_url_rule(self.__prefix + rule, endpoint, view_func, **options)
@contextmanager
def add_prefixed_rules(self, prefix, default_prefix=''):
"""Create prefixed rules in addition to the normal ones.
When specifying a default_prefix, too, the normally "unprefixed" rules
are prefixed with it.
"""
assert self.__prefix is None and not self.__default_prefix
self.__prefix = prefix
self.__default_prefix = default_prefix
yield
self.__prefix = None
|
tum-vision/articulation
|
articulation_tutorials/python_service_client/model_selection_client.py
|
Python
|
bsd-2-clause
| 1,925
| 0.038961
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('articulation_tutorials')
import rospy
import numpy
from articulation_msgs.msg import *
from articulation_msgs.srv import *
from geometry_msgs.msg import Pose, Point, Quaternion
from sensor_msgs.msg import ChannelFloat32
# Model type ids understood by the model-selection service.
PRISMATIC = 0
ROTATIONAL = 1
MODELS={PRISMATIC:'prismatic',ROTATIONAL:'rotational'}
def sample_track(model = PRISMATIC, n = 100, sigma_position = 0.02):
    """Build a noisy synthetic TrackMsg for the given articulation model.
    Args:
        model: PRISMATIC (straight line) or ROTATIONAL (unit-circle arc).
        n: number of poses along the trajectory.
        sigma_position: amplitude of the positional noise.
    Raises:
        NameError: if model is not one of the known types.
    """
    msg = TrackMsg()
    msg.header.stamp = rospy.get_rostime()
    msg.header.frame_id = "/"
    msg.id = model
    for i in range(0,n):
        q = i / float(n)
        if model == PRISMATIC:
            pose = Pose(Point(q, 0, 0), Quaternion(0, 0, 0, 1))
        elif model == ROTATIONAL:
            pose = Pose(Point(numpy.sin(q), numpy.cos(q) - 1.0, 0), Quaternion(0, 0, 0, 1))
        else:
            raise NameError("unknown model, cannot generate trajectory!")
        # NOTE(review): rand() is uniform on [0, 1), so this noise is biased
        # positive rather than zero-centered -- confirm this is intended.
        pose.position.x += numpy.random.rand()*sigma_position
        pose.position.y += numpy.random.rand()*sigma_position
        pose.position.z += numpy.random.rand()*sigma_position
        msg.pose.append( pose )
    return msg
def main():
rospy.init_node('test_fitting')
model_select = rospy.ServiceProxy('model_select', TrackModelSrv)
model_pub = rospy.Publisher('model', ModelMsg)
print
while True:
for model_type,model_name in MODELS
|
.items():
request = TrackModelSrvRequest()
print "generating track of type '%s'" % model_name
request.model.track = sample_track( model_type )
try:
response = model_select(request)
print "selected model: '%s' (n = %d, log LH = %f)" % (
response.model.name,
len(response.model.track.pose),
[entry.value for entry in response.model.params if entry.name==
|
'loglikelihood'][0]
)
model_pub.publish(response.model)
except rospy.ServiceException:
print "model selection failed"
pass
if rospy.is_shutdown():
exit(0)
print
rospy.sleep(0.5)
if __name__ == '__main__':
main()
|
wavesoft/creditpiggy
|
creditpiggy-server/creditpiggy/api/migrations/0002_projectcredentials_websitecredentials.py
|
Python
|
gpl-2.0
| 1,466
| 0.003411
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import creditpiggy.core.models
class Migration(migrations.Migration):
    """Create the ProjectCredentials and WebsiteCredentials API models.

    Rejoins two field definitions that had been garbled by stray
    separator lines; the schema itself is unchanged.
    """

    dependencies = [
        ('core', '0014_auto_20150616_1247'),
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProjectCredentials',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('token', models.CharField(default=creditpiggy.core.models.new_uuid, help_text=b'Anonymous authentication token for the credentials', unique=True, max_length=32, db_index=True)),
                ('secret', models.CharField(default=creditpiggy.core.models.gen_token_key, help_text=b'Shared secret between project and administrator', max_length=48)),
                ('project', models.ForeignKey(to='core.PiggyProject')),
            ],
        ),
        migrations.CreateModel(
            name='WebsiteCredentials',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('token', models.CharField(default=creditpiggy.core.models.new_uuid, help_text=b'Anonymous authentication token for the website', unique=True, max_length=32, db_index=True)),
                ('domains', models.TextField(default=b'[]')),
            ],
        ),
    ]
|
iawells/gluon
|
gluon/common/particleGenerator/DataBaseModelGenerator.py
|
Python
|
apache-2.0
| 6,227
| 0.000803
|
#!/usr/bin/python
from __future__ import print_function
import sys
import re
import yaml
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
class DataBaseModelProcessor(object):
    def __init__(self):
        # Mapping of table name -> generated SQLAlchemy model class,
        # populated by build_sqla_models().
        self.db_models = {}
    def add_model(self, model):
        # Raw (YAML-derived) model description used to build the classes.
        self.data = model
def get_table_class(self, table_name):
try:
return self.db_models[table_name]
except ValueError as e:
raise Exception('Unknown table name %s' % table_name)
def build_sqla_models(self, base=None):
"""Make SQLAlchemy classes for each of the elements in the data read"""
if not base:
base = declarative_base()
if not self.data:
raise Exception('Cannot create Database Model from empty model.')
def de_camel(s):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
# Make a model class that we've never thought of before
for table_name, table_data in self.data.iteritems():
self.get_primary_key(table_data)
for table_name, table_data in self.data.iteritems():
try:
attrs = {}
for col_name, col_desc in table_data['attributes'].iteritems():
try:
options = {}
args = []
# Step 1: deal with object xrefs
if col_desc['type'] in self.data:
# This is a foreign key reference. Make the column
# like the FK, but drop the primary from it and
# use the local one.
tgt_name = col_desc['type']
tgt_data = self.data[tgt_name]
primary_col = tgt_data['primary']
repl_col_desc = \
dict(tgt_data['attributes'][primary_col])
if 'primary' in repl_col_desc:
# The FK will be a primary, doesn't mean we are
del repl_col_desc['primary']
# May still be the local PK if we used to be,
# though
if col_desc.get('primary'):
repl_col_desc['primary'] = True
# Set the SQLA col option to make clear what's
# going on
args.append(sa.ForeignKey('%s.%s' %
(de_camel(tgt_name),
primary_col)))
# The col creation code will now duplicate the FK
# column nicely
col_desc = repl_col_desc
# Step 2: convert our special types to ones a DB likes
if col_desc['type'] == 'uuid':
# UUIDs, from a DB perspective, are a form of
# string
repl_col_desc = dict(col_desc)
repl_col_desc['type'] = 'string'
repl_col_desc['length'] = 64
col_desc = repl_col_desc
# Step 3: with everything DB-ready, spit out the table
# definition
if col_desc.get('primary', False):
options['primary_key'] = True
# Save the information about the primary key as well
# in the object
attrs['_primary_key'] = col_name
required = col_desc.get('required', False)
options['nullable'] = not required
if col_desc['type'] == 'string':
attrs[col_name] = sa.Column(sa.String(
col_desc['length']), *args, **options)
elif col_desc['type'] == 'integer':
attrs[col_name] = sa.Column(sa.Integer(), *args,
**options)
elif col_desc['type'] == 'boolean':
attrs[col_name] = sa.Column(sa.Boolean(), *args,
**options)
elif col_desc['type'] == 'enum':
attrs[col_name] = sa.Column(
sa.Enum(*col_desc['values']), *args,
**options)
else:
raise Exception('Unknown column type %s' %
col_desc['type'])
except:
print('During processing of attribute ', col_name,
file=sys.stderr)
raise
if not '_primary_key' in attrs:
raise Exception("One and only one primary key has to "
"be given to each column")
attrs['__tablename__'] = de_camel(table_name)
|
attrs['__name__'] = table_name
self.db_models[table_name] = ty
|
pe(table_name, (base,), attrs)
except:
print('During processing of table ', table_name,
file=sys.stderr)
raise
@classmethod
def get_primary_key(cls, table_data):
primary = []
for k, v in table_data['attributes'].iteritems():
if 'primary' in v:
primary = k
break
# If not specified, a UUID is used as the PK
if not primary:
table_data['attributes']['uuid'] = \
{'type': 'string', 'length': 36, 'primary': True,
'required': True}
primary = 'uuid'
table_data['primary'] = primary
return primary
|
samuelmaudo/yepes
|
yepes/forms/inline_model.py
|
Python
|
bsd-3-clause
| 2,021
| 0.000495
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.forms.models import (
inlineformset_factory,
ModelForm, ModelFormMetaclass,
)
from django.utils import six
class InlineModelFormMetaclass(ModelFormMetaclass):
    """Form metaclass that copies ``Meta.inlines`` onto the form's _meta."""

    def __new__(cls, name, bases, attrs):
        # Resolve the Meta options: prefer the class's own Meta, otherwise
        # take the first one found walking the bases from last to first.
        options = attrs.get('Meta')
        if options is None:
            for base in reversed(bases):
                options = getattr(base, 'Meta', None)
                if options is not None:
                    break
        new_cls = super(InlineModelFormMetaclass, cls).__new__(
            cls, name, bases, attrs)
        # Expose the inline declarations (if any) on the generated form.
        new_cls._meta.inlines = getattr(options, 'inlines', ())
        return new_cls
@six.add_metaclass(InlineModelFormMetaclass)
class InlineModelForm(ModelForm):
    """ModelForm that also builds and manages the inline formsets declared
    in ``Meta.inlines``.

    Each inline may be given as a field name, or as a
    ``(field_name, formset_kwargs)`` pair.
    """

    def __init__(self, data=None, files=None, inlines=(), *args, **kwargs):
        super(InlineModelForm, self).__init__(data, files, *args, **kwargs)
        opts = self._meta
        model = self.instance.__class__
        self._inline_form_sets = []
        for field_name in (inlines or opts.inlines):
            # Renamed from 'kwargs' to avoid shadowing the __init__ kwargs.
            formset_kwargs = {'extra': 0}
            if not isinstance(field_name, six.string_types):
                # (field_name, extra_formset_kwargs) pair.
                formset_kwargs.update(field_name[1])
                field_name = field_name[0]
            field = getattr(model, field_name).related
            FormSet = inlineformset_factory(model, field.model,
                                            **formset_kwargs)
            form_set = FormSet(data=data, files=files, instance=self.instance)
            self._inline_form_sets.append(form_set)
            setattr(self, field_name, form_set)

    def is_valid(self):
        """Validate the form and every inline formset."""
        valid = super(InlineModelForm, self).is_valid()
        for form_set in self._inline_form_sets:
            if not form_set.is_valid():
                valid = False
        return valid

    def save(self, commit=True):
        """Save the main instance plus all inline instances.

        Returns the list of saved instances (main instance first).
        """
        instances = [super(InlineModelForm, self).save(commit=commit)]
        for form_set in self._inline_form_sets:
            instances.extend(form_set.save(commit=commit))
        return instances
|
gautamMalu/rootfs_xen_arndale
|
usr/lib/python2.7/plat-arm-linux-gnueabihf/IN.py
|
Python
|
gpl-2.0
| 15,532
| 0.009207
|
# Generated by h2py from /usr/include/netinet/in.h
_NETINET_IN_H = 1
# Included from features.h
_FEATURES_H = 1
_ISOC95_SOURCE = 1
_ISOC99_SOURCE = 1
_ISOC11_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 200809L
_XOPEN_SOURCE = 700
_XOPEN_SOURCE_EXTENDED = 1
_LARGEFILE64_SOURCE = 1
_DEFAULT_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_ATFILE_SOURCE = 1
_DEFAULT_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
__USE_ISOC11 = 1
__USE_ISOC99 = 1
__USE_ISOC95 = 1
__USE_ISOCXX11 = 1
__USE_POSIX_IMPLICITLY = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 200809L
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
_POSIX_C_SOURCE = 199506L
_POSIX_C_SOURCE = 200112L
_POSIX_C_SOURCE = 200809L
__USE_POSIX_IMPLICITLY = 1
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_POSIX199309 = 1
__USE_POSIX199506 = 1
__USE_XOPEN2K = 1
__USE_ISOC95 = 1
__USE_ISOC99 = 1
__USE_XOPEN2K8 = 1
_ATFILE_SOURCE = 1
__USE_XOPEN = 1
__USE_XOPEN_EXTENDED = 1
__USE_UNIX98 = 1
_LARGEFILE_SOURCE = 1
__USE_XOPEN2K8 = 1
__USE_XOPEN2K8XSI = 1
__USE_XOPEN2K = 1
__USE_XOPEN2KXSI = 1
__USE_ISOC95 = 1
__USE_ISOC99 = 1
__USE_XOPEN_EXTENDED = 1
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_FILE_OFFSET64 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_ATFILE = 1
__USE_GNU = 1
__USE_REENTRANT = 1
__USE_FORTIFY_LEVEL = 2
__USE_FORTIFY_LEVEL = 1
__USE_FORTIFY_LEVEL = 0
__GNU_LIBRARY__ = 6
__GLIBC__ = 2
__GLIBC_MINOR__ = 19
# Included from sys/cdefs.h
_SYS_CDEFS_H = 1
def __NTH(fct): return fct
def __NTH(fct): return fct
def __P(args): return args
def __PMT(args): return args
def __STRING(x): return #x
def __bos(ptr): return __builtin_object_size (ptr, __USE_FORTIFY_LEVEL > 1)
def __bos0(ptr): return __builtin_object_size (ptr, 0)
def __warnattr(msg): return __attribute__((__warning__ (msg)))
__flexarr = []
__flexarr = [0]
__flexarr = []
__flexarr = [1]
def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
def __attribute__(xyz): return
def __attribute_alloc_size__(params): return \
def __attribute_alloc_size__(params): return
def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
def __attribute_format_arg__(x): return
def __glibc_unlikely(cond): return __builtin_expect ((cond), 0)
def __glibc_likely(cond): return __builtin_expect ((cond), 1)
def __glibc_unlikely(cond): return (cond)
def __glibc_likely(cond): return (cond)
# Included from bits/wordsize.h
__WORDSIZE = 32
__LDBL_COMPAT = 1
def __LDBL_REDIR_DECL(name): return \
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_EXTERN_INLINES = 1
# Included from gnu/stubs.h
# Included from stdint.h
_STDINT_H = 1
# Included from bits/wchar.h
_BITS_WCHAR_H = 1
def __INT64_C(c): return c ## L
def __UINT64_C(c): return c ## UL
def __INT64_C(c): return c ## LL
def __UINT64_C(c): return c ## ULL
INT8_MIN = (-128)
INT16_MIN = (-32767-1)
INT32_MIN = (-2147483647-1)
INT64_MIN = (-__INT64_C(9223372036854775807)-1)
INT8_MAX = (127)
INT16_MAX = (32767)
INT32_MAX = (2147483647)
INT64_MAX = (__INT64_C(9223372036854775807))
UINT8_MAX = (255)
UINT16_MAX = (65535)
UINT64_MAX = (__UINT64_C(18446744073709551615))
INT_LEAST8_MIN = (-128)
INT_LEAST16_MIN = (-32767-1)
INT_LEAST32_MIN = (-2147483647-1)
INT_LEAST64_MIN = (-__INT64_C(9223372036854775807)-1)
INT_LEAST8_MAX = (127)
INT_LEAST16_MAX = (32767)
INT_LEAST32_MAX = (2147483647)
INT_LEAST64_MAX = (__INT64_C(9223372036854775807))
UINT_LEAST8_MAX = (255)
UINT_LEAST16_MAX = (65535)
UINT_LEAST64_MAX = (__UINT64_C(18446744073709551615))
INT_FAST8_MIN = (-128)
INT_FAST16_MIN = (-9223372036854775807L-1)
INT_FAST32_MIN = (-9223372036854775807L-1)
INT_FAST16_MIN = (-2147483647-1)
INT_FAST32_MIN = (-2147483647-1)
INT_FAST64_MIN = (-__INT64_C(9223372036854775807)-1)
INT_FAST8_MAX = (127)
INT_FAST16_MAX = (9223372036854775807L)
INT_FAST32_MAX = (9223372036854775807L)
INT_FAST16_MAX = (2147483647)
INT_FAST32_MAX = (2147483647)
INT_FAST64_MAX = (__INT64_C(9223372036854775807))
UINT_FAST8_MAX = (255)
UINT_FAST64_MAX = (__UINT64_C(18446744073709551615))
INTPTR_MIN = (-9223372036854775807L-1)
INTPTR_MAX = (9223372036854775807L)
INTPTR_MIN = (-2147483647-1)
INTPTR_MAX = (2147483647)
INTMAX_MIN = (-__INT64_C(9223372036854775807)-1)
INTMAX_MAX = (__INT64_C(9223372036854775807))
UINTMAX_MAX = (__UINT64_C(18446744073709551615))
PTRDIFF_MIN = (-9223372036854775807L-1)
PTRDIFF_MAX = (9223372036854775807L)
PTRDIFF_MIN = (-2147483647-1)
PTRDIFF_MAX = (2147483647)
SIG_ATOMIC_MIN = (-2147483647-1)
SIG_ATOMIC_MAX = (2147483647)
def INT8_C(c): return c
def INT16_C(c): return c
def INT32_C(c): return c
def INT64_C(c): return c ## L
def INT64_C(c): return c ## LL
def UINT8_C(c): return c
def UINT16_C(c): return c
def UINT32_C(c): return c ## U
def UINT64_C(c): return c ## UL
def UINT64_C(c): return c ## ULL
def INTMAX_C(c): return c ## L
def UINTMAX_C(c): return c ## UL
def INTMAX_C(c): return c ## LL
def UINTMAX_C(c): return c ## ULL
# Included from sys/socket.h
_SYS_SOCKET_H = 1
# Included from sys/uio.h
_SYS_UIO_H = 1
# Included from sys/types.h
_SYS_TYPES_H = 1
# Included from bits/types.h
_BITS_TYPES_H = 1
__S32_TYPE = int
__SWORD_TYPE = int
__SLONG32_TYPE = int
# Included from bits/typesizes.h
_BITS_TYPESIZES_H = 1
__PID_T_TYPE = __S32_TYPE
__FSWORD_T_TYPE = __SWORD_TYPE
__DADDR_T_TYPE = __S32_TYPE
__KEY_T_TYPE = __S32_TYPE
__CLOCKID_T_TYPE = __S32_TYPE
__SSIZE_T_TYPE = __SWORD_TYPE
__OFF_T_MATCHES_OFF64_T = 1
__INO_T_MATCHES_INO64_T = 1
__FD_SETSIZE = 1024
# Included from time.h
_TIME_H = 1
# Included from bits/time.h
_STRUCT_TIMEVAL = 1
_BITS_TIME_H = 1
CLOCKS_PER_SEC = 1000000l
CLOCK_REALTIME = 0
CLOCK_MONOTONIC = 1
CLOCK_PROCESS_CPUTIME_ID = 2
CLOCK_THREAD_CPUTIME_ID = 3
CLOCK_MONOTONIC_RAW = 4
CLOCK_REALTIME_COARSE = 5
CLOCK_MONOTONIC_COARSE = 6
CLOCK_BOOTTIME = 7
CLOCK_REALTIME_ALARM = 8
CLOCK_BOOTTIME_ALARM = 9
TIMER_ABSTIME = 1
# Included from bits/timex.h
_BITS_TIMEX_H = 1
ADJ_OFFSET = 0x0001
ADJ_FREQUENCY = 0x0002
ADJ_MAXERROR = 0x0004
ADJ_ESTERROR = 0x0008
ADJ_STATUS = 0x0010
ADJ_TIMECONST = 0x0020
ADJ_TAI = 0x0080
ADJ_MICRO = 0x1000
ADJ_NANO = 0x2000
ADJ_TICK = 0x4000
ADJ_OFFSET_SINGLESHOT = 0x8001
ADJ_OFFSET_SS_READ = 0xa001
MOD_OFFSET = ADJ_OFFSET
MOD_FREQUENCY = ADJ_FREQUENCY
MOD_MAXERROR = ADJ_MAXERROR
MOD_ESTERROR = ADJ_ESTERROR
MOD_STATUS = ADJ_STATUS
MOD_TIMECONST = ADJ_TIMECONST
MOD_CLKB = ADJ_TICK
MOD_CLKA = ADJ_OFFSET_SINGLESHOT
MOD_TAI = ADJ_TAI
MOD_MICRO = ADJ_MICRO
MOD_NANO = ADJ_NANO
STA_PLL = 0x0001
STA_PPSFREQ = 0x0002
STA_PPSTIME = 0x0004
STA_FLL = 0x0008
STA_INS = 0x0010
STA_DEL = 0x0020
STA_UNSYNC = 0x0040
STA_FREQHOLD = 0x0080
STA_PPSSIGNAL = 0x0100
STA_PPSJITTER = 0x0200
STA_PPSWANDER = 0x0400
STA_PPSERROR = 0x0800
STA_CLOCKERR = 0x1000
STA_NANO = 0x2000
STA_MODE = 0x4000
STA_CLK = 0x8000
STA_RONLY = (STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | \
STA_PPSERROR | STA_CLOCKERR | STA_NANO | STA_MODE | STA_CLK)
CLK_TCK = CLOCKS_PER_SEC
__clock_t_defined = 1
__time_t_defined = 1
__clockid_t_defined = 1
__timer_t_defined = 1
__timespec_defined = 1
TIME_UTC = 1
# Included from xlocale.h
_XLOCALE_H = 1
# Reconstructed from a corrupted dump: h2py's translation of glibc's
# __isleap() macro (Gregorian leap-year rule) — TODO confirm against the
# original generated file.
def __isleap(year): return \
 ((year) % 4 == 0 and ((year) % 100 != 0 or (year) % 400 == 0))
__BIT_TYPES_DEFINED__ = 1
# Included from endian.h
_ENDIAN_H = 1
__LITTLE_ENDIAN = 1234
__BIG_ENDIAN = 4321
__PDP_ENDIAN = 3412
# Included from bits/endian.h
__BYTE_ORDER = __BIG_ENDIAN
# Rejoined from a corrupted '__LITTLE_EN|DIAN' split.  h2py emits both
# branches of the #ifdef; this ARM target is little-endian, so the second
# (effective) assignment wins.
__BYTE_ORDER = __LITTLE_ENDIAN
__FLOAT_WORD_ORDER = __BYTE_ORDER
LITTLE_ENDIAN = __LITTLE_ENDIAN
BIG_ENDIAN = __BIG_ENDIAN
PDP_ENDIAN = __PDP_ENDIAN
BYTE_ORDER = __BYTE_ORDER
# Included from bits/byteswap.h
_BITS_BYTESWAP_H = 1
def __bswap_constant_16(x): return \
def __bswap_constant_32(x): return \
def __bswap_32(x): return \
def __bswap_constant_64(x): return \
def __bswap_64(x): return \
def __bswap_constant_64(x): return \
def htobe16(x): return __bswap_16 (x)
def htole16(x): return (x)
def be16toh(x): return __bswap_16 (x)
def le16toh(x): return (x)
def htobe32(x): return __bswap_32 (x)
def htole32(x): return (x)
def be32toh(x): return __bswap_32 (x)
def le32toh(x): return (x)
def htobe64(x): return __bswap_64 (x)
def htole64(x): return (x)
def be64toh(x): return __bswap_64 (x)
def le64toh(x): return (x)
def htobe16(x): return (x)
def htole16(x): return __bswap_16 (x)
def be16toh(x): ret
|
sergiopasra/megaradrp
|
megaradrp/processing/tests/test_fibermatch.py
|
Python
|
gpl-3.0
| 2,099
| 0.000953
|
import pytest
from megaradrp.processing.fibermatch import generate_box_model
from megaradrp.processing.fibermatch import count_peaks
PEAKS = [
3.806000000000000000e+03,
3.812000000000000000e+03,
3.818000000000000000e+03,
3.824000000000000000e+03,
3.830000000000000000e+03,
3.836000000000000000e+03,
3.842000000000000000e+03,
3.848000000000000000e+03,
3.854000000000000000e+03,
3.860000000000000000e+03,
3.867000000000000000e+03,
3.872000000000000000e+03,
3.878000000000000000e+03,
3.884000000000000000e+03,
3.890000000000000000e+03,
3.897000000000000000e+03,
3.903000000000000000e+03,
3.909000000000000000e+03,
3.915000000000000000e+03,
3.921000000000000000e+03
]
def test_generate_model():
    """generate_box_model: numbering, missing fibers and skipped fibids."""
    expected = [
        (1, 0),
        (2, 0),
        (3, 0),
        (4, 0),
        (5, 0)
    ]
    model = generate_box_model(5, start=1)
    assert len(model) == len(expected)
    for m, e in zip(model, expected):
        assert m == e

    # Relative position 2 is flagged as a missing fiber.
    expected = [
        (1, 0),
        (2, 1),
        (3, 0),
        (4, 0),
        (5, 0)
    ]
    model = generate_box_model(5, missing_relids=[2])
    assert len(model) == len(expected)
    for m, e in zip(model, expected):
        assert m == e

    # fibid 11 is skipped entirely; relative position 2 is missing.
    expected = [
        (10, 0),
        (12, 1),
        (13, 0),
        (14, 0),
        (15, 0)
    ]
    model = generate_box_model(5, start=10, skip_fibids=[11],
                               missing_relids=[2])
    assert len(model) == len(expected)
    for m, e in zip(model, expected):
        assert m == e
def test_count_peaks1():
    # An empty list of peak positions is invalid input.
    with pytest.raises(ValueError):
        count_peaks([])
def test_count_peaks():
    """With evenly spaced peaks every fiber matches its own peak index."""
    # Each expected entry is (fibid, position, missing_flag, peak_index).
    expected = [(idx + 1, p, 0, idx) for idx, p in enumerate(PEAKS)]
    result = count_peaks(PEAKS, tol=1.2, distance=6.0)
    assert result == expected
|
a67878813/script
|
handwrite.py
|
Python
|
apache-2.0
| 765
| 0.023112
|
from PIL import Image, ImageFont
from handright import Template, handwrite
text = "啊啊啊啊巴巴爸爸啛啛喳喳顶顶顶顶柔柔弱弱共和国刚刚\n\r 顶顶顶顶灌灌灌灌哈哈哈哈斤斤计较坎坎坷坷啦啦啦啦噢噢噢噢噗噗噗噗噗"
# Template controlling the simulated-handwriting layout and random jitter
# (corrupted keyword arguments rejoined from the dump).
template = Template(
    background=Image.new(mode="1", size=(3300, 1000), color=1),
    font=ImageFont.truetype("C:\\font\\MiNiJianJiaShu-1.ttf", size=150),
    word_spacing=2,
    line_spacing_sigma=1,
    font_size_sigma=2,
    word_spacing_sigma=0.8,
    perturb_x_sigma=3,
    perturb_y_sigma=3,
    perturb_theta_sigma=0.1,
)
images = handwrite(text, template)
# Bug fix: every page used to be written to the same "3.png", so only the
# last page survived.  Number the pages instead (first page keeps "3.png").
for page, im in enumerate(images, start=3):
    assert isinstance(im, Image.Image)
    im.save("C:\\font\\{}.png".format(page))
    # im.show()
|
alexmilowski/duckpond
|
duckpond/apps/service/app.py
|
Python
|
apache-2.0
| 1,161
| 0.023256
|
from flask import Flask, request, g, session, redirect, abort, Response
from datetime import datetime
from functools import wraps
import base64
app = Flask(__name__)
app.config.from_envvar('WEB_CONF')
def authenticate(f):
    """Decorator enforcing HTTP Basic or Bearer authentication.

    On success, sets ``request.roles`` from the auth service and calls the
    wrapped view; otherwise returns a 401 with a Basic challenge.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        v = request.headers.get('authorization')
        authenticated = False
        if v is not None and v.startswith('Basic '):
            # Decode "user:password" from the base64 payload.
            s = str(base64.b64decode(v[6:]), 'utf-8')
            username = s[0:s.find(':')]
            password = s[s.find(':') + 1:]
            info = app.config['AUTH_SERVICE'].authenticate(username, password)
            if info is not None:
                request.roles = info.get('roles')
                authenticated = True
        if v is not None and v.startswith('Bearer '):
            roles = app.config['AUTH_SERVICE'].validateToken(v[7:])
            if roles is not None:
                authenticated = True
                request.roles = roles
        if authenticated:
            return f(*args, **kwargs)
        else:
            return Response(
                status=401,
                headers={'WWW-Authenticate': 'Basic realm="service"'})
    return wrapper
@app.before_request
@authenticate
def before_request():
    # Runs before every request; the @authenticate wrapper rejects
    # unauthenticated calls with a 401 before any view executes.
    pass
|
bitmazk/django-feedback-form
|
feedback_form/models.py
|
Python
|
mit
| 2,071
| 0
|
"""Models for the ``feedback_form`` app."""
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class Feedback(models.Model):
    """
    Holds information about one user feedback.

    :user: User account of the poster, if logged in.
    :email: Email field, if user isn't logged in and wants to send her email.
    :current_url: URL of the current page.
    :message: Feedback text.
    :creation_date: Datetime of the feedback creation.
    :content_object: Optional related object the feedback is referring to.

    """
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name=_('User'),
        related_name='feedback_form_submissions',
        blank=True, null=True,
    )
    email = models.EmailField(
        verbose_name=_('Email'),
        blank=True,
    )
    current_url = models.URLField(
        verbose_name=_('Current URL'),
        max_length=4000,
        blank=True,
    )
    message = models.TextField(
        verbose_name=_('Message'),
        max_length=4000,
    )
    creation_date = models.DateTimeField(
        auto_now_add=True,
        verbose_name=_('Creation Date'),
    )
    # Generic FK to the object this feedback is about.
    content_type = models.ForeignKey(
        ContentType,
        related_name='feedback_content_objects',
        null=True, blank=True,
    )
    object_id = models.PositiveIntegerField(null=True, blank=True)
    content_object = GenericForeignKey('content_type', 'object_id')

    class Meta:
        ordering = ['-creation_date']

    def __str__(self):
        # Prefer the user, then the email, as the feedback's identity.
        if self.user:
            return '{0} - {1}'.format(self.creation_date, self.user)
        elif self.email:
            return '{0} - {1}'.format(self.creation_date, self.email)
        return '{0}'.format(self.creation_date)
|
Russell-IO/ansible
|
lib/ansible/modules/network/netscaler/netscaler_gslb_service.py
|
Python
|
gpl-3.0
| 24,020
| 0.002748
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
# Ensure new-style classes under Python 2.
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_gslb_service
short_description: Manage gslb service entities in Netscaler.
description:
-
|
Manage gslb service entities in Netscaler.
version_added: "2.4"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
servicename:
description:
- >-
Name for the GSLB service. Must begin with an ASCII alphanumeric or underscore C(_) character, and
must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space, colon C(:), at C(@),
equals C(=), and hyphen C(-) characters. Can be changed after the GSLB service is created.
- >-
- "Minimum length = 1"
cnameentry:
description:
- "Canonical name of the GSLB service. Used in CNAME-based GSLB."
- "Minimum length = 1"
servername:
description:
- "Name of the server hosting the GSLB service."
- "Minimum length = 1"
servicetype:
choices:
- 'HTTP'
- 'FTP'
- 'TCP'
- 'UDP'
- 'SSL'
- 'SSL_BRIDGE'
- 'SSL_TCP'
- 'NNTP'
- 'ANY'
- 'SIP_UDP'
- 'SIP_TCP'
- 'SIP_SSL'
- 'RADIUS'
- 'RDP'
- 'RTSP'
- 'MYSQL'
- 'MSSQL'
- 'ORACLE'
description:
- "Type of service to create."
port:
description:
- "Port on which the load balancing entity represented by this GSLB service listens."
- "Minimum value = 1"
- "Range 1 - 65535"
- "* in CLI is represented as 65535 in NITRO API"
publicip:
description:
- >-
The public IP address that a NAT device translates to the GSLB service's private IP address.
Optional.
publicport:
description:
- >-
The public port associated with the GSLB service's public IP address. The port is mapped to the
service's private port number. Applicable to the local GSLB service. Optional.
maxclient:
description:
- >-
The maximum number of open connections that the service can support at any given time. A GSLB service
whose connection count reaches the maximum is not considered when a GSLB decision is made, until the
connection count drops below the maximum.
- "Minimum value = C(0)"
- "Maximum value = C(4294967294)"
healthmonitor:
description:
- "Monitor the health of the GSLB service."
type: bool
sitename:
description:
- "Name of the GSLB site to which the service belongs."
- "Minimum length = 1"
cip:
choices:
- 'enabled'
- 'disabled'
description:
- >-
In the request that is forwarded to the GSLB service, insert a header that stores the client's IP
address. Client IP header insertion is used in connection-proxy based site persistence.
cipheader:
description:
- >-
Name for the HTTP header that stores the client's IP address. Used with the Client IP option. If
client IP header insertion is enabled on the service and a name is not specified for the header, the
NetScaler appliance uses the name specified by the cipHeader parameter in the set ns param command
or, in the GUI, the Client IP Header parameter in the Configure HTTP Parameters dialog box.
- "Minimum length = 1"
sitepersistence:
choices:
- 'ConnectionProxy'
- 'HTTPRedirect'
- 'NONE'
description:
- "Use cookie-based site persistence. Applicable only to C(HTTP) and C(SSL) GSLB services."
siteprefix:
description:
- >-
The site's prefix string. When the service is bound to a GSLB virtual server, a GSLB site domain is
generated internally for each bound service-domain pair by concatenating the site prefix of the
service and the name of the domain. If the special string NONE is specified, the site-prefix string
is unset. When implementing HTTP redirect site persistence, the NetScaler appliance redirects GSLB
requests to GSLB services by using their site domains.
clttimeout:
description:
- >-
Idle time, in seconds, after which a client connection is terminated. Applicable if connection proxy
based site persistence is used.
- "Minimum value = 0"
- "Maximum value = 31536000"
maxbandwidth:
description:
- >-
Integer specifying the maximum bandwidth allowed for the service. A GSLB service whose bandwidth
reaches the maximum is not considered when a GSLB decision is made, until its bandwidth consumption
drops below the maximum.
downstateflush:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Flush all active transactions associated with the GSLB service when its state transitions from UP to
DOWN. Do not enable this option for services that must complete their transactions. Applicable if
connection proxy based site persistence is used.
maxaaausers:
description:
- >-
Maximum number of SSL VPN users that can be logged on concurrently to the VPN virtual server that is
represented by this GSLB service. A GSLB service whose user count reaches the maximum is not
considered when a GSLB decision is made, until the count drops below the maximum.
- "Minimum value = C(0)"
- "Maximum value = C(65535)"
monthreshold:
description:
- >-
Monitoring threshold value for the GSLB service. If the sum of the weights of the monitors that are
bound to this GSLB service and are in the UP state is not equal to or greater than this threshold
value, the service is marked as DOWN.
- "Minimum value = C(0)"
- "Maximum value = C(65535)"
hashid:
description:
- "Unique hash identifier for the GSLB service, used by hash based load balancing methods."
- "Minimum value = C(1)"
comment:
description:
- "Any comments that you might want to associate with the GSLB service."
appflowlog:
choices:
- 'enabled'
- 'disabled'
description:
- "Enable logging appflow flow information."
ipaddress:
description:
- >-
IP address for the GSLB service. Should represent a load balancing, content switching, or VPN virtual
server on the NetScaler appliance, or the IP address of another load balancing device.
monitor_bindings:
description:
- Bind monitors to this gslb service
suboptions:
weight:
description:
- Weight to assign to the monitor-service binding.
- A larger number specifies a greater weight.
- Contributes to the monitoring threshold, which determines the state of the service.
- Minimum value = C(1)
- Maximum value = C(100)
monitor_name:
|
iandees/all-the-places
|
locations/spiders/aunt_annes.py
|
Python
|
mit
| 10,953
| 0.006665
|
import scrapy
import xml.etree.ElementTree as ET
from locations.items import GeojsonPointItem
URL = 'http://hosted.where2getit.com/auntieannes/2014/ajax?&xml_request=%3Crequest%3E%3Cappkey%3E6B95F8A2-0C8A-11DF-A056-A52C2C77206B%3C%2Fappkey%3E%3Cformdata+id%3D%22locatorsearch%22%3E%3Cdataview%3Estore_default%3C%2Fdataview%3E%3Climit%3E250%3C%2Flimit%3E%3Cgeolocs%3E%3Cgeoloc%3E%3Caddressline%3E{}%3C%2Faddressline%3E%3Clongitude%3E%3C%2Flongitude%3E%3Clatitude%3E%3C%2Flatitude%3E%3Ccountry%3E{}%3C%2Fcountry%3E%3C%2Fgeoloc%3E%3C%2Fgeolocs%3E%3Cwhere%3E%3Cor%3E%3Chascatering%3E%3Ceq%3E%3C%2Feq%3E%3C%2Fhascatering%3E%3Chaspretzelfieldtrip%3E%3Ceq%3E%3C%2Feq%3E%3C%2Fhaspretzelfieldtrip%3E%3Cnewstores%3E%3Ceq%3E%3C%2Feq%3E%3C%2Fnewstores%3E%3C%2For%3E%3C%2Fwhere%3E%3Csearchradius%3E10%7C25%7C50%7C100%7C250%7C500%7C750%7C1000%3C%2Fsearchradius%3E%3Cstateonly%3E1%3C%2Fstateonly%3E%3C%2Fformdata%3E%3C%2Frequest%3E'
US_STATES = (
    # Two-letter USPS codes for the 50 states plus DC; presumably each is
    # substituted into URL to run one locator search per state — confirm
    # against the spider's request generation.
    "AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
    "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
    "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
    "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
    "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY",
)
UK_Cities = (
'London', 'Birmingham', 'Manchester', 'Glasgow', 'Leeds',
'Liverpool', 'Bristol', 'Newcastle', 'Sunderland', 'Wolverhampton',
'Nottingham', 'Sheffield', 'Belfast', 'Leicester', 'Bradford',
)
UAE_Cities = (
"Abu Dhabi", "Sharjah", "Dubai", "Dayrah","Al Ain",
"Fujairah", "Ras al-Khaimah", "Ar Ruways", "As Satwah",
"Al Khan",
)
TT_Cities = (
"Arima", "San Fernando", "Princes Town", "Piarco", "RioClaro", "Port of Spain",
"Victoria", "Maraval", "Fyzabad", "Debe", "Couva", "Diego Martin", "Chaguanas",
"Penal", "Cunupia", "Curepe", "Roxborough", "San Juan", "Arouca", "Saint Joseph",
"California", "Marabella", "Siparia", "Gasparillo", "Morvant", "Barataria", "Saint Clair",
"Laventille", "Carenage", "Ward of Tacarigua", "Caroni", "Lopinot", "Tunapuna", "Santa Cruz",
"Saint Augustine", "Golden Lane", "Scarborough", "Moriah", "Saint James", "Carapichaima",
"Valsayn", "Freeport", "Claxton Bay", "Sangre Grande", "Cumuto", "Woodbrook", "Petit Valley",
"El Dorado", "Phoenix Park",
)
Thailand_Cities = (
"Bangkok", "Chumphon", "Kathu", "Phang Khon", "Sakon Nakhon", "Mueang Nonthaburi",
"Kalasin", "Chon Buri", "Loei", "Khon Kaen", "Nong Bua Lamphu", "Roi Et", "Udon Thani",
"Kumphawapi", "Kanchanaburi", "Nong Khai", "Ayutthaya", "Chiang Mai", "Songkhla",
"Chiang Rai", "Surin", "Thanyaburi", "Wiphawadi", "Phuket", "Sing Buri", "Satun",
"Prachin Buri", "Ubon Ratchathani", "Pattaya", "Yala", "Bang Na", "Samut Songkhram", "Phetchabun"
"Ratchaburi", "Lampang", "Narathiwat", "New Sukhothai", "Lopburi", "Uttaradit", "Maha Sarakham",
"Mae Hong Son", "Suphan Buri", "Chachoengsao", "Samut Sakhon", "Phrae", "Din Daeng",
"Pathum Wan", "Phayao", "Trang", "Mukdahan", "Phetchaburi", "Uthai Thani", "Krabi", "Phichit",
"Phitsanulok", "Ban Pat Mon", "Prachuap Khiri Khan", "Ban Khlong Prasong", "Yasothon",
"Ranong", "Lamphun", "Nong Bua", "Amnat Charoen", "Ban Phichit", "Bang Khae", "Thon Buri",
"Min Buri", "Ban Tham", "Sam Sen", "Ang Thong", "Mueang Samut Prakan", "Sa Kaeo", "Pathum Thani",
"Chanthaburi", "Huai Khwang", "Rayong", "Sattahip", "Phan", "Si Racha", "Phatthalung",
"Rawai", "Buriram", "Dusit", "Khlong Luang", "Trat", "Ban Bueng", "Sung Noen", "Manorom",
"Ban Bang Plong", "Tak", "Ban Tha Duea", "Amphawa", "Ban Pong Lang", "Phaya Thai", "Si Sa Ket",
"Nakhon Ratchasima", "Bang Phlat", "Ban Bang Phli Nakhon", "Salaya", "Krathum Baen",
"Hua Hin", "Ban Talat Rangsit", "Ban Khlong Ngae", "Nong Prue", "Wang Thonglang",
"Samphanthawong", "Bang Khun Thian", "Chatuchak", "Chaiyaphum",
"Nakhon Pathom", "Nan", "Bang Kruai", "Sathon", "Suan Luang", "Ban Wang Yai"
"Khlong San", "Watthana", "Lat Krabang", "Muak Lek", "Kosum Phisai", "Ban Phlam", "Non Thai",
"Photharam", "Thalang", "Bang Kapi", "Long", "Ka Bang", "Pattani", "Nakhon Si Thammarat",
"Khlong Toei", "Cha-am", "Amphoe Aranyaprathet", "Phang Nga", "Ban Tha Ruea", "Chiang Muan",
"Ban Ang Thong", "Ban Khlong Takhian", "Khan Na Yao", "Bang Sue", "Sam Khok", "Don Mueang",
"Ban Pratunam Tha Khai","Sena", "Prakanong", "Ban Tha Pai", "Bang Lamung", "Nakhon Sawan",
"San Sai", "Kamphaeng Phet", "Pak Kret", "Hat Yai", "Ban Nam Hak", "Khlung", "Makkasan",
"Bang Sao Thong", "Ban Hua Thale", "Klaeng", "Chulabhorn", "Ban Don Sak", "Phanna Nikhom",
"Ban Na", "Ban Ko Pao","Mae Sot"
)
Korea_Cities = (
"Seoul", "Incheon", "Paju", "Cheonan", "Yongin", "Kwanghui-dong", "Pon-dong",
"Gwangju", "Gwangmyeong", "Tang-ni", "Busan", "Seongnam-si", "Suwon-si", "Namyang",
"Namyangju", "Jeju-si", "Ulsan", "Osan", "Hanam", "Pyong-gol", "Anyang-si",
"Yangsan", "Daejeon", "Nonsan", "Seocho", "Wonju", "Kisa", "Daegu", "Ansan-si", "Gongju",
"Haeundae", "Sasang", "Bucheon-si", "Chuncheon", "Ilsan-dong", "Naju", "Jinju", "Uiwang",
"Gangneung", "Yongsan-dong", "Pohang", "Changwon", "Jeonju", "Yeosu",
"Songnim", "Gimhae", "Songjeong", "Hyoja-dong", "Icheon-si", "Kimso", "Iksan", "Deokjin",
"Koyang-dong", "Samsung", "Anseong", "Samjung-ni", "Mapo-dong", "Gunnae", "Nae-ri",
"Suncheon", "Okpo-dong", "Moppo", "Sangdo-dong", "Cheongju-si", "Ch'aeun",
"Taebuk", "Yeoju", "Seong-dong", "Duchon", "Gyeongju", "Andong", "Seosan City", "Asan",
"Miryang", "Wonmi-gu", "Janghowon", "Chungnim", "Songam", "Tongan", "Ap'o", "Jecheon",
"Se-ri", "Ka-ri", "Hansol", "Songang", "Hyangyang", "Gyeongsan-si", "Gumi", "Unpo",
"Ulchin", "Namhyang-dong", "T'aebaek", "Hadong", "Haesan", "Chungju", "Chilgok",
)
Singapore_Cities = (
"Singapore", "Yishun New Town", "Bedok New Town", "Ayer Raja New Town",
"Kalang", "Tampines New Town", "Ang Mo Kio New Town", "Kampong Pasir Ris", "Hougang",
"Yew Tee", "Choa Chu Kang New Town", "Punggol", "Changi Village", "Bukit Timah Estate",
"Serangoon", "Jurong Town", "Tanglin Halt", "Woodlands New Town", "Jurong East New Town",
"Bukit Panjang New Town", "Bukit Batok New Town", "Pasir Panjang", "Holland Village",
"Tai Seng", "Toa Payoh New Town", "Bukit Timah", "Jurong West New Town", "Kembangan",
"Queenstown Estate", "Boon Lay", "Simei New Town", "Pandan Valley", "Clementi New Town",
"Tanjong Pagar"
)
# City names used to seed locator searches (corrupted "Lina|h" and
# "Ra's| al Khafji" splits rejoined).
# NOTE(review): "Jazirah" appears twice — harmless for search seeding but
# likely unintended.
Saudi_Arabia_Cities = (
    "Riyadh", "Dammam", "Safwa", "Al Qatif", "Dhahran", "Al Faruq", "Khobar", "Jubail",
    "Sayhat", "Jeddah", "Ta'if", "Mecca", "Al Hufuf", "Medina", "Rahimah", "Rabigh",
    "Yanbu` al Bahr", "Abqaiq", "Mina", "Ramdah", "Linah", "Abha", "Jizan", "Al Yamamah",
    "Tabuk", "Sambah", "Ras Tanura", "At Tuwal", "Sabya", "Buraidah", "Najran", "Sakaka",
    "Madinat Yanbu` as Sina`iyah", "Hayil", "Khulays", "Khamis Mushait", "Ra's al Khafji",
    "Al Bahah", "Rahman", "Jazirah", "Jazirah"
)
Indonesia_Cities = (
"Jakarta", "Surabaya", "Medan", "Bandung", "Bekasi", "Palembang", "Tangerang", "Makassar",
"Semarang", "South Tangerang",
)
Malaysia_Cities = (
"Kaula Lumpur", "Kota Bharu", "Klang", "Johor Bahru", "Subang Jaya", "Ipoh", "Kuching", "Seremban",
"Petaling Jaya", "Shah Alam", 'Penang', 'Kelantan', "Pantai", "Petaling Jaya", "Kajang",
"Setapak", "Bukit Kayu Hitam", "Bayan Lepas", "Taiping", "Kuala Terengganu", "Kuantan",
"Alor Gajah",
)
Japan_Cities = (
'Tokyo', "Hiroshima", "Saitama", "Nihon'odori", "Ibaraki", "Urayasu",
"Suita", "Funabashi", "Nagareyama", "Ichikawa", "Isesaki", "Koga", "Ichihara",
"Koshigaya", "Shibukawa", "Aoicho", "Yamakita", "Gotemba", "Nisshin", "Nishinomiya",
"Den'en-chofu", "Kawasaki", "Toyama-shi", "Moriguchi", "Chita", "Sano", "Nago
|
mscuthbert/abjad
|
abjad/tools/selectortools/CountsSelectorCallback.py
|
Python
|
gpl-3.0
| 3,980
| 0.001508
|
# -*- encoding: utf-8 -*-
from abjad.tools import sequencetools
from abjad.tools import datastructuretools
from abjad.tools import selectiontools
from abjad.tools.abctools import AbjadValueObject
class CountsSelectorCallback(AbjadValueObject):
    r'''A counts selector callback.

    ::

        >>> callback = selectortools.CountsSelectorCallback([3])
        >>> print(format(callback))
        selectortools.CountsSelectorCallback(
            counts=datastructuretools.CyclicTuple(
                [3]
                ),
            cyclic=True,
            fuse_overhang=False,
            nonempty=False,
            overhang=True,
            rotate=True,
            )

    '''

    ### CLASS VARIABLES ###

    __slots__ = (
        '_counts',
        '_cyclic',
        '_fuse_overhang',
        '_overhang',
        '_rotate',
        '_nonempty',
        )

    ### INITIALIZER ###

    def __init__(
        self,
        counts=(3,),
        cyclic=True,
        fuse_overhang=False,
        nonempty=False,
        overhang=True,
        rotate=True,
        ):
        # Counts are stored as a CyclicTuple so indexing can wrap around
        # when more groups are produced than counts were given.
        counts = datastructuretools.CyclicTuple(int(_) for _ in counts)
        self._counts = counts
        self._cyclic = bool(cyclic)
        self._fuse_overhang = bool(fuse_overhang)
        self._overhang = bool(overhang)
        self._rotate = bool(rotate)
        self._nonempty = bool(nonempty)

    ### SPECIAL METHODS ###

    def __call__(self, expr, rotation=None):
        r'''Iterates tuple `expr`.

        Returns tuple in which each item is a selection or component.
        '''
        # NOTE(review): assertion message is repr(tuple), the type —
        # presumably repr(expr) was intended; confirm upstream.
        assert isinstance(expr, tuple), repr(tuple)
        if rotation is None:
            rotation = 0
        rotation = int(rotation)
        result = []
        counts = self.counts
        if self.rotate:
            # Apply the caller-supplied rotation to the counts before
            # partitioning the first subexpression.
            counts = sequencetools.rotate_sequence(counts, -rotation)
        for subexpr in expr:
            # Partition on absolute counts; negative counts still shape the
            # partition but their groups are discarded below.
            groups = sequencetools.partition_sequence_by_counts(
                subexpr,
                [abs(_) for _ in counts],
                cyclic=self.cyclic,
                overhang=self.overhang,
                )
            if self.overhang and self.fuse_overhang and 1 < len(groups):
                # If the trailing overhang group is shorter than its expected
                # count, fuse it into the preceding group.
                last_count = counts[(len(groups) - 1) % len(counts)]
                if len(groups[-1]) != last_count:
                    last_group = groups.pop()
                    groups[-1] += last_group
            subresult = []
            for i, group in enumerate(groups):
                count = counts[i]
                if count < 0:
                    # Negative counts mark groups to drop.
                    continue
                items = selectiontools.Selection(group)
                subresult.append(items)
            if self.nonempty and not subresult:
                # Guarantee at least one selection per subexpression.
                group = selectiontools.Selection(groups[0])
                subresult.append(group)
            result.extend(subresult)
            if self.rotate:
                # Advance the counts by one position per subexpression.
                counts = sequencetools.rotate_sequence(counts, -1)
        return tuple(result)

    ### PUBLIC PROPERTIES ###

    @property
    def counts(self):
        r'''Gets counts selector callback counts.

        Returns tuple.
        '''
        return self._counts

    @property
    def cyclic(self):
        r'''Gets counts selector callback cyclicity.

        Returns boolean.
        '''
        return self._cyclic

    @property
    def fuse_overhang(self):
        r'''Gets counts selector callback fuse overhang flag.

        Returns ordinal constant.
        '''
        return self._fuse_overhang

    @property
    def nonempty(self):
        r'''Gets counts selector callback nonempty flag.

        Returns boolean.
        '''
        return self._nonempty

    @property
    def overhang(self):
        r'''Gets counts selector callback overhang flag.

        Returns boolean.
        '''
        return self._overhang

    @property
    def rotate(self):
        r'''Gets counts selector callback rotate flag.

        Returns boolean.
        '''
        return self._rotate
|
quodlibet/mutagen
|
tests/test_dsdiff.py
|
Python
|
gpl-2.0
| 2,668
| 0
|
import os
from mutagen.dsdiff import DSDIFF, IffError
from tests import TestCase, DATA_DIR, get_temp_copy
class TDSDIFF(TestCase):
    """Tests for DSDIFF (.dff) stream parsing and ID3 tag handling.

    Uses the modern ``assert*`` method names: the ``failUnless*`` aliases
    were deprecated since Python 2.7/3.2 and removed in Python 3.12.
    """

    silence_1 = os.path.join(DATA_DIR, '2822400-1ch-0s-silence.dff')
    silence_2 = os.path.join(DATA_DIR, '5644800-2ch-s01-silence.dff')
    silence_dst = os.path.join(DATA_DIR, '5644800-2ch-s01-silence-dst.dff')

    def setUp(self):
        self.dff_1 = DSDIFF(self.silence_1)
        self.dff_2 = DSDIFF(self.silence_2)
        self.dff_dst = DSDIFF(self.silence_dst)
        # Temp copies so save/delete tests don't mutate the fixture files.
        self.dff_id3 = DSDIFF(get_temp_copy(self.silence_dst))
        self.dff_no_id3 = DSDIFF(get_temp_copy(self.silence_2))

    def test_channels(self):
        self.assertEqual(self.dff_1.info.channels, 1)
        self.assertEqual(self.dff_2.info.channels, 2)
        self.assertEqual(self.dff_dst.info.channels, 2)

    def test_length(self):
        self.assertEqual(self.dff_1.info.length, 0)
        self.assertEqual(self.dff_2.info.length, 0.01)
        self.assertEqual(self.dff_dst.info.length, 0)

    def test_sampling_frequency(self):
        self.assertEqual(self.dff_1.info.sample_rate, 2822400)
        self.assertEqual(self.dff_2.info.sample_rate, 5644800)
        self.assertEqual(self.dff_dst.info.sample_rate, 5644800)

    def test_bits_per_sample(self):
        self.assertEqual(self.dff_1.info.bits_per_sample, 1)

    def test_bitrate(self):
        self.assertEqual(self.dff_1.info.bitrate, 2822400)
        self.assertEqual(self.dff_2.info.bitrate, 11289600)
        # DST-compressed streams report a bitrate of 0.
        self.assertEqual(self.dff_dst.info.bitrate, 0)

    def test_notdsf(self):
        # A .dsf file is DSD audio but not IFF-framed; parsing must fail.
        self.assertRaises(IffError, DSDIFF, os.path.join(
            DATA_DIR, '2822400-1ch-0s-silence.dsf'))

    def test_pprint(self):
        self.assertTrue(self.dff_1.pprint())

    def test_mime(self):
        self.assertTrue("audio/x-dff" in self.dff_1.mime)

    def test_update_tags(self):
        from mutagen.id3 import TIT1
        tags = self.dff_id3.tags
        tags.add(TIT1(encoding=3, text="foobar"))
        tags.save()
        new = DSDIFF(self.dff_id3.filename)
        self.assertEqual(new["TIT1"], ["foobar"])

    def test_delete_tags(self):
        self.dff_id3.tags.delete()
        new = DSDIFF(self.dff_id3.filename)
        self.assertEqual(new.tags, None)

    def test_save_tags(self):
        from mutagen.id3 import TIT1
        self.dff_no_id3.add_tags()
        tags = self.dff_no_id3.tags
        tags.add(TIT1(encoding=3, text="foobar"))
        tags.save(self.dff_no_id3.filename)
        new = DSDIFF(self.dff_no_id3.filename)
        self.assertEqual(new["TIT1"], ["foobar"])
|
babraham123/mysite
|
blogs/views.py
|
Python
|
mit
| 842
| 0.004751
|
from django.shortcuts imp
|
ort render, render_to_response
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.core.urlresolvers import rev
|
erse
from blogs.models import Post
from django.http import Http404
def postlist(request):
    """Render a paginated list of posts, newest first (2 per page).

    A malformed ``page`` query parameter falls back to page 1; an
    out-of-range page falls back to the last page.
    """
    all_posts = Post.objects.all().order_by("-created")
    paginator = Paginator(all_posts, 2)
    try:
        page_number = int(request.GET.get("page", '1'))
    except ValueError:
        page_number = 1
    try:
        page_obj = paginator.page(page_number)
    except (InvalidPage, EmptyPage):
        page_obj = paginator.page(paginator.num_pages)
    return render_to_response("postlist.html", {'posts': page_obj})
def postpage(request, post_id):
    """Render the detail page for one post; raise Http404 when missing."""
    try:
        the_post = Post.objects.get(pk=post_id)
    except Post.DoesNotExist:
        raise Http404
    context = {'post': the_post}
    return render(request, 'postpage.html', context)
|
nabin-info/hackerrank.com
|
python-division.py
|
Python
|
mit
| 132
| 0.015152
|
#!/usr/bin/python
# HackerRank "Python: Division" -- read two ints from stdin and print
# their integer division followed by their true (float) division.
# NOTE: Python 2 source (raw_input / print statement), kept as such.
import sys

numerator = int(raw_input().strip())
denominator = int(raw_input().strip())
print (numerator / denominator)
print (float(numerator) / float(denominator))
|
NiLuJe/calibre-kobo-driver
|
tests/test_common.py
|
Python
|
gpl-3.0
| 4,914
| 0.001231
|
# vim: fileencoding=UTF-8:expandtab:autoindent:ts=4:sw=4:sts=4
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# To import from calibre, some things need to be added to `sys` first. Do not import
# anything from calibre or the plugins yet.
import glob
import os
import sys
import unittest
test_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(test_dir)
test_libdir = os.path.join(
src_dir, "pylib", "python{major}".format(major=sys.version_info.major)
)
sys.path += glob.glob(os.path.join(test_libdir, "*.zip"))
try:
from unittest import mock
except ImportError:
# Python 2
import mock
from calibre_plugins.kobotouch_extended import common
from polyglot.builtins import unicode_type
LANGUAGES = ("en_CA", "fr_CA", "fr_FR", "de_DE", "ar_EG", "ru_RU")
TEST_STRINGS = [
{
"encodings": {"UTF-8", "CP1252"},
"test_strings": [
unicode_type(s) for s in ["Hello, World!", "J'ai
|
trouvé mon livre préféré"]
],
},
{
"encodings": {"UTF-8", "CP1256"},
"test_strings": [unicode_type(s) for s in ["مرحبا بالعالم"]],
},
{
"encodings": {"UTF-8", "CP1251"},
"test_strings": [unicode_type(s) for s in ["Привет мир"]],
},
{
"encodings": {"UTF-8", "CP932"},
"test_strings": [unicode_type(s) for s in ["こんにちは世界"]],
},
]
TEST_TIME = "2020-04-01 01:02:03"
def gen_lang_code():
    """Yield every distinct encoding mentioned in TEST_STRINGS.

    Order is arbitrary (set iteration order).
    """
    all_encodings = set()
    for case in TEST_STRINGS:
        all_encodings.update(case["encodings"])
    for encoding in all_encodings:
        yield encoding
class TestCommon(unittest.TestCase):
    """Tests for the plugin's ``common.Logger`` helper."""

    # Saved value of $LANG, restored in tearDown.
    orig_lang = ""  # type: str

    def setUp(self):  # type: () -> None
        # Remember the ambient LANG so individual tests may mutate it.
        self.orig_lang = os.environ.get("LANG", None)

    def tearDown(self):  # type: () -> None
        # Restore (or remove) LANG exactly as it was before the test.
        if not self.orig_lang:
            if "LANG" in os.environ:
                del os.environ["LANG"]
        else:
            os.environ["LANG"] = self.orig_lang
        self.orig_lang = ""

    def test_logger_log_level(self):  # type: () -> None
        # Level defaults to INFO and becomes DEBUG when either of calibre's
        # debug environment variables is present at construction time.
        for envvar in ("CALIBRE_DEVELOP_FROM", "CALIBRE_DEBUG"):
            if envvar in os.environ:
                del os.environ[envvar]
        logger = common.Logger()
        self.assertEqual(logger.log_level, "INFO")
        os.environ["CALIBRE_DEVELOP_FROM"] = "true"
        logger = common.Logger()
        self.assertEqual(logger.log_level, "DEBUG")
        del os.environ["CALIBRE_DEVELOP_FROM"]
        os.environ["CALIBRE_DEBUG"] = "1"
        logger = common.Logger()
        self.assertEqual(logger.log_level, "DEBUG")
        del os.environ["CALIBRE_DEBUG"]

    def _run_logger_unicode_test(self, as_bytes):  # type: (bool) -> None
        # For each string/encoding pair, pin the preferred encoding and a
        # fixed timestamp, then check _tag_args produces the expected
        # "<time> [<LEVEL>] <msg>" formatting.
        # NOTE(review): as_bytes is currently unused here — confirm intent.
        for o in TEST_STRINGS:
            for enc in o["encodings"]:
                with mock.patch(
                    "calibre_plugins.kobotouch_extended.common.preferred_encoding", enc
                ), mock.patch(
                    "calibre_plugins.kobotouch_extended.common.time.strftime",
                    mock.MagicMock(return_value=TEST_TIME),
                ):
                    logger = common.Logger()
                    for msg in o["test_strings"]:
                        test_tagged = logger._tag_args("DEBUG", msg)
                        self.assertListEqual(
                            test_tagged,
                            [
                                "{timestr} [{level}] {msg}".format(
                                    timestr=TEST_TIME, level="DEBUG", msg=msg
                                ),
                            ],
                        )

    def test_logger_ensure_unicode_from_bytes(self):  # type: () -> None
        self._run_logger_unicode_test(True)
        self._run_logger_unicode_test(False)

    @mock.patch(
        "calibre_plugins.kobotouch_extended.common.Logger.print_formatted_log",
        mock.MagicMock(),
    )
    @mock.patch(
        "calibre_plugins.kobotouch_extended.common.Logger._prints", mock.MagicMock(),
    )
    @mock.patch(
        "calibre_plugins.kobotouch_extended.common.Logger._tag_args",
        mock.MagicMock(return_value="Goodbye, World"),
    )
    def test_logger_logs(self):
        # With the output plumbing mocked out, verify dispatch: debug() and
        # __call__() route through print_formatted_log at the right levels;
        # exception() tags at ERROR and calls _prints twice.
        logger = common.Logger()
        logger.debug("Hello, World")
        logger.print_formatted_log.assert_called_with("DEBUG", "Hello, World")
        logger("Hello, World")
        logger.print_formatted_log.assert_called_with("INFO", "Hello, World")
        logger.print_formatted_log.reset_mock()
        logger._prints.reset_mock()
        logger._tag_args.reset_mock()
        logger.exception("Oh noes!")
        logger._tag_args.assert_called_with("ERROR", "Oh noes!")
        self.assertEqual(logger._prints.call_count, 2)
if __name__ == "__main__":
unittest.main(module="test_common", verbosity=2)
|
shrimo/node_image_tools
|
node_graph.py
|
Python
|
gpl-3.0
| 3,999
| 0.021505
|
# Visualize node graph
# Copyright 2013 Victor Lavrentev
import matplotlib.pyplot as plt
import json, sys
import networkx as nx
from node_lib import graph
print '\nNode image tools (Visualize node graph) v01a\n'
try:
file_node=sys.argv[1]
except:
print '->Error. No script'
sys.exit (0)
with open(file_node) as jdf:
data_io = json.load(jdf)
# Bringing format node to object type and sort by ID
sorted_names=sorted(data_io, key=lambda x : data_io[x]['id'])
GNode = graph
Node_graph=nx.DiGraph()
# Creating a graph
for _name in sorted_names:
node = GNode(data_io[_name])
if (node.type=='read'):
Node_graph.add_node(node.name)
if (node.type=='cc'):
Node_graph.add_node(node.name)
Node_graph.add_edge(node.link,node.name)
if (node.type=='size'):
Node_graph.add_node(node.name)
Node_graph.add_edge(node.link,node.name)
if (node.type=='rotate'):
Node_graph.add_node(node.name)
Node_graph.add_edge(node.link,node.name)
if (node.type=='gradient'):
Node_graph.add_nod
|
e(node.name)
if (node.type=='composite'):
Node_graph.add_node(node.name)
Node_graph.add_edge(node.link_a,node.name
|
)
Node_graph.add_edge(node.link_b,node.name)
if (node.job=='mask'):
Node_graph.add_edge(node.mask,node.name)
if (node.type=='blur'):
Node_graph.add_node(node.name)
Node_graph.add_edge(node.link,node.name)
if (node.type=='sharpen'):
Node_graph.add_node(node.name)
Node_graph.add_edge(node.link,node.name)
if (node.type=='view'):
Node_graph.add_node(node.name)
Node_graph.add_edge(node.link,node.name)
if (node.type=='write'):
Node_graph.add_node(node.name)
Node_graph.add_edge(node.link,node.name)
if (node.type=='invert'):
Node_graph.add_node(node.name)
Node_graph.add_edge(node.link,node.name)
pos=nx.spring_layout(Node_graph,iterations=30,weight=50,scale=1)
# Draw graph
for _graph in sorted_names:
graph_ = GNode(data_io[_graph])
if (graph_.type=='write' or graph_.type=='read'):
nx.draw_networkx_nodes(Node_graph,pos,node_size=2000,alpha=0.5,
node_shape='s',node_color='r',nodelist=[graph_.name])
if (graph_.type=='blur' or graph_.type=='sharpen'):
nx.draw_networkx_nodes(Node_graph,pos,node_size=2000,alpha=0.5,
node_color='g',nodelist=[graph_.name])
if (graph_.type=='gradient'):
nx.draw_networkx_nodes(Node_graph,pos,node_size=2000,alpha=0.25,
node_shape='s',node_color='r',nodelist=[graph_.name])
if (graph_.type=='composite'):
nx.draw_networkx_nodes(Node_graph,pos,node_size=5000,alpha=0.5,
node_color='y',nodelist=[graph_.name])
if (graph_.type=='cc' or graph_.type=='invert'):
nx.draw_networkx_nodes(Node_graph,pos,node_size=2000,alpha=0.5,
node_color='b',nodelist=[graph_.name])
if (graph_.type=='rotate' or graph_.type=='size'):
nx.draw_networkx_nodes(Node_graph,pos,node_size=2000,alpha=0.3,
node_color='w',nodelist=[graph_.name])
if (graph_.type=='view'):
nx.draw_networkx_nodes(Node_graph,pos,node_size=2000,alpha=0.25,
node_shape='^',node_color='r',nodelist=[graph_.name])
nx.draw_networkx_labels(Node_graph,pos,
font_size=10,font_family='sans-serif')
nx.draw_networkx_edges(Node_graph,
pos,width=2,
alpha=0.5,
edge_color='g',
style='solid',
arrows=True,
)
#plt.savefig("node_graph3.png")
plt.show()
print '\nVisualize graph:',file_node,'completed'
|
newbee-7/News_Crawl
|
crawl.py
|
Python
|
mit
| 1,994
| 0.008024
|
# coding:utf-8
import url_manager, html_downloader, html_parser, html_outputer
import traceback
import iMessage
class Crawl(object):
def __init__(self):
self.urls = url_manager.UrlManager()
self.downloader = html_downloader.HtmlDownloader()
self.parser = html_parser.HtmlParser()
self.outputer = html_outputer.HtmlOutputer()
def craw(self, root_urls):
count = 1
for root_url in root_urls:
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
try:
new_url = self.urls.get_new_url()
print 'craw %d : %s' % (count, new_url)
html_cont = self.downloader.download(new_url)
new_urls, new_data = self.parser.parse(new_url, html_cont)
self.urls.add_new_urls(new_urls)
self.outputer.collect_data(new_data)
count = count + 1
except Exception,e:
print 'craw failed!'
#print 'str(Exception):\t', str(Exception)
#print 'str(e):\t\t', str(e)
#print 'repr(e):\t', repr(e)
#print 'e.message:\t', e.message
#print 'traceback.print_exc():'; traceback.print_exc()
#print 'traceback.f
|
ormat_exc():\n%s' % traceback.format_exc()
datass = self.outputer.output_html()
News = ''
for datas in datas
|
s:
for data in datas:
News += datas[data]+'\n'
#print News
if News != '':
iMessage.send_Message(News, 'CQUT_News')
if __name__=="__main__":
root_urls = ["http://cs.cqut.edu.cn/Notice/NoticeStudentMore.aspx", "http://cs.cqut.edu.cn/Notice/NoticeMore.aspx?NtcCategoryID=5", "http://cs.cqut.edu.cn/News/NewsMore.aspx", "http://cs.cqut.edu.cn/Notice/NoticeEmpMore.aspx"]
obj_News_crawl = Crawl()
obj_News_crawl.craw(root_urls)
|
nacl-webkit/chrome_deps
|
tools/telemetry/telemetry/tab_test_case.py
|
Python
|
bsd-3-clause
| 1,243
| 0.011263
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import browser_finder
from telemetry import options_for_unittests
class TabTestCase(unittest.TestCase):
    """Base test case that opens a browser and exposes its first tab."""

    def __init__(self, *args):
        # Subclasses may append extra browser flags before setUp runs.
        self._extra_browser_args = []
        super(TabTestCase, self).__init__(*args)

    def setUp(self):
        self._browser = None
        self._tab = None
        options = options_for_unittests.GetCopy()
        self.CustomizeBrowserOptions(options)
        for extra_arg in self._extra_browser_args:
            options.extra_browser_args.append(extra_arg)
        browser_to_create = browser_finder.FindBrowser(options)
        if not browser_to_create:
            raise Exception('No browser found, cannot continue test.')
        try:
            self._browser = browser_to_create.Create()
            self._tab = self._browser.tabs[0]
        except:
            # Ensure partially-created resources are released before re-raising.
            self.tearDown()
            raise

    def tearDown(self):
        if self._tab:
            self._tab.Disconnect()
        if self._browser:
            self._browser.Close()

    def CustomizeBrowserOptions(self, options):
        """Override to add test-specific options to the BrowserOptions object"""
        pass
|
jgmize/kuma
|
kuma/wiki/models.py
|
Python
|
mpl-2.0
| 73,565
| 0.00015
|
import hashlib
import json
import sys
import traceback
from datetime import datetime, timedelta
from functools import wraps
from uuid import uuid4
import newrelic.agent
import waffle
from constance import config
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models, transaction
from django.db.models import signals
from django.utils.decorators import available_attrs
from django.utils.functional import cached_property
from django.utils.translation import ugettext, ugettext_lazy as _
from pyquery import PyQuery
from taggit.managers import TaggableManager
from taggit.models import ItemBase, TagBase
from taggit.utils import edit_string_for_tags, parse_tags
from tidings.models import NotificationsMixin
from kuma.core.cache import memcache
from kuma.core.exceptions import ProgrammingError
from kuma.core.i18n import get_language_mapping
from kuma.core.urlresolvers import reverse
from kuma.search.decorators import register_live_index
from kuma.spam.models import AkismetSubmission, SpamAttempt
from . import kumascript
from .constants import (DEKI_FILE_URL, DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL,
KUMA_FILE_URL, REDIRECT_CONTENT, REDIRECT_HTML,
TEMPLATE_TITLE_PREFIX)
from .content import parse as parse_content
from .content import (Extractor, H2TOCFilter, H3TOCFilter, SectionTOCFilter,
get_content_sections, get_seo_description)
from .exceptions import (DocumentRenderedContentNotAvailable,
DocumentRenderingInProgress, PageMoveError,
SlugCollision, UniqueCollision)
from .jobs import DocumentContributorsJob, DocumentZoneStackJob
from .managers import (DeletedDocumentManager, DocumentAdminManager,
DocumentManager, RevisionIPManager,
TaggedDocumentManager, TransformManager)
from .signals import render_done
from .templatetags.jinja_helpers import absolutify
from .utils import tidy_content
def cache_with_field(field_name):
    """Decorator factory caching a generated value in model field *field_name*.

    The wrapped method runs only when the backing field is ``None`` or the
    caller passes ``force_fresh=True``; otherwise the stored field value is
    returned directly.  Fresh values are written back to the field (saving
    the model is the caller's responsibility).
    """
    def decorator(fn):
        @wraps(fn, assigned=available_attrs(fn))
        def wrapper(self, *args, **kwargs):
            force_fresh = kwargs.pop('force_fresh', False)
            cached = getattr(self, field_name)
            if cached is not None and not force_fresh:
                # Fast path: reuse the previously stored value.
                return cached
            # Generate, store on the backing field, and return.
            fresh = fn(self, force_fresh=force_fresh)
            setattr(self, field_name, fresh)
            return fresh
        return wrapper
    return decorator
def _inherited(parent_attr, direct_attr):
"""Return a descriptor delegating to an attr of the original document.
If `self` is a translation, the descriptor delegates to the attribute
`parent_attr` from the original document. Otherwise, it delegates to the
attribute `direct_attr` from `self`.
Use this only on a reference to another object, like a ManyToMany or a
ForeignKey. Using it on a normal field won't work well, as it'll preclude
the use of that field in QuerySet field lookups. Also, ModelForms that are
passed instance=this_obj won't see the inherited value.
"""
getter = lambda self: (getattr(self.parent, parent_attr)
if self.parent and self.parent.id != self.id
else getattr(self, direct_attr))
setter = lambda self, val: (setattr(self.parent, parent_attr, val)
if self.parent and self.parent.id != self.id
else setattr(self, direct_attr, val))
return property(getter, setter)
def valid_slug_parent(slug, locale):
    """Return the parent Document implied by *slug*, or None for top-level.

    A slug like ``a/b/c`` implies a parent document with slug ``a/b`` in the
    same *locale*.

    Raises:
        Exception: when the implied parent document does not exist.
    """
    slug_bits = slug.split('/')
    slug_bits.pop()  # drop the leaf; what's left is the parent slug
    parent = None
    if slug_bits:
        parent_slug = '/'.join(slug_bits)
        try:
            parent = Document.objects.get(locale=locale, slug=parent_slug)
        except Document.DoesNotExist:
            # Translate the literal template first, THEN interpolate, so the
            # string matches the translation catalog entry.  (The original
            # interpolated before calling ugettext, which always missed.)
            raise Exception(
                ugettext('Parent %s does not exist.') % (
                    '%s/%s' % (locale, parent_slug)))
    return parent
class DocumentTag(TagBase):
    """A tag indexing a document"""

    class Meta:
        verbose_name = _('Document Tag')
        verbose_name_plural = _('Document Tags')
def tags_for(cls, model, instance=None, **extra_filters):
    """Return tags used by *cls*'s through model.

    Copied from taggit to work around the issue of not being able to use
    the TaggedItemBase class that has the tag field already defined.

    With *instance*, returns tags attached to that object; otherwise all
    tags attached to at least one object (distinct).  *model* is unused
    but kept for signature compatibility with taggit.
    """
    relname = cls.tag_relname()
    filters = dict(extra_filters or {})
    if instance is None:
        filters['%s__content_object__isnull' % relname] = False
        return cls.tag_model().objects.filter(**filters).distinct()
    filters['%s__content_object' % relname] = instance
    return cls.tag_model().objects.filter(**filters)
class TaggedDocument(ItemBase):
    """Through model, for tags on Documents"""

    content_object = models.ForeignKey('Document')
    # Per-app/model related_name avoids reverse-accessor clashes.
    tag = models.ForeignKey(DocumentTag, related_name="%(app_label)s_%(class)s_items")

    objects = TaggedDocumentManager()

    @classmethod
    def tags_for(cls, *args, **kwargs):
        # Delegates to the module-level taggit workaround helper.
        return tags_for(cls, *args, **kwargs)
class DocumentAttachment(models.Model):
    """
    Intermediary between Documents and Attachments. Allows storing the
    user who attached a file to a document, and a (unique for that
    document) name for referring to the file from the document.
    """

    file = models.ForeignKey(
        'attachments.Attachment',
        related_name='document_attachments',
    )
    document = models.ForeignKey(
        'wiki.Document',
        related_name='attached_files',
    )
    # User who attached the file; nullable (e.g. for legacy rows).
    attached_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
    # Per-document name used to reference the file from document content.
    name = models.TextField()

    # whether or not this attachment was uploaded for the document
    is_original = models.BooleanField(
        verbose_name=_('uploaded to the document'),
        default=False,
    )

    # whether or not this attachment is linked in the document's content
    is_linked = models.BooleanField(
        verbose_name=_('linked in the document content'),
        default=False,
    )

    class Meta:
        db_table = 'attachments_documentattachment'

    def __unicode__(self):
        return u'"%s" for document "%s"' % (self.file, self.document)

    def clean(self):
        # Reject attaching the same file to one document more than once.
        if self.pk and (self.document.files.through.objects.exclude(pk=self.pk)
                        .exists()):
            raise ValidationError(
                _("Attachment %(attachment_id)s can't be attached "
                  "multiple times to document %(document_id)s") %
                {'attachment_id': self.pk, 'document_id': self.document.pk}
            )
@register_live_index
class Document(NotificationsMixin, models.Model):
"""A localized knowledgebase document, not revision-specific."""
TOC_FILTERS = {
1: SectionTOCFilter,
2: H2TOCFilter,
3: H3TOCFilter,
4: SectionTOCFilter
}
title = models.CharField(max_length=255, db_index=True)
slug = models.CharField(max_length=255, db_index=True)
# NOTE: Documents are indexed by tags, but tags are edited in Revisions.
# Also, using a custom through table to isolate Document tags from those
# used in other models and apps. (Works better than namespaces, for
# completion and such.)
tags = TaggableManager(through=TaggedDocument)
# Is this document a template or not?
is_template = models.BooleanField(default=False, editable=False,
db_index=True)
# Is this a redirect or not?
is_redirect = models.BooleanField(default=False, editable=False,
|
lemonad/behorighet
|
behorighet/main/views.py
|
Python
|
bsd-3-clause
| 532
| 0
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from units.models import Unit
def startpage(request):
    """Start page.

    Shows the list of units available for statistics/filtering.
    """
    all_units = Unit.objects.all()
    context = {'units': all_units}
    return render(request, 'startpage.html', context)
|
bcraenen/KFClassifier
|
other/methods/ExtraTreesSample.py
|
Python
|
gpl-3.0
| 3,466
| 0.018465
|
#!/usr/bin/env python
import arff
import numpy as np
import sys
from sklearn import preprocessing
#from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import RFE
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold
from sklearn import cross_validation
from sklearn import metrics
#Parameters:
filename = sys.argv[1]
n_estimators = [100,200,300,500,1000,3000]
#n_estimators = [1000]
n_featuresToSelect = [2,3,5,10,20]
#n_featuresToSelect = [20]
maxDepth = [2,3,4,5,10]
#maxDepth = [2]
verboseLevel=10
n_jobs=10
n_crossValidation=10
n_accuracy=10
#n_accuracy=2
n_folds=10
# 10% removal of features
stepSize=0.1
print "Load dataset"
# Load dataset
arffDecoder = arff.ArffDecoder()
dataset = arffDecoder.decode(open(filename, 'rb'), encode_nominal=True)
print "Preprocess dataset"
# Get categorical features
categoricals = []
# NOTE: skip last (class) 'feature'
for feature in range(0,len(dataset['attributes'])-1):
if isinstance(dataset['attributes'][feature][1], list):
categoricals.append(feature)
print "Categorical indices: {0}".format(categoricals)
# Apply OneHotEncoder
oneHotEncoder = preprocessing.OneHotEncoder(categorical_features=categoricals, sparse=False)
print "Number of features: {0}".format(len(dataset['data'][0]))
print "Number of samples: {0}".format(len(dataset['data']))
binData = oneHotEncoder.fit_transform(np.array(dataset['data']))
print "n-values: {0}".format(oneHotEncoder.n_values_)
print "feature indices: {0}".format(oneHotEncoder.feature_indices_)
print "Number of binarised features: {0}".format(len(binData[0]))
print "Number of binarised samples: {0}".format(len(binData))
# Setting up input and outputs
inputs = binData[:,:-1]
output = binData[:,-1]
print "Start grid search"
# Setup experimental pipeline
scaler = preprocessing.RobustScaler()
#classifier = RandomForestClassifier(n_estimators=n_estimators[0],max_depth=maxDepth[0],oob_score=True,bootstrap=True)
classifier = ExtraTreesClassifier(n_estimators=n_estimators[0],max_depth=maxDepth[0],oob_score=True,bootstrap=True)
selector = RFE(classifier,n_features_to_select=n_featuresToSelect[0],step=stepSize)
pipeline = Pipeline([("scaler",scaler),("RFE",selector),("classifier",classifier)])
paramGrid = dict(RFE__n_features_to_select=n_featuresToSelect, classifier__max_depth=maxDepth, classifier__n_estimators=n_estimators)
# Do grid search
gridSearch = GridSearchCV(pipeline,param_grid=paramGrid,verbose=verboseLevel,n_jobs=n_jobs,cv=n_crossValidation)
gr
|
idSearch.fit(inputs,output)
estimator = gridSearch.best_estimator_
print "Results: "
print "Selected features: {0}".format(estimator.named_steps['RFE'].n_features_to_select)
print "Max depth: {0}".format(estimator.named_steps['classifier'].max_depth)
print "Number of trees: {0}".format(estimator.named_steps['classifier'].n_estimators)
# Calculate accuracies
print "Calculate accuracies"
accuracy = []
fo
|
r count in range(0,n_accuracy):
cv = StratifiedKFold(output,n_folds=n_folds,shuffle=True)
predicted = cross_validation.cross_val_predict(estimator,inputs,output,cv=cv,verbose=verboseLevel,n_jobs=n_jobs,)
score = metrics.accuracy_score(output,predicted,normalize=True)
accuracy.append(score)
print "Accuracy array: {0}".format(accuracy)
print "Cross-validation accuracy of final model {0}".format(np.mean(accuracy))
|
plotly/python-api
|
packages/python/plotly/plotly/validators/sankey/link/_label.py
|
Python
|
mit
| 442
| 0.002262
|
import _plot
|
ly_utils.basevalidators
class LabelValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``label`` data array of ``sankey.link``."""

    def __init__(self, plotly_name="label", parent_name="sankey.link", **kwargs):
        # Pull overridable metadata out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "data")
        super(LabelValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
srcole/tools
|
plt.py
|
Python
|
mit
| 10,251
| 0.025266
|
# -*- coding: utf-8 -*-
"""
Miscellaneous functions for plotting
1. bar : create a bar chart with error bars
2. viztime : plot a pretty time series
3. scatt_2cond : scatter plot that compares the x and y values for each point
4. unpair_2cond : plot to compare the distribution of two sets of values
5. scatt_corr : plot a correlation
6. viz_ecog : plot multiple channels of channel x time data in an interactive plot
7. color2d : plot a matrix with values encoded in a colormap
8. spectrogram : plot a spectrogram using pcolormesh
"""
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.widgets import Slider
def bar(y, yerr, xlab, ylab,
        y2 = None, yerr2 = None, legend = None,
        ylim=None, yticksvis=True, figsize=(3,6),
        fontsize=15):
    """Create a bar chart with error bars.

    Parameters
    ----------
    y, yerr : array-like
        Primary bar heights and symmetric error-bar sizes.
    xlab : sequence of str
        One tick label per bar.
    ylab : str
        Y-axis label.
    y2, yerr2 : array-like or 2-d array-like, optional
        Additional series (one per row) drawn beside the primary bars.
    legend : sequence of str, optional
        Legend entries; used when extra series are drawn.
    ylim : tuple, optional
        Y-axis limits; defaults to the data range including error bars.
    yticksvis : bool
        Whether y tick labels are visible.
    figsize : tuple
        Figure size in inches.
    fontsize : int
        Font size for labels, ticks, and legend.
    """
    if ylim is None:
        ylim = (np.min(y - yerr), np.max(y + yerr))
    x = np.arange(len(y))
    if y2 is None:
        x_width = .5
        Nextra = 0
    else:
        if len(np.shape(y2)) == 1:
            Nextra = 1
        else:
            # np.int/np.float aliases were removed in NumPy 1.24; use builtins.
            Nextra = int(np.shape(y2)[0])
        x_width = .8 / float(Nextra + 1)
    plt.figure(figsize=figsize)
    plt.bar(x, y, x_width, color='k', yerr=yerr, ecolor='k')
    if y2 is not None:
        if Nextra == 1:
            plt.bar(x + x_width, y2, x_width, color='r', yerr=yerr2, ecolor='k')
        else:
            colorlist = ('r', 'b', 'g', 'y', 'c')
            for e in range(Nextra):
                plt.bar(x + x_width * (e + 1), y2[e], x_width,
                        color=colorlist[e], yerr=yerr2[e], ecolor='k')
        plt.legend(legend, loc='best', fontsize=fontsize)
        plt.xticks(x + x_width * Nextra / 2., xlab, size=fontsize)
    else:
        plt.xticks(x + x_width / 2., xlab, size=fontsize)
    plt.xlim((x[0] - x_width, x[-1] + x_width * (Nextra + 2)))
    plt.ylim(ylim)
    plt.yticks(ylim, visible=yticksvis, size=fontsize)
    plt.ylabel(ylab, size=fontsize)
    plt.tight_layout()
def viztime(x, y,
            xlim = None, ylim = None,
            xticks = None, yticks = None,
            xlabel = '', ylabel = '',
            figsize = (12,4),
            returnax = False):
    """Plot a time series as a black line with large axis labels.

    Limits and ticks default to the data range.  When `returnax` is True,
    the current matplotlib Axes is returned.
    """
    if xlim is None:
        xlim = (np.min(x), np.max(x))
    if ylim is None:
        ylim = (np.min(y), np.max(y))
    xticks = xlim if xticks is None else xticks
    yticks = ylim if yticks is None else yticks
    plt.figure(figsize=figsize)
    plt.plot(x, y, 'k-')
    plt.xlim(xlim)
    plt.xticks(xticks, size=15)
    plt.xlabel(xlabel, size=20)
    plt.ylim(ylim)
    plt.yticks(yticks, fontsize=15)
    plt.ylabel(ylabel, size=20)
    plt.tight_layout()
    if returnax:
        return plt.gca()
def scatt_2cond(x, y, ms = 12,
                lims = None, ticks = None,
                xlabel = '', ylabel = '',
                figsize = (5,5),
                returnax = False):
    """Scatter plot comparing paired x and y values for each point.

    Draws the identity line y = x, so points above/below it show which
    condition is larger.  Both axes share the same limits and ticks.

    Parameters
    ----------
    x, y : array-like
        Paired values, one point per element.
    ms : int
        Marker size.
    lims : tuple, optional
        Shared axis limits; defaults to the data range plus a 10% buffer.
    ticks : tuple, optional
        Shared tick positions; defaults to `lims`.
    xlabel, ylabel : str
        Axis labels.
    figsize : tuple
        Figure size in inches.
    returnax : bool
        If True, return the current matplotlib Axes.
    """
    if lims is None:
        lims = (np.min(np.hstack((x,y))),np.max(np.hstack((x,y))))
        # Add buffer
        lims_range = lims[1] - lims[0]
        lims = (lims[0] - lims_range*.1, lims[1] + lims_range*.1)
    if ticks is None:
        ticks = lims
    plt.figure(figsize=figsize)
    plt.plot(x,y,'k.', ms = ms)
    # Identity line for visual comparison of the two conditions.
    plt.plot(lims, lims,'k-')
    plt.xlim(lims)
    plt.xticks(ticks,size=15)
    plt.xlabel(xlabel,size=20)
    plt.ylim(lims)
    plt.yticks(ticks,fontsize=15)
    plt.ylabel(ylabel,size=20)
    plt.tight_layout()
    if returnax:
        return plt.gca()
def unpair_2cond(y1, y2, xlabs, ms = 12,
                 ylim = None, yticks = None,
                 ylabel = '',
                 figsize = (3,5),
                 returnax = False):
    """Compare the distributions of two unpaired sets of values.

    Each set is drawn as a column of dots (at x=0 and x=1).

    Parameters
    ----------
    y1, y2 : array-like
        The two value sets; lengths may differ.
    xlabs : sequence of str
        Two x tick labels, one per condition.
    ms : int
        Marker size.
    ylim, yticks : tuple, optional
        Y limits/ticks; default to the combined data range.
    ylabel : str
        Y-axis label.
    figsize : tuple
        Figure size in inches.
    returnax : bool
        If True, return the current matplotlib Axes.
    """
    if ylim is None:
        ylim = (np.min(np.hstack((y1,y2))),np.max(np.hstack((y1,y2))))
    if yticks is None:
        yticks = ylim
    plt.figure(figsize=figsize)
    # One dot column per condition, at x = 0 and x = 1.
    plt.plot(np.zeros(len(y1)),y1,'k.', ms=ms)
    plt.plot(np.ones(len(y2)),y2,'k.', ms=ms)
    plt.xlim((-1,2))
    plt.xticks([0,1], xlabs,size=20)
    plt.ylim(ylim)
    plt.yticks(yticks,fontsize=15)
    plt.ylabel(ylabel,size=20)
    plt.tight_layout()
    if returnax:
        return plt.gca()
def scatt_corr(x, y, ms = 12,
               xlim = None, ylim = None,
               xticks = None, yticks = None,
               xlabel = '', ylabel = '',
               showrp = False, ploc = (0,0), rloc = (0,1), corrtype = 'Pearson',
               showline = False,
               figsize = (5,5),
               returnax = False):
    """Scatter plot of the correlation between x and y.

    Optionally annotates r^2 and p (Pearson or Spearman) at data
    coordinates `rloc`/`ploc`, and overlays a least-squares fit line.

    Parameters
    ----------
    x, y : array-like
        Paired samples.
    ms : int
        Marker size.
    xlim, ylim, xticks, yticks : tuple, optional
        Axis limits and ticks; default to the data range.
    xlabel, ylabel : str
        Axis labels.
    showrp : bool
        If True, annotate r^2 and p on the axes.
    ploc, rloc : tuple
        Text positions for the p and r^2 annotations.
    corrtype : {'Pearson', 'Spearman'}
        Correlation statistic to report.
    showline : bool
        If True, overlay a linear fit (uses project helper tools.misc.linfit).
    figsize : tuple
        Figure size in inches.
    returnax : bool
        If True, return the current matplotlib Axes.
    """
    if xlim is None:
        xlim = (np.min(x), np.max(x))
    if ylim is None:
        ylim = (np.min(y), np.max(y))
    if xticks is None:
        xticks = xlim
    if yticks is None:
        yticks = ylim
    plt.figure(figsize=figsize)
    plt.plot(x, y, 'k.', ms=ms)
    if showline:
        from tools.misc import linfit
        linplt = linfit(x, y)
        plt.plot(linplt[0], linplt[1], 'k--')
    if showrp:
        if corrtype == 'Pearson':
            r, p = sp.stats.pearsonr(x, y)
        elif corrtype == 'Spearman':
            r, p = sp.stats.spearmanr(x, y)
        ax = plt.gca()
        # np.str was an alias of builtin str, removed in NumPy 1.24.
        ax.text(rloc[0], rloc[1], '$r^2 = $' + str(np.round(r ** 2, 2)), fontsize=15)
        ax.text(ploc[0], ploc[1], '$p = $' + str(np.round(p, 3)), fontsize=15)
    plt.xlim(xlim)
    plt.xticks(xticks, size=20)
    plt.xlabel(xlabel, size=20)
    plt.ylim(ylim)
    plt.yticks(yticks, fontsize=20)
    plt.ylabel(ylabel, size=20)
    plt.tight_layout()
    if returnax:
        return plt.gca()
def viz_ecog(x, t, tmax = 30,
             Nch_plot = 6, init_t_len = 1, init_ch_start = 0, init_t_start = 0, figsize=(20,10)):
    """
    Visualize ECoG data with interactive sliders for window length,
    start channel, and start time.

    Parameters
    ----------
    x : 2-d array
        channels by time
    t : 1-d array
        time indices corresponding to columns of x
    tmax : float
        Upper bound of the start-time slider (same units as t)
    Nch_plot : int
        Number of channels to plot in the figure
    init_t_len : float
        Initial value for length of time to plot
    init_ch_start : int
        Initial value for the first channel plotted (channels plotted sequentially)
    init_t_start : float
        Initial value for start of plotting
    figsize : tuple
        Matplotlib figure size
    """
    # Init figure
    fig, ax = plt.subplots(figsize=figsize)
    plt.subplots_adjust(left=0.04, right=0.96, top=0.98, bottom=0.1)
    # Initial draw: one stacked subplot per channel over the initial window.
    tplt = np.where(np.logical_and(t >= init_t_start, t < init_t_start + init_t_len))[0]
    for ch in range(Nch_plot):
        plt.subplot(Nch_plot, 1, ch + 1)
        plt.plot(t[tplt], x[ch + init_ch_start][tplt])
        plt.ylabel(str(ch + init_ch_start))
        plt.xlim((t[tplt[0]], t[tplt[-1]]))
        if ch == Nch_plot - 1:
            plt.xlabel('Time (s)')

    def update(val):
        # Redraw every subplot from the current slider values.
        cur_t_len = sTlen.val
        cur_ch_start = int(sCh.val)
        cur_t_start = sTstart.val
        tplt = np.where(np.logical_and(t >= cur_t_start, t < cur_t_start + cur_t_len))[0]
        for ch in range(Nch_plot):
            plt.subplot(Nch_plot, 1, ch + 1)
            plt.cla()
            plt.plot(t[tplt], x[ch + cur_ch_start][tplt])
            plt.ylabel(str(ch + cur_ch_start))
            plt.xlim((t[tplt[0]], t[tplt[-1]]))
            if ch == Nch_plot - 1:
                plt.xlabel('Time (s)')
        fig.canvas.draw_idle()

    # Make sliders
    axcolor = 'lightgoldenrodyellow'
    # BUG FIX: the 'axisbg' keyword was deprecated and then removed from
    # plt.axes (Matplotlib 2.0+); 'facecolor' is the supported spelling.
    axTlen = plt.axes([0.13, 0.01, 0.1, 0.03], facecolor=axcolor)
    axCh = plt.axes([0.28, 0.01, 0.3, 0.03], facecolor=axcolor)
    axTstart = plt.axes([0.65, 0.01, 0.3, 0.03], facecolor=axcolor)
    sTlen = Slider(axTlen, '$t_{len}$', 0.1, 10, valinit=init_t_len)
    sCh = Slider(axCh, 'Chans', 0, np.shape(x)[0] - Nch_plot, valinit=init_ch_start)
    sTstart = Slider(axTstart, '$t_{start}$', 0, tmax, valinit=init_t_start)
    sTlen.on_changed(update)
    sCh.on_changed(update)
    sTstart.on_changed(update)
    plt.show()
def color2d(X, cmap=None, clim=None, cticks=None, color_label='', plot_title='',
plot_xlabel='', plot_ylabel='',
plot_xticks_locs=[], plot_xticks_labels=[],
plot_yticks_locs=[], plot_yticks_labels=[],
interpolation='none', fontsize_major=20, fontsize_minor=10):
"""Plot the matrix X using a 2-dimensional color matrix
Note you can put this in a subplot. it d
|
ygol/odoo
|
addons/account_payment/controllers/portal.py
|
Python
|
agpl-3.0
| 1,661
| 0.005418
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and lic
|
ensing details.
from odoo.addons.account.controllers.portal import PortalAccount
from odoo.http import request
class PortalAccount(PortalAccount):
    """Portal controller extension adding payment inputs to invoice pages."""

    def _invoice_get_page_view_values(self, invoice, access_token, **kwargs):
        """Extend the invoice portal page values with payment acquirers/tokens.

        Adds 'existing_token', the available payment inputs, and 'partner_id'
        (the partner the payment token should be created for).
        """
        values = super(PortalAccount, self)._invoice_get_page_view_values(invoice, access_token, **kwargs)
        payment_inputs = request.env['payment.acquirer']._get_available_payment_input(partner=invoice.partner_id, company=invoice.company_id)
        # if not connected (using public user), the method _get_available_payment_input will return public user tokens
        is_public_user = request.env.user._is_public()
        if is_public_user:
            # we should not display payment tokens owned by the public user
            payment_inputs.pop('pms', None)
        token_count = request.env['payment.token'].sudo().search_count([
            ('acquirer_id.company_id', '=', invoice.company_id.id),
            ('partner_id', '=', invoice.partner_id.id),
        ])
        values['existing_token'] = token_count > 0
        values.update(payment_inputs)
        # if the current user is connected we set partner_id to his partner otherwise we set it as the invoice partner
        # we do this to force the creation of payment tokens to the correct partner and avoid token linked to the public user
        # BUG FIX: the original line ended with a stray trailing comma, which
        # wrapped the partner record in a 1-tuple instead of storing the record.
        values['partner_id'] = invoice.partner_id if is_public_user else request.env.user.partner_id
        return values
|
scollis/price_watch
|
scripts/get_html.py
|
Python
|
bsd-2-clause
| 379
| 0.002639
|
#!/bin/env python
"""Download the fuel-prices-europe.info front page and archive it to a
date-stamped HTML file (Python 2 script; uses urllib2)."""
import urllib2
from datetime import datetime

site = 'http://www.fuel-prices-europe.info/'
# Fetch the page; make sure the connection is released even on read errors.
fh = urllib2.urlopen(site)
try:
    lines = fh.readlines()
finally:
    fh.close()

now = datetime.now()
my_str_date = now.strftime('%Y%m%d')
outdir = '/Users/scollis/tmp/'
prefix = 'fuel'
postfix = '.html'
# Write the snapshot, e.g. /Users/scollis/tmp/fuel20240131.html
with open(outdir + prefix + my_str_date + postfix, 'w') as ofh:
    ofh.writelines(lines)
|
EmuKit/emukit
|
tests/emukit/multi_fidelity/test_convert_list_to_array.py
|
Python
|
apache-2.0
| 2,211
| 0.000905
|
import numpy as np
import pytest
from emukit.multi_fidelity.convert_lists_to_array import (
convert_x_list_to_array,
convert_xy_lists_to_arrays,
convert_y_list_to_array,
)
def test_convert_x_list_to_array():
    """Each fidelity's X gains a trailing fidelity-index column, then stacked."""
    low_fidelity = np.array([[1, 0], [2, 1]])
    high_fidelity = np.array([[3, 2], [4, 5]])
    expected = np.array([[1, 0, 0], [2, 1, 0], [3, 2, 1], [4, 5, 1]])
    assert np.array_equal(convert_x_list_to_array([low_fidelity, high_fidelity]), expected)
def test_convert_y_list_to_array():
    """Per-fidelity Y column vectors are stacked vertically in order."""
    expected = np.array([[0.0], [1.0], [2.0], [5.0]])
    stacked = convert_y_list_to_array([np.array([[0.0], [1.0]]), np.array([[2.0], [5.0]])])
    assert np.array_equal(stacked, expected)
def test_convert_xy_lists_to_arrays():
    """X gains the fidelity column and Y is stacked, consistently ordered."""
    xs = [np.array([[1, 0], [2, 1]]), np.array([[3, 2], [4, 5]])]
    ys = [np.array([[0.0], [1.0]]), np.array([[2.0], [5.0]])]
    x_out, y_out = convert_xy_lists_to_arrays(xs, ys)
    assert np.array_equal(x_out, np.array([[1, 0, 0], [2, 1, 0], [3, 2, 1], [4, 5, 1]]))
    assert np.array_equal(y_out, np.array([[0.0], [1.0], [2.0], [5.0]]))
def test_convert_y_list_to_array_fails_with_1d_input():
    """1-d Y arrays are rejected; inputs must be 2-d column vectors."""
    with pytest.raises(ValueError):
        convert_y_list_to_array([np.array([0.0, 1.0]), np.array([2.0, 5.0])])
def test_convert_x_list_to_array_fails_with_1d_input():
    """1-d X arrays are rejected; inputs must be 2-d design matrices."""
    with pytest.raises(ValueError):
        convert_x_list_to_array([np.array([0.0, 1.0]), np.array([2.0, 5.0])])
def test_convert_xy_lists_to_arrays_fails_with_different_number_of_fidelities():
    """len(x_list) must equal len(y_list) — one entry per fidelity level."""
    xs = [np.array([[1, 0], [2, 1]]), np.array([[3, 2], [4, 5]])]
    ys = [np.array([0.0, 1.0]), np.array([2.0, 5.0]), np.array([3, 6])]
    with pytest.raises(ValueError):
        convert_xy_lists_to_arrays(xs, ys)
def test_convert_xy_lists_to_arrays_fails_with_different_number_of_points_at_fidelity():
    """Reconstructed from garbled extraction: X and Y at each fidelity must
    have the same number of points (3 vs 2 at fidelity 0 here)."""
    x_list = [np.array([[1, 0], [2, 1], [3, 4]]), np.array([[3, 2], [4, 5]])]
    y_list = [np.array([0.0, 1.0]), np.array([2.0, 5.0])]
    with pytest.raises(ValueError):
        convert_xy_lists_to_arrays(x_list, y_list)
|
sesamesushi/desatisrevu
|
controllers/utils.py
|
Python
|
apache-2.0
| 24,558
| 0.000407
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
#
|
Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the L
|
icense.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers that are not directly related to course content."""
__author__ = 'Saifu Angto (saifu@google.com)'
import base64
import hmac
import os
import time
import urlparse
import appengine_config
from common import jinja_utils
from models import models
from models import transforms
from models.config import ConfigProperty
from models.config import ConfigPropertyEntity
from models.courses import Course
from models.models import Student
from models.models import StudentProfileDAO
from models.models import TransientStudent
from models.roles import Roles
import webapp2
from google.appengine.api import namespace_manager
from google.appengine.api import users
# The name of the template dict key that stores a course's base location.
COURSE_BASE_KEY = 'gcb_course_base'
# The name of the template dict key that stores data from course.yaml.
COURSE_INFO_KEY = 'course_info'
TRANSIENT_STUDENT = TransientStudent()
XSRF_SECRET_LENGTH = 20
XSRF_SECRET = ConfigProperty(
'gcb_xsrf_secret', str, (
'Text used to encrypt tokens, which help prevent Cross-site request '
'forgery (CSRF, XSRF). You can set the value to any alphanumeric text, '
'preferably using 16-64 characters. Once you change this value, the '
'server rejects all subsequent requests issued using an old value for '
'this variable.'),
'course builder XSRF secret')
# Whether to record page load/unload events in a database.
CAN_PERSIST_PAGE_EVENTS = ConfigProperty(
'gcb_can_persist_page_events', bool, (
'Whether or not to record student page interactions in a '
'datastore. Without event recording, you cannot analyze student '
'page interactions. On the other hand, no event recording reduces '
'the number of datastore operations and minimizes the use of Google '
'App Engine quota. Turn event recording on if you want to analyze '
'this data.'),
False)
# Whether to record tag events in a database.
CAN_PERSIST_TAG_EVENTS = ConfigProperty(
'gcb_can_persist_tag_events', bool, (
'Whether or not to record student tag interactions in a '
'datastore. Without event recording, you cannot analyze student '
'tag interactions. On the other hand, no event recording reduces '
'the number of datastore operations and minimizes the use of Google '
'App Engine quota. Turn event recording on if you want to analyze '
'this data.'),
False)
# Whether to record events in a database.
CAN_PERSIST_ACTIVITY_EVENTS = ConfigProperty(
'gcb_can_persist_activity_events', bool, (
'Whether or not to record student activity interactions in a '
'datastore. Without event recording, you cannot analyze student '
'activity interactions. On the other hand, no event recording reduces '
'the number of datastore operations and minimizes the use of Google '
'App Engine quota. Turn event recording on if you want to analyze '
'this data.'),
False)
# Date format string for displaying datetimes in UTC.
# Example: 2013-03-21 13:00 UTC
HUMAN_READABLE_DATETIME_FORMAT = '%Y-%m-%d, %H:%M UTC'
# Date format string for displaying dates. Example: 2013-03-21
HUMAN_READABLE_DATE_FORMAT = '%Y-%m-%d'
# Time format string for displaying times. Example: 01:16:40 UTC.
HUMAN_READABLE_TIME_FORMAT = '%H:%M:%S UTC'
class PageInitializer(object):
    """Abstract class that defines an interface to initialize page headers."""

    @classmethod
    def initialize(cls, template_value):
        """Populate *template_value* for page rendering; subclasses must override."""
        raise NotImplementedError
class DefaultPageInitializer(PageInitializer):
    """Implements default page initializer."""

    @classmethod
    def initialize(cls, template_value):
        """No-op: the default initializer adds nothing to the template."""
        pass
class PageInitializerService(object):
    """Registry holding the PageInitializer implementation currently in use."""

    _page_initializer = DefaultPageInitializer

    @classmethod
    def get(cls):
        """Return the installed PageInitializer class."""
        return cls._page_initializer

    @classmethod
    def set(cls, page_initializer):
        """Install *page_initializer* as the active PageInitializer class."""
        cls._page_initializer = page_initializer
class ReflectiveRequestHandler(object):
    """Uses reflection to handle custom get() and post() requests.

    Use this class as a mix-in with any webapp2.RequestHandler to allow request
    dispatching to multiple get() and post() methods based on the 'action'
    parameter.

    Open your existing webapp2.RequestHandler, add this class as a mix-in.
    Define the following class variables:

        default_action = 'list'
        get_actions = ['default_action', 'edit']
        post_actions = ['save']

    Add instance methods named get_list(self), get_edit(self), post_save(self).
    These methods will now be called automatically based on the 'action'
    GET/POST parameter.
    """

    def create_xsrf_token(self, action):
        """Mint an XSRF token bound to *action* for embedding in forms."""
        return XsrfTokenManager.create_xsrf_token(action)

    def get(self):
        """Handles GET by dispatching to get_<action>; 404s unknown actions."""
        action = self.request.get('action')
        if not action:
            action = self.default_action
        if action not in self.get_actions:
            self.error(404)
            return
        # BUG FIX: getattr without a default raised AttributeError when a
        # whitelisted action has no matching method, making the guard below
        # dead code; supply None so the client gets a 404 instead.
        handler = getattr(self, 'get_%s' % action, None)
        if not handler:
            self.error(404)
            return
        return handler()

    def post(self):
        """Handles POST: checks the action whitelist and the XSRF token,
        then dispatches to post_<action>."""
        action = self.request.get('action')
        if not action or action not in self.post_actions:
            self.error(404)
            return
        # BUG FIX: same getattr-default fix as in get().
        handler = getattr(self, 'post_%s' % action, None)
        if not handler:
            self.error(404)
            return
        # Each POST request must have valid XSRF token.
        xsrf_token = self.request.get('xsrf_token')
        if not XsrfTokenManager.is_xsrf_token_valid(xsrf_token, action):
            self.error(403)
            return
        return handler()
class ApplicationHandler(webapp2.RequestHandler):
"""A handler that is aware of the application context."""
@classmethod
def is_absolute(cls, url):
return bool(urlparse.urlparse(url).scheme)
    @classmethod
    def get_base_href(cls, handler):
        """Computes current course <base> href."""
        base = handler.app_context.get_slug()
        if not base.endswith('/'):
            base = '%s/' % base
        # For IE to work with the <base> tag, its href must be an absolute URL.
        if not cls.is_absolute(base):
            # Reuse the scheme/host of the incoming request to absolutize.
            parts = urlparse.urlparse(handler.request.url)
            base = urlparse.urlunparse(
                (parts.scheme, parts.netloc, base, None, None, None))
        return base
    def __init__(self, *args, **kwargs):
        super(ApplicationHandler, self).__init__(*args, **kwargs)
        # Per-request dict of values handed to the template at render time.
        self.template_value = {}
def get_template(self, template_file, additional_dirs=None):
"""Computes location of template files for the current namespace."""
self.template_value[COURSE_INFO_KEY] = self.app_context.get_environ()
self.template_value['is_course_admin'] = Roles.is_course_admin(
self.app_context)
self.template_value[
'is_read_write_course'] = self.app_context.fs.is_read_write()
self.template_value['is_super_admin'] = Roles.is_super_admin()
self.template_value[COURSE_BASE_KEY] = self.get_base_href(self)
template_environ = self.app_context.get_template_environ(
self.template_value[COURSE_INFO_KEY]['course']['locale'],
additional_dirs
)
template_environ.filters[
'gcb_tags'] = jinja_utils.get_gcb_tags_filter(self)
return templa
|
tier-one-monitoring/monstr
|
Monstr/Modules/CMSJobStatus/CMSJobStatus.py
|
Python
|
apache-2.0
| 9,424
| 0.002759
|
#!/bin/python
from datetime import timedelta
from pprint import pprint as pp
import json
import Monstr.Core.Utils as Utils
import Monstr.Core.DB as DB
import Monstr.Core.BaseModule as BaseModule
import pytz
from Monstr.Core.DB import Column, Integer, String, DateTime, UniqueConstraint, func
class CMSJobStatus(BaseModule.BaseModule):
name = 'CMSJobStatus'
table_schemas = {'main': (Column('id', Integer, primary_key=True),
Column('time', DateTime(True)),
Column('site_name', String(60)),
Column('aborted', Integer),
Column('app_succeeded', Integer),
Column('applic_failed', Integer),
Column('application_failed', Integer),
Column('cancelled', Integer),
Column('pending', Integer),
Column('running', Integer),
Column('site_failed', Integer),
Column('submitted', Integer),
UniqueConstraint("time", "site_name"),)}
status_list = [{'name': 'load', 'status': 0, 'time': Utils.get_UTC_now(), 'description': ''},
{'name': 'rank', 'status': 0, 'time': Utils.get_UTC_now(), 'description': ''},
{'name': 'site_failures', 'status': 0, 'time': Utils.get_UTC_now(), 'description': ''}]
# tables = None
config = {}
default_config = {'period': 8}
def __init__(self, config=None):
super(CMSJobStatus, self).__init__()
self.db_handler = DB.DBHandler()
self.rest_links['lastStatus'] = self.lastStatus
self.config = self.default_config
if config is not None:
self.config.update(config)
def isInteresting(self, site_name):
if site_name.startswith('T1'):
return True
if site_name.startswith('T0'):
return True
if site_name == 'T2_CH_CERN':
return True
return False
    def Retrieve(self, params):
        """Fetch hourly CMS job summaries from the Dashboard for the last
        config['period'] hours and build rows for the 'main' table.

        params is unused here; kept for the BaseModule Retrieve interface.
        Returns {'main': [row_dict, ...]} ready for InsertToDB.
        """
        # Get current time and last recorded time
        current_time = Utils.get_UTC_now().replace(minute=0, second=0, microsecond=0)
        last_time = current_time - timedelta(hours=self.config['period'])
        # Gather all data hour by hour
        update_incert_list = []
        while last_time < current_time:
            begin = last_time
            end = last_time + timedelta(hours=1)
            # '%3A' is a URL-encoded ':' — the dashboard expects 'date+HH:00'.
            time1 = '+' + str(begin.hour) + "%3A00"
            time2 = '+' + str(end.hour) + "%3A00"
            date1 = str(begin).split(' ')[0] + time1
            date2 = str(end).split(' ')[0] + time2
            url = "http://dashb-cms-job.cern.ch/dashboard/request.py/jobsummary-plot-or-table2?user=&submissiontool=&application=&activity=&status=&check=terminated&tier=&sortby=site&ce=&rb=&grid=&jobtype=&submissionui=&dataset=&submissiontype=&task=&subtoolver=&genactivity=&outputse=&appexitcode=&accesstype=&inputse=&cores=&date1=" + date1 + "&date2=" + date2 + "&prettyprint"
            json_raw = Utils.get_page(url)
            json_obj = json.loads(json_raw)['summaries']
            for obj in json_obj:
                site_name = str(obj['name'])
                # Keep only the sites this module monitors (T0/T1 + CERN T2).
                if self.isInteresting(site_name):
                    current_status = {'site_name': site_name,
                                      'time': last_time,
                                      'applic_failed': int(obj['applic-failed']),
                                      'app_succeeded': int(obj['app-succeeded']),
                                      'pending': int(obj['pending']),
                                      'running': int(obj['running']),
                                      'aborted': int(obj['aborted']),
                                      'application_failed': int(obj['application-failed']),
                                      'site_failed': int(obj['site-failed']),
                                      'cancelled': int(obj['cancelled']),
                                      'submitted': int(obj['submitted'])}
                    update_incert_list.append(current_status)
            last_time = last_time + timedelta(hours=1)
        return {'main': update_incert_list}
def InsertToDB(self, data):
for schema in data:
table = self.tables[schema]
min_time = min([x['time'] for x in data[schema]])
d = table.delete(table.c.time >= min_time)
d.execute()
self.db_handler.bulk_insert(table, data[schema])
# --------------------------------------------------------------------------
# Helper functions for Analyze
# --------------------------------------------------------------------------
def _get_sites_success_fail(self, check_time, interval=8):
from sqlalchemy.sql import select
result = {}
query = select([self.tables['main'].c.site_name,
func.sum(self.tables['main'].c.app_succeeded).label('app_succeeded'),
func.sum(self.tables['main'].c.site_failed).label('site_failed')])\
.where(self.tables['main'].c.time > check_time - timedelta(hours=interval))\
.group_by(self.tables['main'].c.site_name)
cursor = query.execute()
resultProxy = cursor.fetchall()
for row in resultProxy:
item = dict(row.items())
result[item['site_name']] = {'app_succeeded': item['app_succeeded'],
'site_failed': item['site_failed'],
'site_name': item['s
|
ite_name']}
return result
def _get_fail_ratio_status(self, fail_ratio):
return {fail_ratio < 0.01: 10,
0.01 <= fail_ratio < 0.05: 20,
0.05 <= fail_ratio < 0.12: 30,
0.12 <= fail_ratio < 0.3: 40,
0.3 <= fail_ratio: 50}[True]
def _get_load_status(self, load):
return {load > 25000: 10,
25000 >= load > 15000: 20,
|
15000 >= load > 7000: 30,
7000 >= load > 1000: 40,
1000 >= load: 50}[True]
def _get_rank_status(self, rank):
return {rank < 4: 10,
4 <= rank < 5: 20,
5 <= rank < 6: 30,
6 <= rank < 7: 40,
7 <= rank: 50}[True]
    def Analyze(self, data):
        """Derive load, rank and failure-ratio statuses for T1_RU_JINR and
        persist them via update_status. *data* is unused here."""
        new_statuses = []
        update_time = Utils.get_UTC_now()
        site_info = self._get_sites_success_fail(update_time)
        app_succeeded = site_info['T1_RU_JINR']['app_succeeded']
        site_failed = site_info['T1_RU_JINR']['site_failed']
        # NOTE(review): raises ZeroDivisionError when no jobs ran in the
        # window (site_failed + app_succeeded == 0) — confirm upstream.
        fail_ratio = 1.0 * site_failed / (site_failed + app_succeeded)
        load = site_failed + app_succeeded
        # Sites sorted by succeeded jobs, descending; JINR's 1-based position.
        sorted_list = list(reversed(sorted([site_info[site]for site in site_info], key=lambda x: x['app_succeeded'])))
        print sorted_list
        rank = [x['site_name'] for x in sorted_list].index('T1_RU_JINR') + 1
        # for i in range(0, len(sorted_list)):
        #     if sorted_list[i]['site_name'] == 'T1_RU_JINR':
        #         rank = i + 1
        new_statuses.append({'name': 'load', 'status': self._get_load_status(load), 'time': update_time, 'description': 'Load: is ' + str(load)})
        new_statuses.append({'name': 'rank', 'status': self._get_rank_status(rank), 'time': update_time, 'description': 'Rank: is ' + str(rank)})
        new_statuses.append({'name': 'site_failures', 'status': self._get_fail_ratio_status(fail_ratio), 'time': update_time, 'description': 'Fail ratio: is ' + str(fail_ratio)})
        self.update_status(new_statuses)
# ==========================================================================
# Web
# ==========================================================================
def lastStatus(self, incoming_params):
response = {}
try:
default_params = {'delta': 8}
params = self._create_params(default_params, incoming_params)
result = []
max_time = self.
|
lsaffre/lino-cosi
|
lino_cosi/setup_info.py
|
Python
|
agpl-3.0
| 3,193
| 0.000627
|
# -*- coding: UTF-8 -*-
# Copyright 2014-2021 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)

# Distribution metadata for the lino-cosi package, consumed by setup.py.
# (Reconstructed: the coding cookie and the description string were garbled
# by extraction.)
SETUP_INFO = dict(
    name='lino-cosi',
    version='21.3.0',
    install_requires=['lino-xl', 'django-iban', 'lxml'],
    # tests_require=['beautifulsoup4'], # satisfied by lino deps
    test_suite='tests',
    description="A Lino application to make accounting simple",
    long_description="""

**Lino Così** is a
`Lino application <http://www.lino-framework.org/>`__
for accounting (`more <https://cosi.lino-framework.org/about.html>`__).

- The central project homepage is http://cosi.lino-framework.org

- You can try it yourself in `our demo sites
  <https://www.lino-framework.org/demos.html>`__

- We have some `end-user documentation in German
  <https://de.cosi.lino-framework.org/>`__

- Technical specs are at https://www.lino-framework.org/specs/cosi

- This is an integral part of the Lino framework, which is documented
  at https://www.lino-framework.org

- The changelog is at https://www.lino-framework.org/changes

- For introductions, commercial information and hosting solutions
  see https://www.saffre-rumma.net

- This is a sustainably free open-source project. Your contributions are
  welcome.  See https://community.lino-framework.org for details.

""",
    author='Luc Saffre',
    author_email='luc.saffre@gmail.com',
    url="https://github.com/lino-framework/cosi",
    license_files=['COPYING'],
    classifiers="""\
Programming Language :: Python
Programming Language :: Python :: 3
Development Status :: 5 - Production/Stable
Environment :: Web Environment
Framework :: Django
Intended Audience :: Developers
Intended Audience :: System Administrators
License :: OSI Approved :: GNU Affero General Public License v3
Operating System :: OS Independent
Topic :: Office/Business :: Financial :: Accounting
""".splitlines())

SETUP_INFO.update(packages=[
    'lino_cosi',
    'lino_cosi.lib',
    'lino_cosi.lib.cosi',
    'lino_cosi.lib.contacts',
    'lino_cosi.lib.contacts.fixtures',
    'lino_cosi.lib.contacts.management',
    'lino_cosi.lib.contacts.management.commands',
    'lino_cosi.lib.products',
    'lino_cosi.lib.products.fixtures',
    'lino_cosi.lib.orders',
])

# Babel extraction configuration for translatable strings.
SETUP_INFO.update(message_extractors={
    'lino_cosi': [
        ('**/cache/**', 'ignore', None),
        ('**.py', 'python', None),
        ('**.js', 'javascript', None),
        ('**/templates_jinja/**.html', 'jinja2', None),
    ],
})

SETUP_INFO.update(
    # package_data=dict(),
    zip_safe=False,
    include_package_data=True)

# def add_package_data(package, *patterns):
#     l = SETUP_INFO['package_data'].setdefault(package, [])
#     l.extend(patterns)
#     return l

# ~ add_package_data('lino_cosi',
# ~ 'config/patrols/Patrol/*.odt',
# ~ 'config/patrols/Overview/*.odt')

# l = add_package_data('lino_cosi.lib.cosi')
# for lng in 'de fr'.split():
#     l.append('lino_cosi/lib/cosi/locale/%s/LC_MESSAGES/*.mo' % lng)
# l = add_package_data('lino_xl.lib.sepa',
#                      'lino_xl.lib/sepa/config/iban/*')
# 'config/iban/*')
# print 20160820, SETUP_INFO['package_data']
# raw_input()
|
Hedde/fabric_interface
|
src/fabric_interface/hosts/context_processors.py
|
Python
|
mit
| 548
| 0.001825
|
__author__ = 'heddevanderheide'
# Django spec
|
ific
from fabric_interface.projects.models import Project
from fabric_interface.hosts.models import Host
def hosts(request):
    """
    Adds host QuerySet context variable to the context.

    On project-scoped views (view name contains 'project_' and a slug kwarg
    is present) the queryset is restricted to that project's hosts;
    otherwise every host is exposed.
    """
    match = request.resolver_match
    slug = match.kwargs.get('slug')
    if slug and 'project_' in match.view_name:
        queryset = Host.objects.filter(projects__slug=slug)
    else:
        queryset = Host.objects.all()
    return {'host_list': queryset}
|
mhbu50/frappe
|
frappe/printing/doctype/print_style/print_style.py
|
Python
|
mit
| 664
| 0.024096
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Tec
|
hnologies and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
class PrintStyle(Document):
    """Print Style doctype controller: protects standard styles from edits
    and exports standard styles as module fixtures on update.
    (Rebuilt to drop a stray extraction-junk line inside the class body.)
    """

    def validate(self):
        # Standard styles ship with the app; outside developer mode (and
        # outside import/test runs) they must not be edited in place.
        if (self.standard == 1
                and not frappe.local.conf.get("developer_mode")
                and not (frappe.flags.in_import or frappe.flags.in_test)):
            frappe.throw(frappe._("Standard Print Style cannot be changed. Please duplicate to edit."))

    def on_update(self):
        self.export_doc()

    def export_doc(self):
        # Export the document as JSON under the 'Printing' module when it is
        # flagged standard, so it is versioned with the codebase.
        from frappe.modules.utils import export_module_json
        export_module_json(self, self.standard == 1, 'Printing')
|
ymap/aioredis
|
aioredis/errors.py
|
Python
|
mit
| 2,627
| 0
|
# Public names exported by ``from aioredis.errors import *``.
__all__ = [
    'RedisError',
    'ProtocolError',
    'ReplyError',
    'MaxClientsError',
    'AuthError',
    'PipelineError',
    'MultiExecError',
    'WatchVariableError',
    'ChannelClosedError',
    'ConnectionClosedError',
    'ConnectionForcedCloseError',
    'PoolClosedError',
    'MasterNotFoundError',
    'SlaveNotFoundError',
    'ReadOnlyError',
    ]
class RedisError(Exception):
    """Base exception class for aioredis exceptions."""
class ProtocolError(RedisError):
    """Raised when protocol error occurs."""
class ReplyError(RedisError):
    """Raised for redis error replies (-ERR)."""

    MATCH_REPLY = None

    def __new__(cls, msg, *args):
        # Dispatch to the most specific subclass whose MATCH_REPLY prefix
        # matches the reply text; otherwise fall back to this class.
        if msg:
            for subclass in cls.__subclasses__():
                prefix = subclass.MATCH_REPLY
                if prefix and msg.startswith(prefix):
                    return subclass(msg, *args)
        return super().__new__(cls, msg, *args)
class MaxClientsError(ReplyError):
    """Raised for redis server when the maximum number of client has been
    reached."""
    # Prefix matched by ReplyError.__new__ for subclass dispatch.
    MATCH_REPLY = "ERR max number of clients reached"
class AuthError(ReplyError):
    """Raised when authentication errors occurs."""
    MATCH_REPLY = ("NOAUTH ", "ERR invalid password")
class PipelineError(RedisError):
    """Raised if command within pipeline raised error."""
    def __init__(self, errors):
        # *errors* is the collection of per-command exceptions.
        super().__init__('{} errors:'.format(self.__class__.__name__), errors)
class MultiExecError(PipelineError):
    """Raised if command within MULTI/EXEC block caused error."""
class WatchVariableError(MultiExecError):
    """Raised if watched variable changed (EXEC returns None)."""
class ChannelClosedError(RedisError):
    """Raised when Pub/Sub channel is unsubscribed and messages queue is empty.
    """
class ReadOnlyError(RedisError):
    """Raised from slave when read-only mode is enabled"""
class MasterNotFoundError(RedisError):
    """Raised for sentinel master not found error."""
class SlaveNotFoundError(RedisError):
    """Raised for sentinel slave not found error."""
# NOTE(review): MasterReplyError, SlaveReplyError and RedisClusterError are
# not listed in __all__ above — confirm whether that omission is intentional.
class MasterReplyError(RedisError):
    """Raised by sentinel client for master error replies."""
class SlaveReplyError(RedisError):
    """Raised by sentinel client for slave error replies."""
class ConnectionClosedError(RedisError):
    """Raised if connection to server was closed."""
class ConnectionForcedCloseError(ConnectionClosedError):
    """Raised if connection was closed with .close() method."""
class PoolClosedError(RedisError):
    """Raised if pool is closed."""
class RedisClusterError(RedisError):
    """Cluster exception class for aioredis exceptions."""
|
danielreed/python-hpOneView
|
hpOneView/resources/networking/ethernet_networks.py
|
Python
|
mit
| 9,856
| 0.002334
|
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
__title__ = 'ethernet-networks'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2016) Hewlett Packard Enterprise ' \
' Development LP'
__license__ = 'MIT'
__status__ = 'Development'
from hpOneView.resources.resource import ResourceClient
class EthernetNetworks(object):
URI = '/rest/ethernet-networks'
    def __init__(self, con):
        # OneView connection and the generic REST client bound to this URI.
        self._connection = con
        self._client = ResourceClient(con, self.URI)
        # Defaults merged into every payload passed to create().
        self.__default_values = {
            "ethernetNetworkType": "Tagged",
            "type": "ethernet-networkV3"
        }
    def get_all(self, start=0, count=-1, filter='', sort=''):
        """
        Gets a paginated collection of Ethernet networks. The collection is based on optional sorting and filtering,
        and constrained by start and count parameters.

        Args:
            start:
                The first item to return, using 0-based indexing.
                If not specified, the default is 0 - start with the first available item.
            count:
                The number of resources to return. A count of -1 requests all the items.
                The actual number of items in the response may differ from the requested
                count if the sum of start and count exceed the total number of items.
            filter:
                A general filter/query string to narrow the list of items returned. The
                default is no filter - all resources are returned.
            sort:
                The sort order of the returned data set. By default, the sort order is based
                on create time, with the oldest entry first.

        Returns:
            list: A list of ethernet networks.
        """
        # 'filter' deliberately mirrors the REST query parameter name,
        # shadowing the builtin within this method.
        return self._client.get_all(start, count, filter=filter, sort=sort)
    def delete(self, resource, force=False, timeout=-1):
        """
        Deletes an Ethernet network.

        Any deployed connections that are using the network are placed in the 'Failed' state.

        Args:
            resource: dict object to delete
            force:
                If set to true the operation completes despite any problems with
                network connectivity or errors on the resource itself. The default is false.
            timeout:
                Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
                in OneView, just stops waiting for its completion.

        Returns:
            bool: Indicating if the resource was successfully deleted.
        """
        # Thin delegation to the generic ResourceClient delete.
        return self._client.delete(resource, force=force, timeout=timeout)
    def get(self, id_or_uri):
        """
        Gets the Ethernet network.

        Args:
            id_or_uri: ID or uri of Ethernet network.

        Returns:
            dict: The ethernet network.
        """
        # Accepts either a bare resource ID or a full REST URI.
        return self._client.get(id_or_uri)
    def create(self, resource, timeout=-1):
        """
        Creates an Ethernet network.

        Args:
            resource (dict): Object to create.
            timeout:
                Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
                in OneView, just stops waiting for its completion.

        Returns:
            dict: Created resource.
        """
        # Copy the class defaults so the shared dict is never mutated,
        # then let caller-supplied keys override them.
        data = self.__default_values.copy()
        data.update(resource)
        return self._client.create(data, timeout=timeout)
def create_bulk(self, resource, timeout=-1):
"""
Creates bulk Ethernet networks.
Args:
resource (dict): Specifications to create in bulk.
time
|
out:
Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Returns:
list
|
: List of created Ethernet Networks.
"""
data = {"type": "bulk-ethernet-network"}
data.update(resource)
uri = self.URI + '/bulk'
self._client.create(data, uri=uri, timeout=timeout)
return self.get_range(resource['namePrefix'], resource['vlanIdRange'])
def get_range(self, name_prefix, vlan_id_range):
"""
Gets a list of Ethernet Networks that match the 'given name_prefix' and the 'vlan_id_range'.
Examples:
>>> enet.get_range('Enet_name', '1-2,5')
# The result contains the ethernet network with names:
['Enet_name_1', 'Enet_name_2', 'Enet_name_5']
>>> enet.get_range('Enet_name', '2')
# The result contains the ethernet network with names:
['Enet_name_1', 'Enet_name_2']
Args:
name_prefix: The Ethernet Network prefix
vlan_id_range: A combination of values or ranges to be retrieved. For example '1-10,50,51,500-700'.
Returns:
list: A list of Ethernet Networks.
"""
filter = '"\'name\' matches \'{}\_%\'"'.format(name_prefix)
ethernet_networks = self.get_all(filter=filter, sort='vlanId:ascending')
vlan_ids = self.dissociate_values_or_ranges(vlan_id_range)
for net in ethernet_networks[:]:
if int(net['vlanId']) not in vlan_ids:
ethernet_networks.remove(net)
return ethernet_networks
def dissociate_values_or_ranges(self, vlan_id_range):
"""
Build a list of vlan ids given a combination of ranges and/or values
Examples:
>>> enet.dissociate_values_or_ranges('1-2,5')
[1, 2, 5]
>>> enet.dissociate_values_or_ranges('5')
[1, 2, 3, 4, 5]
>>> enet.dissociate_values_or_ranges('4-5,7-8')
[4, 5, 7, 8]
Args:
vlan_id_range: A combination of values or ranges. For example '1-10,50,51,500-700'.
Returns:
list: vlan ids
"""
values_or_ranges = vlan_id_range.split(',')
vlan_ids = []
# The expected result is different if the vlan_id_range contains only one value
if len(values_or_ranges) == 1 and '-' not in values_or_ranges[0]:
vlan_ids = list(range(1, int(values_or_ranges[0]) + 1))
else:
for value_or_range in values_or_ranges:
value_or_range.strip()
if '-' not in value_or_range:
vlan_ids.append(int(value_or_range))
else:
start, end = value_or_range.split('-')
range_ids = range(int(start), int(end) + 1)
vlan_ids.extend(range_ids)
return vlan_ids
def update(self, resource, timeout=-1):
"""
Updates an Ethernet network.
Args:
resour
|
thread/django-yadt
|
django_yadt/management/commands/yadt_gc.py
|
Python
|
bsd-3-clause
| 1,117
| 0.000895
|
import os
from django.core.files.storage import default_storage
from django.core.management.base import BaseCommand, CommandError
from ...utils import get_variant
class Command(BaseCommand):
    """Report variant image files on disk that no database row references."""

    USAGE = "<app_label> <model> <field> <variant>"

    def handle(self, *args, **options):
        try:
            app_label, model_name, field_name, variant_name = args
        except ValueError:
            raise CommandError(self.USAGE)

        variant = get_variant(app_label, model_name, field_name, variant_name)
        upload_root = variant.image.field.upload_to

        # Filenames currently referenced by rows in the database.
        referenced = set(
            getattr(getattr(instance, field_name), variant_name).filename
            for instance in variant.image.field.model._default_manager.all()
        )

        # Filenames actually present in the variant's storage directory.
        variant_dir = os.path.join(upload_root, variant.name)
        present = set(
            os.path.join(upload_root, variant.name, entry)
            for entry in os.listdir(default_storage.path(variant_dir))
        )

        # Anything on disk but unreferenced is a garbage-collection candidate.
        for orphan in present.difference(referenced):
            print("I: Can be deleted: %s" % orphan)
|
ccubed/AngelBot
|
Currency.py
|
Python
|
mit
| 5,135
| 0.002532
|
import aiohttp
class Currency:
    """Chat commands for currency conversion backed by the fixer.io HTTP API."""

    def __init__(self, client):
        self.apiurl = "https://api.fixer.io"
        # ISO 4217 code -> display name for every currency fixer.io serves.
        self.currencies = {'USD': 'US Dollar',
                           'JPY': 'Japanese Yen',
                           'BGN': 'Bulgarian Lev',
                           'CZK': 'Czech Koruna',
                           'DKK': 'Danish Krone',
                           'GBP': 'Pound Sterling',
                           'HUF': 'Hungarian Forint',
                           'PLN': 'Polish Zloty',
                           'RON': 'Romanian Leu',
                           'SEK': 'Swedish Krona',
                           'CHF': 'Swiss Franc',
                           'NOK': 'Norwegian Krone',
                           'HRK': 'Croatian Kuna',
                           'RUB': 'Russian Rouble',
                           'TRY': 'Turkish Lira',
                           'AUD': 'Australian Dollar',
                           'BRL': 'Brazilian Real',
                           'CAD': 'Canadian Dollar',
                           'CNY': 'Chinese Yuan Renminbi',
                           'HKD': 'Hong Kong Dollar',
                           'IDR': 'Indonesian Rupiah',
                           'ILS': 'Israeli Shekel',
                           'INR': 'Indian Rupee',
                           'KRW': 'South Korean Won',
                           'MXN': 'Mexican Peso',
                           'MYR': 'Malaysian Ringgit',
                           'NZD': 'New Zealand Dollar',
                           'PHP': 'Philippine Peso',
                           'SGD': 'Singapore Dollar',
                           'THB': 'Thai Baht',
                           'ZAR': 'South African Rand',
                           'EUR': 'Euro'}
        # (command name, handler) pairs consumed by the bot dispatcher.
        self.commands = [['convert', self.convert], ['currencies', self.currencylist], ['rates', self.latest]]
        self.bot = client

    async def convert(self, message):
        """
        #convert x [currency] to [other currency base]
        """
        currency_from = " ".join(message.content.split("to")[0].split()[2:])
        currency_to = message.content.split("to")[1].strip()
        if currency_from not in self.currencies.keys() or currency_to not in self.currencies.keys():
            await self.bot.send_message(message.channel, "Please make sure to enter a valid currency. Valid currencies are as follows.")
            await self.bot.send_message(message.channel, await self.currencylist())
            # Fix: stop here — the original fell through and queried the API
            # with an invalid currency code.
            return
        amt = message.content.split("to")[0].split()[1]
        try:
            amt = self.convertcurrency(amt)
        except ValueError:
            await self.bot.send_message(message.channel, "Amount to convert needs to be a number.")
            # Fix: stop here — amt is still a string, so the arithmetic
            # below would raise.
            return
        async with aiohttp.ClientSession() as session:
            async with session.get(self.apiurl+"/latest", params={"base": currency_from, "symbols": currency_to}, headers={'User-Agent': 'AngelBot 2 (Python 3.5.1 AioHTTP)'}) as response:
                if response.status == 200:
                    jsd = await response.json()
                    if len(jsd['rates']) > 0:
                        conversion = amt * float(jsd['rates'][currency_to])
                        await self.bot.send_message(message.channel, "{} {} is {} {} based on data as of {}.".format(amt, self.currencies[currency_from], round(conversion, 2), self.currencies[currency_to], jsd['date']))
                    else:
                        await self.bot.send_message(message.channel, "I can't convert {} to {} because Fixer.io has no currency rate information on that specific combination.".format(self.currencies[currency_from], self.currencies[currency_to]))

    async def latest(self, message):
        """
        #currency [base]
        """
        # Optional base-currency argument; default to GBP.
        if len(message.content.split()) == 2:
            base = message.content.split()[1]
        else:
            base = "GBP"
        async with aiohttp.ClientSession() as session:
            async with session.get(self.apiurl+"/latest", params={'base': base}, headers={'User-Agent': 'AngelBot 2 (Python 3.5.1 AioHTTP)'}) as response:
                if response.status == 200:
                    jsd = await response.json()
                    msg = "Conversion rates against 1 {} as of {}\n".format(self.currencies[jsd['base']], jsd['date'])
                    msg += "\n".join(["{}: {}".format(self.currencies[x], jsd['rates'][x]) for x in jsd['rates']])
                    await self.bot.send_message(message.channel, msg)
                else:
                    await self.bot.send_message(message.channel, "I wasn't able to query Fixer.io for currency data from the European Central Bank right now. Please try again later.")

    async def currencylist(self):
        # One "CODE: Name" line per supported currency.
        return "\n".join(["{}: {}".format(x, self.currencies[x]) for x in self.currencies])

    @staticmethod
    def convertcurrency(number):
        """Parse *number* (optionally prefixed, e.g. '$5') as a float.

        Raises:
            ValueError: if neither the string nor the string minus its first
                character parses as a number. (Fix: the original *returned*
                the ValueError class here, which the caller's
                ``except ValueError`` could never catch.)
        """
        try:
            return float(number)
        except ValueError:
            return float(number[1:])
|
yk5/incubator-airflow
|
airflow/contrib/kubernetes/kubernetes_request_factory/kubernetes_request_factory.py
|
Python
|
apache-2.0
| 6,584
| 0.000456
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from abc import ABCMeta, abstractmethod
import six
class KubernetesRequestFactory:
    """
    Create requests to be sent to kube API.

    Extend this class to talk to kubernetes and generate your specific
    resources. This is equivalent of generating yaml files that can be
    used by `kubectl`.
    """
    # NOTE(review): py2-style metaclass hook; under Python 3 this assignment
    # has no effect, so @abstractmethod is not actually enforced there.
    __metaclass__ = ABCMeta

    @abstractmethod
    def create(self, pod):
        """
        Creates the request for kubernetes API.

        :param pod: The pod object
        """
        pass

    @staticmethod
    def extract_image(pod, req):
        req['spec']['containers'][0]['image'] = pod.image

    @staticmethod
    def extract_image_pull_policy(pod, req):
        # Only set when configured; otherwise kube applies its own default.
        if pod.image_pull_policy:
            req['spec']['containers'][0]['imagePullPolicy'] = pod.image_pull_policy

    @staticmethod
    def add_secret_to_env(env, secret):
        # Expose one secret key as an environment variable via secretKeyRef.
        env.append({
            'name': secret.deploy_target,
            'valueFrom': {
                'secretKeyRef': {
                    'name': secret.secret,
                    'key': secret.key
                }
            }
        })

    @staticmethod
    def extract_labels(pod, req):
        req['metadata']['labels'] = req['metadata'].get('labels', {})
        for k, v in six.iteritems(pod.labels):
            req['metadata']['labels'][k] = v

    @staticmethod
    def extract_annotations(pod, req):
        req['metadata']['annotations'] = req['metadata'].get('annotations', {})
        for k, v in six.iteritems(pod.annotations):
            req['metadata']['annotations'][k] = v

    @staticmethod
    def extract_affinity(pod, req):
        req['spec']['affinity'] = req['spec'].get('affinity', {})
        for k, v in six.iteritems(pod.affinity):
            req['spec']['affinity'][k] = v

    @staticmethod
    def extract_cmds(pod, req):
        req['spec']['containers'][0]['command'] = pod.cmds

    @staticmethod
    def extract_args(pod, req):
        req['spec']['containers'][0]['args'] = pod.args

    @staticmethod
    def extract_node_selector(pod, req):
        if len(pod.node_selectors) > 0:
            req['spec']['nodeSelector'] = pod.node_selectors

    @staticmethod
    def attach_volumes(pod, req):
        req['spec']['volumes'] = (
            req['spec'].get('volumes', []))
        if len(pod.volumes) > 0:
            req['spec']['volumes'].extend(pod.volumes)

    @staticmethod
    def attach_volume_mounts(pod, req):
        if len(pod.volume_mounts) > 0:
            req['spec']['containers'][0]['volumeMounts'] = (
                req['spec']['containers'][0].get('volumeMounts', []))
            req['spec']['containers'][0]['volumeMounts'].extend(pod.volume_mounts)

    @staticmethod
    def extract_name(pod, req):
        req['metadata']['name'] = pod.name

    @staticmethod
    def extract_volume_secrets(pod, req):
        # Secrets with deploy_type 'volume' become one read-only volume each.
        vol_secrets = [s for s in pod.secrets if s.deploy_type == 'volume']
        if any(vol_secrets):
            req['spec']['containers'][0]['volumeMounts'] = (
                req['spec']['containers'][0].get('volumeMounts', []))
            req['spec']['volumes'] = (
                req['spec'].get('volumes', []))
            for idx, vol in enumerate(vol_secrets):
                vol_id = 'secretvol' + str(idx)
                req['spec']['containers'][0]['volumeMounts'].append({
                    'mountPath': vol.deploy_target,
                    'name': vol_id,
                    'readOnly': True
                })
                req['spec']['volumes'].append({
                    'name': vol_id,
                    'secret': {
                        'secretName': vol.secret
                    }
                })

    @staticmethod
    def extract_env_and_secrets(pod, req):
        # Plain env vars plus secrets with deploy_type 'env'.
        env_secrets = [s for s in pod.secrets if s.deploy_type == 'env']
        if len(pod.envs) > 0 or len(env_secrets) > 0:
            env = []
            for k in pod.envs.keys():
                env.append({'name': k, 'value': pod.envs[k]})
            for secret in env_secrets:
                KubernetesRequestFactory.add_secret_to_env(env, secret)
            req['spec']['containers'][0]['env'] = env

    @staticmethod
    def extract_resources(pod, req):
        if not pod.resources or pod.resources.is_empty_resource_request():
            return
        req['spec']['containers'][0]['resources'] = {}
        if pod.resources.has_requests():
            req['spec']['containers'][0]['resources']['requests'] = {}
            if pod.resources.request_memory:
                req['spec']['containers'][0]['resources']['requests'][
                    'memory'] = pod.resources.request_memory
            if pod.resources.request_cpu:
                req['spec']['containers'][0]['resources']['requests'][
                    'cpu'] = pod.resources.request_cpu
        if pod.resources.has_limits():
            req['spec']['containers'][0]['resources']['limits'] = {}
            # Fix: gate each limit on the *limit* fields. The original tested
            # request_memory/request_cpu here, silently dropping limits
            # whenever only limits were configured.
            if pod.resources.limit_memory:
                req['spec']['containers'][0]['resources']['limits'][
                    'memory'] = pod.resources.limit_memory
            if pod.resources.limit_cpu:
                req['spec']['containers'][0]['resources']['limits'][
                    'cpu'] = pod.resources.limit_cpu

    @staticmethod
    def extract_init_containers(pod, req):
        if pod.init_containers:
            req['spec']['initContainers'] = pod.init_containers

    @staticmethod
    def extract_service_account_name(pod, req):
        if pod.service_account_name:
            req['spec']['serviceAccountName'] = pod.service_account_name

    @staticmethod
    def extract_image_pull_secrets(pod, req):
        if pod.image_pull_secrets:
            req['spec']['imagePullSecrets'] = [{
                'name': pull_secret
            } for pull_secret in pod.image_pull_secrets.split(',')]
|
huggingface/transformers
|
src/transformers/utils/dummy_sentencepiece_and_speech_objects.py
|
Python
|
apache-2.0
| 347
| 0
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
# flake8: noqa
from ..file_utils import DummyObject, requires_backends
class Speech2TextProcessor(metaclass=DummyObject):
    """Import-time stand-in raising a helpful error when the real class is used."""

    _backends = ["sentencepiece", "speech"]

    def __init__(self, *args, **kwargs):
        # Raises an ImportError telling the user to install the
        # "sentencepiece" and "speech" extras.
        requires_backends(self, ["sentencepiece", "speech"])
|
EmanueleCannizzaro/scons
|
bin/scons_dev_master.py
|
Python
|
mit
| 5,698
| 0.003335
|
#!/usr/bin/env python
#
# A script for turning a generic Ubuntu system into a master for
# SCons development.
import getopt
import sys
from Command import CommandRunner, Usage
# Packages needed first, to check out the SCons tree.
INITIAL_PACKAGES = [
    'subversion',
]
# Packages needed to fetch other installers.
INSTALL_PACKAGES = [
    'wget',
]
# Toolchain for building Python interpreters from source.
PYTHON_PACKAGES = [
    'g++',
    'gcc',
    'make',
    'zlib1g-dev',
]
# Packages needed to build SCons packages and documentation.
BUILDING_PACKAGES = [
    'python-libxml2',
    'python-libxslt1',
    'fop',
    'python-dev',
    'python-epydoc',
    'rpm',
    'tar',
    # additional packages that Bill Deegan's web page suggests
    #'docbook-to-man',
    #'docbook-xsl',
    #'docbook2x',
    #'tetex-bin',
    #'tetex-latex',
    # for ubuntu 9.10
    # 'texlive-lang-french'
]
# Reference documentation packages (not installed by any default action).
DOCUMENTATION_PACKAGES = [
    'docbook-doc',
    'epydoc-doc',
    'gcc-doc',
    'pkg-config',
    'python-doc',
    'sun-java5-doc',
    'sun-java6-doc',
    'swig-doc',
    'texlive-doc',
]
# Tools exercised by the SCons test suite.
TESTING_PACKAGES = [
    'bison',
    'cssc',
    'cvs',
    'flex',
    'g++',
    'gcc',
    'gcj',
    'ghostscript',
    # 'libgcj7-dev',
    'm4',
    'openssh-client',
    'openssh-server',
    'python-profiler',
    'python-all-dev',
    'rcs',
    'rpm',
    # 'sun-java5-jdk',
    'sun-java6-jdk',
    'swig',
    'texlive-base-bin',
    'texlive-extra-utils',
    'texlive-latex-base',
    'texlive-latex-extra',
    'zip',
]
# Packages for running a BuildBot slave (optional 'buildbot' action).
BUILDBOT_PACKAGES = [
    'buildbot',
    'cron',
]
# Actions executed, in this order, when none are given on the command line.
default_args = [
    'upgrade',
    'checkout',
    'building',
    'testing',
    'python-versions',
    'scons-versions',
]
def main(argv=None):
if argv is None:
argv = sys.argv
short_options = 'hnqy'
long_options = ['help', 'no-exec', 'password=', 'quiet', 'username=',
'yes', 'assume-yes']
helpstr = """\
Usage: scons_dev_master.py [-hnqy] [--password PASSWORD] [--username USER]
[ACTIONS ...]
ACTIONS (in default order):
upgrade Upgrade the system
checkout Check out SCons
building Install packages for building SCons
testing Install packages for testing SCons
scons-versions Install versions of SCons
python-versions Install versions of Python
ACTIONS (optional):
buildbot Install packages for running BuildBot
"""
scons_url = 'http://scons.tigris.org/svn/scons/trunk'
sudo = 'sudo'
password = '""'
username = 'guest'
yesflag = ''
try:
try:
opts, args = getopt.getopt(argv[1:], short_options, long_options)
except getopt.error, msg:
raise Usage(msg)
for o, a in opts:
if o in ('-h', '--help'):
print helpstr
sys.exit(0)
elif o in ('-n', '--no-exec'):
CommandRunner.execute = CommandRunner.do_not_execute
elif o in ('--password'):
password = a
elif o in ('-q', '--quiet'):
CommandRunner.display = CommandRunner.do_not_display
elif o in ('--username'):
username = a
elif o in ('-y', '--yes', '--assume-yes'):
yesflag = o
except Usage, err:
sys.stderr.write(str(err.msg) + '\n')
sys.stderr.write('use -h to get help\n')
return 2
if not args:
args = default_args
initial_packages = ' '.join(INITIAL_PACKAGES)
install_packages = ' '.join(INSTALL_PACKAGES)
building_packages = ' '.join(BUILDING_PACKAGES)
testing_packages = ' '.join(TESTING_PACKAGES)
buildbot_packages = ' '.join(BUILDBOT_PACKAGES)
python_packages = ' '.join(PYTHON_PACKAGES)
cmd = CommandRunner(locals())
for arg in args:
if arg == 'upgrade':
cmd.run('%(sudo)s apt-get %(yesflag)s upgrade')
elif arg == 'checkout':
cmd.run('%(sudo)s apt-get %(yesflag)s install %(initial_packages)s')
cmd.run('svn co --username guest --password "" %(scons_url)s')
elif arg == 'building':
cmd.run('%(sudo)s apt-get %(yesflag)s install %(building_packages)s')
elif arg == 'testing':
|
cmd.run('%(sudo)s apt-get %(yesflag)s install %(testing_packages)s')
elif arg == 'buildbot':
cmd.run('%(sudo)s apt-get %(yesflag)s install %(buildbot_packages)s')
elif arg == 'python-versions':
if install_packages:
|
cmd.run('%(sudo)s apt-get %(yesflag)s install %(install_packages)s')
install_packages = None
cmd.run('%(sudo)s apt-get %(yesflag)s install %(python_packages)s')
try:
import install_python
except ImportError:
msg = 'Could not import install_python; skipping python-versions.\n'
sys.stderr.write(msg)
else:
install_python.main(['install_python.py', '-a'])
elif arg == 'scons-versions':
if install_packages:
cmd.run('%(sudo)s apt-get %(yesflag)s install %(install_packages)s')
install_packages = None
try:
import install_scons
except ImportError:
msg = 'Could not import install_scons; skipping scons-versions.\n'
sys.stderr.write(msg)
else:
install_scons.main(['install_scons.py', '-a'])
else:
msg = '%s: unknown argument %s\n'
sys.stderr.write(msg % (argv[0], repr(arg)))
sys.exit(1)
# Script entry point; propagate main()'s status code to the shell.
if __name__ == "__main__":
    sys.exit(main())
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
hansenhahn/playton-2
|
Programas/tl_overlay.py
|
Python
|
cc0-1.0
| 1,277
| 0.052467
|
#!/usr/bin/env python
# -*- coding: windows-1252 -*-
'''
Created on 17/04/2018
@author: diego.hahn
'''
import time
import re
import glob
import os.path
import struct
import array
import sys
import mmap
if __name__ == '__main__':
import argparse
os.chdir( sys.path[0] )
parser = argparse.Argument
|
Parser()
parser.add_argument( '-s0', dest = "src0", type = str, nargs = "?", required = True )
parser.add_argument( '-s1', dest = "src1", type = str, nargs = "?", required = True )
parser.add_argument( '-n', dest = "num", type = int , required = True )
args = parser.parse_args()
print "Updating overlay for file number {0}".format( args.num )
with open( args.src1 , "rb" ) as fd:
fd.seek( 0, 2 )
size0 = fd.tell() # Tamanho comprimido
fd.seek( -8, 1 )
header3, header1 = struct.unpack('<LL', fd.read(8))
header3 = header3 & 0x00FFFFFF
size1 = header1 + header3 # Tamanho descomprimido
with open( args.src0 , "r+b" ) as fd:
fd.seek( args.num * 0x20 + 8 )
fd.write( struct.pack( "<L", size1 ) )
fd.seek( args.num * 0x20 + 28 )
fd.write( struct.pack( "<L", size0 | 0x01000000 ) )
|
ingadhoc/website
|
l10n_ar_website_sale_ux/__manifest__.py
|
Python
|
agpl-3.0
| 1,366
| 0
|
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest dictionary (standard __manifest__.py structure).
{
    'name': 'l10n_ar Website Sale UX',
    'category': 'base.module_category_knowledge_management',
    'version': '13.0.1.1.0',
    'author': 'ADHOC SA',
    'website': 'www.adhoc.com.ar',
    'license': 'AGPL-3',
    'depends': [
        'product',
        'website_sale',
    ],
    'data': [
        'views/l10n_ar_website_sale_ux.xml',
        'views/l10n_ar_website_sale_hide_taxes.xml'
    ],
    # Module is kept in the repository but disabled for this branch.
    'installable': False,
}
|
tkcroat/Augerquant
|
Development/Auger_integquant_functions_11Nov16backup.py
|
Python
|
mit
| 64,350
| 0.024149
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 11 08:08:52 2016
@author: tkc
"""
import re
from collections import defaultdict
import pandas as pd
import numpy as np
import scipy
import scipy.stats
from scipy import optimize
from math import factorial # used by Savgol matrix
from scipy.optimize import curve_fit
#%%
def organizecolumns(df1,mycols):
    ''' Pass df and template (list of desired columns in desired order) and return reorganized newdf

    The passed frame is not modified; every name in mycols must exist in df1.
    '''
    # Selecting with the column list both drops the unwanted columns and
    # imposes the template order in one step (the original dropped unwanted
    # columns one at a time in a loop).  .copy() keeps the result independent
    # of the caller's frame, as the drop chain did.
    return df1[mycols].copy()
def parseelemlist(elemlist):
    '''Find and separate multielement peaks to be averaged (e.g. Fe2 & Fe) from longer string of element peaks
    e.g. splits "Mg Fe Fe2 Si" into "Mg Si" and "{Fe:[Fe,Fe2]}" dictionary.

    Args:
        elemlist: list of peak names such as 'Fe', 'Fe2', 'Mg'.

    Returns:
        (newelemlist, Multielem): single-peak element names in original
        order, and a dict mapping a base element to the list of its peak
        names when it has more than one peak.
    '''
    # Strip trailing digits: 'Fe2' -> 'Fe'.  Raw string fixes the invalid
    # escape sequence '\D' the original embedded in a plain string literal.
    newlist = [re.match(r'\D+', i).group(0) for i in elemlist]
    # Map each base element to the indices of all of its peaks, keeping only
    # elements that occur more than once.
    Multielem = defaultdict(list)
    for i, item in enumerate(newlist):
        Multielem[item].append(i)
    Multielem = {k: v for k, v in Multielem.items() if len(v) > 1}
    # Positions of all duplicated peaks (flattened).
    duplist = [idx for indices in Multielem.values() for idx in indices]
    # Replace each index list with the original peak names, e.g. [1, 2] -> ['Fe', 'Fe2'].
    for key, value in Multielem.items():
        Multielem[key] = [elemlist[index] for index in value]
    # Single-peak elements only (multi-peak ones are handled separately).
    newelemlist = [elem for i, elem in enumerate(elemlist) if i not in duplist]
    return newelemlist, Multielem
def parseelem2(elemlist, Multielem):
    ''' After multielement peaks removed, also move secondary peaks used as primary to dict (handle separately)
    e.g. splits "S Mg Fe2 Si" into "S Mg Si" and "{Fe:[Fe2]}" dictionary; same structure and df output
    for averaging of Fe, Fe2, or straight Fe2 or straight Fe.

    Args:
        elemlist: element peaks with only single entries (i.e. Ti2 but not Ti & Ti2).
        Multielem: dict to extend with secondary-peak entries.

    Returns:
        (newelemlist, Multielem): plain elements, and the dict with one-item
        peak lists added under the base element name (e.g. {'Fe': ['Fe2']}).
    '''
    newelemlist = []
    for elem in elemlist:
        # Search once and reuse the match (the original ran the regex twice).
        match = re.search(r'\d', elem)
        if match:
            # Secondary peak used as primary: store under the base element
            # name with the same structure as true multi-peak entries, which
            # simplifies later quant comparisons.
            Multielem[elem[:match.start()]] = [elem]
        else:
            newelemlist.append(elem)
    return newelemlist, Multielem
def getelemthresholds(elemlist, AESquantparams):
    '''Get element-dependent significance thresholds for each peak from AESquantparams.

    Args:
        elemlist: list of element names.
        AESquantparams: DataFrame with (at least) 'element' and 'siglevel' columns;
            assumed to hold exactly one row per element — TODO confirm.

    Returns:
        dict: element name -> significance level for this element set.
    '''
    thresholds = {}
    for elem in elemlist:
        # Single matching row for this element, squeezed to a Series so the
        # siglevel scalar can be read as an attribute.
        thiselemdata = AESquantparams[(AESquantparams['element'] == elem)].squeeze()
        thresholds[elem] = thiselemdata.siglevel
    return thresholds
def cloneparamrows(df):
    ''' Make param log entry for each areanum - used by calccomposition to correctly process spe files with multiple spatial areas.

    The passed df is usually a list of spe files; this solves the problem that
    AugerParamLog has only one entry per file despite possibly having multiple
    distinct areas with different spectra.

    Note: the passed frame gains an 'Areanumber' column in place (existing
    rows become area 1), matching the original behavior.
    '''
    df['Areanumber'] = 1  # set existing entries as area 1
    newrows = []
    for index, row in df.iterrows():
        numareas = int(df.loc[index]['Areas'])
        for areanum in range(2, numareas + 1):
            # .copy() + item assignment replaces the Series.set_value API,
            # which was removed from pandas.
            newrow = df.loc[index].copy()
            newrow['Areanumber'] = areanum
            newrows.append(newrow)
    if newrows:
        # pd.concat replaces the removed DataFrame.append.
        df = pd.concat([df, pd.DataFrame(newrows)], ignore_index=True)
    df = df.sort_values(['Filenumber', 'Areanumber'])
    return df
def calccomp(df, Integquantlog, elemlist, AESquantparams):
'''Calculate elemental composition of given files based on input element list
threshold - ratio of element peak to noise peak (0 means no threshold applied
load element-dependent significance level from AESquantparams'''
thresholds=getelemthresholds(elemlist, AESquantparams) # Get list of sigma levels for significance/inclusion
# thresholds for both single and multipeak
elemlist, multipeaklist = parseelemlist(elemlist) # list of single peak elements and dict with multipeaks
# check if any of the single peaks are secondary (i.e. quant on Fe2 not main Fe)
elemlist, multipeaklist= parseelem2(elemlist, multipeaklist)
# two element lists needed (elements with one peak and elements with compositions averaged from two peaks i.e. Fe2, Fe3)
# to process compositions from multiple areas, clone rows from spe log (one for each areanum)
df=cloneparamrows(df) # splits single entry for 5 spatial area spe into 5 rows with Areanumber 1-5
df=df.reset_index(drop=True)
df['AESbasis']=0.0 # resets to zero if already present from calcamplitude
mycols=['Filenumber', 'Project', 'Filename', 'FilePath', 'Sample', 'Comments','AESbasis','Areanumber']
for i, elem in enumerate(elemlist): # add columns for basis
df[elem]=0.0 # add col for each element to spelist
df['sig'+elem]=0.0 # copy peak significance (ratio of integrated counts over 1 sigma of background)
df['err'+elem]=0.0 # another for total error in adjusted counts basis
mycols.append(elem)
mycols.append('sig'+elem)
mycols.append('err'+elem)
for i,elem in enumerate(list(multipeaklist.keys())): # get elements (keys) from dict
df[elem]=0.0
df['sig'+elem]=0.0
df['err'+elem]=0.0
mycols.append(elem)
mycols.append('sig'+elem)
mycols.append('err'+elem)
for i, elem in enumerate(elemlist): # now add at.% columns (e.g. %S, %Mg)
colname='%'+elem # at % columns named %S, %Mg, etc.
errname='err%'+elem
mycols.append(colname) # add to column list template
mycols.append(errname)
df[colname]=0.0
df[errname]=0.0
for i,elem in enumerate(list(multipeaklist.keys())): # add multipeak elements
colname='%'+elem # at % columns named %S, %Mg, etc.
errname='err%'+elem
mycols.append(colname) # add to column list template
mycols.append(errname)
df[colname]=0.0
df[errname]=0.0
for i in range(0,len(df)): # loop through all desired spectrum (multiarea ones already have duplicated rows)
filenum=df.iloc[i]['Filenumber']
areanum=df.iloc[i]['Arean
|
rayosborn/pycal
|
scripts/SendReminder.py
|
Python
|
lgpl-3.0
| 2,414
| 0.004143
|
#!/usr/bin/env python
# PyCal - Python web calendar
#
# Copyright (C) 2004 Ray Osborn
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# $Id: SendReminder.py,v 1.1.1.1 2004/03/10 15:09:20 osborn Exp $
#
"""
CGI script to send an email reminder of user and password.
"""
from pycal.PyCal import *
from pycal.Editor import Editor
from pycal.GetModule import GetEditors, GetSupervisors
from pycal.PrintModule import AdminPage, LoginPage, ErrorPage
from pycal.CGImodule import CGIlogin, CGIgetForm, SendEmail
import pycal.HTML as HTML
def main():
    """CGI entry point: email a calendar editor their username and password."""
    try:
        form = CGIgetForm()
        user = CGIlogin(form)
        # The "user" form field names the editor whose credentials are requested.
        if form.has_key("user"):
            username = form["user"]
        else:
            raise CalendarError, "No username specified"
        if username in GetEditors():
            e = Editor(username)
            if not e.email:
                raise CalendarError, "Email address not specified"
            mailto = e.email
            # NOTE(review): "Infomation" is a typo in the runtime subject
            # line; left untouched since fixing it changes program output.
            subject = "%s Calendar Editor Infomation" \
                      % calendarAbbr
            text="""\
%s has been registered as a %s Calendar Editor.
Username: %s Password: %s
To login to the %s Calendar, go to <%s/login.html>
and use the assigned username and password.
""" % (e.name, calendarAbbr, e.user, e.password, calendarAbbr, webURL)
            SendEmail(mailto, subject, text)
            message = "Requested information has been sent to %s" \
                      % HTML.Anchor(mailto, scheme="mailto:")
        # NOTE(review): if username is not a registered editor, "message" is
        # never bound and the page rendering below raises NameError —
        # confirm whether callers guarantee username is always an editor.
        if user == "admin" or user in GetSupervisors():
            print AdminPage(message)
        else:
            print LoginPage(message)
    except CalendarError, errorText:
        print ErrorPage(errorText)
# CGI scripts are executed directly; render the page on invocation.
if __name__ == "__main__":
    main()
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/IPython/core/shellapp.py
|
Python
|
bsd-2-clause
| 15,915
| 0.005278
|
# encoding: utf-8
"""
A mixin for :class:`~IPython.core.application.Application` classes that
launch InteractiveShell instances, load extensions, etc.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import glob
import os
import sys
from traitlets.config.application import boolean_flag
from traitlets.config.configurable import Configurable
from traitlets.config.loader import Config
from IPython.core import pylabtools
from IPython.utils.contexts import preserve_keys
from IPython.utils.path import filefind
from traitlets import (
Unicode, Instance, List, Bool, CaselessStrEnum, observe,
)
from IPython.terminal import pt_inputhooks
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
gui_keys = tuple(sorted(pt_inputhooks.backends) + sorted(pt_inputhooks.aliases))
backend_keys = sorted(pylabtools.backends.keys())
backend_keys.insert(0, 'auto')
shell_flags = {}
addflag = lambda *args: shell_flags.update(boolean_flag(*args))
addflag('autoindent', 'InteractiveShell.autoindent',
'Turn on autoindenting.', 'Turn off autoindenting.'
)
addflag('automagic', 'InteractiveShell.automagic',
"""Turn on the auto calling of magic commands. Type %%magic at the
IPython prompt for more information.""",
'Turn off the auto calling of magic commands.'
)
addflag('pdb', 'InteractiveShell.pdb',
"Enable auto calling the pdb debugger after every exception.",
"Disable auto calling the pdb debugger after every exception."
)
addflag('pprint', 'PlainTextFormatter.pprint',
"Enable auto pretty printing of results.",
"Disable auto pretty printing of results."
)
addflag('color-info', 'InteractiveShell.color_info',
"""IPython can display information about objects via a set of functions,
and optionally can use colors for this, syntax highlighting
source code and various other elements. This is on by default, but can cause
problems with some pagers. If you see such problems, you can disable the
colours.""",
"Disable using colors for info related things."
)
nosep_config = Config()
nosep_config.InteractiveShell.separate_in = ''
nosep_config.InteractiveShell.separate_out = ''
nosep_config.InteractiveShell.separate_out2 = ''
shell_flags['nosep']=(nosep_config, "Eliminate all spacing between prompts.")
shell_flags['pylab'] = (
{'InteractiveShellApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""
)
shell_flags['matplotlib'] = (
{'InteractiveShellApp' : {'matplotlib' : 'auto'}},
"""Configure matplotlib for interactive use with
the default matplotlib backend."""
)
# it's possible we don't want short aliases for *all* of these:
shell_aliases = dict(
autocall='InteractiveShell.autocall',
colors='InteractiveShell.colors',
logfile='InteractiveShell.logfile',
logappend='InteractiveShell.logappend',
c='InteractiveShellApp.code_to_run',
m='InteractiveShellApp.module_to_run',
ext='InteractiveShellApp.extra_extension',
gui='InteractiveShellApp.gui',
pylab='InteractiveShellApp.pylab',
matplotlib='InteractiveShellApp.matplotlib',
)
shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
class InteractiveShellApp(Configurable):
"""A Mixin for applications that start InteractiveShell instances.
Provides configurables for loading extensions and executing files
as part of configuring a Shell environment.
The following methods should be called by the :meth:`initialize` method
of the subclass:
- :meth:`init_path`
- :meth:`init_shell` (to be implemented by the subclass)
- :meth:`init_gui_pylab`
- :meth:`init_extensions`
- :meth:`init_code`
"""
extensions = List(Unicode(),
help="A list of dotted module names of IPython extensions to load."
).tag(config=True)
extra_extension = Unicode('',
help="dotted module name of an IPython extension to load."
).tag(config=True)
reraise_ipython_extension_failures = Bool(False,
help="Reraise exceptions encountered loading IPython extensions?",
).tag(config=True)
# Extensions that are always loaded (not configurable)
default_extensions = List(Unicode(), [u'storemagic']).tag(config=False)
hide_initial_ns =
|
Bool(True,
help="""Should variables loaded at startup (by startup files, exec_lines, etc.)
be hidden from tools like %who?"""
).tag(config=True)
exec_files = List(Unicode(),
help="""List of files to run at IPython startup."""
).tag(config=True)
exec_PYTHONSTARTUP = Bool(True,
help="""Run the
|
file referenced by the PYTHONSTARTUP environment
variable at IPython startup."""
).tag(config=True)
file_to_run = Unicode('',
help="""A file to be run""").tag(config=True)
exec_lines = List(Unicode(),
help="""lines of code to run at IPython startup."""
).tag(config=True)
code_to_run = Unicode('',
help="Execute the given command string."
).tag(config=True)
module_to_run = Unicode('',
help="Run the module as a script."
).tag(config=True)
gui = CaselessStrEnum(gui_keys, allow_none=True,
help="Enable GUI event loop integration with any of {0}.".format(gui_keys)
).tag(config=True)
matplotlib = CaselessStrEnum(backend_keys, allow_none=True,
help="""Configure matplotlib for interactive use with
the default matplotlib backend."""
).tag(config=True)
pylab = CaselessStrEnum(backend_keys, allow_none=True,
help="""Pre-load matplotlib and numpy for interactive use,
selecting a particular matplotlib backend and loop integration.
"""
).tag(config=True)
pylab_import_all = Bool(True,
help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
and an ``import *`` is done from numpy and pylab, when using pylab mode.
When False, pylab mode should not import any names into the user namespace.
"""
).tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
# whether interact-loop should start
interact = Bool(True)
user_ns = Instance(dict, args=None, allow_none=True)
@observe('user_ns')
def _user_ns_changed(self, change):
if self.shell is not None:
self.shell.user_ns = change['new']
self.shell.init_user_ns()
def init_path(self):
"""Add current working directory, '', to sys.path"""
if sys.path[0] != '':
sys.path.insert(0, '')
    def init_shell(self):
        # Abstract hook: concrete applications must build self.shell here.
        raise NotImplementedError("Override in subclasses")
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
enable = False
shell = self.shell
if self.pylab:
enable = lambda key: shell.enable_pylab(key, import_all=self.pylab_import_all)
key = self.pylab
elif self.matplotlib:
enable = shell.enable_matplotlib
key = self.matplotlib
elif self.gui:
enable = shell.enable_gui
key = self.gui
if not enable:
return
try:
r = enable(key)
except ImportError:
self.log.warning("Eventloop or matplotlib integration failed. Is matplotlib installed?")
self.shell.showtraceback()
return
except Exception:
self.log.warning("GUI event loop or pylab initialization failed")
self.shell.showtraceback()
return
if isinstance(r, tuple):
gui, backend = r[:2]
self.log.info("Enabling GUI event loop integration, "
"eventloop=
|
eventable/vobject
|
docs/build/lib/vobject/change_tz.py
|
Python
|
apache-2.0
| 3,148
| 0.003812
|
"""Translate an ics file's events to a different timezone."""
from optparse import OptionParser
from vobject import icalendar, base
try:
import PyICU
except:
PyICU = None
from datetime import datetime
def change_tz(cal, new_timezone, default, utc_only=False, utc_tz=icalendar.utc):
    """
    Change the timezone of the specified component.

    Args:
        cal (Component): the component to change
        new_timezone (tzinfo): the timezone to change to
        default (tzinfo): a timezone to assume if the dtstart or dtend in cal
            doesn't have an existing timezone
        utc_only (bool): only convert dates that are in utc
        utc_tz (tzinfo): the tzinfo to compare to for UTC when processing
            utc_only=True
    """
    for vevent in getattr(cal, 'vevent_list', []):
        # Convert both the start and the end stamp, when present.
        for node in (getattr(vevent, 'dtstart', None),
                     getattr(vevent, 'dtend', None)):
            if not node:
                continue
            dt = node.value
            if not isinstance(dt, datetime):
                continue
            if utc_only and dt.tzinfo != utc_tz:
                continue
            if dt.tzinfo is None:
                # Naive stamp: attach the caller-supplied default zone first.
                dt = dt.replace(tzinfo=default)
            node.value = dt.astimezone(new_timezone)
def main():
    """Command-line entry point: convert the timezones in an ics file."""
    options, args = get_options()
    if PyICU is None:
        print("Failure. change_tz requires PyICU, exiting")
    elif options.list:
        for tz_string in PyICU.TimeZone.createEnumeration():
            print(tz_string)
    elif args:
        utc_only = options.utc
        which = "only UTC" if utc_only else "all"
        print("Converting {0!s} events".format(which))
        ics_file = args[0]
        if len(args) > 1:
            timezone = PyICU.ICUtzinfo.getInstance(args[1])
        else:
            timezone = PyICU.ICUtzinfo.default
        print("... Reading {0!s}".format(ics_file))
        with open(ics_file) as handle:
            cal = base.readOne(handle)
        change_tz(cal, timezone, PyICU.ICUtzinfo.default, utc_only)
        out_name = ics_file + '.converted'
        print("... Writing {0!s}".format(out_name))
        # BUG FIX: the builtin file() does not exist in Python 3 and
        # serialize() writes text, so use open() in text mode ('w' not 'wb').
        with open(out_name, 'w') as out:
            cal.serialize(out)
        print("Done")
version = "0.1"
def get_options():
    """Parse command-line options for the converter.

    Returns (options, args) on success, or (False, False) when no positional
    argument was supplied and --list was not requested.
    """
    usage = """usage: %prog [options] ics_file [timezone]"""
    parser = OptionParser(usage=usage, version=version)
    parser.set_description("change_tz will convert the timezones in an ics file. ")

    parser.add_option("-u", "--only-utc", dest="utc", action="store_true",
                      default=False, help="Only change UTC events.")
    parser.add_option("-l", "--list", dest="list", action="store_true",
                      default=False, help="List available timezones")

    (cmdline_options, args) = parser.parse_args()
    if not args and not cmdline_options.list:
        print("error: too few arguments given")
        # BUG FIX: a bare ``print`` is a Python 2 statement; in Python 3 it
        # just evaluates the builtin and prints nothing. print() emits the
        # intended blank line.
        print()
        print(parser.format_help())
        return False, False

    return cmdline_options, args
if __name__ == "__main__":
    # Allow a clean Ctrl-C abort when run as a script.
    try:
        main()
    except KeyboardInterrupt:
        print("Aborted")
|
peraktong/AnniesLasso
|
sandbox-scripts/rf_start.py
|
Python
|
mit
| 1,538
| 0.004551
|
import os

import numpy as np
from astropy.table import Table

import AnniesLasso as tc

# Load a previously grid-searched Cannon model.
a = tc.load_model("gridsearch-2.0-3.0.model", threads=8)

# Load the data.
PATH, CATALOG, FILE_FORMAT = ("", "apogee-rg.fits",
    "apogee-rg-custom-normalization-{}.memmap")

labelled_set = Table.read(os.path.join(PATH, CATALOG))
dispersion = np.memmap(os.path.join(PATH, FILE_FORMAT).format("dispersion"),
    mode="r", dtype=float)
normalized_flux = np.memmap(
    os.path.join(PATH, FILE_FORMAT).format("flux"),
    mode="c", dtype=float).reshape((len(labelled_set), -1))
normalized_ivar = np.memmap(
    os.path.join(PATH, FILE_FORMAT).format("ivar"),
    mode="c", dtype=float).reshape(normalized_flux.shape)

# Split up the data into ten random subsets.
np.random.seed(123)  # For reproducibility.
q = np.random.randint(0, 10, len(labelled_set)) % 10
validate_set = (q == 0)
train_set = (~validate_set)

# Point the loaded model at the training subset. Private attributes are
# overwritten directly -- presumably the model offers no public setter;
# verify against the AnniesLasso API.
a._dispersion = dispersion
a._labelled_set = labelled_set[train_set]
a._normalized_flux = normalized_flux[train_set]
a._normalized_ivar = normalized_ivar[train_set]
a._set_s2_by_hogg_heuristic()

a.train()
a.save("gridsearch-2.0-3.0-s2-heuristically-set.model", overwrite=True)

# NOTE(review): ``raise a`` raises the model object itself, which is a
# TypeError in Python 3. It looks like a deliberate hard stop so the code
# below never runs -- confirm intent before removing.
raise a

b = tc.L1RegularizedCannonModel(labelled_set[validate_set],
    normalized_flux[validate_set], normalized_ivar[validate_set], dispersion,
    threads=4)
b.regularization = 1000.0
b.vectorizer = tc.vectorizer.NormalizedPolynomialVectorizer(
    labelled_set[validate_set],
    tc.vectorizer.polynomial.terminator(["TEFF", "LOGG", "FE_H"], 2))
|
itdxer/neupy
|
examples/competitive/sofm_compare_weight_init.py
|
Python
|
mit
| 1,754
| 0
|
from itertools import product

import matplotlib.pyplot as plt

from neupy import algorithms, utils, init
from utils import plot_2d_grid, make_circle, make_elipse, make_square

plt.style.use('ggplot')
utils.reproducible()

if __name__ == '__main__':
    GRID_WIDTH = 4
    GRID_HEIGHT = 4

    # Three 2-D point clouds to fit the map to (one row of subplots each).
    datasets = [
        make_square(),
        make_circle(),
        make_elipse(corr=0.7),
    ]
    # Weight-initialisation strategies to compare (one column each).
    configurations = [{
        'weight_init': init.Uniform(0, 1),
        'title': 'Random uniform initialization',
    }, {
        'weight_init': 'sample_from_data',
        'title': 'Sampled from the data',
    }, {
        'weight_init': 'init_pca',
        'title': 'Initialize with PCA',
    }]

    plt.figure(figsize=(15, 15))
    plt.title("Compare weight initialization methods for SOFM")
    red, blue = ('#E24A33', '#348ABD')

    n_columns = len(configurations)
    n_rows = len(datasets)

    index = 1
    for data, conf in product(datasets, configurations):
        sofm = algorithms.SOFM(
            n_inputs=2,
            features_grid=(GRID_HEIGHT, GRID_WIDTH),
            verbose=True,
            shuffle_data=True,
            weight=conf['weight_init'],

            learning_radius=8,
            reduce_radius_after=5,

            std=2,
            reduce_std_after=5,

            step=0.3,
            reduce_step_after=5,
        )
        # Force lazy (string-named) initialisation so the untrained
        # weights can be plotted below.
        if not sofm.initialized:
            sofm.init_weights(data)

        plt.subplot(n_rows, n_columns, index)

        plt.title(conf['title'])
        plt.scatter(*data.T, color=blue, alpha=0.05)
        plt.scatter(*sofm.weight, color=red)

        weights = sofm.weight.reshape((2, GRID_HEIGHT, GRID_WIDTH))
        plot_2d_grid(weights, color=red)

        index += 1

    plt.show()
|
caot/intellij-community
|
python/testData/refactoring/extractmethod/Statement.after.py
|
Python
|
apache-2.0
| 98
| 0.010204
|
def f():
    a = 1
    b = 1
    foo(a, b)
def foo(a_new, b_new):
    print(a_new + b_new * 123)
|
atdsaa/django-pgcrypto-fields
|
pgcrypto/admin.py
|
Python
|
bsd-2-clause
| 209
| 0
|
class PGPAdmin(object):
    """Admin mixin that disables PGP auto-decryption for admin querysets."""

    def get_queryset(self, request):
        """Skip any auto decryption when ORM calls are from the admin."""
        # Plain keyword argument instead of the **{'...'} indirection.
        return self.model.objects.get_queryset(skip_decrypt=True)
|
amiraliakbari/sharif-mabani-python
|
by-session/ta-921/j1/turtle7.py
|
Python
|
mit
| 73
| 0
|
# Print successive powers of two on one line.
# (Python 3 version of the original Python 2 teaching script.)
x = 2
print("salam!")
for i in range(10):
    print(x, end=' ')
    x = x * 2
| |
acidjunk/django-scrumboard
|
scrumtools/wsgi.py
|
Python
|
gpl-3.0
| 395
| 0.002532
|
"""
WSGI config for ScrumBoard project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on thi
|
s file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "scrumtools.settings")
from django.core.wsgi import get_wsgi_application
application
|
= get_wsgi_application()
|
simontakite/sysadmin
|
pythonscripts/thinkpython/pie.py
|
Python
|
gpl-2.0
| 1,636
| 0.001222
|
"""This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
try:
# see if Swampy is installed as a package
from swampy.TurtleWorld import *
except ImportError:
# otherwise see if the modules are on the PYTHONPATH
from TurtleWorld import *
def draw_pie(t, n, r):
    """Draws a pie, then moves into position to the right.

    t: Turtle
    n: number of segments
    r: length of the radial spokes
    """
    polypie(t, n, r)
    # Pen up, hop right past the pie's diameter, pen down for the next one.
    pu(t)
    fd(t, r*2 + 10)
    pd(t)
def polypie(t, n, r):
    """Draws a pie divided into n radial segments.

    t: Turtle
    n: number of segments
    r: length of the radial spokes
    """
    segment_angle = 360.0 / n
    for _ in range(n):
        isosceles(t, r, segment_angle / 2)
        lt(t, segment_angle)
def isosceles(t, r, angle):
    """Draws an isosceles triangle.

    The turtle starts and ends at the peak, facing the middle of the base.

    t: Turtle
    r: length of the equal legs
    angle: peak angle in degrees
    """
    half_base = r * math.sin(angle * math.pi / 180)
    rt(t, angle)
    fd(t, r)
    lt(t, 90 + angle)
    fd(t, 2 * half_base)
    lt(t, 90 + angle)
    fd(t, r)
    lt(t, 180 - angle)
# create the world and bob
world = TurtleWorld()
bob = Turtle()
bob.delay = 0

# pen up, back up to the left edge so the row of pies fits, pen down
pu(bob)
bk(bob, 130)
pd(bob)

# draw polypies with various number of sides
size = 40
draw_pie(bob, 5, size)
draw_pie(bob, 6, size)
draw_pie(bob, 7, size)
draw_pie(bob, 8, size)
die(bob)

# dump the contents of the campus to the file canvas.eps
world.canvas.dump()

wait_for_user()
|
pablogonzalezalba/a-language-of-ice-and-fire
|
lexer_rules.py
|
Python
|
mit
| 1,138
| 0.011424
|
# -*- coding: utf-8 -*-
# Token names recognised by the lexer; PLY requires this list.
tokens = [
    'LPAREN',
    'RPAREN',
    'LBRACE',
    'RBRACE',
    'EQUAL',
    'DOUBLE_EQUAL',
    'NUMBER',
    'COMMA',
    'VAR_DEFINITION',
    'IF',
    'ELSE',
    'END',
    'ID',
    'PRINT'
]
t_LPAREN = r"\("
t_RPAREN = r"\)"
t_LBRACE = r"\{"
t_RBRACE = r"\}"
t_EQUAL = r"\="
t_DOUBLE_EQUAL = r"\=\="
def t_NUMBER(token):
r"[0-9]+"
token.value = int(token.value)
return token
t_COMMA = r","
def t_VAR_DEFINITION(token):
r",\sFirst\sof\s(his|her)\sName"
return token
def t_IF(token):
r"I\spromise"
return token
def t_ELSE(token):
r"Mayhaps"
return token
def t_PRINT(token):
r"Hodor"
return token
def t_END(token):
r"And\snow\shis\swatch\sis\sended"
return token
def t_ID(token):
r"[a-zA-Z][_a-zA-Z0-9]*"
return token
t_ignore = " \t"
def t_NEWLINE(token):
r"\n+"
token.lexer.lineno += len(token.value)
def t_IGNORE_COMMENTS(token):
r"//(.*)\n"
token.lexer.lineno += 1
def t_error(token):
    # Report the offending line, showing only the first line of remaining input.
    # BUG FIX: "Sintax" -> "Syntax" in the user-facing message.
    raise Exception("Syntax error: Unknown token on line {0}. \"{1}\"".format(token.lineno, token.value.partition("\n")[0]))
|
aurelienmaury/galaxie
|
sandbox_mo/zero-ears/proc.py
|
Python
|
gpl-3.0
| 645
| 0.007752
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'amaury'
import subprocess
import time
import sys
# Raised by run() when the child process outlives its time budget.
class Timeout(Exception):
    pass
def run(command, timeout=10):
    """Run *command* in a shell, enforcing a wall-clock timeout.

    Returns (stdout, stderr, returncode) from the child process.
    Raises Timeout if the process is still alive after *timeout* seconds
    (terminating it first where the platform supports it).
    """
    # NOTE(review): command goes to the shell verbatim -- callers must not
    # pass untrusted input (shell-injection risk).
    proc = subprocess.Popen(command, bufsize=0, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, shell=True)
    poll_seconds = .250
    deadline = time.time() + timeout
    # Poll until the deadline passes or the process exits.
    while time.time() < deadline and proc.poll() is None:
        time.sleep(poll_seconds)

    if proc.poll() is None:
        # BUG FIX: float(sys.version[:3]) breaks on versions like "3.10";
        # compare version_info tuples instead. terminate() exists since 2.6.
        if sys.version_info >= (2, 6):
            proc.terminate()
        raise Timeout()

    stdout, stderr = proc.communicate()
    return stdout, stderr, proc.returncode
|
StackPointCloud/profitbricks-sdk-python
|
tests/test_errors.py
|
Python
|
apache-2.0
| 2,455
| 0.000815
|
# Copyright 2015-2017 ProfitBricks GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
|
from profitbricks.client import ProfitBricksService, Datacenter, Volume
from profitbricks.errors import PBError, PBNotAuthorizedError, PBNotFoundError, PBValidationError
from helpers import configuration
from helpers.resources import resource
class TestErrors(unittest.TestCase):
    """Check that the client maps API failures onto the PBError hierarchy.

    These tests call the live ProfitBricks API with the credentials from
    helpers.configuration, so they require network access.
    """

    @classmethod
    def setUpClass(self):
        # Shared fixture: one datacenter for the whole class.
        self.resource = resource()
        self.client = ProfitBricksService(
            username=configuration.USERNAME,
            password=configuration.PASSWORD,
            headers=configuration.HEADERS)
        self.datacenter = self.client.create_datacenter(
            datacenter=Datacenter(**self.resource['datacenter']))

    @classmethod
    def tearDownClass(self):
        self.client.delete_datacenter(datacenter_id=self.datacenter['id'])

    def test_pb_not_found(self):
        # BUG FIX: the old try/except-isinstance pattern passed silently when
        # no exception was raised at all; assertRaises fails in that case too.
        with self.assertRaises(PBNotFoundError):
            self.client.get_datacenter("fake_id")

    def test_pb_unauthorized_error(self):
        with self.assertRaises(PBNotAuthorizedError):
            self.client = ProfitBricksService(
                username=configuration.USERNAME + "1",
                password=configuration.PASSWORD,
                headers=configuration.HEADERS)
            self.client.list_datacenters()

    def test_pb_validation_error(self):
        with self.assertRaises(PBValidationError):
            volume = Volume(
                name='Explicitly created volume',
                size=5,
                disk_type='HDD',
                image='fake_image_id',
                bus='VIRTIO')
            self.client.create_volume(
                datacenter_id=self.datacenter['id'], volume=volume)
if __name__ == '__main__':
    # Allow running this file directly: python test_errors.py
    unittest.main()
|
MarcoMengoli/marcomengoli.github.io
|
filesForPosts/parserBasket.py
|
Python
|
mit
| 3,836
| 0.011992
|
import re
import urllib.request
import io
from bs4 import BeautifulSoup
base_url = "http://www.basketball-reference.com"
file_name = "players"
separator = "#"
def getPlayers(character):
    """Scrape the basketball-reference index page for *character*.

    Finds every player link on /players/<character>, prints each
    name -> URL pair, and scrapes each player page via getPlayerInfo().
    """
    url = "{0}/players/{1}".format(base_url, character)
    u = urllib.request.urlopen(url, data=None)
    f = io.TextIOWrapper(u, encoding='utf-8')
    dataString = f.read()

    # One match per table row: group(1) = relative link, group(2) = name.
    namesRegex = r'(?i)<tr[^>]*>\s*<td[^>]*>\s*<a\s+href="([^"]+)"\s*>([^<]+)</a>'
    pattern = re.compile(namesRegex)

    namesList = []
    linkMap = {}
    for match in pattern.finditer(dataString):
        try:
            href = "{0}{1}".format(base_url, match.group(1))
            name = match.group(2)
            namesList.append(name)
            linkMap[name] = href
            print("{0} -> {1}".format(name, href))
            getPlayerInfo(href, character)
        except Exception:
            # Best-effort crawl: skip players whose page fails to parse.
            # (Narrowed from a bare except so Ctrl-C still interrupts.)
            pass
def getPlayerInfo(url, character):
    """Scrape one player's page and append a '#'-separated line to players<character>.txt.

    Best-effort: any failure makes the function return 0 without writing.
    """
    try:
        infos = ""
        regex_intraTD = r'(?i)<td[^>]*>\s*([^<]*)<\/td>'
        pattern = re.compile(regex_intraTD)
        u = urllib.request.urlopen(url, data = None)
        f = io.TextIOWrapper(u,encoding='utf-8')
        dataString = f.read()
        soup = BeautifulSoup(dataString, "html.parser")
        # READING MAIN INFORMATIONS
        info_box = soup.find('div', id='info_box')
        main_info = info_box.find_all("div", class_="person_image_offset")
        if not main_info:
            # No photo block: fall back to the whole info box.
            print("NONE")
            main_info = info_box
        else:
            main_info = main_info[0]
        all_main_info_p0 = main_info.find_all("p", class_="margin_top")[0]
        infos = "{0}{1}{2}".format(infos, separator, all_main_info_p0.span.string)
        all_main_info_p1 = main_info.find_all("p", class_="padding_bottom_half")[0]
        cont = 0
        # Positions 2/5 are text fields and 9/12 numeric fields of the bio
        # paragraph -- presumably position/shoots and height/weight; the
        # hard-coded offsets depend on the page layout, verify on a live page.
        for p_text in all_main_info_p1.descendants:
            if cont == 2 or cont == 5:
                regex_noSymbols = re.compile('[^a-zA-Z]')
                p_text = regex_noSymbols.sub('', p_text)
                #print("{0} --- {1}".format(cont, p_text))
                infos = "{0}{1}{2}".format(infos, separator, p_text)
            if cont == 9 or cont == 12:
                regex_noSymbols = re.compile('[^0-9\-]')
                p_text = regex_noSymbols.sub('', p_text)
                #print("{0} --- {1}".format(cont, p_text))
                infos = "{0}{1}{2}".format(infos, separator, p_text)
            cont = cont + 1
        # NOW READING EACH TOTAL FROM THE TABLES
        all_tfoot = soup.find_all("tfoot")
        for tfoot in all_tfoot:
            tr = tfoot.tr #only the first tr
            i = 0
            for td in tr.descendants:
                tdstr = str(td)
                if i > 0:
                    for match in pattern.finditer(tdstr):
                        value = match.group(1)
                        infos = "{0}{1}{2}".format(infos, separator, value)
                #print("{0} --- {1}".format(i, tdstr))
                i = i + 1
        # Append one flattened line per player to the per-letter output file.
        full_file_name = "{0}{1}{2}".format(file_name, character, ".txt")
        file = open(full_file_name, 'a')
        infos = infos.replace('\n', ' ').replace('\r', '') + "\n"
        file.write(infos)
        file.close()
    except:
        return 0
if __name__ == "__main__":
for one in range(97,123):
print(str(chr(one)))
getPlayers(str(chr(one)))
#getPlayers("a")
|
gds-attic/transactions-explorer
|
test/filters/test_filters.py
|
Python
|
mit
| 7,300
| 0.000548
|
from hamcrest import assert_that, is_
from lib.filters import number_as_magnitude, number_as_financial_magnitude, join_url_parts, string_as_static_url, digest, number_as_grouped_number, number_as_percentage_change
def test_number_as_magnitude():
    """number_as_magnitude keeps 3 significant figures and appends k/m/bn."""
    cases = [
        (1.23, "1.23"), (1.234, "1.23"), (1.236, "1.24"),
        (12.3, "12.3"), (12.34, "12.3"), (12.36, "12.4"),
        (123, "123"), (123.4, "123"), (123.6, "124"),
        (1230, "1.23k"), (1234, "1.23k"), (1236, "1.24k"),
        (12300, "12.3k"), (12340, "12.3k"), (12360, "12.4k"),
        (123000, "123k"), (123400, "123k"), (123600, "124k"),
        (1230000, "1.23m"), (1234000, "1.23m"), (1236000, "1.24m"),
        (12300000, "12.3m"), (12340000, "12.3m"), (12360000, "12.4m"),
        (123000000, "123m"), (123400000, "123m"), (123600000, "124m"),
        (1230000000, "1.23bn"), (1234000000, "1.23bn"), (1236000000, "1.24bn"),
        (12300000000, "12.3bn"), (12340000000, "12.3bn"), (12360000000, "12.4bn"),
        (123000000000, "123bn"), (123400000000, "123bn"), (123600000000, "124bn"),
    ]
    for value, expected in cases:
        assert_that(number_as_magnitude(value), is_(expected))
def test_number_as_financial_magnitude():
    """Like number_as_magnitude, but small values keep two decimal places."""
    cases = [
        (1.23, "1.23"), (1.234, "1.23"), (1.236, "1.24"),
        (12.33, "12.33"), (12.334, "12.33"), (12.336, "12.34"),
        (123, "123"), (123.4, "123"), (123.6, "124"),
        (1230, "1.23k"), (1234, "1.23k"), (1236, "1.24k"),
        (12300, "12.3k"), (12340, "12.3k"), (12360, "12.4k"),
        (123000, "123k"), (123400, "123k"), (123600, "124k"),
        (1230000, "1.23m"), (1234000, "1.23m"), (1236000, "1.24m"),
        (12300000, "12.3m"), (12340000, "12.3m"), (12360000, "12.4m"),
        (123000000, "123m"), (123400000, "123m"), (123600000, "124m"),
        (1230000000, "1.23bn"), (1234000000, "1.23bn"), (1236000000, "1.24bn"),
        (12300000000, "12.3bn"), (12340000000, "12.3bn"), (12360000000, "12.4bn"),
        (123000000000, "123bn"), (123400000000, "123bn"), (123600000000, "124bn"),
    ]
    for value, expected in cases:
        assert_that(number_as_financial_magnitude(value), is_(expected))
def test_number_as_grouped_number():
    """Thousands separators are added; floats round; non-numbers give ''."""
    cases = [
        (123456789, "123,456,789"),
        (123, "123"),
        (4567.22, "4,567"),
        (4567.98, "4,568"),
        ("not a number", ""),
    ]
    for value, expected in cases:
        assert_that(number_as_grouped_number(value), is_(expected))
def test_number_as_percentage_change():
    """Ratios map to signed percentage strings; near-1 ratios show as 0%."""
    cases = [
        (None, "0%"),
        (1.0, "0%"),
        (1.00001, "0%"),
        (0.999991, "0%"),
        (0.0, "-100%"),
        (2.0, "+100%"),
        (1.1234567, "+12.35%"),
        (0.1234567, "-87.65%"),
    ]
    for ratio, expected in cases:
        assert_that(number_as_percentage_change(ratio), is_(expected))
class Test_join_url_parts(object):
    """join_url_parts should glue prefix and path with exactly one slash."""

    def test_string_as_link(self):
        result = join_url_parts('/', 'some/path')
        assert_that(result, is_('/some/path'))

    def test_string_as_link_with_user_defined_path_prefix(self):
        result = join_url_parts('/custom/prefix/', 'some/path')
        assert_that(result, is_('/custom/prefix/some/path'))

    def test_string_as_link_adds_trailing_slash_after_prefix(self):
        result = join_url_parts('/custom/prefix', 'some/path')
        assert_that(result, is_('/custom/prefix/some/path'))

    def test_string_as_link_does_not_add_double_slashes(self):
        result = join_url_parts('/custom/prefix/', '/some/path')
        assert_that(result, is_('/custom/prefix/some/path'))
class Test_string_as_static_url:
    """string_as_static_url maps asset names to fingerprinted CDN URLs."""

    def setUp(self):
        # Each test starts with no known digests.
        digest.set_digests({})

    def test_return_url_with_digest(self):
        digest.set_digests({
            'asset.css': 'asset-1425361275412.css'
        })
        url = string_as_static_url('asset.css')
        assert_that(
            url,
            is_('https://assets.digital.cabinet-office.gov.uk/static/asset-1425361275412.css')
        )

    def test_fallback_to_plain_url_when_digest_is_unknown(self):
        url = string_as_static_url('asset.css')
        assert_that(
            url,
            is_('https://assets.digital.cabinet-office.gov.uk/static/asset.css')
        )
|
6mandati6/6mandati6
|
tt.py
|
Python
|
apache-2.0
| 135
| 0.014815
|
"hey iam there yyyyyyyyyyyyyyyyyyyyyyyyyyyy
|
yyyyyuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu
|
uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuyyy"
|
hornn/interviews
|
tools/bin/ext/figleaf/__init__.py
|
Python
|
apache-2.0
| 8,016
| 0.003244
|
"""
figleaf is another tool to trace Python code coverage.
figleaf uses the sys.settrace hook to record which statements are
executed by the CPython interpreter; this record can then be saved
into a file, or otherwise communicated back to a reporting script.
figleaf differs from the gold standard of Python coverage tools
('coverage.py') in several ways. First and foremost, figleaf uses the
same criterion for "interesting" lines of code as the sys.settrace
function, which obviates some of the complexity in coverage.py (but
does mean that your "loc" count goes down). Second, figleaf does not
record code executed in the Python standard library, which results in
a significant speedup. And third, the format in which the coverage
format is saved is very simple and easy to work with.
You might want to use figleaf if you're recording coverage from
multiple types of tests and need to aggregate the coverage in
interesting ways, and/or control when coverage is recorded.
coverage.py is a better choice for command-line execution, and its
reporting is a fair bit nicer.
Command line usage: ::
figleaf <python file to execute> <args to python file>
The figleaf output is saved into the file '.figleaf', which is an
*aggregate* of coverage reports from all figleaf runs from this
directory. '.figleaf' contains a pickled dictionary of sets; the keys
are source code filenames, and the sets contain all line numbers
executed by the Python interpreter. See the docs or command-line
programs in bin/ for more information.
High level API: ::
* ``start(ignore_lib=True)`` -- start recording code coverage.
* ``stop()`` -- stop recording code coverage.
* ``get_trace_obj()`` -- return the (singleton) trace object.
* ``get_info()`` -- get the coverage dictionary
Classes & functions worth knowing about (lower level API):
* ``get_lines(fp)`` -- return the set of interesting lines in the fp.
* ``combine_coverage(d1, d2)`` -- combine coverage info from two dicts.
 * ``read_coverage(filename)`` -- load the coverage dictionary
* ``write_coverage(filename)`` -- write the coverage out.
* ``annotate_coverage(...)`` -- annotate a Python file with its coverage info.
Known problems:
-- module docstrings are *covered* but not found.
AUTHOR: C. Titus Brown, titus@idyll.org, with contributions from Iain Lowe.
'figleaf' is Copyright (C) 2006, 2007 C. Titus Brown. It is under the
BSD license.
"""
__version__ = "0.6.1"
# __all__ == @CTB
import sys
import os
from cPickle import dump, load
from optparse import OptionParser
import internals
# use builtin sets if in >= 2.4, otherwise use 'sets' module.
try:
set()
except NameError:
from sets import Set as set
def get_lines(fp):
    """
    Return the set of interesting lines in the source code read from
    this file handle.
    """
    # rstrip is a workaround for http://bugs.python.org/issue4262
    source = fp.read().rstrip() + "\n"
    code_obj = compile(source, "", "exec")
    return internals.get_interesting_lines(code_obj)
def combine_coverage(d1, d2):
    """
    Given two coverage dictionaries, combine the recorded coverage
    and return a new dictionary.  Neither input is modified.
    """
    combined = {}
    for filename in set(d1.keys()) | set(d2.keys()):
        merged = set(d1.get(filename, set()))
        merged.update(d2.get(filename, set()))
        combined[filename] = merged
    return combined
def write_coverage(filename, append=True):
    """
    Write the current coverage info out to the given filename.  If
    'append' is false, destroy any previously recorded coverage info.
    """
    # No trace object means tracing was never started: nothing to write.
    if _t is None:
        return
    data = internals.CoverageData(_t)
    d = data.gather_files()
    # sum existing coverage?
    if append:
        old = {}
        fp = None
        try:
            fp = open(filename)
        except IOError:
            # No previous coverage file -- nothing to merge.
            pass
        if fp:
            old = load(fp)
            fp.close()
        d = combine_coverage(d, old)
    # ok, save.
    outfp = open(filename, 'w')
    try:
        dump(d, outfp)
    finally:
        outfp.close()
def read_coverage(filename):
    """
    Read a coverage dictionary in from the given file.
    """
    fp = open(filename)
    try:
        d = load(fp)
    finally:
        fp.close()
    return d
def dump_pickled_coverage(out_fp):
    """
    Dump coverage information in pickled format into the given file handle.
    """
    dump(_t, out_fp)
def load_pickled_coverage(in_fp):
    """
    Replace (overwrite) coverage information from the given file handle.
    """
    # Rebinds the module-level singleton trace object.
    global _t
    _t = load(in_fp)
def annotate_coverage(in_fp, out_fp, covered, all_lines,
                      mark_possible_lines=False):
    """
    A simple example coverage annotator that outputs text.

    '>' marks executed lines, '!' marks interesting-but-unexecuted lines;
    with mark_possible_lines, a second '-' column flags interesting lines.
    """
    for lineno, line in enumerate(in_fp, start=1):
        if lineno in covered:
            marker = '>'
        elif lineno in all_lines:
            marker = '!'
        else:
            marker = ' '

        possible = ''
        if mark_possible_lines:
            possible = '-' if lineno in all_lines else ' '

        out_fp.write('%s%s %s' % (marker, possible, line,))
def get_data():
    # Returns None until tracing has been initialised via init()/start().
    if _t:
        return internals.CoverageData(_t)
#######################
#
# singleton functions/top-level API
#
# Module-level singleton trace object; created lazily by init().
_t = None
def init(exclude_path=None, include_only=None):
    # Create the singleton CodeTracer on first call; later calls are no-ops.
    from internals import CodeTracer
    global _t
    if _t is None:
        _t = CodeTracer(exclude_path, include_only)
def start(ignore_python_lib=True):
    """
    Start tracing code coverage.  If 'ignore_python_lib' is True on
    initial call, ignore all files that live below the same directory as
    the 'os' module.
    """
    global _t
    if not _t:
        exclude_path = None
        if ignore_python_lib:
            # Directory of the os module == the standard-library location.
            exclude_path = os.path.realpath(os.path.dirname(os.__file__))
        init(exclude_path, None)
    _t.start()
def start_section(name):
    # Tag subsequently recorded coverage with a named section.
    global _t
    _t.start_section(name)
def stop_section():
    global _t
    _t.stop_section()
def stop():
    """
    Stop tracing code coverage.
    """
    global _t
    if _t is not None:
        _t.stop()
def get_trace_obj():
    """
    Return the (singleton) trace object, if it exists.
    """
    return _t
def get_info(section_name=None):
    """
    Get the coverage dictionary from the trace object.
    """
    # Returns None until tracing has been initialised.
    if _t:
        return get_data().gather_files(section_name)
#############
def display_ast():
    # Debug helper: show LineGrabber's view of the file named on argv[1].
    # FIX: print(x) with a single argument behaves identically in Python 2
    # and Python 3, unlike the bare py2-only print statement used before.
    l = internals.LineGrabber(open(sys.argv[1]))
    l.pretty_print()
    print(l.lines)
def main():
    """
    Execute the given Python file with coverage, making it look like it is
    __main__.

    Python 2 only: relies on the 'execfile' builtin.  Leading '-' options
    are consumed by this tool; everything from the first non-option on is
    handed to the traced script as its own sys.argv.
    """
    ignore_pylibs = False

    # gather args: split argv into figleaf's own options and the script's.
    n = 1
    figleaf_args = []
    for n in range(1, len(sys.argv)):
        arg = sys.argv[n]
        if arg.startswith('-'):
            figleaf_args.append(arg)
        else:
            break

    remaining_args = sys.argv[n:]

    usage = "usage: %prog [options] [python_script arg1 arg2 ...]"
    option_parser = OptionParser(usage=usage)

    option_parser.add_option('-i', '--ignore-pylibs', action="store_true",
                             dest="ignore_pylibs", default=False,
                             help="ignore Python library modules")

    (options, args) = option_parser.parse_args(args=figleaf_args)
    # All figleaf_args start with '-', so the parser leaves no positionals.
    assert len(args) == 0

    if not remaining_args:
        option_parser.error("you must specify a python script to run!")

    ignore_pylibs = options.ignore_pylibs

    ## Reset system args so that the subsequently exec'd file can read
    ## from sys.argv
    sys.argv = remaining_args

    # Make imports resolve relative to the traced script, as python would.
    sys.path[0] = os.path.dirname(sys.argv[0])

    # Remember the launch directory: the script may chdir before we save.
    cwd = os.getcwd()

    start(ignore_pylibs)  # START code coverage

    import __main__
    try:
        execfile(sys.argv[0], __main__.__dict__)
    finally:
        stop()  # STOP code coverage

        # write_coverage is defined elsewhere in this module.
        write_coverage(os.path.join(cwd, '.figleaf'))
|
joshuahoman/vivisect
|
vdb/qt/registers.py
|
Python
|
apache-2.0
| 753
| 0.002656
|
from PyQt4 import QtCore, QtGui
import vtrace.qt
import vdb.qt.base
from vqt.main import *
class VdbRegistersWindow(vdb.qt.base.VdbWidgetWindow):
def __init__(self, db, dbt, parent=None):
vdb.qt.base.VdbWidgetWindow.__init__(self, db, dbt, parent=parent)
s
|
elf.regsWidget = vtrace.qt.RegistersView(trace=dbt, parent=parent)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.regsWidget)
self.setLayout(vbox)
self.setWindowTitle('Registers')
vqtconnect(self.vqLoad, 'vdb:setregs')
vqtconnect(self.vqLoad, 'vdb:setthread')
def vqLoad(self):
|
'''
the widgets in RegistersView already register for notifications.
'''
self.regsWidget.reglist.vqLoad()
|
gramps-project/addons-source
|
PythonGramplet/PythonGramplet.gpr.py
|
Python
|
gpl-2.0
| 423
| 0.023641
|
register(GRAMPLET,
id="Python Gram
|
plet",
name=_("Python Shell"),
description = _("Interactive Python interpreter"),
status = STABLE,
fname="PythonGramplet.py",
height=250,
gramplet = '
|
PythonGramplet',
gramplet_title=_("Python Shell"),
version = '1.0.33',
gramps_target_version = "5.1",
help_url="PythonGramplet",
)
|
arunkgupta/gramps
|
gramps/gui/editors/editdate.py
|
Python
|
gpl-2.0
| 13,619
| 0.004846
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2009 Douglas S. Blank
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Date editing module for GRAMPS.
The EditDate provides visual feedback to the user via a pixamp
to indicate if the associated GtkEntry box contains a valid date. Green
means complete and regular date. Yellow means a valid, but not a regular date.
Red means that the date is not valid, and will be viewed as a text string
instead of a date.
The DateEditor provides a dialog in which the date can be
unambiguously built using UI controls such as menus and spin buttons.
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
__LOG = logging.getLogger(".EditDate")
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.ggettext import sgettext as _
from gramps.gen.lib.date import Date
from gramps.gen.datehandler import displayer
from gramps.gen.const import URL_MANUAL_PAGE
from ..display import display_help
from ..managedwindow import ManagedWindow
from ..glade import Glade
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
MOD_TEXT = (
(Date.MOD_NONE , _('Regular')),
(Date.MOD_BEFORE , _('Before')),
(Date.MOD_AFTER , _('After')),
(Date.MOD_ABOUT , _('About')),
(Date.MOD_RANGE , _('Range')),
(Date.MOD_SPAN , _('Span')),
(Date.MOD_TEXTONLY , _('Text only')) )
QUAL_TEXT = (
(Date.QUAL_NONE, _('Regular')),
(Date.QUAL_ESTIMATED, _('Estimated')),
(Date.QUAL_CALCULATED, _('Calculated')) )
CAL_TO_MONTHS_NAMES = {
Date.CAL_GREGORIAN : displayer.short_months,
Date.CAL_JULIAN : displayer.short_months,
Date.CAL_HEBREW : displayer.hebrew,
Date.CAL_FRENCH : displayer.french,
Date.CAL_PERSIAN : displayer.persian,
Date.CAL_ISLAMIC : displayer.islamic,
Date.CAL_SWEDISH : displayer.swedi
|
sh }
WIKI_HELP_PAGE = '%s_-_Entering_and_Editing_Data:_Detaile
|
d_-_part_1' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Editing_Dates')
#-------------------------------------------------------------------------
#
# EditDate
#
#-------------------------------------------------------------------------
class EditDate(ManagedWindow):
"""
Dialog allowing to build the date precisely, to correct possible
limitations of parsing and/or underlying structure of Date.
"""
def __init__(self, date, uistate, track):
"""
Initiate and display the dialog.
"""
ManagedWindow.__init__(self, uistate, track, self)
# Create self.date as a copy of the given Date object.
self.date = Date(date)
self.top = Glade()
self.set_window(
self.top.toplevel,
self.top.get_object('title'),
_('Date selection'))
self.calendar_box = self.top.get_object('calendar_box')
for name in Date.ui_calendar_names:
self.calendar_box.get_model().append([name])
self.calendar_box.set_active(self.date.get_calendar())
self.calendar_box.connect('changed', self.switch_calendar)
self.quality_box = self.top.get_object('quality_box')
for item_number in range(len(QUAL_TEXT)):
self.quality_box.append_text(QUAL_TEXT[item_number][1])
if self.date.get_quality() == QUAL_TEXT[item_number][0]:
self.quality_box.set_active(item_number)
self.type_box = self.top.get_object('type_box')
for item_number in range(len(MOD_TEXT)):
self.type_box.append_text(MOD_TEXT[item_number][1])
if self.date.get_modifier() == MOD_TEXT[item_number][0]:
self.type_box.set_active(item_number)
self.type_box.connect('changed', self.switch_type)
self.start_month_box = self.top.get_object('start_month_box')
self.stop_month_box = self.top.get_object('stop_month_box')
month_names = CAL_TO_MONTHS_NAMES[self.date.get_calendar()]
for name in month_names:
self.start_month_box.append_text(name)
self.stop_month_box.append_text(name)
self.start_month_box.set_active(self.date.get_month())
self.stop_month_box.set_active(self.date.get_stop_month())
self.start_day = self.top.get_object('start_day')
self.start_day.set_value(self.date.get_day())
self.start_year = self.top.get_object('start_year')
self.start_year.set_value(self.date.get_year())
self.stop_day = self.top.get_object('stop_day')
self.stop_day.set_value(self.date.get_stop_day())
self.stop_year = self.top.get_object('stop_year')
self.stop_year.set_value(self.date.get_stop_year())
self.dual_dated = self.top.get_object('dualdated')
self.new_year = self.top.get_object('newyear')
self.new_year.set_text(self.date.newyear_to_str())
# Disable second date controls if not compound date
if not self.date.is_compound():
self.stop_day.set_sensitive(0)
self.stop_month_box.set_sensitive(0)
self.stop_year.set_sensitive(0)
# Disable the rest of controls if a text-only date
if self.date.get_modifier() == Date.MOD_TEXTONLY:
self.start_day.set_sensitive(0)
self.start_month_box.set_sensitive(0)
self.start_year.set_sensitive(0)
self.calendar_box.set_sensitive(0)
self.quality_box.set_sensitive(0)
self.dual_dated.set_sensitive(0)
self.new_year.set_sensitive(0)
self.text_entry = self.top.get_object('date_text_entry')
self.text_entry.set_text(self.date.get_text())
if self.date.get_slash():
self.dual_dated.set_active(1)
self.calendar_box.set_sensitive(0)
self.calendar_box.set_active(Date.CAL_JULIAN)
self.dual_dated.connect('toggled', self.switch_dual_dated)
# The dialog is modal -- since dates don't have names, we don't
# want to have several open dialogs, since then the user will
# loose track of which is which. Much like opening files.
self.return_date = None
self.show()
while True:
response = self.window.run()
if response == Gtk.ResponseType.HELP:
display_help(webpage=WIKI_HELP_PAGE,
section=WIKI_HELP_SEC)
elif response == Gtk.ResponseType.DELETE_EVENT:
break
else:
if response == Gtk.ResponseType.OK:
(the_quality, the_modifier, the_calendar, the_value,
the_text, the_newyear)
|
fin/froide
|
froide/account/migrations/0019_auto_20190309_1223.py
|
Python
|
mit
| 755
| 0
|
# Generated by Django 2.1.
|
7 on 2019-03-09 11:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("account", "0018_auto_20190309_1153"),
]
operations = [
migrations.AddField(
model_name="taggeduser",
name="co
|
unt",
field=models.PositiveIntegerField(default=1),
),
migrations.AddField(
model_name="taggeduser",
name="tag_new",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="tagged_users",
to="account.UserTag",
),
),
]
|
Utkarshdevd/summer14python
|
getText.py
|
Python
|
mit
| 665
| 0.034586
|
import re
def getText(data):
res = []
resString = ""
pattern = re.compile(r"\(?M{0,4}(CM|CD|D?C{0,3})(
|
XC|XL|L?X{0,3})(IX|IV|V?I{0,3})\)(.*?)\. *\n", re.I)
iteratable = pattern.finditer(data)
newPattern = re.compile(r"\(?M{0,4}CM|CD|D?C{0,3}XC|XL|L?X{0,3}IX|IV|V?I{0,3}\)", re.I)
checkPattern = re.compile(r"\(?M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?
|
X{0,3})(IX|IV|V?I{0,3})\) *", re.I)
resString = ""
for _ in iteratable:
resString = str(_.group())
if(newPattern.match(resString) == None):
for a in checkPattern.finditer(resString):
resString = resString.replace(a.group(), "")
res.append(resString)
else:
print "notCool"
continue
return res
|
0x326/academic-code-portfolio
|
2016-2021 Miami University/CSE 467 Computer and Network Security/2019-03-06 Homework 2.py
|
Python
|
mit
| 3,384
| 0.006206
|
#!/usr/bin/env python3
from typing import NamedTuple
from ciphers import HexString
rijndael_s_box = (
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7
|
, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x3
|
4, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16)
question_3_original_state = bytes((0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
0x0E, 0x0F))
question_3_key = bytes((0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01))
question_3b_state = bytes(state_byte ^ key_byte for state_byte, key_byte in zip(question_3_original_state, question_3_key))
question_3c_state = bytes(rijndael_s_box[state_byte] for state_byte in question_3b_state)
class RSAKey(NamedTuple):
    """One half of an RSA key pair: a (exponent, modulus) pair."""
    exponent: int
    modulus: int


class RSAKeyPair(NamedTuple):
    """A matched public/private RSA key pair sharing one modulus."""
    public_key: RSAKey
    private_key: RSAKey


def _modular_inverse(a: int, m: int) -> int:
    """Return x with (a * x) % m == 1, via the extended Euclidean algorithm.

    Raises ValueError if gcd(a, m) != 1 (no inverse exists).
    """
    old_r, r = a, m
    old_s, s = 1, 0
    while r:
        quotient = old_r // r
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s
    if old_r != 1:
        raise ValueError('%d has no inverse modulo %d' % (a, m))
    return old_s % m


def rsa_keygen(p: int = 877, q: int = 263, e: int = 98143) -> RSAKeyPair:
    """Generate an RSA key pair from primes *p*, *q* and public exponent *e*.

    The defaults reproduce the original hard-coded homework parameters, so
    existing callers get identical keys.  The private exponent is found
    with the extended Euclidean algorithm instead of the original O(e)
    brute-force search over k.  Raises ValueError if e is not invertible
    modulo phi(n).
    """
    n = p * q
    phi_n = (p - 1) * (q - 1)
    d = _modular_inverse(e, phi_n)
    return RSAKeyPair(public_key=RSAKey(modulus=n, exponent=e),
                      private_key=RSAKey(modulus=n, exponent=d))


def rsa_transform(message: int, key: RSAKey) -> int:
    """Apply RSA to *message*: message**exponent mod modulus."""
    return pow(message, key.exponent, key.modulus)
if __name__ == '__main__':
    # Homework output: AES state transformations (Q3) and an RSA
    # round trip (Q4).  HexString comes from the local 'ciphers' module.
    print('Question 3A:')
    print(HexString(question_3_original_state))
    print()

    print('Question 3B:')
    print(HexString(question_3b_state))
    print()

    print('Question 3C:')
    print(HexString(question_3c_state))
    print()

    print('Question 4:')
    key_pair = rsa_keygen()
    print(f'Key: {key_pair}')
    ciphertext = rsa_transform(15, key=key_pair.public_key)
    print(f'Ciphertext: {ciphertext}')
    plaintext = rsa_transform(ciphertext, key=key_pair.private_key)
    print(f'Plaintext: {plaintext}')
    print()
choderalab/openpathsampling
|
openpathsampling/tests/test_volume.py
|
Python
|
lgpl-2.1
| 17,171
| 0.001223
|
"""
@author David W.H. Swenson
"""
from __future__ import absolute_import
from builtins import object
from nose.tools import (assert_equal, assert_not_equal, assert_is, raises,
assert_true, assert_false)
from nose.plugins.skip import Skip, SkipTest
from .test_helpers import (CallIdentity, raises_with_message_like,
make_1d_traj)
import unittest
import pytest
import numpy as np
from openpathsampling.integration_tools import unit, HAS_SIMTK_UNIT
import openpathsampling.volume as volume
import openpathsampling as paths
class Identity2(CallIdentity):
    """A second identity CV whose str() differs from CallIdentity's.

    Used to build volumes whose collective variables compare unequal,
    forcing the generic VolumeCombination code paths in the tests below.
    """
    def __str__(self):
        return "Id2"
def setup_module():
    """Build the shared volume fixtures used throughout this module.

    volA/volB overlap, volA/volC overlap, volD contains volA; volA2 has
    the same range as volA but a different CV (Identity2), so set-algebra
    shortcuts that require identical CVs do not apply to it.
    """
    global op_id, volA, volB, volC, volD, volA2
    op_id = CallIdentity()
    volA = volume.CVDefinedVolume(op_id, -0.5, 0.5)
    volB = volume.CVDefinedVolume(op_id, 0.25, 0.75)
    volC = volume.CVDefinedVolume(op_id, -0.75, -0.25)
    volD = volume.CVDefinedVolume(op_id, -0.75, 0.75)
    volA2 = volume.CVDefinedVolume(Identity2(), -0.5, 0.5)
class TestEmptyVolume(object):
    """EmptyVolume: always False, and an absorbing/neutral element in set algebra."""

    def test_empty_volume(self):
        """Empty volume is well-behaved"""
        empty = volume.EmptyVolume()
        test = 0.1
        assert_equal(empty(test), False)
        assert_equal((empty & volA)(test), False)
        assert_equal((volA & empty)(test), False)
        assert_equal((empty | volA)(test), True)
        assert_equal((volA | empty)(test), True)
        assert_equal((empty & volA).__str__(), "empty")
        # assert_is: logical combos with empty should return same obj
        assert_is((empty - volA), empty)
        assert_is((volA - empty), volA)
        assert_is((volA | empty), volA)
        assert_is((empty | volA), volA)
        assert_is((volA & empty), empty)
        assert_is((empty & volA), empty)
        assert_is((empty ^ volA), volA)
        assert_is((volA ^ empty), volA)
        assert_equal((~ empty).__str__(), "all")

    def test_empty_volume_equality(self):
        # Two distinct EmptyVolume instances must compare equal.
        empty1 = volume.EmptyVolume()
        empty2 = volume.EmptyVolume()
        assert_true(empty1 == empty2)
        assert_false(empty1 != empty2)
class TestFullVolume(object):
def test_full_volume(self):
"""Full volume is well-behaved"""
full = volume.FullVolume()
test = 0.1
assert_equal(full(test), True)
assert_equal((full & volA)(test), True)
assert_equal((volA & full)(test), True)
assert_equal((full | volA)(test), True)
assert_equal((volA | full)(test), True)
# assert_is: logical combos with full should return same obj
a
|
ssert_is((full & volA), volA)
assert_is((volA & full), volA)
assert_is((full | volA), full)
assert_is((volA | full), full)
assert_equal((volA - full), volume.EmptyVolume())
assert_equal((full - volA), ~ volA)
assert_equal((full ^ volA), ~ volA)
assert_equal((volA ^ full), ~ volA)
assert_equal((volA | full).__str__(), "all")
assert_equal((~ full).__str__(), "empty")
class TestCVDefinedVolume(object):
def test_upper_boundary(self):
|
assert_equal(volA(0.49), True)
assert_equal(volA(0.50), False)
assert_equal(volA(0.51), False)
def test_lower_boundary(self):
assert_equal(volA(-0.49), True)
assert_equal(volA(-0.50), True)
assert_equal(volA(-0.51), False)
def test_negation(self):
assert_equal((~volA)(0.25), False)
assert_equal((~volA)(0.75), True)
assert_equal((~volA)(0.5), True)
assert_equal((~volA)(-0.5), False)
def test_autocombinations(self):
# volA tests this in the CVRangeVolumes
assert_equal(volA | volA, volA)
assert_equal(volA & volA, volA)
assert_equal(volA ^ volA, volume.EmptyVolume())
assert_equal(volA - volA, volume.EmptyVolume())
# combo tests this with VolumeCombination of CVRangeVolumes
combo = (volD - volA)
assert_is(combo | combo, combo)
assert_is(combo & combo, combo)
assert_equal(combo ^ combo, volume.EmptyVolume())
assert_equal(combo - combo, volume.EmptyVolume())
def test_and_combinations(self):
assert_equal((volA & volB), volume.CVDefinedVolume(op_id, 0.25, 0.5))
assert_equal((volA & volB)(0.45), True)
assert_equal((volA & volB)(0.55), False)
assert_equal((volB & volC), volume.EmptyVolume())
# go to VolumeCombination if order parameter isn't the same
assert_equal((volA & volA2),
volume.VolumeCombination(volA, volA2,
lambda a, b: a and b,
'{0} and {1}')
)
def test_or_combinations(self):
assert_equal((volA | volB), volume.CVDefinedVolume(op_id, -0.5, 0.75))
assert_equal((volB | volC), volume.UnionVolume(volB, volC))
assert_equal((volB | volC)(0.0), False)
assert_equal((volB | volC)(0.5), True)
assert_equal((volB | volC)(-0.5), True)
# go to VolumeCombination if order parameters isn't the same
assert_equal((volA2 | volB),
volume.UnionVolume(volA2, volB))
def test_xor_combinations(self):
assert_equal((volA ^ volB),
volume.UnionVolume(
volume.CVDefinedVolume(op_id, -0.5, 0.25),
volume.CVDefinedVolume(op_id, 0.5, 0.75)
))
assert_equal((volA ^ volA2),
volume.SymmetricDifferenceVolume(volA, volA2))
def test_sub_combinations(self):
assert_equal((volA - volB), volume.CVDefinedVolume(op_id, -0.5, 0.25))
assert_equal((volB - volC), volB)
assert_equal((volA - volD), volume.EmptyVolume())
assert_equal((volB - volA), volume.CVDefinedVolume(op_id, 0.5, 0.75))
assert_equal((volD - volA),
volume.UnionVolume(
volume.CVDefinedVolume(op_id, -0.75, -0.5),
volume.CVDefinedVolume(op_id, 0.5, 0.75)
)
)
assert_equal((volA2 - volA),
volume.RelativeComplementVolume(volA2, volA))
def test_str(self):
assert_equal(volA.__str__(), "{x|Id(x) in [-0.5, 0.5]}")
assert_equal((~volA).__str__(), "(not {x|Id(x) in [-0.5, 0.5]})")
def test_unit_support(self):
if not paths.integration_tools.HAS_SIMTK_UNIT:
raise SkipTest
u = unit
vol = volume.CVDefinedVolume(
op_id, -0.5 * u.nanometers, 0.25 * u.nanometers)
assert(vol(-0.25 * u.nanometers))
assert(not vol(-0.75 * u.nanometers))
vol = volume.PeriodicCVDefinedVolume(
op_id,
-30 * u.nanometers, 90 * u.nanometers,
-180 * u.nanometers, 180 * u.nanometers)
assert (vol(50 * u.nanometers))
assert (not vol(-70 * u.nanometers))
@staticmethod
def _vol_for_cv_type(inp):
if not HAS_SIMTK_UNIT and inp == 'simtk':
pytest.skip()
func = {
'float': lambda s: 1.0,
'array': lambda s: np.array([1.0, 2.0]),
'array1': lambda s: np.array([1.0]),
'simtk': None
}[inp]
if func is None: # only if inp is 'simtk'
func = lambda s: 1.0 * unit.nanometers
cv = paths.FunctionCV('cv', func)
volume = paths.CVDefinedVolume(cv, 0.0, 1.5)
return volume
@pytest.mark.parametrize('inp', ['float', 'array', 'array1', 'simtk'])
def test_is_iterable(self, inp):
snap = make_1d_traj([0.0])[0]
volume = self._vol_for_cv_type(inp)
val = volume.collectivevariable(snap)
expected = inp in ['array', 'array1']
if expected:
with pytest.warns(UserWarning, match="returns an iterable"):
result = volume._is_iterable(val)
else:
result = volume._is_iterable(val)
assert result is expected
@pytest.mark.parametrize('inp', ['float', 'array1', 'simtk'])
@pytest.mark.filterwarnings("ignore:The CV
|
darkframemaster/pyrepo
|
app/local/__pycache__/ui.py
|
Python
|
mit
| 5,962
| 0.070665
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
''' this is a simple UI for offline working '''
__author__='xuehao'
import re
import tkinter
import tkinter.simpledialog
import tkinter.messagebox
import gittime
import gitcount
''' list with a scrollbar and a lable of lines in the list '''
class myListBox(object):
    """A Listbox with a vertical scrollbar and a label showing the line count."""

    def __init__(self, root, info=None):
        """Populate the listbox with the strings in *info*, in order.

        *info* defaults to an empty list; the original mutable default
        argument ([]) is avoided so instances never share state.
        """
        if info is None:
            info = []
        self.root = root
        self.scrollbarY = tkinter.Scrollbar(root)
        self.scrollbarY.pack(side='left', fill='y')
        self.listbox = tkinter.Listbox(self.root, fg='white', bg='black',
                                       selectbackground='gray',
                                       width=150, height=30,
                                       yscrollcommand=self.scrollbarY.set)
        # Append in order; replaces the original reversed insert-at-0 loop,
        # which also left a debugging print(item) in place.
        for line in info:
            self.listbox.insert(tkinter.END, line)
        self.listbox.pack(side='left', fill='both')
        self.scrollbarY.config(command=self.listbox.yview)
        self.label = tkinter.Label(self.root,
                                   text='All lines: ' + str(self.listbox.size()))
        self.label.pack()
''' a window with two label and two entry '''
''' users use this window to init time '''
class initDia(object):
    """Modal dialog asking the user for a start/end time interval.

    Entries are pre-filled with example timestamps; Get() returns the two
    raw strings, or None when the user entered nothing.
    """

    def __init__(self,root):
        self.root=root
        self.top=tkinter.Toplevel(root)
        # Label text (in Chinese, a runtime string): "please enter the
        # time interval you want to inspect".
        self.label=tkinter.Label(self.top,text='请输入你要查看的时间区间',fg='white',bg='black',width=40,height=5,justify=tkinter.CENTER)
        self.label.pack()
        self.label1=tkinter.Label(self.top,text='input start time(ex:year month day)',width=40)
        self.label1.pack()
        self.entry=tkinter.Entry(self.top,width=30)
        self.entry.insert(1,'2010 12 1 0 0 0')
        self.entry.pack()
        self.entry.focus()
        self.label2=tkinter.Label(self.top,text='input end time',width=40)
        self.label2.pack()
        self.entry1=tkinter.Entry(self.top,width=30)
        self.entry1.insert(1,'2016 1 1 0 0 0')
        self.entry1.pack()
        self.button=tkinter.Button(self.top,text='Ok',bg='gray',width=37,command=self.Ok)
        self.button.pack()
        self.input=None

    def Ok(self):
        """Store both entry strings (None if both empty) and close the dialog."""
        self.input=[self.entry.get(),self.entry1.get()]
        if len(self.entry.get())==0 and len(self.entry1.get())==0:
            self.input=None
        self.top.destroy()

    def Get(self):
        """Return [start, end] strings, or None when nothing was entered."""
        if self.input:
            return self.input
        else:
            return
''' a window with radios '''
''' users use this window to select data they need to see '''
class dataDia(object):
    """Modal dialog with radio buttons selecting which data list to show.

    Get() returns '0' (commit list), '1' (time list) or '2' (commit dic).
    """

    def __init__(self,root):
        self.root=root
        self.top=tkinter.Toplevel(self.root)
        self.r=tkinter.StringVar()
        self.r.set('0')
        radio=tkinter.Radiobutton(self.top,variable=self.r,value='0',text='commit list')
        radio.pack()
        radio1=tkinter.Radiobutton(self.top,variable=self.r,value='1',text='time list')
        radio1.pack()
        radio2=tkinter.Radiobutton(self.top,variable=self.r,value='2',text='commit dic')
        radio2.pack()
        self.button=tkinter.Button(self.top,text='Ok',bg='gray',width=37,command=self.Ok)
        self.button.pack()

    def Ok(self):
        """Close the dialog; the chosen value stays in the StringVar."""
        self.top.destroy()

    def Get(self):
        """Return the selected radio value as a string."""
        return self.r.get()
''' Main window : start in here '''
class mainDialog(object): #主窗体
def __init__(self,root):#一个label 两个按钮
self.root=root
self.label1=tkinter.Label(self.root,bg='black',fg='white',text='welcome using git-count',width=30,height=5)
self.label1.pack()
self.buttonInit=tkinter.Button(self.root,text='init data',bg='gray',width=27,command=self.initDia) #绑定了Create这个事件
self.buttonInit.pack()
self.buttonDataChoose=tkinter.Button(self.root,text='other Data',bg='gray',width=27,command=self.dataDia)
self.buttonDataChoose.pack()
self.buttonQuit=tkinter.Button(self.root,text='Quit',bg='gray',width=27,command=self.Quit)
self.buttonQuit.pack()
# 初始化gitcount的变量
self.st_time=None
self.ed_time=None
self.commit=None
self.user=None
''' Do things in this func '''
def Main(self,stTime,edTime): #主程序入口
if len(stTime)==0:
stTime='2000 1 1 0 0 0'
if len(edTime)==0:
edTime=time.strftime('%Y %m %d %H %M %S',time.localtime())
# 初始化gitcount的变量
self.st_time=gittime.Time()
self.ed_time=gittime.Time()
self.st_time.set_str(stTime)
self.ed_time.set_str(edTime)
if self.st_time.cmp_with(self.ed_time)==True:
tkinter.messa
|
gebox.showerror('git count','start time bigger than end time!')
return
self.comInfo
|
=gitcount.Info()
self.user=gitcount.Coder()
self.comInfo.get_commit_dic(self.st_time,self.ed_time)
self.user.collect_stats(self.comInfo.commit_dic)
self.user.sort_coder()
listroot=tkinter.Tk()
listbox=myListBox(listroot,self.user.user_sort)
''' '''
def initDia(self): #init 按钮绑定的事件
d=initDia(self.root)
self.buttonInit.wait_window(d.top)
#self.buttonDataChoose.wait_window(d.top)
#self.buttonQuit.wait_window(d.top)
if d.Get()!=None:
if len(d.Get()[0])!=0:
if gittime.isTimeStr(d.Get()[0])==False:
tkinter.messagebox.showerror('git count','input time error in start time!')
return
if len(d.Get()[1])!=0:
if gittime.isTimeStr(d.Get()[1])==False:
tkinter.messagebox.showerror('git count','input time error in end time!')
return
self.Main(d.Get()[0],d.Get()[1])
else:
self.Main('2000 1 1 0 0 0',time.strftime('%Y %m %d %H %M %S',time.localtime()))
def dataDia(self):
d=dataDia(self.root)
self.buttonDataChoose.wait_window(d.top)
if self.st_time==None or self.ed_time==None or self.user==None or self.comInfo==None:
tkinter.messagebox.showerror('git count','please init data first!')
return
listroot=tkinter.Tk()
if d.Get()=='0':
listbox=myListBox(listroot,self.comInfo.commit_list)
elif d.Get()=='1':
tmp=[]
for i in self.comInfo.time_list:
tmp.append(i.reStr())
listbox=myListBox(listroot,tmp)
else:
listbox=myListBox(listroot,self.comInfo.commit_dic)
def Quit(self):
self.root.quit()
#main program
if __name__=="__main__":
root=tkinter.Tk() #生成root主窗口
button=mainDialog(root)
root.mainloop() #进入消息循环
|
Ban3/Limnoria
|
plugins/Filter/plugin.py
|
Python
|
bsd-3-clause
| 25,613
| 0.004295
|
# -*- encoding: utf-8 -*-
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from __future__ import unicode_literals
import re
import sys
import codecs
import string
import random
import supybot.conf as conf
import supybot.utils as utils
from supybot.commands import *
import supybot.utils.minisix as minisix
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Filter')
class MyFilterProxy(object):
    """Minimal stand-in for an Irc object: captures reply() text in self.s.

    Filter.outFilter() uses this to run a filter command and collect its
    output without sending anything to the network.
    """
    def reply(self, s):
        self.s = s
class Filter(callbacks.Plugin):
"""This plugin offers several commands which transform text in some way.
It also provides the capability of using such commands to 'filter' the
output of the bot -- for instance, you could make everything the bot says
be in leetspeak, or Morse code, or any number of other kinds of filters.
Not very useful, but definitely quite fun :)"""
    def __init__(self, irc):
        self.__parent = super(Filter, self)
        self.__parent.__init__(irc)
        # Maps channel name -> list of bound filter methods applied, in
        # order, to every outgoing PRIVMSG for that channel.
        self.outFilters = ircutils.IrcDict()
    def outFilter(self, irc, msg):
        """Run every registered filter for the target channel over an
        outgoing PRIVMSG, chaining each filter's output into the next."""
        if msg.command == 'PRIVMSG':
            if msg.args[0] in self.outFilters:
                # CTCP ACTIONs are unwrapped before filtering and
                # re-wrapped afterwards.
                if ircmsgs.isAction(msg):
                    s = ircmsgs.unAction(msg)
                else:
                    s = msg.args[1]
                methods = self.outFilters[msg.args[0]]
                for filtercommand in methods:
                    myIrc = MyFilterProxy()
                    filtercommand(myIrc, msg, [s])
                    s = myIrc.s
                if ircmsgs.isAction(msg):
                    msg = ircmsgs.action(msg.args[0], s, msg=msg)
                else:
                    msg = ircmsgs.IrcMsg(msg=msg, args=(msg.args[0], s))
        return msg
_filterCommands = ['jeffk', 'leet', 'rot13', 'hexlify', 'binary',
'scramble', 'morse', 'reverse', 'colorize', 'squish',
'supa1337', 'stripcolor', 'aol', 'rainbow', 'spellit',
'hebrew', 'undup', 'gnu', 'shrink', 'uniud']
    @internationalizeDocstring
    def outfilter(self, irc, msg, args, channel, command):
        """[<channel>] [<command>]

        Sets the outFilter of this plugin to be <command>. If no command is
        given, unsets the outFilter. <channel> is only necessary if the
        message isn't sent in the channel itself.
        """
        if command:
            # Only whitelisted, non-disabled filter commands may be added.
            if not self.isDisabled(command) and \
               command in self._filterCommands:
                method = getattr(self, command)
                self.outFilters.setdefault(channel, []).append(method)
                irc.replySuccess()
            else:
                irc.error(_('That\'s not a valid filter command.'))
        else:
            # No command: clear all filters for this channel.
            self.outFilters[channel] = []
            irc.replySuccess()
    outfilter = wrap(outfilter,
                     [('checkChannelCapability', 'op'),
                      additional('commandName')])
    # Pre-built single-pass remover for the lowercase ASCII vowels.
    _hebrew_remover = utils.str.MultipleRemover('aeiou')
    @internationalizeDocstring
    def hebrew(self, irc, msg, args, text):
        """<text>

        Removes all the vowels from <text>. (If you're curious why this is
        named 'hebrew' it's because I (jemfinch) thought of it in Hebrew class,
        and printed Hebrew often elides the vowels.)
        """
        irc.reply(self._hebrew_remover(text))
    hebrew = wrap(hebrew, ['text'])
    def _squish(self, text):
        # Shared helper: drop every space character (used by unbinary too).
        return text.replace(' ', '')
    @internationalizeDocstring
    def squish(self, irc, msg, args, text):
        """<text>

        Removes all the spaces from <text>.
        """
        irc.reply(self._squish(text))
    squish = wrap(squish, ['text'])
@internationalizeDocstring
def undup(self, irc, msg, args, text):
"""<text>
Returns <text>, with all consecutive duplicated letters removed.
"""
L = [text[0]]
for c in text:
if c != L[-1]:
L.append(c)
irc.reply(''.join(L))
undup = wrap(undup, ['text'])
@internationalizeDocstring
def binary(self, irc, msg, args, text):
"""<text>
Returns the binary representation of <text>.
"""
L = []
if minisix.PY3:
if isinstance(text, str):
|
bytes_ = text.encode()
else:
bytes_ = text
else:
if isinstance(text, unicode):
text = text.encode()
bytes_ = map(ord, text)
for i in bytes_:
LL = []
assert i<=256
counte
|
r = 8
while i:
counter -= 1
if i & 1:
LL.append('1')
else:
LL.append('0')
i >>= 1
while counter:
LL.append('0')
counter -= 1
LL.reverse()
L.extend(LL)
irc.reply(''.join(L))
binary = wrap(binary, ['text'])
def unbinary(self, irc, msg, args, text):
"""<text>
Returns the character representation of binary <text>.
Assumes ASCII, 8 digits per character.
"""
text = self._squish(text) # Strip spaces.
try:
L = [chr(int(text[i:(i+8)], 2)) for i in range(0, len(text), 8)]
irc.reply(''.join(L))
except ValueError:
irc.errorInvalid('binary string', text)
unbinary = wrap(unbinary, ['text'])
    # Codec-based encoder: bytes -> ASCII hex digits.
    _hex_encoder = staticmethod(codecs.getencoder('hex_codec'))
    def hexlify(self, irc, msg, args, text):
        """<text>

        Returns a hexstring from the given string; a hexstring is a string
        composed of the hexadecimal value of each character in the string
        """
        irc.reply(self._hex_encoder(text.encode('utf8'))[0].decode('utf8'))
    hexlify = wrap(hexlify, ['text'])
    # Codec-based hex decoder, symmetric with _hex_encoder above.
    _hex_decoder = staticmethod(codecs.getdecoder('hex_codec'))

    @internationalizeDocstring
    def unhexlify(self, irc, msg, args, text):
        """<hexstring>

        Returns the string corresponding to <hexstring>. Obviously,
        <hexstring> must be a string of hexadecimal digits.
        """
        try:
            # Decode invalid UTF-8 leniently ('replace') so arbitrary hex
            # input still yields a printable reply.
            irc.reply(self._hex_decoder(text.encode('utf8'))[0]
                      .decode('utf8', 'replace'))
        except TypeError:
            irc.error(_('Invalid input.'))
    unhexlify = wrap(unhexlify, ['text'])
_rot13_encoder = codecs.getencoder('rot-13')
@internationalizeDocstring
def r
|
morepath/morepath
|
morepath/pdbsupport.py
|
Python
|
bsd-3-clause
| 465
| 0
|
from pdb import Pdb  # pragma: nocoverage

# Module-level debugger instance configured to skip frames from the
# listed modules so stepping stays inside application code.
morepath_pdb = Pdb(
    skip=["reg.*", "inspect", "repoze.lru"]
)  # pragma: nocoverage


def set_trace(*args, **kw):  # pragma: nocoverage
    """Set pdb trace as in ``import pdb; pdb.set_trace``, ignores ``reg``.

    Use ``from morepath import pdbsupport; pdbsupport.set_trace()`` to use.

    The debugger won't step into ``reg``, ``inspect`` or ``repoze.lru``.
    """
    return morepath_pdb.set_trace(*args, **kw)
|
|
ProteinDF/ProteinDF_bridge
|
tests/test_ssbond.py
|
Python
|
gpl-3.0
| 649
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import doctest
from proteindf_bridge.ssbond import SSBond
|
class SSBondTest(unittest.TestCase):
    """Placeholder test case for proteindf_bridge.ssbond.SSBond.

    No assertions yet; actual coverage currently comes from the module
    doctests added in load_tests().  The commented-out example sketches
    an intended PDB-based disulfide-bond check.
    """

    def setUp(self):
        # No fixtures required yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    # def test_check(self):
    #     tmp_pdb = Pdb('./data/1hls.pdb')
    #     models = tmp_pdb.get_atomgroup()
    #     model = models.get_group('model_1')
    #     ssbond = SSBond()
    #     ssbond.check(models)
def load_tests(loader, tests, ignore):
    """unittest ``load_tests`` hook: add the ssbond doctests to the run."""
    from proteindf_bridge import ssbond
    tests.addTests(doctest.DocTestSuite(ssbond))
    return tests
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/lib/picklable_file_io.py
|
Python
|
gpl-2.0
| 15,899
| 0
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Picklable read-only I/O classes --- :mod:`MDAnalysis.lib.picklable_file_io`
===========================================================================
Provide with an interface for pickling read-only IO file object.
These classes are used for further pickling :class:`MDAnalysis.core.universe`
in a object composition approach.
.. autoclass:: FileIOPicklable
:members:
.. autoclass:: BufferIOPicklable
:members:
.. autoclass:: TextIOPicklable
:members:
.. autoclass:: BZ2Picklable
:members:
.. autoclass:: GzipPicklable
:members:
.. autofunction:: pickle_open
.. autofunction:: bz2_pickle_open
.. autofunction:: gzip_pickle_open
.. versionadded:: 2.0.0
"""
import io
import os
import bz2
import gzip
class FileIOPicklable(io.FileIO):
    """File object (read-only) that can be pickled.

    This class provides a file-like object (as returned by :func:`open`,
    namely :class:`io.FileIO`) that, unlike standard Python file objects,
    can be pickled. Only read mode is supported.

    When the file is pickled, filename and position of the open file handle in
    the file are saved. On unpickling, the file is opened by filename,
    and the file is seeked to the saved position.
    This means that for a successful unpickle, the original file still has to
    be accessible with its filename.

    Note
    ----
    This class only supports reading files in binary mode. If you need to open
    a file in text mode, use the :func:`pickle_open`.

    Parameters
    ----------
    name : str
        either a text or byte string giving the name (and the path
        if the file isn't in the current working directory) of the file to
        be opened.
    mode : str
        only reading ('r') mode works. It exists to be consistent
        with a wider API.

    Example
    -------
    ::

        >>> file = FileIOPicklable(PDB)
        >>> file.readline()
        >>> file_pickled = pickle.loads(pickle.dumps(file))
        >>> print(file.tell(), file_pickled.tell())
        55 55

    See Also
    ---------
    TextIOPicklable
    BufferIOPicklable

    .. versionadded:: 2.0.0
    """
    def __init__(self, name, mode='r'):
        self._mode = mode
        super().__init__(name, mode)

    def __getstate__(self):
        if self._mode != 'r':
            raise RuntimeError("Can only pickle files that were opened "
                               "in read mode, not {}".format(self._mode))
        return self.name, self.tell()

    def __setstate__(self, args):
        name = args[0]
        # Pickle recreates the instance via __new__, bypassing __init__,
        # so _mode must be restored here as well; otherwise an unpickled
        # object could not be pickled (or copied) again because
        # __getstate__ reads self._mode.
        self._mode = 'r'
        super().__init__(name, mode='r')
        self.seek(args[1])
class BufferIOPicklable(io.BufferedReader):
    """A picklable buffer object for read-only FileIO object.

    This class provides a buffered :class:`io.BufferedReader`
    that can be pickled.
    Note that this only works in read mode.

    Parameters
    ----------
    raw : FileIO object

    Example
    -------
    ::

        file = FileIOPicklable('filename')
        buffer_wrapped = BufferIOPicklable(file)

    See Also
    ---------
    FileIOPicklable
    TextIOPicklable

    .. versionadded:: 2.0.0
    """
    def __init__(self, raw):
        super().__init__(raw)
        self.raw_class = raw.__class__

    def __getstate__(self):
        return self.raw_class, self.name, self.tell()

    def __setstate__(self, args):
        raw_class = args[0]
        name = args[1]
        raw = raw_class(name)
        # Re-record the raw class: pickle bypasses __init__, so without
        # this the unpickled object would lack raw_class and a second
        # pickling round (__getstate__) would raise AttributeError.
        self.raw_class = raw_class
        super().__init__(raw)
        self.seek(args[2])
class TextIOPicklable(io.TextIOWrapper):
    """Character and line based picklable file-like object.

    This class provides a file-like :class:`io.TextIOWrapper` object that can
    be pickled. Note that this only works in read mode.

    Note
    ----
    After pickling, the current position is reset. `universe.trajectory[i]` has
    to be used to return to its original frame.

    Parameters
    ----------
    raw : FileIO object

    Example
    -------
    ::

        file = FileIOPicklable('filename')
        text_wrapped = TextIOPicklable(file)

    See Also
    ---------
    FileIOPicklable
    BufferIOPicklable

    .. versionadded:: 2.0.0
    """
    def __init__(self, raw):
        super().__init__(raw)
        self.raw_class = raw.__class__

    def __getstate__(self):
        try:
            name = self.name
        except AttributeError:
            # This is kind of ugly--BZ2File does not save its name.
            name = self.buffer._fp.name
        return self.raw_class, name

    def __setstate__(self, args):
        raw_class = args[0]
        name = args[1]
        # Restore raw_class: pickle bypasses __init__, so this attribute
        # would otherwise be missing on the unpickled object and a second
        # pickling round (__getstate__) would raise AttributeError.
        self.raw_class = raw_class
        # raw_class is used for further expansion this functionality to
        # Gzip files, which also requires a text wrapper.
        raw = raw_class(name)
        super().__init__(raw)
class BZ2Picklable(bz2.BZ2File):
    """File object (read-only) for bzip2 (de)compression that can be pickled.

    This class provides a file-like object (as returned by :func:`bz2.open`,
    namely :class:`bz2.BZ2File`) that, unlike standard Python file objects,
    can be pickled. Only read mode is supported.

    When the file is pickled, filename and position of the open file handle in
    the file are saved. On unpickling, the file is opened by filename,
    and the file is seeked to the saved position.
    This means that for a successful unpickle, the original file still has to
    be accessible with its filename.

    Note
    ----
    This class only supports reading files in binary mode. If you need to open
    to open a compressed file in text mode, use :func:`bz2_pickle_open`.

    Parameters
    ----------
    name : str
        either a text or byte string giving the name (and the path
        if the file isn't in the current working directory) of the file to
        be opened.
    mode : str
        can only be 'r', 'rb' to make pickle work.

    Example
    -------
    ::

        >>> file = BZ2Picklable(XYZ_bz2)
        >>> file.readline()
        >>> file_pickled = pickle.loads(pickle.dumps(file))
        >>> print(file.tell(), file_pickled.tell())
        5 5

    See Also
    ---------
    FileIOPicklable
    BufferIOPicklable
    TextIOPicklable
    GzipPicklable

    .. versionadded:: 2.0.0
    """
    def __init__(self, name, mode='rb'):
        self._bz_mode = mode
        super().__init__(name, mode)

    def __getstate__(self):
        if not self._bz_mode.startswith('r'):
            raise RuntimeError("Can only pickle files that were opened "
                               "in read mode, not {}".format(self._bz_mode))
        # _fp is BZ2File's underlying plain file object, which knows the
        # filename (BZ2File itself does not expose .name).
        return self._fp.name, self.tell()

    def __setstate__(self, args):
        # Restore the mode flag first: pickle creates the object via
        # __new__ without calling __init__, and __getstate__ needs
        # _bz_mode for a subsequent pickling round.
        self._bz_mode = 'rb'
        super().__init__(args[0])
        self.seek(args[1])
class GzipPicklable(gzip.GzipFile):
"""Gzip file object (read-only) that can be pickled.
This class provides a file-like object (as returned by :func:`gzip.open`,
namely :class:`gzip.GzipFile`) that, unlike standard Python file objects,
can be pickled. Only read mode is supported.
When the file is pickled, filename and position of the open file handle in
the file are saved. On unpickling, th
|
karlbright/beets
|
test/test_vfs.py
|
Python
|
mit
| 1,621
| 0.003085
|
# This file is part of beets.
# Copyright 2011, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the virtual filesystem builder.."""
import _common
from _common import unittest
from beets import library
from beets import vfs
class VFSTest(unittest.TestCase):
    """Exercise vfs.libtree() against an in-memory beets library."""

    def setUp(self):
        # In-memory library holding one singleton track and one album
        # track; the path formats decide where each lands in the tree.
        self.lib = library.Library(':memory:', path_formats=[
            ('default', 'albums/$album/$title'),
            ('singleton:true', 'tracks/$artist/$title'),
        ])
        self.lib.add(_common.item())
        self.lib.add_album([_common.item()])
        self.lib.save()
        self.tree = vfs.libtree(self.lib)

    def test_singleton_item(self):
        # The singleton appears under tracks/<artist>/<title>, mapping
        # to 1 (presumably the item id -- verify against vfs.libtree).
        self.assertEqual(self.tree.dirs['tracks'].dirs['the artist'].
                         files['the title'], 1)

    def test_album_item(self):
        # The album track appears under albums/<album>/<title>,
        # mapping to 2.
        self.assertEqual(self.tree.dirs['albums'].dirs['the album'].
                         files['the title'], 2)
def suite():
    """Build the unittest suite for this module (used by the runner)."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
iniverno/RnR-LLC
|
simics-3.0-install/simics-3.0.31/amd64-linux/lib/telos_mote_components.py
|
Python
|
gpl-2.0
| 9,668
| 0.006309
|
# MODULE: telos-mote-components
# CLASS: telos-mote
from sim_core import *
from components import *
# Telos Mote
# Simics component wrapping a complete Telos mote (msp430 CPU, RAM,
# timers, USARTs, radio connector).  Class continues below.
class telos_mote_component(component_object):
    classname = 'telos-mote'
    basename = 'system'
    description = "A Telos Mote, based on the msp430 processor"
    # connectors: two serial ports plus the zigbee radio link; all are
    # optional (empty_ok), hot-pluggable, single-connection endpoints.
    connectors = {
        # serial 0
        'usart0' : {'type' : 'serial', 'direction' : 'down',
                    'empty_ok' : True, 'hotplug' : True, 'multi' : False},
        # serial 1
        'usart1' : {'type' : 'serial', 'direction' : 'down',
                    'empty_ok' : True, 'hotplug' : True, 'multi' : False},
        # zigbee radio
        'zigbee' : {'type' : 'zigbee-link', 'direction' : 'down',
                    'empty_ok' : True, 'hotplug' : True, 'multi' : False}}
    # settings attributes in the components: get & set functions
    def get_cpu_frequency(self, idx):
        # Attribute getter: configured CPU frequency in MHz.
        return self.freq_mhz
    def set_cpu_frequency(self, val, idx):
        # The frequency may only be changed before the component is
        # configured (it is passed to the CPU object in add_objects).
        if self.obj.configured:
            return Sim_Set_Illegal_Value
        self.freq_mhz = val
        return Sim_Set_Ok
    # x,y,z location as three attributes, as the CLI cannot
    # represent lists. This is the most compatible way.
    # Like the frequency, each coordinate is only writable before the
    # component is configured; it is copied to the environment object
    # in add_objects().
    def get_location_x(self, idx):
        return self.location_x
    def set_location_x(self, val, idx):
        if self.obj.configured:
            return Sim_Set_Illegal_Value
        self.location_x = val
        return Sim_Set_Ok
    def get_location_y(self, idx):
        return self.location_y
    def set_location_y(self, val, idx):
        if self.obj.configured:
            return Sim_Set_Illegal_Value
        self.location_y = val
        return Sim_Set_Ok
    def get_location_z(self, idx):
        return self.location_z
    def set_location_z(self, val, idx):
        if self.obj.configured:
            return Sim_Set_Illegal_Value
        self.location_z = val
        return Sim_Set_Ok
    # adding all the objects found in a mote
    def add_objects(self):
        """Instantiate and wire up every pre-object of the mote.

        Creates CPU, RAM, timers, USARTs, clock, button and environment,
        connects interrupt sources, and builds the physical memory map.
        """
        # forward declare all objects in the mote
        self.o.phys_mem = pre_obj('phys_mem', 'memory-space')
        self.o.cpu = pre_obj('cpu', 'msp430')
        self.o.ram = pre_obj('memory', 'ram')
        self.o.ram_image = pre_obj('memory_image', 'image')
        self.o.io_ports = pre_obj('io_ports','telos-io-ports')
        self.o.sfr = pre_obj('sfr','telos-sfr')
        self.o.usart0 = pre_obj('usart0','telos-usart')
        self.o.usart1 = pre_obj('usart1','telos-usart')
        self.o.timer_a = pre_obj('timer_a','telos-timer')
        self.o.timer_b = pre_obj('timer_b','telos-timer')
        self.o.basic_clock = pre_obj('basic_clock','telos-basic-clock')
        self.o.reset_button = pre_obj('reset_button','telos-button')
        self.o.environment = pre_obj('environment','telos-environment')
        # interrupt mapping space (not a pre object, just attribute for CPU)
        # list devices here to have them automatically acknowledged by CPU
        self.interrupt_source_list = [None for i in range(16)]
        self.interrupt_source_list[13] = self.o.timer_b
        self.interrupt_source_list[9] = self.o.usart0
        self.interrupt_source_list[8] = self.o.usart0
        self.interrupt_source_list[6] = self.o.timer_a
        self.interrupt_source_list[3] = self.o.usart1
        self.interrupt_source_list[2] = self.o.usart1
        # CPU object
        self.o.cpu.physical_memory = self.o.phys_mem
        self.o.cpu.freq_mhz = self.freq_mhz # parameter to setup
        self.o.cpu.interrupt_sources = self.interrupt_source_list
        self.o.cpu.environment = self.o.environment
        # RAM memory image (64 KiB: full msp430 address space)
        self.o.ram_image.size = 0x10000
        self.o.ram_image.queue = self.o.cpu
        # RAM object
        self.o.ram.image = self.o.ram_image
        # sfr
        self.o.sfr.queue = self.o.cpu
        self.o.sfr.usart0_dev = self.o.usart0
        self.o.sfr.usart1_dev = self.o.usart1
        # io ports
        self.o.io_ports.queue = self.o.cpu
        # usart 0 (tx/rx vectors match interrupt_source_list slots 9/8)
        self.o.usart0.queue = self.o.cpu
        self.o.usart0.irq_dev = self.o.cpu
        self.o.usart0.tx_vector = 9
        self.o.usart0.rx_vector = 8
        self.o.usart0.sfr_dev = self.o.sfr
        # usart 1 (vectors 3/2, matching interrupt_source_list)
        self.o.usart1.queue = self.o.cpu
        self.o.usart1.irq_dev = self.o.cpu
        self.o.usart1.tx_vector = 3
        self.o.usart1.rx_vector = 2
        self.o.usart1.sfr_dev = self.o.sfr
        # timer A
        self.o.timer_a.is_timer_a = True
        self.o.timer_a.aclk_freq = 1.0 * (1 << 15) # 32 kHz
        self.o.timer_a.smclk_freq = 1.0 * (1 << 20) # 1 MHz
        self.o.timer_a.queue = self.o.cpu
        self.o.timer_a.irq_dev = self.o.cpu
        self.o.timer_a.irq_vector_ccr0 = 6
        self.o.timer_a.irq_vector_tmiv = 5
        # timer B
        self.o.timer_b.is_timer_a = False
        self.o.timer_b.aclk_freq = 1.0 * (1 << 15) # 32 kHz
        self.o.timer_b.smclk_freq = 1.0 * (1 << 20) # 1 MHz
        self.o.timer_b.queue = self.o.cpu
        self.o.timer_b.irq_dev = self.o.cpu
        self.o.timer_b.irq_vector_ccr0 = 13
        self.o.timer_b.irq_vector_tmiv = 12
        # basic clock
        self.o.basic_clock.queue = self.o.cpu
        # buttons
        self.o.reset_button.irq_dev = self.o.cpu
        self.o.reset_button.irq_level = 15
        # Memory map
        self.o.phys_mem.map = [
            # Memory is all considered as RAM, including what is
            # really FLASH. This works for the currently tested code.
            [0x00000200, self.o.ram, 0, 0x200, 0x10000 - 0x200],
            # sfr registers
            [0x00000000, self.o.sfr, 0, 0, 6],
            # HOLE -- all holes are used to enable tracing accesses
            # to devices which are not implemented
            [0x00000006, self.o.ram, 0, 6, 0x18 - 0x6], ## HOLE
            # Digital IO ports
            [0x00000018, self.o.io_ports, 3, 0, 4],
            [0x0000001c, self.o.io_ports, 4, 0, 4],
            [0x00000020, self.o.io_ports, 1, 0, 7],
            [0x00000028, self.o.io_ports, 2, 0, 7],
            [0x00000030, self.o.io_ports, 5, 0, 4],
            [0x00000034, self.o.io_ports, 6, 0, 4],
            # HOLE
            [0x00000038, self.o.ram, 0, 0x38, 0x56 - 0x38], ## HOLE
            # basic clock
            [0x00000056, self.o.basic_clock, 0, 0, 3],
            # HOLE
            [0x00000059, self.o.ram, 0, 0x59, 0x70 - 0x59], ## HOLE
            # serial ports
            [0x00000070, self.o.usart0, 0, 0, 8],
            [0x00000078, self.o.usart1, 0, 0, 8],
            # HOLE
            [0x00000080, self.o.ram, 0, 0x80, 0x11e - 0x80], ## HOLE
            # timer B
            [0x0000011e, self.o.timer_b, 1, 0, 2],
            # HOLE
            [0x00000120, self.o.ram, 0, 0x120, 0x12e - 0x120], ## HOLE
            # timer A
            [0x0000012e, self.o.timer_a, 1, 0, 2],
            # HOLE
            [0x00000130, self.o.ram, 0, 0x130, 0x160 - 0x130], ## HOLE
            # timer A, part 2
            [0x00000160, self.o.timer_a, 0, 0, 0x20],
            # timer B, part 2
            [0x00000180, self.o.timer_b, 0, 0, 0x20],
            # HOLE
            [0x000001a0, self.o.ram, 0, 0x1a0, 0x200 - 0x1a0] ## HOLE
        ]
        # environment parameters
        self.o.environment.location_x = self.location_x
        self.o.environment.location_y = self.location_y
        self.o.environment.location_z = self.location_z
        self.o.environment.temperature = 293 # nice default value
    # connector information
    def add_connector_info(self):
        # Per-connector data used by the connect_* methods; the zigbee
        # entry is populated elsewhere (empty list here).
        self.connector_info['usart0'] = [None, self.o.usart0, self.o.usart0.name]
        self.connector_info['usart1'] = [None, self.o.usart1, self.o.usart1.name]
        self.connector_info['zigbee'] = []
# serial port connectors
def connect_serial(self, connector, link, console):
if connector == 'usart0':
if link:
self.o.usart0.link = link
else:
print "Mote cannot connect to console"
if connector == 'usart1':
if link:
self.o.usart1.link = link
else:
|
rossonet/RAM
|
OctoPrint/src/octoprint/filemanager/storage.py
|
Python
|
lgpl-3.0
| 35,942
| 0.029886
|
# coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import logging
import os
import pylru
import tempfile
import octoprint.filemanager
# Abstract base for storage adapters; all methods below raise
# NotImplementedError and must be overridden.  Class continues below.
class StorageInterface(object):
    """
    Interface of storage adapters for OctoPrint.
    """
    @property
    def analysis_backlog(self):
        """
        Get an iterator over all items stored in the storage that need to be analysed by the :class:`~octoprint.filemanager.AnalysisQueue`.

        The yielded elements are expected as storage specific absolute paths to the respective files. Don't forget
        to recurse into folders if your storage adapter supports those.

        :return: an iterator yielding all un-analysed files in the storage
        """
        # empty generator pattern, yield is intentionally unreachable:
        # the bare return makes this a generator that yields nothing,
        # which is the correct default for adapters with no backlog.
        return
        yield
    def file_exists(self, path):
        """
        Returns whether the file indicated by ``path`` exists or not.

        :param string path: the path to check for existence
        :return: ``True`` if the file exists, ``False`` otherwise
        """
        # Interface method: concrete storage adapters must override.
        raise NotImplementedError()
    def list_files(self, path=None, filter=None, recursive=True):
        """
        List all files in storage starting at ``path``. If ``recursive`` is set to True (the default), also dives into
        subfolders.

        An optional filter function can be supplied which will be called with a file name and file data and which has
        to return True if the file is to be included in the result or False if not.

        The data structure of the returned result will be a dictionary mapping from file names to entry data. File nodes
        will contain their metadata here, folder nodes will contain their contained files and folders. Example::

           {
             "some_folder": {
               "type": "folder",
               "children": {
                 "some_sub_folder": {
                   "type": "folder",
                   "children": { ... }
                 },
                 "some_file.gcode": {
                   "type": "machinecode",
                   "hash": "<sha1 hash>",
                   "links": [ ... ],
                   ...
                 },
                 ...
               }
             "test.gcode": {
               "type": "machinecode",
               "hash": "<sha1 hash>",
               "links": [...],
               ...
             },
             "test.stl": {
               "type": "model",
               "hash": "<sha1 hash>",
               "links": [...],
               ...
             },
             ...
           }

        :param string path: base path from which to recursively list all files, optional, if not supplied listing will start
                            from root of base folder
        :param function filter: a filter that matches the files that are to be returned, may be left out in which case no
                                filtering will take place
        :param bool recursive: will also step into sub folders for building the complete list if set to True
        :return: a dictionary mapping entry names to entry data that represents the whole file list
        """
        # Interface method: concrete storage adapters must override.
        raise NotImplementedError()
    def add_folder(self, path, ignore_existing=True):
        """
        Adds a folder as ``path``. The ``path`` will be sanitized.

        :param string path: the path of the new folder
        :param bool ignore_existing: if set to True, no error will be raised if the folder to be added already exists
        :return: the sanitized name of the new folder to be used for future references to the folder
        """
        # Interface method: concrete storage adapters must override.
        raise NotImplementedError()
    def remove_folder(self, path, recursive=True):
        """
        Removes the folder at ``path``.

        :param string path: the path of the folder to remove
        :param bool recursive: if set to True, contained folders and files will also be removed, otherwise and error will
                               be raised if the folder is not empty (apart from ``.metadata.yaml``) when it's to be removed
        """
        # Interface method: concrete storage adapters must override.
        raise NotImplementedError()
    def add_file(self, path, file_object, printer_profile=None, links=None, allow_overwrite=False):
        """
        Adds the file ``file_object`` as ``path``

        :param string path: the file's new path, will be sanitized
        :param object file_object: a file object that provides a ``save`` method which will be called with the destination path
                                   where the object should then store its contents
        :param object printer_profile: the printer profile associated with this file (if any)
        :param list links: any links to add with the file
        :param bool allow_overwrite: if set to True no error will be raised if the file already exists and the existing file
                                     and its metadata will just be silently overwritten
        :return: the sanitized name of the file to be used for future references to it
        """
        # Interface method: concrete storage adapters must override.
        raise NotImplementedError()
    def remove_file(self, path):
        """
        Removes the file at ``path``. Will also take care of deleting the corresponding entries
        in the metadata and deleting all links pointing to the file.

        :param string path: path of the file to remove
        """
        # Interface method: concrete storage adapters must override.
        raise NotImplementedError()
    def get_metadata(self, path):
        """
        Retrieves the metadata for the file ``path``.

        :param path: virtual path to the file for which to retrieve the metadata
        :return: the metadata associated with the file
        """
        # Interface method: concrete storage adapters must override.
        raise NotImplementedError()
    def add_link(self, path, rel, data):
        """
        Adds a link of relation ``rel`` to file ``path`` with the given ``data``.

        The following relation types are currently supported:

          * ``model``: adds a link to a model from which the file was created/sliced, expected additional data is the ``name``
            and optionally the ``hash`` of the file to link to. If the link can be resolved against another file on the
            current ``path``, not only will it be added to the links of ``name`` but a reverse link of type ``machinecode``
            refering to ``name`` and its hash will also be added to the linked ``model`` file
          * ``machinecode``: adds a link to a file containing machine code created from the current file (model), expected
            additional data is the ``name`` and optionally the ``hash`` of the file to link to. If the link can be resolved
            against another file on the current ``path``, not only will it be added to the links of ``name`` but a reverse
            link of type ``model`` refering to ``name`` and its hash will also be added to the linked ``model`` file.
          * ``web``: adds a location on the web associated with this file (e.g. a website where to download a model),
            expected additional data is a ``href`` attribute holding the website's URL and optionally a ``retrieved``
            attribute describing when the content was retrieved

        Note that adding ``model`` links to files identifying as models or ``machinecode`` links to files identifying
        as machine code will be refused.

        :param path: path of the file for which to add a link
        :param rel: type of relation of the link to add (currently ``model``, ``machinecode`` and ``web`` are supported)
        :param data: additional data of the link to add
        """
        # Interface method: concrete storage adapters must override.
        raise NotImplementedError()
    def remove_link(self, path, rel, data):
        """
        Removes the link consisting of ``rel`` and ``data`` from file ``name`` on ``path``.

        :param path: path of the file from which to remove the link
        :param rel: type of relation of the link to remove (currently ``model``, ``machinecode`` and ``web`` are supported)
        :param data: additional data of the link to remove, must match existing link
        """
        # Interface method: concrete storage adapters must override.
        raise NotImplementedError()
def set_additional_metadata(self, path, key, data, overwrite=False, merge=False):
"""
Adds additional metadata to the metadata of ``path``. Metadata in ``data`` will be saved under ``key``.
If ``overwrite`` is set and ``key`` already exists in ``name``'s metadata, the current value will be overwritten.
If ``merge`` is set and ``key`` already exists and both ``data`` and the existing data under ``key`` are dictionaries,
the two dictionaries will be merged recursively.
:param path: the virtual path to the file for which to add additional metadata
:param key: key of metadata to add
:param data: metadata t
|
OSGeoLabBp/tutorials
|
hungarian/python/code/ellipse.py
|
Python
|
cc0-1.0
| 775
| 0.002581
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import math
from circle import Circle
class Ellipse(Circle):
    """ class for 2D ellipses """

    def __init__(self, x=0, y=0, p=2, r=1, b=1):
        # Circle's r is reused as the semi-major axis; b is the
        # semi-minor axis.  p is the number of decimals used by __str__
        # (per its usage in Circle/__str__ -- confirm in circle.py).
        super(Ellipse, self).__init__(x, y, p, r)
        self.b = b

    def __str__(self):
        # Build a format string with p decimal places per field,
        # producing "x, y, r, b".
        return ("{0:." + str(self.p) + "f}, {1:." + str(self.p) + "f}, {2:." +
                str(self.p) + "f}, {3:." + str(self.p) +
                "f}").format(self.x, self.y, self.r, self.b)

    def area(self):
        """ area of ellipse """
        # A = pi * a * b with a = self.r, b = self.b.
        return self.r * self.b * math.pi

    def perimeter(self):
        """ perimeter of ellipse (approximation)"""
        # Ramanujan's approximation:
        # P ~ pi * (3*(a + b) - sqrt((3a + b) * (a + 3b))).
        return math.pi * (3 * (self.r + self.b) - ((3 * self.r + self.b) *
                          (self.r + 3 * self.b)) ** 0.5)
|
wendysuly/TeamTalk
|
win-client/3rdParty/src/json/devtools/batchbuild.py
|
Python
|
apache-2.0
| 11,585
| 0.018127
|
import collections
import itertools
import json
import os
import os.path
import re
import shutil
import string
import subprocess
import sys
import cgi
class BuildDesc:
    """Description of one build configuration.

    Combines environment prepends, cmake -D variables, a build type and
    a cmake generator.  Partial descriptions are combined with
    merged_with() when generating the build matrix.
    """
    def __init__(self, prepend_envs=None, variables=None, build_type=None, generator=None):
        self.prepend_envs = prepend_envs or [] # [ { "var": "value" } ]
        self.variables = variables or []
        self.build_type = build_type
        self.generator = generator

    def merged_with( self, build_desc ):
        """Returns a new BuildDesc by merging field content.

        Prefer build_desc fields to self fields for single valued field.
        """
        return BuildDesc( self.prepend_envs + build_desc.prepend_envs,
                          self.variables + build_desc.variables,
                          build_desc.build_type or self.build_type,
                          build_desc.generator or self.generator )

    def env( self ):
        """Returns a copy of os.environ with the configured prepends applied."""
        environ = os.environ.copy()
        for values_by_name in self.prepend_envs:
            for var, value in values_by_name.items():
                var = var.upper()
                # NOTE: the unicode check below means this module targets
                # Python 2.
                if type(value) is unicode:
                    value = value.encode( sys.getdefaultencoding() )
                if var in environ:
                    environ[var] = value + os.pathsep + environ[var]
                else:
                    environ[var] = value
        return environ

    def cmake_args( self ):
        """Returns the cmake command-line arguments for this description."""
        args = ["-D%s" % var for var in self.variables]
        # skip build type for Visual Studio solution as it cause warning
        if self.build_type and 'Visual' not in self.generator:
            args.append( "-DCMAKE_BUILD_TYPE=%s" % self.build_type )
        if self.generator:
            args.extend( ['-G', self.generator] )
        return args

    def __repr__( self ):
        return "BuildDesc( %s, build_type=%s )" % (" ".join( self.cmake_args()), self.build_type)
class BuildData:
    """Runs cmake generation and the build for one BuildDesc in a work
    directory, capturing per-step logs and success flags."""
    def __init__( self, desc, work_dir, source_dir ):
        self.desc = desc
        self.work_dir = work_dir
        self.source_dir = source_dir
        self.cmake_log_path = os.path.join( work_dir, 'batchbuild_cmake.log' )
        self.build_log_path = os.path.join( work_dir, 'batchbuild_build.log' )
        self.cmake_succeeded = False
        self.build_succeeded = False

    def execute_build(self):
        """Recreate the work dir, run cmake then build; return success."""
        print 'Build %s' % self.desc
        self._make_new_work_dir( )
        self.cmake_succeeded = self._generate_makefiles( )
        if self.cmake_succeeded:
            self.build_succeeded = self._build_using_makefiles( )
        return self.build_succeeded

    def _generate_makefiles(self):
        # cmake configure step; output goes to cmake_log_path.
        print '    Generating makefiles: ',
        cmd = ['cmake'] + self.desc.cmake_args( ) + [os.path.abspath( self.source_dir )]
        succeeded = self._execute_build_subprocess( cmd, self.desc.env(), self.cmake_log_path )
        print 'done' if succeeded else 'FAILED'
        return succeeded

    def _build_using_makefiles(self):
        # cmake --build step; output goes to build_log_path.
        print '    Building: ',
        cmd = ['cmake', '--build', self.work_dir]
        if self.desc.build_type:
            cmd += ['--config', self.desc.build_type]
        succeeded = self._execute_build_subprocess( cmd, self.desc.env(), self.build_log_path )
        print 'done' if succeeded else 'FAILED'
        return succeeded

    def _execute_build_subprocess(self, cmd, env, log_path):
        # Captures stdout+stderr together; the log is EOL-normalized via
        # fix_eol() before being written.
        process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.work_dir,
                                    env=env )
        stdout, _ = process.communicate( )
        succeeded = (process.returncode == 0)
        with open( log_path, 'wb' ) as flog:
            log = ' '.join( cmd ) + '\n' + stdout + '\nExit code: %r\n' % process.returncode
            flog.write( fix_eol( log ) )
        return succeeded

    def _make_new_work_dir(self):
        # Removes any previous work directory to guarantee a clean build.
        if os.path.isdir( self.work_dir ):
            print '    Removing work directory', self.work_dir
            shutil.rmtree( self.work_dir, ignore_errors=True )
        if not os.path.isdir( self.work_dir ):
            os.makedirs( self.work_dir )
def fix_eol(stdout):
    """Normalize line endings to the platform convention.

    cmake --build on Windows can produce malformed endings (CR CR LF
    instead of CR LF); any run of carriage returns followed by a line
    feed is collapsed to a single os.linesep.
    """
    # Splitting on the malformed-EOL pattern and rejoining with the
    # platform separator is equivalent to re.sub('\r*\n', os.linesep, ...).
    return os.linesep.join(re.split('\r*\n', stdout))
def load_build_variants_from_config( config_path ):
    """Parse the JSON config and return {axis_name: [partial BuildDesc]}.

    Each axis entry contributes either generators (with optional env
    prepends), cmake variables, or build types; exactly one of the three
    keys is honoured per axis entry.
    """
    with open( config_path, 'rb' ) as fconfig:
        data = json.load( fconfig )
    variants = data[ 'cmake_variants' ]
    build_descs_by_axis = collections.defaultdict( list )
    for axis in variants:
        axis_name = axis["name"]
        build_descs = []
        if "generators" in axis:
            for generator_data in axis["generators"]:
                for generator in generator_data["generator"]:
                    build_desc = BuildDesc( generator=generator,
                                            prepend_envs=generator_data.get("env_prepend") )
                    build_descs.append( build_desc )
        elif "variables" in axis:
            for variables in axis["variables"]:
                build_desc = BuildDesc( variables=variables )
                build_descs.append( build_desc )
        elif "build_types" in axis:
            for build_type in axis["build_types"]:
                build_desc = BuildDesc( build_type=build_type )
                build_descs.append( build_desc )
        build_descs_by_axis[axis_name].extend( build_descs )
    return build_descs_by_axis
def generate_build_variants(build_descs_by_axis):
    """Returns a list of BuildDesc generated for the partial BuildDesc for each axis.

    Builds the cross-product over all axes, merging one partial
    description per axis into a full one via BuildDesc.merged_with().
    Returns [] for an empty axis mapping.
    """
    # The previous version computed an unused axis_names list and
    # iterated .items() while ignoring the key; both removed here.
    build_descs = []
    for axis_build_descs in build_descs_by_axis.values():
        if build_descs:
            # Cross every accumulated desc with every desc of this axis.
            build_descs = [prototype.merged_with(axis_desc)
                           for prototype, axis_desc
                           in itertools.product(build_descs, axis_build_descs)]
        else:
            # First axis seeds the accumulator.
            build_descs = axis_build_descs
    return build_descs
HTML_TEMPLATE = string.Template('''<html>
<head>
<title>$title</title>
<style type="text/css">
td.failed {background-color:#f08080;}
td.ok {background-color:#c0eec0;}
</style>
</head>
<body>
<table border="1">
<thead>
<tr>
<th>Variables</th>
$th_vars
</tr>
<tr>
<th>Build type</th>
$th_build_types
</tr>
</thead>
<tbody>
$tr_builds
</tbody>
</table>
</body></html>''')
def generate_html_report( html_report_path, builds ):
report_dir = os.path.dirname( html_report_path )
# Vertical axis: generator
# Horizontal: variables, then build_type
builds_by_generator = collections.defaultdict( list )
variables = set()
build_types_by_variable = collections.defaultdict( set )
build_by_pos_key = {} # { (generator, var_key, build_type): build }
for build in builds:
builds_by_generator[build.desc.generator].append( build )
var_key = tuple(sorted(build.desc.variables))
variables.add( var_key )
build_types_by_variable[var_key].add( build.desc.build_type )
pos_key = (build.desc.generator, var_key, build.desc.build_type)
build_by_pos_key[pos_key] = build
variables = sorted( variables )
th_vars = []
th_build_types = []
for variable in variables:
build_types = sorted( build_types_by_variable[variable] )
nb_build_type = len(build_types_by_variable[variable])
th_vars.append( '<th colspan="%d">%s</th>' % (nb_build_type, cgi.escape( ' '.join( variable ) ) ) )
for build_type in build_types:
th_build_types.append( '<th>%s</th>' % cgi.escape(build_type) )
tr_builds = []
for generator in sorted( builds_by_generator ):
tds = [ '<td>%s</td>\n' % cgi.escape( generator ) ]
for variable in variables:
build_types = sorted( build_types_by_variable[variable] )
for build_type in build_types:
pos_key = (generato
|
thesuperzapper/tensorflow
|
tensorflow/contrib/keras/python/keras/initializers_test.py
|
Python
|
apache-2.0
| 5,930
| 0.007589
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.ops import init_ops
from tensorflow.python.platform import test
class KerasInitializersTest(test.TestCase):
def _runner(self, init, shape, target_mean=None, target_std=None,
target_max=None, target_min=None):
variable = keras.backend.variable(init(shape))
output = keras.backend.get_value(variable)
lim = 3e-2
if target_std is not None:
self.assertGreater(lim, abs(output.std() - target_std))
if target_mean is not None:
self.assertGreater(lim, abs(output.mean() - target_mean))
if target_max is not None:
self.assertGreater(lim, abs(output.max() - target_max))
if target_min is not None:
self.assertGreater(lim, abs(output.min() - target_min))
# Test serialization (assumes deterministic behavior).
config = init.get_config()
reconstructed_init = init.__class__.from_config(config)
variable = keras.backend.variable(reconstructed_init(shape))
output_2 = keras.backend.get_value(variable)
self.assertAllClose(output, output_2, atol=1e-4)
def test_uniform(self):
tensor_shape = (9, 6, 7)
with self.test_session():
self._runner(keras.initializers.RandomUniform(minval=-1,
maxval=1,
seed=124),
tensor_shape,
target_mean=0., target_max=1, target_min=-1)
def test_normal(self):
tensor_shape = (8, 12, 99)
with self.test_session():
self._runner(keras.initializers.RandomNormal(mean=0, stddev=1, seed=153),
tensor_shape,
target_mean=0., target_std=1)
def test_truncated_normal(self):
tensor_shape = (12, 99, 7)
with self.test_session():
self._runner(keras.initializers.TruncatedNormal(mean=0,
stddev=1,
seed=126),
tensor_shape,
target_mean=0., target_std=None, target_max=2)
def test_constant(self):
tensor_shape = (5, 6, 4)
with self.test_session():
self._runner(keras.initializers.Constant(2), tensor_shape,
target_mean=2, target_max=2, target_min=2)
def test_lecun_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.test_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
scale = np.sqrt(3. / fan_in)
self._runner(keras.initializers.lecun_uniform(seed=123), tensor_shape,
target_mean=0., target_max=scale, target_min=-scale)
def test_glorot_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.test_session():
fan_in, fan_out = init_ops._compute_fans(tensor_shape)
scale = np.sqrt(6. / (fan_in + fan_out))
self._runner(keras.initializers.glorot_uniform(seed=123), tensor_shape,
target_mean=0., target_max=scale, target_min=-scale)
def test_he_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.test_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
scale = np.sqrt(6. / fan_in)
self._runner(keras.initializers.he_uniform(seed=123), tensor_shape,
target_mean=0., target_max=scale, target_min=-scale)
def test_glorot_norma
|
l(self):
tensor_shape = (5, 6, 4, 2)
with self.test_session():
fan_in, fan_out = init_ops._compute_fans(tensor_shape)
scale = np.sqrt(2. / (fan_in + fan_out))
self._runner(keras.initializers.glorot_normal(seed=123), tensor_shape,
target_mean=0., target_std=None, target_max=2 * scale)
|
def test_he_normal(self):
tensor_shape = (5, 6, 4, 2)
with self.test_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
scale = np.sqrt(2. / fan_in)
self._runner(keras.initializers.he_normal(seed=123), tensor_shape,
target_mean=0., target_std=None, target_max=2 * scale)
def test_orthogonal(self):
tensor_shape = (10, 10)
with self.test_session():
self._runner(keras.initializers.orthogonal(seed=123), tensor_shape,
target_mean=0.)
def test_identity(self):
with self.test_session():
tensor_shape = (3, 4, 5)
with self.assertRaises(ValueError):
self._runner(keras.initializers.identity(), tensor_shape,
target_mean=1. / tensor_shape[0], target_max=1.)
tensor_shape = (3, 3)
self._runner(keras.initializers.identity(), tensor_shape,
target_mean=1. / tensor_shape[0], target_max=1.)
def test_zero(self):
tensor_shape = (4, 5)
with self.test_session():
self._runner(keras.initializers.zeros(), tensor_shape,
target_mean=0., target_max=0.)
def test_one(self):
tensor_shape = (4, 5)
with self.test_session():
self._runner(keras.initializers.ones(), tensor_shape,
target_mean=1., target_max=1.)
if __name__ == '__main__':
test.main()
|
larsks/cloud-init
|
tests/cloud_tests/testcases/modules/set_password_expire.py
|
Python
|
gpl-3.0
| 727
| 0
|
# This file is part of cloud-init. See LICENSE file for license information.
"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestPasswordExpire(base.CloudTestCase):
"""Test password module."""
def test_shadow(self):
"""Test user frozen in shadow."""
out = self.get_data_file('shadow')
self.assertIn('h
|
arry:!:', out)
self.assertIn('dick:!:', out)
self.assertIn('tom:!:', out)
self.assertIn('harry:!:', out)
def test_ssh
|
d_config(self):
"""Test sshd config allows passwords."""
out = self.get_data_file('sshd_config')
self.assertIn('PasswordAuthentication yes', out)
# vi: ts=4 expandtab
|
paulocmi/Prod_pc
|
prod_announcer/settings.py
|
Python
|
mit
| 5,111
| 0.001957
|
# Django settings for prod_announcer project.
import os
LOCAL = lambda x: os.path.join(os.path.sep.join(
os.path.abspath(
os.path.dirname(__file__)).split(os.path.sep)), x)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Guilherme da Costa de Albuquerque', 'guilherme.albuquerque@uniriotec.br'),
('Paulo Cesar Matos Inacio', 'paulo.inacio@uniriotec.br'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': LOCAL('db.sqlite'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Sao_Paulo'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'pt-br'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = Tr
|
ue
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = LOCAL('media')
MEDIA_URL = '/media/'
STATIC_RO
|
OT = LOCAL('static_root')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
LOCAL('static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'y9)qm_=41thlp7l&_kc&ui&9eshkw(d=p3dp13xxapku+(n0%8'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'prod_announcer.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'prod_announcer.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
LOCAL('templates')
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'prod_announcer.produto',
'prod_announcer.loja_fisica',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'annoying'
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
kaushik94/sympy
|
sympy/utilities/_compilation/tests/test_compilation.py
|
Python
|
bsd-3-clause
| 1,775
| 0.001127
|
from __future__ import absolute_import
import shutil
from sympy.external import import_module
from sympy.utilities.pytest import skip
from sympy.utilities._compilation.compilation import compile_link_import_strings
numpy = import_module('numpy')
cython = import_module('cython')
_sources1 = [
('sigmoid.c', r"""
#include <math.h>
void sigmoid(int n, const double * const restrict in,
double * const restrict out, double lim){
for (int i=0; i<n; ++i){
const double x = in[i];
out[i] = x*pow(pow(x/lim, 8)+1, -1./8.);
}
}
"""),
('_sigmoid.pyx', r"""
import numpy as np
cimport numpy as cnp
cdef extern void c_sigmoid "sigmoid" (int, const double * const,
double * const, double)
def sigmoid(double [:] inp, double lim=350.0):
cdef cnp.ndarray[cnp.float64_t, ndim=1] out = np.empty(
inp.size, dtype=np.float64)
c_sigmoid(inp.size, &inp[0], &out[0], lim)
return out
""")
]
def npy(data, lim=350.0):
return data/((data/lim)**8+1)**(1/8.)
def test_compile_link_import_str
|
ings():
if not numpy:
skip("numpy not installed.")
if not cython:
skip("cython not installed.")
from sympy.utilities._compilation import has_c
if not has_c():
skip("No C compiler found.")
compile_kw = dict
|
(std='c99', include_dirs=[numpy.get_include()])
info = None
try:
mod, info = compile_link_import_strings(_sources1, compile_kwargs=compile_kw)
data = numpy.random.random(1024*1024*8) # 64 MB of RAM needed..
res_mod = mod.sigmoid(data)
res_npy = npy(data)
assert numpy.allclose(res_mod, res_npy)
finally:
if info and info['build_dir']:
shutil.rmtree(info['build_dir'])
|
nigelsmall/py2neo
|
py2neo/packages/httpstream/watch.py
|
Python
|
apache-2.0
| 3,162
| 0
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import logging
import sys
import threading
__all__ = ["Watcher", "watch"]
def black(s):
return "\x1b[30m{}\x1b[0m".format(s)
def red(s):
return "\x1b[31m{}\x1b[0m".format(s)
def green(s):
return "\x1b[32m{}\x1b[0m".format(s)
def yellow(s):
return "\x1b[33m{}\x1b[0m".format(s)
def blue(s):
return "\x1b[34m{}\x1b[0m".format(s)
def magenta(s):
return "\x1b[35m{}\x1b[0m".format(s)
def cyan(s):
return "\x1b[
|
36m{}\x1b[0m".format(s)
def white(s):
return "\x1b[36m{}\x1b[0m".format(s)
def bright_black(s):
return "\x1b[30;1m{}\x1b[0m".format(s)
def bright_red(s):
return "\x1b[31;1m{}\x1b[0m".format(s)
def bright_green(s):
return "\x1b[32;1m{}\x1b[0m".format(s)
def bright_yellow(s):
return "\x1b[33;1m{}\x1b[0m".format(s)
def bright_blue(s):
return "\x1b[34;1m{}\x1b[0m".format(s)
def bright_magenta(s):
return "\x1b[35;1m{}\x1b[0m".format(s)
|
def bright_cyan(s):
return "\x1b[36;1m{}\x1b[0m".format(s)
def bright_white(s):
return "\x1b[37;1m{}\x1b[0m".format(s)
class ColourFormatter(logging.Formatter):
def format(self, record):
s = super(ColourFormatter, self).format(record)
if record.levelno == logging.CRITICAL:
return bright_red(s)
elif record.levelno == logging.ERROR:
return bright_yellow(s)
elif record.levelno == logging.WARNING:
return yellow(s)
elif record.levelno == logging.INFO:
return cyan(s)
elif record.levelno == logging.DEBUG:
return blue(s)
else:
return s
class Watcher(threading.local):
handlers = {}
def __init__(self, logger_name):
super(Watcher, self).__init__()
self.logger_name = logger_name
self.logger = logging.getLogger(self.logger_name)
self.formatter = ColourFormatter()
def watch(self, level=None, out=sys.stdout):
try:
self.logger.removeHandler(self.handlers[self.logger_name])
except KeyError:
pass
handler = logging.StreamHandler(out)
handler.setFormatter(self.formatter)
self.handlers[self.logger_name] = handler
self.logger.addHandler(handler)
if level is None:
level = logging.DEBUG if __debug__ else logging.INFO
self.logger.setLevel(level)
def watch(logger_name, level=logging.INFO, out=sys.stdout):
watcher = Watcher(logger_name)
watcher.watch(level, out)
|
l-vincent-l/APITaxi
|
APITaxi/tasks/send_request_operator.py
|
Python
|
agpl-3.0
| 2,485
| 0.004829
|
#coding: utf-8
from flask import current_app
from flask_restplus import marshal
from APITaxi_models.hail import Hail, HailLog
from ..descriptors.hail import hail_model
from ..extensions import celery, redis_store_saved
import requests, json
@celery.task()
def send_request_operator(hail_id, endpoint, operator_header_name,
operator_api_key, operator_email):
operator_api_key = operator_api_key.encode('utf-8')
operator_header_name = operator_header_name.encode('utf-8')
hail = Hail.cache.get(hail_id)
if not hail:
current_app.logger.error('Unable to find hail: {}'.format(hail_id))
return False
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
if operator_header_name is not None and operator_header_name != '':
headers[operator_header_name] = operator_api_key
data = None
try:
data = json.dumps(marshal({"data": [hail]}, hail_model))
except ValueError:
current_app.logger.error('Unable to dump JSON ({})'.format(hail))
if data:
r = None
hail_log = HailLog('POST to operator', hail, data)
try:
r = requests.post(endpoint,
data=data,
headers=headers
)
except requests.exceptions.RequestException as e:
current_app.logger.error('Error calling: {}, endpoint: {}, headers: {}'.format(
operator_email, endpoint, headers))
current_app.logger.error(e)
hail_log.store(None, redis_store_saved, str(e))
if r:
hail_log.store(r, redis_store_saved)
if not r or r.status_code < 200 or r.status_code >= 300:
hail.status = 'failure'
current_app.extensions['sqlalchemy'].db.session.commit()
current_app.logger.error("Unable to reach hail's endpoint {} of operator {}"\
.format(endpoint, operator_email))
return False
r_json = None
try:
r_json = r.json()
except ValueError:
pass
if r_json and 'data' in r_json and len(r_json['data']) == 1\
and 'taxi_phone_number' in r_json['data'][0]:
hail.taxi_phone_number = unicode(r_json['data'][0]['taxi_phone_number'])
else:
current_app.logger.error('No JSON in operator answer of {} : {}'.format(
operator_email, r.text))
hail.status = 'received_by_operator'
|
current_app.extensions['sqlalchemy'].d
|
b.session.commit()
return True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.