import keras
from keras.datasets import mnist
from keras.layers import Dense
from keras.layers import Bidirectional, LSTM
from keras.models import Sequential
from keras.optimizers import SGD
batch_size = 128
decay = 1e-6
epochs = 30
learning_rate = 0.1
momentum = 0.9
num_classes = 10
time_step = 28
(x_train, y_train), (x_test, y_test) = mnist.load_data()
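# treat each 28x28 image as a sequence of 28 rows, one 28-pixel row per timestep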
x_train = x_train.reshape(60000, time_step, 784 // time_step)
x_test = x_test.reshape(10000, time_step, 784 // time_step)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Bidirectional(LSTM(128,
activation='tanh',
recurrent_activation='hard_sigmoid',
bias_initializer='zeros',
unit_forget_bias=True),
input_shape=(time_step, 784 // time_step)))
model.add(Dense(num_classes,
activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=learning_rate, decay=decay, momentum=momentum, nesterov=True),
metrics=['accuracy'])
history = model.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
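# A minimal follow-up sketch (not part of the original script): evaluate the
# trained model on the held-out test set. model.evaluate returns [loss,
# accuracy] here because the model was compiled with metrics=['accuracy'].
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])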
| {
"content_hash": "a5922cef9168913b858e034a35aed63d",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 93,
"avg_line_length": 33.3125,
"alnum_prop": 0.5959974984365228,
"repo_name": "paperrune/Neural-Networks",
"id": "09a24712de1d9416ab48bb1c62eaeb4f840aa13b",
"size": "1599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "History/LSTM/Keras.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "85870"
},
{
"name": "Python",
"bytes": "78497"
}
],
"symlink_target": ""
} |
from IPython.utils.shimmodule import ShimModule
import IPython
def test_shimmodule_repr_does_not_fail_on_import_error():
shim_module = ShimModule("shim_module", mirror="mirrored_module_does_not_exist")
repr(shim_module)
def test_shimmodule_repr_forwards_to_module():
shim_module = ShimModule("shim_module", mirror="IPython")
assert repr(shim_module) == repr(IPython)
| {
"content_hash": "c8e0256c3a8a41fda9a37e636f1c94d1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 84,
"avg_line_length": 32.25,
"alnum_prop": 0.7416020671834626,
"repo_name": "ipython/ipython",
"id": "6ea2629b42d9bcdc26bc0d0cb04624e2a60c0822",
"size": "387",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "IPython/utils/tests/test_shimmodule.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "425"
},
{
"name": "Jupyter Notebook",
"bytes": "958133"
},
{
"name": "Makefile",
"bytes": "4675"
},
{
"name": "Python",
"bytes": "2318171"
},
{
"name": "Shell",
"bytes": "12155"
}
],
"symlink_target": ""
} |
import datetime
import os
import subprocess
import sys
def regenerate_projects():
should_regenerate_projects = False
current_working_directory = os.getcwd()
projects_timestamp_file = os.path.join(current_working_directory, "projects/.projects_timestamp")
if not os.path.exists(projects_timestamp_file):
open(projects_timestamp_file, 'x').close()
should_regenerate_projects = True
if not should_regenerate_projects:
project_last_generated_time = os.path.getmtime(projects_timestamp_file)
print("Project files were last generated automatically at '" + str(datetime.datetime.fromtimestamp(project_last_generated_time)) + "'.")
project_files_file_path = os.path.join(current_working_directory, "projects/cmake.project")
if not os.path.exists(project_files_file_path):
print("Failed to locate project files file: " + project_files_file_path)
sys.exit(-1)
project_files_file = open(project_files_file_path, 'r')
for project_file in project_files_file:
project_file = project_file.strip()
project_file_path = os.path.join(current_working_directory, project_file)
current_project_file_modified_time = os.path.getmtime(project_file_path)
if current_project_file_modified_time > project_last_generated_time:
print("Project file '" + project_file + "' was modified at '" + str(datetime.datetime.fromtimestamp(current_project_file_modified_time)) + "', after last automatic project generation.")
should_regenerate_projects = True
break
if should_regenerate_projects:
print("Regenerating projects now ...\n")
projects_directory = os.path.join(current_working_directory, "projects")
if not os.path.isdir(projects_directory):
print("Failed to locate 'projects' directory here: " + projects_directory)
sys.exit(-1)
sys.stdout.flush()
# regenerate for each platform that the user has already generated for
valid_platforms = [ "win", "mac", "linux" ]
os.chdir(projects_directory)
for folder in os.listdir("."):
if folder in valid_platforms and os.path.isdir(folder):
result = subprocess.call(["python", "generate.py", folder])
if not result == 0:
sys.exit(result)
else:
print("Projects are up-to-date.")
| {
"content_hash": "74be0955a30ceb85818952dd1dbb41de",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 189,
"avg_line_length": 41.90384615384615,
"alnum_prop": 0.7342817806333181,
"repo_name": "chrisculy/Divida",
"id": "462023235f47792a2f5ac5c78a5ab15cad01fd20",
"size": "2179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hooks/regenerate_projects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "630"
},
{
"name": "C++",
"bytes": "48331"
},
{
"name": "QMake",
"bytes": "803"
}
],
"symlink_target": ""
} |
import sys
import traceback
class GetParameterException(Exception): pass
class get_parameter(object):
def __init__(self, name, access_stack=None, callstack=None):
if not isinstance(name, str):
raise TypeError("bee.get_parameter must be called with a string argument")
self.name = name
self.access_stack = access_stack if access_stack is not None else []
self.callstack = callstack
def get(self, parent, parameters):
params = None
if parent is not None:
params = parent._hive_parameters
if parameters is not None:
params = parameters
try:
ret = params[self.name]
except KeyError:
if parent is not None:
raise KeyError("%s does not contain parameter '%s'" % (parent, self.name))
else:
raise KeyError("Parameters '%s' do not contain parameter '%s'" % (params.keys(), self.name))
try:
nn = ret
for mode, attr, dmmy in self.access_stack:
if mode == "getattr":
nn = getattr(nn, attr)
elif mode == "getitem":
nn = nn[attr]
elif mode == "call":
args, kwargs = attr, dmmy
nn = nn(*args, **kwargs)
else:
raise Exception(mode) # should never happen
ret = nn
except Exception as e:
stack = self.callstack
s1 = traceback.format_list(stack[:-1])
tbstack = traceback.extract_tb(sys.exc_info()[2])
s2 = traceback.format_list(tbstack[1:])
s3 = traceback.format_exception_only(type(e), e)
s = "\n" + "".join(s1 + s2 + s3)
raise GetParameterException(s)
return ret
def getinstance(self, __parent__=None):
return self
def __getattr__(self, attr):
if attr == "typename": raise AttributeError
callstack = self.callstack
if callstack is None: callstack = traceback.extract_stack()
access_stack = list(self.access_stack)
access_stack.append(("getattr", attr, None))
return get_parameter(self.name, access_stack, callstack)
def __getitem__(self, attr):
callstack = self.callstack
if callstack is None: callstack = traceback.extract_stack()
access_stack = list(self.access_stack)
access_stack.append(("getitem", attr, None))
return get_parameter(self.name, access_stack, callstack)
def __call__(self, *args, **kwargs):
callstack = self.callstack
if callstack is None: callstack = traceback.extract_stack()
access_stack = list(self.access_stack)
access_stack.append(("call", args, kwargs))
return get_parameter(self.name, access_stack, callstack)
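# A hypothetical, self-contained usage sketch (not part of the original
# module): attribute and item accesses are recorded lazily into access_stack
# and only replayed against the real value when get() resolves the parameter.
class _Skin(object):
fonts = {"score": "Arial"}
p = get_parameter("skin").fonts["score"]
assert p.get(None, {"skin": _Skin()}) == "Arial"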
| {
"content_hash": "d97d0425a587ef3b9a37f3f2ac9c2675",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 108,
"avg_line_length": 37.03896103896104,
"alnum_prop": 0.5631136044880786,
"repo_name": "agoose77/hivesystem",
"id": "70cf9f2af1581493cc328756f90259b9ca1319ec",
"size": "2852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bee/get_parameter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2491478"
},
{
"name": "Shell",
"bytes": "1164"
}
],
"symlink_target": ""
} |
"""DigiByte P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages"""
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import CBlockHeader, MIN_VERSION_SUPPORTED, msg_addr, msg_block, MSG_BLOCK, msg_blocktxn, msg_cmpctblock, msg_dandeliontx, msg_feefilter, msg_getaddr, msg_getblocks, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_mempool, msg_notfound, msg_ping, msg_pong, msg_reject, msg_sendcmpct, msg_sendheaders, msg_tx, MSG_TX, MSG_TYPE_MASK, msg_verack, msg_version, NODE_NETWORK, NODE_WITNESS, sha256
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"dandeliontx": msg_dandeliontx,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"notfound": msg_notfound,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
class P2PConnection(asyncio.Protocol):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
This class contains no logic for handling the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# The underlying transport of the connection.
# Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
self._transport = None
@property
def is_connected(self):
return self._transport is not None
def peer_connect(self, dstaddr, dstport, net="regtest"):
assert not self.is_connected
self.dstaddr = dstaddr
self.dstport = dstport
# The initial message to send after the connection was made:
self.on_connection_send_msg = None
self.recvbuf = b""
self.network = net
logger.debug('Connecting to DigiByte Node: %s:%d' % (self.dstaddr, self.dstport))
loop = NetworkThread.network_event_loop
conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
return conn_gen
def peer_disconnect(self):
# Connection could have already been closed by other end.
NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
# Connection and disconnection methods
def connection_made(self, transport):
"""asyncio callback when a connection is opened."""
assert not self._transport
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self._transport = transport
if self.on_connection_send_msg:
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None # Never used again
self.on_open()
def connection_lost(self, exc):
"""asyncio callback when a connection is closed."""
if exc:
logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
else:
logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
self._transport = None
self.recvbuf = b""
self.on_close()
# Socket read methods
def data_received(self, t):
"""asyncio callback when data is read from the socket."""
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command not in MESSAGEMAP:
raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
logger.exception('Error reading message: %s', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def send_message(self, message):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
if not self.is_connected:
raise IOError('Not connected')
self._log_message("send", message)
tmsg = self._build_message(message)
def maybe_write():
if not self._transport:
return
# Python <3.4.4 does not have is_closing, so we have to check for
# its existence explicitly as long as DigiByte Core supports all
# Python 3.4 versions.
if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
return
self._transport.write(tmsg)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
# Class utility methods
def _build_message(self, message):
"""Build a serialized P2P message"""
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
return tmsg
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a DigiByte node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self):
super().__init__()
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
create_conn = super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.on_connection_send_msg = vt # Will be sent soon after connection_made
return create_conn
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_dandeliontx(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_notfound(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
self.verack_received = True
def on_version(self, message):
assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than or equal to {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_header(self, blockhash, timeout=60):
def test_function():
last_headers = self.last_message.get('headers')
if not last_headers:
return False
return last_headers.headers[0].rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
"""Waits for a getdata message.
Receiving any getdata message will satisfy the predicate. The last_message["getdata"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block/tx has been requested."""
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
"""Waits for a getheaders message.
Receiving any getheaders message will satisfy the predicate. The last_message["getheaders"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block header has been requested."""
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
network_event_loop = None
def __init__(self):
super().__init__(name="NetworkThread")
# There is only one event loop and no more than one thread must be created
assert not self.network_event_loop
NetworkThread.network_event_loop = asyncio.new_event_loop()
def run(self):
"""Start the network thread."""
self.network_event_loop.run_forever()
def close(self, timeout=10):
"""Close the connections and network event loop."""
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
class P2PDataStore(P2PInterface):
"""A P2P data store class.
Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
def __init__(self):
super().__init__()
self.reject_code_received = None
self.reject_reason_received = None
# store of blocks. key is block hash, value is a CBlock object
self.block_store = {}
self.last_block_hash = ''
# store of txs. key is txid, value is a CTransaction object
self.tx_store = {}
self.getdata_requests = []
def on_getdata(self, message):
"""Check for the tx/block in our stores and if found, reply with an inv message."""
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
def on_getheaders(self, message):
"""Search back through our block store for the locator, and reply with a headers message if found."""
locator, hash_stop = message.locator, message.hashstop
# Assume that the most recent block added is the tip
if not self.block_store:
return
headers_list = [self.block_store[self.last_block_hash]]
maxheaders = 2000
while headers_list[-1].sha256 not in locator.vHave:
# Walk back through the block store, adding headers to headers_list
# as we go.
prev_block_hash = headers_list[-1].hashPrevBlock
if prev_block_hash in self.block_store:
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if prev_block_header.sha256 == hash_stop:
# if this is the hashstop header, stop here
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
# Truncate the list if there are too many headers
headers_list = headers_list[:-maxheaders - 1:-1]
response = msg_headers(headers_list)
if response is not None:
self.send_message(response)
def on_reject(self, message):
"""Store reject reason and code for testing."""
self.reject_code_received = message.code
self.reject_reason_received = message.reason
def send_blocks_and_test(self, blocks, node, *, success=True, request_block=True, reject_code=None, reject_reason=None, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if request_block is True: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_code and reject_reason are set: assert that the correct reject message is received"""
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
self.send_message(msg_headers([CBlockHeader(blocks[-1])]))
if request_block:
wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
if success:
wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert node.getbestblockhash() != blocks[-1].hash
if reject_code is not None:
wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
if reject_reason is not None:
wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
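# For example (hypothetical names): peer.send_blocks_and_test([block], node)
# announces block via a headers message, serves the node's follow-up getdata
# from block_store, and asserts that the node's tip advances to block.hash.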
def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_code=None, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_code and reject_reason are set: assert that the correct reject message is received."""
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for tx in txs:
self.tx_store[tx.sha256] = tx
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = node.getrawmempool()
if success:
# Check that all txs are now in the mempool
for tx in txs:
assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
else:
# Check that none of the txs are now in the mempool
for tx in txs:
assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
if reject_code is not None:
wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
if reject_reason is not None:
wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
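# A hypothetical connection sketch (not in the original module), assuming a
# regtest node listening on 127.0.0.1:18444 (an assumed address/port).
# peer_connect() returns a callable that schedules the actual connection on
# the network thread's event loop:
#
# network_thread = NetworkThread()
# network_thread.start()
# peer = P2PInterface()
# peer.peer_connect('127.0.0.1', 18444, net='regtest')()
# peer.wait_for_verack()
# peer.send_and_ping(msg_getaddr())
# network_thread.close()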
| {
"content_hash": "a304b60bddd160a6e20a433938beb753",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 413,
"avg_line_length": 41.14954954954955,
"alnum_prop": 0.6310535073123741,
"repo_name": "digibyte/digibyte",
"id": "3ab23112c0c568fe3c33690f95672f67120af66a",
"size": "23144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/test_framework/mininode.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "1383691"
},
{
"name": "C++",
"bytes": "6176996"
},
{
"name": "CSS",
"bytes": "126479"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30298"
},
{
"name": "M4",
"bytes": "196094"
},
{
"name": "Makefile",
"bytes": "118253"
},
{
"name": "Objective-C",
"bytes": "6742"
},
{
"name": "Objective-C++",
"bytes": "6587"
},
{
"name": "Python",
"bytes": "1705099"
},
{
"name": "QMake",
"bytes": "757"
},
{
"name": "Shell",
"bytes": "98920"
}
],
"symlink_target": ""
} |
from . import doc
def main(argv):
d = doc.Doc(argv[1])
for a in argv[2:]:
print(make_line(d, a))
def make_environment_name(name):
return name.upper().replace(".", "_")
def make_line(doc, name):
temp = doc
for attr in name.split("."):
temp = getattr(temp, attr)
if type(temp) != str:
temp = str(temp)
temp = temp.replace(" ", "_")
return "export %s=%s" % ( make_enviroment_name(name) , temp)
# `./read_config.py gui.xml guidata.skin.scoreFontName guidata.skin.scoreFontHeight`
if __name__ == '__main__':
import sys
main(sys.argv)
| {
"content_hash": "f2a6de69357003f85389102f9107cd6c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 85,
"avg_line_length": 24.24,
"alnum_prop": 0.5742574257425742,
"repo_name": "thedavecollins/ExEmGel",
"id": "c0856a08d99afe272991f46a20f32351d52f9689",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exemgel/read_config.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8789"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model(['None'], ['LinearTrend'], ['Seasonal_Second'], ['SVR'])
| {
"content_hash": "e856c27530f780204e3d9d26ce707863",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 82,
"avg_line_length": 38.75,
"alnum_prop": 0.7032258064516129,
"repo_name": "antoinecarme/pyaf",
"id": "795ce4f48c23dd9e0cf3a0ba5e6c2f0b2a0e6255",
"size": "155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_None/model_control_one_enabled_None_LinearTrend_Seasonal_Second_SVR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import re
from decorator import decorator
class ValidationException(Exception):
pass
@decorator
def reply(f, *args, **kwargs):
""" if a command needs to show the message as a reply """
r = f(*args, **kwargs)
if r:
if isinstance(r, dict):
r['needs_reply'] = True
elif isinstance(r, basestring):
r = dict(answer=r, needs_reply=True)
return r
@decorator
def preview(f, *args, **kwargs):
""" if a command needs to show an url with a preview """
r = f(*args, **kwargs)
if r:
if isinstance(r, dict):
r['needs_preview'] = True
elif isinstance(r, basestring):
r = dict(answer=r, needs_preview=True)
return r
class Command(object):
# can be a single slash command or a
# list of commands
SLASH_COMMAND = None
# defines if the command needs a reply or not
REPLY_NEEDED = False
# if the command supplies this, we will parse the text using this
# regex and try to validate the command using it
REGEX = None
def __init__(self, bot, redis):
self._bot = bot
self._redis = redis
if isinstance(self.SLASH_COMMAND, basestring):
self.SLASH_COMMAND = [self.SLASH_COMMAND]
if self.SLASH_COMMAND:
self._commands = dict([(c.split(' ')[0].replace("/", ""), c) for c in self.SLASH_COMMAND])
command_re = r'^\/(%s)\s?(.*)?$' % '|'.join("\\b%s\\b" % c.replace('/', '') for c in self._commands.keys())
self._slash_re = re.compile(command_re, re.I)
self._slash_args_re = re.compile(r'\[(\w+)\]')
def __validate_slash_command(self, text, message):
result = self._slash_re.findall(text)
if not result:
return False
try:
command, args = result[0]
command = command.lower()
args = args.split()
except ValueError:
command = result[0][0].lower()
args = None
try:
full_command = self._commands[command]
command_args = self._slash_args_re.findall(full_command)
if len(command_args) == 1 and len(args) > 1:
args = [" ".join(args)]
if command_args and len(command_args) != len(args):
raise ValidationException("Wrong number of arguments %s" % full_command)
message['args'] = dict(zip(command_args, args))
except (KeyError, IndexError):
return False
message['command'] = command.lower()
return True
def __validate_regex(self, text, message):
result = self.REGEX.findall(text)
if not result:
return False
message['result'] = result[0]
return True
def can_respond(self, text, message):
if self.REGEX:
return self.__validate_regex(text, message)
if self.SLASH_COMMAND:
return self.__validate_slash_command(text, message)
return False
def __send_message(self, command_response, message):
if isinstance(command_response, basestring):
command_response = dict(answer=command_response)
if not isinstance(command_response, dict):
raise ValueError("Command response must be a dict")
if 'answer' not in command_response:
raise ValueError("Command response must have an answer")
answer = command_response['answer']
needs_reply = command_response.get('needs_reply', False)
needs_preview = command_response.get('needs_preview', False)
reply_id = message.get('message_id', None) if needs_reply else None
self._bot.send_message(message['chat']['id'], answer, reply_id, needs_preview)
def process(self, bot, message):
try:
text = message['text'].encode("utf-8").decode("utf-8")
text = text.replace(u'\xa0', ' ')
message['text'] = text
except KeyError:
message['text'] = ''
try:
if self.can_respond(text, message):
response = self.respond(text, message)
self.__send_message(response, message)
except ValidationException, e:
self.__send_message(dict(answer=e.message), message)
class Ping(Command):
""" Simple ping command to make sure the bot itself works """
SLASH_COMMAND = '/ping'
def respond(self, text, message):
return 'pong'
class Help(Command):
""" LOLHELP """
SLASH_COMMAND = '/help'
@reply
def respond(self, text, message):
return 'Deus ajuda quem cedo madruga'
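# An illustrative command with a parsed argument (hypothetical, not part of
# the original module and deliberately left out of COMMANDS below): '[text]'
# placeholders in SLASH_COMMAND are extracted by the regex machinery in
# Command and delivered via message['args'].
class Echo(Command):
""" Echoes back whatever follows /echo """
SLASH_COMMAND = '/echo [text]'
@reply
def respond(self, text, message):
return message['args']['text']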
COMMANDS = [Ping, Help]
| {
"content_hash": "73a974998bab07b7dba74739a9431749",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 119,
"avg_line_length": 29.371069182389938,
"alnum_prop": 0.574304068522484,
"repo_name": "fernandotakai/telegram_bot",
"id": "747e7dada034f44d7ddd3a8df17bdb1d6305f883",
"size": "4670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8975"
}
],
"symlink_target": ""
} |
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import CCSDS.PACKET
import EGSE.CNC, EGSE.IF
import MC.IF
import PUS.PACKET
import UTIL.TASK
###########
# classes #
###########
# =============================================================================
class TCclient(EGSE.CNC.TCclient):
"""Subclass of EGSE.CNC.TCclient"""
# this client sends CnC commands
# and automatically receives ACK/NAK CnC responses
# ---------------------------------------------------------------------------
def __init__(self):
"""Initialise attributes only"""
EGSE.CNC.TCclient.__init__(self)
# ---------------------------------------------------------------------------
def connected(self):
"""hook for derived classes"""
LOG_INFO("TCclient.connected", "CNC")
EGSE.IF.s_cncClientConfiguration.connected = True
UTIL.TASK.s_processingTask.notifyCNCconnected()
# ---------------------------------------------------------------------------
def disconnected(self):
"""hook for derived classes"""
LOG_WARNING("TCclient.disconnected", "CNC")
EGSE.IF.s_cncClientConfiguration.connected = False
UTIL.TASK.s_processingTask.notifyCNCdisconnected()
# ---------------------------------------------------------------------------
def pushTCpacket(self, tcPacketDu):
"""Consumes a telecommand packet"""
# the CCSDS TC packet is not checked but directly send
self.sendCNCpacket(tcPacketDu.getBuffer())
# ---------------------------------------------------------------------------
def notifyCNCresponse(self, cncAckNakDU):
"""CnC response received: overloaded from EGSE.CNC.TCclient"""
LOG_INFO("notifyCNCresponse: message = " + cncAckNakDU.getCNCmessage(), "CNC")
MC.IF.s_tmModel.pushTMpacket(cncAckNakDU, None)
# ---------------------------------------------------------------------------
def notifyCCSDSresponse(self, tcAckNakDU):
"""TC response received: overloaded from EGSE.CNC.TCclient"""
LOG_INFO("notifyCCSDSresponse: status = " + tcAckNakDU.getStatus(), "CNC")
MC.IF.s_tmModel.pushTMpacket(tcAckNakDU, None)
# =============================================================================
class TMclient(EGSE.CNC.TMclient):
"""Subclass of EGSE.CNC.TMclient"""
# this client only receives CCSDS TM packets
# ---------------------------------------------------------------------------
def __init__(self):
"""Initialise attributes only"""
EGSE.CNC.TMclient.__init__(self)
# ---------------------------------------------------------------------------
def connected(self):
"""hook for derived classes"""
LOG_INFO("TMclient.connected", "CNC")
EGSE.IF.s_cncClientConfiguration.connected2 = True
UTIL.TASK.s_processingTask.notifyCNC2connected()
# ---------------------------------------------------------------------------
def disconnected(self):
"""hook for derived classes"""
LOG_WARNING("TMclient.disconnected", "CNC")
EGSE.IF.s_cncClientConfiguration.connected2 = False
UTIL.TASK.s_processingTask.notifyCNC2disconnected()
# ---------------------------------------------------------------------------
def notifyTMpacket(self, tmPacket):
"""TM packet received: overloaded from EGSE.CNC.TMclient"""
if PUS.PACKET.isPUSpacket(tmPacket):
# PUS packet
tmPacketDu = PUS.PACKET.TMpacket(tmPacket)
LOG_INFO("PUS TM packet extracted", "CNC")
else:
# CCSDS packet
tmPacketDu = CCSDS.PACKET.TMpacket(tmPacket)
LOG_INFO("CCSDS TM packet extracted", "CNC")
MC.IF.s_tmModel.pushTMpacket(tmPacketDu, None)
####################
# global variables #
####################
# CNC clients are singletons
s_client = None
s_client2 = None
#############
# functions #
#############
# functions to encapsulate access to s_client and s_client2
# -----------------------------------------------------------------------------
def createClients():
"""create the EGSE clients"""
global s_client, s_client2
cncHost = EGSE.IF.s_cncClientConfiguration.cncHost
if cncHost == "":
LOG_INFO("no CNC connection configured", "CNC")
return
s_client = TCclient()
s_client2 = TMclient()
# -----------------------------------------------------------------------------
def connectCNC():
"""Connect CNC TC link"""
LOG_INFO("Connect CNC TC link", "CNC")
cncHost = EGSE.IF.s_cncClientConfiguration.cncHost
cncPort = EGSE.IF.s_cncClientConfiguration.cncPort
if cncHost == "" or cncPort == "-1":
LOG_ERROR("no CNC TC link configured", "CNC")
return
if not s_client.connectToServer(cncHost, int(cncPort)):
LOG_ERROR("Connect TC link failed", "CNC")
# -----------------------------------------------------------------------------
def disconnectCNC():
"""Disonnect CNC TC link"""
LOG_INFO("Disonnect CNC TC link", "CNC")
cncHost = EGSE.IF.s_cncClientConfiguration.cncHost
cncPort = EGSE.IF.s_cncClientConfiguration.cncPort
if cncHost == "" or cncPort == "-1":
LOG_ERROR("no CNC TC link configured", "CNC")
return
s_client.disconnectFromServer()
# -----------------------------------------------------------------------------
def connectCNC2():
"""Connect CNC TM link"""
LOG_INFO("Connect CNC TM link", "CNC")
cncHost = EGSE.IF.s_cncClientConfiguration.cncHost
cncPort2 = EGSE.IF.s_cncClientConfiguration.cncPort2
if cncHost == "" or cncPort2 == "-1":
LOG_ERROR("no CNC TM link configured", "CNC")
return
if not s_client2.connectToServer(cncHost, int(cncPort2)):
LOG_ERROR("Connect TM link failed", "CNC")
# -----------------------------------------------------------------------------
def disconnectCNC2():
"""Disonnect CNC TM link"""
LOG_INFO("Disonnect CNC TM link", "CNC")
cncHost = EGSE.IF.s_cncClientConfiguration.cncHost
cncPort2 = EGSE.IF.s_cncClientConfiguration.cncPort2
if cncHost == "" or cncPort2 == "-1":
LOG_ERROR("no CNC TM link configured", "CNC")
return
s_client2.disconnectFromServer()
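# A hypothetical startup sketch (not in the original module), assuming the
# EGSE client configuration has been loaded elsewhere:
#
# createClients()
# connectCNC() # TC link
# connectCNC2() # TM link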
| {
"content_hash": "a578685d478fa765acc02049b7a75f1e",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 82,
"avg_line_length": 41.74825174825175,
"alnum_prop": 0.5331658291457286,
"repo_name": "Stefan-Korner/SpacePyLibrary",
"id": "40fc4cc4f0c897c876d4c92da68a461e7549a4f8",
"size": "7170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CS/CNCclient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11666"
},
{
"name": "Python",
"bytes": "1264766"
},
{
"name": "Shell",
"bytes": "23787"
}
],
"symlink_target": ""
} |
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_cluster_facts
short_description: Retrieve facts about one or more oVirt/RHV clusters
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV clusters."
notes:
- "This module creates a new top-level C(ovirt_clusters) fact, which
contains a list of clusters."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search cluster X from datacenter Y use following pattern:
name=X and datacenter=Y"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all clusters whose names start with C(production):
- ovirt_cluster_facts:
pattern:
name: 'production*'
- debug:
var: ovirt_clusters
'''
RETURN = '''
ovirt_clusters:
description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys,
all clusters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
pattern=dict(default='', required=False),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
clusters_service = connection.system_service().clusters_service()
clusters = clusters_service.list(search=module.params['pattern'])
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_clusters=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in clusters
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| {
"content_hash": "e34e766e369f5a6d9bc2e72e5c606634",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 142,
"avg_line_length": 30.68888888888889,
"alnum_prop": 0.6299782766111514,
"repo_name": "SergeyCherepanov/ansible",
"id": "c7f5d8cdbaba4777940346303a9cb56f323926a7",
"size": "3498",
"binary": false,
"copies": "53",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/cloud/ovirt/ovirt_cluster_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
import json
import logging
import re
from api.api_samples.python_client.api_client import CloudBoltAPIClient
from api.api_samples.python_client.samples.api_helpers import wait_for_order_completion
from common.methods import set_progress
from servicecatalog.models import ServiceBlueprint
from utilities.exceptions import CloudBoltException
from utilities.models import ConnectionInfo
# suppress logging from requests module
logger = logging.getLogger('requests')
logger.setLevel(40)
logger = logging.getLogger('py.warnings')
logger.setLevel(40)
API_CLIENT_CI = "CIT API Client"
BLUEPRINT = 82
NEW_RESOURCE_NAME = "test testuser"
BP_PAYLOAD = """
{
"group": "/api/v2/groups/2/",
"items": {
"deploy-items": [
{
"blueprint": "/api/v2/blueprints/82/",
"blueprint-items-arguments": {
"build-item-Create Office365 user": {
"parameters": {
"first-name-a364": "test",
"last-name-a364": "testuser",
"user-password-a364": "Ee199407#"
}
}
},
"resource-name": "Office365",
"resource-parameters": {}
}
]
},
"submit-now": "true"
}
"""
# END of BP specific variables
def get_order_id_from_href(order_href):
mo = re.search("/orders/([0-9]+)", order_href)
return int(mo.groups()[0])
def test_order_blueprint(client):
order = json.loads(client.post('/api/v2/orders/', body=BP_PAYLOAD))
order_href = order['_links']['self']['href']
order_id = get_order_id_from_href(order_href)
result = wait_for_order_completion(client, order_id, 180, 10)
if result != 0:
raise CloudBoltException("Blueprint Deployment order {} did not succeed.".format(order_id))
set_progress("Blueprint deployment order {} completed successfully.".format(order_id))
def test_delete_resource(client, resource):
body = "{}"
json.loads(client.post(
'/api/v2/resources/{}/{}/actions/1/'.format(resource.resource_type.name, resource.id), body=body))
def get_api_client():
ci = ConnectionInfo.objects.get(name=API_CLIENT_CI)
return CloudBoltAPIClient(
ci.username, ci.password, ci.ip, ci.port, protocol=ci.protocol)
def run(job, *args, **kwargs):
bp = ServiceBlueprint.objects.get(id=BLUEPRINT)
set_progress(
"Running Continuous Infrastructure Test for blueprint {}".format(bp)
)
client = get_api_client()
# Order the BP
set_progress("### ORDERING BLUEPRINT ###", tasks_done=0, total_tasks=3)
test_order_blueprint(client)
resource = bp.resource_set.filter(name__iexact=NEW_RESOURCE_NAME, lifecycle='ACTIVE').first()
set_progress(f"RESOURCE {resource}")
# Delete the resource from the database only
resource.delete()
set_progress("### DISCOVERING RESOURCES FOR BLUEPRINT ###", tasks_done=1)
bp.sync_resources()
# should be able to get the resource since the sync should have created it
resource = bp.resource_set.get(name__iexact=NEW_RESOURCE_NAME, lifecycle='ACTIVE')
set_progress("### DELETING RESOURCE FOR BLUEPRINT ###", tasks_done=2)
test_delete_resource(client, resource)
set_progress("ALL Tests completed!", tasks_done=3)
| {
"content_hash": "a1e3fbf1d5b298cafa85280b0e63d21d",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 106,
"avg_line_length": 32.56310679611651,
"alnum_prop": 0.6368515205724508,
"repo_name": "CloudBoltSoftware/cloudbolt-forge",
"id": "dcbada1e03f348a3c9a101e3db554ab5b22def43",
"size": "3354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blueprints/office365/cit/office_365_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1665"
},
{
"name": "HTML",
"bytes": "165828"
},
{
"name": "JavaScript",
"bytes": "1871"
},
{
"name": "PowerShell",
"bytes": "5779"
},
{
"name": "Python",
"bytes": "1742154"
},
{
"name": "Shell",
"bytes": "16836"
}
],
"symlink_target": ""
} |
"""
:Authors: - Wilker Aziz
"""
#from .kbest import KBest
#from .ancestral import AncestralSampler
#from .viterbi import viterbi_derivation
#from .value import robust_value_recursion, derivation_value
#from .sample import sample_one, sample_k
| {
"content_hash": "1e100b2e011fe267d369548abbd023c3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 27.11111111111111,
"alnum_prop": 0.7704918032786885,
"repo_name": "wilkeraziz/grasp",
"id": "ef7ea5af541384bba46e0d42c8479027dd286253",
"size": "244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grasp/inference/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "875492"
}
],
"symlink_target": ""
} |
"""
Configuration options registration and useful routines.
"""
import os
from oslo_config import cfg
import st2common.config as common_config
from st2common.constants.system import VERSION_STRING
CONF = cfg.CONF
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def parse_args(args=None):
CONF(args=args, version=VERSION_STRING)
def register_opts():
_register_common_opts()
_register_app_opts()
def _register_common_opts():
common_config.register_opts()
def get_logging_config_path():
return cfg.CONF.api.logging
def _register_app_opts():
# Note "host", "port", "allow_origin", "mask_secrets" options are registered as part of
# st2common config since they are also used outside st2api
static_root = os.path.join(cfg.CONF.system.base_path, 'static')
template_path = os.path.join(BASE_DIR, 'templates/')
pecan_opts = [
cfg.StrOpt('root',
default='st2api.controllers.root.RootController',
help='Action root controller'),
cfg.StrOpt('static_root', default=static_root),
cfg.StrOpt('template_path', default=template_path),
cfg.ListOpt('modules', default=['st2api']),
cfg.BoolOpt('debug', default=False),
cfg.BoolOpt('auth_enable', default=True),
cfg.DictOpt('errors', default={'__force_dict__': True})
]
CONF.register_opts(pecan_opts, group='api_pecan')
logging_opts = [
cfg.BoolOpt('debug', default=False),
cfg.StrOpt('logging', default='conf/logging.conf',
help='location of the logging.conf file'),
cfg.IntOpt('max_page_size', default=100,
help=('Maximum limit (page size) argument which can be specified by the user '
'in a query string.'))
]
CONF.register_opts(logging_opts, group='api')
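# A minimal usage sketch (an assumption; the original module only defines
# these helpers): register all options, parse CLI/config arguments, then
# read a registered value back through oslo.config's global CONF object.
#
# register_opts()
# parse_args()
# print(get_logging_config_path()) # e.g. 'conf/logging.conf'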
| {
"content_hash": "2054d93e02bbf397648e9746bf99699c",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 97,
"avg_line_length": 31.35593220338983,
"alnum_prop": 0.6416216216216216,
"repo_name": "tonybaloney/st2",
"id": "91e0c3c67342c2f27de0c53750661ffcff8b141d",
"size": "2630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "st2api/st2api/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "46066"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "4278891"
},
{
"name": "Shell",
"bytes": "47687"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
import json
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User as DjangoUser
from anaf.core.models import Group, Perspective, ModuleSetting
from anaf.messaging.models import Message, MessageStream, MailingList
from anaf.identities.models import Contact, ContactType
class MessagingApiTest(TestCase):
username = "api_test"
password = "api_password"
authentication_headers = {"CONTENT_TYPE": "application/json",
"HTTP_AUTHORIZATION": "Basic YXBpX3Rlc3Q6YXBpX3Bhc3N3b3Jk"}
content_type = 'application/json'
def setUp(self):
self.group, created = Group.objects.get_or_create(name='test')
self.user, created = DjangoUser.objects.get_or_create(username=self.username, is_staff=True)
self.user.set_password(self.password)
self.user.save()
self.perspective = Perspective(name='test')
self.perspective.set_default_user()
self.perspective.save()
ModuleSetting.set('default_perspective', self.perspective.id)
self.contact_type = ContactType(name='test')
self.contact_type.set_default_user()
self.contact_type.save()
self.contact = Contact(name='test', contact_type=self.contact_type)
self.contact.set_default_user()
self.contact.save()
self.user_contact = Contact(
name='test', related_user=self.user.profile, contact_type=self.contact_type)
self.user_contact.set_user(self.user.profile)
self.user_contact.save()
self.stream = MessageStream(name='test')
self.stream.set_default_user()
self.stream.save()
self.mlist = MailingList(name='test', from_contact=self.contact)
self.mlist.set_default_user()
self.mlist.save()
self.message = Message(
title='test', body='test', author=self.contact, stream=self.stream)
self.message.set_default_user()
self.message.save()
def test_unauthenticated_access(self):
"Test index page at /api/messaging/mlist"
response = self.client.get('/api/messaging/mlist')
# Redirects as unauthenticated
self.assertEquals(response.status_code, 401)
def test_get_mlist(self):
""" Test index page api/messaging/mlist """
response = self.client.get(
path=reverse('api_messaging_mlist'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_one_mlist(self):
response = self.client.get(path=reverse('api_messaging_mlist', kwargs={
'object_ptr': self.mlist.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_mlist(self):
updates = {"name": "API mailing list", "description": "API description update", "from_contact": self.contact.id,
"members": [self.contact.id, ]}
response = self.client.put(path=reverse('api_messaging_mlist', kwargs={'object_ptr': self.mlist.id}),
content_type=self.content_type, data=json.dumps(updates),
**self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['description'], updates['description'])
self.assertEquals(data['from_contact']['id'], updates['from_contact'])
for i, member in enumerate(data['members']):
self.assertEquals(member['id'], updates['members'][i])
def test_get_streams(self):
""" Test index page api/messaging/streams """
response = self.client.get(
path=reverse('api_messaging_streams'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_stream(self):
response = self.client.get(path=reverse('api_messaging_streams', kwargs={
'object_ptr': self.stream.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_stream(self):
updates = {"name": "API stream", }
response = self.client.put(path=reverse('api_messaging_streams', kwargs={'object_ptr': self.stream.id}),
content_type=self.content_type, data=json.dumps(updates),
**self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
def test_get_messages(self):
""" Test index page api/messaging/messages """
response = self.client.get(
path=reverse('api_messaging_messages'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_message(self):
response = self.client.get(path=reverse('api_messaging_messages', kwargs={
'object_ptr': self.message.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_send_message(self):
updates = {"title": "API message title", "body": "Test body", "stream": self.stream.id,
"multicomplete_recipients": u'test@test.com'}
response = self.client.post(path=reverse('api_messaging_messages'), content_type=self.content_type,
data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['title'], updates['title'])
self.assertEquals(data['body'], updates['body'])
self.assertEquals(data['stream']['id'], updates['stream'])
def test_reply_to_message(self):
updates = {"title": "API test", "body": "Test body", "stream": self.stream.id,
"multicomplete_recipients": u'test@test.com'}
response = self.client.put(path=reverse('api_messaging_messages', kwargs={'object_ptr': self.message.id}),
content_type=self.content_type, data=json.dumps(updates),
**self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(data['title'], updates['title'])
self.assertEquals(data['body'], updates['body'])
self.assertEquals(data['stream']['id'], updates['stream'])
| {
"content_hash": "2cab7ea77745119eb08f573fe228ed26",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 120,
"avg_line_length": 46.723404255319146,
"alnum_prop": 0.6355494839101397,
"repo_name": "tovmeod/anaf",
"id": "0f574acd25d276ebbf6d066eb12c1e25357b6b49",
"size": "6588",
"binary": false,
"copies": "1",
"ref": "refs/heads/drf",
"path": "anaf/messaging/api/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "400736"
},
{
"name": "HTML",
"bytes": "1512873"
},
{
"name": "JavaScript",
"bytes": "2136807"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "2045934"
},
{
"name": "Shell",
"bytes": "18005"
},
{
"name": "TSQL",
"bytes": "147855"
}
],
"symlink_target": ""
} |
from sys import argv
# read the WYSS section for how to run this
script, first, second, third = argv
print("The script is called:", script)
print("Your first variable is:", first)
print("Your second variable is:", second)
print("Your third variable is:", third)
| {
"content_hash": "dc9729f34640fe198478ff38bae64ce0",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 43,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.7310606060606061,
"repo_name": "zedshaw/learn-python3-thw-code",
"id": "7d141c1fd99bacc6cbb1b8962e77bed62a6c39c9",
"size": "264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex13.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2110"
},
{
"name": "Python",
"bytes": "62068"
},
{
"name": "Shell",
"bytes": "1544"
},
{
"name": "ShellSession",
"bytes": "113"
}
],
"symlink_target": ""
} |
import argparse
import bisect
import functools
import os
import socket
import struct
import sys
import time
import traceback
from pyasn1 import debug as pyasn1_debug
from pyasn1.codec.ber import decoder
from pyasn1.error import PyAsn1Error
from pyasn1.type import univ
from pysnmp import debug as pysnmp_debug
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.error import PySnmpError
from pysnmp.proto import api
from pysnmp.proto import rfc1905
from pysnmp.proto.rfc1902 import Bits
from pysnmp.proto.rfc1902 import Integer32
from pysnmp.proto.rfc1902 import OctetString
from pysnmp.proto.rfc1902 import Unsigned32
from pysnmp.smi import builder
from pysnmp.smi import compiler
from pysnmp.smi import rfc1902
from pysnmp.smi import view
from snmpsim import confdir
from snmpsim import error
from snmpsim import log
from snmpsim import utils
from snmpsim.record import dump
from snmpsim.record import mvc
from snmpsim.record import sap
from snmpsim.record import snmprec
from snmpsim.record import walk
pcap = utils.try_load('pcap')
RECORD_TYPES = {
dump.DumpRecord.ext: dump.DumpRecord(),
mvc.MvcRecord.ext: mvc.MvcRecord(),
sap.SapRecord.ext: sap.SapRecord(),
walk.WalkRecord.ext: walk.WalkRecord(),
snmprec.SnmprecRecord.ext: snmprec.SnmprecRecord(),
snmprec.CompressedSnmprecRecord.ext: snmprec.CompressedSnmprecRecord()
}
DESCRIPTION = (
'Snoops network traffic for SNMP responses, builds SNMP Simulator '
'data files. Can read capture files or listen live network interface.')
class SnmprecRecord(snmprec.SnmprecRecord):
def format_value(self, oid, value, **context):
(text_oid,
text_tag,
text_value) = snmprec.SnmprecRecord.format_value(
self, oid, value)
if context['variationModule']:
(plain_oid,
plain_tag,
plain_value) = snmprec.SnmprecRecord.format_value(
self, oid, value, nohex=True)
if plain_tag != text_tag:
context['hextag'], context['hexvalue'] = text_tag, text_value
else:
text_tag, text_value = plain_tag, plain_value
handler = context['variationModule']['record']
text_oid, text_tag, text_value = handler(
text_oid, text_tag, text_value, **context)
elif 'stopFlag' in context and context['stopFlag']:
raise error.NoDataNotification()
return text_oid, text_tag, text_value
def _parse_mib_object(arg, last=False):
if '::' in arg:
return rfc1902.ObjectIdentity(*arg.split('::', 1), last=last)
else:
return univ.ObjectIdentifier(arg)
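# Illustrative sketch (added, not in the original module): how the two
# notations accepted above come out. The object names below are examples only.
def _demo_parse_mib_object():
    # MIB-qualified form -> pysnmp ObjectIdentity, resolved later via MIBs
    start = _parse_mib_object('SNMPv2-MIB::sysDescr')
    # plain dotted form -> a ready-to-use ObjectIdentifier
    stop = _parse_mib_object('1.3.6.1.2.1.1', last=True)
    return start, stop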
def main():
variation_module = None
endpoints = {}
contexts = {}
stats = {
'UDP packets': 0,
'IP packets': 0,
'bad packets': 0,
'empty packets': 0,
'unknown L2 protocol': 0,
'SNMP errors': 0,
'SNMP exceptions': 0,
'agents seen': 0,
'contexts seen': 0,
'snapshots taken': 0,
'Response PDUs seen': 0,
'OIDs seen': 0
}
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
'-v', '--version', action='version',
version=utils.TITLE)
parser.add_argument(
'--quiet', action='store_true',
help='Do not print out informational messages')
parser.add_argument(
'--debug', choices=pysnmp_debug.flagMap,
action='append', type=str, default=[],
help='Enable one or more categories of SNMP debugging.')
parser.add_argument(
'--debug-asn1', choices=pyasn1_debug.FLAG_MAP,
action='append', type=str, default=[],
help='Enable one or more categories of ASN.1 debugging.')
parser.add_argument(
'--logging-method', type=lambda x: x.split(':'),
        metavar='<%s[:args]>' % '|'.join(log.METHODS_MAP),
default='stderr', help='Logging method.')
parser.add_argument(
'--log-level', choices=log.LEVELS_MAP,
type=str, default='info', help='Logging level.')
parser.add_argument(
'--start-object', metavar='<MIB::Object|OID>', type=_parse_mib_object,
default=univ.ObjectIdentifier('1.3.6'),
help='Drop all simulation data records prior to this OID specified '
'as MIB object (MIB::Object) or OID (1.3.6.)')
parser.add_argument(
'--stop-object', metavar='<MIB::Object|OID>',
type=functools.partial(_parse_mib_object, last=True),
help='Drop all simulation data records after this OID specified '
'as MIB object (MIB::Object) or OID (1.3.6.)')
parser.add_argument(
'--mib-source', dest='mib_sources', metavar='<URI|PATH>',
action='append', type=str,
default=['http://mibs.snmplabs.com/asn1/@mib@'],
help='One or more URIs pointing to a collection of ASN.1 MIB files.'
'Optional "@mib@" token gets replaced with desired MIB module '
'name during MIB search.')
parser.add_argument(
'--destination-record-type', choices=RECORD_TYPES, default='snmprec',
help='Produce simulation data with record of this type')
parser.add_argument(
'--output-file', metavar='<FILE>', type=str,
help='SNMP simulation data file to write records to')
variation_group = parser.add_argument_group(
'Simulation data variation options')
variation_group.add_argument(
'--variation-modules-dir', action='append', type=str,
help='Search variation module by this path')
variation_group.add_argument(
'--variation-module', type=str,
help='Pass gathered simulation data through this variation module')
variation_group.add_argument(
'--variation-module-options', type=str, default='',
help='Variation module options')
parser.add_argument(
'--output-dir', metavar='<FILE>', type=str, default='.',
help='SNMP simulation data directory to place captured traffic in '
'form of simulation records. File names reflect traffic sources '
'on the network.')
variation_group.add_argument(
'--transport-id-offset', type=int, default=0,
help='When arranging simulation data files, start enumerating '
'receiving transport endpoints from this number.')
traffic_group = parser.add_argument_group('Traffic capturing options')
traffic_group.add_argument(
'--packet-filter', type=str, default='udp and src port 161',
help='Traffic filter (in tcpdump syntax) to use for picking SNMP '
'packets out of the rest of the traffic.')
variation_group.add_argument(
'--listen-interface', type=str,
help='Listen on this network interface.')
parser.add_argument(
'--promiscuous-mode', action='store_true',
help='Attempt to switch NIC to promiscuous mode. Depending on the '
'network, this may make traffic of surrounding machines visible. '
'Might require superuser privileges.')
parser.add_argument(
'--capture-file', metavar='<FILE>', type=str,
help='PCAP file with SNMP simulation data file to read from '
'instead of listening on a NIC.')
args = parser.parse_args()
if not pcap:
sys.stderr.write(
'ERROR: pylibpcap package is missing!\r\nGet it by running '
'`pip install '
'https://downloads.sourceforge.net/project/pylibpcap/pylibpcap'
'/0.6.4/pylibpcap-0.6.4.tar.gz`'
'\r\n')
parser.print_usage(sys.stderr)
return 1
proc_name = os.path.basename(sys.argv[0])
try:
log.set_logger(proc_name, *args.logging_method, force=True)
if args.log_level:
log.set_level(args.log_level)
except error.SnmpsimError as exc:
        sys.stderr.write('ERROR: %s\r\n' % exc)
parser.print_usage(sys.stderr)
sys.exit(1)
if (isinstance(args.start_object, rfc1902.ObjectIdentity) or
isinstance(args.stop_object, rfc1902.ObjectIdentity)):
mib_builder = builder.MibBuilder()
mib_view_controller = view.MibViewController(mib_builder)
compiler.addMibCompiler(mib_builder, sources=args.mib_sources)
try:
if isinstance(args.start_object, rfc1902.ObjectIdentity):
args.start_object.resolveWithMib(mib_view_controller)
if isinstance(args.stop_object, rfc1902.ObjectIdentity):
args.stop_object.resolveWithMib(mib_view_controller)
except PySnmpError as exc:
sys.stderr.write('ERROR: %s\r\n' % exc)
return 1
# Load variation module
if args.variation_module:
for variation_modules_dir in (
args.variation_modules_dir or confdir.variation):
log.info('Scanning "%s" directory for variation '
'modules...' % variation_modules_dir)
if not os.path.exists(variation_modules_dir):
log.info('Directory "%s" does not exist' % variation_modules_dir)
continue
mod = os.path.join(variation_modules_dir, args.variation_module + '.py')
if not os.path.exists(mod):
log.info('Variation module "%s" not found' % mod)
continue
ctx = {'path': mod, 'moduleContext': {}}
try:
with open(mod) as fl:
                exec(compile(fl.read(), mod, 'exec'), ctx)
except Exception as exc:
log.error('Variation module "%s" execution '
'failure: %s' % (mod, exc))
return 1
variation_module = ctx
log.info('Variation module "%s" loaded' % args.variation_module)
break
else:
log.error('variation module "%s" not found' % args.variation_module)
return 1
# Variation module initialization
if variation_module:
log.info('Initializing variation module...')
for handler in ('init', 'record', 'shutdown'):
if handler not in variation_module:
log.error('missing "%s" handler at variation module '
'"%s"' % (handler, args.variation_module))
return 1
handler = variation_module['init']
try:
handler(options=args.variation_module_options, mode='recording',
startOID=args.start_object, stopOID=args.stop_object)
except Exception as exc:
log.error('Variation module "%s" initialization '
'FAILED: %s' % (args.variation_module, exc))
else:
log.info('Variation module "%s" '
'initialization OK' % args.variation_module)
pcap_obj = pcap.pcapObject()
if args.listen_interface:
if not args.quiet:
log.info(
'Listening on interface %s in %spromiscuous '
'mode' % (args.listen_interface,
'' if args.promiscuous_mode else 'non-'))
try:
pcap_obj.open_live(
args.listen_interface, 65536, args.promiscuous_mode, 1000)
except Exception as exc:
log.error(
'Error opening interface %s for snooping: '
'%s' % (args.listen_interface, exc))
return 1
elif args.capture_file:
if not args.quiet:
log.info('Opening capture file %s' % args.capture_file)
try:
pcap_obj.open_offline(args.capture_file)
except Exception as exc:
log.error('Error opening capture file %s for reading: '
'%s' % (args.capture_file, exc))
return 1
else:
sys.stderr.write(
'ERROR: no capture file or live interface specified\r\n')
parser.print_usage(sys.stderr)
return 1
if args.packet_filter:
if not args.quiet:
log.info('Applying packet filter \"%s\"' % args.packet_filter)
pcap_obj.setfilter(args.packet_filter, 0, 0)
if not args.quiet:
        log.info('Processing records from %s till '
                 '%s' % (args.start_object if args.start_object
                         else 'the beginning',
                         args.stop_object if args.stop_object
                         else 'the end'))
def parse_packet(raw):
pkt = {}
# http://www.tcpdump.org/linktypes.html
ll_headers = {
0: 4,
1: 14,
108: 4,
228: 0
}
if pcap_obj.datalink() in ll_headers:
raw = raw[ll_headers[pcap_obj.datalink()]:]
else:
stats['unknown L2 protocol'] += 1
pkt['version'] = (ord(raw[0]) & 0xf0) >> 4
pkt['header_len'] = ord(raw[0]) & 0x0f
pkt['tos'] = ord(raw[1])
pkt['total_len'] = socket.ntohs(
struct.unpack('H', raw[2:4])[0])
pkt['id'] = socket.ntohs(
struct.unpack('H', raw[4:6])[0])
pkt['flags'] = (ord(raw[6]) & 0xe0) >> 5
pkt['fragment_offset'] = socket.ntohs(
struct.unpack('H', raw[6:8])[0] & 0x1f)
pkt['ttl'] = ord(raw[8])
pkt['protocol'] = ord(raw[9])
pkt['checksum'] = socket.ntohs(
struct.unpack('H', raw[10:12])[0])
pkt['source_address'] = pcap.ntoa(
struct.unpack('i', raw[12:16])[0])
pkt['destination_address'] = pcap.ntoa(
struct.unpack('i', raw[16:20])[0])
if pkt['header_len'] > 5:
pkt['options'] = raw[20:4 * (pkt['header_len'] - 5)]
else:
pkt['options'] = None
raw = raw[4 * pkt['header_len']:]
if pkt['protocol'] == 17:
pkt['source_port'] = socket.ntohs(
struct.unpack('H', raw[0:2])[0])
pkt['destination_port'] = socket.ntohs(
struct.unpack('H', raw[2:4])[0])
raw = raw[8:]
stats['UDP packets'] += 1
pkt['data'] = raw
stats['IP packets'] += 1
return pkt
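    # Note (added for clarity): the slicing above follows RFC 791's fixed
    # 20-byte IPv4 header layout -- raw[2:4] is the 16-bit total length,
    # raw[12:16] and raw[16:20] the source/destination addresses. The first
    # four bytes could equally be decoded with one network-order unpack:
    #   version_ihl, tos, total_len = struct.unpack('!BBH', raw[:4])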
def handle_snmp_message(d, t, private={}):
msg_ver = api.decodeMessageVersion(d['data'])
if msg_ver in api.protoModules:
p_mod = api.protoModules[msg_ver]
else:
stats['bad packets'] += 1
return
try:
rsp_msg, whole_msg = decoder.decode(
d['data'], asn1Spec=p_mod.Message())
except PyAsn1Error:
stats['bad packets'] += 1
return
if rsp_msg['data'].getName() == 'response':
rsp_pdu = p_mod.apiMessage.getPDU(rsp_msg)
error_status = p_mod.apiPDU.getErrorStatus(rsp_pdu)
if error_status:
stats['SNMP errors'] += 1
else:
endpoint = d['source_address'], d['source_port']
if endpoint not in endpoints:
endpoints[endpoint] = udp.domainName + (
args.transport_id_offset + len(endpoints),)
stats['agents seen'] += 1
context = '%s/%s' % (
p_mod.ObjectIdentifier(endpoints[endpoint]),
p_mod.apiMessage.getCommunity(rsp_msg))
if context not in contexts:
contexts[context] = {}
stats['contexts seen'] += 1
stats['Response PDUs seen'] += 1
if 'basetime' not in private:
private['basetime'] = t
for oid, value in p_mod.apiPDU.getVarBinds(rsp_pdu):
if oid < args.start_object:
continue
if args.stop_object and oid >= args.stop_object:
continue
if oid in contexts[context]:
if value != contexts[context][oid]:
stats['snapshots taken'] += 1
else:
contexts[context][oid] = [], []
contexts[context][oid][0].append(t - private['basetime'])
contexts[context][oid][1].append(value)
stats['OIDs seen'] += 1
def handle_packet(pktlen, data, timestamp):
if not data:
stats['empty packets'] += 1
return
handle_snmp_message(parse_packet(data), timestamp)
try:
if args.listen_interface:
log.info(
'Listening on interface "%s", kill me when you '
'are done.' % args.listen_interface)
while True:
pcap_obj.dispatch(1, handle_packet)
elif args.capture_file:
log.info('Processing capture file "%s"....' % args.capture_file)
            # use a name other than `args` so the parsed options remain
            # available to the cleanup code in the `finally` block below
            pkt_args = pcap_obj.next()
            while pkt_args:
                handle_packet(*pkt_args)
                pkt_args = pcap_obj.next()
except (TypeError, KeyboardInterrupt):
log.info('Shutting down process...')
finally:
data_file_handler = SnmprecRecord()
for context in contexts:
ext = os.path.extsep
ext += RECORD_TYPES[args.destination_record_type].ext
filename = os.path.join(args.output_dir, context + ext)
if not args.quiet:
log.info(
'Creating simulation context %s at '
'%s' % (context, filename))
try:
os.mkdir(os.path.dirname(filename))
except OSError:
pass
record = RECORD_TYPES[args.destination_record_type]
try:
output_file = record.open(filename, 'wb')
except IOError as exc:
log.error('writing %s: %s' % (filename, exc))
return 1
count = total = iteration = 0
time_offset = 0
req_time = time.time()
oids = sorted(contexts[context])
oids.append(oids[-1]) # duplicate last OID to trigger stopFlag
while True:
                for i, oid in enumerate(oids):
timeline, values = contexts[context][oid]
value = values[
min(len(values) - 1,
bisect.bisect_left(timeline, time_offset))
]
if value.tagSet in (rfc1905.NoSuchObject.tagSet,
rfc1905.NoSuchInstance.tagSet,
rfc1905.EndOfMibView.tagSet):
stats['SNMP exceptions'] += 1
continue
# remove value enumeration
if value.tagSet == Integer32.tagSet:
value = Integer32(value)
if value.tagSet == Unsigned32.tagSet:
value = Unsigned32(value)
if value.tagSet == Bits.tagSet:
value = OctetString(value)
# Build .snmprec record
ctx = {
'origOid': oid,
'origValue': value,
'count': count,
'total': total,
'iteration': iteration,
'reqTime': req_time,
'startOID': args.start_object,
'stopOID': args.stop_object,
                        'stopFlag': i == len(oids) - 1,
'variationModule': variation_module
}
try:
line = data_file_handler.format(oid, value, **ctx)
except error.MoreDataNotification as exc:
count = 0
iteration += 1
                        if 'period' in exc:
                            time_offset += exc['period']
log.info(
'%s OIDs dumped, advancing time window to '
'%.2f sec(s)...' % (total, time_offset))
break
except error.NoDataNotification:
pass
except error.SnmpsimError as exc:
log.error(exc)
continue
else:
output_file.write(line)
count += 1
total += 1
else:
break
output_file.flush()
output_file.close()
if variation_module:
log.info('Shutting down variation module '
'"%s"...' % args.variation_module)
handler = variation_module['shutdown']
try:
handler(options=args.variation_module_options,
mode='recording')
except Exception as exc:
log.error('Variation module "%s" shutdown FAILED: '
'%s' % (args.variation_module, exc))
else:
log.info(
'Variation module "%s" shutdown'
' OK' % args.variation_module)
log.info("""\
PCap statistics:
packets snooped: %s
packets dropped: %s
    packets dropped by interface: %s\
""" % pcap_obj.stats())
log.info("""\
SNMP statistics:
%s\
""" % ' '.join(['%s: %s\r\n' % kv for kv in stats.items()]))
return 0
if __name__ == '__main__':
try:
rc = main()
except KeyboardInterrupt:
sys.stderr.write('shutting down process...')
rc = 0
except Exception as exc:
sys.stderr.write('process terminated: %s' % exc)
for line in traceback.format_exception(*sys.exc_info()):
sys.stderr.write(line.replace('\n', ';'))
rc = 1
sys.exit(rc)
| {
"content_hash": "14bf4a72e6517e1ae01544cdf3298be4",
"timestamp": "",
"source": "github",
"line_count": 690,
"max_line_length": 84,
"avg_line_length": 32.37246376811594,
"alnum_prop": 0.5357030935219591,
"repo_name": "etingof/snmpsim",
"id": "c5c435d0a94a061c80c46a11675244f5c2c971c7",
"size": "22542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snmpsim/commands/pcap2rec.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "287683"
},
{
"name": "Shell",
"bytes": "4190"
}
],
"symlink_target": ""
} |
import unittest, sys, os, optparse, re
import astropy, astropy.io.fits as pyfits, numpy as np
import photometry_db, ldac, utilities
##########################
__cvs_id__ = "$Id: save_slr.py,v 1.2 2010-09-01 01:38:56 dapple Exp $"
##########################
##################################################
### Photometry Global Database
##################################################
class Phot_db(object):
'''Provide lazy, proxy access to the photometry database of choice'''
def __init__(self, db, *args, **keywords):
self.db = db
self.instance = None
self.args = args
self.keywords = keywords
def __getattr__(self, name):
if self.instance is None:
self.instance = self.db(*self.args, **self.keywords)
return getattr(self.instance, name)
__default_photometry_db__ = Phot_db(photometry_db.Photometry_db)
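# Usage sketch (added; not part of the original file). The proxy defers
# constructing the real database until the first attribute lookup, so
# importing this module stays cheap; `registerSlrZP` is assumed to exist
# on the wrapped class, as saveSlrZP below relies on it.
def _demo_lazy_proxy():
    db = Phot_db(photometry_db.Photometry_db)
    assert db.instance is None       # nothing has been constructed yet
    db.registerSlrZP                 # first lookup builds the instance
    assert db.instance is not None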
####################################################
def main(argv = sys.argv):
###
def parse_spec(option, opt, value, parser):
key, val = value.split('=')
if not hasattr(parser.values, 'specification'):
setattr(parser.values, 'specification', {})
parser.values.specification[key] = val
###
parser = optparse.OptionParser()
parser.add_option('-c', '--cluster',
dest = 'cluster',
help = 'Cluster name',
default = None)
parser.add_option('-o', '--offsets',
dest = 'offsetfile',
help = 'Name of offset file',
metavar = 'FILE',
default = None)
parser.add_option('-i', '--input',
dest = 'inputfile',
help = 'Name of catalog which was calibrated',
metavar = 'FILE',
default = None)
parser.add_option('-s', '--spec', dest='specification',
action='callback',
type= 'string',
help='key=val set determines the uniqueness of this calibration',
default = {},
metavar = 'key=val',
callback = parse_spec)
parser.add_option('--fluxtype',
dest = 'fluxtype',
help = 'Flux Type to pull from ZPS table',
default = 'APER')
options, args = parser.parse_args(argv)
if options.cluster is None:
parser.error('Please specify cluster name')
if options.offsetfile is None:
parser.error('Please specify SLR offset file')
if options.inputfile is None:
parser.error('Please specify cat that SLR calibrated')
zplist = ldac.openObjectFile(options.inputfile, 'ZPS')
print zplist['filter']
saveSlrZP(cluster = options.cluster,
offsetFile = options.offsetfile,
zplist = zplist,
fluxtype = options.fluxtype,
**options.specification)
####################################################
# User Callable Functions
####################################################
def saveSlrZP(cluster, offsetFile, zplist, photometry_db = __default_photometry_db__, fluxtype = 'APER', **specifications):
offsets = {}
input = open(offsetFile)
for line in input.readlines():
tokens = line.split()
filter = tokens[1]
zp = float(tokens[2])
zperr = float(tokens[3])
if not re.match('MAG_', filter):
filter = 'MAG_%s-%s' % (fluxtype, filter)
offsets[filter] = (zp, zperr)
filters = {}
for filter, zp in zip(zplist['filter'], zplist['zeropoints']):
if not re.match('FLUX_', filter) and not re.match('MAG_', filter):
filter = 'MAG_%s-%s' % (fluxtype, filter)
match = re.match('FLUX_(.*)', filter)
if match:
filter = 'MAG_%s' % match.group(1)
filters[filter] = zp
slr_offsets = {}
for filterkey in offsets.keys():
filter = utilities.extractFullFilter(filterkey)
new_zp = filters[filterkey] + offsets[filterkey][0]
zperr = offsets[filterkey][1]
slrZP = photometry_db.registerSlrZP(cluster, filter = filter,
zp = float(new_zp), zperr = zperr,
fitFilter = filter, **specifications)
instrument, config, chip, stdfilter = utilities.parseFilter(filter)
slr_offsets[filter] = [instrument, stdfilter, slrZP]
for filterkey in filters.keys():
filter = utilities.extractFullFilter(filterkey)
if filter in slr_offsets:
slr_instrument, slr_stdfilter, slrZP = slr_offsets[filter]
photometry_db.updateCalibration(cluster, filter = filter, calibration = slrZP, **specifications)
else:
instrument, config, chip, stdfilter = utilities.parseFilter(filter)
for slr_filterkey, (slr_instrument, slr_stdfilter, slrZP) in slr_offsets.iteritems():
if slr_instrument == instrument and slr_stdfilter == stdfilter:
photometry_db.updateCalibration(cluster, filter = filter, calibration = slrZP, **specifications)
break
####################################################
# TESTING
####################################################
class TestingDBEntry(object):
def __init__(self, id, **fields):
self.id = id
self.fields = fields
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError
###
class TestingDatabase(object):
def __init__(self):
self.reset()
###
def reset(self):
self.slr = []
self.calibrations = []
###
def registerSlrZP(self, cluster, fitFilter, zp, zperr, **specification):
entry = TestingDBEntry(len(self.slr), cluster = cluster,
fitFilter = fitFilter,
zp = zp,
zperr = zperr,
**specification)
self.slr.append(entry)
return entry
###
def updateCalibration(self, cluster, calibration, **specification):
self.calibrations.append(TestingDBEntry(len(self.calibrations), cluster = cluster, calibration = calibration, **specification))
###########
class TestSaveOffsets(unittest.TestCase):
def setUp(self):
self.db = TestingDatabase()
raw_slr_offsets = '''V SUBARU-10_2-1-W-J-V 0.039 0.0043
MPu MEGAPRIME-0-1-u 0.195374 0.016295
WHTB WHT-0-1-B 0.516663 0.0217352
'''
self.filternames = [ line.split()[1] for line in raw_slr_offsets.splitlines() ]
self.orig_zps = np.random.uniform(-4, 4, size=3) + 27.
class ZP(object):
def __init__(self, filter, zp, zperr):
self.filter = filter
self.zp = zp
self.zperr = zperr
self.slr_zps = {}
for i, line in enumerate(raw_slr_offsets.splitlines()):
tokens = line.split()
filter = tokens[1]
offset = float(tokens[2])
zperr = float(tokens[3])
self.slr_zps[filter] = ZP(filter, self.orig_zps[i] + offset, zperr)
self.offsetFile = 'test_save_slr.offsets'
output = open(self.offsetFile, 'w')
output.write(raw_slr_offsets)
output.close()
#######
def tearDown(self):
if os.path.exists(self.offsetFile):
os.remove(self.offsetFile)
#######
def testSaveOffsetsforSLR(self):
zplist = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([pyfits.Column(name = 'filter', format='20A',
array = self.filternames),
pyfits.Column(name = 'zeropoints', format='E',
array = self.orig_zps)])))
saveSlrZP(cluster = 'testcluster', offsetFile = self.offsetFile,
zplist = zplist, fluxtype = 'iso', myspec = 'custom',
photometry_db = self.db)
self.assertEquals(len(self.db.slr), 3)
self.assertEquals(sorted([slr.fitFilter for slr in self.db.slr]), sorted(self.slr_zps.keys()))
for slr in self.db.slr:
match = self.slr_zps[slr.fitFilter]
self.assertEquals(slr.cluster, 'testcluster')
self.assertEquals(slr.fitFilter, match.filter)
self.assertTrue(np.abs(slr.zp - match.zp) < 0.001)
self.assertTrue(np.abs(slr.zperr - match.zperr) < 0.001)
self.assertEquals(slr.fluxtype, 'iso')
self.assertEquals(slr.myspec, 'custom')
#######
def testTransferOffsets(self):
transferFilters = 'SUBARU-9-2-W-J-V SUBARU-10_1-1-W-J-V MEGAPRIME-0-1-g'.split()
transfer_orig_zps = [23.4, 25.3, 22.4]
correspondingFilters = {'SUBARU-9-2-W-J-V' : 'SUBARU-10_2-1-W-J-V',
'SUBARU-10_1-1-W-J-V' : 'SUBARU-10_2-1-W-J-V',
'MEGAPRIME-0-1-g' : None}
filternames = self.filternames + transferFilters
orig_zps = self.orig_zps.tolist() + transfer_orig_zps
zplist = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([pyfits.Column(name = 'filter', format='20A',
array = filternames),
pyfits.Column(name = 'zeropoints', format='E',
array = orig_zps)])))
saveSlrZP(cluster = 'testcluster', offsetFile = self.offsetFile,
zplist = zplist, fluxtype = 'iso', myspec = 'custom',
photometry_db = self.db)
for filter in filternames:
correspondingFilter = filter
if filter in correspondingFilters:
correspondingFilter = correspondingFilters[filter]
if correspondingFilter is not None:
slrmatch = None
for slr in self.db.slr:
if correspondingFilter == slr.fitFilter:
slrmatch = slr
break
self.assertTrue(slrmatch is not None)
calibmatch = None
for calib in self.db.calibrations:
if filter == calib.filter:
calibmatch = calib
break
self.assertTrue(calibmatch is not None)
self.assertEquals(calibmatch.cluster, 'testcluster')
self.assertEquals(calibmatch.filter, filter)
self.assertEquals(calibmatch.fluxtype, 'iso')
self.assertEquals(calibmatch.myspec, 'custom')
self.assertEquals(calibmatch.calibration, slrmatch)
#################
def test():
testcases = [TestSaveOffsets]
suite = unittest.TestSuite(map(unittest.TestLoader().loadTestsFromTestCase,
testcases))
unittest.TextTestRunner(verbosity=2).run(suite)
################################
### COMMAND LINE EXECUTABLE
################################
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == 'test':
test()
else:
main()
| {
"content_hash": "b9995c4da1c043dc3499b9d2321834c4",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 135,
"avg_line_length": 31.415343915343914,
"alnum_prop": 0.5014736842105263,
"repo_name": "deapplegate/wtgpipeline",
"id": "f979a3455356ac53bc969443954dfe1a4c9c1361",
"size": "12001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "save_slr.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "183"
},
{
"name": "C",
"bytes": "7161"
},
{
"name": "C++",
"bytes": "65083"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Perl",
"bytes": "38992"
},
{
"name": "Python",
"bytes": "13671330"
},
{
"name": "Roff",
"bytes": "48622"
},
{
"name": "Shell",
"bytes": "3637313"
},
{
"name": "XSLT",
"bytes": "54208"
}
],
"symlink_target": ""
} |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name="trulia",
version="0.0.3",
author="Matt Koskela",
author_email="mattkoskela@gmail.com",
packages=["trulia"],
url="https://github.com/mattkoskela/trulia",
license="LICENSE",
description="Python library for accessing trulia.com's REST API",
long_description=open("README.md").read(),
install_requires=[
"requests==2.20.0",
"xmltodict==0.9.0"
]
)
| {
"content_hash": "b3545af83de8e0b7d65514fcc06c5e4e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 69,
"avg_line_length": 25.6,
"alnum_prop": 0.64453125,
"repo_name": "mattkoskela/trulia",
"id": "b44f51a8ba8b00c58bfcd898013613712c4956ba",
"size": "512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9063"
}
],
"symlink_target": ""
} |
import unittest
from hecuba import config, StorageDict
from hecuba.IStorage import IStorage
class PersistentDict(StorageDict):
'''
@TypeSpec dict<<key:int>, value:double>
'''
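# Sketch (added, not part of the original tests): the @TypeSpec annotation
# above maps the dict onto a Cassandra table with an int key column and a
# double value column. Assuming a reachable Cassandra cluster, a minimal
# round trip would look like the (never-called) helper below; the table
# name is made up.
def _typespec_round_trip():
    d = PersistentDict()
    d.make_persistent('demo_ksp.demo_table')  # hypothetical name
    d[42] = 3.14
    assert abs(d[42] - 3.14) < 1e-9
    d.stop_persistent()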
class IStorageTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.old = config.execution_name
config.NUM_TEST = 0 # HACK a new attribute to have a global counter
@classmethod
def tearDownClass(cls):
config.execution_name = cls.old
del config.NUM_TEST
# Create a new keyspace per test
def setUp(self):
config.NUM_TEST = config.NUM_TEST + 1
self.current_ksp = "IStorageTests{}".format(config.NUM_TEST).lower()
config.execution_name = self.current_ksp
def tearDown(self):
#config.session.execute("DROP KEYSPACE IF EXISTS {}".format(self.current_ksp))
pass
def stop_persistent_method_test(self):
key = 123
value = 456
name = 'istorage_pers'
base_dict = PersistentDict()
def check_stop_pers():
assert (isinstance(base_dict, IStorage))
base_dict.stop_persistent()
self.assertRaises(RuntimeError, check_stop_pers)
# PyCOMPSs requires uuid of type str
base_dict.make_persistent(name)
base_dict[key] = value
base_dict.stop_persistent()
        self.assertIsNone(base_dict.storage_id)
self.assertRaises(RuntimeError, check_stop_pers)
base_dict.make_persistent(name)
self.assertEqual(base_dict[key], value)
base_dict.stop_persistent()
self.assertRaises(RuntimeError, check_stop_pers)
external_dict = PersistentDict(name)
self.assertEqual(external_dict[key], value)
def delete_persistent_method_test(self):
key = 123
value = 456
name = 'istorage_pers'
base_dict = PersistentDict()
def check_stop_pers():
assert (isinstance(base_dict, IStorage))
base_dict.stop_persistent()
def check_del_pers():
assert (isinstance(base_dict, IStorage))
base_dict.delete_persistent()
self.assertRaises(RuntimeError, check_del_pers)
# PyCOMPSs requires uuid of type str
base_dict.make_persistent(name)
base_dict[key] = value
base_dict.delete_persistent()
        self.assertIsNone(base_dict.storage_id)
self.assertRaises(RuntimeError, check_del_pers)
self.assertRaises(RuntimeError, check_stop_pers)
base_dict.make_persistent(name)
def get_key():
res = base_dict[key]
self.assertRaises(KeyError, get_key)
base_dict.delete_persistent()
self.assertRaises(RuntimeError, check_del_pers)
self.assertRaises(RuntimeError, check_stop_pers)
external_dict = PersistentDict(name)
def get_key_ext():
res = external_dict[key]
self.assertRaises(KeyError, get_key_ext)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "4da83afc73d6304e13b392a9335e1672",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 86,
"avg_line_length": 26.016666666666666,
"alnum_prop": 0.6274823830877643,
"repo_name": "bsc-dd/hecuba",
"id": "737940714da24502026a0b6dae4926d9ceff1c40",
"size": "3122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hecuba_py/tests/withcassandra/istorage_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "725727"
},
{
"name": "CMake",
"bytes": "33556"
},
{
"name": "Dockerfile",
"bytes": "4837"
},
{
"name": "Java",
"bytes": "9467"
},
{
"name": "Makefile",
"bytes": "2042"
},
{
"name": "Python",
"bytes": "463347"
},
{
"name": "Shell",
"bytes": "118834"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class CategoryorderValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="categoryorder",
parent_name="layout.polar.radialaxis",
**kwargs,
):
super(CategoryorderValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop(
"values",
[
"trace",
"category ascending",
"category descending",
"array",
"total ascending",
"total descending",
"min ascending",
"min descending",
"max ascending",
"max descending",
"sum ascending",
"sum descending",
"mean ascending",
"mean descending",
"median ascending",
"median descending",
],
),
**kwargs,
)
| {
"content_hash": "4cc2bc34a12d92ebea91b77fd1a15c54",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 32.108108108108105,
"alnum_prop": 0.43013468013468015,
"repo_name": "plotly/plotly.py",
"id": "4776cdb882cf510367bafb27ad45b60ca943b79c",
"size": "1188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/polar/radialaxis/_categoryorder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0010_auto_20160916_1053'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(default='', upload_to='/Users/Jarvis/Desktop/Ingegnemagia/media'),
),
]
| {
"content_hash": "7196bdd9c2a0fee5e9484c3819edf502",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 102,
"avg_line_length": 23.555555555555557,
"alnum_prop": 0.6061320754716981,
"repo_name": "CaptainMich/IMS",
"id": "b85b0387893958d27a2bf069186569f0f4dabb10",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "site/blog/migrations/0011_auto_20160916_1057.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3350"
},
{
"name": "HTML",
"bytes": "13732"
},
{
"name": "JavaScript",
"bytes": "1621"
},
{
"name": "Python",
"bytes": "23442"
}
],
"symlink_target": ""
} |
"""pkgs module tests."""
import mox
import stubout
from google.apputils import app
from google.apputils import basetest
from simian.mac.munki import pkgs
class MunkiPackageInfoTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.mpi = pkgs.MunkiPackageInfo()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def MockOut(self, method_name, args=None):
self.mox.StubOutWithMock(self.mpi, method_name)
def testIsOSX(self):
"""Test IsOSX()."""
mock_uname = self.mox.CreateMockAnything()
self.stubs.Set(pkgs.os, 'uname', mock_uname)
mock_uname().AndReturn(['Darwin', 'foo'])
mock_uname().AndReturn(['NotDarwin', 'foo'])
self.mox.ReplayAll()
self.assertTrue(self.mpi.IsOSX())
self.assertFalse(self.mpi.IsOSX())
self.mox.VerifyAll()
def testGetMunkiPath(self):
"""Test _GetMunkiPath()."""
base_path = self.mpi.munki_path
filename = 'bar'
joined = '/'.join([base_path, filename])
mock_join = self.mox.CreateMockAnything()
self.stubs.Set(pkgs.os.path, 'join', mock_join)
mock_join(base_path, filename).AndReturn(joined)
self.mox.ReplayAll()
self.assertEqual(joined, self.mpi._GetMunkiPath(filename))
self.mox.VerifyAll()
def testVerifyMunkiInstall(self):
"""Test VerifyMunkiInstall()."""
self.MockOut('IsOSX')
self.mpi.IsOSX().AndReturn(True)
self.mox.StubOutWithMock(pkgs.os.path, 'isdir')
pkgs.os.path.isdir(self.mpi.munki_path).AndReturn(True)
self.mox.StubOutWithMock(pkgs.os.path, 'isfile')
self.MockOut('_GetMunkiPath')
for f in self.mpi.REQUIRED_MUNKI_BINS:
self.mpi._GetMunkiPath(f).AndReturn(f)
pkgs.os.path.isfile(f).AndReturn(True)
self.mox.ReplayAll()
self.mpi.VerifyMunkiInstall()
self.assertTrue(self.mpi.munki_install_verified)
self.mpi.VerifyMunkiInstall()
self.mox.VerifyAll()
def testCreateFromPackage(self):
"""Test CreateFromPackage()."""
makepkginfo = '/tmp/makepkginfo'
catalogs = ['testing', 'stable']
filename = 'foo'
description = 'foo package description!!'
display_name = 'Display Name'
stdout = 'foo xml'
stderr = ''
status = 0
args = [makepkginfo, filename, '--description=%s' % description,
'--displayname=%s' % display_name,
'--catalog=testing', '--catalog=stable']
self.stubs.Set(
pkgs.plist, 'MunkiPackageInfoPlist', self.mox.CreateMockAnything())
mock_plist = self.mox.CreateMockAnything()
self.MockOut('VerifyMunkiInstall')
self.MockOut('_GetMunkiPath')
self.mpi.VerifyMunkiInstall().AndReturn(None)
self.mpi._GetMunkiPath(pkgs.MAKEPKGINFO).AndReturn(makepkginfo)
mock_popen = self.mox.CreateMockAnything()
self.stubs.Set(pkgs, 'subprocess', self.mox.CreateMock(pkgs.subprocess))
pkgs.subprocess.Popen(
args,
stdin=None, stdout=pkgs.subprocess.PIPE, stderr=pkgs.subprocess.PIPE,
close_fds=True,
shell=False).AndReturn(mock_popen)
mock_popen.communicate(None).AndReturn((stdout, stderr))
mock_popen.poll().AndReturn(status)
pkgs.plist.MunkiPackageInfoPlist(stdout).AndReturn(mock_plist)
mock_plist.Parse().AndReturn(None)
self.mox.ReplayAll()
self.mpi.CreateFromPackage(filename, description, display_name, catalogs)
self.assertEqual(self.mpi.filename, filename)
self.mox.VerifyAll()
def testCreateFromPackageError(self):
"""Test CreateFromPackage()."""
makepkginfo = '/tmp/makepkginfo'
catalogs = ['testing', 'stable']
filename = 'foo'
description = 'foo package description!'
display_name = 'Display Name'
stdout = ''
stderr = 'zomg error'
status = 123
args = [makepkginfo, filename, '--description=%s' % description,
'--displayname=%s' % display_name,
'--catalog=testing', '--catalog=stable']
self.MockOut('VerifyMunkiInstall')
self.MockOut('_GetMunkiPath')
self.mpi.VerifyMunkiInstall().AndReturn(None)
self.mpi._GetMunkiPath(pkgs.MAKEPKGINFO).AndReturn(makepkginfo)
mock_popen = self.mox.CreateMockAnything()
self.stubs.Set(pkgs, 'subprocess', self.mox.CreateMock(pkgs.subprocess))
pkgs.subprocess.Popen(
args,
stdin=None, stdout=pkgs.subprocess.PIPE, stderr=pkgs.subprocess.PIPE,
close_fds=True,
shell=False).AndReturn(mock_popen)
mock_popen.communicate(None).AndReturn((stdout, stderr))
mock_popen.poll().AndReturn(status)
self.mox.ReplayAll()
try:
self.mpi.CreateFromPackage(filename, description, display_name, catalogs)
self.fail('CreateFromPackage() should NOT return here')
    except pkgs.Error as e:
self.assertEqual(
e.args[0],
('makepkginfo: exit status %d, stderr=%s' % (status, stderr)))
self.mox.VerifyAll()
def testGetPlist(self):
"""Test GetPlist()."""
self.mpi.plist = 'foo'
self.assertEqual(self.mpi.GetPlist(), 'foo')
def main(unused_argv):
basetest.main()
if __name__ == '__main__':
app.run()
| {
"content_hash": "257d5de13777086211f9842abe8b014a",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 79,
"avg_line_length": 33.064935064935064,
"alnum_prop": 0.6734092694422624,
"repo_name": "sillywilly42/simian",
"id": "aa081bceac3bc79a700be4e09c045f42ae5d01ec",
"size": "5714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/simian/mac/munki/pkgs_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "38117"
},
{
"name": "HTML",
"bytes": "96126"
},
{
"name": "JavaScript",
"bytes": "34481"
},
{
"name": "Makefile",
"bytes": "7246"
},
{
"name": "Python",
"bytes": "1402979"
},
{
"name": "Shell",
"bytes": "20790"
}
],
"symlink_target": ""
} |
"""
================================LICENSE======================================
Copyright (c) 2015 Chirag Mello & Mario Tambos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
================================LICENSE======================================
"""
import types
import unittest
import numpy as np
import numexpr as ne
from collections import defaultdict
from functools import partial
from spatialpooler import common
from spatialpooler.utils import RingBuffer
class CommonTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.columns = np.zeros(shape=(2, 2, 2, 2))
self.distances =\
np.asarray([
[
[[0., 1.], [1., np.sqrt(2)]],
[[1., 0.], [np.sqrt(2), 1.]]
],
[
[[1., np.sqrt(2)], [0., 1.]],
[[np.sqrt(2), 1.], [1., 0.]]
]
])
def test_initialise_synapses(self):
shape = (2, 2, 2, 2)
p_connect = 0.5
connect_threshold = 0.2
cols, dists = common.initialise_synapses(shape, p_connect,
connect_threshold)
self.assertIsInstance(cols, np.ndarray)
self.assertIsInstance(dists, np.ndarray)
self.assertEqual(cols.shape, shape)
expr = ne.evaluate('0 <= cols')
self.assertTrue((expr | np.isnan(cols)).all())
expr = ne.evaluate('cols <= 1')
self.assertTrue((expr | np.isnan(cols)).all())
self.assertListEqual(dists.tolist(), self.distances.tolist())
def test_inhibit_columns(self):
inhibition_area = 2*np.pi
overlap = np.array([[2, 1], [2, 1]])
part_deque = partial(RingBuffer, input_array=np.zeros(2), copy=True)
activity = defaultdict(part_deque)
desired_activity = 1
active, activity =\
common.inhibit_columns(self.columns, self.distances,
inhibition_area, overlap, activity,
desired_activity)
self.assertIsInstance(active, np.ndarray)
self.assertEqual(active.shape, (2, 2))
self.assertIsInstance(activity, defaultdict)
self.assertListEqual(active.tolist(), [[True, False], [True, False]])
self.assertIsInstance(activity[0, 0], RingBuffer)
self.assertListEqual(list(activity[0, 0]), [0, 1])
self.assertListEqual(list(activity[1, 0]), [0, 1])
self.assertEqual(len(activity), 4)
def test_calculate_inhibition_area(self):
inhibition_area = common.update_inhibition_area(self.columns, 0.2)
self.assertIsInstance(inhibition_area, types.FloatType)
self.assertAlmostEqual(inhibition_area, 0.78539816)
def test_calculate_min_activity(self):
active = np.array([[1, 1], [1, 1]])
inhibition_area = np.pi
part_deque = partial(RingBuffer, input_array=np.zeros(2), copy=True)
activity = defaultdict(part_deque)
activity[0, 0].extend([1, 1])
activity[0, 1].extend([1])
activity[1, 0].extend([1])
activity[1, 1].extend([1])
min_activity_threshold = 1
min_activity =\
common.calculate_min_activity(self.columns, active, self.distances,
inhibition_area, activity,
min_activity_threshold)
self.assertIsInstance(min_activity, np.ndarray)
self.assertEqual(min_activity.shape, (2, 2))
self.assertListEqual(min_activity.tolist(), [[1., 2.], [2., 1.]])
def test_test_for_convergence(self):
# Test True
synapses_modified = np.zeros(10, dtype=np.bool)
converged = common.test_for_convergence(synapses_modified)
self.assertIsInstance(converged, types.BooleanType)
self.assertTrue(converged)
# Test False
synapses_modified = np.ones(10, dtype=np.bool)
converged = common.test_for_convergence(synapses_modified)
self.assertIsInstance(converged, types.BooleanType)
self.assertFalse(converged)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "f07de11eca93fce6b0d4e03724161dcf",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 41.96,
"alnum_prop": 0.6003813155386082,
"repo_name": "g402chi/SpatialPooler",
"id": "415625d8bb69ae204ca369e0018ca7c3c96196e6",
"size": "5245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spatialpooler/test/test_common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "119173"
}
],
"symlink_target": ""
} |
import datetime
import threading
import subprocess
import pymorse
import tempfile
import numpy as np
import rospy
from std_msgs.msg import Header
from geometry_msgs.msg import Point
from fire_rs.simulation.morse import MorseWildfire
import serialization
from supersaop.msg import Timed2DPointStamped, WildfireMap
class Resource:
def __init__(self):
self._value = None
self._ready = threading.Event()
def set(self, value):
self._value = value
self._ready.set()
def get(self):
val = self._value
self._ready.clear()
return val
def is_ready(self):
return self._ready.is_set()
def wait(self, timeout=None):
self._ready.wait(timeout)
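# Illustrative sketch (added; not in the original node): Resource acts as a
# single-slot mailbox built on threading.Event -- a producer set()s a value,
# a consumer wait()s for it, and get() clears the flag for the next update.
def _resource_demo():
    res = Resource()
    res.set({'payload': 1})    # producer side
    res.wait(timeout=1.0)      # consumer side blocks until the value is set
    if res.is_ready():
        return res.get()       # get() also clears the ready flag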
class MorseProcess:
def __init__(self, path, sim_env, python_path=None):
self.path = path
self.sim_env = sim_env
self.python_path = None
if python_path is not None:
self.python_path = {'PYTHONPATH': python_path}
self.process = None
def run(self):
self.process = subprocess.Popen(args=['morse', 'run', self.sim_env], cwd=self.path,
env=self.python_path)
if self.is_alive():
rospy.loginfo("Morse is running")
def kill(self):
if self.process:
self.process.kill()
def is_alive(self):
if self.process:
if self.process.poll() is None:
return True
else:
return False
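# Hypothetical usage (added; the paths below are invented for illustration):
def _morse_process_demo():
    sim = MorseProcess('/opt/sim', 'wildfire_env.py',
                       python_path='/opt/sim/lib')
    sim.run()
    try:
        pass  # interact with the running simulator here
    finally:
        sim.kill()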
class MorseWildfireNode:
def __init__(self, wildfire_res):
rospy.init_node("morse_wildfire")
rospy.loginfo("Starting {}".format(self.__class__.__name__))
self.sub_predicted_wildfire = rospy.Subscriber("real_wildfire", WildfireMap,
callback=self.on_predicted_wildfire,
queue_size=1)
self.shared_wildfire_map = wildfire_res
def on_predicted_wildfire(self, msg: WildfireMap):
rospy.loginfo("Wildfire map received")
raster = serialization.geodata_from_raster_msg(msg.raster, "ignition")
self.shared_wildfire_map.set(raster)
if __name__ == '__main__':
address = ('localhost', 4000)
terrain_obj_name = "elevation"
wf = Resource()
node = MorseWildfireNode(wf)
m = MorseWildfire(address, terrain_obj_name)
stop_signal = False
def update_map():
while not (rospy.is_shutdown() or stop_signal):
wf.wait(timeout=1.)
if wf.is_ready():
m.set_wildfire_prediction_map(wf.get())
th = threading.Thread(target=update_map, daemon=True)
th.start()
try:
r = rospy.Rate(1 / 60.) # Once a minute
rospy.sleep(1.)
while not rospy.is_shutdown():
try:
rospy.loginfo("Updating wildfire in morse")
m.update(rospy.Time.now().to_sec())
except ConnectionError as e:
rospy.logerr(e)
except ValueError as e:
rospy.logwarn(e)
r.sleep()
except rospy.ROSInterruptException:
stop_signal = True
th.join()
| {
"content_hash": "798cdb2a98916e965e65bace5a21fee4",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 91,
"avg_line_length": 27.220338983050848,
"alnum_prop": 0.5712951432129514,
"repo_name": "fire-rs-laas/fire-rs-saop",
"id": "f122f6235fdb8d5fa6415aad88fbedb021c7e497",
"size": "4564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supersaop/src/morse_wildfire.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "5835"
},
{
"name": "C++",
"bytes": "2686996"
},
{
"name": "CMake",
"bytes": "15006"
},
{
"name": "Jupyter Notebook",
"bytes": "8281"
},
{
"name": "Makefile",
"bytes": "2014"
},
{
"name": "Python",
"bytes": "358887"
},
{
"name": "Shell",
"bytes": "5001"
}
],
"symlink_target": ""
} |
from six.moves.urllib import parse
from conveyor.conveyorheat.engine import constraints
from conveyor.conveyorheat.engine import properties
from conveyor.conveyorheat.engine import resource
from conveyor.conveyorheat.engine import support
from conveyor.i18n import _
class KeystoneRegion(resource.Resource):
"""Heat Template Resource for Keystone Region.
This plug-in helps to create, update and delete a keystone region. Also
it can be used for enable or disable a given keystone region.
"""
support_status = support.SupportStatus(
version='6.0.0',
message=_('Supported versions: keystone v3'))
default_client_name = 'keystone'
entity = 'regions'
PROPERTIES = (
ID, PARENT_REGION, DESCRIPTION, ENABLED
) = (
'id', 'parent_region', 'description', 'enabled'
)
properties_schema = {
ID: properties.Schema(
properties.Schema.STRING,
_('The user-defined region ID and should unique to the OpenStack '
'deployment. While creating the region, heat will url encode '
'this ID.')
),
PARENT_REGION: properties.Schema(
properties.Schema.STRING,
_('If the region is hierarchically a child of another region, '
'set this parameter to the ID of the parent region.'),
update_allowed=True,
constraints=[constraints.CustomConstraint('keystone.region')]
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of keystone region.'),
update_allowed=True
),
ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('This region is enabled or disabled.'),
default=True,
update_allowed=True
)
}
def client(self):
return super(KeystoneRegion, self).client().client
def handle_create(self):
region_id = self.properties[self.ID]
description = self.properties[self.DESCRIPTION]
parent_region = self.properties[self.PARENT_REGION]
enabled = self.properties[self.ENABLED]
region = self.client().regions.create(
id=parse.quote(region_id) if region_id else None,
parent_region=parent_region,
description=description,
enabled=enabled)
self.resource_id_set(region.id)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
description = prop_diff.get(self.DESCRIPTION)
enabled = prop_diff.get(self.ENABLED)
parent_region = prop_diff.get(self.PARENT_REGION)
self.client().regions.update(
region=self.resource_id,
parent_region=parent_region,
description=description,
enabled=enabled
)
def resource_mapping():
return {
'OS::Keystone::Region': KeystoneRegion
}
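# Illustrative HOT template snippet (assumed, not shipped with this module)
# showing how the resource above would be declared:
#
#   resources:
#     example_region:
#       type: OS::Keystone::Region
#       properties:
#         id: region-one
#         description: Example region
#         enabled: true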
| {
"content_hash": "d353f107562a1377d9dbcd94e41f24d7",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 32.5,
"alnum_prop": 0.6180602006688963,
"repo_name": "Hybrid-Cloud/conveyor",
"id": "ed98a1ccbb1d7ac4e2383e31af94ccaceb0606a4",
"size": "3565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conveyor/conveyorheat/engine/resources/openstack/keystone/region.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3789174"
},
{
"name": "Shell",
"bytes": "16567"
}
],
"symlink_target": ""
} |
import logging
import logging.config
import yaml
import options
def configure():
with open("%s/config.yml" % options.PACKAGE_PATH, 'r') as ymlfile:
        logging.config.dictConfig(yaml.safe_load(ymlfile)['logging'])
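# A minimal config.yml sketch this loader would accept (assumed layout,
# matching the 'logging' key read above and the stdlib dictConfig schema):
#
#   logging:
#     version: 1
#     handlers:
#       console:
#         class: logging.StreamHandler
#     root:
#       level: INFO
#       handlers: [console]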
| {
"content_hash": "be52d6a4cf70f344cdc7706d72bec99e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 70,
"avg_line_length": 20.09090909090909,
"alnum_prop": 0.7149321266968326,
"repo_name": "mickael-grima/optimizedGPS",
"id": "5d358655b13f649a9bce8271cbfc00f690805246",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "optimizedGPS/logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "246773"
}
],
"symlink_target": ""
} |
from merc import feature
from merc import message
class QuitFeature(feature.Feature):
NAME = __name__
install = QuitFeature.install
@QuitFeature.register_server_command
class Quit(message.Command):
NAME = "QUIT"
MIN_ARITY = 0
FORCE_TRAILING = True
def __init__(self, reason=None, *args):
self.reason = reason
@property
def FORCE_TRAILING(self):
return self.reason is not None
@message.Command.requires_registration
def handle_for(self, app, server, prefix):
app.users.remove_unsafe(app.users.get_by_uid(prefix))
app.network.link_broadcast(server, prefix, self)
def as_command_params(self):
params = []
if self.reason is not None:
params.append(self.reason)
return params
def send_uid(app, server, user):
host = user.host
if host[0] == ":":
host = "0" + host
server.send(app.network.local.sid,
Quit(user.nickname, str(user.hopcount),
str(int(user.creation_time.timestamp())), "+", user.username,
host, "0", user.uid, user.realname))
@QuitFeature.hook("user.remove.check")
def broadcast_quit_on_quit(app, user):
app.network.link_broadcast(None, user.link_prefix,
Quit(user.protocol.disconnect_reason))
| {
"content_hash": "97152d912936da46cae91d6e24886ff2",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 24.705882352941178,
"alnum_prop": 0.6555555555555556,
"repo_name": "merc-devel/merc",
"id": "2b26da02023fbfe3ad792026b030dab6243a19ca",
"size": "1260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "merc/features/ts6/quit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "158852"
}
],
"symlink_target": ""
} |
from mastermind_import import *
from settings import *
import math
import random
import threading
from time import gmtime, strftime
from time import sleep
ip = "localhost"
port = 6317
def PlayerColour(playerID, col):
bckgd = [0,0,0]
fgd = [255,127,39]
neutral = [127,127,127]
    # fall back to an empty palette when col is missing or not a sequence
    try:
        len(col)
    except TypeError:
        col = []
i=0
while len(col)<playerID:
i = i+1
a = math.pow(2,i-1)
belowmid = list(range(int(a/2-1),0,-2))
abovemid = list(range(int(a-1),int(a/2),-2))
j=[]
for k in range(len(belowmid)+len(abovemid)):
if int(math.fmod(k,2))==0: j.append(belowmid[int(k/2)])
else: j.append(abovemid[int(k/2)])
if len(j)<1:
teinte = 255;
col.append([0,0,teinte])
col.append([0,teinte,0])
col.append([teinte,0,0])
for grad in range(127,255,128):
col.append([grad,0,teinte])
col.append([0,teinte,grad])
col.append([teinte,grad,0])
col.append([0,grad,teinte])
col.append([grad,teinte,0])
col.append([teinte,0,grad])
else:
for k in j:
teinte = int( 255 * (1-(1/(math.pow(2,i)))*k) );
for m in j:
grad = int(teinte-(teinte-64)*(m/a))
col.append([grad,0,teinte])
col.append([0,teinte,grad])
col.append([teinte,grad,0])
col.append([0,grad,teinte])
col.append([grad,teinte,0])
col.append([teinte,0,grad])
temp = []
for k in range(len(col)):
doublon = False
for m in range(k-1):
if col[k] == col[m]: doublon = True
if 64 > math.sqrt( math.pow(col[k][0]-fgd[0],2) + math.pow(col[k][1]-fgd[1],2) + math.pow(col[k][2]-fgd[2],2) ):
doublon = True
if not doublon: temp.append(col[k])
col = temp
return [col, col[playerID]]
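# Usage sketch (added; not part of the original server): the function
# returns the grown palette plus the colour picked for this player, so
# callers thread the palette through successive calls.
def _player_colour_demo():
    palette = []
    palette, p1 = PlayerColour(1, palette)   # first player's colour
    palette, p2 = PlayerColour(2, palette)   # reuses the grown palette
    return p1, p2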
def IsOneTileMove(x1, y1, x2, y2):
if x1==x2-1 and y1==y2:
#print("case click on tile to the right")
return True
elif x1==x2+1 and y1==y2:
#print("case click on tile to the left")
return True
elif x1==x2 and y1==y2-1:
#print("case click on below tile")
return True
elif x1==x2 and y1==y2+1:
#print("case click on above tile")
return True
else:
#print("case not adjacent tile:", x1, "x", y1, " VS.", x2, "x", y2)
return False
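# Quick sanity sketch (added): only the four orthogonal neighbours count as
# a one-tile move; diagonals and the same tile do not.
def _one_tile_move_demo():
    assert IsOneTileMove(2, 3, 3, 3)        # right neighbour
    assert not IsOneTileMove(2, 3, 3, 4)    # diagonal
    assert not IsOneTileMove(2, 3, 2, 3)    # same tile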
class P4X_Server(MastermindServerTCP):
def __init__(self):
MastermindServerTCP.__init__(self, 0.5,0.5,60.0) #server refresh, connections' refresh, connection timeout
self.games = []
self.queue = None
self.currentIndex=0
self.chat = [None]*scrollback
self.mutex = threading.Lock()
self.logmsg = self.chat
def gamesetup(self, connection_object, data):
self.mutex.acquire()
#print(connection_object.gameid, "/", len(self.games))
if connection_object == self.games[connection_object.gameid-1].playerlist[0]:
self.games[connection_object.gameid-1].boardsize = [data["htiles"],data["vtiles"]]
self.games[connection_object.gameid-1].maxplayer = data["maxplayer"]
self.games[connection_object.gameid-1].populate()
tempcol = PlayerColour(1,self.games[connection_object.gameid-1].colorlist)
self.callback_client_send(connection_object, {"action": "joingame", "player":1, "playercolor": tempcol[1], "gameid": connection_object.gameid, "htiles":self.games[connection_object.gameid-1].boardsize[0], "vtiles":self.games[connection_object.gameid-1].boardsize[1]})
for m in self.games[connection_object.gameid-1].planetlist:
if m.ownership == self.games[connection_object.gameid-1].playerlist[0]:
self.callback_client_send(connection_object, {"action": "allocate", "object": "planet", "xcoord":m.coord[0],"ycoord":m.coord[1]})
for m in self.games[connection_object.gameid-1].shiplist:
if m.ownership == self.games[connection_object.gameid-1].playerlist[0]:
self.callback_client_send(connection_object, {"action": "allocate", "object": "spaceship", "xcoord":m.coord[0],"ycoord":m.coord[1]})
else:
for k in range(len(self.games[connection_object.gameid-1].playerlist)):
if connection_object == self.games[connection_object.gameid-1].playerlist[k]:
self.games[connection_object.gameid-1].allconnectcensus = self.games[connection_object.gameid-1].allconnectcensus+1
self.games[connection_object.gameid-1].populate()
tempcol = PlayerColour(self.games[connection_object.gameid-1].allconnectcensus,self.games[connection_object.gameid-1].colorlist)
self.callback_client_send(connection_object, {"action": "joingame", "player":self.games[connection_object.gameid-1].allconnectcensus, "playercolor": tempcol[1],"gameid": connection_object.gameid, "htiles":self.games[connection_object.gameid-1].boardsize[0], "vtiles":self.games[connection_object.gameid-1].boardsize[1]})
for m in self.games[connection_object.gameid-1].planetlist:
if m.ownership == self.games[connection_object.gameid-1].playerlist[k]:
self.callback_client_send(connection_object, {"action": "allocate", "object": "planet", "xcoord":m.coord[0],"ycoord":m.coord[1]})
for m in self.games[connection_object.gameid-1].shiplist:
if m.ownership == self.games[connection_object.gameid-1].playerlist[k]:
self.callback_client_send(connection_object, {"action": "allocate", "object": "spaceship", "xcoord":m.coord[0],"ycoord":m.coord[1]})
self.mutex.release()
def shipmove(self, connection_object, data):
self.mutex.acquire()
knownplanets=[]
for k in self.games[connection_object.gameid-1].shiplist:
if data["type"] == "split":
for j in range(len(data["from"])):
if k.coord[0]==data["from"][j].coord[0] and k.coord[1]==data["from"][j].coord[1]:
print(k.coord[0], " ", k.coord[1], " ", k.ownership, " VS. ", connection_object)
if k.ownership == connection_object:
print("Server splits")
self.games[connection_object.gameid-1].shiplist.append(Spaceship())
self.games[connection_object.gameid-1].shiplist[len(self.games[connection_object.gameid-1].shiplist)-1].locateto(k.coord[0],k.coord[1])
self.games[connection_object.gameid-1].shiplist[len(self.games[connection_object.gameid-1].shiplist)-1].ucount = int(k.ucount - int(k.ucount/2))
self.games[connection_object.gameid-1].shiplist[len(self.games[connection_object.gameid-1].shiplist)-1].moveleft = k.moveleft
self.games[connection_object.gameid-1].shiplist[len(self.games[connection_object.gameid-1].shiplist)-1].justsplit = True
self.games[connection_object.gameid-1].shiplist[len(self.games[connection_object.gameid-1].shiplist)-1].ownership = connection_object
k.locateto(data["to"][j].coord[0],data["to"][j].coord[1])
k.ucount = int(k.ucount/2)
k.moveleft = k.moveleft-1
k.justsplit = True
break
else:
print("Ownership mismatch")
if data["type"] == "plain":
for j in range(len(data["from"])):
if k.coord[0]==data["from"][j][0] and k.coord[1]==data["from"][j][1]:
print(k.coord[0], " ", k.coord[1], " ", k.ownership, " VS. ", connection_object)
if k.ownership == connection_object:
print("Server move")
k.locateto(data["to"][0],data["to"][1])
k.moveleft = k.moveleft-1
k.justsplit = False
break
else:
print("Ownership mismatch")
for j in self.games[connection_object.gameid-1].shiplist:
if j!=k and k.coord[0] == j.coord[0] and k.coord[1] == j.coord[1] and k.ownership == j.ownership:
k.ucount = k.ucount + j.ucount
if k.moveleft>j.moveleft:k.moveleft=j.moveleft
k.justsplit = False
self.games[connection_object.gameid-1].shiplist.remove(j)
for m in self.games[connection_object.gameid-1].planetlist:
if k.ownership==connection_object and IsOneTileMove(k.coord[0], k.coord[1], m.coord[0], m.coord[1]):
knownplanets.append([m.coord[0], m.coord[1], m.initpop, m.popul, m.maxpop, m.popgrowthrate])
self.callback_client_send(connection_object, {"action": "upknown", "object": "planet", "list":knownplanets})
self.mutex.release()
def callback_connect(self):
print("This is callback_connect.")
#Something could go here
return super(P4X_Server,self).callback_connect()
def callback_disconnect(self):
print("This is callback_disconnect.")
#Something could go here
return super(P4X_Server,self).callback_disconnect()
def callback_connect_client(self, connection_object):
print("This is callback_connect_client.")
if self.queue is None:
self.queue=Game(connection_object, len(self.games))
self.games.append(self.queue)
connection_object.gameid=len(self.games)
else:
connection_object.gameid=len(self.games)
self.games[len(self.games)-1].playerlist.append(connection_object)
if len(self.games[len(self.games)-1].playerlist) >= self.games[len(self.games)-1].maxplayer:
print("Maximum number of players reached")
self.queue = None
#Something could go here
return super(P4X_Server,self).callback_connect_client(connection_object)
def callback_disconnect_client(self, connection_object):
print("This is callback_disconnect_client.")
for k in range(len(self.games[connection_object.gameid-1].playerlist)):
if connection_object == self.games[connection_object.gameid-1].playerlist[k]:
print("Player quits")
self.games[connection_object.gameid-1].playerlist.remove( connection_object )
if len(self.games[connection_object.gameid-1].playerlist)<1:
print("Game shuts")
self.games.remove( self.games[connection_object.gameid-1] )
if len(self.games)<1:
print("No games left running on this server")
#self.callback_client_send(connection_object, "IdleTimeoutNotification")
#Something could go here
return super(P4X_Server,self).callback_disconnect_client(connection_object)
def callback_client_receive(self, connection_object):
print("This is callback_client_receive.")
#Something could go here
return super(P4X_Server,self).callback_client_receive(connection_object)
def callback_client_handle(self, connection_object, data):
cmd = data[0]
if cmd == "introduce":
self.add_message("Server: "+data[1]+" has joined.")
elif cmd == "gamesetup":
#self.add_message(data[1])
self.gamesetup(connection_object, data[1])
elif cmd == "shipmove":
self.shipmove(connection_object, data[1])
elif cmd == "add":
self.add_message(data[1])
elif cmd == "bip":
pass
elif cmd == "leave":
self.add_message("Server: "+data[1]+" has left.")
self.callback_client_send(connection_object, self.chat)
def callback_client_send(self, connection_object, data, compression=None):
print("This is callback_client_send.")
#Something could go here
return super(P4X_Server,self).callback_client_send(connection_object, data,compression)
# .__init__(time_server_refresh=0.5,time_connection_refresh=0.5,time_connection_timeout=5.0):
# --Creates a new server object.
# --The argument "time_server_refresh" determines how quickly the server checks for an end condition (i.e., if you call .accepting_disallow(), "time_server_refresh" is the maximum time it will hang). Larger numbers use less CPU time.
# --The argument "time_connection_refresh" is how fast the connection checks for an end condition (i.e., if a connection times out, "time_connection_refresh" is the maximum time past the timeout time the connection will persist). Larger numbers use less CPU time.
# --The argument "time_connection_timeout" determines how long a connection is allowed to remain idle before being considered dead. See "Notes.txt".
# .__del__():
# --Destructs the server object. Issues a MastermindWarningServer if .accepting_disallow() or .disconnect() was not called first, or if there are still active connections.
# .connect(ip,port):
# --Connects the server to the network. If the server is already connected, a MastermindWarningServer is issued and the call has no effect. MastermindErrorSocket may be raised on failure.
# --The argument "ip" is the ip to connect as.
# --The argument "port" is the port to connect as.
# .disconnect()
# --Disconnects the server from the network. If the server is already not connected, a MastermindWarningServer is issued and the call has no effect. If the server is actively accepting new clients, MastermindWarningServer will be issued, and the call will proceed after automatically calling .accepting_disallow(). If there are active connections, MastermindWarningServer will be issued, and the call will proceed after automatically calling .disconnect_clients().
# .disconnect_clients():
# --Disconnects all current clients. This is typically done before quitting the server; to be sure another client doesn't connect immediately after this call, you should call .accepting_disallow() first.
# .callback_connect():
# --Called when the server connects (i.e., when .connect(...) is successful). This method can be overridden to provide useful information. It's good practice to call "return super(MastermindServerTCP,self).callback_connect()" at the end of your override.
# .callback_disconnect():
# --Called when the server disconnects (i.e., when .disconnect(...) is called). This method can be overridden to provide useful information. It's good practice to call "return super(MastermindServerTCP,self).callback_disconnect()" at the end of your override.
# .callback_connect_client(connection_object):
# --Called when a new client connects. This method can be overridden to provide useful information. It's good practice to call "return super(MastermindServerTCP,self).callback_connect_client(connection_object)" at the end of your override.
# --The argument "connection_object" represents the appropriate connection. See the "Connection Objects" section for a description of useful properites.
# .callback_disconnect_client(connection_object):
# --Called when a client disconnects. This method can be overridden to provide useful information. It's good practice to call "return super(MastermindServerTCP,self).callback_disconnect_client(connection_object)" at the end of your override.
# --The argument "connection_object" represents the appropriate connection. See the "Connection Objects" section for a description of useful properites.
# .callback_client_receive(connection_object):
# --Called when data is about to be received from a connection. A pickling or zlib error from Python is conceivable if the data is corrupted in transit. This method can be overridden to provide useful information. It's good practice (and in this case essential) to call "return super(MastermindServerTCP,self).callback_client_receive(connection_object)" at the end of your override.
# --The argument "connection_object" represents the appropriate connection. See the "Connection Objects" section for a description of useful properites.
# .callback_client_handle(connection_object,data):
# --Called to handle data received from a connection. This method is often overridden to provide custom server logic and useful information. It's good practice (and in this case essential) to call "return super(MastermindServerTCP,self).callback_client_handle(connection_object,data)" at the end of your override.
# --The argument "connection_object" represents the appropriate connection. See the "Connection Objects" section for a description of useful properites.
# --The argument "data" is the data that the server received from the connection.
# .callback_client_send(connection_object,data,compression=None):
# --Called when data is about to be sent to a connection. If sending fails, the connection is silently terminated. This method can be overridden to provide useful information. It's good practice (and in this case essential) to call "return super(MastermindServerTCP,self).callback_client_send(connection_object,data,compression)" at the end of your override.
# --The argument "connection_object" represents the appropriate connection. See the "Connection Objects" section for a description of useful properties.
# --The argument "data" is the data that the server is about to send to the connection.
# --The argument "compression" determines whether the data should be compressed before sending. See MastermindClientTCP.send(...).
# .accepting_allow():
# --Starts allowing clients to connect and create new connections to the server.
# .accepting_allow_wait_forever():
# --Uses the current thread to wait for new connections to the server and put them in their own threads. This is used internally by .accepting_allow(), but is useful by itself if you want to start your own server in its own file, and you don't care if you don't get control back.
# .accepting_disallow():
# --Stops allowing new clients to connect. This DOES NOT stop any current connections! The effect of calling .accepting_allow() after this and then having a new client connect has not been tested.
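# Illustrative sketch (not part of this server): a minimal override pattern for
# the callbacks documented above. The class name "EchoServer" is hypothetical;
# the callback signatures match MastermindServerTCP as used in this file.
# class EchoServer(MastermindServerTCP):
#     def callback_client_handle(self, connection_object, data):
#         self.callback_client_send(connection_object, data)  # echo data back
#         return super(EchoServer, self).callback_client_handle(connection_object, data)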
class Planet():
def __init__(self):
self.ownership = None
self.coord = [0,0]
self.initpop = 1000
self.popul = 1000
self.maxpop = 10000
self.popgrowthrate = 0.33 #sigmoidal
def locateto(self, xpos, ypos):
self.coord = [xpos, ypos]
def popgrow(self):
#Logistic Population Growth: Continuous and Discrete
#http://amrita.vlab.co.in/?sub=3&brch=65&sim=1110&cnt=1
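# Discrete logistic step: growth is proportional to the current population and
# to the remaining capacity, roughly dP = r * P' * (1 - P'/K) with P' the
# population above initpop and K = maxpop - initpop; the +2/+1 offsets keep
# growth positive at P' = 0, and ceil(x - 0.5) rounds to the nearest integer.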
self.popul = self.popul+ int(math.ceil(self.popgrowthrate * ((self.popul-self.initpop+2)*(1-((self.popul-self.initpop+1)/(self.maxpop-self.initpop))))-0.5))
class Spaceship():
def __init__(self):
self.ownership = None
self.coord = [0,0]
self.ucount = 100
self.gocoord = [0,0]
self.moveleft = 1
self.justsplit = False
def locateto(self, xpos, ypos):
self.coord = [xpos, ypos]
self.gocoord = [xpos, ypos]
class Game:
def __init__(self, player0, currentIndex):
print("This is NEW game.")
self.boardsize = [1,1]
self.maxplayer = 1
# whose turn (1 or 0)
self.turn = 0
#maps
self.board = [[0 for x in range(self.boardsize[0])] for y in range(self.boardsize[1])]
self.planetlist = []
self.shiplist = []
#initialize the players including the one who started the game
self.player0=player0
self.player1=None
self.playerlist = []
self.playerlist.append(self.player0)
self.allconnectcensus = 1
self.colorlist = PlayerColour(self.allconnectcensus,[])
self.colorlist = self.colorlist[0]
print(self.colorlist)
#gameid of game
self.gameid=currentIndex
def populate(self):
if len(self.planetlist)<1 and self.boardsize[0]>0 and self.boardsize[1]>0:
print("Distribute planet locations")
coorddraw = random.sample(range(self.boardsize[0]*self.boardsize[1]), math.floor((self.boardsize[0]*self.boardsize[1])*0.1))
for i in range(len(coorddraw)):
#print(i, "\t", int(math.floor(i/self.boardsize[0])), " x ", self.boardsize[0], " + ", int(math.fmod(i,self.boardsize[0])))
self.planetlist.append(Planet())
self.planetlist[i].locateto(int(math.fmod(coorddraw[i],self.boardsize[0])), int(math.floor(coorddraw[i]/self.boardsize[0])))
self.planetlist[i].ownership = None
for i in range(len(self.playerlist)):
planetowner = False
for k in self.planetlist:
if k.ownership == self.playerlist[i]:
planetowner = True
if not planetowner and i<len(self.planetlist):
print("Allocate home planet")
self.planetlist[i].ownership = self.playerlist[i]
planetowner = True
shipowner = False
for k in self.shiplist:
if k.ownership == self.playerlist[i]:
shipowner = True
if not shipowner:
print("Allocate initial ship", self.playerlist[i])
if planetowner:
self.shiplist.append(Spaceship())
self.shiplist[len(self.shiplist)-1].locateto(self.planetlist[i].coord[0], self.planetlist[i].coord[1])
self.shiplist[len(self.shiplist)-1].ownership = self.playerlist[i]
else:
coorddraw = random.sample(range(self.boardsize[0]*self.boardsize[1]), math.floor(self.boardsize[0]*self.boardsize[1]))
for k in range(len(coorddraw)):
emptytile = True
for m in self.shiplist:
if m.coord[0] == int(math.fmod(coorddraw[k],self.boardsize[0])) and m.coord[1] == int(math.floor(coorddraw[k]/self.boardsize[0])):
emptytile = False
for m in self.planetlist:
if m.coord[0] == int(math.fmod(coorddraw[k],self.boardsize[0])) and m.coord[1] == int(math.floor(coorddraw[k]/self.boardsize[0])):
emptytile = False
if emptytile:
self.shiplist.append(Spaceship())
self.shiplist[len(self.shiplist)-1].locateto(int(math.fmod(coorddraw[k],self.boardsize[0])), int(math.floor(coorddraw[k]/self.boardsize[0])))
self.shiplist[len(self.shiplist)-1].ownership = self.playerlist[i]
break
if __name__ == "__main__":
server = P4X_Server()
server.connect(server_ip,port)
try:
server.accepting_allow_wait_forever()
except:
#Only way to break is with an exception
pass
server.accepting_disallow()
server.disconnect_clients()
server.disconnect()
| {
"content_hash": "cea9db0067126807bdf4700aa69601b3",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 478,
"avg_line_length": 61.69191919191919,
"alnum_prop": 0.6019647973802702,
"repo_name": "ant1b/Planets4X",
"id": "00653a255ec4e08cfeaf601527aab612d1f644af",
"size": "24430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Planets4X_archives/Planets4X_v00.07/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "809"
},
{
"name": "JavaScript",
"bytes": "445859"
},
{
"name": "Python",
"bytes": "4013101"
}
],
"symlink_target": ""
} |
import __builtin__
import locale
import os
import re
import sys
import textwrap
from urllib import quote, quote_plus, unquote
from unicodedata import east_asian_width
from trac.util.translation import _
CRLF = '\r\n'
class Empty(unicode):
"""A special tag object evaluating to the empty string"""
__slots__ = []
empty = Empty()
del Empty # shouldn't be used outside of Trac core
# -- Unicode
def to_unicode(text, charset=None):
"""Convert input to an `unicode` object.
For a `str` object, we'll first try to decode the bytes using the given
`charset` encoding (or UTF-8 if none is specified), then we fall back to
the latin1 encoding which might be correct or not, but at least preserves
the original byte sequence by mapping each byte to the corresponding
unicode code point in the range U+0000 to U+00FF.
For anything else, a simple `unicode()` conversion is attempted,
with special care taken with `Exception` objects.
"""
if isinstance(text, str):
try:
return unicode(text, charset or 'utf-8')
except UnicodeDecodeError:
return unicode(text, 'latin1')
elif isinstance(text, Exception):
# two possibilities for storing unicode strings in exception data:
try:
# custom __str__ method on the exception (e.g. PermissionError)
return unicode(text)
except UnicodeError:
# unicode arguments given to the exception (e.g. parse_date)
return ' '.join([to_unicode(arg) for arg in text.args])
return unicode(text)
def exception_to_unicode(e, traceback=False):
"""Convert an `Exception` to an `unicode` object.
In addition to `to_unicode`, this representation of the exception
also contains the class name and optionally the traceback.
"""
message = '%s: %s' % (e.__class__.__name__, to_unicode(e))
if traceback:
from trac.util import get_last_traceback
traceback_only = get_last_traceback().split('\n')[:-2]
message = '\n%s\n%s' % (to_unicode('\n'.join(traceback_only)), message)
return message
def path_to_unicode(path):
"""Convert a filesystem path to unicode, using the filesystem encoding."""
if isinstance(path, str):
try:
return unicode(path, sys.getfilesystemencoding())
except UnicodeDecodeError:
return unicode(path, 'latin1')
return unicode(path)
_ws_leading_re = re.compile(ur'\A[\s\u200b]+', re.UNICODE)
_ws_trailing_re = re.compile(ur'[\s\u200b]+\Z', re.UNICODE)
def stripws(text, leading=True, trailing=True):
"""Strips unicode white-spaces and ZWSPs from ``text``.
:param leading: strips leading spaces from ``text`` unless ``leading`` is
`False`.
:param trailing: strips trailing spaces from ``text`` unless ``trailing``
is `False`.
"""
if leading:
text = _ws_leading_re.sub('', text)
if trailing:
text = _ws_trailing_re.sub('', text)
return text
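# e.g. stripws(u'\u200b abc\u200b ') -> u'abc'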
_js_quote = {'\\': '\\\\', '"': '\\"', '\b': '\\b', '\f': '\\f',
'\n': '\\n', '\r': '\\r', '\t': '\\t', "'": "\\'"}
for i in range(0x20) + [ord(c) for c in '&<>']:
_js_quote.setdefault(chr(i), '\\u%04x' % i)
_js_quote_re = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t\'&<>]')
_js_string_re = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t&<>]')
def javascript_quote(text):
"""Quote strings for inclusion in single or double quote delimited
Javascript strings
"""
if not text:
return ''
def replace(match):
return _js_quote[match.group(0)]
return _js_quote_re.sub(replace, text)
def to_js_string(text):
"""Embed the given string in a double quote delimited Javascript string
(conform to the JSON spec)
"""
if not text:
return '""'
def replace(match):
return _js_quote[match.group(0)]
return '"%s"' % _js_string_re.sub(replace, text)
def unicode_quote(value, safe='/'):
"""A unicode aware version of `urllib.quote`
:param value: anything that converts to a `str`. If `unicode`
input is given, it will be UTF-8 encoded.
:param safe: as in `quote`, the characters that would otherwise be
quoted but shouldn't here (defaults to '/')
"""
return quote(value.encode('utf-8') if isinstance(value, unicode)
else str(value), safe)
def unicode_quote_plus(value, safe=''):
"""A unicode aware version of `urllib.quote_plus`.
:param value: anything that converts to a `str`. If `unicode`
input is given, it will be UTF-8 encoded.
:param safe: as in `quote_plus`, the characters that would
otherwise be quoted but shouldn't here (defaults to
'/')
"""
return quote_plus(value.encode('utf-8') if isinstance(value, unicode)
else str(value), safe)
def unicode_unquote(value):
"""A unicode aware version of `urllib.unquote`.
:param value: UTF-8 encoded `str` value (for example, as obtained by
`unicode_quote`).
:rtype: `unicode`
"""
return unquote(value).decode('utf-8')
def unicode_urlencode(params, safe=''):
"""A unicode aware version of `urllib.urlencode`.
Values set to `empty` are converted to the key alone, without the
equal sign.
"""
if isinstance(params, dict):
params = params.iteritems()
l = []
for k, v in params:
if v is empty:
l.append(unicode_quote_plus(k, safe))
else:
l.append(unicode_quote_plus(k, safe) + '=' +
unicode_quote_plus(v, safe))
return '&'.join(l)
_qs_quote_safe = ''.join(chr(c) for c in xrange(0x21, 0x7f))
def quote_query_string(text):
"""Quote strings for query string
"""
return unicode_quote_plus(text, _qs_quote_safe)
def to_utf8(text, charset='latin1'):
"""Convert a string to UTF-8, assuming the encoding is either UTF-8, ISO
Latin-1, or as specified by the optional `charset` parameter.
.. deprecated :: 0.10
You should use `unicode` strings only.
"""
try:
# Do nothing if it's already utf-8
u = unicode(text, 'utf-8')
return text
except UnicodeError:
try:
# Use the user supplied charset if possible
u = unicode(text, charset)
except UnicodeError:
# This should always work
u = unicode(text, 'latin1')
return u.encode('utf-8')
class unicode_passwd(unicode):
"""Conceal the actual content of the string when `repr` is called."""
def __repr__(self):
return '*******'
def stream_encoding(stream):
"""Return the appropriate encoding for the given stream."""
encoding = getattr(stream, 'encoding', None)
# Windows returns 'cp0' to indicate no encoding
return encoding if encoding not in (None, 'cp0') else 'utf-8'
def console_print(out, *args, **kwargs):
"""Output the given arguments to the console, encoding the output
as appropriate.
:param kwargs: ``newline`` controls whether a newline will be appended
(defaults to `True`)
"""
cons_charset = stream_encoding(out)
out.write(' '.join([to_unicode(a).encode(cons_charset, 'replace')
for a in args]))
if kwargs.get('newline', True):
out.write('\n')
def printout(*args, **kwargs):
"""Do a `console_print` on `sys.stdout`."""
console_print(sys.stdout, *args, **kwargs)
def printerr(*args, **kwargs):
"""Do a `console_print` on `sys.stderr`."""
console_print(sys.stderr, *args, **kwargs)
def raw_input(prompt):
"""Input one line from the console and converts it to unicode as
appropriate.
"""
printout(prompt, newline=False)
return to_unicode(__builtin__.raw_input(), sys.stdin.encoding)
# -- Plain text formatting
def text_width(text, ambiwidth=1):
"""Determine the column width of `text` in Unicode characters.
The characters in the East Asian Fullwidth (F) or East Asian Wide (W)
have a column width of 2. The other characters in the East Asian
Halfwidth (H) or East Asian Narrow (Na) have a column width of 1.
The `ambiwidth` parameter sets the column width of East Asian Ambiguous (A)
characters. If `1`, they are as wide as US-ASCII characters, which is what
most users expect. If `2`, they are twice as wide, which is what CJK users
expect.
cf. http://www.unicode.org/reports/tr11/.
"""
twice = 'FWA' if ambiwidth == 2 else 'FW'
return sum([2 if east_asian_width(chr) in twice else 1
for chr in to_unicode(text)])
_default_ambiwidth = 1 # Default width of East Asian Ambiguous (A)
if os.name == 'nt':
try:
# `ctypes` is available since Python 2.5
import ctypes
codepage = ctypes.windll.kernel32.GetConsoleOutputCP()
except ImportError:
# Try to retrieve the codepage from stderr and stdout
codepage = (sys.stderr.encoding or sys.stdout.encoding or '')[2:]
codepage = codepage.isdigit() and int(codepage) or 0
if codepage in (932, # Japanese (Shift-JIS)
936, # Chinese Simplified (GB2312)
949, # Korean (Unified Hangul Code)
950): # Chinese Traditional (Big5)
_default_ambiwidth = 2
del codepage
else:
if re.match(r'zh|ja|ko', os.environ.get('LANG') or '', re.IGNORECASE):
_default_ambiwidth = 2
def print_table(data, headers=None, sep=' ', out=None, ambiwidth=None):
"""Print data according to a tabular layout.
:param data: a sequence of rows; assume all rows are of equal length.
:param headers: an optional row containing column headers; must be of
the same length as each row in `data`.
:param sep: column separator
:param out: output file descriptor (`None` means use `sys.stdout`)
:param ambiwidth: column width of the East Asian Ambiguous (A). If None,
detect ambiwidth with the locale settings. If others,
pass to the `ambiwidth` parameter of `text_width`.
"""
if out is None:
out = sys.stdout
charset = getattr(out, 'encoding', None) or 'utf-8'
if ambiwidth is None:
ambiwidth = _default_ambiwidth
data = list(data)
if headers:
data.insert(0, headers)
elif not data:
return
# Convert to a unicode object with `to_unicode`. If None, convert to an
# empty string.
def to_text(val):
if val is None:
return u''
return to_unicode(val)
def tw(text):
return text_width(text, ambiwidth=ambiwidth)
# Convert each cell to a unicode object
data = [[to_text(cell) for cell in row] for row in data]
num_cols = len(data[0])
col_width = [max(tw(row[idx]) for row in data)
for idx in xrange(num_cols)]
out.write('\n')
for ridx, row in enumerate(data):
for cidx, cell in enumerate(row):
if headers and ridx == 0:
sp = '%*s' % (tw(sep), ' ') # No separator in header
else:
sp = sep
if cidx + 1 == num_cols:
sp = '' # No separator after last column
line = u'%-*s%s' % (col_width[cidx] - tw(cell) + len(cell),
cell, sp)
line = line.encode(charset, 'replace')
out.write(line)
out.write('\n')
if ridx == 0 and headers:
out.write('-' * (tw(sep) * cidx + sum(col_width)))
out.write('\n')
out.write('\n')
def shorten_line(text, maxlen=75):
"""Truncates content to at most `maxlen` characters.
This tries to be (a bit) clever and attempts to find a proper word
boundary for doing so.
"""
if len(text or '') < maxlen:
return text
cut = max(text.rfind(' ', 0, maxlen), text.rfind('\n', 0, maxlen))
if cut < 0:
cut = maxlen
return text[:cut] + ' ...'
class UnicodeTextWrapper(textwrap.TextWrapper):
breakable_char_ranges = [
(0x1100, 0x11FF), # Hangul Jamo
(0x2E80, 0x2EFF), # CJK Radicals Supplement
(0x3000, 0x303F), # CJK Symbols and Punctuation
(0x3040, 0x309F), # Hiragana
(0x30A0, 0x30FF), # Katakana
(0x3130, 0x318F), # Hangul Compatibility Jamo
(0x3190, 0x319F), # Kanbun
(0x31C0, 0x31EF), # CJK Strokes
(0x3200, 0x32FF), # Enclosed CJK Letters and Months
(0x3300, 0x33FF), # CJK Compatibility
(0x3400, 0x4DBF), # CJK Unified Ideographs Extension A
(0x4E00, 0x9FFF), # CJK Unified Ideographs
(0xA960, 0xA97F), # Hangul Jamo Extended-A
(0xAC00, 0xD7AF), # Hangul Syllables
(0xD7B0, 0xD7FF), # Hangul Jamo Extended-B
(0xF900, 0xFAFF), # CJK Compatibility Ideographs
(0xFE30, 0xFE4F), # CJK Compatibility Forms
(0xFF00, 0xFFEF), # Halfwidth and Fullwidth Forms
(0x20000, 0x2FFFF, u'[\uD840-\uD87F][\uDC00-\uDFFF]'), # Plane 2
(0x30000, 0x3FFFF, u'[\uD880-\uD8BF][\uDC00-\uDFFF]'), # Plane 3
]
split_re = None
breakable_re = None
@classmethod
def _init_patterns(cls):
char_ranges = []
surrogate_pairs = []
for val in cls.breakable_char_ranges:
try:
high = unichr(val[0])
low = unichr(val[1])
char_ranges.append(u'%s-%s' % (high, low))
except ValueError:
# Narrow build, `re` cannot use characters >= 0x10000
surrogate_pairs.append(val[2])
char_ranges = u''.join(char_ranges)
if surrogate_pairs:
pattern = u'(?:[%s]|%s)+' % (char_ranges,
u'|'.join(surrogate_pairs))
else:
pattern = u'[%s]+' % char_ranges
cls.split_re = re.compile(
ur'(\s+|' + # any whitespace
pattern + u'|' + # breakable text
ur'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' + # hyphenated words
ur'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))', # em-dash
re.UNICODE)
cls.breakable_re = re.compile(ur'\A' + pattern, re.UNICODE)
def __init__(self, cols, replace_whitespace=0, break_long_words=0,
initial_indent='', subsequent_indent='', ambiwidth=1):
textwrap.TextWrapper.__init__(
self, cols, replace_whitespace=0, break_long_words=0,
initial_indent=initial_indent,
subsequent_indent=subsequent_indent)
self.ambiwidth = ambiwidth
if self.split_re is None:
self._init_patterns()
def _split(self, text):
chunks = self.split_re.split(to_unicode(text))
chunks = filter(None, chunks)
return chunks
def _text_width(self, text):
return text_width(text, ambiwidth=self.ambiwidth)
def _wrap_chunks(self, chunks):
lines = []
chunks.reverse()
text_width = self._text_width
while chunks:
cur_line = []
cur_width = 0
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
width = self.width - text_width(indent)
if chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
chunk = chunks[-1]
w = text_width(chunk)
if cur_width + w <= width:
cur_line.append(chunks.pop())
cur_width += w
elif self.breakable_re.match(chunk):
left_space = width - cur_width
for i in xrange(len(chunk)):
w = text_width(chunk[i])
if left_space < w:
break
left_space -= w
if i > 0:
cur_line.append(chunk[:i])
chunk = chunk[i:]
chunks[-1] = chunk
w = text_width(chunk)
break
else:
break
if chunks and w > width:
self._handle_long_word(chunks, cur_line, cur_width, width)
if cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
def wrap(t, cols=75, initial_indent='', subsequent_indent='',
linesep=os.linesep, ambiwidth=1):
"""Wraps the single paragraph in `t`, which contains unicode characters.
The every line is at most `cols` characters long.
That `ambiwidth` parameter is used for the column width of the East
Asian Ambiguous (A). If `1`, the same width as characters in US-ASCII.
This is expected by most users. If `2`, twice the width of US-ASCII
characters. This is expected by CJK users.
"""
t = t.strip().replace('\r\n', '\n').replace('\r', '\n')
wrapper = UnicodeTextWrapper(cols, replace_whitespace=0,
break_long_words=0,
initial_indent=initial_indent,
subsequent_indent=subsequent_indent,
ambiwidth=ambiwidth)
wrappedLines = []
for line in t.split('\n'):
wrappedLines += wrapper.wrap(line.rstrip()) or ['']
return linesep.join(wrappedLines)
def obfuscate_email_address(address):
"""Replace anything looking like an e-mail address (``'@something'``)
with a trailing ellipsis (``'@…'``)
"""
if address:
at = address.find('@')
if at != -1:
return address[:at] + u'@\u2026' + \
('>' if address[-1] == '>' else '')
return address
def breakable_path(path):
"""Make a path breakable after path separators, and conversely, avoid
breaking at spaces.
"""
if not path:
return path
prefix = ''
if path.startswith('/'): # Avoid breaking after a leading /
prefix = '/'
path = path[1:]
return prefix + path.replace('/', u'/\u200b').replace('\\', u'\\\u200b') \
.replace(' ', u'\u00a0')
def normalize_whitespace(text, to_space=u'\u00a0', remove=u'\u200b'):
"""Normalize whitespace in a string, by replacing special spaces by normal
spaces and removing zero-width spaces."""
if not text:
return text
for each in to_space:
text = text.replace(each, ' ')
for each in remove:
text = text.replace(each, '')
return text
def unquote_label(txt):
"""Remove (one level of) enclosing single or double quotes.
.. versionadded :: 1.0
"""
return txt[1:-1] if txt and txt[0] in "'\"" and txt[0] == txt[-1] else txt
# -- Conversion
def pretty_size(size, format='%.1f'):
"""Pretty print content size information with appropriate unit.
:param size: number of bytes
:param format: can be used to adjust the precision shown
"""
if size is None:
return ''
jump = 1024
if size < jump:
return _('%(size)s bytes', size=size)
units = ['KB', 'MB', 'GB', 'TB']
i = 0
while size >= jump and i < len(units):
i += 1
size /= 1024.
return (format + ' %s') % (size, units[i - 1])
def expandtabs(s, tabstop=8, ignoring=None):
"""Expand tab characters `'\\\\t'` into spaces.
:param tabstop: number of space characters per tab
(defaults to the canonical 8)
:param ignoring: if not `None`, the expansion will be "smart" and
go from one tabstop to the next. In addition,
this parameter lists characters which can be
ignored when computing the indent.
"""
if '\t' not in s:
return s
if ignoring is None:
return s.expandtabs(tabstop)
outlines = []
for line in s.split('\n'):
if '\t' not in line:
outlines.append(line)
continue
p = 0
s = []
for c in line:
if c == '\t':
n = tabstop - p % tabstop
s.append(' ' * n)
p += n
elif not ignoring or c not in ignoring:
p += 1
s.append(c)
else:
s.append(c)
outlines.append(''.join(s))
return '\n'.join(outlines)
def fix_eol(text, eol):
"""Fix end-of-lines in a text."""
lines = text.splitlines()
lines.append('')
return eol.join(lines)
def unicode_to_base64(text, strip_newlines=True):
"""Safe conversion of ``text`` to base64 representation using
utf-8 bytes.
Strips newlines from output unless ``strip_newlines`` is `False`.
"""
text = to_unicode(text)
if strip_newlines:
return text.encode('utf-8').encode('base64').replace('\n', '')
return text.encode('utf-8').encode('base64')
def unicode_from_base64(text):
"""Safe conversion of ``text`` to unicode based on utf-8 bytes."""
return text.decode('base64').decode('utf-8')
def levenshtein_distance(lhs, rhs):
"""Return the Levenshtein distance between two strings."""
if len(lhs) > len(rhs):
rhs, lhs = lhs, rhs
if not lhs:
return len(rhs)
prev = range(len(rhs) + 1)
for lidx, lch in enumerate(lhs):
curr = [lidx + 1]
for ridx, rch in enumerate(rhs):
cost = (lch != rch) * 2
curr.append(min(prev[ridx + 1] + 1, # deletion
curr[ridx] + 1, # insertion
prev[ridx] + cost)) # substitution
prev = curr
return prev[-1]
| {
"content_hash": "e794c138faf5ae72232e40afe3565de0",
"timestamp": "",
"source": "github",
"line_count": 659,
"max_line_length": 79,
"avg_line_length": 33.371775417298934,
"alnum_prop": 0.5653419425245544,
"repo_name": "trac-ja/trac-ja",
"id": "f188da2df6fe92e08810e9aa01afe915a48cfdb2",
"size": "22812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trac/util/text.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "78123"
},
{
"name": "JavaScript",
"bytes": "79829"
},
{
"name": "Perl",
"bytes": "2617"
},
{
"name": "Python",
"bytes": "2818601"
},
{
"name": "Shell",
"bytes": "11347"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import httplib
import optparse
import os
import sys
import six.moves.urllib.parse as urlparse
from glance.common import utils
from glance.openstack.common import gettextutils
from glance.openstack.common import jsonutils
from glance.openstack.common import log
LOG = log.getLogger(__name__)
_LI = gettextutils._LI
_LE = gettextutils._LE
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
COMMANDS = """Commands:
help <command> Output help for one of the commands below
compare What is missing from the slave glance?
dump Dump the contents of a glance instance to local disk.
livecopy Load the contents of one glance instance into another.
load Load the contents of a local directory into glance.
size Determine the size of a glance instance if dumped to disk.
"""
IMAGE_ALREADY_PRESENT_MESSAGE = _('The image %s is already present on '
'the slave, but our check for it did '
'not find it. This indicates that we '
'do not have permissions to see all '
'the images on the slave server.')
class AuthenticationException(Exception):
pass
class ImageAlreadyPresentException(Exception):
pass
class ServerErrorException(Exception):
pass
class UploadException(Exception):
pass
class ImageService(object):
def __init__(self, conn, auth_token):
"""Initialize the ImageService.
conn: a httplib.HTTPConnection to the glance server
auth_token: authentication token to pass in the x-auth-token header
"""
self.auth_token = auth_token
self.conn = conn
def _http_request(self, method, url, headers, body,
ignore_result_body=False):
"""Perform an HTTP request against the server.
method: the HTTP method to use
url: the URL to request (not including server portion)
headers: headers for the request
body: body to send with the request
ignore_result_body: the body of the result will be ignored
Returns: a httplib response object
"""
if self.auth_token:
headers.setdefault('x-auth-token', self.auth_token)
LOG.debug('Request: %(method)s http://%(server)s:%(port)s'
'%(url)s with headers %(headers)s'
% {'method': method,
'server': self.conn.host,
'port': self.conn.port,
'url': url,
'headers': repr(headers)})
self.conn.request(method, url, body, headers)
response = self.conn.getresponse()
headers = self._header_list_to_dict(response.getheaders())
code = response.status
code_description = httplib.responses[code]
LOG.debug('Response: %(code)s %(status)s %(headers)s'
% {'code': code,
'status': code_description,
'headers': repr(headers)})
if code in [400, 500]:
raise ServerErrorException(response.read())
if code in [401, 403]:
raise AuthenticationException(response.read())
if code == 409:
raise ImageAlreadyPresentException(response.read())
if ignore_result_body:
# NOTE: because we are pipelining requests through a single HTTP
# connection, httplib requires that we read the response body
# before we can make another request. If the caller knows they
# don't care about the body, they can ask us to do that for them.
response.read()
return response
def get_images(self):
"""Return a detailed list of images.
Yields a series of images as dicts containing metadata.
"""
params = {'is_public': None}
while True:
url = '/v1/images/detail'
query = urlparse.urlencode(params)
if query:
url += '?%s' % query
response = self._http_request('GET', url, {}, '')
result = jsonutils.loads(response.read())
if not result or 'images' not in result or not result['images']:
return
for image in result.get('images', []):
params['marker'] = image['id']
yield image
def get_image(self, image_uuid):
"""Fetch image data from glance.
image_uuid: the id of an image
Returns: a httplib Response object where the body is the image.
"""
url = '/v1/images/%s' % image_uuid
return self._http_request('GET', url, {}, '')
@staticmethod
def _header_list_to_dict(headers):
"""Expand a list of headers into a dictionary.
headers: a list of [(key, value), (key, value), (key, value)]
Returns: a dictionary representation of the list
"""
d = {}
for (header, value) in headers:
if header.startswith('x-image-meta-property-'):
prop = header.replace('x-image-meta-property-', '')
d.setdefault('properties', {})
d['properties'][prop] = value
else:
d[header.replace('x-image-meta-', '')] = value
return d
def get_image_meta(self, image_uuid):
"""Return the metadata for a single image.
image_uuid: the id of an image
Returns: image metadata as a dictionary
"""
url = '/v1/images/%s' % image_uuid
response = self._http_request('HEAD', url, {}, '',
ignore_result_body=True)
return self._header_list_to_dict(response.getheaders())
@staticmethod
def _dict_to_headers(d):
"""Convert a dictionary into one suitable for a HTTP request.
d: a dictionary
Returns: the same dictionary, with x-image-meta added to every key
"""
h = {}
for key in d:
if key == 'properties':
for subkey in d[key]:
if d[key][subkey] is None:
h['x-image-meta-property-%s' % subkey] = ''
else:
h['x-image-meta-property-%s' % subkey] = d[key][subkey]
else:
h['x-image-meta-%s' % key] = d[key]
return h
def add_image(self, image_meta, image_data):
"""Upload an image.
image_meta: image metadata as a dictionary
image_data: image data as an object with a read() method
Returns: a tuple of (http response headers, http response body)
"""
url = '/v1/images'
headers = self._dict_to_headers(image_meta)
headers['Content-Type'] = 'application/octet-stream'
headers['Content-Length'] = int(image_meta['size'])
response = self._http_request('POST', url, headers, image_data)
headers = self._header_list_to_dict(response.getheaders())
LOG.debug('Image post done')
body = response.read()
return headers, body
def add_image_meta(self, image_meta):
"""Update image metadata.
image_meta: image metadata as a dictionary
Returns: a tuple of (http response headers, http response body)
"""
url = '/v1/images/%s' % image_meta['id']
headers = self._dict_to_headers(image_meta)
headers['Content-Type'] = 'application/octet-stream'
response = self._http_request('PUT', url, headers, '')
headers = self._header_list_to_dict(response.getheaders())
LOG.debug('Image post done')
body = response.read()
return headers, body
def get_image_service():
"""Get a copy of the image service.
This is done like this to make it easier to mock out ImageService.
"""
return ImageService
def replication_size(options, args):
"""%(prog)s size <server:port>
Determine the size of a glance instance if dumped to disk.
server:port: the location of the glance instance.
"""
# Make sure server info is provided
if len(args) < 1:
raise TypeError(_("Too few arguments."))
server, port = utils.parse_valid_host_port(args.pop())
total_size = 0
count = 0
imageservice = get_image_service()
client = imageservice(httplib.HTTPConnection(server, port),
options.slavetoken)
for image in client.get_images():
LOG.debug('Considering image: %(image)s' % {'image': image})
if image['status'] == 'active':
total_size += int(image['size'])
count += 1
print(_('Total size is %(size)d bytes across %(img_count)d images') %
{'size': total_size,
'img_count': count})
def replication_dump(options, args):
"""%(prog)s dump <server:port> <path>
Dump the contents of a glance instance to local disk.
server:port: the location of the glance instance.
path: a directory on disk to contain the data.
"""
# Make sure server and path are provided
if len(args) < 2:
raise TypeError(_("Too few arguments."))
path = args.pop()
server, port = utils.parse_valid_host_port(args.pop())
imageservice = get_image_service()
client = imageservice(httplib.HTTPConnection(server, port),
options.mastertoken)
for image in client.get_images():
LOG.debug('Considering: %s' % image['id'])
data_path = os.path.join(path, image['id'])
if not os.path.exists(data_path):
LOG.info(_LI('Storing: %s') % image['id'])
# Dump glance information
with open(data_path, 'w') as f:
f.write(jsonutils.dumps(image))
if image['status'] == 'active' and not options.metaonly:
# Now fetch the image. The metadata returned in headers here
# is the same as that which we got from the detailed images
# request earlier, so we can ignore it here. Note that we also
# only dump active images.
LOG.debug('Image %s is active' % image['id'])
image_response = client.get_image(image['id'])
with open(data_path + '.img', 'wb') as f:
while True:
chunk = image_response.read(options.chunksize)
if not chunk:
break
f.write(chunk)
def _dict_diff(a, b):
"""A one way dictionary diff.
a: a dictionary
b: a dictionary
Returns: True if the dictionaries are different
"""
# Only things the master has which the slave lacks matter
if set(a.keys()) - set(b.keys()):
LOG.debug('metadata diff -- master has extra keys: %(keys)s'
% {'keys': ' '.join(set(a.keys()) - set(b.keys()))})
return True
for key in a:
if str(a[key]) != str(b[key]):
LOG.debug('metadata diff -- value differs for key '
'%(key)s: master "%(master_value)s" vs '
'slave "%(slave_value)s"' %
{'key': key,
'master_value': a[key],
'slave_value': b[key]})
return True
return False
def replication_load(options, args):
"""%(prog)s load <server:port> <path>
Load the contents of a local directory into glance.
server:port: the location of the glance instance.
path: a directory on disk containing the data.
"""
# Make sure server and path are provided
if len(args) < 2:
raise TypeError(_("Too few arguments."))
path = args.pop()
server, port = utils.parse_valid_host_port(args.pop())
imageservice = get_image_service()
client = imageservice(httplib.HTTPConnection(server, port),
options.slavetoken)
updated = []
for ent in os.listdir(path):
if utils.is_uuid_like(ent):
image_uuid = ent
LOG.info(_LI('Considering: %s') % image_uuid)
meta_file_name = os.path.join(path, image_uuid)
with open(meta_file_name) as meta_file:
meta = jsonutils.loads(meta_file.read())
# Remove keys which don't make sense for replication
for key in options.dontreplicate.split(' '):
if key in meta:
LOG.debug('Stripping %(header)s from saved '
'metadata', {'header': key})
del meta[key]
if _image_present(client, image_uuid):
# NOTE(mikal): Perhaps we just need to update the metadata?
# Note that we don't attempt to change an image file once it
# has been uploaded.
LOG.debug('Image %s already present', image_uuid)
headers = client.get_image_meta(image_uuid)
for key in options.dontreplicate.split(' '):
if key in headers:
LOG.debug('Stripping %(header)s from slave '
'metadata', {'header': key})
del headers[key]
if _dict_diff(meta, headers):
LOG.info(_LI('Image %s metadata has changed') %
image_uuid)
headers, body = client.add_image_meta(meta)
_check_upload_response_headers(headers, body)
updated.append(meta['id'])
else:
if not os.path.exists(os.path.join(path, image_uuid + '.img')):
LOG.debug('%s dump is missing image data, skipping' %
image_uuid)
continue
# Upload the image itself
with open(os.path.join(path, image_uuid + '.img')) as img_file:
try:
headers, body = client.add_image(meta, img_file)
_check_upload_response_headers(headers, body)
updated.append(meta['id'])
except ImageAlreadyPresentException:
LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE)
% image_uuid)
return updated
def replication_livecopy(options, args):
"""%(prog)s livecopy <fromserver:port> <toserver:port>
Load the contents of one glance instance into another.
fromserver:port: the location of the master glance instance.
toserver:port: the location of the slave glance instance.
"""
# Make sure from-server and to-server are provided
if len(args) < 2:
raise TypeError(_("Too few arguments."))
imageservice = get_image_service()
slave_server, slave_port = utils.parse_valid_host_port(args.pop())
slave_conn = httplib.HTTPConnection(slave_server, slave_port)
slave_client = imageservice(slave_conn, options.slavetoken)
master_server, master_port = utils.parse_valid_host_port(args.pop())
master_conn = httplib.HTTPConnection(master_server, master_port)
master_client = imageservice(master_conn, options.mastertoken)
updated = []
for image in master_client.get_images():
LOG.debug('Considering %(id)s' % {'id': image['id']})
for key in options.dontreplicate.split(' '):
if key in image:
LOG.debug('Stripping %(header)s from master metadata',
{'header': key})
del image[key]
if _image_present(slave_client, image['id']):
# NOTE(mikal): Perhaps we just need to update the metadata?
# Note that we don't attempt to change an image file once it
# has been uploaded.
headers = slave_client.get_image_meta(image['id'])
if headers['status'] == 'active':
for key in options.dontreplicate.split(' '):
if key in image:
LOG.debug('Stripping %(header)s from master '
'metadata', {'header': key})
del image[key]
if key in headers:
LOG.debug('Stripping %(header)s from slave '
'metadata', {'header': key})
del headers[key]
if _dict_diff(image, headers):
LOG.info(_LI('Image %s metadata has changed') %
image['id'])
headers, body = slave_client.add_image_meta(image)
_check_upload_response_headers(headers, body)
updated.append(image['id'])
elif image['status'] == 'active':
LOG.info(_LI('Image %s is being synced') % image['id'])
if not options.metaonly:
image_response = master_client.get_image(image['id'])
try:
headers, body = slave_client.add_image(image,
image_response)
_check_upload_response_headers(headers, body)
updated.append(image['id'])
except ImageAlreadyPresentException:
LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE) % image['id'])
return updated
def replication_compare(options, args):
"""%(prog)s compare <fromserver:port> <toserver:port>
Compare the contents of fromserver with those of toserver.
fromserver:port: the location of the master glance instance.
toserver:port: the location of the slave glance instance.
"""
# Make sure from-server and to-server are provided
if len(args) < 2:
raise TypeError(_("Too few arguments."))
imageservice = get_image_service()
slave_server, slave_port = utils.parse_valid_host_port(args.pop())
slave_conn = httplib.HTTPConnection(slave_server, slave_port)
slave_client = imageservice(slave_conn, options.slavetoken)
master_server, master_port = utils.parse_valid_host_port(args.pop())
master_conn = httplib.HTTPConnection(master_server, master_port)
master_client = imageservice(master_conn, options.mastertoken)
differences = {}
for image in master_client.get_images():
if _image_present(slave_client, image['id']):
headers = slave_client.get_image_meta(image['id'])
for key in options.dontreplicate.split(' '):
if key in image:
LOG.debug('Stripping %(header)s from master metadata',
{'header': key})
del image[key]
if key in headers:
LOG.debug('Stripping %(header)s from slave metadata',
{'header': key})
del headers[key]
for key in image:
if image[key] != headers.get(key, None):
LOG.info(_LI('%(image_id)s: field %(key)s differs '
'(source is %(master_value)s, destination '
'is %(slave_value)s)')
% {'image_id': image['id'],
'key': key,
'master_value': image[key],
'slave_value': headers.get(key, 'undefined')})
differences[image['id']] = 'diff'
else:
LOG.debug('%(image_id)s is identical'
% {'image_id': image['id']})
elif image['status'] == 'active':
LOG.info(_LI('Image %s entirely missing from the destination')
% image['id'])
differences[image['id']] = 'missing'
return differences
def _check_upload_response_headers(headers, body):
"""Check that the headers of an upload are reasonable.
headers: the headers from the upload
body: the body from the upload
"""
if 'status' not in headers:
try:
d = jsonutils.loads(body)
if 'image' in d and 'status' in d['image']:
return
except Exception:
raise UploadException('Image upload problem: %s' % body)
def _image_present(client, image_uuid):
"""Check if an image is present in glance.
client: the ImageService
image_uuid: the image uuid to check
Returns: True if the image is present
"""
headers = client.get_image_meta(image_uuid)
return 'status' in headers
def parse_options(parser, cli_args):
"""Returns the parsed CLI options, command to run and its arguments, merged
with any same-named options found in a configuration file
parser: the option parser
cli_args: the arguments passed on the command line
Returns: a tuple of (the parsed options, the command, the command name)
"""
if not cli_args:
cli_args.append('-h') # Show options in usage output...
(options, args) = parser.parse_args(cli_args)
# HACK(sirp): Make the parser available to the print_help method
# print_help is a command, so it only accepts (options, args); we could
# one-off have it take (parser, options, args), however, for now, I think
# this little hack will suffice
options.__parser = parser
if not args:
parser.print_usage()
sys.exit(0)
command_name = args.pop(0)
command = lookup_command(parser, command_name)
return (options, command, args)
def print_help(options, args):
"""Print help specific to a command.
options: the parsed command line options
args: the command line
"""
if len(args) != 1:
print(COMMANDS)
sys.exit(1)
parser = options.__parser
command_name = args.pop()
command = lookup_command(parser, command_name)
print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])})
def lookup_command(parser, command_name):
"""Lookup a command.
parser: the command parser
command_name: the command name
Returns: a method which implements that command
"""
BASE_COMMANDS = {'help': print_help}
REPLICATION_COMMANDS = {'compare': replication_compare,
'dump': replication_dump,
'livecopy': replication_livecopy,
'load': replication_load,
'size': replication_size}
commands = {}
for command_set in (BASE_COMMANDS, REPLICATION_COMMANDS):
commands.update(command_set)
try:
command = commands[command_name]
except KeyError:
parser.print_usage()
sys.exit(_("Unknown command: %s") % command_name)
return command
def main():
"""The main function."""
usage = """
%%prog <command> [options] [args]
%s
""" % COMMANDS
oparser = optparse.OptionParser(usage=usage.strip())
# Options
oparser.add_option('-c', '--chunksize', action="store", default=65536,
help="Amount of data to transfer per HTTP write.")
oparser.add_option('-d', '--debug', action="store_true", default=False,
help="Print debugging information.")
oparser.add_option('-D', '--dontreplicate', action="store",
default=('created_at date deleted_at location '
'updated_at'),
help="List of fields to not replicate.")
oparser.add_option('-m', '--metaonly', action="store_true", default=False,
help="Only replicate metadata, not images.")
oparser.add_option('-l', '--logfile', action="store", default='',
help="Path of file to log to.")
oparser.add_option('-s', '--syslog', action="store_true", default=False,
help="Log to syslog instead of a file.")
oparser.add_option('-t', '--token', action="store", default='',
help=("Pass in your authentication token if you have "
"one. If you use this option the same token is "
"used for both the master and the slave."))
oparser.add_option('-M', '--mastertoken', action="store", default='',
help=("Pass in your authentication token if you have "
"one. This is the token used for the master."))
oparser.add_option('-S', '--slavetoken', action="store", default='',
help=("Pass in your authentication token if you have "
"one. This is the token used for the slave."))
oparser.add_option('-v', '--verbose', action="store_true", default=False,
help="Print more verbose output.")
(options, command, args) = parse_options(oparser, sys.argv[1:])
# Setup logging
log.setup('glance')
if options.token:
options.slavetoken = options.token
options.mastertoken = options.token
try:
command(options, args)
except TypeError as e:
LOG.error(_LE(command.__doc__) % {'prog': command.__name__})
sys.exit("ERROR: %s" % e)
except ValueError as e:
LOG.error(_LE(command.__doc__) % {'prog': command.__name__})
sys.exit("ERROR: %s" % e)
if __name__ == '__main__':
main()
| {
"content_hash": "e193d189613f97b1a6d4be07161024ff",
"timestamp": "",
"source": "github",
"line_count": 729,
"max_line_length": 79,
"avg_line_length": 35.4636488340192,
"alnum_prop": 0.5560669941592852,
"repo_name": "redhat-openstack/glance",
"id": "0d60440b6fd90286c859a63e1e8214732f30a29e",
"size": "26569",
"binary": false,
"copies": "1",
"ref": "refs/heads/f22-patches",
"path": "glance/cmd/replicator.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "12183"
},
{
"name": "Python",
"bytes": "3304893"
},
{
"name": "Shell",
"bytes": "7168"
}
],
"symlink_target": ""
} |
import sys
import os
import os.path
import hashlib
import datetime
import uuid
import random
import string
import time
import threading
import logging
from future.utils import iteritems
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore.job_spec import JobSpec
from pandaharvester.harvestercore.file_spec import FileSpec
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
from pandaharvester.harvestercore.plugin_factory import PluginFactory
from pandaharvester.harvesterbody.cacher import Cacher
from pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy
from pandaharvester.harvestercore.communicator_pool import CommunicatorPool
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestermisc import globus_utils
from globus_sdk import TransferClient
from globus_sdk import TransferData
from globus_sdk import NativeAppAuthClient
from globus_sdk import RefreshTokenAuthorizer
def dump(obj):
for attr in dir(obj):
if hasattr(obj, attr):
print("obj.%s = %s" % (attr, getattr(obj, attr)))
print(len(sys.argv))
queueName = 'ALCF_Theta'
job_id = 1111
globus_sleep_time = 15
if len(sys.argv) > 1:
queueName = sys.argv[1]
if len(sys.argv) > 2:
job_id = int(sys.argv[2])
if len(sys.argv) > 3:
globus_sleep_time = int(sys.argv[3])
queueConfigMapper = QueueConfigMapper()
queueConfig = queueConfigMapper.get_queue(queueName)
initial_queueConfig_preparator = queueConfig.preparator
queueConfig.preparator['module'] = 'pandaharvester.harvesterpreparator.go_preparator'
queueConfig.preparator['name'] = 'GoPreparator'
modified_queueConfig_preparator = queueConfig.preparator
pluginFactory = PluginFactory()
# get stage-out plugin
preparatorCore = pluginFactory.get_plugin(queueConfig.preparator)
# logger
_logger = core_utils.setup_logger('stageInTest_go_preparator')
tmpLog = core_utils.make_logger(_logger, method_name='stageInTest_go_preparator')
tmpLog.debug('start')
for loggerName, loggerObj in iteritems(logging.Logger.manager.loggerDict):
#print "loggerName - {}".format(loggerName)
if loggerName.startswith('panda.log'):
if len(loggerObj.handlers) == 0:
continue
if loggerName.split('.')[-1] in ['db_proxy']:
continue
stdoutHandler = logging.StreamHandler(sys.stdout)
stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
loggerObj.addHandler(stdoutHandler)
msgStr = "plugin={0}".format(preparatorCore.__class__.__name__)
tmpLog.debug(msgStr)
msgStr = "Initial queueConfig.preparator = {}".format(initial_queueConfig_preparator)
tmpLog.debug(msgStr)
msgStr = "Modified queueConfig.preparator = {}".format(modified_queueConfig_preparator)
tmpLog.debug(msgStr)
scope = 'panda'
proxy = DBProxy()
communicator = CommunicatorPool()
cacher = Cacher(communicator, single_mode=True)
cacher.run()
Globus_srcPath = queueConfig.preparator['Globus_srcPath']
srcEndpoint = queueConfig.preparator['srcEndpoint']
basePath = queueConfig.preparator['basePath']
Globus_dstPath = queueConfig.preparator['Globus_dstPath']
dstEndpoint = queueConfig.preparator['dstEndpoint']
# need to get client_id and refresh_token from PanDA server via harvester cache mechanism
c_data = preparatorCore.dbInterface.get_cache('globus_secret')
client_id = None
refresh_token = None
if c_data is not None and c_data.data['StatusCode'] == 0:
    client_id = c_data.data['publicKey']  # client_id
    refresh_token = c_data.data['privateKey']  # refresh_token
else:
    client_id = None
    refresh_token = None
tc = None
errStr = 'failed to get Globus Client ID and Refresh Token'
tmpLog.error(errStr)
sys.exit(1)
# create Globus transfer client to send initial files to remote Globus source
tmpStat, tc = globus_utils.create_globus_transfer_client(tmpLog,client_id,refresh_token)
if not tmpStat:
tc = None
errStr = 'failed to create Globus Transfer Client'
tmpLog.error(errStr)
sys.exit(1)
try:
# We are sending test files from our destination machine to the source machine
# Test endpoints for activation -
tmpStatsrc, srcStr = globus_utils.check_endpoint_activation(tmpLog,tc,dstEndpoint)
tmpStatdst, dstStr = globus_utils.check_endpoint_activation(tmpLog,tc,srcEndpoint)
if tmpStatsrc and tmpStatdst:
errStr = 'source Endpoint and destination Endpoint activated'
tmpLog.debug(errStr)
else:
errStr = ''
        if not tmpStatsrc:
            errStr += ' source Endpoint not activated '
        if not tmpStatdst:
            errStr += ' destination Endpoint not activated '
tmpLog.error(errStr)
sys.exit(2)
# We are sending test files from our destination machine to the source machine
# both endpoints activated now prepare to transfer data
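    # sync_level="checksum" asks Globus to (re)transfer a file only when the
    # source and destination checksums differ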
tdata = TransferData(tc,dstEndpoint,srcEndpoint,sync_level="checksum")
except Exception:
errStat, errMsg = globus_utils.handle_globus_exception(tmpLog)
sys.exit(1)
# create JobSpec
jobSpec = JobSpec()
jobSpec.jobParams = {
'scopeLog': 'panda',
'logFile': 'log',
}
jobSpec.computingSite = queueName
jobSpec.PandaID = job_id
jobSpec.modificationTime = datetime.datetime.now()
realDataset = 'panda.sgotest.' + uuid.uuid4().hex
ddmEndPointIn = 'BNL-OSG2_DATADISK'
inFiles_scope_str = ''
inFiles_str = ''
realDatasets_str = ''
realDatasetsIn_str = ''
ddmEndPointIn_str = ''
GUID_str = ''
fsize_str = ''
checksum_str = ''
scope_in_str = ''
# create up to 5 files for input
for index in range(random.randint(1, 5)):
fileSpec = FileSpec()
assFileSpec = FileSpec()
fileSpec.fileType = 'input'
assFileSpec.lfn = 'panda.sgotest.' + uuid.uuid4().hex
fileSpec.lfn = assFileSpec.lfn
fileSpec.scope = 'panda'
inFiles_scope_str += 'panda,'
inFiles_str += fileSpec.lfn + ','
realDatasets_str += realDataset + ","
realDatasetsIn_str += realDataset + ","
ddmEndPointIn_str += ddmEndPointIn + ","
# some dummy inputs
GUID_str += 'd82e8e5e301b77489fd4da04bcdd6565,'
fsize_str += '3084569129,'
checksum_str += 'ad:9f60d29f,'
scope_in_str += 'panda,'
#
assFileSpec.fileType = 'input'
assFileSpec.fsize = random.randint(10, 100)
# create source file
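    # Rucio-style deterministic path: the first two hex-digit pairs of
    # md5('scope:lfn') become intermediate directory levels under the endpoint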
    hasher = hashlib.md5()  # avoid shadowing the built-in hash()
    hasher.update(('%s:%s' % (fileSpec.scope, fileSpec.lfn)).encode('utf-8'))  # md5 needs bytes
    hash_hex = hasher.hexdigest()
correctedscope = "/".join(scope.split('.'))
fileSpec.path = "{endPoint}/{scope}/{hash1}/{hash2}/{lfn}".format(endPoint=queueConfig.preparator['Globus_dstPath'],
scope=correctedscope,
hash1=hash_hex[0:2],
hash2=hash_hex[2:4],
lfn=fileSpec.lfn)
assFileSpec.path = fileSpec.path
fileSpec.add_associated_file(assFileSpec)
# now create the temporary file
tmpfile_path = "{mountPoint}/testdata/{lfn}".format(mountPoint=queueConfig.preparator['basePath'],
lfn=assFileSpec.lfn)
if not os.path.exists(os.path.dirname(tmpfile_path)):
tmpLog.debug("os.makedirs({})".format(os.path.dirname(tmpfile_path)))
os.makedirs(os.path.dirname(tmpfile_path))
oFile = open(tmpfile_path, 'w')
oFile.write(''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(assFileSpec.fsize)))
oFile.close()
# location of destination file
destfile_path = "{endPoint}/{scope}/{hash1}/{hash2}/{lfn}".format(endPoint=queueConfig.preparator['Globus_srcPath'],
scope=correctedscope,
hash1=hash_hex[0:2],
hash2=hash_hex[2:4],
lfn=fileSpec.lfn)
# add to Globus transfer list
tdata.add_item(tmpfile_path,destfile_path)
#print "dump(fileSpec)"
#dump(fileSpec)
# add input file to jobSpec
jobSpec.add_in_file(fileSpec)
#
tmpLog.debug("source file to transfer - {}".format(tmpfile_path))
tmpLog.debug("destination file to transfer - {}".format(destfile_path))
#print "dump(jobSpec)"
#dump(jobSpec)
# remove final ","
realDatasetsIn_str=realDatasetsIn_str[:-1]
inFiles_str = inFiles_str[:-1]
inFiles_scope_str = inFiles_scope_str[:-1]
GUID_str = GUID_str[:-1]
fsize_str = fsize_str[:-1]
checksum_str = checksum_str[:-1]
scope_in_str = scope_in_str[:-1]
jobSpec.jobParams['realDatasets'] = realDatasets_str
jobSpec.jobParams['ddmEndPointIn'] = ddmEndPointIn_str
jobSpec.jobParams['inFiles'] = inFiles_str
jobSpec.jobParams['GUID'] = GUID_str
jobSpec.jobParams['fsize'] = fsize_str
jobSpec.jobParams['checksum'] = checksum_str
jobSpec.jobParams['scopeIn'] = scope_in_str
jobSpec.jobParams['realDatasetsIn'] = realDatasetsIn_str
msgStr = "jobSpec.jobParams ={}".format(jobSpec.jobParams)
tmpLog.debug(msgStr)
# transfer dummy files to Remote site for input
transfer_result = tc.submit_transfer(tdata)
# check status code and message
tmpLog.debug(str(transfer_result))
if transfer_result['code'] == "Accepted":
# succeeded
# set transfer ID which are used for later lookup
transferID = transfer_result['task_id']
tmpLog.debug('done')
else:
    tmpLog.error('Failed to send initial files')
sys.exit(3)
print "sleep {0} seconds".format(globus_sleep_time)
time.sleep(globus_sleep_time)
# enter polling loop to see if the initial files have transferred
maxloop = 5
iloop = 0
NotFound = True
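# poll until the transfer task reports SUCCEEDED, giving up after maxloop attempts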
while (iloop < maxloop) and NotFound:
# get transfer task
tmpStat, transferTasks = globus_utils.get_transfer_task_by_id(tmpLog,tc,transferID)
# return a temporary error when failed to get task
if not tmpStat:
errStr = 'failed to get transfer task'
tmpLog.error(errStr)
else:
# return a temporary error when task is missing
tmpLog.debug('transferTasks : {} '.format(transferTasks))
if transferID not in transferTasks:
errStr = 'transfer task ID - {} is missing'.format(transferID)
tmpLog.error(errStr)
else:
            # succeeded in finding a transfer task by transferID
            if transferTasks[transferID]['status'] == 'SUCCEEDED':
                tmpLog.debug('transfer task {} succeeded'.format(transferID))
                NotFound = False
            # failed
            elif transferTasks[transferID]['status'] == 'FAILED':
                errStr = 'transfer task {} failed'.format(transferID)
                tmpLog.error(errStr)
            # another status
            else:
                tmpStr = 'transfer task {0} status: {1}'.format(transferID, transferTasks[transferID]['status'])
                tmpLog.debug(tmpStr)
    if NotFound:
        print("sleep {0} seconds".format(globus_sleep_time))
        time.sleep(globus_sleep_time)
        iloop += 1  # count this polling attempt
if NotFound:
    errStr = 'transfer task ID - {} is missing'.format(transferID)
    tmpLog.error(errStr)
    sys.exit(1)
#dump(queueConfig)
print "plugin={0}".format(preparatorCore.__class__.__name__)
print "testing stagein:"
print "BasePath from preparator configuration: %s " % preparatorCore.basePath
tmpStat, tmpOut = preparatorCore.trigger_preparation(jobSpec)
if tmpStat:
print " OK"
else:
print " NG {0}".format(tmpOut)
print "sleep {0} seconds".format(globus_sleep_time)
time.sleep(globus_sleep_time)
print "testing status check"
while True:
    tmpStat, tmpOut = preparatorCore.check_stage_in_status(jobSpec)
    if tmpStat is True:
        print(" OK")
        break
    elif tmpStat is False:
        print(" NG {0}".format(tmpOut))
        sys.exit(1)
    else:
        print(" still running. sleep 1 min")
        time.sleep(60)
print()
print("checking path resolution")
tmpStat, tmpOut = preparatorCore.resolve_input_paths(jobSpec)
if tmpStat:
    print(" OK {0}".format(jobSpec.jobParams['inFilePaths']))
else:
    print(" NG {0}".format(tmpOut))
| {
"content_hash": "8e0b073b3380888ec68b0a17e5e7571b",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 119,
"avg_line_length": 36.274924471299094,
"alnum_prop": 0.6817689681019405,
"repo_name": "PanDAWMS/panda-harvester",
"id": "168ee496d77e9ba760f47c281d530180dff9e3c2",
"size": "12007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandaharvester/harvestertest/stageInTest_globus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1650803"
},
{
"name": "Shell",
"bytes": "21117"
}
],
"symlink_target": ""
} |
from django.db import models
class Category(models.Model):
title = models.CharField(max_length=100)
slug = models.SlugField(max_length=100, unique=True)
description = models.TextField()
created = models.DateTimeField(auto_now_add=True)
class Meta:
        ordering = ('-created', 'title')
def __str__(self):
return self.title
def __unicode__(self):
return self.title
def published_entries(self):
import datetime
now = datetime.datetime.now()
        return self.entries.filter(publish_date__lte=now).order_by('-publish_date')
class Entry(models.Model):
category = models.ForeignKey(Category, related_name='entries')
title = models.CharField(max_length=100)
slug = models.SlugField(max_length=100, unique=True)
description = models.TextField()
image = models.ImageField(upload_to='blog')
content = models.TextField()
publish_date = models.DateTimeField(auto_now_add=True)
update_date = models.DateTimeField(auto_now=True)
class Meta:
        ordering = ('-publish_date', 'title')
def __str__(self):
return self.title
def __unicode__(self):
return self.title
def get_url(self):
from django.core.urlresolvers import reverse
return reverse('sitelight.views.entry', args=[str(self.category.slug), str(self.slug)])
| {
"content_hash": "bccc7dd9a57d3e08f37dcfc3319e7a48",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 95,
"avg_line_length": 31.644444444444446,
"alnum_prop": 0.6327247191011236,
"repo_name": "ByteRockCode/django-sitelight",
"id": "2019a0cc0755b8e66f5fd5d80b647d78b810814b",
"size": "1424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sitelight/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4640"
}
],
"symlink_target": ""
} |
from jira.client import JIRA
# By default, the client will connect to a JIRA instance started from the Atlassian Plugin SDK
# (see https://developer.atlassian.com/display/DOCS/Installing+the+Atlassian+Plugin+SDK for details).
# Override this with the options parameter.
options = {
'server': 'https://jira.atlassian.com'
}
jira = JIRA(options)
# Get all projects viewable by anonymous users.
projects = jira.projects()
# Sort available project keys, then return the second, third, and fourth keys.
keys = sorted([project.key for project in projects])[2:5]
# Get an issue.
issue = jira.issue('JRA-1330')
# Find all comments made by Atlassians on this issue.
import re
atl_comments = [comment for comment in issue.fields.comment.comments
                if re.search(r'@atlassian\.com$', comment.author.emailAddress)]
# Add a comment to the issue.
jira.add_comment(issue, 'Comment text')
# Change the issue's summary and description.
issue.update(summary="I'm different!", description='Changed the summary to be different.')
# You can update the entire labels field like this
issue.update(labels=['AAA', 'BBB'])
# Or modify the List of existing labels. The new label is unicode with no spaces
issue.fields.labels.append(u'new_text')
issue.update(fields={"labels": issue.fields.labels})
# Send the issue away for good.
issue.delete()
| {
"content_hash": "2cf6b3d7ae5fb54b02b7b33779b1d6d4",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 101,
"avg_line_length": 34.46153846153846,
"alnum_prop": 0.7433035714285714,
"repo_name": "jameskeane/jira-python",
"id": "89a2133e329f0aff78e965a00f127cd10c0e2228",
"size": "1435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/basic_use.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "207056"
},
{
"name": "Shell",
"bytes": "1431"
}
],
"symlink_target": ""
} |
"""
This module defines the events signaled by abinit during the execution. It also
provides a parser to extract these events from the main output file and the log file.
"""
import sys
import os.path
import datetime
import collections
import ruamel.yaml as yaml
import abc
import logging
import numpy as np
from monty.string import indent, is_string, list_strings
from monty.fnmatch import WildCard
from monty.termcolor import colored
from monty.inspect import all_subclasses
from monty.json import MontyDecoder
from pymatgen.core.structure import Structure
from monty.json import MSONable
from pymatgen.util.serialization import pmg_serialize
from .abiinspect import YamlTokenizer
logger = logging.getLogger(__name__)
__all__ = [
"EventsParser",
"get_event_handler_classes",
"ScfConvergenceWarning",
"NscfConvergenceWarning",
"RelaxConvergenceWarning",
"Correction",
"DilatmxError",
"DilatmxErrorHandler",
]
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
class AbinitEvent(yaml.YAMLObject):
"""
Example (YAML syntax)::
Normal warning without any handler:
--- !Warning
message: |
This is a normal warning that won't
trigger any handler in the python code!
src_file: routine_name
src_line: 112
...
Critical warning that will trigger some action in the python code.
--- !ScfConvergeWarning
message: |
The human-readable message goes here!
src_file: foo.F90
src_line: 112
tolname: tolwfr
actual_tol: 1.0e-8
required_tol: 1.0e-10
nstep: 50
...
The algorithm to extract the YAML sections is very simple.
1) We use YamlTokenizer to extract the documents from the output file
    2) If we have a tag that ends with "Warning", "Error", "Bug" or "Comment"
       we know we have encountered a new ABINIT event
3) We parse the document with yaml.safe_load(doc.text) and we get the object
Note that:
    # --- and ... become reserved words (when they are placed at
    the beginning of a line) since they are used to mark the beginning and
the end of YAML documents.
# All the possible events should subclass `AbinitEvent` and define
the class attribute yaml_tag so that yaml.safe_load will know how to
build the instance.
"""
color = None
def __init__(self, src_file, src_line, message):
"""
Basic constructor for :class:`AbinitEvent`.
Args:
message: String with human-readable message providing info on the event.
src_file: String with the name of the Fortran file where the event is raised.
            src_line: Integer giving the line number in src_file.
"""
#print("src_file", src_file, "src_line", src_line)
self.message = message
self.src_file = src_file
self.src_line = src_line
@pmg_serialize
def as_dict(self):
        # This is needed because the events printed in the main output file do not define src_file and src_line
src_file = getattr(self, "src_file", "Unknown")
src_line = getattr(self, "src_line", 0)
return dict(message=self.message, src_file=src_file, src_line=src_line, yaml_tag=self.yaml_tag)
@classmethod
def from_dict(cls, d):
cls = as_event_class(d.get("yaml_tag"))
return cls(**{k: v for k, v in d.items() if k != "yaml_tag" and not k.startswith("@")})
@property
def header(self):
try:
return "<%s at %s:%s>" % (self.name, self.src_file, self.src_line)
except AttributeError:
            # This is needed because the events printed in the main output file do not define src_file and src_line
return "<%s at %s:%s>" % (self.name, "Unknown", 0)
def __repr__(self):
return self.header
def __str__(self):
return "\n".join((self.header, self.message))
def __eq__(self, other):
if other is None: return False
return self.message == other.message
def __ne__(self, other):
return not self.__eq__(other)
@property
def name(self):
"""Name of the event (class name)"""
return self.__class__.__name__
@property
def baseclass(self):
"""The baseclass of self."""
for cls in _BASE_CLASSES:
if isinstance(self, cls):
return cls
raise ValueError("Cannot determine the base class of %s" % self.__class__.__name__)
def correct(self, task):
"""
This method is called when an error is detected in a :class:`Task`
It should perform any corrective measures relating to the detected error.
The idea is similar to the one used in custodian but the handler receives
a :class:`Task` object so that we have access to its methods.
Returns:
(dict) JSON serializable dict that describes the errors and actions taken. E.g.
{"errors": list_of_errors, "actions": list_of_actions_taken}.
If this is an unfixable error, actions should be set to None.
"""
return 0
class AbinitComment(AbinitEvent):
"""Base class for Comment events"""
yaml_tag = '!COMMENT'
color = "blue"
class AbinitError(AbinitEvent):
"""Base class for Error events"""
yaml_tag = '!ERROR'
color = "red"
class AbinitYamlError(AbinitError):
"""
Raised if the YAML parser cannot parse the document and the doc tag is an Error.
It's an AbinitError because the msg produced by the code is not valid YAML!
"""
class AbinitBug(AbinitEvent):
"""Base class for Bug events"""
yaml_tag = '!BUG'
color = "red"
class AbinitWarning(AbinitEvent):
"""
Base class for Warning events (the most important class).
Developers should subclass this class to define the different exceptions
raised by the code and the possible actions that can be performed.
"""
yaml_tag = '!WARNING'
color = "magenta"
class AbinitCriticalWarning(AbinitWarning):
color = "red"
class AbinitYamlWarning(AbinitCriticalWarning):
"""
    Raised if the YAML parser cannot parse the document and the doc tag is a Warning.
"""
###############################
# Warnings triggering restart #
###############################
class ScfConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the GS SCF cycle did not converge."""
yaml_tag = '!ScfConvergenceWarning'
class NscfConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the GS NSCF cycle did not converge."""
yaml_tag = '!NscfConvergenceWarning'
class RelaxConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the structural relaxation did not converge."""
yaml_tag = '!RelaxConvergenceWarning'
# TODO: for the time being we don't discern between GS and PhononCalculations.
#class PhononConvergenceWarning(AbinitCriticalWarning):
# """Warning raised when the phonon calculation did not converge."""
# yaml_tag = u'!PhononConvergenceWarning'
class QPSConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the QPS iteration (GW) did not converge."""
yaml_tag = '!QPSConvergenceWarning'
class HaydockConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the Haydock method (BSE) did not converge."""
yaml_tag = '!HaydockConvergenceWarning'
# Error classes providing a correct method.
# Register the concrete base classes.
_BASE_CLASSES = [
AbinitComment,
AbinitError,
AbinitBug,
AbinitWarning,
]
class EventReport(collections.abc.Iterable, MSONable):
"""
Iterable storing the events raised by an ABINIT calculation.
Attributes::
stat: information about a file as returned by os.stat
"""
def __init__(self, filename, events=None):
"""
List of ABINIT events.
Args:
filename: Name of the file
events: List of Event objects
"""
self.filename = os.path.abspath(filename)
self.stat = os.stat(self.filename)
self.start_datetime, self.end_datetime = None, None
self._events = []
self._events_by_baseclass = collections.defaultdict(list)
if events is not None:
for ev in events:
self.append(ev)
def __len__(self):
return len(self._events)
def __iter__(self):
return self._events.__iter__()
    def __getitem__(self, index):
        return self._events[index]
def __str__(self):
#has_colours = stream_has_colours(stream)
has_colours = True
lines = []
app = lines.append
app("Events found in %s\n" % self.filename)
for i, event in enumerate(self):
if has_colours:
app("[%d] %s" % (i+1, colored(event.header, color=event.color)))
app(indent(event.message, 4))
else:
app("[%d] %s" % (i+1, str(event)))
app("num_errors: %s, num_warnings: %s, num_comments: %s, completed: %s\n" % (
self.num_errors, self.num_warnings, self.num_comments, self.run_completed))
return "\n".join(lines)
def append(self, event):
"""Add an event to the list."""
self._events.append(event)
self._events_by_baseclass[event.baseclass].append(event)
def set_run_completed(self, boolean, start_datetime, end_datetime):
"""Set the value of _run_completed."""
self._run_completed = boolean
if (start_datetime, end_datetime) != (None, None):
# start_datetime: Sat Feb 28 23:54:27 2015
# end_datetime: Sat Feb 28 23:54:30 2015
try:
fmt = "%a %b %d %H:%M:%S %Y"
self.start_datetime = datetime.datetime.strptime(start_datetime, fmt)
self.end_datetime = datetime.datetime.strptime(end_datetime, fmt)
except Exception as exc:
# Maybe LOCALE != en_US
logger.warning(str(exc))
@property
def run_etime(self):
"""Wall-time of the run as `timedelta` object."""
if self.start_datetime is None or self.end_datetime is None:
return None
return self.end_datetime - self.start_datetime
@property
def run_completed(self):
"""True if the calculation terminated."""
try:
return self._run_completed
except AttributeError:
return False
@property
def comments(self):
"""List of comments found."""
return self.select(AbinitComment)
@property
def errors(self):
"""List of errors + bugs found."""
return self.select(AbinitError) + self.select(AbinitBug)
@property
def warnings(self):
"""List of warnings found."""
return self.select(AbinitWarning)
@property
def num_warnings(self):
"""Number of warnings reported."""
return len(self.warnings)
@property
def num_errors(self):
"""Number of errors reported."""
return len(self.errors)
@property
def num_comments(self):
"""Number of comments reported."""
return len(self.comments)
def select(self, base_class):
"""
Return the list of events that inherits from class base_class
"""
return self._events_by_baseclass[base_class]
def filter_types(self, event_types):
events = []
for ev in self:
if type(ev) in event_types: events.append(ev)
return self.__class__(filename=self.filename, events=events)
def get_events_of_type(self, event_class):
"""Return a list of events of the given class."""
return [ev for ev in self if type(ev) == event_class]
@pmg_serialize
def as_dict(self):
return dict(filename=self.filename, events=[e.as_dict() for e in self._events])
@classmethod
def from_dict(cls, d):
return cls(filename=d["filename"], events=[AbinitEvent.from_dict(e) for e in d["events"]])
class EventsParserError(Exception):
"""Base class for the exceptions raised by :class:`EventsParser`."""
class EventsParser:
"""
Parses the output or the log file produced by ABINIT and extract the list of events.
"""
Error = EventsParserError
def parse(self, filename, verbose=0):
"""
Parse the given file. Return :class:`EventReport`.
"""
run_completed, start_datetime, end_datetime = False, None, None
filename = os.path.abspath(filename)
report = EventReport(filename)
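        # match any YAML document whose tag ends in Error/Warning/Comment/Bug
        # (capitalized or upper-case variants)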
w = WildCard("*Error|*Warning|*Comment|*Bug|*ERROR|*WARNING|*COMMENT|*BUG")
import warnings
warnings.simplefilter('ignore', yaml.error.UnsafeLoaderWarning)
with YamlTokenizer(filename) as tokens:
for doc in tokens:
if w.match(doc.tag):
#print("got doc.tag", doc.tag,"--")
try:
#print(doc.text)
event = yaml.load(doc.text) # Can't use ruamel safe_load!
#yaml.load(doc.text, Loader=ruamel.yaml.Loader)
#print(event.yaml_tag, type(event))
                    except Exception:
                        #raise
                        # Wrong YAML doc. Check the doc tag and instantiate the proper event.
                        message = "Malformed YAML document at line: %d\n" % doc.lineno
                        message += doc.text
                        # This call is very expensive when we have many exceptions due to malformed YAML docs.
if verbose:
message += "Traceback:\n %s" % straceback()
if "error" in doc.tag.lower():
print("It seems an error. doc.tag:", doc.tag)
event = AbinitYamlError(message=message, src_file=__file__, src_line=0)
else:
event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0)
event.lineno = doc.lineno
report.append(event)
# Check whether the calculation completed.
if doc.tag == "!FinalSummary":
#print(doc)
run_completed = True
d = doc.as_dict()
#print(d)
start_datetime, end_datetime = d["start_datetime"], d["end_datetime"]
report.set_run_completed(run_completed, start_datetime, end_datetime)
return report
def report_exception(self, filename, exc):
"""
        This method is used when self.parse raises an Exception so that
        we can report a customized :class:`EventReport` object with info on the exception.
"""
# Build fake event.
event = AbinitError(src_file="Unknown", src_line=0, message=str(exc))
return EventReport(filename, events=[event])
class EventHandler(MSONable, metaclass=abc.ABCMeta):
"""
Abstract base class defining the interface for an EventHandler.
    The __init__ should always provide default values for its arguments so that we can
    easily instantiate the handlers with:
        handlers = [cls() for cls in get_event_handler_classes()]
    The default values should be chosen to cover the most typical cases.
Each EventHandler should define the class attribute `can_change_physics`
that is true if the handler changes `important` parameters of the
run that are tightly connected to the physics of the system.
    For example, an `EventHandler` that changes the value of `dilatmx` and
    prepares the restart is not changing the physics; the same holds for a
    handler that changes the mixing algorithm. On the contrary, a handler that
changes the value of the smearing is modifying an important physical
parameter, and the user should be made aware of this so that
there's an explicit agreement between the user and the code.
    The default handlers are those that do not change the physics;
    other handlers can be installed by the user when constructing the flow.
    TODO
.. warning::
The EventHandler should perform any action at the level of the input files
needed to solve the problem and then prepare the task for a new submission
The handler should never try to resubmit the task. The submission must be
delegated to the scheduler or Fireworks.
"""
event_class = AbinitEvent
"""AbinitEvent subclass associated to this handler."""
#can_change_physics
FIXED = 1
NOT_FIXED = 0
def __init__(self):
"""Simple init for compatibility with introspection in as_dict/from_dict"""
return super(EventHandler,self).__init__()
@classmethod
def cls2str(cls):
lines = []
app = lines.append
ecls = cls.event_class
app("event name = %s" % ecls.yaml_tag)
app("event documentation: ")
lines.extend(ecls.__doc__.split("\n"))
app("handler documentation: ")
lines.extend(cls.__doc__.split("\n"))
return "\n".join(lines)
def __str__(self):
return "<%s>" % self.__class__.__name__
def can_handle(self, event):
"""True if this handler is associated to the given :class:`AbinitEvent`"""
return self.event_class == event.__class__
# TODO: defined CorrectionRecord object and provide helper functions to build it
def count(self, task):
"""
Return the number of times the event associated to this handler
has been already fixed in the :class:`Task`.
"""
        # "@class" in the serialized event holds the class name string
        return len([c for c in task.corrections if c["event"]["@class"] == self.event_class.__name__])
@abc.abstractmethod
def handle_task_event(self, task, event):
"""
Method to handle Abinit events.
Args:
task: :class:`Task` object.
event: :class:`AbinitEvent` found in the log file.
Return:
0 if no action has been applied, 1 if the problem has been fixed.
"""
@pmg_serialize
def as_dict(self):
"""
        Basic implementation of as_dict if __init__ has no arguments. Subclasses may need to override.
"""
d = {}
return d
@classmethod
def from_dict(cls, d):
"""
        Basic implementation of from_dict if __init__ has no arguments. Subclasses may need to override.
"""
return cls()
@classmethod
def compare_inputs(cls, new_input, old_input):
def vars_dict(d):
"""
make a simple dictionary and convert numpy arrays to lists
"""
new_d = {}
for key, value in d.items():
if isinstance(value, np.ndarray): value = value.tolist()
new_d[key] = value
return new_d
new_vars = vars_dict(new_input)
old_vars = vars_dict(old_input)
new_keys = set(new_vars.keys())
old_keys = set(old_vars.keys())
intersect = new_keys.intersection(old_keys)
added_keys = new_keys - intersect
removed_keys = old_keys - intersect
changed_keys = set(v for v in intersect if new_vars[v] != old_vars[v])
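        # Build a diff dict whose optional keys are '_set' (vars only in
        # new_input), '_update' ({'new': ..., 'old': ...} per changed var),
        # '_change_structure' (the new structure as a dict) and '_pop'
        # (vars only in old_input)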
log_diff = {}
if added_keys:
log_diff['_set'] = {k: new_vars[k] for k in added_keys}
if changed_keys:
log_diff['_update'] = ({k: {'new': new_vars[k], 'old': old_vars[k]} for k in changed_keys})
if new_input.structure != old_input.structure:
log_diff['_change_structure'] = new_input.structure.as_dict()
if removed_keys:
log_diff['_pop'] = {k: old_vars[k] for k in removed_keys}
return log_diff
class Correction(MSONable):
def __init__(self, handler, actions, event, reset=False):
self.handler = handler
self.actions = actions
self.event = event
self.reset = reset
@pmg_serialize
def as_dict(self):
return dict(handler=self.handler.as_dict(), actions=self.actions, event=self.event.as_dict(), reset=self.reset)
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
return cls(handler=dec.process_decoded(d['handler']), actions=d['actions'],
event=dec.process_decoded(d['event']), reset=d['reset'])
#class WarningHandler(EventHandler):
# """Base class for handlers associated to ABINIT warnings."""
# event_class = AbinitWarning
#
#class BugHandler(EventHandler):
# """Base class for handlers associated to ABINIT bugs."""
# event_class = AbinitBug
class ErrorHandler(EventHandler):
"""Base class for handlers associated to ABINIT errors."""
event_class = AbinitError
_ABC_EVHANDLER_CLASSES = set([ErrorHandler,])
# Public API
def autodoc_event_handlers(stream=sys.stdout):
"""
    Print to the given stream the documentation for the events
and the associated handlers.
"""
lines = []
for cls in all_subclasses(EventHandler):
if cls in _ABC_EVHANDLER_CLASSES: continue
event_class = cls.event_class
lines.extend(cls.cls2str().split("\n"))
# Here we enforce the abstract protocol of the class
# The unit test in tests_events will detect the problem.
if not hasattr(cls, "can_change_physics"):
raise RuntimeError("%s: can_change_physics must be defined" % cls)
stream.write("\n".join(lines) + "\n")
def get_event_handler_classes(categories=None):
"""Return the list of handler classes."""
classes = [c for c in all_subclasses(EventHandler) if c not in _ABC_EVHANDLER_CLASSES]
return classes
def as_event_class(obj):
"""
Convert obj into a subclass of AbinitEvent.
obj can be either a class or a string with the class name or the YAML tag
"""
if is_string(obj):
for c in all_subclasses(AbinitEvent):
if c.__name__ == obj or c.yaml_tag == obj: return c
raise ValueError("Cannot find event class associated to %s" % obj)
# Assume class.
assert obj in all_subclasses(AbinitEvent)
return obj
############################################
########## Concrete classes ################
############################################
class DilatmxError(AbinitError):
"""
This Error occurs in variable cell calculations when the increase in the
unit cell volume is too large.
"""
yaml_tag = '!DilatmxError'
class DilatmxErrorHandler(ErrorHandler):
"""
    Handle DilatmxError. Abinit produces a netcdf file with the last structure before aborting.
    The handler changes the structure in the input with the last configuration and modifies the value of dilatmx.
"""
event_class = DilatmxError
can_change_physics = False
def __init__(self, max_dilatmx=1.3):
self.max_dilatmx = max_dilatmx
@pmg_serialize
def as_dict(self):
return {'max_dilatmx': self.max_dilatmx}
@classmethod
def from_dict(cls, d):
return cls(max_dilatmx=d['max_dilatmx'])
def handle_task_event(self, task, event):
# Read the last structure dumped by ABINIT before aborting.
filepath = task.outdir.has_abiext("DILATMX_STRUCT.nc")
last_structure = Structure.from_file(filepath)
task._change_structure(last_structure)
#read the suggested dilatmx
# new_dilatmx = 1.05
# if new_dilatmx > self.max_dilatmx:
# msg = "Suggested dilatmx ({}) exceeds maximux configured value ({}).".format(new_dilatmx, self.max_dilatmx)
# return self.NOT_FIXED
# task.strategy.abinit_input.set_vars(dilatmx=new_dilatmx)
msg = "Take last structure from DILATMX_STRUCT.nc, will try to restart with dilatmx %s" % task.get_inpvar("dilatmx")
task.log_correction(event, msg)
# Note that we change the structure but we don't try restart from the previous WFK|DEN file
# because Abinit called mpi_abort and therefore no final WFK|DEN file has been produced.
return self.FIXED
def handle_input_event(self, abiinput, outdir, event):
try:
old_abiinput = abiinput.deepcopy()
# Read the last structure dumped by ABINIT before aborting.
filepath = outdir.has_abiext("DILATMX_STRUCT.nc")
last_structure = Structure.from_file(filepath)
abiinput.set_structure(last_structure)
#FIXME restart from DEN files not always working with interpolation
return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, reset=True)
# return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, event=False)
except Exception as exc:
            logger.warning('Error while trying to apply the handler {}: {}'.format(str(self), exc))
return None
class TolSymError(AbinitError):
"""
Class of errors raised by Abinit when it cannot detect the symmetries of the system.
The handler assumes the structure makes sense and the error is just due to numerical inaccuracies.
    We increase the value of tolsym in the input file (default 1e-8) so that Abinit can find the space group
and re-symmetrize the input structure.
"""
yaml_tag = '!TolSymError'
class TolSymErrorHandler(ErrorHandler):
"""
Increase the value of tolsym in the input file.
"""
event_class = TolSymError
can_change_physics = False
def __init__(self, max_nfixes=3):
self.max_nfixes = max_nfixes
@pmg_serialize
def as_dict(self):
return {'max_nfixes': self.max_nfixes}
@classmethod
def from_dict(cls, d):
return cls(max_nfixes=d['max_nfixes'])
def handle_task_event(self, task, event):
# TODO: Add limit on the number of fixes one can do for the same error
# For example in this case, the scheduler will stop after 20 submissions
if self.count(task) > self.max_nfixes:
return self.NOT_FIXED
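        # Abinit's default tolsym is 1e-8 (see class docstring): start the
        # escalation at 1e-6 and multiply by 10 on each subsequent fix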
old_tolsym = task.get_inpvar("tolsym")
new_tolsym = 1e-6 if old_tolsym is None else old_tolsym * 10
task.set_vars(tolsym=new_tolsym)
task.log_correction(event, "Increasing tolsym from %s to %s" % (old_tolsym, new_tolsym))
return self.FIXED
def handle_input_event(self, abiinput, outdir, event):
try:
old_abiinput = abiinput.deepcopy()
old_tolsym = abiinput["tolsym"]
new_tolsym = 1e-6 if old_tolsym is None else old_tolsym * 10
abiinput.set_vars(tolsym=new_tolsym)
return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, reset=False)
except Exception as exc:
            logger.warning('Error while trying to apply the handler {}: {}'.format(str(self), exc))
return None
class MemanaError(AbinitError):
"""
Class of errors raised by the memory analyzer.
(the section that estimates the memory requirements from the input parameters).
"""
yaml_tag = '!MemanaError'
class MemanaErrorHandler(ErrorHandler):
"""
Set mem_test to 0 to bypass the memory check.
"""
event_class = MemanaError
can_change_physics = False
def handle_task_event(self, task, event):
task.set_vars(mem_test=0)
task.log_correction(event, "Find MemanaError. Setting mem_test to 0 in input file.")
return self.FIXED
def handle_input_event(self, abiinput, outdir, event):
try:
old_abiinput = abiinput.deepcopy()
abiinput.set_vars(mem_test=0)
return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, reset=False)
except Exception as exc:
            logger.warning('Error while trying to apply the handler {}: {}'.format(str(self), exc))
return None
class MemoryError(AbinitError):
"""
    This error occurs when a checked allocation fails in Abinit.
The only way to go is to increase memory
"""
yaml_tag = '!MemoryError'
class MemoryErrorHandler(ErrorHandler):
"""
    Handle MemoryError. Increase the resource requirements.
"""
event_class = MemoryError
can_change_physics = False
def handle_task_event(self, task, event):
task.manager.increase_resources()
return self.FIXED
def handle_input_event(self, abiinput, outdir, event):
"""
Shouldn't do anything on the input
"""
return None
| {
"content_hash": "7f9ba2e7c2d5ea9b0195f4ee0db0b457",
"timestamp": "",
"source": "github",
"line_count": 874,
"max_line_length": 124,
"avg_line_length": 32.82151029748284,
"alnum_prop": 0.6198145436798438,
"repo_name": "montoyjh/pymatgen",
"id": "85cb71421ee3691ec1b8677b444ba8e94924099e",
"size": "28795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/io/abinit/events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Dockerfile",
"bytes": "275"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "15152267"
},
{
"name": "Python",
"bytes": "7718850"
},
{
"name": "Roff",
"bytes": "1898220"
}
],
"symlink_target": ""
} |
"""
Copyright 2010-2018 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Module to prepare observation data files for processing by the
Broadband Platform
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import shutil
# Import Broadband modules
import bband_utils
from install_cfg import InstallCfg
from station_list import StationList
import bbp_formatter
import gmpe_config
import rotd100
from rotd50 import RotD50
from correct_psa import CorrectPSA
SUPPORTED_OBS_FORMATS = ["acc_bbp", "acc_peer", "gmpe"]
class ObsSeismograms(object):
def __init__(self, i_r_stations,
i_a_obsdir, i_obs_format,
i_obs_corr, sim_id=0):
"""
Initialize basic parameters for the ObsSeismograms class
"""
self.sim_id = sim_id
self.r_stations = i_r_stations
self.a_obsdir = i_a_obsdir
self.obs_format = i_obs_format
self.obs_corrections = i_obs_corr
        # Make sure observed seismograms are in a format we can handle
if i_obs_format not in SUPPORTED_OBS_FORMATS:
raise bband_utils.ParameterError("Format %s for " %
(self.obs_format) +
"observed seismograms "
"not supported")
def run(self):
"""
This function copies the observed seismograms for the stations
specified in r_stations to a temporary directory inside the
tmpdata directory and converts them to the format needed by
        the goodness of fit module
"""
print("ObsSeismograms".center(80, '-'))
# Initialize basic variables
install = InstallCfg.getInstance()
sim_id = self.sim_id
sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
self.log = os.path.join(install.A_OUT_LOG_DIR,
str(sim_id),
"%d.obs_seis.log" %
(sim_id))
# Input, tmp, and output directories
a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
a_tmpdir_seis = os.path.join(install.A_TMP_DATA_DIR, str(sim_id),
"obs_seis_%s" % (sta_base))
a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
a_outdir_seis = os.path.join(a_outdir, "obs_seis_%s" % (sta_base))
a_outdir_gmpe = os.path.join(a_outdir,
"gmpe_data_%s" % (sta_base))
#
# Make sure the output and tmp directories exist
#
dirs = [a_tmpdir, a_tmpdir_seis, a_outdir,
a_outdir_seis, a_outdir_gmpe]
bband_utils.mkdirs(dirs, print_cmd=False)
# Station file
a_statfile = os.path.join(a_indir, self.r_stations)
# List of observed seismogram files
filelist = os.listdir(self.a_obsdir)
slo = StationList(a_statfile)
site_list = slo.getStationList()
        # Initialize the CorrectPSA module
if self.obs_corrections:
corr_psa = CorrectPSA(self.r_stations, "rd100",
os.path.join(a_indir,
self.obs_corrections),
a_tmpdir_seis, sim_id)
else:
corr_psa = None
# Go through each station
for site in site_list:
slon = float(site.lon)
slat = float(site.lat)
stat = site.scode
print("==> Processing data for station: %s" % (stat))
# Look for the files we need
expected_rd50_file = os.path.join(a_outdir,
"%d.%s.rd50" %
(sim_id, stat))
if not os.path.exists(expected_rd50_file):
# just skip it
print("Couldn't find file %s. " %
(expected_rd50_file) +
"This is not necessarily an error, as you may have " +
"run with a subset of a stations. Continuing " +
"with available stations.")
continue
# Ok, we have a calculated rd50/rd100 files for this station,
# let's look for the observed file
r_e_peer_file = None
r_n_peer_file = None
r_z_peer_file = None
r_bbp_file = "%s.bbp" % (stat)
# Do different things depending on the format of the
# observed seismograms
if self.obs_format == "acc_bbp":
# We need to look for the bbp file
if r_bbp_file not in filelist:
# No bbp file for this station
continue
print("==> Converting file: %s" % (r_bbp_file))
# Copy bbp file to the tmp seismogram directory
a_src_bbp_file = os.path.join(self.a_obsdir, r_bbp_file)
a_dst_bbp_file = os.path.join(a_tmpdir_seis, r_bbp_file)
shutil.copy2(a_src_bbp_file, a_dst_bbp_file)
# Now we need to create the peer files to process with rotd50
r_e_peer_file = os.path.join(a_tmpdir_seis, "%s_E.acc" % (stat))
r_n_peer_file = os.path.join(a_tmpdir_seis, "%s_N.acc" % (stat))
r_z_peer_file = os.path.join(a_tmpdir_seis, "%s_Z.acc" % (stat))
bbp_formatter.bbp2peer(a_dst_bbp_file,
r_n_peer_file,
r_e_peer_file,
r_z_peer_file)
elif self.obs_format == "acc_peer":
# Look for the E, N, and Z files
for my_file in filelist:
if my_file.endswith("%s_E.acc" % (stat)):
r_e_peer_file = my_file
if (r_n_peer_file is not None and
r_z_peer_file is not None):
break
elif my_file.endswith("%s_N.acc" % (stat)):
r_n_peer_file = my_file
if (r_e_peer_file is not None and
r_z_peer_file is not None):
break
elif my_file.endswith("%s_Z.acc" % (stat)):
r_z_peer_file = my_file
if (r_e_peer_file is not None and
r_n_peer_file is not None):
break
if ((r_e_peer_file is None) or
(r_n_peer_file is None) or
(r_z_peer_file is None)):
# Couldn't find all 3 files
continue
# print(r_e_peer_file, r_n_peer_file, r_z_peer_file)
# Copy all three files to the tmp seismogram directory
for eachfile in (r_e_peer_file, r_n_peer_file, r_z_peer_file):
a_src_peer_file = os.path.join(self.a_obsdir, eachfile)
a_dst_peer_file = os.path.join(a_tmpdir_seis, eachfile)
shutil.copy2(a_src_peer_file, a_dst_peer_file)
# Now we need to convert them into bbp format
bbp_formatter.peer2bbp(os.path.join(a_tmpdir_seis,
r_n_peer_file),
os.path.join(a_tmpdir_seis,
r_e_peer_file),
os.path.join(a_tmpdir_seis,
r_z_peer_file),
os.path.join(a_tmpdir_seis,
r_bbp_file))
elif self.obs_format == "gmpe":
# GMPE verification packages don't have actual
# seismograms, so there's nothing we need to do here!
a_src_gmpe_file = os.path.join(a_outdir_gmpe,
"%s-gmpe.ri50" % (stat))
# Create a copy in outdata averaging all gmpes
a_avg_rd50_file = os.path.join(a_outdir_seis,
"%s.rd50" % (stat))
gmpe_config.average_gmpe(stat,
a_src_gmpe_file,
a_avg_rd50_file)
# All done!
continue
else:
raise bband_utils.ParameterError("Format %s for " %
(self.obs_format) +
"observed seismograms "
"not supported")
out_rotd100_base = "%s.rd100" % (stat)
out_rotd100v_base = "%s.rd100.vertical" % (stat)
out_rotd50_base = "%s.rd50" % (stat)
out_rotd50v_base = "%s.rd50.vertical" % (stat)
# Run RotDXX on this file
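            # RotD50/RotD100 are the median and the maximum, respectively, of
            # the oscillator response over all horizontal rotation angles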
if corr_psa is not None:
# First calculate rdXX
print("===> Calculating RotDXX for station: %s" % (stat))
rotd100.do_rotd100(a_tmpdir_seis, r_e_peer_file,
r_n_peer_file,
"%s-orig.rd100" % (stat), self.log)
#rotd100.do_rotd100(a_tmpdir_seis, r_z_peer_file,
# r_z_peer_file,
# "%s-orig.rd100.vertical" % (stat), self.log)
# Now we need to correct the RotD100 outputs using the
# user-supplied correction factors
print("===> Correcting PSA for station: %s" % (stat))
corr_psa.correct_station(stat, "rd100")
#corr_psa.correct_station(stat, "rd100.vertical")
else:
# Use final names for output files
print("===> Calculating RotDXX for station: %s" % (stat))
rotd100.do_rotd100(a_tmpdir_seis, r_e_peer_file,
r_n_peer_file,
out_rotd100_base, self.log)
#rotd100.do_rotd100(a_tmpdir_seis, r_z_peer_file,
# r_z_peer_file,
# out_rotd100v_base % (stat), self.log)
# Create rotd50 files as well
rotd100.do_split_rotd50(a_tmpdir_seis, out_rotd100_base,
out_rotd50_base, self.log)
#rotd100.do_split_rotd50(a_tmpdir_seis, out_rotd100v_base,
# out_rotd50v_base, self.log)
shutil.copy2(os.path.join(a_tmpdir_seis, out_rotd100_base),
os.path.join(a_outdir_seis, out_rotd100_base))
#shutil.copy2(os.path.join(a_tmpdir_seis, out_rotd100v_base),
# os.path.join(a_outdir_seis, out_rotd100v_base))
shutil.copy2(os.path.join(a_tmpdir_seis, out_rotd50_base),
os.path.join(a_outdir_seis, out_rotd50_base))
#shutil.copy2(os.path.join(a_tmpdir_seis, out_rotd50v_base),
# os.path.join(a_outdir_seis, out_rotd50v_base))
print("ObsSeismograms Completed".center(80, '-'))
if __name__ == "__main__":
print("Testing Module: %s" % (os.path.basename(sys.argv[0])))
if len(sys.argv) < 6:
print("Usage: %s " % (os.path.basename(sys.argv[0])) +
"station_file obs_dir obs_format obs_corr_file sim_id")
sys.exit(1)
OBS_SEIS = ObsSeismograms(sys.argv[1], sys.argv[2],
sys.argv[3], sys.argv[4],
int(sys.argv[5]))
OBS_SEIS.run()
| {
"content_hash": "c9e6232fe777827cc88ab7f30a6a57cb",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 80,
"avg_line_length": 46.23247232472325,
"alnum_prop": 0.4875887939979248,
"repo_name": "SCECcode/BBP",
"id": "239364d80b1b388786f927f324ac24b74e0823ef",
"size": "12551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bbp/comps/obs_seismograms.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1376897"
},
{
"name": "C++",
"bytes": "998"
},
{
"name": "Fortran",
"bytes": "2247676"
},
{
"name": "LLVM",
"bytes": "4224"
},
{
"name": "Makefile",
"bytes": "24088"
},
{
"name": "Matlab",
"bytes": "44651"
},
{
"name": "Python",
"bytes": "2124430"
},
{
"name": "Roff",
"bytes": "40096"
},
{
"name": "Shell",
"bytes": "5086"
}
],
"symlink_target": ""
} |
"""
Django settings for InertiaApi project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-1t@2$k+tdghkw9vh9g#=@$3n*3kl^0q(pj-nlkpsu$t4l%uy('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [u'10.0.0.38']
# Application definition
INSTALLED_APPS = [
'InertiaDjango.apps.InertiadjangoConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'oauth2_provider',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.ext.rest_framework.OAuth2Authentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
OAUTH2_PROVIDER = {
# this is the list of available scopes
'SCOPES': {'read': 'Read scope', 'write': 'Write scope'}
}
ROOT_URLCONF = 'InertiaApi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'InertiaApi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "761fb7fa9921c1d927545eb58082ef4a",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 91,
"avg_line_length": 26.036496350364963,
"alnum_prop": 0.6834875245304177,
"repo_name": "Wilimitis9/Inertia",
"id": "e764def7985acbb970da74fd0fdbff27ad2b49b1",
"size": "3567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InertiaApi/InertiaApi/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "46715"
},
{
"name": "Python",
"bytes": "10156"
}
],
"symlink_target": ""
} |
from numpy import average, std
def standardize_matrix(m):
"""
    Standardize each column of the matrix 'm' (given as a list of columns)
"""
averages = list(map(average, m))
stds = list(map(std, m))
return [list(map(lambda x: x if stds[i] == 0.0 else
(x - averages[i]) / stds[i], m[i]))
for i in range(len(m))] | {
"content_hash": "bbba1279e698b76806f707de3357ecd1",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 56,
"avg_line_length": 28.636363636363637,
"alnum_prop": 0.5777777777777777,
"repo_name": "moritz-wundke/pyMining",
"id": "577373d8158dc9513ea126806e946ba4cbae3627",
"size": "396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymining/utils/std.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13970"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(name='topojson',
version='0.2.0',
description='An extension to GeoJSON that encodes topology.',
url='https://github.com/osgn/python-topojson',
download_url='https://github.com/osgn/python-topojson/archive/0.2.0.tar.gz',
packages=['topojson', 'geojson'],
package_dir={
'topojson': 'src/topojson',
'geojson': 'src/geojson',
},
license='BSD',
) | {
"content_hash": "28c56b532c96e11be204234a91a12573",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 80,
"avg_line_length": 26.9375,
"alnum_prop": 0.6357308584686775,
"repo_name": "osgn/python-topojson",
"id": "1fd7af982487a275e1a7b1bdc408b38c3276e496",
"size": "431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33636"
}
],
"symlink_target": ""
} |
import http.server
import socketserver
PORT_NUMBER = 8080
# This class handles any incoming request from
# the browser
class myHandler(http.server.BaseHTTPRequestHandler):
#Handler for the GET requests
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
# Send the html message
self.wfile.write("Hello World !")
return
try:
#Create a web server and define the handler to manage the
#incoming request
#server = HTTPServer(('', PORT_NUMBER), myHandler)
server = socketserver.TCPServer(("", PORT_NUMBER), myHandler)
    print('Started httpserver on port', PORT_NUMBER)
    # Wait forever for incoming HTTP requests
server.serve_forever()
except KeyboardInterrupt:
print ('^C received, shutting down the web server')
server.socket.close()
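# Quick manual check while the server is running (hypothetical invocation):
#   curl http://localhost:8080/   ->  "Hello World !"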
| {
"content_hash": "2fa61d5192b6ff4e96f649fa7731425a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 65,
"avg_line_length": 28.70967741935484,
"alnum_prop": 0.6775280898876405,
"repo_name": "vivekzhere/scrachpad",
"id": "3a00945bf78d87fa8b8715e964a68411f21df3bf",
"size": "908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soft_arch/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "47758"
},
{
"name": "C++",
"bytes": "15554"
},
{
"name": "HTML",
"bytes": "10"
},
{
"name": "Java",
"bytes": "7536"
},
{
"name": "Makefile",
"bytes": "215"
},
{
"name": "Python",
"bytes": "4358"
}
],
"symlink_target": ""
} |
"""
Author: OMKAR PATHAK
Created On: 1st August 2017
"""
import inspect
def search(graph, start_vertex):
"""
Breadth first search algorithm
    :param graph: mapping of vertex -> set of adjacent vertices
    :param start_vertex: vertex to start the traversal from
    :return: set of visited vertices, or None if start_vertex is missing or has no neighbours
"""
    # bail out early when the start vertex is missing or has no neighbours
if start_vertex not in graph or graph[start_vertex] is None or graph[start_vertex] == []:
return None
# create a list to store all the vertexes for BFS and a set to store the visited vertices
visited, queue = set(), [start_vertex]
while queue:
vertex = queue.pop(0)
if vertex not in visited:
visited.add(vertex)
queue.extend(graph[vertex] - visited)
return visited
# TODO: Are these necessary?
def time_complexities():
"""
Return information on functions
time complexity
:return: string
"""
return "O(V + E) where V = Number of vertices and E = Number of Edges"
def get_code():
"""
easily retrieve the source code
of the function
:return: source code
"""
return inspect.getsource(search)
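# Hedged usage sketch (the sample graph is hypothetical, not from the original
# module): adjacency values must be sets, since search() relies on set
# difference when extending the queue.
if __name__ == '__main__':
    sample_graph = {
        'A': {'B', 'C'},
        'B': {'A', 'D'},
        'C': {'A'},
        'D': {'B'},
    }
    # visits every vertex reachable from 'A'
    print(search(sample_graph, 'A'))  # {'A', 'B', 'C', 'D'} (set order varies)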
| {
"content_hash": "63f4a84c74cdb1b25ab000f9d58cc655",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 93,
"avg_line_length": 22.224489795918366,
"alnum_prop": 0.6308539944903582,
"repo_name": "OmkarPathak/pygorithm",
"id": "1541577a624771b4eb900c1f7de250a260a8bb0d",
"size": "1089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygorithm/searching/breadth_first_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "218"
},
{
"name": "Python",
"bytes": "483492"
}
],
"symlink_target": ""
} |
import json
class ChivesMemory():
def __init__(self):
self.userGender = ""
self.userName = ""
class ChivesMemoryEncoderDecoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, ChivesMemory):
            # emit a dict keyed the way as_chives_memory() expects it
            return {'userGender': obj.userGender, 'userName': obj.userName}
        return json.JSONEncoder.default(self, obj)
    def as_chives_memory(self, dct):
        # an object_hook receives every decoded dict; pass through any dict
        # that is not a serialized ChivesMemory
        if 'userGender' not in dct or 'userName' not in dct:
            return dct
        mem = ChivesMemory()
        mem.userGender = dct['userGender']
        mem.userName = dct['userName']
return mem | {
"content_hash": "bb4bbbb85a23c927911fb1d5d3a12335",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 51,
"avg_line_length": 29.105263157894736,
"alnum_prop": 0.5949367088607594,
"repo_name": "edbrown23/Chives",
"id": "abeaceb87531c93220a2071cde397e1d80fa3f38",
"size": "553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ChivesMemory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10232"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models
class RecoDocError(Exception):
"""Exception thrown when a problem occurs with Py4J."""
pass
class Project(models.Model):
'''A project.'''
name = models.CharField(max_length=255, unique=True)
'''att.'''
url = models.URLField()
'''att.'''
dir_name = models.CharField(max_length=20, unique=True, db_index=True)
'''Directory name on the filesystem.'''
def __unicode__(self):
return self.name
class ProjectRelease(models.Model):
'''A project has many releases. '''
project = models.ForeignKey(Project)
'''att.'''
release = models.CharField(max_length=20)
'''att.'''
is_major = models.BooleanField(default=True)
'''att.'''
first_date = models.DateTimeField(null=True, blank=True)
'''att.'''
last_date = models.DateTimeField(null=True, blank=True)
'''att.'''
def __unicode__(self):
return '{0} {1}'.format(self.project.name, self.release)
class Person(models.Model):
'''A person asks and answers questions.'''
name = models.CharField(max_length=255, null=True, blank=True, default='')
'''real name. not used now.'''
email = models.EmailField(null=True, blank=True)
'''att.'''
nickname = models.CharField(max_length=255, null=True, blank=True,
default='', db_index=True)
'''nickname used on forums'''
contributor = models.BooleanField(default=False)
    '''Whether this person is a project contributor.'''
def __unicode__(self):
name = self.name
        if name is None:
name = '__NA__'
nickname = self.nickname
        if nickname is None:
nickname = ''
return '{0} ({1})'.format(nickname, name)
class SourceElement(models.Model):
'''A `SourceElement` represents anything that can be traced back to an
HTML document (e.g., a word in the documentation, a message in a forum or
a mailing list).'''
url = models.URLField(null=True, blank=True, max_length=500)
    '''URL of the source document.'''
file_path = models.CharField(max_length=500, null=True, blank=True)
    '''Path of the source file on disk.'''
xpath = models.CharField(max_length=500, null=True, blank=True)
    '''XPath locating the element within the document.'''
index_from = models.PositiveIntegerField(default=0, null=True, blank=True)
    '''Start index of the element within the source.'''
index_to = models.PositiveIntegerField(default=0, null=True, blank=True)
    '''End index of the element within the source.'''
class Meta:
abstract = True
| {
"content_hash": "616cf0019893fc7966f3372fc8304307",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 78,
"avg_line_length": 25.610526315789475,
"alnum_prop": 0.6165228113440198,
"repo_name": "bartdag/recodoc2",
"id": "30a5ecc466b9513363a96f27f17b0c5caa1e4146",
"size": "2433",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "recodoc2/apps/project/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5581"
},
{
"name": "HTML",
"bytes": "32211467"
},
{
"name": "Java",
"bytes": "13646"
},
{
"name": "Perl",
"bytes": "503"
},
{
"name": "Python",
"bytes": "717834"
}
],
"symlink_target": ""
} |
import unittest
import os
import sys
sys.path.append(os.path.join('../lib'))
from myStatistics import MyStatistics
class outputTest(unittest.TestCase):
def test_getFieldStdev(self):
ms = MyStatistics()
self.assertEqual(ms.getFieldStdev([1,1,1], None), 0, "getFieldStdev error")
def test_getFieldMean(self):
ms = MyStatistics()
self.assertEqual(ms.getFieldMean([1, 2, 3], None), 2, "getFieldMean error")
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "39ced5e5990053a7ab84f0c4dbff8804",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 83,
"avg_line_length": 26.05263157894737,
"alnum_prop": 0.6646464646464646,
"repo_name": "WemyJu/TOC_proj",
"id": "1e39d62194383a68469ad8de8c6fc44c3d2d5e36",
"size": "495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/myStatistics_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16897"
}
],
"symlink_target": ""
} |
"""Views of the ``django-metrics-dashboard`` app."""
from django.conf import settings
from django.views.generic import TemplateView
from django.utils.decorators import method_decorator
from metrics_dashboard.decorators import permission_required
from metrics_dashboard.widget_pool import dashboard_widget_pool
class PermissionRequiredViewMixin(object):
"""
Mixin to protect a view and require ``can_view_dashboard`` permission.
Permission will only be required if the ``DASHBOARD_REQUIRE_LOGIN``
setting is ``True``.
"""
@method_decorator(
permission_required('metrics_dashboard.can_view_dashboard'))
def dispatch(self, request, *args, **kwargs):
return super(PermissionRequiredViewMixin, self).dispatch(
request, *args, **kwargs)
class DashboardView(PermissionRequiredViewMixin, TemplateView):
"""
Main view of the app. Displays the metrics dashboard.
Widgets on the dashboard get loaded individually via AJAX calls against
the ``DashboardAPIWidgetView``.
It also loads socket.io and reloads an individual widget's template when
the widget's data has been updated. This means, once this view is loaded,
the page doesn't have to be refreshed at all. The widgets will simply
update themselves.
"""
template_name = 'metrics_dashboard/dashboard.html'
def get_context_data(self, **kwargs):
ctx = super(DashboardView, self).get_context_data(**kwargs)
widgets = dashboard_widget_pool.get_widgets()
ctx.update({
'widgets': widgets,
'SOCKETIO_HOST': settings.SOCKETIO_HOST,
})
return ctx
class DashboardAPIWidgetView(PermissionRequiredViewMixin, TemplateView):
"""
View to be called via AJAX. Returns the template of a widget.
This allows us to update widgets individually whenever their data has been
updated.
"""
def dispatch(self, request, *args, **kwargs):
self.widget = dashboard_widget_pool.get_widgets()[
kwargs.get('widget_name')]
return super(DashboardAPIWidgetView, self).dispatch(
request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(DashboardAPIWidgetView, self).get_context_data(**kwargs)
ctx.update(self.widget.get_context_data())
return ctx
def get_template_names(self):
return [self.widget.template_name, ]
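# A hypothetical URLconf wiring the two views above (the URL names and patterns
# are assumptions for illustration, not part of this app; only the
# ``widget_name`` kwarg is taken from the view code). The dashboard page then
# fetches each widget's markup via AJAX from the API view:
#
#     from django.conf.urls import url
#     from metrics_dashboard.views import (
#         DashboardView, DashboardAPIWidgetView)
#
#     urlpatterns = [
#         url(r'^$', DashboardView.as_view(), name='dashboard'),
#         url(r'^widget/(?P<widget_name>[\w-]+)/$',
#             DashboardAPIWidgetView.as_view(), name='dashboard_api_widget'),
#     ]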
| {
"content_hash": "3f02dcdd1e418f4abcb4ac1aba8aef69",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 78,
"avg_line_length": 34.614285714285714,
"alnum_prop": 0.6929426330994635,
"repo_name": "bitmazk/django-metrics-dashboard",
"id": "b030bb78a50268d8dde55ecee5324ef972325314",
"size": "2423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metrics_dashboard/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38836"
},
{
"name": "Shell",
"bytes": "5137"
}
],
"symlink_target": ""
} |
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0006_auto_20201021_0135'),
]
operations = [
migrations.RenameField(
model_name='item',
old_name='image',
new_name='thumbnail',
),
migrations.AlterField(
model_name='item',
name='description',
field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, verbose_name='Beskrivelse'),
),
migrations.AlterField(
model_name='item',
name='name',
field=models.CharField(max_length=50, verbose_name='Navn'),
),
]
| {
"content_hash": "05481e70739746c918de77488965e53d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 106,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.573170731707317,
"repo_name": "hackerspace-ntnu/website",
"id": "57c4f756cadf12b54c0c8e7eb754e8657654c1a3",
"size": "787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inventory/migrations/0007_auto_20201021_0154.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16771"
},
{
"name": "HTML",
"bytes": "235369"
},
{
"name": "JavaScript",
"bytes": "43249"
},
{
"name": "Python",
"bytes": "323186"
}
],
"symlink_target": ""
} |
import os
# Use the VERSION file to get version
version_file = os.path.join(os.path.dirname(__file__), 'src', 'graf', 'VERSION')
with open(version_file) as fh:
grafpython_version = fh.read().strip()
from setuptools import setup, find_packages
setup(
name = "graf-python",
description = "Python implementation of the Graph Annotation Framework (GrAF)",
version = grafpython_version,
url = "http://media.cidles.eu/poio/graf-python/",
#download_url = "https://s3.amazonaws.com/cidles/downloads/graf-python/graf-python-{0}.tar.gz".format(grafpython_version),
long_description = "Python implementation of the Graph Annotation Framework. (http://www.xces.org/ns/GrAF/1.0/)",
license = "Apache License, Version 2.0",
keywords = ['NLP', 'CL', 'natural language processing',
'computational linguistics', 'parsing', 'tagging',
'annotation', 'linguistics', 'language',
'natural language'],
maintainer = "Peter Bouda",
maintainer_email = "pbouda@cidles.eu",
author = "Peter Bouda",
author_email = "pbouda@cidles.eu",
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Text Processing',
'Topic :: Text Processing :: General',
'Topic :: Text Processing :: Indexing',
'Topic :: Text Processing :: Linguistic',
],
packages = [ 'graf' ],
package_dir = {'':'src'},
package_data = {'graf': ['VERSION']},
#install_requires=['PyYAML>=3.09'],
#test_suite = 'graf.test.simple',
) | {
"content_hash": "9840c586f732cfa8d73f86e6347ff113",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 126,
"avg_line_length": 42.19230769230769,
"alnum_prop": 0.6280765724703737,
"repo_name": "cidles/graf-python",
"id": "6be6437fa67fc30e10001c018d80a7d73250ace0",
"size": "2420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "5737"
},
{
"name": "Python",
"bytes": "130431"
},
{
"name": "Shell",
"bytes": "5340"
}
],
"symlink_target": ""
} |
"""Model directory methods."""
import os
import tensorflow as tf
def incrementing_model_dir(dirname: str, start: int = 0) -> str:
"""Create, given some `dirname`, an incrementing model directory.
Args:
dirname: The base directory name.
start: The starting integer.
Returns:
    A model directory `dirname/n`, where 'n' is one greater than the largest
    integer-named entry in `dirname`, or `start` if no such entry exists.
"""
if not tf.io.gfile.isdir(dirname):
return os.path.join(dirname, str(start))
files = tf.io.gfile.listdir(dirname)
integers = [int(f) for f in files if f.isdigit()]
return os.path.join(dirname, str(max(integers) + 1 if integers else start))
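# A usage sketch (assumes '/tmp/models' is writable and does not exist yet;
# note the function only returns a path, it does not create the directory):
#
#     incrementing_model_dir('/tmp/models')  # -> '/tmp/models/0'
#     # once '/tmp/models/0' has been created on the filesystem:
#     incrementing_model_dir('/tmp/models')  # -> '/tmp/models/1'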
| {
"content_hash": "66920292efc360e665dbd4194dad15db",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 80,
"avg_line_length": 30.047619047619047,
"alnum_prop": 0.6925515055467512,
"repo_name": "tensorflow/gnn",
"id": "f75be2253f29f2fc871ed065c999b53603431afb",
"size": "1324",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_gnn/runner/utils/model_dir.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2491"
},
{
"name": "Python",
"bytes": "1770047"
},
{
"name": "Shell",
"bytes": "3120"
},
{
"name": "Starlark",
"bytes": "47061"
}
],
"symlink_target": ""
} |
"""Define version."""
import pathlib
import subprocess
here = pathlib.Path(__file__).resolve().parent
__all__ = ["__version__", "__branch__"]
# read from VERSION file
with open(str(here / "VERSION")) as f:
__version__ = f.read().strip()
try:
__branch__ = (
subprocess.run(
["git", "branch", "--show-current"], capture_output=True, cwd=here
)
.stdout.strip()
.decode()
)
except Exception:
__branch__ = ""
if __branch__:
__version__ += "+" + __branch__
| {
"content_hash": "706480deab3ba28f5c3d2c7fd774b841",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 18.321428571428573,
"alnum_prop": 0.52046783625731,
"repo_name": "wright-group/PyCMDS",
"id": "1327365f96ebd1d56c23ab2732ef350bedf80b5c",
"size": "513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycmds/__version__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "212"
},
{
"name": "C",
"bytes": "15060"
},
{
"name": "C++",
"bytes": "43260"
},
{
"name": "Python",
"bytes": "516847"
}
],
"symlink_target": ""
} |
import nltk, os, pickle, time, sys, glob
from collections import defaultdict
from nltk.stem.snowball import EnglishStemmer
from invidx import Index
def read_as_string(filename):
with open(filename, 'r') as content_file:
content = content_file.read()
content = "".join(i for i in content if ord(i)<128)
return content
print(sys.argv)
if len(sys.argv) > 1:
if sys.argv[1] == "index":
time1 = time.time()
begin_doc_id = 0
index_id = 1
index = Index(nltk.word_tokenize, read_as_string, EnglishStemmer(), nltk.corpus.stopwords.words('english'))
for root,subf,files in os.walk("docs"):
for f in files:
try:
index.add(os.path.join(root,f))
                except Exception:
print("error indexing file: " + os.path.join(root,f))
if index.curr_docid() >= (begin_doc_id+100):
with open("pyindex"+str(index_id)+".pickle", 'w') as idx_file:
pickle.dump(index, idx_file)
index_id += 1
begin_doc_id = index.curr_docid()
index = Index(nltk.word_tokenize, read_as_string, EnglishStemmer(), nltk.corpus.stopwords.words('english'))
index.set_curr_docid(begin_doc_id)
with open("pyindex"+str(index_id)+".pickle", 'w') as idx_file:
pickle.dump(index, idx_file)
time2 = time.time()
print("index time: %g secs." % ((time2-time1),))
elif sys.argv[1] == "search":
idxfiles = glob.glob("pyindex*.pickle")
time4 = time.time()
res = []
for idxfile in idxfiles:
            with open(idxfile, 'rb') as idx_file:
index = pickle.load(idx_file)
res.extend(index.lookup(sys.argv[2]))
print(res)
time5 = time.time()
stime = time5-time4
print("search time: %g secs" % (stime,))
else:
print("usage: python invidx.py <index|search term>")
| {
"content_hash": "344590262f95a5b16665b7b7f620b32f",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 127,
"avg_line_length": 34.28813559322034,
"alnum_prop": 0.5477014335145823,
"repo_name": "JeffBezanson/julia_examples",
"id": "b60c526926b131797ce9ace696118ab1f0226421",
"size": "2023",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dinvidx/pyinvidx/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Julia",
"bytes": "17067"
},
{
"name": "Python",
"bytes": "3641"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from traits.api import Instance, Button, Bool, Str, List, provides, Property
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.external_pipette.protocol import IPipetteManager
#from pychron.hardware.apis_controller import ApisController
from pychron.managers.manager import Manager
class InvalidPipetteError(Exception):
def __init__(self, name, av):
self.available = '\n'.join(av)
self.name = name
def __repr__(self):
return 'Invalid Pipette name={} av={}'.format(self.name, self.available)
def __str__(self):
return repr(self)
@provides(IPipetteManager)
class SimpleApisManager(Manager):
controller = Instance('pychron.hardware.apis_controller.ApisController')
test_command = Str
test_command_response = Str
clear_test_response_button = Button
test_button = Button
testing = Bool
test_script_button = Button
display_response_info = Bool(True)
test_enabled = Property
available_pipettes = List
available_blanks = List
mode = 'client'
#for unittesting
_timeout_flag = False
def test_connection(self):
return self.controller.test_connection()
def set_extract_state(self, state, *args, **kw):
pass
def finish_loading(self):
blanks = self.controller.get_available_blanks()
airs = self.controller.get_available_airs()
if blanks:
self.available_blanks = blanks.split('\r')
if airs:
self.available_pipettes = airs.split('\r')
#setup linking
# v = self.controller.isolation_valve
# elm = self.application.get_service('pychron.extraction_line.extraction_line_manager.ExtractionLineManager')
# print 'exception', elm
# print v
# if elm:
# elm.link_valve_actuation(v, self.isolation_valve_state_change)
# else:
# self.warning('could not find Extraction Line Manager. Needed for valve actuation linking')
# def isolation_valve_state_change(self, name, action):
# self.controller.set_external_pumping(action == 'open')
def bind_preferences(self, prefid):
pass
def load_pipette_non_blocking(self, *args, **kw):
func = 'load_pipette'
# self.controller.set_external_pumping()
ret = self._load_pipette(self.available_pipettes, func, block=False, *args, **kw)
# self.controller.set_external_pumping()
return ret
def load_blank_non_blocking(self, *args, **kw):
func = 'load_blank'
# self.controller.set_external_pumping()
ret = self._load_pipette(self.available_blanks, func, block=False, *args, **kw)
# self.controller.set_external_pumping()
return ret
def load_pipette(self, *args, **kw):
func = 'load_pipette'
# self.controller.set_external_pumping()
ret = self._load_pipette(self.available_pipettes, func, *args, **kw)
# self.controller.set_external_pumping()
return ret
def load_blank(self, *args, **kw):
func = 'load_blank'
# self.controller.set_external_pumping()
ret = self._load_pipette(self.available_blanks, func, *args, **kw)
# self.controller.set_external_pumping()
return ret
#private
def _load_pipette(self, av, func, name, script=None, block=True, timeout=10, period=1):
if script is None:
self.debug('Script is none. check ExtractionPyScript.extract_pipette')
raise NotImplementedError
name = str(name)
if name not in av:
raise InvalidPipetteError(name, av)
func = getattr(self.controller, func)
func(name)
if block:
#wait for completion
return self._loading_complete(script, timeout=timeout, period=period)
else:
return True
def _loading_complete(self, script, **kw):
if self._timeout_flag:
return True
else:
return self.controller.script_loading_block(script, **kw)
def _test_script_button_fired(self):
self.testing = True
from pychron.pyscripts.extraction_line_pyscript import ExtractionPyScript
e = ExtractionPyScript(manager=self)
e.setup_context(extract_device='',
analysis_type='blank')
# e.extract_pipette('Blank AC pt1 cc', timeout=120)
e.extract_pipette('Blank Air pt1 cc', timeout=120)
# e.extract_pipette(self.available_pipettes[0], timeout=3)
self.testing = False
    def _test_command_changed(self):
self._execute_test_command()
def _test_button_fired(self):
self._execute_test_command()
def _execute_test_command(self):
cmd = self._assemble_command()
if cmd:
if self.controller.is_connected():
resp = self.controller.ask(cmd)
r = resp if resp else 'No Response'
else:
resp = ''
r = 'No Connection'
tcr = '{}\n{} >> {}'.format(self.test_command_response, cmd, r)
if self.display_response_info:
tcr = '{}\n\tresponse length={}'.format(tcr, len(resp))
self.test_command_response = tcr
def _assemble_command(self):
cmd = self.test_command
if cmd.strip().endswith(','):
return
return cmd
def _controller_default(self):
from pychron.hardware.apis_controller import ApisController
v = ApisController(name='apis_controller')
return v
def _get_test_enabled(self):
return self.test_command and not self.testing
# class ApisManager(Manager):
# implements(IPipetteManager)
# controller = Instance(ApisController)
#
# available_pipettes = List(['1', '2'])
#
# #testing buttons
# test_load_1 = Button('Test Load 1')
# testing = Bool
# test_result = Str
#
# test_script_button = Button('Test Script')
#
# reload_canvas_button = Button('Reload Canvas')
#
# _timeout_flag = False
# canvas = Instance('pychron.canvas.canvas2D.extraction_line_canvas2D.ExtractionLineCanvas2D')
# valve_manager = Instance('pychron.extraction_line.valve_manager.ValveManager')
# mode = 'normal'
#
# def finish_loading(self):
# from pychron.extraction_line.valve_manager import ValveManager
#
# vm = ValveManager(extraction_line_manager=self)
# vm.load_valves_from_file('apis_valves.xml')
# for v in vm.valves.values():
# v.actuator = self.controller
#
# self.valve_manager = vm
# for p in vm.pipette_trackers:
# p.load()
# self._set_pipette_counts(p.name, p.counts)
#
# def open_valve(self, name, **kw):
# return self._change_valve_state(name, 'normal', 'open')
#
# def close_valve(self, name, **kw):
# return self._change_valve_state(name, 'normal', 'close')
#
# def set_selected_explanation_item(self, name):
# pass
#
#
#
# def set_extract_state(self, state):
# pass
#
# def load_pipette(self, name, timeout=10, period=1):
# name = str(name)
# if not name in self.available_pipettes:
# raise InvalidPipetteError(name)
#
# self.controller.load_pipette(name)
#
# #wait for completion
# return self._loading_complete(timeout=timeout, period=period)
#
# #private
# def _loading_complete(self, **kw):
# if self._timeout_flag:
# return True
# else:
# return self.controller.blocking_poll('get_loading_status', **kw)
#
# #testing buttons
# def _test_load_1_fired(self):
# self.debug('Test load 1 fired')
# self.testing = True
# self.test_result = ''
# try:
# ret = self.load_pipette('1', timeout=3)
# self.test_result = 'OK'
# except (TimeoutError, InvalidPipetteError), e:
# self.test_result = str(e)
# # self.test_result = 'OK' if ret else 'Failed'
# self.testing = False
#
# def _test_script_button_fired(self):
# self.testing = True
# from pychron.pyscripts.extraction_line_pyscript import ExtractionPyScript
#
# e = ExtractionPyScript(manager=self)
# e.setup_context(extract_device='')
# e.extract_pipette(1, timeout=3)
# self.testing = False
# def _change_valve_state(self, name, mode, action):
# result, change = False, False
# func = getattr(self.valve_manager, '{}_by_name'.format(action))
# ret = func(name, mode=mode)
# if ret:
# result, change = ret
# if isinstance(result, bool):
# if change:
# self.canvas.update_valve_state(name, True if action == 'open' else False)
# self.canvas.request_redraw()
#
# return result, change
#
# def _set_pipette_counts(self, name, value):
# c = self.canvas
# obj = c.scene.get_item('vlabel_{}'.format(name))
# if obj is not None:
# obj.value = value
# c.request_redraw()
# def _load_canvas(self, c):
# c.load_canvas_file('apis_canvas_config.xml',
# setup_name='apis_canvas')
# @on_trait_change('valve_manager:pipette_trackers:counts')
# def _update_pipette_counts(self, obj, name, old, new):
# self._set_pipette_counts(obj.name, new)
#
# def _reload_canvas_button_fired(self):
# self._load_canvas(self.canvas)
# self.canvas.request_redraw()
#
# def _canvas_default(self):
# from pychron.canvas.canvas2D.extraction_line_canvas2D import ExtractionLineCanvas2D
#
# c = ExtractionLineCanvas2D(manager=self)
# self._load_canvas(c)
# return c
# ============= EOF =============================================
| {
"content_hash": "f11eb6eb8f396a66f8b53e06b0b6e4ff",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 121,
"avg_line_length": 35.99669966996699,
"alnum_prop": 0.5468048042541487,
"repo_name": "UManPychron/pychron",
"id": "c15ccc0d04ff34352ce2f33b2d1a7ce62e3a1334",
"size": "11709",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pychron/external_pipette/apis_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "279"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "40346"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10234954"
},
{
"name": "Shell",
"bytes": "10753"
}
],
"symlink_target": ""
} |
#!python3
# -*- coding:utf-8 -*-
import os
import sys
import time
import ctypes
import shutil
import subprocess
IsPy3 = sys.version_info[0] >= 3
if IsPy3:
import winreg
else:
import codecs
import _winreg as winreg
BuildType = 'Release'
IsRebuild = True
Build = 'Rebuild'
Update = False
Copy = False
CleanAll = False
BuildTimeout = 30*60
Bit = 'Win32'
Dlllib = 'dll'
MSBuild = None
IncrediBuild = None
UseMSBuild = True # build with MSBuild by default; if False, build with IncrediBuild
# Only the 5 variables below need to change for a different project
SlnFile = '../BigNumber.sln' # path relative to this script
UpdateDir = [] # paths relative to this script; leave empty to skip updating
ExecBatList = [] # scripts to run before building, relative to this script; may be empty; each bat is executed after cd'ing into its directory
MSBuildFirstProjects = [r'BigNumber'] # for MSBuild: project paths as they appear in the solution (sln)
# projects that MSBuild builds first; leave empty to not enforce an order
IncrediBuildFirstProjects = ['BigNumber'] # for IncrediBuild: just the project names
# projects that IncrediBuild builds first; leave empty to not enforce an order
class ConsoleColor():
'''This class defines the values of color for printing on console window'''
Black = 0
DarkBlue = 1
DarkGreen = 2
DarkCyan = 3
DarkRed = 4
DarkMagenta = 5
DarkYellow = 6
Gray = 7
DarkGray = 8
Blue = 9
Green = 10
Cyan = 11
Red = 12
Magenta = 13
Yellow = 14
White = 15
class Coord(ctypes.Structure):
_fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
class SmallRect(ctypes.Structure):
_fields_ = [('Left', ctypes.c_short),
('Top', ctypes.c_short),
('Right', ctypes.c_short),
('Bottom', ctypes.c_short),
]
class ConsoleScreenBufferInfo(ctypes.Structure):
_fields_ = [('dwSize', Coord),
('dwCursorPosition', Coord),
('wAttributes', ctypes.c_uint),
('srWindow', SmallRect),
('dwMaximumWindowSize', Coord),
]
class Win32API():
'''Some native methods for python calling'''
StdOutputHandle = -11
ConsoleOutputHandle = None
DefaultColor = None
@staticmethod
def SetConsoleColor(color):
'''Change the text color on console window'''
if not Win32API.DefaultColor:
if not Win32API.ConsoleOutputHandle:
Win32API.ConsoleOutputHandle = ctypes.windll.kernel32.GetStdHandle(Win32API.StdOutputHandle)
bufferInfo = ConsoleScreenBufferInfo()
ctypes.windll.kernel32.GetConsoleScreenBufferInfo(Win32API.ConsoleOutputHandle, ctypes.byref(bufferInfo))
Win32API.DefaultColor = int(bufferInfo.wAttributes & 0xFF)
if IsPy3:
sys.stdout.flush() # need flush stdout in python 3
ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, color)
@staticmethod
def ResetConsoleColor():
'''Reset the default text color on console window'''
if IsPy3:
sys.stdout.flush() # need flush stdout in python 3
ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, Win32API.DefaultColor)
class Logger():
LogFile = '@AutomationLog.txt'
LineSep = '\n'
@staticmethod
def Write(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
if printToStdout:
isValidColor = (consoleColor >= ConsoleColor.Black and consoleColor <= ConsoleColor.White)
if isValidColor:
Win32API.SetConsoleColor(consoleColor)
try:
sys.stdout.write(log)
except UnicodeError as e:
Win32API.SetConsoleColor(ConsoleColor.Red)
isValidColor = True
sys.stdout.write(str(type(e)) + ' can\'t print the log!\n')
if isValidColor:
Win32API.ResetConsoleColor()
if not writeToFile:
return
if IsPy3:
logFile = open(Logger.LogFile, 'a+', encoding = 'utf-8')
else:
logFile = codecs.open(Logger.LogFile, 'a+', 'utf-8')
try:
logFile.write(log)
# logFile.flush() # need flush in python 3, otherwise log won't be saved
except Exception as ex:
logFile.close()
sys.stdout.write('can not write log with exception: {0} {1}'.format(type(ex), ex))
@staticmethod
def WriteLine(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
Logger.Write(log + Logger.LineSep, consoleColor, writeToFile, printToStdout)
@staticmethod
def Log(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
t = time.localtime()
log = '{0}-{1:02}-{2:02} {3:02}:{4:02}:{5:02} - {6}{7}'.format(t.tm_year, t.tm_mon, t.tm_mday,
t.tm_hour, t.tm_min, t.tm_sec, log, Logger.LineSep)
Logger.Write(log, consoleColor, writeToFile, printToStdout)
@staticmethod
def DeleteLog():
if os.path.exists(Logger.LogFile):
os.remove(Logger.LogFile)
def GetMSBuildPath():
if Bit == 'Win32':
cmd = 'call "%VS120COMNTOOLS%..\\..\\VC\\vcvarsall.bat" x86\nwhere msbuild'
elif Bit == 'x64':
cmd = 'call "%VS120COMNTOOLS%..\\..\\VC\\vcvarsall.bat" amd64\nwhere msbuild'
ftemp = open('GetMSBuildPath.bat', 'wt')
ftemp.write(cmd)
ftemp.close()
p = subprocess.Popen('GetMSBuildPath.bat', stdout = subprocess.PIPE)
p.wait()
lines = p.stdout.read().decode().splitlines()
os.remove('GetMSBuildPath.bat')
for line in lines:
if 'MSBuild.exe' in line:
return line
def GetIncrediBuildPath():
try:
key=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Classes\IncrediBuild.MonitorFile\shell\open\command')
value, typeId = winreg.QueryValueEx(key, '')
if value:
start = value.find('"')
end = value.find('"', start + 1)
path = value[start+1:end]
buildConsole = os.path.join(os.path.dirname(path), 'BuildConsole.exe')
return buildConsole
except FileNotFoundError as e:
Logger.WriteLine('can not find IncrediBuild', ConsoleColor.Red)
def UpdateCode():
# put git to path first
if not shutil.which('git.exe'):
        Logger.Log('can not find git.exe. Please make sure the git bin directory was added to the PATH environment variable when installing git!!!\nSkip updating code!!!', ConsoleColor.Yellow)
        return False
oldDir = os.getcwd()
for dir in UpdateDir:
os.chdir(dir)
ret = os.system('git pull')
os.chdir(oldDir)
if ret != 0:
Logger.Log('update {0} failed'.format(dir), ConsoleColor.Yellow)
            return False
return True
def BuildProject(cmd):
for i in range(6):
Logger.WriteLine(cmd, ConsoleColor.Cyan)
buildFailed = True
startTime = time.time()
        p = subprocess.Popen(cmd) # IncrediBuild cannot use stdout=subprocess.PIPE; it would make p.wait() never return, probably an IncrediBuild bug
if IsPy3:
try:
buildFailed = p.wait(BuildTimeout)
except subprocess.TimeoutExpired as e:
Logger.Log('{0}'.format(e), ConsoleColor.Yellow)
p.kill()
else:
buildFailed = p.wait()
if not UseMSBuild:
            # IncrediBuild's exit code does not indicate whether the build succeeded; parse the output instead
fin = open('IncrediBuild.log')
for line in fin:
if line.startswith('=========='):
Logger.Write(line, ConsoleColor.Cyan, writeToFile = True if IsPy3 else False)
if IsPy3:
                        start = line.find('失败') + 3 # summary line: "========== 生成: 成功 1 个,失败 0 个,最新 0 个,跳过 0 个 =========="; the failed count follows the localized word '失败' ("failed")
                    else: # ugly special-case handling for py2 compatibility
start = 0
n2 = 0
while 1:
if line[start].isdigit():
n2 += 1
if n2 == 2:
break
start = line.find(' ', start)
start += 1
end = line.find(' ', start)
failCount = int(line[start:end])
buildFailed = failCount > 0
else:
Logger.Write(line, ConsoleColor.Red, writeToFile = True if IsPy3 else False, printToStdout = True if ' error ' in line else False)
fin.close()
costTime = time.time() - startTime
Logger.WriteLine('build cost time: {0:.1f}s\n'.format(costTime), ConsoleColor.Green)
if not buildFailed:
return True
return False
def BuildAllProjects():
buildSuccess = False
cmds = []
if UseMSBuild:
if IsRebuild:
if CleanAll:
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Debug'))
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Release'))
else:
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType))
for project in MSBuildFirstProjects:
cmds.append('{0} {1} /t:{2} /p:Configuration={3};platform={4} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, project, BuildType, Bit))
cmds.append('{0} {1} /p:Configuration={2};platform={3} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType, Bit))
else: #IncrediBuild
if IsRebuild:
if CleanAll:
cmds.append('"{0}" {1} /clean /cfg="{2}|{3}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Debug', Bit))
cmds.append('"{0}" {1} /clean /cfg="{2}|{3}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Release', Bit))
else:
cmds.append('"{0}" {1} /clean /cfg="{2}|{3}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType, Bit))
for project in IncrediBuildFirstProjects:
cmds.append('"{0}" {1} /build /prj={2} /cfg="{3}|{4}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, project, BuildType, Bit))
cmds.append('"{0}" {1} /build /cfg="{2}|{3}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType, Bit))
for cmd in cmds:
buildSuccess = BuildProject(cmd)
if not buildSuccess:
break
return buildSuccess
def main():
if UseMSBuild:
if not os.path.exists(MSBuild):
Logger.Log('can not find msbuild.exe', ConsoleColor.Red)
return 1
else:
if not os.path.exists(IncrediBuild):
            Logger.Log('can not find BuildConsole.exe', ConsoleColor.Red)
return 1
dir = os.path.dirname(__file__)
if dir:
oldDir = os.getcwd()
os.chdir(dir)
if Update:
if not UpdateCode():
return 1
Logger.Log('git update succeed', ConsoleColor.Green)
if Copy:
for bat in ExecBatList:
oldBatDir = os.getcwd()
batDir = os.path.dirname(bat)
batName = os.path.basename(bat)
if batDir:
os.chdir(batDir)
start = time.clock()
os.system(batName)
Logger.Log('run "{}" cost {:.1f} seconds'.format(batName, time.clock() - start), ConsoleColor.Green)
if batDir:
os.chdir(oldBatDir)
buildSuccess = BuildAllProjects()
if buildSuccess:
Logger.Log('build succeed', ConsoleColor.Green)
else:
Logger.Log('build failed', ConsoleColor.Red)
if dir:
os.chdir(oldDir)
return 0 if buildSuccess else 1
if __name__ == '__main__':
Logger.Log('run with argv ' + str(sys.argv), ConsoleColor.Green)
sys.argv = [x.lower() for x in sys.argv]
start_time = time.time()
if 'debug' in sys.argv:
BuildType = 'Debug'
if 'lib' in sys.argv:
Dlllib = 'lib'
SlnFile = '../BigNumber_lib.sln'
MSBuildFirstProjects = [r'BigNumber_lib']
IncrediBuildFirstProjects = ['BigNumber_lib']
if '64' in sys.argv:
Bit = 'x64'
if 'build' in sys.argv:
IsRebuild = False
Build = 'Build'
if 'update' in sys.argv:
Update = True
if 'copy' in sys.argv:
Copy = True
if 'clean' in sys.argv:
CleanAll = True
if 'incredibuild' in sys.argv:
UseMSBuild = False
if UseMSBuild:
MSBuild = GetMSBuildPath()
if not MSBuild:
Logger.Log('can not find MSBuild.exe', ConsoleColor.Red)
exit(1)
else:
IncrediBuild = GetIncrediBuildPath()
if not IncrediBuild:
Logger.Log('can not find BuildConsole.exe', ConsoleColor.Red)
exit(1)
cwd = os.getcwd()
Logger.WriteLine('current dir is: {0}, {1}: {2}'.format(cwd, Build, BuildType))
ret = main()
end_time = time.time()
cost_time = end_time-start_time
Logger.WriteLine('all build cost time: {0:.2f} seconds'.format(cost_time), ConsoleColor.Green)
exit(ret) | {
"content_hash": "bdded7c847c1076116694b916985087d",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 195,
"avg_line_length": 38.23098591549296,
"alnum_prop": 0.5922487474211612,
"repo_name": "xylsxyls/xueyelingshuang",
"id": "b9831392ee971e222a5a5279af1c90546d0c4ac4",
"size": "14078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/BigNumber/scripts/rebuild_BigNumber.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "70916"
},
{
"name": "C",
"bytes": "15759114"
},
{
"name": "C++",
"bytes": "10113598"
},
{
"name": "CMake",
"bytes": "226509"
},
{
"name": "COBOL",
"bytes": "20676"
},
{
"name": "HTML",
"bytes": "417"
},
{
"name": "Makefile",
"bytes": "303"
},
{
"name": "Python",
"bytes": "1481199"
},
{
"name": "QML",
"bytes": "266"
},
{
"name": "Shell",
"bytes": "93441"
}
],
"symlink_target": ""
} |
"""The default Windows Registry plugin."""
from plaso.events import windows_events
from plaso.lib import utils
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
class DefaultPlugin(interface.KeyPlugin):
"""Default plugin that extracts minimum information from every registry key.
The default plugin will parse every registry key that is passed to it and
extract minimum information, such as a list of available values and if
possible content of those values. The timestamp used is the timestamp
when the registry key was last modified.
"""
NAME = 'winreg_default'
DESCRIPTION = u'Parser for Registry data.'
REG_TYPE = 'any'
REG_KEYS = []
# This is a special case, plugins normally never overwrite the priority.
# However the default plugin should only run when all others plugins have
# tried and failed.
WEIGHT = 3
def GetEntries(
self, parser_context, key=None, registry_type=None, **unused_kwargs):
"""Returns an event object based on a Registry key name and values.
Args:
parser_context: A parser context object (instance of ParserContext).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
"""
text_dict = {}
if key.number_of_values == 0:
text_dict[u'Value'] = u'No values stored in key.'
else:
for value in key.GetValues():
if not value.name:
value_name = '(default)'
else:
value_name = u'{0:s}'.format(value.name)
if value.data is None:
value_string = u'[{0:s}] Empty'.format(
value.data_type_string)
elif value.DataIsString():
string_decode = utils.GetUnicodeString(value.data)
value_string = u'[{0:s}] {1:s}'.format(
value.data_type_string, string_decode)
elif value.DataIsInteger():
value_string = u'[{0:s}] {1:d}'.format(
value.data_type_string, value.data)
elif value.DataIsMultiString():
if type(value.data) not in (list, tuple):
value_string = u'[{0:s}]'.format(value.data_type_string)
# TODO: Add a flag or some sort of an anomaly alert.
else:
value_string = u'[{0:s}] {1:s}'.format(
value.data_type_string, u''.join(value.data))
else:
value_string = u'[{0:s}]'.format(value.data_type_string)
text_dict[value_name] = value_string
event_object = windows_events.WindowsRegistryEvent(
key.last_written_timestamp, key.path, text_dict,
offset=key.offset, registry_type=registry_type)
parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
# Even though the DefaultPlugin is derived from KeyPlugin it needs to
# overwrite the Process function to make sure it is called when no other
# plugin is available.
def Process(
self, parser_context, key=None, registry_type=None, **kwargs):
"""Process the key and return a generator to extract event objects.
Args:
parser_context: A parser context object (instance of ParserContext).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
"""
# Note that we should NOT call the Process function of the KeyPlugin here.
self.GetEntries(
parser_context, key=key, registry_type=registry_type, **kwargs)
winreg.WinRegistryParser.RegisterPlugin(DefaultPlugin)
| {
"content_hash": "be713647ceaa580feb27a3776aa62bf4",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 37.11340206185567,
"alnum_prop": 0.6647222222222222,
"repo_name": "cvandeplas/plaso",
"id": "6914f4b9c99b7eec4d3417a5f62aa2ca4cb450fc",
"size": "4298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/parsers/winreg_plugins/default.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2812257"
},
{
"name": "Shell",
"bytes": "22724"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
import re
import six
from storops.lib.common import cache
from storops.lib.parser import ParserConfigFactory, OutputParser
__author__ = 'Cedric Zhuang'
log = logging.getLogger(__name__)
class UnityParserConfigFactory(ParserConfigFactory):
@classmethod
def get_parser_clz(cls, data_src):
if data_src is None or data_src == 'rest':
ret = UnityRestParser
else:
raise ValueError('data_src {} not supported.'.format(data_src))
return ret
_factory_singleton = UnityParserConfigFactory()
@cache
def get_unity_parser(name):
return _factory_singleton.get(name)
class UnityRestParser(OutputParser):
data_src = 'rest'
def __init__(self):
super(UnityRestParser, self).__init__()
self.name = None
def parse_all(self, output, properties=None):
try:
output = output.contents
except AttributeError:
pass
return output
def parse(self, output, properties=None):
try:
output = output.first_content
except AttributeError:
pass
return self._parse_object(output, properties=None,
preloaded_props=properties)
def _parse_object(self, obj, properties=None, preloaded_props=None):
if properties is None:
properties = self.properties
ret = {}
for p in properties:
if isinstance(obj, list):
log.error('cannot parse list: {}. '
'a list converter must be specified.'.format(obj))
continue
if p.label in obj.keys():
value = p.convert(obj[p.label])
if preloaded_props is not None and isinstance(
preloaded_props, NestedProperties):
subtree = preloaded_props.get_child_subtree(p.key)
if (subtree is not None and
hasattr(value, 'set_preloaded_properties')):
value.set_preloaded_properties(subtree)
ret[p.key] = value
return ret
def init_from_config(self, config):
self.name = config.name
class NestedProperty(object):
def __init__(self, key):
self.key = key
@property
def label(self):
return self.under_score_to_camel_case(self.key)
@classmethod
def under_score_to_camel_case(cls, value):
ret = re.sub(r'_([a-z])', lambda a: a.group(1).upper(), value)
return ret
def get_first_level_key(self):
return self.key.split('.')[0]
def remove_first_level_key(self):
pos = self.key.find('.')
if pos >= 0:
return self.key[pos + 1:]
else:
return None
class NestedProperties(object):
def __init__(self, *keys):
        # materialize as a list; the properties are iterated more than once
        self._props = [NestedProperty(key) for key in keys]
self._map = None
self._query_fields = None
@classmethod
def build(cls, properties):
ret = None
if not properties:
ret = None
elif isinstance(properties, six.text_type):
ret = NestedProperties(properties)
elif isinstance(properties, (list, tuple, set)):
ret = NestedProperties(*properties)
else:
log.error('invalid properties {} to build NestedProperties '
'object.'.format(properties))
return ret
@property
def _prop_map(self):
if not self._map:
            prop_map = {}
            for p in self._props:
                key = p.get_first_level_key()
                child_prop = p.remove_first_level_key()
                if child_prop is not None:
                    prop_map.setdefault(key, []).append(child_prop)
                else:
                    prop_map.setdefault(key, [])
            self._map = prop_map
return self._map
def get_properties(self):
return self._prop_map.keys()
def get_child_subtree(self, prop):
if prop not in self._prop_map:
return None
if len(self._prop_map[prop]) == 0:
return None
return NestedProperties.build(self._prop_map[prop])
@property
def query_fields(self):
if self._query_fields is None:
self._query_fields = [a.label for a in self._props]
return tuple(self._query_fields)
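# A usage sketch (the property names are made up for illustration):
#
#     props = NestedProperties.build(['io_limit_policy.name', 'health'])
#     props.query_fields                    # -> ('ioLimitPolicy.name', 'health')
#     props.get_child_subtree('io_limit_policy').query_fields  # -> ('name',)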
| {
"content_hash": "960c28e69a7a33a7513b613d6c8ad977",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 76,
"avg_line_length": 29.05263157894737,
"alnum_prop": 0.5618206521739131,
"repo_name": "emc-openstack/storops",
"id": "27273ec5fdf8c23ec90e83a5838f4f37ce1d91a5",
"size": "5066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storops/unity/parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1807840"
},
{
"name": "Shell",
"bytes": "3895"
}
],
"symlink_target": ""
} |
__author__ = 'jcorbett'
from mongoengine import *
class ProjectReference(EmbeddedDocument):
id = ObjectIdField()
name = StringField()
| {
"content_hash": "909a40aef576b38aa2b769b4eaeeae39",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 41,
"avg_line_length": 16.22222222222222,
"alnum_prop": 0.6986301369863014,
"repo_name": "slickqa/slickqaweb",
"id": "49084d5e5fe2de7b6702bf5f665660822150e44b",
"size": "146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slickqaweb/model/projectReference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4471266"
},
{
"name": "Dockerfile",
"bytes": "4478"
},
{
"name": "HTML",
"bytes": "523433"
},
{
"name": "JavaScript",
"bytes": "2937024"
},
{
"name": "Python",
"bytes": "222075"
},
{
"name": "Ruby",
"bytes": "36812"
},
{
"name": "Shell",
"bytes": "3211"
}
],
"symlink_target": ""
} |
from django import template
register = template.Library()
@register.inclusion_tag('moon_tracker/scan_result.html')
def display_scan(scan, classes=""):
return {
'scan': scan,
'classes': classes
}
| {
"content_hash": "8d77d6a5d863aeefd6932bc9c4e81c81",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 18.583333333333332,
"alnum_prop": 0.6502242152466368,
"repo_name": "StephenSwat/eve_lunar_mining_organiser",
"id": "ae47d660404a3c78fa0837500d55da4ceb775712",
"size": "223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elmo/moon_tracker/templatetags/result_display.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6345"
},
{
"name": "HTML",
"bytes": "24410"
},
{
"name": "Python",
"bytes": "49457"
}
],
"symlink_target": ""
} |
""" jabber """
import sys
import sleekxmpp
import zmq
from core.config import settings
from core.config.settings import logger
from core.people import person
from core.config.settings import REDIS
import redis
import atexit
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
SERVICE_NAME = 'jabber'
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf8')
else:
raw_input = input
class SendSubscriptionBot(sleekxmpp.ClientXMPP):
"""
A basic SleekXMPP bot that will log in, send a subscription,
and then log out.
"""
pfrom = None
msg = None
def __init__(self, jid, password, recipient):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.pfrom = jid
self.recipient = recipient
self.add_event_handler("session_start", self.start)
def start(self, event):
self.send_presence_subscription(
pfrom=self.pfrom, pto=self.recipient
)
self.disconnect(wait=True)
class SendMsgBot(sleekxmpp.ClientXMPP):
"""
A basic SleekXMPP bot that will log in, send a message,
and then log out.
"""
def __init__(self, jid, password, recipient, message):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
# The message we wish to send, and the JID that
# will receive it.
self.recipient = recipient
self.msg = message
# The session_start event will be triggered when
# the bot establishes its connection with the server
# and the XML streams are ready for use. We want to
        # listen for this event so that we can initialize
# our roster.
self.add_event_handler("session_start", self.start)
def start(self, event):
"""
Process the session_start event.
Typical actions for the session_start event are
        requesting the roster and broadcasting an initial
presence stanza.
Arguments:
event -- An empty dictionary. The session_start
event does not provide any additional
data.
"""
self.send_presence()
self.get_roster()
self.send_message(mto=self.recipient,
mbody=self.msg,
mtype='chat')
# Using wait=True ensures that the send queue will be
# emptied before ending the session.
self.disconnect(wait=True)
class EchoBot(sleekxmpp.ClientXMPP):
sock = None
nick = settings.MY_NAME
_redis = None
context = None
"""
A simple SleekXMPP bot that will echo messages it
receives, along with a short thank you message.
"""
def __init__(self, jid, password):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.auto_subscribe = True
self.auto_authorize = True
# The session_start event will be triggered when
# the bot establishes its connection with the server
# and the XML streams are ready for use. We want to
        # listen for this event so that we can initialize
# our roster.
self.add_event_handler("session_start", self.start)
# The message event is triggered whenever a message
# stanza is received. Be aware that that includes
# MUC messages and error messages.
self.add_event_handler("message", self.message)
self.add_event_handler("presence_available", self.presence_available)
self.add_event_handler("groupchat_presence", self.muc_presence)
self.add_event_handler("groupchat_message", self.muc_message)
self.add_event_handler("groupchat_invite", self.accept_invite)
self.add_event_handler(
"groupchat_direct_invite",
self.accept_direct_invite
)
self._redis = redis.Redis(
host=REDIS['host'],
# password=REDIS['password'],
# unix_socket_path=REDIS['socket']
)
# send and listen commands
self.context = zmq.Context()
self.sock = self.context.socket(zmq.REQ)
self.sock.connect('ipc:///tmp/smarty-jabber')
def start(self, event):
"""
Process the session_start event.
Typical actions for the session_start event are
        requesting the roster and broadcasting an initial
presence stanza.
Arguments:
event -- An empty dictionary. The session_start
event does not provide any additional
data.
"""
self.send_presence()
self.get_roster()
#self._start_thread("chat_send", self.chat_send)
def message(self, msg):
"""
Process incoming message stanzas. Be aware that this also
includes MUC messages and error messages. It is usually
a good idea to check the messages's type before processing
or sending replies.
Arguments:
msg -- The received message stanza. See the documentation
for stanza objects and the Message stanza to see
how it may be used.
"""
if msg['from'] != self.nick and msg['type'] != 'groupchat' and len(msg['body'].strip()) != 0:
pass
# logger.info("received: %s from %s of type %s resource %s",
# msg['body'],
# msg['from'],
# msg['type'],
# self.resource)
#temporary
person.update_list_from_jabber(
self.roster[settings.MY_ACCOUNTS['gmail']['email']]
)
self.sock.send_json({
'request': msg['body'],
'from': SERVICE_NAME,
'type': 'request',
'sender': str(msg['from'])
})
res_obj = self.sock.recv_json()
if isinstance(res_obj, dict):
# any string (the plain text message body)
msg['body'] = res_obj['text']
msg.reply("%(body)s" % msg).send()
def presence_available(self, iq):
logger.info(">>>>>> chat presence, here is iq %s", iq)
person.update_list_from_jabber(
self.roster[settings.MY_ACCOUNTS['gmail']['email']]
)
#logger.info("here we are 1 chat room %s", iq)
# this should be a request for read info from spinbackup
#self.outsock.send_json({'cmd': 'get_news'})
#msg = self.outsock.recv_json()
#if msg:
#logger.info("twitter bot tryes to send message to chat room %s", iq)
#self.send_message(
## self.nick
##mto=msg['from'].bare,
#mto='',
#mbody=msg['text'],
#mtype='groupchat'
#)
def muc_presence(self, iq):
"""docstring for groupchat_presence"""
#logger.info(">>>>>> groupchat presence worked, here we are in chat room %s", iq)
key = 'twitter_hourly'
msg = self._redis.get(key)
if msg:
self._redis.delete(key)
logger.info("twitter bot tryes to send message to chat room %s", iq)
self.send_message(
# self.nick
mto=iq['from'].bare,
mbody=msg,
mtype='groupchat'
)
msg = None
#def chat_send(self):
#while True:
#msg = self.outsock.recv_json()
#if msg:
#self.send_message(
## self.nick
#mto=msg['from'].bare,
#mbody=msg['text'],
#mtype='groupchat'
#)
def muc_message(self, msg):
"""
Process incoming message stanzas from any chat room. Be aware
that if you also have any handlers for the 'message' event,
message stanzas may be processed by both handlers, so check
the 'type' attribute when using a 'message' event handler.
Whenever the bot's nickname is mentioned, respond to
the message.
IMPORTANT: Always check that a message is not from yourself,
otherwise you will create an infinite loop responding
to your own messages.
This handler will reply to messages that mention
the bot's nickname.
Arguments:
msg -- The received message stanza. See the documentation
for stanza objects and the Message stanza to see
how it may be used.
"""
if msg['mucnick'] not in (self.nick, self.nick.lower()) and\
self.nick in msg['body'] and msg['type'] == 'groupchat':
"""self.send_message(mto=msg['from'].bare,
#mbody="I heard that, %s." % msg['mucnick'],
#mtype='groupchat')
"""
            # cut the bot's nickname from the start of the message;
            # initialize mesg first so it is defined even when the nickname
            # appears mid-message rather than at the start
            mesg = msg['body']
            if mesg.startswith(self.nick.lower()):
                mesg = mesg.replace(self.nick.lower(), '', 1)
            elif mesg.startswith(self.nick):
                mesg = mesg.replace(self.nick, '', 1)
            if mesg.startswith(','):
                mesg = mesg.replace(',', '', 1)
self.sock.send_json({
'request': mesg,
'from': SERVICE_NAME,
'type': 'request',
'sender': str(msg['from'])
})
res_obj = self.sock.recv_json()
if not res_obj:
self.send_message(
mto=msg['from'].bare,
mbody='one moment please..',
mtype='groupchat'
)
if res_obj:
self.send_message(
mto=msg['from'].bare,
mbody=res_obj['text'],
mtype='groupchat'
)
key = 'twitter_hourly'
tmsg = self._redis.get(key)
self._redis.delete(key)
if tmsg:
self.send_message(
mto=msg['from'].bare,
mbody=tmsg,
mtype='groupchat'
)
def muc_online(self, presence):
"""
Process a presence stanza from a chat room. In this case,
presences from users that have just come online are
handled by sending a welcome message that includes
the user's nickname and role in the room.
Arguments:
presence -- The received presence stanza. See the
documentation for the Presence stanza
to see how else it may be used.
"""
if presence['muc']['nick'] != self.nick:
self.send_message(mto=presence['from'].bare,
mbody="Hello, %s %s" % (presence['muc']['role'],
presence['muc']['nick']),
mtype='groupchat')
def accept_invite(self, inv):
self.plugin['xep_0045'].joinMUC(inv["from"], self.nick, wait=False)
def accept_direct_invite(self, inv):
self.plugin['xep_0045'].joinMUC(inv["from"], self.nick, wait=False)
if __name__ == '__main__':
# Setup the EchoBot and register plugins. Note that while plugins may
# have interdependencies, the order in which you register them does
# not matter.
xmpp = EchoBot(settings.MY_ACCOUNTS['gmail']['email'],
settings.MY_ACCOUNTS['gmail']['password'])
xmpp.auto_subscribe = True
xmpp.auto_authorize = True
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0045') # Multi-User Chat
xmpp.register_plugin('xep_0249') # XEP-0249: Direct MUC Invitations
xmpp.register_plugin('xep_0199') # XMPP Ping
xmpp.register_plugin('xep_0004') # Data Forms
xmpp.register_plugin('xep_0060') # PubSub
@atexit.register
def goodbye():
xmpp.sock.close()
xmpp.context.term()
# If you are working with an OpenFire server, you may need
# to adjust the SSL version used:
# xmpp.ssl_version = ssl.PROTOCOL_SSLv3
# If you want to verify the SSL certificates offered by a server:
# xmpp.ca_certs = "path/to/ca/cert"
# Connect to the XMPP server and start processing XMPP stanzas.
if xmpp.connect():
# If you do not have the pydns library installed, you will need
# to manually specify the name of the server if it does not match
# the one in the JID. For example, to use Google Talk you would
# need to use:
#
# if xmpp.connect(('talk.google.com', 5222)):
# ...
xmpp.process(threaded=False)
else:
logger.error("Unable to connect")
| {
"content_hash": "34076700d9db194075327648d70cfaac",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 101,
"avg_line_length": 33.96875,
"alnum_prop": 0.5582643360932229,
"repo_name": "vsilent/smarty-bot",
"id": "c01de17e3a270a466395e20a158124b7c920c791",
"size": "13090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/lib/jabber/connect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2163"
},
{
"name": "Dockerfile",
"bytes": "741"
},
{
"name": "HTML",
"bytes": "4223"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "974421"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2013-2014 Heiko Strathmann
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
*
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the author.
"""
from numpy import mod, log, sum, zeros, arange, inf
import numpy
from numpy.matlib import repmat
from numpy.random import rand, permutation
from kameleon_mcmc.distribution.Distribution import Distribution, Sample
class DiscreteRandomWalkProposal(Distribution):
def __init__(self, mu, spread, flip_at_least_one=True):
if not type(mu) is numpy.ndarray:
raise TypeError("Mean vector must be a numpy array")
if not len(mu.shape) == 1:
raise ValueError("Mean vector must be a 1D numpy array")
if not len(mu) > 0:
raise ValueError("Mean vector dimension must be positive")
if mu.dtype != numpy.bool8:
raise ValueError("Mean must be a bool8 numpy array")
Distribution.__init__(self, len(mu))
if not type(spread) is float:
raise TypeError("Spread must be a float")
if not (spread > 0. and spread < 1.):
raise ValueError("Spread must be a probability")
if not type(flip_at_least_one) is bool:
raise ValueError("Flip at least one must be a boolean")
self.mu = mu
self.spread = spread
self.flip_at_least_one = flip_at_least_one
def __str__(self):
s = self.__class__.__name__ + "=["
s += "spread=" + str(self.ps)
s += ", flip_at_least_one=" + str(self.flip_at_least_one)
s += ", " + Distribution.__str__(self)
s += "]"
return s
def sample(self, n=1):
if not type(n) is int:
raise TypeError("Number of samples must be integer")
if n <= 0:
raise ValueError("Number of samples (%d) needs to be positive", n)
# copy mean vector a couple of times
samples = repmat(self.mu, n, 1)
if self.flip_at_least_one is False:
            # each index is flipped independently with probability spread (Bernoulli)
change_inds = rand(n, self.dimension) < self.spread
else:
# sample number of changes from binomial(spread, d-1) to have at least one change
num_changes = 1 + sum(rand(n, self.dimension - 1) < self.spread, 1)
# randomly change that many indices
change_inds = zeros((n, self.dimension), dtype=numpy.bool8)
for i in range(n):
change_inds[i, arange(num_changes[i])] = True
change_inds[i] = change_inds[i, permutation(self.dimension)]
# flip all chosen indices
samples[change_inds] = mod(samples[change_inds] + 1, 2)
return Sample(samples)
def log_pdf(self, X):
if not type(X) is numpy.ndarray:
raise TypeError("X must be a numpy array")
        if not len(X.shape) == 2:
raise TypeError("X must be a 2D numpy array")
# this also enforces correct data ranges
if X.dtype != numpy.bool8:
raise ValueError("X must be a bool8 numpy array")
if not X.shape[1] == self.dimension:
raise ValueError("Dimension of X does not match own dimension")
# hamming distance for all elements in X
k = sum(X != self.mu, 1)
        # subtract one from the distance since one index is always flipped
if self.flip_at_least_one:
k -= 1
        # simple binomial probability for d-1 dimensions, where the normalisers cancel
d = self.dimension
if self.flip_at_least_one:
# one index is always flipped, so exclude
d -= 1
result = k * log(self.spread) + (d - k) * log(1 - self.spread)
if self.flip_at_least_one:
# cases with k<0 have zero probability since one element is *always*
# changed, return -inf
result[k < 0] = -inf
return result
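if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module; the `.samples`
    # attribute on the returned Sample wrapper is an assumption): draw a few
    # proposals around an all-False mean vector and score them.
    mu = zeros(5, dtype=numpy.bool8)
    proposal = DiscreteRandomWalkProposal(mu, 0.2)
    draws = proposal.sample(3)
    print(proposal.log_pdf(draws.samples.astype(numpy.bool8)))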
| {
"content_hash": "75cfbdc7e75bb2f18dc10abb4822ea38",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 93,
"avg_line_length": 39.280575539568346,
"alnum_prop": 0.6276556776556776,
"repo_name": "karlnapf/kameleon-mcmc",
"id": "a03de4e7b5185abd7cd4125b2c2d6be9cbf9eea3",
"size": "5460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kameleon_mcmc/distribution/proposals/DiscreteRandomWalkProposal.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "436070"
},
{
"name": "Shell",
"bytes": "483"
}
],
"symlink_target": ""
} |
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from configurations import values
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
    # TODO: Fix this; the import is attempted even when running in Dev.
pass
from .common import Common
class Production(Common):
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
# END INSTALLED_APPS
# SECRET KEY
SECRET_KEY = values.SecretValue()
# END SECRET KEY
# django-secure
INSTALLED_APPS += ("djangosecure", )
# MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'djangosecure.middleware.SecurityMiddleware',
)
MIDDLEWARE_CLASSES += Common.MIDDLEWARE_CLASSES
# END MIDDLEWARE CONFIGURATION
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
# end django-secure
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# END STORAGE CONFIGURATION
# EMAIL
DEFAULT_FROM_EMAIL = values.Value('mdid3 <noreply@testmdid3.local>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[mdid3] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
# END EMAIL
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# END TEMPLATE CONFIGURATION
# CACHING
    # Only do this here because, thanks to django-pylibmc-sasl and pylibmc,
    # memcacheify is painful to install on Windows.
try:
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
# END CACHING
# Your production stuff: Below this line define 3rd party library settings
| {
"content_hash": "cbb6cc62c05dd42a85ade18346aca348",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 97,
"avg_line_length": 35.94656488549618,
"alnum_prop": 0.6973879804629433,
"repo_name": "hanleybrand/mdid3-core-a-goof-around",
"id": "1fde89385f421d710cccbcb69801a5731793e883",
"size": "4733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mdid3/config/production.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1212"
},
{
"name": "HTML",
"bytes": "23210"
},
{
"name": "JavaScript",
"bytes": "2387"
},
{
"name": "Python",
"bytes": "64001"
}
],
"symlink_target": ""
} |
import arrow
from flask import (
render_template, current_app,
url_for
)
from feedback.database import (
Column, db, Model
)
from feedback.utils import send_email
class Monthly(Model):
    ''' The monthly report model - this contains only
    one field: a string of e-mail addresses,
    comma-separated when there is more than one.
    '''
__tablename__ = 'monthly-report'
id = Column(db.Integer, primary_key=True, index=True)
email_list = Column(db.String(200), nullable=True)
def __repr__(self):
return '<Monthly(id:{0}, emails:{1})>'.format(
self.id,
self.email_list)
def send_report(self):
''' From an instance of the Monthly model, send
        out an e-mail saying that this month's monthly
report is ready. This gets pinged from a server
task every month through Heroku. In theory.
'''
if self.email_list is None:
subj = 'Permitting Inspection Center Monthly Status Report'
current_app.logger.info(
'NO-EMAIL-ADDRESS | Subject: {}'.format(subj))
else:
subj = 'Permitting Inspection Center Monthly Status Report - {}'
from_email = current_app.config.get('ADMIN_EMAIL')
last_month = arrow.utcnow().replace(months=-1)
date_start, date_end = last_month.span('month')
date_header = date_start.format('MMMM, YYYY')
year = last_month.format('YYYY')
month = last_month.format('MM')
report = url_for(
'reports.overview', _external=True,
year=year, month=month)
send_email(
subj.format(date_header),
from_email,
self.email_list,
render_template('email/monthly_notification.txt',
date_header=date_header,
report=report),
render_template('email/monthly_notification.html',
date_header=date_header,
report=report))
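# Hedged usage sketch (not part of the original module): a scheduled job,
# for instance one run monthly by Heroku Scheduler, could ping every report
# like this. `Monthly.query` is assumed from the Flask-SQLAlchemy-style
# Model base imported above.
def send_all_monthly_reports():
    for monthly in Monthly.query.all():
        monthly.send_report()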
| {
"content_hash": "d0c32c60f3fd187db2e6dde17145e4e0",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 76,
"avg_line_length": 34.377049180327866,
"alnum_prop": 0.5560324272770625,
"repo_name": "codeforamerica/mdc-feedback",
"id": "3867a37715c800813fbd7e99faea3a24d0a953a7",
"size": "2121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feedback/reports/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43919"
},
{
"name": "HTML",
"bytes": "86259"
},
{
"name": "JavaScript",
"bytes": "403140"
},
{
"name": "Makefile",
"bytes": "874"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "98973"
},
{
"name": "Ruby",
"bytes": "930"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mig_main', '0002_initial_split'),
('electees', '0003_electeeinterviewsurvey_instructions'),
]
operations = [
migrations.CreateModel(
name='ElecteeProcessVisibility',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('followups_visible', models.BooleanField(default=False)),
('term', models.ForeignKey(to='mig_main.AcademicTerm', unique=True)),
],
options={
},
bases=(models.Model,),
),
]
| {
"content_hash": "c9fd7126ccad779c6de59480a754f232",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 114,
"avg_line_length": 30.36,
"alnum_prop": 0.5757575757575758,
"repo_name": "tbpmig/mig-website",
"id": "e3e22dde9299c29b8a8381fd7baba933fdc4ce1d",
"size": "783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electees/migrations/0004_electeeprocessvisibility.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8576"
},
{
"name": "HTML",
"bytes": "760931"
},
{
"name": "JavaScript",
"bytes": "64350"
},
{
"name": "Less",
"bytes": "2022"
},
{
"name": "Python",
"bytes": "1637977"
},
{
"name": "TeX",
"bytes": "5289"
}
],
"symlink_target": ""
} |
from util.tipo import tipo
class S_SKILL_CATEGORY(object):
def __init__(self, tracker, time, direction, opcode, data):
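        # Debug stub: log the message class name, the payload length, and the
        # raw payload bytes as hex strings; this opcode is not otherwise handled.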
print(str(type(self)).split('.')[3]+'('+str(len(data))+'): '+ str(data.get_array_hex(1))[1:-1])
| {
"content_hash": "4dcfe3c0cdbba579e5d99b7d02af351c",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 103,
"avg_line_length": 45.6,
"alnum_prop": 0.618421052631579,
"repo_name": "jeff-alves/Tera",
"id": "078f0edfc572f2d9471fbd0112bdfa5c1a483dae",
"size": "228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/message/unused/S_SKILL_CATEGORY.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113659"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
from contextlib import contextmanager
import os
import sys
import datetime
import imp
import copy
import fnmatch
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
helperlib = imp.load_source('helperlib', '../helperlib.py')
LG = nxDSCLog.DSCLog
# [ClassVersion("1.0.0"), FriendlyName("nxUser"),SupportsInventory()]
# class MSFT_nxUserResource : OMI_BaseResource
# {
# [Key, InventoryFilter] string UserName;
# [write,ValueMap{"Present", "Absent"},Values{"Present", "Absent"}] string Ensure;
# [write, InventoryFilter] string FullName;
# [write, InventoryFilter] string Description;
# [write] string Password;
# [write] boolean Disabled;
# [write] boolean PasswordChangeRequired;
# [write] string HomeDirectory;
# [write] string GroupID;
# [read] string UserID;
# };
global show_mof
show_mof = False
def init_vars(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID):
if UserName is not None:
UserName = UserName.encode('ascii', 'ignore')
else:
UserName = ''
if Ensure is not None and Ensure != '':
Ensure = Ensure.encode('ascii', 'ignore').lower()
else:
Ensure = 'present'
if FullName is not None:
FullName = FullName.encode('ascii', 'ignore')
else:
FullName = ''
if Description is not None:
Description = Description.encode('ascii', 'ignore')
else:
Description = ''
if Password is not None:
Password = Password.encode('ascii', 'ignore')
else:
Password = ''
if Disabled is None:
Disabled = False
Disabled = ( Disabled == True ) # this arrives as a 0 or 1
if PasswordChangeRequired is None:
PasswordChangeRequired = False
PasswordChangeRequired = ( PasswordChangeRequired == True ) # this arrives as a 0 or 1
if HomeDirectory is not None:
HomeDirectory = HomeDirectory.encode('ascii', 'ignore')
else:
HomeDirectory = ''
if GroupID is not None:
GroupID = GroupID.encode('ascii', 'ignore')
else:
GroupID = ''
return UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID
def Set_Marshall(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID):
if helperlib.CONFIG_SYSCONFDIR_DSC == "omsconfig":
return [-1]
(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID) = \
init_vars(UserName, Ensure, FullName, Description, Password,
Disabled, PasswordChangeRequired, HomeDirectory, GroupID)
retval = Set(UserName, Ensure, FullName, Description, Password,
Disabled, PasswordChangeRequired, HomeDirectory, GroupID)
return retval
def Test_Marshall(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID):
if helperlib.CONFIG_SYSCONFDIR_DSC == "omsconfig":
return [-1]
(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID) = \
init_vars(UserName, Ensure, FullName, Description, Password,
Disabled, PasswordChangeRequired, HomeDirectory, GroupID)
retval = Test(UserName, Ensure, FullName, Description, Password,
Disabled, PasswordChangeRequired, HomeDirectory, GroupID)
return retval
def Get_Marshall(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID):
if helperlib.CONFIG_SYSCONFDIR_DSC == "omsconfig":
return [-1]
arg_names = list(locals().keys())
(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID) = \
init_vars(UserName, Ensure, FullName, Description, Password,
Disabled, PasswordChangeRequired, HomeDirectory, GroupID)
retval = 0
(retval, UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID, UserID) = Get(
UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID)
UserName = protocol.MI_String(UserName)
Ensure = protocol.MI_String(Ensure)
FullName = protocol.MI_String(FullName)
PasswordChangeRequired = protocol.MI_Boolean(PasswordChangeRequired)
Disabled = protocol.MI_Boolean(Disabled)
Description = protocol.MI_String(Description)
Password = protocol.MI_String(Password)
HomeDirectory = protocol.MI_String(HomeDirectory)
GroupID = protocol.MI_String(GroupID)
UserID = protocol.MI_String(UserID)
arg_names.append('UserID')
retd = {}
ld = locals()
for k in arg_names:
retd[k] = ld[k]
return retval, retd
def Inventory_Marshall(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID):
(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID) = \
init_vars(UserName, Ensure, FullName, Description, Password,
Disabled, PasswordChangeRequired, HomeDirectory, GroupID)
(retval, Inventory) = GetInventory(
UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID)
for d in Inventory:
d['UserName'] = protocol.MI_String(d['UserName'])
d['Ensure'] = protocol.MI_String('Present')
d['FullName'] = protocol.MI_String(d['FullName'])
d['PasswordChangeRequired'] = protocol.MI_Boolean(d['PasswordChangeRequired'])
d['Disabled'] = protocol.MI_Boolean(d['Disabled'])
d['Description'] = protocol.MI_String(d['Description'])
d['Password'] = protocol.MI_String(d['Password'])
d['HomeDirectory'] = protocol.MI_String(d['HomeDirectory'])
d['GroupID'] = protocol.MI_String(d['GroupID'])
d['UserID'] = protocol.MI_String(d['UserID'])
d = protocol.MI_Instance(d)
Inventory = protocol.MI_InstanceA(Inventory)
retd = {}
retd["__Inventory"] = Inventory
return retval, retd
############################################################
# Begin user defined DSC functions
############################################################
def SetShowMof(a):
global show_mof
show_mof = a
def ShowMof(op, UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID):
if not show_mof:
return
mof = ''
mof += op + ' nxUser MyUser \n'
mof += '{\n'
mof += ' UserName = "' + UserName + '"\n'
mof += ' Ensure = "' + Ensure + '"\n'
mof += ' FullName = "' + FullName + '"\n'
mof += ' Description = "' + Description + '"\n'
mof += ' Password = "' + Password + '"\n'
mof += ' Disabled = ' + str(Disabled) + '\n'
mof += ' PasswordChangeRequired = ' + str(PasswordChangeRequired) + '\n'
mof += ' HomeDirectory = "' + HomeDirectory + '"\n'
mof += ' GroupID = "' + str(GroupID) + '"\n'
mof += '}\n'
f = open('./test_mofs.log', 'a')
Print(mof, file=f)
LG().Log('INFO', mof)
f.close()
def Print(s, file=sys.stdout):
file.write(s + '\n')
@contextmanager
def opened_w_error(filename, mode="r"):
"""
This context ensures the file is closed.
"""
try:
f = open(filename, mode=mode)
except IOError, err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
userdel_path = "/usr/sbin/userdel"
useradd_path = "/usr/sbin/useradd"
usermod_path = "/usr/sbin/usermod"
chage_path = "/usr/bin/chage"
def ReadPasswd(filename):
with opened_w_error(filename, 'rb') as (f, error):
if error:
Print("Exception opening file " + filename + " Error Code: " +
str(error.errno) + " Error: " + error.message + error.strerror, file=sys.stderr)
LG().Log('ERROR', "Exception opening file " + filename + " Error Code: " +
str(error.errno) + " Error: " + error.message + error.strerror)
return None
else:
lines = f.read().split("\n")
entries = dict()
for line in lines:
tokens = line.split(":")
if len(tokens) > 1:
entries[tokens[0]] = tokens[1:]
return entries
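# Note: ReadPasswd keys each record by its first token and stores tokens[1:],
# so for /etc/shadow shadow_entry[0] is the password hash, shadow_entry[1] the
# last-change day (days since the epoch) and shadow_entry[3] the maximum
# password age in days.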
def PasswordExpired(shadow_entry):
# No entries for the "last" field means Password is Expired.
if shadow_entry[1] == "":
return True
# Passwords must be changed if their "last" day is 0
if shadow_entry[1] == "0":
return True
# "99999" means "never expire"
if shadow_entry[3] == "99999" or shadow_entry[3] == "" :
return False
day_0 = datetime.datetime.utcfromtimestamp(0)
day_now = datetime.datetime.today()
days_since_day_0 = (day_now - day_0).days
days_since_last_password_change = days_since_day_0 - int(shadow_entry[1])
number_of_days_password_is_valid_for = int(shadow_entry[3])
if days_since_last_password_change > number_of_days_password_is_valid_for:
return True
return False
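# Worked example (illustrative): with shadow_entry[1] == "18900" and
# shadow_entry[3] == "30", a check on day 19000 since the epoch sees a
# 100-day-old password; 100 > 30, so PasswordExpired() returns True.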
def Set(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID):
ShowMof('SET', UserName, Ensure, FullName, Description, Password,
Disabled, PasswordChangeRequired, HomeDirectory, GroupID)
passwd_entries = None
shadow_entries = None
passwd_entries = ReadPasswd("/etc/passwd")
if passwd_entries is None:
return [-1]
shadow_entries = ReadPasswd("/etc/shadow")
if shadow_entries is None:
return [-1]
old_passwd_entries = passwd_entries
usermod_string = ""
usermodonly_string = ""
if Ensure == "absent":
exit_code = os.system(userdel_path + " " + UserName)
else:
usermod_string = ""
if FullName or Description:
usermod_string += " -c \""
if FullName:
usermod_string += FullName
if Description:
usermod_string += "," + Description
usermod_string += "\""
if HomeDirectory:
usermod_string += " -d \"" + HomeDirectory + "\" -m "
if GroupID:
usermod_string += " -g " + GroupID
if UserName not in passwd_entries:
exit_code = os.system(
useradd_path + " " + usermod_string + " " + UserName)
            if exit_code != 0:
return [exit_code]
if len(usermodonly_string) > 0:
exit_code = os.system(
usermod_path + " " + usermodonly_string + " " + UserName)
else:
Print(usermod_string, file=sys.stderr)
LG().Log('INFO', usermod_string)
if len(usermodonly_string + usermod_string) > 0:
exit_code = os.system(
usermod_path + " " + usermodonly_string + usermod_string + " " + UserName)
disabled_user_string = ""
usermod_string = ""
if Disabled is True:
disabled_user_string = "!"
if len(Password) > 0:
usermod_string += " -p \"" + disabled_user_string + \
Password.replace("$", "\$") + "\""
elif Disabled is True:
usermodonly_string += " -L"
elif Disabled is False:
passwd_entries = ReadPasswd("/etc/passwd")
if passwd_entries is None:
return [-1]
shadow_entries = ReadPasswd("/etc/shadow")
if shadow_entries is None:
return [-1]
if UserName in shadow_entries:
cur_pass = shadow_entries[UserName][0]
if cur_pass == "!!":
Print("Unable to unlock user: " + UserName +
". Password is not set.", file=sys.stderr)
LG().Log('ERROR', "Unable to unlock user: " +
UserName + ". Password is not set.")
return [-1]
elif cur_pass[0] == '!':
if len(cur_pass) > 1:
usermodonly_string += " -U"
else:
Print("Unable to unlock user: " + UserName +
". Doing so would result in a passwordless account.", file=sys.stderr)
LG().Log('ERROR', "Unable to unlock user: " + UserName +
". Doing so would result in a passwordless account.")
return [-1]
Print(usermod_string, file=sys.stderr)
LG().Log('INFO', usermod_string)
if len(usermodonly_string + usermod_string) > 0:
exit_code = os.system(
usermod_path + " " + usermodonly_string + usermod_string + " " + UserName)
# force password change only if we created the account
if PasswordChangeRequired is True and UserName not in old_passwd_entries:
exit_code = os.system(chage_path + " -d 0 " + UserName)
return [exit_code]
def Test(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID):
ShowMof('TEST', UserName, Ensure, FullName, Description, Password,
Disabled, PasswordChangeRequired, HomeDirectory, GroupID)
passwd_entries = None
shadow_entries = None
passwd_entries = ReadPasswd("/etc/passwd")
if passwd_entries is None:
return [-1]
shadow_entries = ReadPasswd("/etc/shadow")
if shadow_entries is None:
return [-1]
if not Ensure:
Ensure = "present"
if Ensure == "absent":
if UserName not in passwd_entries:
return [0]
else:
Print(UserName + " in passwd_entries", file=sys.stderr)
LG().Log('ERROR', UserName + " in passwd_entries")
return [-1]
elif Ensure == "present":
if UserName not in passwd_entries:
Print(UserName + " not in passwd_entries", file=sys.stderr)
LG().Log('ERROR', UserName + " not in passwd_entries")
return [-1]
if UserName not in shadow_entries:
Print(UserName + " not in shadow_entries", file=sys.stderr)
LG().Log('ERROR', UserName + " not in shadow_entries")
return [-1]
if len(passwd_entries[UserName]) < 6:
Print("Unable to read /etc/passwd entry for username: " +
UserName, file=sys.stderr)
LG().Log(
'ERROR', "Unable to read /etc/passwd entry for username: " + UserName)
return [-1]
if len(shadow_entries[UserName]) < 8:
Print("Unable to read /etc/shadow entry for username: " +
UserName, file=sys.stderr)
LG().Log(
'ERROR', "Unable to read /etc/shadow entry for username: " + UserName)
return [-1]
extra_fields = passwd_entries[UserName][3].split(",")
if FullName and extra_fields[0] != FullName:
Print("Incorrect full name (" + extra_fields[
0] + "), should be: " + FullName + ", for username: " + UserName, file=sys.stderr)
LG().Log('ERROR', "Incorrect full name (" +
extra_fields[0] + "), should be: " + FullName + ", for username: " + UserName)
return [-1]
if Description:
if len(extra_fields) < 2:
Print("There is no description.", file=sys.stderr)
LG().Log('ERROR', "There is no description.")
return [-1]
elif extra_fields[1] != Description:
Print(
"Incorrect description for username: " + UserName, file=sys.stderr)
LG().Log(
'ERROR', "Incorrect description for username: " + UserName)
return [-1]
if HomeDirectory and passwd_entries[UserName][4] != HomeDirectory:
Print("Home directories do not match", file=sys.stderr)
LG().Log('ERROR', "Home directories do not match")
return [-1]
if GroupID and passwd_entries[UserName][2] != GroupID:
Print("GroupID does not match", file=sys.stderr)
LG().Log('ERROR', "GroupID does not match")
return [-1]
if len(Password) > 0:
read_password = shadow_entries[UserName][0]
            if len(read_password) == 0:
Print("Password does not match", file=sys.stderr)
LG().Log('ERROR', "Password does not match")
return [-1]
if read_password[0] == "!":
read_password = read_password[1:]
if read_password != Password:
Print("Password does not match", file=sys.stderr)
LG().Log('ERROR', "Password does not match")
return [-1]
if PasswordChangeRequired is True and not PasswordExpired(shadow_entries[UserName]):
Print(
"PasswordChangeRequired is True and the password is not expired.", file=sys.stderr)
LG().Log(
'ERROR', "PasswordChangeRequired is True and the password is not expired.")
return [-1]
elif PasswordChangeRequired is False and PasswordExpired(shadow_entries[UserName]):
Print(
"PasswordChangeRequired is False and the password is expired.", file=sys.stderr)
LG().Log(
'ERROR', "PasswordChangeRequired is False and the password is expired.")
return [-1]
if Disabled is True and shadow_entries[UserName][0][0] != "!":
Print("Account not disabled", file=sys.stderr)
LG().Log('ERROR', "Account not disabled")
return [-1]
if Disabled is False and shadow_entries[UserName][0][0] == "!":
Print("Account disabled", file=sys.stderr)
LG().Log('ERROR', "Account disabled")
return [-1]
return [0]
def Get(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID):
ShowMof('GET', UserName, Ensure, FullName, Description, Password,
Disabled, PasswordChangeRequired, HomeDirectory, GroupID)
UserID = ''
passwd_entries = None
shadow_entries = None
passwd_entries = ReadPasswd("/etc/passwd")
if passwd_entries is None:
return [-1, UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID, UserID]
shadow_entries = ReadPasswd("/etc/shadow")
if shadow_entries is None:
return [-1, UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID, UserID]
exit_code = 0
if UserName not in passwd_entries:
FullName = Description = Password = HomeDirectory = GroupID = ""
return [exit_code, UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID, UserID]
extra_fields = passwd_entries[UserName][3].split(",")
FullName = extra_fields[0]
if len(extra_fields) > 1:
Description = extra_fields[1]
HomeDirectory = passwd_entries[UserName][4]
UserID = passwd_entries[UserName][1]
GroupID = passwd_entries[UserName][2]
Password = shadow_entries[UserName][0]
Disabled = False
if len(Password) > 0:
if Password[0] == "!":
Disabled = True
Password = '' # not showing the password.
if PasswordExpired(shadow_entries[UserName]):
PasswordChangeRequired = True
else:
PasswordChangeRequired = False
return [exit_code, UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID, UserID]
def GetInventory(UserName, Ensure, FullName, Description, Password, Disabled, PasswordChangeRequired, HomeDirectory, GroupID):
Inventory=[]
passwd_entries = None
passwd_entries = ReadPasswd("/etc/passwd")
if passwd_entries is None:
return [-1, Inventory]
exit_code = 0
d={}
for Uname in passwd_entries.keys():
if len(UserName) and not fnmatch.fnmatch(Uname,UserName):
continue
d['UserName'] = Uname
extra_fields = passwd_entries[Uname][3].split(",")
d['FullName'] = extra_fields[0]
if len(FullName) and not fnmatch.fnmatch(d['FullName'],FullName):
continue
d['Description'] = ''
if len(extra_fields) > 1:
d['Description'] = extra_fields[1]
if len(Description) and not fnmatch.fnmatch(d['Description'],Description):
continue
d['HomeDirectory'] = passwd_entries[Uname][4]
d['UserID'] = passwd_entries[Uname][1]
d['GroupID'] = passwd_entries[Uname][2]
d['Disabled'] = False
d['Password'] = '' # not showing the password.
d['PasswordChangeRequired'] = False
Inventory.append(copy.deepcopy(d))
return [exit_code, Inventory]
| {
"content_hash": "cee745ba2010291bfcb29f8825d32fd5",
"timestamp": "",
"source": "github",
"line_count": 526,
"max_line_length": 143,
"avg_line_length": 40.46958174904943,
"alnum_prop": 0.598205477521492,
"repo_name": "MSFTOSSMgmt/WPSDSCLinux",
"id": "accc0687dabd258abf142edaaea9f0c768a7c1d2",
"size": "21488",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Providers/Scripts/2.6x-2.7x/Scripts/nxUser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5870322"
},
{
"name": "C#",
"bytes": "98943"
},
{
"name": "C++",
"bytes": "670183"
},
{
"name": "CMake",
"bytes": "13826"
},
{
"name": "HTML",
"bytes": "166861"
},
{
"name": "Makefile",
"bytes": "164013"
},
{
"name": "Objective-C",
"bytes": "61644"
},
{
"name": "PowerShell",
"bytes": "40239"
},
{
"name": "Python",
"bytes": "1858427"
},
{
"name": "Shell",
"bytes": "8136"
},
{
"name": "SourcePawn",
"bytes": "60242"
},
{
"name": "Yacc",
"bytes": "35814"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import contextlib
import functools
import json
import logging
import traceback
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.http import HttpResponse, Http404
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from .exceptions import ErrorResponse, AuthenticationFailed, SerializationError
from .http import Response
from .jsonapi import TopLevel, Included
logger = logging.getLogger(__name__)
class EndpointSet(View):
@classmethod
def as_view(cls, **initkwargs):
view_mapping_kwargs = initkwargs.pop("view_mapping_kwargs", {})
        # NOTE: the base as_view() is called for its initkwargs validation;
        # its return value is shadowed by the view defined below.
        view = super(EndpointSet, cls).as_view(**initkwargs)
def view(request, *args, **kwargs):
self = cls(**initkwargs)
mapping = cls.view_mapping(**view_mapping_kwargs)
for verb, method in mapping.items():
if hasattr(self, method):
setattr(self, verb, getattr(self, method))
self.requested_method = mapping.get(request.method.lower())
self.args = args
self.kwargs = kwargs
self.request = request
return self.dispatch(request, *args, **kwargs)
functools.update_wrapper(view, cls, updated=())
functools.update_wrapper(view, cls.dispatch, assigned=())
return csrf_exempt(view)
def dispatch(self, request, *args, **kwargs):
try:
if request.method.lower() in self.http_method_names:
endpoint = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
endpoint = self.http_method_not_allowed
self.check_authentication(endpoint)
self.prepare()
self.check_permissions(endpoint)
response = endpoint(request, *args, **kwargs)
if not isinstance(response, HttpResponse):
raise ValueError("view did not return an HttpResponse (got: {})".format(type(response)))
except Exception as exc:
response = self.handle_exception(exc)
return response
@property
def debug(self):
return settings.DEBUG or getattr(settings, "PINAX_API_DEBUG", False)
def handle_exception(self, exc):
if isinstance(exc, ErrorResponse):
return exc.response
elif isinstance(exc, Http404):
return self.render_error(exc.args[0], status=404)
else:
logger.error("{}: {}".format(exc.__class__.__name__, str(exc)), exc_info=True)
if self.debug:
return self.render_error(
traceback.format_exc().splitlines()[-1],
status=500
)
else:
return self.render_error("unknown server error", status=500)
def prepare(self):
pass
def check_authentication(self, endpoint):
user = None
backends = []
backends.extend(getattr(endpoint, "authentication", []))
backends.extend(getattr(self, "middleware", {}).get("authentication", []))
for backend in backends:
try:
user = backend.authenticate(self.request)
except AuthenticationFailed as exc:
raise ErrorResponse(**self.error_response_kwargs(str(exc), status=401))
if user:
self.request.user = user
break
else:
if not self.request.user.is_authenticated():
raise ErrorResponse(**self.error_response_kwargs("Authentication Required.", status=401))
def check_permissions(self, endpoint):
perms = []
perms.extend(getattr(endpoint, "permissions", []))
perms.extend(getattr(self, "middleware", {}).get("permissions", []))
for perm in perms:
res = perm(self.request, view=self)
if res is None:
continue
if isinstance(res, tuple):
ok, status, msg = res
else:
ok, status, msg = res, 403, "Permission Denied."
if not ok:
raise ErrorResponse(**self.error_response_kwargs(msg, status=status))
def parse_data(self):
# @@@ this method is not the most ideal implementation generally, but
# until a better design comes along, we roll with it!
try:
return json.loads(self.request.body.decode(settings.DEFAULT_CHARSET))
except json.JSONDecodeError as e:
raise ErrorResponse(**self.error_response_kwargs(str(e), title="Invalid JSON", status=400))
@contextlib.contextmanager
def validate(self, resource_class, collection=False, obj=None):
"""
        Context manager that yields either a validated resource (collection=False)
        or a generator of validated resources (collection=True).
ValidationError exceptions resulting from subsequent (after yield)
resource manipulation cause an immediate ErrorResponse.
"""
data = self.parse_data()
if "data" not in data:
raise ErrorResponse(**self.error_response_kwargs('Missing "data" key in payload.', status=400))
if collection and not isinstance(data["data"], list):
raise ErrorResponse(**self.error_response_kwargs("Data must be in a list.", status=400))
try:
if collection:
yield (self.validate_resource(resource_class, resource_data, obj) for resource_data in data["data"])
else:
yield self.validate_resource(resource_class, data["data"], obj)
except ValidationError as exc:
raise ErrorResponse(
TopLevel.from_validation_error(exc, resource_class).serializable(),
status=400,
)
def validate_resource(self, resource_class, resource_data, obj=None):
"""
Validates resource data for a resource class.
"""
if "attributes" not in resource_data:
raise ErrorResponse(**self.error_response_kwargs('Missing "attributes" key in data.', status=400))
resource = resource_class()
resource.populate(resource_data, obj=obj)
return resource
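    # Illustrative payload shape accepted by validate()/validate_resource(),
    # per the JSON:API structure checked above:
    #   {"data": {"type": "author", "attributes": {"name": "Ada"}}}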
def render(self, resource, **kwargs):
try:
payload = self.create_top_level(resource, **kwargs).serializable(request=self.request)
except SerializationError as exc:
return self.render_error(str(exc), status=400)
else:
return Response(payload, status=200)
def render_create(self, resource, **kwargs):
try:
payload = self.create_top_level(resource, **kwargs).serializable(request=self.request)
except SerializationError as exc:
return self.render_error(str(exc), status=400)
else:
res = Response(payload, status=201)
res["Location"] = resource.get_self_link(request=self.request)
return res
def render_delete(self):
return Response({}, status=204)
def error_response_kwargs(self, message, title=None, status=400, extra=None):
if extra is None:
extra = {}
err = dict(extra)
err.update({
"status": str(status),
"detail": message,
})
if title is not None:
err["title"] = title
return {
"data": TopLevel(errors=[err]).serializable(),
"status": status,
}
def render_error(self, *args, **kwargs):
return Response(**self.error_response_kwargs(*args, **kwargs))
def get_object_or_404(self, qs, **kwargs):
try:
return qs.get(**kwargs)
except ObjectDoesNotExist:
raise Http404("{} does not exist.".format(qs.model._meta.verbose_name.capitalize()))
def create_top_level(self, resource, linkage=False, **kwargs):
kwargs.update(
{
"data": resource,
"links": True,
"linkage": linkage,
}
)
if "include" in self.request.GET:
kwargs["included"] = Included(self.request.GET["include"].split(","))
return TopLevel(**kwargs)
class ResourceEndpointSet(EndpointSet):
parent = None
@classmethod
def view_mapping(cls, collection):
if collection:
mapping = {
"get": "list",
"post": "create",
}
else:
mapping = {
"get": "retrieve",
"patch": "update",
"delete": "destroy",
}
return mapping
@classmethod
def as_urls(cls):
urls = [
url(
r"^{}$".format(cls.url.collection_regex()),
cls.as_view(view_mapping_kwargs=dict(collection=True)),
name="{}-list".format(cls.url.base_name)
),
url(
r"^{}$".format(cls.url.detail_regex()),
cls.as_view(view_mapping_kwargs=dict(collection=False)),
name="{}-detail".format(cls.url.base_name)
)
]
for related_name, endpointset in cls.relationships.items():
urls.extend(endpointset.as_urls(cls.url, related_name))
return urls
class RelationshipEndpointSet(EndpointSet):
@classmethod
def view_mapping(cls):
return {
"get": "retrieve",
"post": "create",
"patch": "update",
"delete": "destroy",
}
@classmethod
def as_urls(cls, base_url, related_name):
urls = [
url(
r"^{}/relationships/{}$".format(
base_url.detail_regex(),
related_name,
),
cls.as_view(),
name="-".join([base_url.base_name, related_name, "relationship", "detail"])
),
]
return urls
def create_top_level(self, *args, **kwargs):
kwargs["linkage"] = True
return super(RelationshipEndpointSet, self).create_top_level(*args, **kwargs)
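# Hedged usage sketch (not part of the original module). A concrete endpoint
# set subclasses ResourceEndpointSet and implements the verbs named by
# view_mapping(); the `url` helper and resource classes below are assumptions
# based only on the attributes this module references (base_name,
# collection_regex(), detail_regex(), relationships).
#
# class AuthorEndpointSet(ResourceEndpointSet):
#     url = ...  # URL helper exposing base_name / collection_regex() / detail_regex()
#     relationships = {}
#
#     def list(self, request):
#         return self.render(AuthorResource.from_queryset(Author.objects.all()))
#
#     def retrieve(self, request, pk):
#         author = self.get_object_or_404(Author.objects.all(), pk=pk)
#         return self.render(AuthorResource(author))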
| {
"content_hash": "cc4f7175d66916d21425bc3821563a9b",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 116,
"avg_line_length": 35.8280701754386,
"alnum_prop": 0.5756537067867985,
"repo_name": "pinax/pinax-api",
"id": "5bb51aede4e86749912aa2a32dc885efaef40bd4",
"size": "10211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pinax/api/endpoints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "167"
},
{
"name": "Python",
"bytes": "101872"
}
],
"symlink_target": ""
} |
from django.utils import translation
from parler.models import TranslationDoesNotExist
from .testapp.models import AnyLanguageModel, EmptyModel, NotRequiredModel, SimpleModel
from .utils import AppTestCase
class ModelAttributeTests(AppTestCase):
"""
Test model construction
"""
def test_untranslated_get(self):
"""
Test the metaclass of the model.
"""
try:
value = SimpleModel().tr_title
except Exception as e:
self.assertIsInstance(e, TranslationDoesNotExist)
self.assertIsInstance(e, AttributeError)
else:
self.fail(f"Expected exception from reading untranslated title, got {repr(value)}.")
# Raising attribute error gives some additional benefits:
self.assertEqual(getattr(SimpleModel(), "tr_title", "FOO"), "FOO")
self.assertFalse(hasattr(SimpleModel(), "tr_title"))
def test_default_language(self):
"""
Test whether simple language assignments work.
"""
with translation.override("ca-fr"):
x = SimpleModel(
id=99
) # uses get_language(), ID is to avoid reading cached items for 'en'
self.assertEqual(x.get_current_language(), translation.get_language())
self.assertEqual(translation.get_language(), "ca-fr")
x.shared = "SHARED"
x.tr_title = "TRANS_CA"
x.save()
# Refetch
with translation.override("en"):
x = SimpleModel.objects.get(pk=x.pk)
self.assertRaises(TranslationDoesNotExist, lambda: x.tr_title)
# Switch context
x.set_current_language("ca-fr")
self.assertEqual(x.tr_title, "TRANS_CA")
def test_get_language(self):
"""
See how ``.language().get()`` works.
"""
with translation.override("fr"):
            # Despite the active language being 'fr',
            # initialize the object in another language ('nl').
x = SimpleModel(shared="SHARED", tr_title="TRANS", _current_language="nl")
self.assertEqual(x.get_current_language(), "nl")
x.save()
x2 = SimpleModel.objects.language("nl").get(pk=x.pk)
self.assertEqual(x2.get_current_language(), "nl")
self.assertEqual(x2.shared, "SHARED")
self.assertEqual(x2.tr_title, "TRANS")
def test_init_args(self):
"""
Test whether passing translated attributes to __init__() works.
"""
x = SimpleModel(tr_title="TRANS_TITLE")
self.assertEqual(x.tr_title, "TRANS_TITLE")
y = SimpleModel(tr_title="TRANS_TITLE", _current_language="nl")
self.assertEqual(y.get_current_language(), "nl")
self.assertEqual(y.tr_title, "TRANS_TITLE")
def test_create_args(self):
y = SimpleModel.objects.language("nl").create(tr_title="TRANS_TITLE")
self.assertEqual(y.get_current_language(), "nl")
self.assertEqual(y.tr_title, "TRANS_TITLE")
def test_save_multiple(self):
"""
Test the save_translations() function to store multiple languages.
"""
x = SimpleModel()
x.set_current_language("en")
x.tr_title = "TITLE_EN"
x.set_current_language("fr")
x.tr_title = "TITLE_FR"
x.set_current_language("es")
x.tr_title = "TITLE_ES"
x.set_current_language("nl")
x.tr_title = "TITLE_NL"
x.save()
# Check if all translations are saved.
self.assertEqual(
sorted(x.translations.values_list("tr_title", flat=True)),
["TITLE_EN", "TITLE_ES", "TITLE_FR", "TITLE_NL"],
)
self.assertEqual(sorted(x.get_available_languages()), ["en", "es", "fr", "nl"])
self.assertTrue(x.has_translation("en"))
self.assertTrue(x.has_translation("es"))
self.assertFalse(x.has_translation("fi"))
# Update 2 translations.
# Only those should be updated in the database.
x.set_current_language("es")
x.tr_title = "TITLE_ES2"
x.set_current_language("nl")
x.tr_title = "TITLE_NL2"
self.assertNumQueries(2, x.save_translations())
# Even unmodified language is automatically saved.
x.set_current_language("it", initialize=True)
self.assertTrue(x.has_translation("it")) # does return true for this object.
self.assertNumQueries(1, lambda: x.save_translations())
self.assertEqual(sorted(x.get_available_languages()), ["en", "es", "fr", "it", "nl"])
def test_empty_model(self):
"""
Test whether a translated model without any fields still works.
"""
x = EmptyModel()
x.set_current_language("en", initialize=True)
x.set_current_language("fr", initialize=True)
x.set_current_language("es")
x.set_current_language("nl", initialize=True)
x.save()
self.assertEqual(sorted(x.get_available_languages()), ["en", "fr", "nl"])
def test_create_translation(self):
x = SimpleModel.objects.create()
x.create_translation("en", tr_title="TITLE_EN")
x.create_translation("fr", tr_title="TITLE_FR")
self.assertEqual(sorted(x.get_available_languages()), ["en", "fr"])
def test_delete_translation(self):
x = SimpleModel.objects.create(pk=1000)
x.create_translation("en", tr_title="TITLE_EN")
x.create_translation("fr", tr_title="TITLE_FR")
self.assertEqual(sorted(x.get_available_languages()), ["en", "fr"])
num_deleted = x.delete_translation("fr")
self.assertEqual(num_deleted, 1)
self.assertEqual(sorted(x.get_available_languages()), ["en"])
def test_delete_return(self):
x = SimpleModel.objects.create()
x.create_translation("en", tr_title="TITLE_EN")
expect = (2, {"testapp.SimpleModelTranslation": 1, "testapp.SimpleModel": 1})
self.assertEqual(x.delete(), expect)
def test_fallback_language(self):
"""
Test whether the fallback language will be returned.
"""
x = SimpleModel()
x.set_current_language(self.conf_fallback)
x.tr_title = "TITLE_FALLBACK"
x.set_current_language(self.other_lang1)
x.tr_title = "TITLE_XX"
x.save()
with translation.override(self.other_lang2):
x = SimpleModel.objects.get(pk=x.pk)
self.assertEqual(x.tr_title, "TITLE_FALLBACK")
def test_fallback_variant(self):
"""Test de-us falls back to de"""
x = SimpleModel()
x.set_current_language("de")
x.tr_title = "Hallo-de"
x.set_current_language("en")
x.tr_title = "Hello-en"
x.save()
with translation.override("de-ch"):
x = SimpleModel.objects.get(pk=x.pk)
self.assertEqual(x.tr_title, "Hallo-de")
def test_fallback_language_no_current(self):
"""
Test whether the fallback language will be returned,
even when the current language does not have a translation.
"""
x = SimpleModel()
x.set_current_language(self.conf_fallback)
x.tr_title = "TITLE_FALLBACK"
self.assertEqual(
x.safe_translation_getter("tr_title", language_code=self.other_lang1), "TITLE_FALLBACK"
)
def test_any_fallback_model(self):
"""
Test whether a failure in the fallback language can return any saved language (if configured for it).
"""
x = AnyLanguageModel()
x.set_current_language(self.other_lang1)
x.tr_title = "TITLE_XX"
x.save()
with translation.override(self.other_lang2):
x = AnyLanguageModel.objects.get(pk=x.pk)
self.assertRaises(
TranslationDoesNotExist, lambda: x._get_translated_model(use_fallback=True)
)
self.assertEqual(
x.tr_title, "TITLE_XX"
) # Even though there is no current language, there is a value.
self.assertNumQueries(
0, lambda: x._get_any_translated_model()
) # Can fetch from cache next time.
self.assertEqual(x._get_any_translated_model().language_code, self.other_lang1)
def test_any_fallback_function(self):
x = SimpleModel()
x.set_current_language(self.other_lang1)
x.tr_title = "TITLE_XX"
x.save()
with translation.override(self.other_lang2):
x = SimpleModel.objects.get(pk=x.pk)
self.assertRaises(
TranslationDoesNotExist, lambda: x._get_translated_model(use_fallback=True)
)
self.assertIs(
x.safe_translation_getter("tr_title", "DEFAULT"), "DEFAULT"
            )  # No language, gives default
self.assertEqual(
x.safe_translation_getter("tr_title", any_language=True), "TITLE_XX"
) # Even though there is no current language, there is a value.
self.assertNumQueries(
0, lambda: x._get_any_translated_model()
) # Can fetch from cache next time.
self.assertEqual(x._get_any_translated_model().language_code, self.other_lang1)
def test_create_without_translation(self):
"""
Test whether master object is created without translation, in case that no translation attribute is set
"""
x = NotRequiredModel()
self.assertNumQueries(1, lambda: x.save()) # only master object created
self.assertEqual(sorted(x.get_available_languages()), [])
def test_create_with_default_attributes(self):
"""
Test whether translation model is created even attribute has default value
"""
x = NotRequiredModel()
x.tr_title = "DEFAULT_TRANS_TITLE"
self.assertNumQueries(2, lambda: x.save()) # master and translation object created
self.assertEqual(sorted(x.get_available_languages()), [self.conf_fallback])
def test_save_ignore_fallback_marker(self):
"""
Test whether the ``save_translations()`` method skips fallback languages
"""
x = SimpleModel()
x.set_current_language(self.other_lang1)
x.tr_title = "TITLE_XX"
x.set_current_language(self.other_lang2)
# try fetching, causing an fallback marker
x.safe_translation_getter("tr_title", any_language=True)
# Now save. This should not raise errors
x.save()
def test_model_with_zero_pk(self):
"""
tests that the translated model is returned also when the pk is 0
"""
x = SimpleModel()
x.set_current_language(self.other_lang1)
x.pk = 0
x.tr_title = "EMPTY_PK"
x.save()
# now fetch it from db
try:
SimpleModel.objects.get(pk=x.pk)
except TranslationDoesNotExist:
self.fail("zero pk is not supported!")
def test_translatedfieldsmodel_str(self):
"""Test converting TranslatedFieldsModel to string"""
missing_language_code = "xx"
obj = SimpleModel.objects.create(tr_title="Something")
# Adjust translation object to use language_code that is not
# configured. It is easier because various Django version behave
# differently if we try to use not configured language.
translation = obj.translations.get()
translation.language_code = missing_language_code
translation.save()
# Try to get str() of the TranslatedFieldsModel instance.
try:
translation_as_str = str(obj.translations.get())
except KeyError:
self.fail("Converting translation to string raises KeyError")
# Check that we get language code as a fallback, when language is
# not configured.
self.assertEqual(translation_as_str, missing_language_code)
def test_get_or_create_no_defaults(self):
y, created = SimpleModel.objects.language("nl").get_or_create(shared="XYZ")
self.assertTrue(created)
self.assertEqual(y.get_current_language(), "nl")
self.assertRaises(TranslationDoesNotExist, lambda: y.tr_title)
def test_get_or_create_defaults(self):
y, created = SimpleModel.objects.language("nl").get_or_create(
shared="XXX", defaults={"tr_title": "TRANS_TITLE"}
)
self.assertTrue(created)
self.assertEqual(y.get_current_language(), "nl")
self.assertEqual(y.tr_title, "TRANS_TITLE")
| {
"content_hash": "b59cbcdeb11d9f40ab6c79f94082ec14",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 111,
"avg_line_length": 36.69590643274854,
"alnum_prop": 0.6030278884462151,
"repo_name": "edoburu/django-parler",
"id": "8a42a7251ec4c1bad6174f4145689dbb4537a794",
"size": "12550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parler/tests/test_model_attributes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5290"
},
{
"name": "HTML",
"bytes": "5896"
},
{
"name": "Python",
"bytes": "237421"
}
],
"symlink_target": ""
} |
from .futures import FutureBase
from . import async_task
from . import _debug
_debug_options = _debug.options
def result(value):
"""An async equivalent for return for async methods: result(x); return."""
assert not isinstance(
value, FutureBase
), "You probably forgot to yield this value before returning"
raise async_task.AsyncTaskResult(value)
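# Hedged usage sketch (based on the helpers the asynq package documents;
# not part of this module):
#
# from asynq import asynq, result
#
# @asynq()
# def double(x):
#     result(x * 2)
#     return
#
# assert double(3) == 6  # a synchronous call resolves the async task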
| {
"content_hash": "7a168451aa699acd9a7194dbdb68039a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 26.785714285714285,
"alnum_prop": 0.7173333333333334,
"repo_name": "quora/asynq",
"id": "8ec36fa70e5b0d21204e1e6321c76625e8447aef",
"size": "949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asynq/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cython",
"bytes": "14533"
},
{
"name": "Python",
"bytes": "215286"
}
],
"symlink_target": ""
} |
"""
Django settings for cacaomovilcom project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('cacaomovilcom')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.flatpages',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'ckeditor',
'extended_flatpages',
'easy_thumbnails',
'filer',
'mptt',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'cacaomovilcom.users', # custom users app
# Your stuff: custom apps go here
'cacaomovilcom.portal',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'cacaomovilcom.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Javier Wilson""", 'javier.wilson@gmail.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db('DATABASE_URL', default='postgres:///cacaomovilcom'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Managua'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
'cacaomovilcom.portal.context_processors.products',
],
},
#'APP_DIRS': True,
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'cacaomovilcom.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'cacaomovilcom.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# CKEditor
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
CKEDITOR_UPLOAD_PATH = "ckuploads/"
CKEDITOR_IMAGE_BACKEND = "pillow"
CKEDITOR_CONFIGS = {
'default': {
'toolbar': [
['Format', 'Bold', 'Italic', 'Underline', 'SpellChecker',
'-', 'NumberedList', 'BulletedList', 'Indent', 'Outdent', 'JustifyLeft', 'JustifyCenter',
'-', 'JustifyRight', 'JustifyBlock', 'PasteText', 'PasteFromWord',
'-', 'Find', 'Replace', 'Cut', 'Copy', 'Paste',
'-', 'Image', 'Table', 'Link', 'Unlink', 'SectionLink', 'Undo', 'Redo', 'Source',
'Maximize',
],
],
'width': 'auto',
'allowedContent': True,
'removePlugins': 'stylesheetparser',
'extraAllowedContent': 'iframe[*]',
},
}
# THUMBNAILS
THUMBNAIL_ALIASES = {
'': {
'avatar': {'size': (100,100), 'crop': True},
'forum': {'size': (203,103), 'crop': False},
'small': {'size': (203,103), 'crop': False},
'medium': {'size': (250,250), 'crop': False},
},
}
| {
"content_hash": "79c56eac6fd0b3dbaa8daab1792ac275",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 102,
"avg_line_length": 35.99267399267399,
"alnum_prop": 0.60451862405862,
"repo_name": "javierwilson/cacaomovilcom",
"id": "16324f2e693e9c53e84d811ec30a6e82c682f16b",
"size": "9850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10895"
},
{
"name": "HTML",
"bytes": "43388"
},
{
"name": "JavaScript",
"bytes": "529"
},
{
"name": "Python",
"bytes": "53621"
},
{
"name": "Shell",
"bytes": "3932"
}
],
"symlink_target": ""
} |
'''
follow.py
Takes a list of input ingredient names. If there is more than one, arranges
each item in the list in a row, importing as necessary. If there is only one,
arrange next to another object in the scene. If the scene is empty and there's
only one argument, just set it at the origin.
Thomas Storey
2016
'''
import sys
import argparse
import bpy
import numpy as np
import os
import bmesh
from math import *
from mathutils import *
import random
def getObject(objdir, objname):
    if bpy.data.objects.get(objname) is None:
objpath = os.path.join(objdir, objname+".obj")
bpy.ops.import_scene.obj(filepath=objpath,
axis_forward='Y',axis_up='Z')
return bpy.data.objects[objname]
def rotateObjectRandomZ(obj):
# define the rotation
rot_mat = Matrix.Rotation(random.random()*pi*2, 4, 'Z')
# decompose world_matrix's components, and from them assemble 4x4 matrices
orig_loc, orig_rot, orig_scale = obj.matrix_world.decompose()
orig_loc_mat = Matrix.Translation(orig_loc)
orig_rot_mat = orig_rot.to_matrix().to_4x4()
xscale = Matrix.Scale(orig_scale[0],4,(1,0,0))
yscale = Matrix.Scale(orig_scale[1],4,(0,1,0))
zscale = Matrix.Scale(orig_scale[2],4,(0,0,1))
orig_scale_mat = xscale * yscale * zscale
# assemble the new matrix
obj.matrix_world = orig_loc_mat * rot_mat * orig_rot_mat * orig_scale_mat
def translateObjectRandomXY(obj):
# define the translation
loc_mat = Matrix.Translation(((random.random()-0.5)*2.0,
(random.random()-0.5)*2.0,
0.0))
# decompose world_matrix's components, and from them assemble 4x4 matrices
orig_loc, orig_rot, orig_scale = obj.matrix_world.decompose()
orig_loc_mat = Matrix.Translation(orig_loc)
orig_rot_mat = orig_rot.to_matrix().to_4x4()
xscale = Matrix.Scale(orig_scale[0],4,(1,0,0))
yscale = Matrix.Scale(orig_scale[1],4,(0,1,0))
zscale = Matrix.Scale(orig_scale[2],4,(0,0,1))
orig_scale_mat = xscale * yscale * zscale
# assemble the new matrix
obj.matrix_world = loc_mat * orig_rot_mat * orig_scale_mat
def arrange(anchor, obj, i, off, axis):
if(axis == 'X'):
obj.location = ([anchor.location.x + i * off,
anchor.location.y,
anchor.location.z])
elif axis == 'Y':
obj.location = ([anchor.location.x,
anchor.location.y + i * off,
anchor.location.z])
elif axis == 'Z':
obj.location = ([anchor.location.x,
anchor.location.y,
anchor.location.z + i * off])
def execute(inputs, output):
ctx = bpy.context
scn = ctx.scene
cwd = os.getcwd()
objdir = os.path.join(cwd, 'objs')
if len(inputs) > 1:
# arrange each input next to each other
# first put the first one at the origin
objname = inputs[0]
print("following " + objname)
obj = getObject(objdir, objname)
obj.location = Vector([0,0,0])
# get narrowest dimension
(l, w, h) = (obj.dimensions.x, obj.dimensions.y, obj.dimensions.z)
offset = min(l,w,h)
for objname, i in zip(inputs[1:], range(1, len(inputs))):
if(offset == l):
arrange(obj, getObject(objdir, objname), i, offset, 'X')
elif(offset == w):
arrange(obj, getObject(objdir, objname), i, offset, 'Y')
else:
arrange(obj, getObject(objdir, objname), i, offset, 'Z')
elif len(bpy.data.objects) > 1:
# select a random object and arrange the input next to it
objname = inputs[0]
print("following " + objname)
obj = getObject(objdir, objname)
anchor = None
for other in bpy.data.objects:
if other.name != objname:
anchor = other
break
(l, w, h) = (anchor.dimensions.x,
anchor.dimensions.y,
anchor.dimensions.z)
offset = min(l,w,h)
if(offset == l):
arrange(anchor, obj, 1, offset, 'X')
elif(offset == w):
arrange(anchor, obj, 1, offset, 'Y')
else:
arrange(anchor, obj, 1, offset, 'Z')
else:
# put input at the origin
objname = inputs[0]
print("following " + objname)
obj = getObject(objdir, objname)
obj.location = Vector([0,0,0])
# save out .blend
    if output is not None:
bpy.ops.wm.save_as_mainfile(filepath=output,
check_existing=False,relative_remap=False)
else:
bpy.ops.wm.save_mainfile(check_existing=False,relative_remap=False)
def main():
argv = sys.argv
if "--" not in argv:
argv = []
else:
argv = argv[argv.index("--") + 1:]
usage_text =\
"Usage: blender -b [.blend file] --python " + __file__ + " -- [options]"
parser = argparse.ArgumentParser(description=usage_text)
parser.add_argument("-i", "--input", dest="input", type=str, required=True,
help="Comma delimited list of .objs to import. Exclude the file extension.")
parser.add_argument("-o", "--output", dest="output", type=str, required=False,
help="Name of blend file to save to, if not the same as the one being opened.")
args = parser.parse_args(argv)
output = ""
if not argv:
parser.print_help()
return
if not args.input:
print("input argument not given. aborting.")
parser.print_help()
return
if not args.output:
output = None
else:
output = args.output+".blend"
inputs = args.input.split(",")
execute(inputs, output)
print("followed " + ", ".join(inputs))
if __name__ == "__main__":
main()
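# Example invocation (illustrative; 'sugar' and 'flour' are hypothetical .obj
# names expected under ./objs/):
#   blender -b scene.blend --python follow.py -- -i sugar,flour -o arranged
# imports objs/sugar.obj and objs/flour.obj, arranges them in a row along the
# first object's narrowest dimension, and saves arranged.blend.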
| {
"content_hash": "8d52842d18f667c0fc6a7885b15193be",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 83,
"avg_line_length": 35.466257668711656,
"alnum_prop": 0.5877875800034597,
"repo_name": "thomasrstorey/recipesfordisaster",
"id": "df182e50537010580629a4ea07812c829a2c4dee",
"size": "5781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actions/follow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13519"
},
{
"name": "HTML",
"bytes": "7575"
},
{
"name": "JavaScript",
"bytes": "117656"
},
{
"name": "Python",
"bytes": "584598"
}
],
"symlink_target": ""
} |
from StringIO import StringIO
from PIL import Image
import iso_media
params_num = { "LBAS": 0,
"LAEN": 3 }
def wrapLayers(layers, quality = 95, diffquality = 90):
def wrapLayer(layer, boxtype, quality, parameters):
def writeImageBuffer():
buf = ""
for param in parameters:
for val in param:
buf += iso_media.write_int16(val)
output = StringIO()
layer.save(output, "WEBP", quality=quality)
buf += output.getvalue()
output.close()
return buf
return iso_media.write_box(boxtype, writeImageBuffer())
first = True
buf = ""
offset = 0
offsetTable = []
for layer, parameters, res in layers:
currLayer = wrapLayer( layer,
"LBAS" if first else "LAEN",
quality if first else diffquality,
parameters)
        # advance the running offset so the table stores cumulative end offsets
        offset += len(currLayer)
        offsetTable.append((offset, res))
buf += currLayer
first = False
return buf, offsetTable
def unwrapLayers(buf):
def readImageBuffer(imgbuf, type):
offset = 0
parameters = []
for i in range(params_num[type]):
parameters.append((iso_media.read_int16(imgbuf[offset:]),
iso_media.read_int16(imgbuf[offset+2:])))
offset += 4
io = StringIO(imgbuf[offset:])
        img = Image.open(io)
        # force a full decode before closing the buffer; PIL opens lazily
        img.load()
        io.close()
return img, parameters
bufLen = len(buf)
offset = 0
layers = []
while offset < bufLen:
boxLen, boxType, payload = iso_media.read_box(buf[offset:])
offset += boxLen
layers.append(readImageBuffer(payload, boxType))
return layers
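# Round-trip sketch (illustrative; 'img' is a hypothetical PIL image and the
# resolution tuple is made up). The first layer is boxed as "LBAS" (base, no
# parameter pairs per params_num) and later ones as "LAEN" (enhancement,
# three 16-bit parameter pairs prepended):
#   buf, offset_table = wrapLayers([(img, [], (320, 240))])
#   layers = unwrapLayers(buf)   # -> [(decoded_img, [])]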
| {
"content_hash": "c2c5192986f569e6a11546b44ca43aed",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 72,
"avg_line_length": 29.933333333333334,
"alnum_prop": 0.5395322939866369,
"repo_name": "yoavweiss/Responsive-Image-Container",
"id": "a5d089c7dd0800f041cd2b09329dc984e385cbad",
"size": "1796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15443"
}
],
"symlink_target": ""
} |
import time
import json
import os
from datetime import datetime, timedelta
from world import world
from bigml.api import HTTP_CREATED
from bigml.api import HTTP_ACCEPTED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from read_library_steps import i_get_the_library
#@step(r'the library code is "(.*)" and the value of "(.*)" is "(.*)"')
def the_library_code_and_attributes(step, source_code, param, param_value):
    res_param_value = world.library[param]
    assert res_param_value == param_value, (
        "The library %s is %s and the expected %s is %s" %
        (param, res_param_value, param, param_value))
#@step(r'I create a whizzml library from a excerpt of code "(.*)"$')
def i_create_a_library(step, source_code):
resource = world.api.create_library(source_code)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.library = resource['object']
world.libraries.append(resource['resource'])
#@step(r'I update the library with "(.*)", "(.*)"$')
def i_update_a_library(step, param, param_value):
resource = world.api.update_library(world.library['resource'],
{param: param_value})
world.status = resource['code']
assert world.status == HTTP_ACCEPTED
world.location = resource['location']
world.library = resource['object']
#@step(r'I wait until the library status code is either (\d) or (-\d) less than (\d+)')
def wait_until_library_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
library_id = world.library['resource']
i_get_the_library(step, library_id)
status = get_status(world.library)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert datetime.utcnow() - start < timedelta(seconds=int(secs))
i_get_the_library(step, library_id)
status = get_status(world.library)
assert status['code'] == int(code1)
#@step(r'I wait until the library is ready less than (\d+)')
def the_library_is_finished(step, secs):
wait_until_library_status_code_is(step, FINISHED, FAULTY, secs)
| {
"content_hash": "eff5ed9d60130ab01d8620bbe1627d6d",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 87,
"avg_line_length": 36,
"alnum_prop": 0.6497395833333334,
"repo_name": "xaowoodenfish/python-1",
"id": "9fa8f0808a9ca75f4af2e7f5077cefcf5bd83d36",
"size": "2922",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "bigml/tests/create_library_steps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "691861"
}
],
"symlink_target": ""
} |
"""
Provide exception classes for :mod:`migrate.versioning`
"""
class Error(Exception):
"""Error base class."""
class ApiError(Error):
"""Base class for API errors."""
class KnownError(ApiError):
"""A known error condition."""
class UsageError(ApiError):
"""A known error condition where help should be displayed."""
class ControlledSchemaError(Error):
"""Base class for controlled schema errors."""
class InvalidVersionError(ControlledSchemaError):
"""Invalid version number."""
class DatabaseNotControlledError(ControlledSchemaError):
"""Database should be under version control, but it's not."""
class DatabaseAlreadyControlledError(ControlledSchemaError):
"""Database shouldn't be under version control, but it is"""
class WrongRepositoryError(ControlledSchemaError):
"""This database is under version control by another repository."""
class NoSuchTableError(ControlledSchemaError):
"""The table does not exist."""
class PathError(Error):
"""Base class for path errors."""
class PathNotFoundError(PathError):
"""A path with no file was required; found a file."""
class PathFoundError(PathError):
"""A path with a file was required; found no file."""
class RepositoryError(Error):
"""Base class for repository errors."""
class InvalidRepositoryError(RepositoryError):
"""Invalid repository error."""
class ScriptError(Error):
"""Base class for script errors."""
class InvalidScriptError(ScriptError):
"""Invalid script error."""
| {
"content_hash": "51f43a610795a7751dc9f6278cf3d2cc",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 71,
"avg_line_length": 21.413333333333334,
"alnum_prop": 0.7160647571606475,
"repo_name": "denny820909/builder",
"id": "00b5dd6894bf485726e15533f859f1866789ac10",
"size": "1606",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/sqlalchemy_migrate-0.6-py2.6.egg/migrate/versioning/exceptions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
class ConfigurationItem(object):
def __init__(self,uuid,ip_mgmt,login,password):
self.uuid = uuid
self.ip_mgmt = ip_mgmt
self.login = login
self.password = password
        self.ci_parents = [] # List to store parent CIs
        self.ci_children = [] # List to store children CIs
        # TODO: Maintain a structure to query only the drivers that make
        # sense for the CI.
@property
def ci_type(self):
return self.__ci_type
@ci_type.setter
def ci_type(self, ci_type):
self.__ci_type = ci_type
@property
def data(self):
return self.__data
@data.setter
def data(self, data):
self.__data = data
| {
"content_hash": "d05ec467f817443cfa2a3cc69838b1a4",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 73,
"avg_line_length": 24.242424242424242,
"alnum_prop": 0.51125,
"repo_name": "uggla/alexandria",
"id": "7647653b0fa2f45dbe4adfcb443dfc10daa15f3c",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alexandria/configuration_item.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13931"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
"""
This module provides classes to define everything related to band structures.
"""
__author__ = "Geoffroy Hautier, Shyue Ping Ong, Michael Kocher"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Geoffroy Hautier"
__email__ = "geoffroy@uclouvain.be"
__status__ = "Development"
__date__ = "March 14, 2012"
import numpy as np
import math
import itertools
import collections
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.serializers.json_coders import PMGSONable
class Kpoint(PMGSONable):
"""
    Class to store kpoint objects. A kpoint is defined with a lattice and frac
    or cartesian coordinates, with a syntax similar to the Site object in
    pymatgen.core.structure.
Args:
coords: coordinate of the kpoint as a numpy array
lattice: A pymatgen.core.lattice.Lattice lattice object representing
the reciprocal lattice of the kpoint
to_unit_cell: Translates fractional coordinate to the basic unit
cell, i.e., all fractional coordinates satisfy 0 <= a < 1.
Defaults to False.
coords_are_cartesian: Boolean indicating if the coordinates given are
in cartesian or fractional coordinates (by default fractional)
label: the label of the kpoint if any (None by default)
"""
def __init__(self, coords, lattice, to_unit_cell=False,
coords_are_cartesian=False, label=None):
self._lattice = lattice
self._fcoords = lattice.get_fractional_coords(coords) \
if coords_are_cartesian else coords
self._label = label
if to_unit_cell:
for i in range(len(self._fcoords)):
self._fcoords[i] -= math.floor(self._fcoords[i])
self._ccoords = lattice.get_cartesian_coords(self._fcoords)
@property
def lattice(self):
"""
The lattice associated with the kpoint. It's a
pymatgen.core.lattice.Lattice object
"""
return self._lattice
@property
def label(self):
"""
The label associated with the kpoint
"""
return self._label
@property
def frac_coords(self):
"""
The fractional coordinates of the kpoint as a numpy array
"""
return np.copy(self._fcoords)
@property
def cart_coords(self):
"""
The cartesian coordinates of the kpoint as a numpy array
"""
return np.copy(self._ccoords)
@property
def a(self):
"""
Fractional a coordinate of the kpoint
"""
return self._fcoords[0]
@property
def b(self):
"""
Fractional b coordinate of the kpoint
"""
return self._fcoords[1]
@property
def c(self):
"""
Fractional c coordinate of the kpoint
"""
return self._fcoords[2]
def __str__(self):
"""
Returns a string with fractional, cartesian coordinates and label
"""
return "{} {} {}".format(self.frac_coords, self.cart_coords,
self.label)
def as_dict(self):
"""
Json-serializable dict representation of a kpoint
"""
return {"lattice": self.lattice.as_dict(),
"fcoords": list(self.frac_coords),
"ccoords": list(self.cart_coords), "label": self.label,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
class BandStructure(object):
"""
This is the most generic band structure data possible
it's defined by a list of kpoints + energies for each of them
Args:
kpoints: list of kpoint as numpy arrays, in frac_coords of the
given lattice by default
eigenvals: dict of energies for spin up and spin down
{Spin.up:[][],Spin.down:[][]}, the first index of the array
[][] refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. If the band structure is not spin polarized, we
only store one data set under Spin.up
lattice: The reciprocal lattice as a pymatgen Lattice object.
        efermi: fermi energy
        labels_dict: (dict) of {} this links a kpoint (in frac coords or
            cartesian coordinates depending on the coords) to a label.
        coords_are_cartesian: Whether coordinates are cartesian.
structure: The crystal structure (as a pymatgen Structure object)
associated with the band structure. This is needed if we
provide projections to the band structure
projections: dict of orbital projections for spin up and spin down
{Spin.up:[][{Orbital:[]}],Spin.down:[][{Orbital:[]}]. The
format follows the one from eigenvals: The first index of the
array refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. For each band and kpoint, we associate a
dictionary indicating projections on orbitals and on different
sites the keys of the dictionary are Orbital objects and the
values are the projections on each site ordered as in the
structure object. If the band structure is not spin polarized,
we only store one data set under Spin.up.
"""
def __init__(self, kpoints, eigenvals, lattice, efermi, labels_dict=None,
coords_are_cartesian=False, structure=None, projections=None):
self._efermi = efermi
self._lattice_rec = lattice
self._kpoints = []
self._labels_dict = {}
self._structure = structure
self._projections = projections if projections else {}
if labels_dict is None:
labels_dict = {}
if len(self._projections) != 0 and self._structure is None:
raise Exception("if projections are provided a structure object"
" needs also to be given")
for k in kpoints:
            #let's see if this kpoint has been assigned a label
label = None
for c in labels_dict:
if np.linalg.norm(k - np.array(labels_dict[c])) < 0.0001:
label = c
self._labels_dict[label] = Kpoint(
k, lattice, label=label,
coords_are_cartesian=coords_are_cartesian)
self._kpoints.append(
Kpoint(k, lattice, label=label,
coords_are_cartesian=coords_are_cartesian))
self._bands = eigenvals
self._nb_bands = len(eigenvals[Spin.up])
self._is_spin_polarized = False
if len(self._bands) == 2:
self._is_spin_polarized = True
@property
def kpoints(self):
"""
the list of kpoints (as Kpoint objects) in the band structure
"""
return self._kpoints
@property
def lattice(self):
"""
the lattice of the band structure as a pymatgen Lattice object
"""
return self._lattice_rec
@property
def efermi(self):
"""
the fermi energy
"""
return self._efermi
@property
def is_spin_polarized(self):
"""
True if the band structure is spin-polarized, False otherwise
"""
return self._is_spin_polarized
@property
def bands(self):
"""
returns the eigenvalues for each kpoints as a dictionary
{Spin.up:[][],Spin.down:[][]}, the first index of the array
[][] refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
self.kpoints. If the band structure is not spin polarized, we
only store one data set under Spin.up
"""
return self._bands
@property
def nb_bands(self):
"""
returns the number of bands in the band structure
"""
return self._nb_bands
def get_projection_on_elements(self):
"""
Method returning a dictionary of projections on elements.
Returns:
a dictionary in the {Spin.up:[][{Element:values}],
Spin.down:[][{Element:values}]} format
if there is no projections in the band structure
returns an empty dict
"""
if len(self._projections) == 0:
return {}
if self.is_spin_polarized:
result = {Spin.up: [], Spin.down: []}
else:
result = {Spin.up: []}
structure = self._structure
for spin in result:
result[spin] = [[collections.defaultdict(float)
for i in range(len(self._kpoints))]
for j in range(self._nb_bands)]
for i, j, k in itertools.product(list(range(self._nb_bands)),
list(range(len(self._kpoints))),
list(range(structure.num_sites))):
                for orb in self._projections[spin][i][j]:
result[spin][i][j][str(structure[k].specie)] += \
self._projections[spin][i][j][orb][k]
return result
def get_projections_on_elts_and_orbitals(self, dictio):
"""
Method returning a dictionary of projections on elements and specific
orbitals
Args:
dictio: A dictionary of Elements and Orbitals for which we want
to have projections on. It is given as: {Element:[orbitals]},
e.g., {'Cu':['d','s']}
Returns:
A dictionary of projections on elements in the
{Spin.up:[][{Element:{orb:values}}],
Spin.down:[][{Element:{orb:values}}]} format
if there is no projections in the band structure returns an empty
dict.
"""
if len(self._projections) == 0:
return {}
if self.is_spin_polarized:
result = {Spin.up: [], Spin.down: []}
else:
result = {Spin.up: []}
structure = self._structure
for spin in result:
result[spin] = [[{str(e): collections.defaultdict(float)
for e in dictio}
for i in range(len(self._kpoints))]
for j in range(self._nb_bands)]
for i, j, k in itertools.product(
list(range(self._nb_bands)), list(range(len(self._kpoints))),
list(range(structure.num_sites))):
                for orb in self._projections[spin][i][j]:
if str(structure[k].specie) in dictio:
if str(orb)[0] in dictio[str(structure[k].specie)]:
result[spin][i][j][str(structure[k].specie)]\
[str(orb)[0]] += \
self._projections[spin][i][j][orb][k]
return result
def is_metal(self):
"""
Check if the band structure indicates a metal by looking if the fermi
level crosses a band.
Returns:
True if a metal, False if not
"""
for i in range(self._nb_bands):
below = False
above = False
for j in range(len(self._kpoints)):
if self._bands[Spin.up][i][j] < self._efermi:
below = True
if self._bands[Spin.up][i][j] > self._efermi:
above = True
if above and below:
return True
if self.is_spin_polarized:
below = False
above = False
for j in range(len(self._kpoints)):
if self._bands[Spin.down][i][j] < self._efermi:
below = True
if self._bands[Spin.down][i][j] > self._efermi:
above = True
if above and below:
return True
return False
def get_vbm(self):
"""
Returns data about the VBM.
Returns:
dict as {"band_index","kpoint_index","kpoint","energy"}
- "band_index": A dict with spin keys pointing to a list of the
indices of the band containing the VBM (please note that you
can have several bands sharing the VBM) {Spin.up:[],
Spin.down:[]}
- "kpoint_index": The list of indices in self._kpoints for the
kpoint vbm. Please note that there can be several
kpoint_indices relating to the same kpoint (e.g., Gamma can
occur at different spots in the band structure line plot)
- "kpoint": The kpoint (as a kpoint object)
- "energy": The energy of the VBM
- "projections": The projections along sites and orbitals of the
VBM if any projection data is available (else it is an empty
              dictionary). The format is similar to the projections field in
BandStructure: {spin:{'Orbital': [proj]}} where the array
[proj] is ordered according to the sites in structure
"""
if self.is_metal():
return {"band_index": [], "kpoint_index": [],
"kpoint": [], "energy": None, "projections": {}}
max_tmp = -float("inf")
index = None
kpointvbm = None
for i in range(self._nb_bands):
for j in range(len(self._kpoints)):
for spin in self._bands:
if self._bands[spin][i][j] < self._efermi:
if self._bands[spin][i][j] > max_tmp:
max_tmp = self._bands[spin][i][j]
index = j
kpointvbm = self._kpoints[j]
list_ind_kpts = []
if kpointvbm.label is not None:
for i in range(len(self._kpoints)):
if self._kpoints[i].label == kpointvbm.label:
list_ind_kpts.append(i)
else:
list_ind_kpts.append(index)
#get all other bands sharing the vbm
list_ind_band = {Spin.up: []}
if self.is_spin_polarized:
list_ind_band = {Spin.up: [], Spin.down: []}
for spin in self._bands:
for i in range(self._nb_bands):
if math.fabs(self._bands[spin][i][index] - max_tmp) < 0.001:
list_ind_band[spin].append(i)
proj = {}
if len(self._projections) != 0:
for spin in list_ind_band:
if len(list_ind_band[spin]) == 0:
continue
proj[spin] =\
self._projections[spin][list_ind_band[spin][0]][
list_ind_kpts[0]]
return {'band_index': list_ind_band,
'kpoint_index': list_ind_kpts,
'kpoint': kpointvbm, 'energy': max_tmp,
'projections': proj}
def get_cbm(self):
"""
Returns data about the CBM.
Returns:
{"band_index","kpoint_index","kpoint","energy"}
- "band_index": A dict with spin keys pointing to a list of the
indices of the band containing the VBM (please note that you
can have several bands sharing the VBM) {Spin.up:[],
Spin.down:[]}
- "kpoint_index": The list of indices in self._kpoints for the
kpoint vbm. Please note that there can be several
kpoint_indices relating to the same kpoint (e.g., Gamma can
occur at different spots in the band structure line plot)
- "kpoint": The kpoint (as a kpoint object)
- "energy": The energy of the VBM
- "projections": The projections along sites and orbitals of the
VBM if any projection data is available (else it is an empty
dictionnary). The format is similar to the projections field in
BandStructure: {spin:{'Orbital': [proj]}} where the array
[proj] is ordered according to the sites in structure
"""
if self.is_metal():
return {"band_index": [], "kpoint_index": [],
"kpoint": [], "energy": None, "projections": {}}
max_tmp = float("inf")
index = None
kpointcbm = None
for spin in self._bands:
for i in range(self._nb_bands):
for j in range(len(self._kpoints)):
if self._bands[spin][i][j] > self._efermi:
if self._bands[spin][i][j] < max_tmp:
max_tmp = self._bands[spin][i][j]
index = j
kpointcbm = self._kpoints[j]
list_index_kpoints = []
if kpointcbm.label is not None:
for i in range(len(self._kpoints)):
if self._kpoints[i].label == kpointcbm.label:
list_index_kpoints.append(i)
else:
list_index_kpoints.append(index)
        #get all other bands sharing the cbm
list_index_band = {Spin.up: []}
if self.is_spin_polarized:
list_index_band = {Spin.up: [], Spin.down: []}
for spin in self._bands:
for i in range(self._nb_bands):
if math.fabs(self._bands[spin][i][index] - max_tmp) < 0.001:
list_index_band[spin].append(i)
proj = {}
if len(self._projections) != 0:
for spin in list_index_band:
if len(list_index_band[spin]) == 0:
continue
proj[spin] = self._projections[spin][list_index_band[spin][0]][
list_index_kpoints[0]]
return {'band_index': list_index_band,
'kpoint_index': list_index_kpoints,
'kpoint': kpointcbm, 'energy': max_tmp,
'projections': proj}
def get_band_gap(self):
"""
Returns band gap data.
Returns:
A dict {"energy","direct","transition"}:
"energy": band gap energy
"direct": A boolean telling if the gap is direct or not
"transition": kpoint labels of the transition (e.g., "\Gamma-X")
"""
if self.is_metal():
return {"energy": 0.0, "direct": False, "transition": None}
cbm = self.get_cbm()
vbm = self.get_vbm()
result = dict(direct=False, energy=0.0, transition=None)
result["energy"] = cbm["energy"] - vbm["energy"]
if cbm["kpoint"].label == vbm["kpoint"].label or \
np.linalg.norm(cbm["kpoint"].cart_coords
- vbm["kpoint"].cart_coords) < 0.01:
result["direct"] = True
result["transition"] = "-".join(
[str(c.label) if c.label is not None else
str("(") + ",".join(["{0:.3f}".format(c.frac_coords[i])
for i in range(3)])
+ str(")") for c in [vbm["kpoint"], cbm["kpoint"]]])
return result
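    # Illustrative sketch of get_band_gap() output (values invented):
    #   a direct-gap insulator -> {"energy": 1.1, "direct": True,
    #                              "transition": "\Gamma-\Gamma"}
    #   a metal                -> {"energy": 0.0, "direct": False,
    #                              "transition": None}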
def get_direct_band_gap(self):
"""
Returns the direct band gap.
Returns:
the value of the direct band gap
"""
if self.is_metal():
return 0.0
lowest_conduction_band = []
highest_valence_band = []
for j in range(len(self._bands[Spin.up])):
for i in range(len(self.kpoints)):
if self._bands[Spin.up][j][i] > self._efermi:
lowest_conduction_band.append(self._bands[Spin.up][j][i])
highest_valence_band.append(self._bands[Spin.up][j-1][i])
if self.is_spin_polarized:
lowest_conduction_band_d = []
highest_valence_band_d = []
for j in range(len(self._bands[Spin.down])):
for i in range(len(self.kpoints)):
if self._bands[Spin.down][j][i] > self._efermi:
lowest_conduction_band_d.append(self._bands[Spin.down][j][i])
highest_valence_band_d.append(self._bands[Spin.down][j-1][i])
diff = []
for i in range(len(self.kpoints)):
diff.append(min([lowest_conduction_band[i],lowest_conduction_band_d[i]])
- max([highest_valence_band[i],highest_valence_band_d[i]]))
return min(diff)
diff = []
for i in range(len(self.kpoints)):
diff.append(lowest_conduction_band[i] - highest_valence_band[i])
return min(diff)
def as_dict(self):
"""
        Json-serializable dict representation of BandStructure.
"""
d = {"module": self.__class__.__module__,
"class": self.__class__.__name__,
"lattice_rec": self._lattice_rec.as_dict(), "efermi": self._efermi,
"kpoints": []}
        # kpoints are stored as frac coords, not Kpoint dicts (this makes the
        # dict smaller and avoids repeating the lattice)
for k in self._kpoints:
d["kpoints"].append(k.as_dict()["fcoords"])
d["bands"] = {str(int(spin)): self._bands[spin]
for spin in self._bands}
d["is_metal"] = self.is_metal()
vbm = self.get_vbm()
d["vbm"] = {"energy": vbm["energy"],
"kpoint_index": vbm["kpoint_index"],
"band_index": {str(int(spin)): vbm["band_index"][spin]
for spin in vbm["band_index"]},
'projections': {str(spin): {str(orb):
vbm['projections'][spin][orb]
for orb in vbm['projections'][spin]}
for spin in vbm['projections']}}
cbm = self.get_cbm()
d['cbm'] = {'energy': cbm['energy'],
'kpoint_index': cbm['kpoint_index'],
'band_index': {str(int(spin)): cbm['band_index'][spin]
for spin in cbm['band_index']},
'projections': {str(spin): {str(orb):
cbm['projections'][spin][orb]
for orb in cbm['projections'][spin]}
for spin in cbm['projections']}}
d['band_gap'] = self.get_band_gap()
d['labels_dict'] = {}
d['is_spin_polarized'] = self.is_spin_polarized
for c in self._labels_dict:
d['labels_dict'][c] = self._labels_dict[c].as_dict()['fcoords']
d['projections'] = {}
if len(self._projections) != 0:
d['structure'] = self._structure.as_dict()
d['projections'] = {
str(int(spin)): [
[{str(orb): [
self._projections[spin][i][j][orb][k]
for k in range(len(self._projections[spin][i][j][orb]))]
for orb in self._projections[spin][i][j]}
for j in range(len(self._projections[spin][i]))]
for i in range(len(self._projections[spin]))]
for spin in self._projections}
return d
@classmethod
def from_dict(cls, d):
"""
Create from dict.
Args:
            d: A dict with all data for a band structure object.
Returns:
A BandStructure object
"""
labels_dict = d['labels_dict']
projections = {}
structure = None
if 'structure' in d:
structure = Structure.from_dict(d['structure'])
if 'projections' in d and len(d['projections']) != 0:
projections = {
Spin.from_int(int(spin)): [
[{Orbital.from_string(orb): [
d['projections'][spin][i][j][orb][k]
for k in range(len(d['projections'][spin][i][j][orb]))]
for orb in d['projections'][spin][i][j]}
for j in range(len(d['projections'][spin][i]))]
for i in range(len(d['projections'][spin]))]
for spin in d['projections']}
return BandStructure(
d['kpoints'], {Spin.from_int(int(k)): d['bands'][k]
for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
class BandStructureSymmLine(BandStructure, PMGSONable):
"""
This object stores band structures along selected (symmetry) lines in the
Brillouin zone. We call the different symmetry lines (ex: \Gamma to Z)
"branches".
Args:
kpoints: list of kpoint as numpy arrays, in frac_coords of the
given lattice by default
eigenvals: dict of energies for spin up and spin down
{Spin.up:[][],Spin.down:[][]}, the first index of the array
[][] refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. If the band structure is not spin polarized, we
only store one data set under Spin.up.
lattice: The reciprocal lattice.
efermi: fermi energy
        labels_dict: (dict) of {} this links a kpoint (in frac coords or
            cartesian coordinates depending on the coords) to a label.
coords_are_cartesian: Whether coordinates are cartesian.
structure: The crystal structure (as a pymatgen Structure object)
associated with the band structure. This is needed if we
provide projections to the band structure.
projections: dict of orbital projections for spin up and spin down
{Spin.up:[][{Orbital:[]}],Spin.down:[][{Orbital:[]}]. The
format follows the one from eigenvals: the first index of the
array refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. For each band and kpoint, we associate a
dictionary indicating projections on orbitals and on different
sites the keys of the dictionary are Orbital objects and the
values are the projections on each site ordered as in the
structure object. If the band structure is not spin polarized,
we only store one data set under Spin.up.
"""
def __init__(self, kpoints, eigenvals, lattice, efermi, labels_dict,
coords_are_cartesian=False, structure=None,
projections=None):
BandStructure.__init__(self, kpoints, eigenvals, lattice, efermi,
labels_dict, coords_are_cartesian, structure,
projections)
self._distance = []
self._branches = []
one_group = []
branches_tmp = []
#get labels and distance for each kpoint
previous_kpoint = self._kpoints[0]
previous_distance = 0.0
previous_label = self._kpoints[0].label
for i in range(len(self._kpoints)):
label = self._kpoints[i].label
if label is not None and previous_label is not None:
self._distance.append(previous_distance)
else:
self._distance.append(
np.linalg.norm(self._kpoints[i].cart_coords -
previous_kpoint.cart_coords) +
previous_distance)
previous_kpoint = self._kpoints[i]
previous_distance = self._distance[i]
if label:
if previous_label:
if len(one_group) != 0:
branches_tmp.append(one_group)
one_group = []
previous_label = label
one_group.append(i)
if len(one_group) != 0:
branches_tmp.append(one_group)
for b in branches_tmp:
self._branches.append({"start_index": b[0], "end_index": b[-1],
"name": (self._kpoints[b[0]].label + "-" +
self._kpoints[b[-1]].label)})
self._is_spin_polarized = False
if len(self._bands) == 2:
self._is_spin_polarized = True
def get_equivalent_kpoints(self, index):
"""
Returns the list of kpoint indices equivalent (meaning they are the
same frac coords) to the given one.
Args:
index: the kpoint index
Returns:
a list of equivalent indices
        TODO: now it uses the label; we might want to use coordinates instead
        (in case there was a mislabel)
"""
#if the kpoint has no label it can"t have a repetition along the band
#structure line object
if self._kpoints[index].label is None:
return [index]
list_index_kpoints = []
for i in range(len(self._kpoints)):
if self._kpoints[i].label == self._kpoints[index].label:
list_index_kpoints.append(i)
return list_index_kpoints
def get_branch(self, index):
"""
Returns in what branch(es) is the kpoint. There can be several
branches.
Args:
index: the kpoint index
Returns:
A list of dictionaries [{"name","start_index","end_index","index"}]
indicating all branches in which the k_point is. It takes into
account the fact that one kpoint (e.g., \Gamma) can be in several
branches
"""
to_return = []
for i in self.get_equivalent_kpoints(index):
for b in self._branches:
if b["start_index"] <= i <= b["end_index"]:
to_return.append({"name": b["name"],
"start_index": b["start_index"],
"end_index": b["end_index"],
"index": i})
return to_return
def apply_scissor(self, new_band_gap):
"""
Apply a scissor operator (shift of the CBM) to fit the given band gap.
        If it's a metal, we look for the band crossing the fermi level
and shift this one up. This will not work all the time for metals!
Args:
new_band_gap: the band gap the scissor band structure need to have.
Returns:
a BandStructureSymmLine object with the applied scissor shift
"""
if self.is_metal():
            #move up the highest-index band crossing the fermi level
            #find this band...
max_index = -1000
#spin_index = None
for i in range(self._nb_bands):
below = False
above = False
for j in range(len(self._kpoints)):
if self._bands[Spin.up][i][j] < self._efermi:
below = True
if self._bands[Spin.up][i][j] > self._efermi:
above = True
if above and below:
if i > max_index:
max_index = i
#spin_index = Spin.up
if self.is_spin_polarized:
below = False
above = False
for j in range(len(self._kpoints)):
if self._bands[Spin.down][i][j] < self._efermi:
below = True
if self._bands[Spin.down][i][j] > self._efermi:
above = True
if above and below:
if i > max_index:
max_index = i
#spin_index = Spin.down
old_dict = self.as_dict()
shift = new_band_gap
for spin in old_dict['bands']:
for k in range(len(old_dict['bands'][spin])):
for v in range(len(old_dict['bands'][spin][k])):
if k >= max_index:
old_dict['bands'][spin][k][v] = \
old_dict['bands'][spin][k][v] + shift
else:
shift = new_band_gap - self.get_band_gap()['energy']
old_dict = self.as_dict()
for spin in old_dict['bands']:
for k in range(len(old_dict['bands'][spin])):
for v in range(len(old_dict['bands'][spin][k])):
if old_dict['bands'][spin][k][v] >= \
old_dict['cbm']['energy']:
old_dict['bands'][spin][k][v] = \
old_dict['bands'][spin][k][v] + shift
old_dict['efermi'] = old_dict['efermi'] + shift
return BandStructureSymmLine.from_dict(old_dict)
def as_dict(self):
"""
Json-serializable dict representation of BandStructureSymmLine.
"""
d = {"module": self.__class__.__module__,
"class": self.__class__.__name__,
"lattice_rec": self._lattice_rec.as_dict(), "efermi": self._efermi,
"kpoints": []}
        # kpoints are stored as frac coords, not Kpoint dicts (this makes the
        # dict smaller and avoids repeating the lattice)
for k in self._kpoints:
d["kpoints"].append(k.as_dict()["fcoords"])
d["branches"] = self._branches
d["bands"] = {str(int(spin)): self._bands[spin]
for spin in self._bands}
d["is_metal"] = self.is_metal()
vbm = self.get_vbm()
d["vbm"] = {"energy": vbm["energy"],
"kpoint_index": vbm["kpoint_index"],
"band_index": {str(int(spin)): vbm["band_index"][spin]
for spin in vbm["band_index"]},
'projections': {str(spin): {str(orb):
vbm['projections'][spin][orb]
for orb in vbm['projections'][spin]}
for spin in vbm['projections']}}
cbm = self.get_cbm()
d['cbm'] = {'energy': cbm['energy'],
'kpoint_index': cbm['kpoint_index'],
'band_index': {str(int(spin)): cbm['band_index'][spin]
for spin in cbm['band_index']},
'projections': {str(spin): {str(orb):
cbm['projections'][spin][orb]
for orb in cbm['projections'][spin]}
for spin in cbm['projections']}}
d['band_gap'] = self.get_band_gap()
d['labels_dict'] = {}
d['is_spin_polarized'] = self.is_spin_polarized
        # MongoDB does not accept keys starting with $. Add a blank space to fix the problem
for c in self._labels_dict:
mongo_key = c if not c.startswith("$") else " " + c
d['labels_dict'][mongo_key] = self._labels_dict[c].as_dict()['fcoords']
d['projections'] = {}
if len(self._projections) != 0:
d['structure'] = self._structure.as_dict()
d['projections'] = {
str(int(spin)): [
[{str(orb): [
self._projections[spin][i][j][orb][k]
for k in range(len(self._projections[spin][i][j][orb]))]
for orb in self._projections[spin][i][j]}
for j in range(len(self._projections[spin][i]))]
for i in range(len(self._projections[spin]))]
for spin in self._projections}
return d
@classmethod
def from_dict(cls, d):
"""
Args:
            d: A dict with all data for a band structure symm line object.
Returns:
A BandStructureSymmLine object
"""
# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
projections = {}
structure = None
if 'projections' in d and len(d['projections']) != 0:
structure = Structure.from_dict(d['structure'])
projections = {
Spin.from_int(int(spin)): [
[{Orbital.from_string(orb): [
d['projections'][spin][i][j][orb][k]
for k in range(len(d['projections'][spin][i][j][orb]))]
for orb in d['projections'][spin][i][j]}
for j in range(len(d['projections'][spin][i]))]
for i in range(len(d['projections'][spin]))]
for spin in d['projections']}
return BandStructureSymmLine(
d['kpoints'], {Spin.from_int(int(k)): d['bands'][k]
for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
def get_reconstructed_band_structure(list_bs, efermi=None):
"""
This method takes a list of band structures
and reconstruct one band structure object from all of them
this is typically very useful when you split non self consistent
band structure runs in several independent jobs and want to merge back
the results
Args:
list_bs: A list of BandStructure
efermi: The fermi energy of the reconstructed band structure. If
None is assigned an average of all the fermi energy in each
object in the list_bs is used.
Returns:
A BandStructure or BandStructureSymmLine object (depending on
the type of the list_bs objects)
"""
if efermi is None:
efermi = sum([b.efermi for b in list_bs]) / len(list_bs)
kpoints = []
labels_dict = {}
rec_lattice = list_bs[0]._lattice_rec
nb_bands = min([list_bs[i]._nb_bands for i in range(len(list_bs))])
for bs in list_bs:
for k in bs._kpoints:
kpoints.append(k.frac_coords)
for k, v in bs._labels_dict.items():
labels_dict[k] = v.frac_coords
eigenvals = {Spin.up: [list_bs[0]._bands[Spin.up][i]
for i in range(nb_bands)]}
for i in range(nb_bands):
for bs in list_bs[1:]:
for e in bs._bands[Spin.up][i]:
eigenvals[Spin.up][i].append(e)
if list_bs[0].is_spin_polarized:
eigenvals[Spin.down] = [list_bs[0]._bands[Spin.down][i]
for i in range(nb_bands)]
for i in range(nb_bands):
for bs in list_bs[1:]:
for e in bs._bands[Spin.down][i]:
eigenvals[Spin.down][i].append(e)
projections = {}
if len(list_bs[0]._projections) != 0:
projections = {Spin.up: [list_bs[0]._projections[Spin.up][i]
for i in range(nb_bands)]}
for i in range(nb_bands):
for bs in list_bs[1:]:
projections[Spin.up][i].extend(bs._projections[Spin.up][i])
if list_bs[0].is_spin_polarized:
projections[Spin.down] = [list_bs[0]._projections[Spin.down][i]
for i in range(nb_bands)]
for i in range(nb_bands):
for bs in list_bs[1:]:
projections[Spin.down][i].extend(
bs._projections[Spin.down][i])
if isinstance(list_bs[0], BandStructureSymmLine):
return BandStructureSymmLine(kpoints, eigenvals, rec_lattice,
efermi, labels_dict,
structure=list_bs[0]._structure,
projections=projections)
else:
return BandStructure(kpoints, eigenvals, rec_lattice, efermi,
labels_dict, structure=list_bs[0]._structure,
projections=projections)
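# Merge sketch (illustrative; bs1 and bs2 are hypothetical
# BandStructureSymmLine objects from two non-self-consistent runs covering
# different k-point segments):
#   merged = get_reconstructed_band_structure([bs1, bs2])
#   merged.get_band_gap()["energy"]   # gap of the recombined structure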
| {
"content_hash": "7ce869cc0665bd6bc4dc79a1c956800c",
"timestamp": "",
"source": "github",
"line_count": 979,
"max_line_length": 97,
"avg_line_length": 41.83248212461696,
"alnum_prop": 0.5188748351809347,
"repo_name": "yanikou19/pymatgen",
"id": "5105c74ec86f0808a1bdb5d5f2c04f75ceaec857",
"size": "40971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/electronic_structure/bandstructure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7429"
},
{
"name": "JavaScript",
"bytes": "3638"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "3368797"
},
{
"name": "Shell",
"bytes": "5100"
}
],
"symlink_target": ""
} |
'''
Created on 4 Feb 2016
@author: fressi
'''
from __future__ import absolute_import
import sys
import profiterole
_COMMANDS = {}
def main(argv=None):
'Profit main entry point.'
if argv is None:
argv = sys.argv
if '--version' in argv[1:]:
print_message(profiterole.__version__)
return 0
try:
command = _COMMANDS[argv[1]]
except (KeyError, IndexError):
print_message(
"Invalid command: {!s}", ' '.join((repr(a) for a in argv[1:])))
return 1
else:
return command(argv)
def print_message(message, *args, **kwargs):
'Formats and prints a message to standard output.'
sys.stdout.write((message + '\n').format(*args, **kwargs))
def command(func):
'Registers a function as a command.'
assert callable(func)
_COMMANDS[func.__name__.replace('_', '-')] = func
return func
@command
def get_sources(argv):
'get-sources command entry point.'
return 0
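# Dispatch sketch (illustrative): @command registers each function under its
# dash-separated name, so
#   main(['profiterole', 'get-sources'])   # -> runs get_sources, returns 0
#   main(['profiterole', 'bogus'])         # -> prints "Invalid command", returns 1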
| {
"content_hash": "b37ac7a6cb4223873983ba7e36eb595b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 75,
"avg_line_length": 17.854545454545455,
"alnum_prop": 0.6018329938900203,
"repo_name": "FedericoRessi/profiterole",
"id": "e33ece5d60c7aa8dc3a27a4b1e642a950ce253dd",
"size": "982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiterole/command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6074"
}
],
"symlink_target": ""
} |
import hurl
urlpatterns = hurl.patterns('pipeye.accounts.views', {
'preferences': 'preferences',
})
| {
"content_hash": "4d88369dea2568eea86ef8de6f8e8145",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 54,
"avg_line_length": 21,
"alnum_prop": 0.7047619047619048,
"repo_name": "oinopion/pipeye",
"id": "70152a27be6d24b0d0a045f14c54f788e547a94b",
"size": "105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipeye/accounts/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "58516"
},
{
"name": "Python",
"bytes": "60140"
}
],
"symlink_target": ""
} |
import numpy as np
from keras.callbacks import TensorBoard, LambdaCallback
from keras.layers import Dense, Activation, Convolution1D, MaxPooling1D, Flatten, Dropout
from keras.models import Sequential, model_from_yaml
from keras.optimizers import Adam
class CNNModel:
def __init__(self, model):
self.model = model
@classmethod
def createCNN(cls, sequence_length, alphabet_size, filters=512, kernel_size=20, strides=1, pool_size=2, depth=None,
dropouts=None, lr=0.001, decay=0.0):
if not depth:
depth = int(np.ceil(np.log(sequence_length) / np.log(kernel_size)))
if not dropouts:
dropouts = (0.5, 0.5)
train_model = Sequential()
# train_model.add(Dropout(dropouts[0],input_shape=(sequence_length, alphabet_size)))
train_model.add(Convolution1D(filters=filters, kernel_size=kernel_size, strides=strides, activation='relu',
input_shape=(sequence_length, alphabet_size)))
for i in range(1, depth):
train_model.add(Convolution1D(filters=filters, kernel_size=kernel_size, strides=strides, activation='relu'))
train_model.add(MaxPooling1D(pool_size=pool_size))
train_model.add(Dropout(dropouts[0]))
train_model.add(Flatten())
train_model.add(Dense(alphabet_size))
train_model.add(Activation('softmax'))
optimizer = Adam(lr=lr, decay=decay)
train_model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
train_model.summary()
return cls(train_model)
def save(self, filename):
with open(filename + '.model.yaml', 'w') as out:
out.write(self.model.to_yaml())
self.model.save_weights(filename + '.model.weights')
@classmethod
def load(cls, filename, lr=0.001, decay=0):
with open(filename + '.model.yaml', 'r') as infile:
model = model_from_yaml(infile.read())
model.load_weights(filename + '.model.weights')
optimizer = Adam(lr=lr, decay=decay)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
return cls(model)
def sequence_length(self):
return self.model.input_shape[1]
def depth(self):
return len(self.model.layers) - 2
def fit(self, X, y, batch_size, epochs, shuffle=False, filename=None, tf_log_path=None, callbacks=None,
validation_data=None):
if callbacks:
callbacks = list(callbacks)
else:
callbacks = []
if filename:
save_on_epochs_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: self.save(filename))
callbacks.append(save_on_epochs_callback)
if tf_log_path:
tensorboard_callback = TensorBoard(log_dir=tf_log_path, histogram_freq=0, write_graph=True,
write_images=False)
            tensorboard_callback.set_model(self.model)
callbacks.append(tensorboard_callback)
self.model.fit(X, y, validation_data=validation_data, batch_size=batch_size, epochs=epochs,
shuffle=shuffle, callbacks=callbacks)
def predict(self, x):
if len(x.shape) == 2:
if self.sequence_length() and self.sequence_length() > x.shape[0]:
missing = self.sequence_length() - x.shape[0]
zeros = np.zeros((missing, x.shape[1]), dtype=np.bool)
x = np.append(zeros, x, axis=0)
x = np.expand_dims(x, axis=0)
return self.model.predict(x)[0]
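# Usage sketch (illustrative; shapes and hyperparameters are made up):
#   model = CNNModel.createCNN(sequence_length=1024, alphabet_size=64)
#   model.fit(X, y, batch_size=32, epochs=5, filename='charcnn')
#   probs = model.predict(x)   # x: (sequence_length, alphabet_size) one-hot
# predict() zero-pads shorter inputs on the left before running the network.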
| {
"content_hash": "31b5b35e7f89ea1fa39ae84f1447615d",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 120,
"avg_line_length": 38.142857142857146,
"alnum_prop": 0.6166399143927234,
"repo_name": "aesuli/text-model",
"id": "b5876f4d1973445aabd56f84546611a540f075b0",
"size": "3738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cnnmodel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45241"
}
],
"symlink_target": ""
} |
import sys
# sums all columns in biom table into one column
def collapse_biom(biom,out):
with open(biom) as b:
temp = b.readline()
temp = temp.split(']}}],"columns":')[0]
temp = temp.split(']],"rows": [{"id": "')
rows = temp[1]
data = temp[0].split(',"data": [[')[1]
    # Data is in the following format:
# x1, x2, x3
# x1, x2 are matrix coordinates
# x3 is number of reads which is always 1
# as in biom each column is a single read and each row is a single taxon
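    # Worked example (hypothetical values): data = ['0,0,1', '0,1,1', '1,2,1']
    # means taxon 0 was seen in read columns 0 and 1 and taxon 1 in column 2,
    # so the collapsed counts are {taxon 0: 2, taxon 1: 1}.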
data = data.split('],[')
rows = rows.split(']}},{"id": "')
values = {}
# Go through matrix and add up all values for each taxon
    # fast path: every entry has count exactly 1 (one read per column)
    if all(x.split(',')[-1] == '1' for x in data):
prev_line = 0
last_column = 0
for el in data:
temp = el.split(',')
now_line = int(temp[0])
if now_line != prev_line:
now_column = int(temp[1])
                # Assign as many counts to a taxon as there were reads
                # (columns) assigned to it
                # (Every column may have no more than 1 non zero value which is 1)
num = now_column - last_column
values[rows[prev_line]] = num
last_column = now_column
prev_line = now_line
        # the last taxon gets all remaining read columns
        values[rows[prev_line]] = len(data) - last_column
else:
for i in range(len(data)):
if not(rows[i] in values):
values[rows[i]] = int(data[i].split(',')[-1])
else:
values[rows[i]] += int(data[i].split(',')[-1])
lines = []
with open(out, 'a') as t:
for el in values:
temp = el.split('", "metadata": {"taxonomy": [')
lines.append(temp[0] + '\t' + str(values[el]) + '\t' + temp[1] + '\n')
lines.sort(key = lambda x: int(x.split('\t')[1]), reverse=True)
for line in lines:
t.write(line)
collapse_biom(sys.argv[1],sys.argv[2])
| {
"content_hash": "05866921e4843954fc9437c518137e87",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 94,
"avg_line_length": 37.588235294117645,
"alnum_prop": 0.5101721439749609,
"repo_name": "lotrus28/TaboCom",
"id": "7ea64bfa784352db3f475d07e09e602c6e3c6d19",
"size": "1917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiime_16s/collapse_biom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "190515"
},
{
"name": "R",
"bytes": "37394"
},
{
"name": "Shell",
"bytes": "16562"
}
],
"symlink_target": ""
} |
"""
Django settings for executive project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7wjs^*f9z-zr+)#k284*1tljivf4ebxrmmy)6#2iz&nkb!34=u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
#PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
("C:/projects/vcloudexecutive/executive/templates/"),
("C:/projects/vcloudexecutive/executive/cloudcontrol/templates/"),
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cloudcontrol',
'executive',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'executive.urls'
WSGI_APPLICATION = 'executive.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
"C:/Projects/vcloudexecutive/executive/static/",
)
| {
"content_hash": "2bf2ae5cca4acda480c096a40e6cf020",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 88,
"avg_line_length": 26.895238095238096,
"alnum_prop": 0.7234419263456091,
"repo_name": "bmcollier/vcloudexecutive",
"id": "e3f5ae3ea16fe4a9a4c2e7c34359e87aed3a56c8",
"size": "2824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "executive/executive/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "438"
},
{
"name": "JavaScript",
"bytes": "761"
},
{
"name": "Python",
"bytes": "7864"
}
],
"symlink_target": ""
} |
import rclpy
from rclpy.node import Node
from std_srvs.srv import Empty
from bitbots_msgs.msg import Buttons
from rclpy.duration import Duration
rclpy.init(args=None)
node = Node('zero_on_button')
zero_l = node.create_client(Empty, "/foot_pressure_left/set_foot_zero")
zero_r = node.create_client(Empty, "/foot_pressure_right/set_foot_zero")
button_prev_state = False
press_time = node.get_clock().now() - Duration(seconds=1.0)
def cb(msg):
global button_prev_state, press_time
print("New msg")
    print(msg.button3)
print(not button_prev_state)
print(node.get_clock().now() - press_time > Duration(seconds=1.0))
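    # Fire only on a rising edge of button 3, debounced to one press per second.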
if msg.button3 and not button_prev_state and node.get_clock().now() - press_time > Duration(seconds=1.0):
request = Empty.Request()
zero_l.call_async(request)
zero_r.call_async(request)
press_time = node.get_clock().now()
button_prev_state = msg.button3
node.create_subscription(Buttons, "/buttons", cb, 1)
rclpy.spin(node)
| {
"content_hash": "a22539bc3dcdefc1b0dc878b19376276",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 109,
"avg_line_length": 33,
"alnum_prop": 0.7064393939393939,
"repo_name": "bit-bots/bitbots_lowlevel",
"id": "7f874c7560a53af24917e89ed3c96e54bb991692",
"size": "1079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bitbots_ros_control/scripts/zero_on_button.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "132796"
},
{
"name": "CMake",
"bytes": "5100"
},
{
"name": "Python",
"bytes": "32411"
}
],
"symlink_target": ""
} |
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_chocolatey
version_added: '1.9'
short_description: Manage packages using Chocolatey
description:
- Manage packages using Chocolatey.
- If Chocolatey is missing from the system, the module will install it.
requirements:
- chocolatey >= 0.10.5 (will be upgraded if older)
options:
allow_empty_checksums:
description:
    - Allow empty checksums to be used for downloaded resources from non-secure
      locations.
- Use M(win_chocolatey_feature) with the name C(allowEmptyChecksums) to
control this option globally.
type: bool
default: no
version_added: '2.2'
allow_multiple:
description:
- Allow the installation of multiple packages when I(version) is specified.
- Having multiple packages at different versions can cause issues if the
package doesn't support this. Use at your own risk.
type: bool
default: no
version_added: '2.8'
allow_prerelease:
description:
- Allow the installation of pre-release packages.
- If I(state) is C(latest), the latest pre-release package will be
installed.
type: bool
default: no
version_added: '2.6'
architecture:
description:
- Force Chocolatey to install the package of a specific process
architecture.
- When setting C(x86), will ensure Chocolatey installs the x86 package
even when on an x64 bit OS.
type: str
choices: [ default, x86 ]
default: default
version_added: '2.7'
force:
description:
- Forces the install of a package, even if it already is installed.
- Using I(force) will cause Ansible to always report that a change was
made.
type: bool
default: no
install_args:
description:
- Arguments to pass to the native installer.
    - These are arguments that are passed directly to the installer the
      Chocolatey package runs; this is generally an advanced option.
type: str
version_added: '2.1'
ignore_checksums:
description:
- Ignore the checksums provided by the package.
- Use M(win_chocolatey_feature) with the name C(checksumFiles) to control
this option globally.
type: bool
default: no
version_added: '2.2'
ignore_dependencies:
description:
- Ignore dependencies, only install/upgrade the package itself.
type: bool
default: no
version_added: '2.1'
name:
description:
- Name of the package(s) to be installed.
- Set to C(all) to run the action on all the installed packages.
type: list
required: yes
package_params:
description:
- Parameters to pass to the package.
- These are parameters specific to the Chocolatey package and are generally
documented by the package itself.
- Before Ansible 2.7, this option was just I(params).
type: str
version_added: '2.1'
aliases: [ params ]
pinned:
description:
- Whether to pin the Chocolatey package or not.
- If omitted then no checks on package pins are done.
- Will pin/unpin the specific version if I(version) is set.
- Will pin the latest version of a package if C(yes), I(version) is not set
      and no pin already exists.
- Will unpin all versions of a package if C(no) and I(version) is not set.
- This is ignored when C(state=absent).
type: bool
version_added: '2.8'
proxy_url:
description:
- Proxy URL used to install chocolatey and the package.
- Use M(win_chocolatey_config) with the name C(proxy) to control this
option globally.
type: str
version_added: '2.4'
proxy_username:
description:
- Proxy username used to install Chocolatey and the package.
- Before Ansible 2.7, users with double quote characters C(") would need to
be escaped with C(\) beforehand. This is no longer necessary.
- Use M(win_chocolatey_config) with the name C(proxyUser) to control this
option globally.
type: str
version_added: '2.4'
proxy_password:
description:
- Proxy password used to install Chocolatey and the package.
    - This value is exposed as a command argument and any privileged account
      can see this value when the module is running Chocolatey; define the
      password on the global config level with M(win_chocolatey_config) with
      name C(proxyPassword) to avoid this.
type: str
version_added: '2.4'
skip_scripts:
description:
- Do not run I(chocolateyInstall.ps1) or I(chocolateyUninstall.ps1) scripts
when installing a package.
type: bool
default: no
version_added: '2.4'
source:
description:
- Specify the source to retrieve the package from.
- Use M(win_chocolatey_source) to manage global sources.
- This value can either be the URL to a Chocolatey feed, a path to a folder
containing C(.nupkg) packages or the name of a source defined by
M(win_chocolatey_source).
- This value is also used when Chocolatey is not installed as the location
of the install.ps1 script and only supports URLs for this case.
type: str
source_username:
description:
- A username to use with I(source) when accessing a feed that requires
authentication.
- It is recommended you define the credentials on a source with
M(win_chocolatey_source) instead of passing it per task.
type: str
version_added: '2.7'
source_password:
description:
- The password for I(source_username).
    - This value is exposed as a command argument and any privileged account
      can see this value when the module is running Chocolatey; define the
      credentials with a source with M(win_chocolatey_source) to avoid this.
type: str
version_added: '2.7'
state:
description:
- State of the package on the system.
- When C(absent), will ensure the package is not installed.
- When C(present), will ensure the package is installed.
- When C(downgrade), will allow Chocolatey to downgrade a package if
I(version) is older than the installed version.
- When C(latest), will ensure the package is installed to the latest
available version.
- When C(reinstalled), will uninstall and reinstall the package.
type: str
choices: [ absent, downgrade, latest, present, reinstalled ]
default: present
timeout:
description:
    - The time to allow Chocolatey to finish before timing out.
type: int
default: 2700
version_added: '2.3'
aliases: [ execution_timeout ]
validate_certs:
description:
    - Used when downloading the Chocolatey install script if Chocolatey is not
      already installed; this does not affect the Chocolatey package install
      process.
- When C(no), no SSL certificates will be validated.
    - This should only be used on personally controlled sites using self-signed
      certificates.
type: bool
default: yes
version_added: '2.7'
version:
description:
- Specific version of the package to be installed.
- When I(state) is set to C(absent), will uninstall the specific version
otherwise all versions of that package will be removed.
    - If a different version of the package is installed, I(state) must be C(latest)
or I(force) set to C(yes) to install the desired version.
- Provide as a string (e.g. C('6.1')), otherwise it is considered to be
a floating-point number and depending on the locale could become C(6,1),
which will cause a failure.
- If I(name) is set to C(chocolatey) and Chocolatey is not installed on the
host, this will be the version of Chocolatey that is installed. You can
      also set the C(chocolateyVersion) environment variable.
type: str
notes:
- This module will install or upgrade Chocolatey when needed.
- When using verbosity 2 or less (C(-vv)) the C(stdout) output will be restricted.
When using verbosity 4 (C(-vvvv)) the C(stdout) output will be more verbose.
When using verbosity 5 (C(-vvvvv)) the C(stdout) output will include debug output.
- Some packages, like hotfixes or updates, need an interactive user logon in
order to install. You can use C(become) to achieve this, see
:ref:`become_windows`.
Even if you are connecting as local Administrator, using C(become) to
become Administrator will give you an interactive user logon, see examples
below.
- If C(become) is unavailable, use M(win_hotfix) to install hotfixes instead
of M(win_chocolatey) as M(win_hotfix) avoids using C(wusa.exe) which cannot
be run without C(become).
seealso:
- module: win_chocolatey_config
- module: win_chocolatey_facts
- module: win_chocolatey_feature
- module: win_chocolatey_source
- module: win_feature
- module: win_hotfix
description: Use when C(become) is unavailable, to avoid using C(wusa.exe).
- module: win_package
- module: win_updates
- name: Chocolatey website
description: More information about the Chocolatey tool.
link: http://chocolatey.org/
- name: Chocolatey packages
description: An overview of the available Chocolatey packages.
link: http://chocolatey.org/packages
- ref: become_windows
  description: Some packages, like hotfixes or updates, need an interactive user logon
in order to install. You can use C(become) to achieve this.
author:
- Trond Hindenes (@trondhindenes)
- Peter Mounce (@petemounce)
- Pepe Barbe (@elventear)
- Adam Keech (@smadam813)
- Pierre Templier (@ptemplier)
- Jordan Borean (@jborean93)
'''
# TODO:
# * Better parsing when a package has dependencies - currently fails
# * Time each item that is run
# * Support 'changed' with gems - would require shelling out to `gem list` first and parsing, kinda defeating the point of using chocolatey.
# * Version provided not as string might be translated to 6,6 depending on Locale (results in errors)
EXAMPLES = r'''
- name: Install git
win_chocolatey:
name: git
state: present
- name: Upgrade installed packages
win_chocolatey:
name: all
state: latest
- name: Install notepadplusplus version 6.6
win_chocolatey:
name: notepadplusplus
version: '6.6'
- name: Install notepadplusplus 32 bit version
win_chocolatey:
name: notepadplusplus
architecture: x86
- name: Install git from specified repository
win_chocolatey:
name: git
source: https://someserver/api/v2/
- name: Install git from a pre configured source (win_chocolatey_source)
win_chocolatey:
name: git
source: internal_repo
- name: Ensure Chocolatey itself is installed and use internal repo as source
win_chocolatey:
name: chocolatey
source: http://someserver/chocolatey
- name: Uninstall git
win_chocolatey:
name: git
state: absent
- name: Install multiple packages
win_chocolatey:
name:
- procexp
- putty
- windirstat
state: present
- name: Install multiple packages sequentially
win_chocolatey:
name: '{{ item }}'
state: present
loop:
- procexp
- putty
- windirstat
- name: Uninstall multiple packages
win_chocolatey:
name:
- procexp
- putty
- windirstat
state: absent
- name: Install curl using proxy
win_chocolatey:
name: curl
proxy_url: http://proxy-server:8080/
proxy_username: joe
proxy_password: p@ssw0rd
- name: Install a package that requires 'become'
win_chocolatey:
name: officepro2013
become: yes
become_user: Administrator
become_method: runas
- name: install and pin Notepad++ at 7.6.3
win_chocolatey:
name: notepadplusplus
version: 7.6.3
pinned: yes
state: present
- name: remove all pins for Notepad++ on all versions
win_chocolatey:
name: notepadplusplus
pinned: no
state: present
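# The following task is an illustrative editor's addition; the package name
# and version are hypothetical.
- name: Install a pre-release version alongside an already installed version
  win_chocolatey:
    name: examplepackage
    version: '2.0.0-beta1'
    allow_prerelease: yes
    allow_multiple: yes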
'''
RETURN = r'''
command:
description: The full command used in the chocolatey task.
returned: changed
type: str
sample: choco.exe install -r --no-progress -y sysinternals --timeout 2700 --failonunfound
rc:
description: The return code from the chocolatey task.
returned: always
type: int
sample: 0
stdout:
  description: The stdout from the chocolatey task. The verbosity of the
    messages is affected by the Ansible verbosity setting, see notes for more
    details.
returned: changed
type: str
sample: Chocolatey upgraded 1/1 packages.
'''
| {
"content_hash": "10bbd055e48682f0e09c57d26661d405",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 140,
"avg_line_length": 33.58423913043478,
"alnum_prop": 0.7021603689618902,
"repo_name": "thaim/ansible",
"id": "70876edb36f17b8e3ba65b22d99659575817eb9c",
"size": "12690",
"binary": false,
"copies": "17",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/windows/win_chocolatey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import json
import os
import random
import re
import subprocess
from ccmlib import common
from dtest import Tester, debug
from tools import known_failure, since
class TestOfflineTools(Tester):
# In 2.0, we will get this error log message due to jamm not being
# in the classpath
ignore_log_patterns = ["Unable to initialize MemoryMeter"]
def sstablelevelreset_test(self):
"""
Insert data and call sstablelevelreset on a series of
tables. Confirm level is reset to 0 using its output.
        Test a variety of possible errors and ensure the response is reasonable.
@since 2.1.5
@jira_ticket CASSANDRA-7614
"""
cluster = self.cluster
cluster.populate(1).start(wait_for_binary_proto=True)
node1 = cluster.nodelist()[0]
# test by trying to run on nonexistent keyspace
cluster.stop(gently=False)
(output, error, rc) = node1.run_sstablelevelreset("keyspace1", "standard1", output=True)
self.assertIn("ColumnFamily not found: keyspace1/standard1", error)
# this should return exit code 1
self.assertEqual(rc, 1, msg=str(rc))
# now test by generating keyspace but not flushing sstables
cluster.start(wait_for_binary_proto=True)
node1.stress(['write', 'n=100', 'no-warmup', '-schema', 'replication(factor=1)',
'-rate', 'threads=8'])
cluster.stop(gently=False)
(output, error, rc) = node1.run_sstablelevelreset("keyspace1", "standard1", output=True)
self._check_stderr_error(error)
self.assertIn("Found no sstables, did you give the correct keyspace", output)
self.assertEqual(rc, 0, msg=str(rc))
# test by writing small amount of data and flushing (all sstables should be level 0)
cluster.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node1)
session.execute("ALTER TABLE keyspace1.standard1 with compaction={'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb':1};")
node1.stress(['write', 'n=1K', 'no-warmup', '-schema', 'replication(factor=1)',
'-rate', 'threads=8'])
node1.flush()
cluster.stop(gently=False)
(output, error, rc) = node1.run_sstablelevelreset("keyspace1", "standard1", output=True)
self._check_stderr_error(error)
self.assertIn("since it is already on level 0", output)
self.assertEqual(rc, 0, msg=str(rc))
# test by loading large amount data so we have multiple levels and checking all levels are 0 at end
cluster.start(wait_for_binary_proto=True)
node1.stress(['write', 'n=50K', 'no-warmup', '-schema', 'replication(factor=1)',
'-rate', 'threads=8'])
cluster.flush()
self.wait_for_compactions(node1)
cluster.stop()
initial_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
(output, error, rc) = node1.run_sstablelevelreset("keyspace1", "standard1", output=True)
final_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
self._check_stderr_error(error)
self.assertEqual(rc, 0, msg=str(rc))
debug(initial_levels)
debug(final_levels)
        # let's make sure there was at least L1 before resetting levels
self.assertTrue(max(initial_levels) > 0)
# let's check all sstables are on L0 after sstablelevelreset
self.assertTrue(max(final_levels) == 0)
def get_levels(self, data):
(out, err, rc) = data
return map(int, re.findall("SSTable Level: ([0-9])", out))
def wait_for_compactions(self, node):
pattern = re.compile("pending tasks: 0")
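        # Busy-poll nodetool until the compaction backlog reports zero pending tasks.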
while True:
output, err = node.nodetool("compactionstats", capture_output=True)
if pattern.search(output):
break
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12275',
flaky=False,
notes='windows')
def sstableofflinerelevel_test(self):
"""
Generate sstables of varying levels.
Reset sstables to L0 with sstablelevelreset
Run sstableofflinerelevel and ensure tables are promoted correctly
Also test a variety of bad inputs including nonexistent keyspace and sstables
@since 2.1.5
        @jira_ticket CASSANDRA-8031
"""
cluster = self.cluster
cluster.set_configuration_options(values={'compaction_throughput_mb_per_sec': 0})
cluster.populate(1).start(wait_for_binary_proto=True)
node1 = cluster.nodelist()[0]
        # NOTE - As of now this does not return when it encounters an Exception, causing the test to hang; temporarily commented out
# test by trying to run on nonexistent keyspace
# cluster.stop(gently=False)
# (output, error, rc) = node1.run_sstableofflinerelevel("keyspace1", "standard1", output=True)
# self.assertTrue("java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.standard1" in error)
# # this should return exit code 1
# self.assertEqual(rc, 1, msg=str(rc))
# cluster.start()
# now test by generating keyspace but not flushing sstables
node1.stress(['write', 'n=1', 'no-warmup',
'-schema', 'replication(factor=1)',
'-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)',
'-rate', 'threads=8'])
cluster.stop(gently=False)
(output, error, rc) = node1.run_sstableofflinerelevel("keyspace1", "standard1", output=True)
self.assertIn("No sstables to relevel for keyspace1.standard1", output)
self.assertEqual(rc, 1, msg=str(rc))
# test by flushing (sstable should be level 0)
cluster.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node1)
debug("Altering compaction strategy to LCS")
session.execute("ALTER TABLE keyspace1.standard1 with compaction={'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb':1};")
node1.stress(['write', 'n=1K', 'no-warmup',
'-schema', 'replication(factor=1)',
'-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)',
'-rate', 'threads=8'])
node1.flush()
cluster.stop()
(output, error, rc) = node1.run_sstableofflinerelevel("keyspace1", "standard1", output=True)
self.assertIn("L0=1", output)
self.assertEqual(rc, 0, msg=str(rc))
cluster.start(wait_for_binary_proto=True)
# test by loading large amount data so we have multiple sstables
# must write enough to create more than just L1 sstables
keys = 8 * cluster.data_dir_count
node1.stress(['write', 'n={0}K'.format(keys), 'no-warmup',
'-schema', 'replication(factor=1)',
'-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)',
'-rate', 'threads=8'])
node1.flush()
debug("Waiting for compactions to finish")
self.wait_for_compactions(node1)
debug("Stopping node")
cluster.stop()
debug("Done stopping node")
# Let's reset all sstables to L0
debug("Getting initial levels")
initial_levels = list(self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"])))
self.assertNotEqual([], initial_levels)
debug('initial_levels:')
debug(initial_levels)
debug("Running sstablelevelreset")
(output, error, rc) = node1.run_sstablelevelreset("keyspace1", "standard1", output=True)
debug("Getting final levels")
final_levels = list(self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"])))
self.assertNotEqual([], final_levels)
debug('final levels:')
debug(final_levels)
        # let's make sure there were at least 3 levels (L0, L1 and L2)
self.assertGreater(max(initial_levels), 1)
# let's check all sstables are on L0 after sstablelevelreset
self.assertEqual(max(final_levels), 0)
# time to relevel sstables
debug("Getting initial levels")
initial_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
debug("Running sstableofflinerelevel")
(output, error, rc) = node1.run_sstableofflinerelevel("keyspace1", "standard1", output=True)
debug("Getting final levels")
final_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
debug(output)
debug(error)
debug(initial_levels)
debug(final_levels)
# let's check sstables were promoted after releveling
self.assertGreater(max(final_levels), 1)
@since('2.2')
def sstableverify_test(self):
"""
Generate sstables and test offline verification works correctly
Test on bad input: nonexistent keyspace and sstables
Test on potential situations: deleted sstables, corrupted sstables
"""
cluster = self.cluster
cluster.populate(3).start(wait_for_binary_proto=True)
node1, node2, node3 = cluster.nodelist()
# test on nonexistent keyspace
(out, err, rc) = node1.run_sstableverify("keyspace1", "standard1", output=True)
self.assertIn("Unknown keyspace/table keyspace1.standard1", err)
self.assertEqual(rc, 1, msg=str(rc))
# test on nonexistent sstables:
node1.stress(['write', 'n=100', 'no-warmup', '-schema', 'replication(factor=3)',
'-rate', 'threads=8'])
(out, err, rc) = node1.run_sstableverify("keyspace1", "standard1", output=True)
self.assertEqual(rc, 0, msg=str(rc))
# Generate multiple sstables and test works properly in the simple case
node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)',
'-rate', 'threads=8'])
node1.flush()
node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)',
'-rate', 'threads=8'])
node1.flush()
cluster.stop()
(out, error, rc) = node1.run_sstableverify("keyspace1", "standard1", output=True)
self.assertEqual(rc, 0, msg=str(rc))
# STDOUT of the sstableverify command consists of multiple lines which may contain
# Java-normalized paths. To later compare these with Python-normalized paths, we
# map over each line of out and replace Java-normalized paths with Python equivalents.
outlines = map(lambda line: re.sub("(?<=path=').*(?=')",
lambda match: os.path.normcase(match.group(0)),
line),
out.splitlines())
# check output is correct for each sstable
sstables = self._get_final_sstables(node1, "keyspace1", "standard1")
for sstable in sstables:
verified = False
hashcomputed = False
for line in outlines:
if sstable in line:
if "Verifying BigTableReader" in line:
verified = True
elif "Checking computed hash of BigTableReader" in line:
hashcomputed = True
else:
debug(line)
debug(verified)
debug(hashcomputed)
debug(sstable)
self.assertTrue(verified and hashcomputed)
# now try intentionally corrupting an sstable to see if hash computed is different and error recognized
sstable1 = sstables[1]
with open(sstable1, 'r') as f:
sstabledata = bytearray(f.read())
with open(sstable1, 'w') as out:
position = random.randrange(0, len(sstabledata))
sstabledata[position] = (sstabledata[position] + 1) % 256
out.write(sstabledata)
# use verbose to get some coverage on it
(out, error, rc) = node1.run_sstableverify("keyspace1", "standard1", options=['-v'], output=True)
# Process sstableverify output to normalize paths in string to Python casing as above
error = re.sub("(?<=Corrupted: ).*", lambda match: os.path.normcase(match.group(0)), error)
self.assertIn("Corrupted: " + sstable1, error)
self.assertEqual(rc, 1, msg=str(rc))
def sstableexpiredblockers_test(self):
cluster = self.cluster
cluster.populate(1).start(wait_for_binary_proto=True)
[node1] = cluster.nodelist()
session = self.patient_cql_connection(node1)
self.create_ks(session, 'ks', 1)
session.execute("create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds=0")
# create a blocker:
session.execute("insert into ks.cf (key, val) values (1,1)")
node1.flush()
session.execute("delete from ks.cf where key = 2")
node1.flush()
session.execute("delete from ks.cf where key = 3")
node1.flush()
[(out, error, rc)] = node1.run_sstableexpiredblockers(keyspace="ks", column_family="cf")
self.assertIn("blocks 2 expired sstables from getting dropped", out)
def sstableupgrade_test(self):
"""
        Test that sstableupgrade functions properly offline on a same-version Cassandra sstable; a
        stdout message of "Found 0 sstables that need upgrading." should be returned.
"""
# Set up original node version to test for upgrade
cluster = self.cluster
testversion = cluster.version()
original_install_dir = cluster.get_install_dir()
debug('Original install dir: {}'.format(original_install_dir))
# Set up last major version to upgrade from, assuming 2.1 branch is the oldest tested version
if testversion < '2.2':
# Upgrading from 2.0->2.1 fails due to the jamm 0.2.5->0.3.0 jar update.
# ** This will happen again next time jamm version is upgraded.
# CCM doesn't handle this upgrade correctly and results in an error when flushing 2.1:
# Error opening zip file or JAR manifest missing : /home/mshuler/git/cassandra/lib/jamm-0.2.5.jar
# The 2.1 installed jamm version is 0.3.0, but bin/cassandra.in.sh used by nodetool still has 0.2.5
# (when this is fixed in CCM issue #463, install version='git:cassandra-2.0' as below)
self.skipTest('Skipping 2.1 test due to jamm.jar version upgrade problem in CCM node configuration.')
elif testversion < '3.0':
debug('Test version: {} - installing git:cassandra-2.1'.format(testversion))
cluster.set_install_dir(version='git:cassandra-2.1')
# As of 3.5, sstable format 'ma' from 3.0 is still the latest - install 2.2 to upgrade from
else:
debug('Test version: {} - installing git:cassandra-2.2'.format(testversion))
cluster.set_install_dir(version='git:cassandra-2.2')
# Start up last major version, write out an sstable to upgrade, and stop node
cluster.populate(1).start(wait_for_binary_proto=True)
[node1] = cluster.nodelist()
# Check that node1 is actually what we expect
debug('Downgraded install dir: {}'.format(node1.get_install_dir()))
session = self.patient_cql_connection(node1)
self.create_ks(session, 'ks', 1)
session.execute('create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds=0')
session.execute('insert into ks.cf (key, val) values (1,1)')
node1.flush()
cluster.stop()
debug('Beginning ks.cf sstable: {}'.format(node1.get_sstables(keyspace='ks', column_family='cf')))
# Upgrade Cassandra to original testversion and run sstableupgrade
cluster.set_install_dir(original_install_dir)
# Check that node1 is actually upgraded
debug('Upgraded to original install dir: {}'.format(node1.get_install_dir()))
# Perform a node start/stop so system tables get internally updated, otherwise we may get "Unknown keyspace/table ks.cf"
cluster.start(wait_for_binary_proto=True)
node1.flush()
cluster.stop()
[(out, error, rc)] = node1.run_sstableupgrade(keyspace='ks', column_family='cf')
debug(out)
debug(error)
debug('Upgraded ks.cf sstable: {}'.format(node1.get_sstables(keyspace='ks', column_family='cf')))
self.assertIn('Found 1 sstables that need upgrading.', out)
# Check that sstableupgrade finds no upgrade needed on current version.
[(out, error, rc)] = node1.run_sstableupgrade(keyspace='ks', column_family='cf')
debug(out)
debug(error)
self.assertIn('Found 0 sstables that need upgrading.', out)
@since('3.0')
def sstabledump_test(self):
"""
Test that sstabledump functions properly offline to output the contents of a table.
"""
cluster = self.cluster
cluster.populate(1).start(wait_for_binary_proto=True)
[node1] = cluster.nodelist()
session = self.patient_cql_connection(node1)
self.create_ks(session, 'ks', 1)
session.execute('create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds=0')
session.execute('insert into ks.cf (key, val) values (1,1)')
node1.flush()
cluster.stop()
[(out, error, rc)] = node1.run_sstabledump(keyspace='ks', column_families=['cf'])
debug(out)
debug(error)
# Load the json output and check that it contains the inserted key=1
s = json.loads(out)
debug(s)
self.assertEqual(len(s), 1)
dumped_row = s[0]
self.assertEqual(dumped_row['partition']['key'], ['1'])
# Check that we only get the key back using the enumerate option
[(out, error, rc)] = node1.run_sstabledump(keyspace='ks', column_families=['cf'], enumerate_keys=True)
debug(out)
debug(error)
s = json.loads(out)
debug(s)
self.assertEqual(len(s), 1)
dumped_row = s[0][0]
self.assertEqual(dumped_row, '1')
def _check_stderr_error(self, error):
acceptable = ["Max sstable size of", "Consider adding more capacity", "JNA link failure", "Class JavaLaunchHelper is implemented in both"]
if len(error) > 0:
for line in error.splitlines():
self.assertTrue(any([msg in line for msg in acceptable]),
'Found line \n\n"{line}"\n\n in error\n\n{error}'.format(line=line, error=error))
def _get_final_sstables(self, node, ks, table):
"""
Return the node final sstable data files, excluding the temporary tables.
If sstableutil exists (>= 3.0) then we rely on this tool since the table
file names no longer contain tmp in their names (CASSANDRA-7066).
"""
# Get all sstable data files
allsstables = map(os.path.normcase, node.get_sstables(ks, table))
# Remove any temporary files
tool_bin = node.get_tool('sstableutil')
if os.path.isfile(tool_bin):
args = [tool_bin, '--type', 'tmp', ks, table]
env = common.make_cassandra_env(node.get_install_cassandra_root(), node.get_node_cassandra_root())
p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
tmpsstables = map(os.path.normcase, stdout.splitlines())
ret = list(set(allsstables) - set(tmpsstables))
else:
ret = [sstable for sstable in allsstables if "tmp" not in sstable[50:]]
return ret
| {
"content_hash": "255d767bf6cea8dae8366381fc5d81d6",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 146,
"avg_line_length": 46.314087759815244,
"alnum_prop": 0.6206243143512516,
"repo_name": "mambocab/cassandra-dtest",
"id": "43e0c8c051e9550a4454fd914b547117a4ea7980",
"size": "20054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "offline_tools_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2158572"
},
{
"name": "Shell",
"bytes": "1999"
}
],
"symlink_target": ""
} |
def create(replies):
"""Used for faking out scripts so they can run like
the book needs but be sectioned to appear to be normal.
"""
def input(prompt=None):
reply = replies.pop(0)
if prompt: print(prompt, end=' ')
print(reply)
return reply
return input, input
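if __name__ == '__main__':
    # Editor's demonstration (not part of the original module): scripted
    # replies are consumed in order, echoing prompts like a real session.
    fake, _ = create(['Zed', '39'])
    name = fake("What is your name?")  # prints: What is your name? Zed
    age = fake()                       # prints: 39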
| {
"content_hash": "3bf698c4aab0777dfa756d3663b941a5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 24.23076923076923,
"alnum_prop": 0.6095238095238096,
"repo_name": "zedshaw/learn-python3-thw-code",
"id": "dbd4ace318135454f2ca37055ebbadfd316a83e4",
"size": "315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fake_input.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2110"
},
{
"name": "Python",
"bytes": "62068"
},
{
"name": "Shell",
"bytes": "1544"
},
{
"name": "ShellSession",
"bytes": "113"
}
],
"symlink_target": ""
} |
import xmlrpclib, socket, ConfigParser, os, os.path, time
import siuxmethodlib
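# Editor's note: the log.ni() calls below assume an external 'log' module
# that is never imported in this file; this hedged fallback keeps the module
# importable when it is absent by routing messages to the stdlib logger.
try:
    import log
except ImportError:
    import logging

    class _FallbackLog(object):
        @staticmethod
        def ni(fmt, args, **kwargs):
            logging.getLogger('siuxlib').error(fmt % args)

    log = _FallbackLog()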
class TimeoutTransport (xmlrpclib.Transport):
"""
Custom XML-RPC transport class for HTTP connections, allowing a timeout in
the base connection.
"""
def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, use_datetime=0):
xmlrpclib.Transport.__init__(self, use_datetime)
self._timeout = timeout
def make_connection(self, host):
# If using python 2.6, since that implementation normally returns the
# HTTP compatibility class, which doesn't have a timeout feature.
#import httplib
#host, extra_headers, x509 = self.get_host_info(host)
#return httplib.HTTPConnection(host, timeout=self._timeout)
conn = xmlrpclib.Transport.make_connection(self, host)
conn.timeout = self._timeout
return conn
class SiUXclient(siuxmethodlib.SiUXmethod):
def __init__( self, auth='', timeout=10 ):
""" init """
self._auth = auth
# default config
self.__repeat = 3
self.__url = 'http://api.esiux.net:3035/RPC2'
# server connect
t = TimeoutTransport( timeout=timeout )
self.__server = xmlrpclib.ServerProxy( self.__url, transport=t)
def _call( self, methodName, *args ):
""" call rpc method """
# call rpc method
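        # Retry transient server-side (5xx) failures up to self.__repeat
        # times, sleeping one second between attempts.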
for no in range( self.__repeat+1 ):
try:
methodTest = getattr( self.__server, methodName )
ret = methodTest( *args )
# rpc error
            except xmlrpclib.Fault, fe:
log.ni( 'RPC Err: method %s(%s) : %s [repeat=%d]', (methodName, str(args), fe, no), ERR=3 )
ret = { 'status':503, 'statusMessage':'Service Unavailable', 'errorMessage':str(fe), 'statusCode':'SERVER_UNAVAILABLE', 'found':0 }
# method error
except Exception, msg:
                if str(msg) == "SiUXclient instance has no attribute '%s'" % (methodName):
log.ni( 'RPC Err: method %s(%s) : %s [repeat=%d]', (methodName, str(args), msg, no), ERR=3 )
ret = { 'status':501, 'statusMessage':'Not Implemented', 'errorMessage':str(msg), 'statusCode':'NOT_IMPLEMENTED', 'found':0 }
break
log.ni( 'RPC Err: method %s(%s) : %s [repeat=%d]', (methodName, str(args), msg, no), ERR=3 )
ret = { 'status':500, 'statusMessage':'Server Error', 'errorMessage':str(msg), 'statusCode':'SERVER_ERR', 'found':0 }
if ret['status'] < 500:
break
time.sleep(1)
return ret
def methodList( self ):
"list methods for SiUX api"
try:
listMethod = self.__server.system.listMethods()
        except xmlrpclib.Fault, fe:
            log.ni( 'RPC Err: method system.listMethods() : %s', (fe,), ERR=3 )
            return { 'status':503, 'statusMessage':'Service Unavailable', 'errorMessage':str(fe), 'statusCode':'SERVER_UNAVAILABLE', 'found':0 }
except Exception, msg:
log.ni( 'RPC Err: method system.listMethods() : %s', (msg,), ERR=3 )
return { 'status':500, 'statusMessage':'Server Error', 'errorMessage':str(msg), 'statusCode':'SERVER_ERR', 'found':0 }
data = []
found = 0
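        # Convert dotted RPC names (e.g. 'source.list') into the camelCase
        # method names exposed by this client (e.g. 'sourceList').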
for methodName in listMethod:
if methodName.startswith( 'system.'):
continue
methods = []
no = 0
for name in methodName.split('.'):
if no == 0:
methods.append( name.lower() )
else:
methods.append( '%s%s' % (name[0].upper(), name[1:].lower()) )
no = no + 1
method = ''.join(methods)
data.append( {'methodName':method} )
found = found + 1
return { 'status':200, 'statusMessage':'OK', 'statusCode':'OK', 'data':data, 'found':found }
if __name__ == '__main__':
import pprint
# config
auth = '<YOUR_API_KEY>'
# init
S = SiUXclient( auth=auth )
# test method list
pprint.pprint( S.methodList() )
# test method
pprint.pprint( S.sourceList() )
| {
"content_hash": "77de83cc7ad321e676e39022b6bbdf07",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 163,
"avg_line_length": 31.137404580152673,
"alnum_prop": 0.5670507477322874,
"repo_name": "eSiUX/siux-python",
"id": "69bcb3862f86af1e541b84fcd463ac6ab38438b3",
"size": "4098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "siux/siuxlib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54385"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from core.views import report, user, scheduler, examinations, api, user_examinations, examination, doc
from core.views.management import department
urlpatterns = [
url(r'^$', examination.user_examination_list_view, name='user_examination_list_view'),
url(r'^(?P<user_examination_id>\d+)/$', examination.user_examination_process_view, name='user_examination_answer_view'),
url(r'^(?P<user_examination_id>\d+)/(?P<user_examination_question_log_id>\d+)/$', examination.user_examination_process_view, name='user_examination_answer_view'),
url(r'^view/(?P<user_examination_id>\d+)/$', examination.user_examination_detail_view, name='user_examination_detail_view'),
# url(r'^api/import/department/$', api.department_import, name='department_import'),
# url(r'^api/import/user/$', api.user_import, name='user_import'),
url(r'^adm/report/$', report.user_examination_report_list_view, name='user_examination_report_list_view'),
url(r'^adm/departments/$', report.departments_report_list_view, name='departments_report_list_view'),
url(r'^adm/departments/(?P<department_id>\d+)/$', report.department_users_list_view, name='department_users_list_view'),
url(r'^adm/departments/(?P<department_id>\d+)/(?P<user_id>\d+)/$', report.department_user_examinations_list_view, name='department_user_examinations_list_view'),
url(r'^adm/users/(?P<user_id>\d+)/undelete/$', user.user_undelete_view, name='user_undelete_view'),
url(r'^adm/users/(?P<user_id>\d+)/delete/$', user.user_delete_view, name='user_delete_view'),
url(r'^adm/users/(?P<user_id>\d+)/$', user.user_create_or_update_view, name='user_update_view'),
url(r'^adm/users/create/$', user.user_create_or_update_view, name='user_create_view'),
url(r'^adm/users/deleted/$', user.user_deleted_list_view, name='user_deleted_list_view'),
url(r'^adm/users/$', user.user_list_view, name='user_list_view'),
url(r'^adm/scheduler/(?P<scheduler_id>\d+)/detail/$', scheduler.scheduler_detail_view, name='scheduler_detail_view'),
url(r'^adm/scheduler/(?P<scheduler_id>\d+)/$', scheduler.scheduler_create_or_update_view, name='scheduler_update_view'),
url(r'^adm/scheduler/create/$', scheduler.scheduler_create_or_update_view, name='scheduler_create_view'),
url(r'^adm/scheduler/$', scheduler.scheduler_list_view, name='scheduler_list_view'),
url(r'^adm/examination/(?P<examination_id>\d+)/questions/(?P<question_id>\d+)/answer/(?P<answer_id>\d+)/delete/$', examinations.question_answer_delete_view, name='question_answer_delete_view'),
url(r'^adm/examination/(?P<examination_id>\d+)/questions/(?P<question_id>\d+)/answer/(?P<answer_id>\d+)/$', examinations.question_answer_create_or_update_view, name='question_answer_update_view'),
url(r'^adm/examination/(?P<examination_id>\d+)/questions/(?P<question_id>\d+)/answer/create/$', examinations.question_answer_create_or_update_view, name='question_answer_create_view'),
url(r'^adm/examination/(?P<examination_id>\d+)/questions/(?P<question_id>\d+)/delete/$', examinations.examination_question_delete_view, name='examination_question_delete_view'),
url(r'^adm/examination/(?P<examination_id>\d+)/questions/(?P<question_id>\d+)/$', examinations.examination_question_create_or_update_view, name='examination_question_update_view'),
url(r'^adm/examination/(?P<examination_id>\d+)/questions/create/$', examinations.examination_question_create_or_update_view, name='examination_question_create_view'),
url(r'^adm/examination/(?P<examination_id>\d+)/questions/$', examinations.examination_question_list_view, name='examination_question_list_view'),
url(r'^adm/examination/(?P<examination_id>\d+)/delete/$', examinations.examination_delete_view, name='examination_delete_view'),
url(r'^adm/examination/(?P<examination_id>\d+)/$', examinations.examination_create_or_update_view, name='examination_update_view'),
url(r'^adm/examination/create/$', examinations.examination_create_or_update_view, name='examination_create_view'),
url(r'^adm/examination/deleted/$', examinations.examination_deleted_list_view, name='examination_deleted_list_view'),
url(r'^adm/examination/$', examinations.examination_list_view, name='adm_examination_list_view'),
url(r'^adm/user_examination/(?P<user_examination_id>\d+)/$', user_examinations.user_examination_create_or_update_view, name='adm_user_examination_update_view'),
url(r'^adm/user_examination/(?P<user_examination_id>\d+)/delete/$', user_examinations.user_examination_delete_view, name='adm_user_examination_delete_view'),
url(r'^adm/user_examination/create/$', user_examinations.user_examination_create_or_update_view, name='adm_user_examination_create_view'),
url(r'^adm/user_examination/deleted/$', user_examinations.user_examination_deleted_list_view, name='adm_user_examination_deleted_list_view'),
url(r'^adm/user_examination/$', user_examinations.user_examination_list_view, name='adm_user_examination_list_view'),
url(r'^adm/department/(?P<department_id>\d+)/delete/$', department.department_delete_view, name='department_delete_view'),
url(r'^adm/department/(?P<department_id>\d+)/$', department.department_create_or_update_view, name='department_update_view'),
url(r'^adm/department/create/$', department.department_create_or_update_view, name='department_create_view'),
url(r'^adm/department/deleted/$', department.department_deleted_list_view, name='department_deleted_list_view'),
url(r'^adm/department/$', department.department_list_view, name='department_list_view'),
]
| {
"content_hash": "6366f975d6327703b2ca3f4771edf29d",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 200,
"avg_line_length": 98.63157894736842,
"alnum_prop": 0.7223408039843472,
"repo_name": "telminov/personnel-testing",
"id": "b1f2bac4229fb8e16aeaecd0b4158c330820ffc6",
"size": "5622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "73039"
},
{
"name": "HTML",
"bytes": "52156"
},
{
"name": "JavaScript",
"bytes": "457005"
},
{
"name": "Python",
"bytes": "127094"
}
],
"symlink_target": ""
} |
import unittest
import _mysql
import MySQLdb
from MySQLdb.constants import FIELD_TYPE
from configdb import connection_factory
import warnings
warnings.simplefilter("ignore")
class TestDBAPISet(unittest.TestCase):
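    # MySQLdb.STRING and friends are DBAPISet instances: a single FIELD_TYPE
    # code compares equal to any set that contains it, per the PEP 249
    # convention for type objects.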
def test_set_equality(self):
self.assertTrue(MySQLdb.STRING == MySQLdb.STRING)
def test_set_inequality(self):
self.assertTrue(MySQLdb.STRING != MySQLdb.NUMBER)
def test_set_equality_membership(self):
self.assertTrue(FIELD_TYPE.VAR_STRING == MySQLdb.STRING)
def test_set_inequality_membership(self):
self.assertTrue(FIELD_TYPE.DATE != MySQLdb.STRING)
class CoreModule(unittest.TestCase):
"""Core _mysql module features."""
def test_NULL(self):
"""Should have a NULL constant."""
self.assertEqual(_mysql.NULL, 'NULL')
def test_version(self):
"""Version information sanity."""
self.assertTrue(isinstance(_mysql.__version__, str))
self.assertTrue(isinstance(_mysql.version_info, tuple))
self.assertEqual(len(_mysql.version_info), 5)
def test_client_info(self):
self.assertTrue(isinstance(_mysql.get_client_info(), str))
def test_thread_safe(self):
self.assertTrue(isinstance(_mysql.thread_safe(), int))
class CoreAPI(unittest.TestCase):
"""Test _mysql interaction internals."""
def setUp(self):
self.conn = connection_factory(use_unicode=True)
def tearDown(self):
self.conn.close()
def test_thread_id(self):
tid = self.conn.thread_id()
self.assertTrue(isinstance(tid, int),
"thread_id didn't return an int.")
self.assertRaises(TypeError, self.conn.thread_id, ('evil',),
"thread_id shouldn't accept arguments.")
def test_affected_rows(self):
self.assertEquals(self.conn.affected_rows(), 0,
"Should return 0 before we do anything.")
#def test_debug(self):
## FIXME Only actually tests if you lack SUPER
#self.assertRaises(MySQLdb.OperationalError,
#self.conn.dump_debug_info)
def test_charset_name(self):
self.assertTrue(isinstance(self.conn.character_set_name(), str),
"Should return a string.")
def test_host_info(self):
self.assertTrue(isinstance(self.conn.get_host_info(), str),
"Should return a string.")
def test_proto_info(self):
self.assertTrue(isinstance(self.conn.get_proto_info(), int),
"Should return an int.")
def test_server_info(self):
self.assertTrue(isinstance(self.conn.get_server_info(), str),
"Should return an str.")
| {
"content_hash": "edd80864538ae790550989d25930e2c1",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 72,
"avg_line_length": 32.45977011494253,
"alnum_prop": 0.6094192634560907,
"repo_name": "hipnusleo/laserjet",
"id": "9dcedfc1e184cd1666a7ea235488a6a357b6425b",
"size": "2824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resource/pypi/MySQL-python-1.2.5/tests/test_MySQLdb_nonstandard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "13184"
},
{
"name": "C",
"bytes": "672858"
},
{
"name": "C++",
"bytes": "9678"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "850945"
},
{
"name": "Java",
"bytes": "14456"
},
{
"name": "Makefile",
"bytes": "14373"
},
{
"name": "Python",
"bytes": "5156663"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_name="scattergl.marker", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
| {
"content_hash": "61eda68206ad7bcaf9aaf5d607a1c1d2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 32.30769230769231,
"alnum_prop": 0.6071428571428571,
"repo_name": "plotly/plotly.py",
"id": "207f192dea157339a3bf4538beb9cda9a1946a0d",
"size": "420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattergl/marker/_colorsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Kd-tree for nearest neighbours queries.
This implementation currently runs faster than SciPy's KDTree but is often
slower than a brute-force approach in Hienoi's use cases, killing the primary
purpose for such a data structure. Still, it is left here as an educational
resource.
"""
import collections
import heapq
import itertools
import sys
import numpy
from hienoi._dynamicarray import DynamicArray
if sys.version_info[0] == 2:
_range = xrange
_zip = itertools.izip
else:
_range = range
_zip = zip
_heappushpop = heapq.heappushpop
_heappush = heapq.heappush
_npmax = numpy.maximum
_INITIAL_NEIGHBOURS_CAPACITY = 32
class KDTree(object):
"""K-d tree.
Nodes are stored contiguously in memory.
"""
def __init__(self, data, bucket_size=128):
if bucket_size < 1:
raise ValueError("A minimum bucket size of 1 is expected.")
self._data = data
self._n, self._k = self._data.shape
self._nodes = None
self._buckets = []
self._bucket_size = bucket_size
self._node_dtype = numpy.dtype([
('size', numpy.intp),
('bucket', numpy.intp),
('lower_bounds', (numpy.float_, self._k)),
('upper_bounds', (numpy.float_, self._k)),
])
self._neighbour_dtype = numpy.dtype([
('squared_distance', numpy.float_),
('index', numpy.intp),
])
self._build()
def search(self, point, count, radius, sort):
"""Retrieve the neighbours to a point."""
if count is None:
count = self._n
elif count < 1:
return numpy.empty(0, dtype=self._neighbour_dtype)
if radius is None:
radius = numpy.inf
elif radius < 0.0:
return numpy.empty(0, dtype=self._neighbour_dtype)
point = numpy.asarray(point, dtype=numpy.float_)
if count >= self._n:
return self._search_all_within_radius(point, radius, sort)
else:
return self._search_k_nearests(point, count, radius, sort)
def _build(self):
"""Build the k-d tree."""
data = self._data
buckets = self._buckets
bucket_size = self._bucket_size
# First pass: build the tree using a DFS ordering.
nodes = []
parents = []
i = 0
stack = collections.deque(((numpy.arange(self._n), None),))
while stack:
indices, parent = stack.popleft()
points = data[indices]
lower_bounds = numpy.amin(points, axis=0)
upper_bounds = numpy.amax(points, axis=0)
if len(points) <= bucket_size:
bucket = len(buckets)
buckets.append(indices)
else:
bucket = -1
# Split the longest side of the bounds.
side_lengths = upper_bounds - lower_bounds
split_axis = numpy.argmax(side_lengths)
split_location = (lower_bounds[split_axis]
+ upper_bounds[split_axis]) / 2.0
axis_data = points[:, split_axis]
left_indices = indices[
numpy.nonzero(axis_data <= split_location)[0]]
right_indices = indices[
numpy.nonzero(axis_data > split_location)[0]]
stack.appendleft((right_indices, i))
stack.appendleft((left_indices, i))
size = 1
nodes.append((size, bucket, lower_bounds, upper_bounds))
parents.append(parent)
i += 1
# Second pass: set the 'size' attribute for each node in the tree.
# Iterate over the nodes in reverse order to perform a bottom-up
# traversal where the size of each node is added to the size of its
# parent.
self._nodes = numpy.array(nodes, dtype=self._node_dtype)
node_sizes = self._nodes['size']
for i in reversed(_range(1, len(self._nodes))):
node_sizes[parents[i]] += node_sizes[i]
def _search_k_nearests(self, point, count, radius, sort):
"""Search the nearest points within a radius."""
data = self._data
nodes = self._nodes
buckets = self._buckets
node_sizes = nodes['size']
node_buckets = nodes['bucket']
node_lower_bounds = nodes['lower_bounds']
node_upper_bounds = nodes['upper_bounds']
# A max heap would be appropriate to store the nearest points found,
# alas Python only provides a min heap as part of the `heapq` module.
# As a hacky workaround, nearest distances are negated.
nearests = []
dist_limit = radius ** 2
pt_root_dist = _pt_to_node_near_dist(
point, node_lower_bounds[0], node_upper_bounds[0])
stack = collections.deque(((0, pt_root_dist),))
while stack:
i, pt_node_dist = stack.popleft()
if pt_node_dist > dist_limit:
# The node's bounds are too far, skip this branch.
pass
elif node_sizes[i] == 1:
# This is a leaf node, see if there are any nearest points.
indices = buckets[node_buckets[i]]
points = data[indices]
dists = numpy.sum(
(point[numpy.newaxis, :] - points) ** 2, axis=-1)
for j, dist in _zip(indices, dists):
if dist < dist_limit:
if len(nearests) >= count:
dist_limit = -_heappushpop(nearests, (-dist, j))[0]
else:
_heappush(nearests, (-dist, j))
else:
# Inspect the child nodes.
left_node_idx = i + 1
right_node_idx = i + 1 + node_sizes[left_node_idx]
pt_left_node_dist = _pt_to_node_near_dist(
point, node_lower_bounds[left_node_idx],
node_upper_bounds[left_node_idx])
pt_right_node_dist = _pt_to_node_near_dist(
point, node_lower_bounds[right_node_idx],
node_upper_bounds[right_node_idx])
if pt_left_node_dist <= pt_right_node_dist:
stack.appendleft((right_node_idx, pt_right_node_dist))
stack.appendleft((left_node_idx, pt_left_node_dist))
else:
stack.appendleft((left_node_idx, pt_left_node_dist))
stack.appendleft((right_node_idx, pt_right_node_dist))
out = numpy.array(nearests, dtype=self._neighbour_dtype)
out['squared_distance'] *= -1.0
if sort:
# Here is the biggest performance killer. Runs in a single thread.
out.sort(order='squared_distance')
return out
def _search_all_within_radius(self, point, radius, sort):
"""Search all the points within a radius."""
data = self._data
nodes = self._nodes
buckets = self._buckets
node_sizes = nodes['size']
node_buckets = nodes['bucket']
node_lower_bounds = nodes['lower_bounds']
node_upper_bounds = nodes['upper_bounds']
radius **= 2
neighbours = DynamicArray(_INITIAL_NEIGHBOURS_CAPACITY,
self._neighbour_dtype)
pt_root_dist = _pt_to_node_near_dist(
point, node_lower_bounds[0], node_upper_bounds[0])
stack = collections.deque(((0, pt_root_dist),))
while stack:
i, pt_node_near_dist = stack.popleft()
if pt_node_near_dist > radius:
# The node's bounds are too far, skip this branch.
pass
elif (_pt_to_node_far_dist(
point, node_lower_bounds[i], node_upper_bounds[i])
<= radius):
# The node's bounds are within the radius, recursively retrieve
# all the points.
children = nodes[i:i + node_sizes[i]]
leaves = numpy.extract(children['size'] == 1, children)
count = sum(len(buckets[bucket])
for bucket in leaves['bucket'])
j = len(neighbours)
neighbours.resize(j + count)
neighbours.data['squared_distance'][j:] = numpy.nan
for bucket in leaves['bucket']:
indices = buckets[bucket]
count = len(indices)
neighbours.data['index'][j:j + count] = indices
j += count
elif node_sizes[i] == 1:
# This is a leaf node, see if there are any points within the
# radius.
indices = buckets[node_buckets[i]]
points = data[indices]
dists = numpy.sum(
(point[numpy.newaxis, :] - points) ** 2, axis=-1)
is_within = dists <= radius
j = len(neighbours)
neighbours.resize(j + numpy.sum(is_within))
neighbours.data['squared_distance'][j:] = dists[is_within]
neighbours.data['index'][j:] = indices[is_within]
else:
# Inspect the child nodes.
left_node_idx = i + 1
right_node_idx = i + 1 + node_sizes[left_node_idx]
pt_left_node_dist = _pt_to_node_near_dist(
point, node_lower_bounds[left_node_idx],
node_upper_bounds[left_node_idx])
pt_right_node_dist = _pt_to_node_near_dist(
point, node_lower_bounds[right_node_idx],
node_upper_bounds[right_node_idx])
if pt_left_node_dist <= pt_right_node_dist:
stack.appendleft((right_node_idx, pt_right_node_dist))
stack.appendleft((left_node_idx, pt_left_node_dist))
else:
stack.appendleft((left_node_idx, pt_left_node_dist))
stack.appendleft((right_node_idx, pt_right_node_dist))
# Compute all the distances.
is_nan = numpy.isnan(neighbours.data['squared_distance'])
indices = neighbours.data['index'][numpy.nonzero(is_nan)]
points = data[indices]
dists = numpy.sum((point[numpy.newaxis, :] - points) ** 2, axis=-1)
neighbours.data['squared_distance'][is_nan] = dists
if sort:
# Here is the biggest performance killer. Runs in a single thread.
neighbours.data.sort(order='squared_distance')
return neighbours.data
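# Squared distance from 'point' to the closest point of the axis-aligned box
# [lower_bounds, upper_bounds]; zero when the point lies inside the box.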
def _pt_to_node_near_dist(point, lower_bounds, upper_bounds):
return numpy.sum(
_npmax(0.0, _npmax(point - upper_bounds, lower_bounds - point))
** 2)
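# Squared distance from 'point' to the farthest corner of the same box; used
# to test whether a node's bounds fall entirely inside the search radius.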
def _pt_to_node_far_dist(point, lower_bounds, upper_bounds):
return numpy.sum(
_npmax(0.0, _npmax(upper_bounds - point, point - lower_bounds))
** 2)
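if __name__ == '__main__':
    # Editor's demonstration (not part of the original module): query the five
    # nearest neighbours of a point in a random 3-D cloud.
    points = numpy.random.rand(1000, 3)
    tree = KDTree(points, bucket_size=64)
    nearest = tree.search((0.5, 0.5, 0.5), count=5, radius=None, sort=True)
    print(nearest['index'])
    print(numpy.sqrt(nearest['squared_distance']))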
| {
"content_hash": "205caa7cf849ca7baf385a60dd1ded93",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 79,
"avg_line_length": 38.79225352112676,
"alnum_prop": 0.5365344467640919,
"repo_name": "christophercrouzet/hienoi",
"id": "c20bfa0e08d2cf576c5eef48a476e3f71876b85e",
"size": "11017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hienoi/_kdtree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "3289"
},
{
"name": "Makefile",
"bytes": "709"
},
{
"name": "Python",
"bytes": "173994"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "xlstmpl",
version = "0.0.1",
author = "Rafael Basask",
author_email = "basask@gmail.com",
description = ("An templating system for xls files"),
license = "BSD",
keywords = "xls xlsx template excel office",
url = "",
packages=['xlstmpl',],
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
| {
"content_hash": "863aeaa7f78add648087894c4b698a66",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 31.85185185185185,
"alnum_prop": 0.6127906976744186,
"repo_name": "Basask/xls-tmpl",
"id": "19991291590866cccc6dca55b614b514b6f3a0b2",
"size": "860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2822"
}
],
"symlink_target": ""
} |
import httplib, urllib, base64, twitter, os
from pprint import pprint
import json
def sentiment(scoreArray):
headers = {
# Request headers
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': '<your key>',
}
params = urllib.urlencode({
})
with open('tweetOutput.json', 'r') as f:
body= f.read()
#print (str(body))
t= open("tweetScores.json","w+")
try:
conn = httplib.HTTPSConnection('eastus.api.cognitive.microsoft.com')
conn.request("POST", "/text/analytics/v2.0/sentiment?%s" % params, body, headers)
response = conn.getresponse()
data = response.read()
scoreData = json.loads(data)
t.write(json.dumps(scoreData))
#pprint(scoreData)
        num = 0
        # Iterate over every returned document instead of a hard-coded 19.
        while num < len(scoreData['documents']):
            scoreArray.append(scoreData['documents'][num]['score'])
            print scoreArray[num]
            num = num + 1
conn.close()
except Exception as e:
print str(e)
#print scoreArray
return scoreArray
def tweets(handle):
api = twitter.Api(consumer_key='2sEZ4ECzxAit4ijPiApU0DZyK',
consumer_secret='bgb3CaTh0QENGuMT1MTp4VzXO1dBKnkivZ4cc5EfepBs0X3Lf6',
access_token_key='54722501-nGzzqpFE0HVNMKs4lD5PZJ7dHoebP8TTTxCGNypNi',
access_token_secret='W1C1ZVk3dNFyurCxs8LYtWlm6bed2ZZ8hNvEGlspJtZr5')
twitterURL= "https://api.twitter.com/1.1/statuses/home_timeline.json"
user = handle
index = 0
statuses = api.GetUserTimeline(screen_name=user)
f= open("tweetOutput.json","w+")
f.write("{\n\"documents\": [\n" )
tweets = [i.AsDict() for i in statuses]
for t in tweets:
#print( t['text'])
number = str(index)
distweet=t['text']
distweet= distweet.encode('ascii', 'ignore').decode('ascii')
okay = json.dumps(distweet)
        # Write a comma after every document except the last one so the
        # file stays valid JSON once the closing "]}" is appended below.
        if index < len(tweets) - 1:
            f.write("\n{\n\"language\": \"en\", \n\"id\": \""+ number + "\",\n\"text\":" + okay +"\n},")
        else:
            f.write("\n{\n\"language\": \"en\", \n\"id\": \""+ number + "\",\n\"text\":" + okay +"\n}")
        index = index + 1
#print(f.read))
f.write("\n]\n}")
def calculateAveTweetScore(scoreArray):
i= 0
disAvg = 0
#print len(scoreArray)
while i<len(scoreArray):
disAvg = scoreArray[i] + disAvg
i = i + 1
    if len(scoreArray) != 0:
        avg = disAvg / len(scoreArray)
        print avg
return avg
else:
return 0
def printAvg(avg):
return avg
def cleanup():
os.remove("tweetOutput.json")
os.remove("tweetScores.json")
def main(handle):
scoreArray = []
array = []
tweets(handle)
array = sentiment(scoreArray)
#calculateMeanTweet(scoreArray)
#calaculateHappyTweet(scoreArray)
avg = calculateAveTweetScore(array)
cleanup()
return avg
if __name__=="__main__":
main() | {
"content_hash": "a1ee18078925f525d66ad5ba66accd89",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 104,
"avg_line_length": 31.221052631578946,
"alnum_prop": 0.5863115306810519,
"repo_name": "rbitia/aci-demos",
"id": "c24c941f49cb811a438113662b0e6fb5fd54574c",
"size": "2966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vk-computervision/textanalysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "186171"
},
{
"name": "Dockerfile",
"bytes": "1609"
},
{
"name": "HTML",
"bytes": "238829"
},
{
"name": "JavaScript",
"bytes": "15656"
},
{
"name": "Makefile",
"bytes": "409"
},
{
"name": "PowerShell",
"bytes": "172"
},
{
"name": "Python",
"bytes": "27793"
},
{
"name": "Shell",
"bytes": "3678"
},
{
"name": "Smarty",
"bytes": "2612"
}
],
"symlink_target": ""
} |
import os
from stat import (
S_IRUSR, S_IWUSR, S_IXUSR,
S_IRGRP, S_IWGRP, S_IXGRP,
S_IROTH, S_IWOTH, S_IXOTH,
S_ISVTX, S_ISGID
)
from nbgrader.apps.baseapp import TransferApp, transfer_aliases, transfer_flags
from nbgrader.utils import get_username, check_mode
aliases = {}
aliases.update(transfer_aliases)
aliases.update({
})
flags = {}
flags.update(transfer_flags)
flags.update({
})
class SubmitApp(TransferApp):
name = u'nbgrader-submit'
description = u'Submit an assignment to the nbgrader exchange'
aliases = aliases
flags = flags
examples = """
Submit an assignment for grading. For the usage of students.
You must run this command from the directory containing the assignments
sub-directory. For example, if you want to submit an assignment named
`assignment1`, that must be a sub-directory of your current working directory.
If you are inside the `assignment1` directory, it won't work.
To fetch an assignment you must first know the `course_id` for your course.
If you don't know it, ask your instructor.
To submit `assignment1` to the course `phys101`:
nbgrader submit assignment1 --course phys101
You can submit an assignment multiple times and the instructor will always
get the most recent version. Your assignment submissions are timestamped
so instructors can tell when you turned it in. No other students will
be able to see your submissions.
"""
def init_src(self):
self.src_path = os.path.abspath(self.extra_args[0])
self.assignment_id = os.path.split(self.src_path)[-1]
if not os.path.isdir(self.src_path):
self.fail("Assignment not found: {}".format(self.src_path))
def init_dest(self):
self.course_path = os.path.join(self.exchange_directory, self.course_id)
self.inbound_path = os.path.join(self.course_path, 'inbound')
self.assignment_filename = get_username() + '+' + self.assignment_id + '+' + self.timestamp
self.dest_path = os.path.join(self.inbound_path, self.assignment_filename)
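        # The resulting path has the shape (placeholders illustrative):
        #   <exchange>/<course_id>/inbound/<username>+<assignment_id>+<timestamp>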
if not os.path.isdir(self.inbound_path):
self.fail("Inbound directory doesn't exist: {}".format(self.inbound_path))
if not check_mode(self.inbound_path, write=True, execute=True):
self.fail("You don't have write permissions to the directory: {}".format(self.inbound_path))
def copy_files(self):
self.log.info("Source: {}".format(self.src_path))
self.log.info("Destination: {}".format(self.dest_path))
self.do_copy(self.src_path, self.dest_path)
with open(os.path.join(self.dest_path, "timestamp.txt"), "w") as fh:
fh.write(self.timestamp)
# Make this 0777=ugo=rwx so the instructor can delete later. Hidden from other users by the timestamp.
os.chmod(
self.dest_path,
S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IWGRP|S_IXGRP|S_IROTH|S_IWOTH|S_IXOTH
)
self.log.info("Submitted as: {} {} {}".format(
self.course_id, self.assignment_id, str(self.timestamp)
))
| {
"content_hash": "8015a21ba8448e1d28664f1eefccfec4",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 110,
"avg_line_length": 38.59756097560975,
"alnum_prop": 0.6584518167456556,
"repo_name": "MatKallada/nbgrader",
"id": "d2c3929daaa3c65971502860a47914ed90d30760",
"size": "3165",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nbgrader/apps/submitapp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4114"
},
{
"name": "JavaScript",
"bytes": "162308"
},
{
"name": "Python",
"bytes": "512777"
},
{
"name": "Smarty",
"bytes": "25636"
}
],
"symlink_target": ""
} |
from runner.koan import *
class Proxy:
def __init__(self, target_object):
# WRITE CODE HERE
self._obj = target_object
# WRITE CODE HERE
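    # A possible solution sketch (illustrative only; the koan deliberately
    # leaves the implementation to the student). It assumes __init__ also
    # sets self._messages = [] before self._obj:
    #
    #     def __getattr__(self, name):
    #         self._messages.append(name)
    #         return getattr(self._obj, name)
    #
    #     def __setattr__(self, name, value):
    #         if name.startswith('_'):
    #             object.__setattr__(self, name, value)
    #         else:
    #             self._messages.append(name)
    #             setattr(self._obj, name, value)
    #
    #     def messages(self):
    #         return self._messages
    #
    #     def was_called(self, message):
    #         return message in self._messages
    #
    #     def number_of_times_called(self, message):
    #         return self._messages.count(message)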
# The proxy object should pass the following Koan:
#
class AboutProxyObjectProject(Koan):
def test_proxy_method_returns_wrapped_object(self):
# NOTE: The Television class is defined below
tv = Proxy(Television())
self.assertTrue(isinstance(tv, Proxy))
def test_tv_methods_still_perform_their_function(self):
tv = Proxy(Television())
tv.channel = 10
tv.power()
self.assertEqual(10, tv.channel)
self.assertTrue(tv.is_on())
def test_proxy_records_messages_sent_to_tv(self):
tv = Proxy(Television())
tv.power()
tv.channel = 10
self.assertEqual(['power', 'channel'], tv.messages())
    def test_proxy_handles_invalid_messages(self):
        tv = Proxy(Television())
        with self.assertRaises(AttributeError):
            tv.no_such_method()
def test_proxy_reports_methods_have_been_called(self):
tv = Proxy(Television())
tv.power()
tv.power()
self.assertTrue(tv.was_called('power'))
self.assertFalse(tv.was_called('channel'))
def test_proxy_counts_method_calls(self):
tv = Proxy(Television())
tv.power()
tv.channel = 48
tv.power()
self.assertEqual(2, tv.number_of_times_called('power'))
self.assertEqual(1, tv.number_of_times_called('channel'))
self.assertEqual(0, tv.number_of_times_called('is_on'))
def test_proxy_can_record_more_than_just_tv_objects(self):
proxy = Proxy("Py Ohio 2010")
result = proxy.upper()
self.assertEqual("PY OHIO 2010", result)
result = proxy.split()
self.assertEqual(["Py", "Ohio", "2010"], result)
self.assertEqual(['upper', 'split'], proxy.messages())
# ====================================================================
# The following code is to support the testing of the Proxy class. No
# changes should be necessary to anything below this comment.
# Example class using in the proxy testing above.
class Television:
def __init__(self):
self._channel = None
self._power = None
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
def power(self):
if self._power == 'on':
self._power = 'off'
else:
self._power = 'on'
def is_on(self):
return self._power == 'on'
# Tests for the Television class. All of these tests should pass.
class TelevisionTest(Koan):
def test_it_turns_on(self):
tv = Television()
tv.power()
self.assertTrue(tv.is_on())
def test_it_also_turns_off(self):
tv = Television()
tv.power()
tv.power()
self.assertFalse(tv.is_on())
def test_edge_case_on_off(self):
tv = Television()
tv.power()
tv.power()
tv.power()
self.assertTrue(tv.is_on())
tv.power()
self.assertFalse(tv.is_on())
def test_can_set_the_channel(self):
tv = Television()
tv.channel = 11
self.assertEqual(11, tv.channel)
| {
"content_hash": "81863d7781821e65e7f413a4860c652b",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 70,
"avg_line_length": 24.386861313868614,
"alnum_prop": 0.5779706674648308,
"repo_name": "dstanek/python_koans",
"id": "ed2afa12e11b6bc51d3ac6d3ded15ce10942ad81",
"size": "4043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3/koans/about_proxy_object_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1633"
},
{
"name": "Python",
"bytes": "329043"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
} |
import random
from django.test import TestCase
from django.core.cache import cache as django_cache
from django.utils.timezone import now
from pretix.base.models import Event, Organizer
class CacheTest(TestCase):
"""
This test case tests the invalidation of the event related
cache.
"""
@classmethod
def setUpTestData(cls):
o = Organizer.objects.create(name='Dummy', slug='dummy')
cls.event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(),
)
def setUp(self):
self.cache = self.event.get_cache()
randint = random.random()
self.testkey = "test" + str(randint)
def test_interference(self):
django_cache.clear()
self.cache.set(self.testkey, "foo")
self.assertIsNone(django_cache.get(self.testkey))
self.assertIn(self.cache.get(self.testkey), (None, "foo"))
def test_longkey(self):
self.cache.set(self.testkey * 100, "foo")
        self.assertEqual(self.cache.get(self.testkey * 100), "foo")
def test_invalidation(self):
self.cache.set(self.testkey, "foo")
self.cache.clear()
self.assertIsNone(self.cache.get(self.testkey))
def test_many(self):
inp = {
'a': 'foo',
'b': 'bar',
}
self.cache.set_many(inp)
        self.assertEqual(inp, self.cache.get_many(inp.keys()))
| {
"content_hash": "ce61f20b09be27d3f4e8994e01b353f7",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 68,
"avg_line_length": 29.244897959183675,
"alnum_prop": 0.6127006280530356,
"repo_name": "Unicorn-rzl/pretix",
"id": "31f6c645f4d03cb0de4a3d26e66a78e59445958c",
"size": "1433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/base/test_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "39129"
},
{
"name": "HTML",
"bytes": "153518"
},
{
"name": "JavaScript",
"bytes": "8986"
},
{
"name": "Makefile",
"bytes": "423"
},
{
"name": "Python",
"bytes": "593486"
},
{
"name": "Shell",
"bytes": "287"
}
],
"symlink_target": ""
} |
from concurrent.futures import ProcessPoolExecutor
def formula(a, n):
v = (a-1)**n + (a+1)**n
return v % (a*a)
def maxR(a, lim=10000):
return max(formula(a, i) for i in range(lim + 1))
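# Mathematical aside (not part of the original solution): expanding the
# binomials modulo a*a gives (a-1)**n + (a+1)**n == 2 (mod a*a) for even n
# and == 2*n*a (mod a*a) for odd n, so for a >= 3 the maximum remainder is
# 2*a*((a-1)//2); the brute force above simply confirms that closed form.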
if __name__ == "__main__":
executor = ProcessPoolExecutor(12)
results = executor.map(maxR, range(999, 1001))
print(sum(results)) | {
"content_hash": "1f3263f37bc71ed2f01e8705de8e697e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 53,
"avg_line_length": 26.153846153846153,
"alnum_prop": 0.6205882352941177,
"repo_name": "ulikoehler/ProjectEuler",
"id": "5a08e2023b3ab2506fea5b292ababc67abdf21b5",
"size": "363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Euler120.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "5478"
},
{
"name": "Haskell",
"bytes": "42546"
},
{
"name": "Jupyter Notebook",
"bytes": "55552"
},
{
"name": "Python",
"bytes": "10178"
}
],
"symlink_target": ""
} |
'''get repository information for use in a shell prompt
Take a string, parse any special variables inside, and output the result.
Useful mostly for putting information about the current repository into
a shell prompt.
'''
from __future__ import with_statement
import re
import os
import subprocess
from datetime import datetime, timedelta
from os import path
from mercurial import extensions, commands, cmdutil, help
from mercurial.node import hex, short
# `revrange' has been moved into module `scmutil' since v1.9.
try :
from mercurial import scmutil
revrange = scmutil.revrange
except :
revrange = cmdutil.revrange
CACHE_PATH = ".hg/prompt/cache"
CACHE_TIMEOUT = timedelta(minutes=15)
FILTER_ARG = re.compile(r'\|.+\((.*)\)')
def _cache_remote(repo, kind):
cache = path.join(repo.root, CACHE_PATH, kind)
c_tmp = cache + '.temp'
# This is kind of a hack and I feel a little bit dirty for doing it.
IGNORE = open('NUL:','w') if subprocess.mswindows else open('/dev/null','w')
subprocess.call(['hg', kind, '--quiet'], stdout=file(c_tmp, 'w'), stderr=IGNORE)
os.rename(c_tmp, cache)
return
def _with_groups(groups, out):
out_groups = [groups[0]] + [groups[-1]]
if any(out_groups) and not all(out_groups):
print 'Error parsing prompt string. Mismatched braces?'
out = out.replace('%', '%%')
return ("%s" + out + "%s") % (out_groups[0][:-1] if out_groups[0] else '',
out_groups[1][1:] if out_groups[1] else '')
def _get_filter(name, g):
'''Return the filter with the given name, or None if it was not used.'''
matching_filters = filter(lambda s: s and s.startswith('|%s' % name), g)
if not matching_filters:
return None
# Later filters will override earlier ones, for now.
f = matching_filters[-1]
return f
def _get_filter_arg(f):
if not f:
return None
args = FILTER_ARG.match(f).groups()
if args:
return args[0]
else:
return None
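# For example (illustrative): given a group tuple containing '|join(, )',
# _get_filter('join', g) returns '|join(, )', and _get_filter_arg then
# extracts ', ' via the FILTER_ARG regex above.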
def prompt(ui, repo, fs='', **opts):
'''get repository information for use in a shell prompt
Take a string and output it for use in a shell prompt. You can use
keywords in curly braces::
$ hg prompt "currently on {branch}"
currently on default
You can also use an extended form of any keyword::
{optional text here{keyword}more optional text}
This will expand the inner {keyword} and output it along with the extra
text only if the {keyword} expands successfully. This is useful if you
have a keyword that may not always apply to the current state and you
have some text that you would like to see only if it is appropriate::
$ hg prompt "currently at {bookmark}"
currently at
$ hg prompt "{currently at {bookmark}}"
$ hg bookmark my-bookmark
$ hg prompt "{currently at {bookmark}}"
currently at my-bookmark
See 'hg help prompt-keywords' for a list of available keywords.
'''
def _basename(m):
return _with_groups(m.groups(), path.basename(repo.root)) if repo.root else ''
def _bookmark(m):
try:
book = extensions.find('bookmarks').current(repo)
except AttributeError:
book = getattr(repo, '_bookmarkcurrent', None)
except KeyError:
book = getattr(repo, '_bookmarkcurrent', None)
if book:
cur = repo['.'].node()
if repo._bookmarks[book] == cur:
return _with_groups(m.groups(), book)
else:
return ''
def _branch(m):
g = m.groups()
branch = repo.dirstate.branch()
quiet = _get_filter('quiet', g)
out = branch if (not quiet) or (branch != 'default') else ''
return _with_groups(g, out) if out else ''
def _closed(m):
g = m.groups()
quiet = _get_filter('quiet', g)
p = repo[None].parents()[0]
pn = p.node()
branch = repo.dirstate.branch()
closed = (p.extra().get('close')
and pn in repo.branchheads(branch, closed=True))
out = 'X' if (not quiet) and closed else ''
return _with_groups(g, out) if out else ''
def _count(m):
g = m.groups()
query = [g[1][1:]] if g[1] else ['all()']
return _with_groups(g, str(len(revrange(repo, query))))
def _node(m):
g = m.groups()
parents = repo[None].parents()
p = 0 if '|merge' not in g else 1
p = p if len(parents) > p else None
format = short if '|short' in g else hex
node = format(parents[p].node()) if p is not None else None
return _with_groups(g, str(node)) if node else ''
def _patch(m):
g = m.groups()
try:
extensions.find('mq')
except KeyError:
return ''
q = repo.mq
if _get_filter('quiet', g) and not len(q.series):
return ''
if _get_filter('topindex', g):
if len(q.applied):
out = str(len(q.applied) - 1)
else:
out = ''
elif _get_filter('applied', g):
out = str(len(q.applied))
elif _get_filter('unapplied', g):
out = str(len(q.unapplied(repo)))
elif _get_filter('count', g):
out = str(len(q.series))
else:
out = q.applied[-1].name if q.applied else ''
return _with_groups(g, out) if out else ''
def _patches(m):
g = m.groups()
try:
extensions.find('mq')
except KeyError:
return ''
join_filter = _get_filter('join', g)
join_filter_arg = _get_filter_arg(join_filter)
sep = join_filter_arg if join_filter else ' -> '
patches = repo.mq.series
applied = [p.name for p in repo.mq.applied]
unapplied = filter(lambda p: p not in applied, patches)
if _get_filter('hide_applied', g):
patches = filter(lambda p: p not in applied, patches)
if _get_filter('hide_unapplied', g):
patches = filter(lambda p: p not in unapplied, patches)
if _get_filter('reverse', g):
patches = reversed(patches)
pre_applied_filter = _get_filter('pre_applied', g)
pre_applied_filter_arg = _get_filter_arg(pre_applied_filter)
post_applied_filter = _get_filter('post_applied', g)
post_applied_filter_arg = _get_filter_arg(post_applied_filter)
pre_unapplied_filter = _get_filter('pre_unapplied', g)
pre_unapplied_filter_arg = _get_filter_arg(pre_unapplied_filter)
post_unapplied_filter = _get_filter('post_unapplied', g)
post_unapplied_filter_arg = _get_filter_arg(post_unapplied_filter)
for n, patch in enumerate(patches):
if patch in applied:
if pre_applied_filter:
patches[n] = pre_applied_filter_arg + patches[n]
if post_applied_filter:
patches[n] = patches[n] + post_applied_filter_arg
elif patch in unapplied:
if pre_unapplied_filter:
patches[n] = pre_unapplied_filter_arg + patches[n]
if post_unapplied_filter:
patches[n] = patches[n] + post_unapplied_filter_arg
return _with_groups(g, sep.join(patches)) if patches else ''
def _queue(m):
g = m.groups()
try:
extensions.find('mq')
except KeyError:
return ''
q = repo.mq
out = os.path.basename(q.path)
if out == 'patches' and not os.path.isdir(q.path):
out = ''
elif out.startswith('patches-'):
out = out[8:]
return _with_groups(g, out) if out else ''
def _remote(kind):
def _r(m):
g = m.groups()
cache_dir = path.join(repo.root, CACHE_PATH)
cache = path.join(cache_dir, kind)
if not path.isdir(cache_dir):
os.makedirs(cache_dir)
cache_exists = path.isfile(cache)
cache_time = (datetime.fromtimestamp(os.stat(cache).st_mtime)
if cache_exists else None)
if not cache_exists or cache_time < datetime.now() - CACHE_TIMEOUT:
if not cache_exists:
open(cache, 'w').close()
subprocess.Popen(['hg', 'prompt', '--cache-%s' % kind])
if cache_exists:
with open(cache) as c:
count = len(c.readlines())
if g[1]:
return _with_groups(g, str(count)) if count else ''
else:
return _with_groups(g, '') if count else ''
else:
return ''
return _r
def _rev(m):
g = m.groups()
parents = repo[None].parents()
parent = 0 if '|merge' not in g else 1
parent = parent if len(parents) > parent else None
rev = parents[parent].rev() if parent is not None else -1
return _with_groups(g, str(rev)) if rev >= 0 else ''
def _root(m):
return _with_groups(m.groups(), repo.root) if repo.root else ''
def _status(m):
g = m.groups()
st = repo.status(unknown=True)[:5]
modified = any(st[:4])
unknown = len(st[-1]) > 0
flag = ''
if '|modified' not in g and '|unknown' not in g:
flag = '!' if modified else '?' if unknown else ''
else:
if '|modified' in g:
flag += '!' if modified else ''
if '|unknown' in g:
flag += '?' if unknown else ''
return _with_groups(g, flag) if flag else ''
def _tags(m):
g = m.groups()
sep = g[2][1:] if g[2] else ' '
tags = repo[None].tags()
quiet = _get_filter('quiet', g)
if quiet:
tags = filter(lambda tag: tag != 'tip', tags)
return _with_groups(g, sep.join(tags)) if tags else ''
def _task(m):
try:
task = extensions.find('tasks').current(repo)
return _with_groups(m.groups(), task) if task else ''
except KeyError:
return ''
def _tip(m):
g = m.groups()
format = short if '|short' in g else hex
tip = repo[len(repo) - 1]
rev = tip.rev()
tip = format(tip.node()) if '|node' in g else tip.rev()
return _with_groups(g, str(tip)) if rev >= 0 else ''
def _update(m):
if not repo.branchtags():
# We are in an empty repository.
return ''
current_rev = repo[None].parents()[0]
to = repo[repo.branchtags()[current_rev.branch()]]
return _with_groups(m.groups(), '^') if current_rev != to else ''
if opts.get("angle_brackets"):
tag_start = r'\<([^><]*?\<)?'
tag_end = r'(\>[^><]*?)?>'
brackets = '<>'
else:
tag_start = r'\{([^{}]*?\{)?'
tag_end = r'(\}[^{}]*?)?\}'
brackets = '{}'
patterns = {
'bookmark': _bookmark,
'branch(\|quiet)?': _branch,
'closed(\|quiet)?': _closed,
'count(\|[^%s]*?)?' % brackets[-1]: _count,
'node(?:'
'(\|short)'
'|(\|merge)'
')*': _node,
'patch(?:'
'(\|topindex)'
'|(\|applied)'
'|(\|unapplied)'
'|(\|count)'
'|(\|quiet)'
')*': _patch,
'patches(?:' +
'(\|join\([^%s]*?\))' % brackets[-1] +
'|(\|reverse)' +
'|(\|hide_applied)' +
'|(\|hide_unapplied)' +
'|(\|pre_applied\([^%s]*?\))' % brackets[-1] +
'|(\|post_applied\([^%s]*?\))' % brackets[-1] +
'|(\|pre_unapplied\([^%s]*?\))' % brackets[-1] +
'|(\|post_unapplied\([^%s]*?\))' % brackets[-1] +
')*': _patches,
'queue': _queue,
'rev(\|merge)?': _rev,
'root': _root,
'root\|basename': _basename,
'status(?:'
'(\|modified)'
'|(\|unknown)'
')*': _status,
'tags(?:' +
'(\|quiet)' +
'|(\|[^%s]*?)' % brackets[-1] +
')*': _tags,
'task': _task,
'tip(?:'
'(\|node)'
'|(\|short)'
')*': _tip,
'update': _update,
'incoming(\|count)?': _remote('incoming'),
'outgoing(\|count)?': _remote('outgoing'),
}
if opts.get("cache_incoming"):
_cache_remote(repo, 'incoming')
if opts.get("cache_outgoing"):
_cache_remote(repo, 'outgoing')
for tag, repl in patterns.items():
fs = re.sub(tag_start + tag + tag_end, repl, fs)
ui.status(fs)
def _pull_with_cache(orig, ui, repo, *args, **opts):
"""Wrap the pull command to delete the incoming cache as well."""
res = orig(ui, repo, *args, **opts)
cache = path.join(repo.root, CACHE_PATH, 'incoming')
if path.isfile(cache):
os.remove(cache)
return res
def _push_with_cache(orig, ui, repo, *args, **opts):
"""Wrap the push command to delete the outgoing cache as well."""
res = orig(ui, repo, *args, **opts)
cache = path.join(repo.root, CACHE_PATH, 'outgoing')
if path.isfile(cache):
os.remove(cache)
return res
def uisetup(ui):
extensions.wrapcommand(commands.table, 'pull', _pull_with_cache)
extensions.wrapcommand(commands.table, 'push', _push_with_cache)
try:
extensions.wrapcommand(extensions.find("fetch").cmdtable, 'fetch', _pull_with_cache)
except KeyError:
pass
cmdtable = {
"prompt":
(prompt, [
('', 'angle-brackets', None, 'use angle brackets (<>) for keywords'),
('', 'cache-incoming', None, 'used internally by hg-prompt'),
('', 'cache-outgoing', None, 'used internally by hg-prompt'),
],
'hg prompt STRING')
}
help.helptable += (
    (['prompt-keywords'], ('Keywords supported by hg-prompt'),
(r'''hg-prompt currently supports a number of keywords.
Some keywords support filters. Filters can be chained when it makes
sense to do so. When in doubt, try it!
bookmark
Display the current bookmark (requires the bookmarks extension).
branch
Display the current branch.
|quiet
Display the current branch only if it is not the default branch.
closed
Display `X` if working on a closed branch (i.e. committing now would reopen
the branch).
count
Display the number of revisions in the given revset (the revset `all()`
will be used if none is given).
See `hg help revsets` for more information.
|REVSET
The revset to count.
incoming
Display nothing, but if the default path contains incoming changesets the
extra text will be expanded.
For example: `{incoming changes{incoming}}` will expand to
`incoming changes` if there are changes, otherwise nothing.
Checking for incoming changesets is an expensive operation, so `hg-prompt`
will cache the results in `.hg/prompt/cache/` and refresh them every 15
minutes.
|count
Display the number of incoming changesets (if greater than 0).
node
Display the (full) changeset hash of the current parent.
|short
Display the hash as the short, 12-character form.
|merge
Display the hash of the changeset you're merging with.
outgoing
Display nothing, but if the current repository contains outgoing
changesets (to default) the extra text will be expanded.
For example: `{outgoing changes{outgoing}}` will expand to
`outgoing changes` if there are changes, otherwise nothing.
Checking for outgoing changesets is an expensive operation, so `hg-prompt`
will cache the results in `.hg/prompt/cache/` and refresh them every 15
minutes.
|count
Display the number of outgoing changesets (if greater than 0).
patch
Display the topmost currently-applied patch (requires the mq
extension).
|count
Display the number of patches in the queue.
|topindex
Display (zero-based) index of the topmost applied patch in the series
list (as displayed by :hg:`qtop -v`), or the empty string if no patch
is applied.
|applied
Display the number of currently applied patches in the queue.
|unapplied
Display the number of currently unapplied patches in the queue.
|quiet
Display a number only if there are any patches in the queue.
patches
Display a list of the current patches in the queue. It will look like
this:
:::console
$ hg prompt '{patches}'
bottom-patch -> middle-patch -> top-patch
|reverse
Display the patches in reverse order (i.e. topmost first).
|hide_applied
Do not display applied patches.
|hide_unapplied
Do not display unapplied patches.
|join(SEP)
Display SEP between each patch, instead of the default ` -> `.
|pre_applied(STRING)
Display STRING immediately before each applied patch. Useful for
adding color codes.
|post_applied(STRING)
Display STRING immediately after each applied patch. Useful for
resetting color codes.
|pre_unapplied(STRING)
Display STRING immediately before each unapplied patch. Useful for
adding color codes.
|post_unapplied(STRING)
Display STRING immediately after each unapplied patch. Useful for
resetting color codes.
queue
Display the name of the current MQ queue.
rev
Display the repository-local changeset number of the current parent.
|merge
Display the repository-local changeset number of the changeset you're
merging with.
root
Display the full path to the root of the current repository, without a
trailing slash.
|basename
Display the directory name of the root of the current repository. For
example, if the repository is in `/home/u/myrepo` then this keyword
would expand to `myrepo`.
status
Display `!` if the repository has any changed/added/removed files,
otherwise `?` if it has any untracked (but not ignored) files, otherwise
nothing.
|modified
Display `!` if the current repository contains files that have been
modified, added, removed, or deleted, otherwise nothing.
|unknown
Display `?` if the current repository contains untracked files,
otherwise nothing.
tags
Display the tags of the current parent, separated by a space.
|quiet
Display the tags of the current parent, excluding the tag "tip".
|SEP
Display the tags of the current parent, separated by `SEP`.
task
Display the current task (requires the tasks extension).
tip
Display the repository-local changeset number of the current tip.
|node
Display the (full) changeset hash of the current tip.
|short
Display a short form of the changeset hash of the current tip (must be
    used with the **|node** filter).
update
Display `^` if the current parent is not the tip of the current branch,
otherwise nothing. In effect, this lets you see if running `hg update`
would do something.
''')),
)
| {
"content_hash": "cf811957cc6218b45a53f423215187dc",
"timestamp": "",
"source": "github",
"line_count": 637,
"max_line_length": 92,
"avg_line_length": 30.601255886970172,
"alnum_prop": 0.5687682757913097,
"repo_name": "rushi/dotfiles",
"id": "3851cad6ba3472d1d85c08fd604b777805b94daa",
"size": "19516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/hg-prompt/prompt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1165"
},
{
"name": "Python",
"bytes": "30243"
},
{
"name": "Shell",
"bytes": "26178"
},
{
"name": "Vim Script",
"bytes": "1454"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from .generator import MarkovGenerator
__all__ = [
'MarkovGenerator',
]
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| {
"content_hash": "8a8c17184bc702c7fccc17967b2bffc2",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 64,
"avg_line_length": 23.5,
"alnum_prop": 0.723404255319149,
"repo_name": "kdmurray91/mpg",
"id": "4f9ae96d55f80317f9535495d114869663b54df8",
"size": "1351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mpg/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6772"
},
{
"name": "Jupyter Notebook",
"bytes": "245015"
},
{
"name": "Makefile",
"bytes": "144"
},
{
"name": "Python",
"bytes": "91185"
}
],
"symlink_target": ""
} |
from . import viewsets
from rest_framework import routers
api_router = routers.SimpleRouter()
api_router.register('category', viewsets.CategoryViewSet)
api_router.register('source', viewsets.SourceViewSet)
api_router.register('seed', viewsets.SeedViewSet)
api_router.register('blacklist', viewsets.BlacklistViewSet)
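# Wiring api_router.urls into a URLconf yields the usual SimpleRouter routes
# (names assuming the default basenames derived from each viewset's model),
# e.g. category/ -> category-list and category/<pk>/ -> category-detail.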
| {
"content_hash": "739deda69e666b8da45bc98843eec1bf",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 59,
"avg_line_length": 31.9,
"alnum_prop": 0.8087774294670846,
"repo_name": "WebArchivCZ/Seeder",
"id": "462755d5f69dea1369279bc0f509543326c6b45a",
"size": "319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Seeder/api/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "40916"
},
{
"name": "HTML",
"bytes": "191411"
},
{
"name": "JavaScript",
"bytes": "35092"
},
{
"name": "PHP",
"bytes": "996"
},
{
"name": "Python",
"bytes": "298522"
},
{
"name": "Shell",
"bytes": "691"
}
],
"symlink_target": ""
} |
import datetime
import random
from django.conf import settings
from django.core.mail import send_mail
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from django.db import models
from invite_registration import signals
from invite_registration.invite_registration_settings import INITIAL_NUMBER_INVITATIONS
DEFAULT_ALPHABET = 'az7er5tyu1io0pq4sd9fg6hjk8lmw3xcv2bn'
class InviteRequest(models.Model):
email = models.EmailField(_('Email address'), unique=True)
def __unicode__(self):
return self.email
class Invitation(models.Model):
"""
Invitation model
"""
code = models.CharField(blank=True, max_length=6,
verbose_name=_(u"Invitation code"))
user = models.ForeignKey(User, verbose_name=_(u'Invitor'),
related_name='invitations')
email = models.EmailField(verbose_name=_(u"Email"))
date_invited = models.DateTimeField(default=datetime.datetime.now,
verbose_name=_(u'date invited'))
class Meta:
verbose_name = _(u'Invitation')
verbose_name_plural = _(u'Invitations')
ordering = ('-date_invited',)
def __unicode__(self):
return _('%(username)s invited %(email)s on %(date)s') % {
'username': self.user.username,
'email': self.email,
'date': str(self.date_invited.date()),
}
    def save(self, *args, **kwargs):
if not self.id:
self.code = ''.join(random.sample(DEFAULT_ALPHABET, 6))
self.user.invitation_use.add_sent()
super(Invitation, self).save(*args, **kwargs)
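        # Note (illustrative): random.sample picks 6 distinct characters
        # from the 36-character alphabet, so there are 36*35*34*33*32*31
        # (~1.4 billion) possible codes; uniqueness is not enforced here.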
def accepted(self, invited_user):
"""
increment number of accepted invitations
invitation.signals.invitation_accepted``
"""
self.user.invitation_use.add_accepted()
signals.invitation_accepted.send(sender=self,
inviting_user=self.user,
invited_user=invited_user)
accepted.alters_data = True
def send_email(self, request=None):
"""
Send invitation email.
``invitation.signals.invitation_sent`` is sent on completion.
"""
if Site._meta.installed:
site = Site.objects.get_current()
elif request is not None:
site = RequestSite(request)
invitation_context = {'invitation': self, 'site': site}
        subject = render_to_string('invite_registration/invitation_email_subject.txt',
invitation_context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
        message = render_to_string('invite_registration/invitation_email.txt',
invitation_context)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [self.email])
signals.invitation_sent.send(sender=self)
class InvitationUse(models.Model):
"""
Invitation use of user.
"""
user = models.OneToOneField(User,
related_name='invitation_use')
available = models.IntegerField(_(u'available invitations'),
default=INITIAL_NUMBER_INVITATIONS)
sent = models.IntegerField(_(u'invitations sent'), default=0)
accepted = models.IntegerField(_(u'invitations accepted'), default=0)
def __unicode__(self):
return _(u'invitation use for %(username)s') % {
'username': self.user.username}
def can_send(self):
        return self.available > 0
def add_sent(self):
"""
user sent an invitation
"""
self.available -= 1
self.sent += 1
self.save()
add_sent.alters_data = True
def add_accepted(self):
"""
a new invitation has been accepted
"""
self.accepted += 1
self.save()
add_accepted.alters_data = True
| {
"content_hash": "90abb9a17e8aafd29d92cf220ea555ec",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 87,
"avg_line_length": 37.543103448275865,
"alnum_prop": 0.5818599311136624,
"repo_name": "mouradmourafiq/django-invite-registration",
"id": "5cfa267b4893db5a6ed0022a9cba8052d17fe7ae",
"size": "4379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invite_registration/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "22687"
}
],
"symlink_target": ""
} |
from django.utils.decorators import method_decorator
from django.contrib.admin.views.decorators import staff_member_required
from django.views.generic import TemplateView
from django.views.generic.edit import CreateView
try:
from django.urls import reverse, reverse_lazy
except ImportError:
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils import timezone
from django.http import HttpResponseRedirect
from applications.sms_ussd.forms import SendSMSCreateForm
from applications.sms_ussd.models import SMS, USSD
__author__ = 'AlexStarov'
class SMS_USSDPanelView(TemplateView, ):
template_name = 'sms_ussd/sms_ussd_panel.html'
success_url = reverse_lazy('admin_page:sms_ussd_send_sms')
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
return super(SMS_USSDPanelView, self).dispatch(request, *args, **kwargs)
class SendSMSCreateView(CreateView, ):
form_class = SendSMSCreateForm
template_name = 'sms_ussd/sendSMS_form.html'
model = SMS
success_url = reverse_lazy('admin_page:sms_ussd_send_sms')
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
return super(SendSMSCreateView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form instance with the passed
        POST variables and then checking it for validity.
"""
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form, kwargs={'request': request, }, )
else:
return self.form_invalid(form)
def form_valid(self, form, **kwargs):
"""
If the form is valid, save the associated model.
"""
self.object = form.save(commit=False, )
self.object.received_at = timezone.now()
        self.object.direction = 2  # Outgoing
self.object.user_id = kwargs['kwargs']['request'].user.pk
self.object.sessionid = kwargs['kwargs']['request'].session.session_key
self.object.to_phone_char = form.cleaned_data['phone']
self.object.to_code = form.cleaned_data['to_code']
self.object.to_phone = form.cleaned_data['to_phone']
self.object.message = form.cleaned_data['message']
self.object.save()
return HttpResponseRedirect(self.get_success_url(), )
class SendUSSDCreateView(CreateView, ):
form_class = SendSMSCreateForm
template_name = 'sms_ussd/sendUSSD_form.html'
model = USSD
success_url = reverse_lazy('admin_page:sms_ussd_send_ussd')
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
return super(SendUSSDCreateView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form instance with the passed
        POST variables and then checking it for validity.
"""
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form, kwargs={'request': request, }, )
else:
return self.form_invalid(form)
def form_valid(self, form, **kwargs):
"""
If the form is valid, save the associated model.
"""
self.object = form.save(commit=False, )
self.object.received_at = timezone.now()
        self.object.direction = 2  # Outgoing
self.object.user_id = kwargs['kwargs']['request'].user.pk
self.object.sessionid = kwargs['kwargs']['request'].session.session_key
self.object.to_phone_char = form.cleaned_data['phone']
self.object.to_code = form.cleaned_data['to_code']
self.object.to_phone = form.cleaned_data['to_phone']
self.object.message = form.cleaned_data['message']
self.object.save()
return HttpResponseRedirect(self.get_success_url(), )
| {
"content_hash": "6fb24053658c73478a6cc087d51f3688",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 81,
"avg_line_length": 35.14655172413793,
"alnum_prop": 0.6595535933284278,
"repo_name": "AlexStarov/Shop",
"id": "45a9f5103dd850652a389d5fbf49352d92c343e5",
"size": "4101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applications/adminSite/sms_ussd/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "268281"
},
{
"name": "HTML",
"bytes": "138853"
},
{
"name": "JavaScript",
"bytes": "10629133"
},
{
"name": "PHP",
"bytes": "14"
},
{
"name": "Python",
"bytes": "1532862"
},
{
"name": "Shell",
"bytes": "2089"
}
],
"symlink_target": ""
} |
#------------------------------------------------------------
# Arduino Yun UDP Echo Server
#------------------------------------------------------------
# Version 1.6
# April 14, 2018
# MIT License (in root of git repo)
# by Tim Seemann
#
#
# Takes UDP datagram packets at its UDP_PORT as input and
# echoes them to the Arduino Yun's ATmega32U4 processor.
# This script takes a lot of elements from the [Yun's
# Bridge client](https://github.com/arduino/YunBridge/blob/master/bridge/bridgeclient.py)
# but does not use it directly. Instead, we interact with
# the TSPJSONClient directly and manage our own sockets.
# This adds a significant speed increase over the Bridge
# Client's implementation.
#
# [Check here for setup instructions](https://github.com/timsee/ArduCor/tree/master/samples/yun)
#
#------------------------------------------------------------
#-----
# imports
import socket
from time import sleep
import sys
# Add the Yun-specific bridge library to the sys path
sys.path.insert(0, '/usr/lib/python2.7/bridge')
# imports the yun specific library
from bridgeclient import BridgeClient
#-----
# config
# port for the UDP connection to bind to
UDP_PORT = 10008
# set by the arduino project, defines maximum size packet it will accept
max_packet_size = 512
# Echoing back commands slows down the speed that lights update but it gives
# more reliability since you know when a packet is received. Set this to
# false to increase the light update speed.
should_echo = True
#-----
# bridge setup
# set up the serial port for communication with
# the yun's microprocessor
print "Setup the Arduino bridge..."
bridge = BridgeClient()
# Very important! Without this command, communication will work
# but will be significantly slower, since it will open and close
# sockets in each function.
bridge.begin()
#-----
print "Setup the UDP Socket..."
# set up UDP server. Python can do a simple server
# in just a few lines.
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.bind(("", UDP_PORT))
#-----
# loop
#repeats ad nauseam
while True:
# waits until it receives data
data, addr = sock.recvfrom(512)
header = data[:2]
# print "received %r from %r" % (data, addr)
if data == "DISCOVERY_PACKET":
major_api = bridge.get('major_api')
minor_api = bridge.get('minor_api')
using_crc = bridge.get('using_crc')
hardware_count = bridge.get('hardware_count')
max_packet_size = bridge.get('max_packet_size')
hardware_name = bridge.get('hardware_name')
hardware_type = bridge.get('hardware_type')
product_type = bridge.get('product_type')
hardware_capabilities = '0'
data += ','
data += str(major_api)
data += ','
data += str(minor_api)
data += ','
data += str(using_crc)
data += ','
data += str(hardware_capabilities)
data += ','
data += str(max_packet_size)
data += ','
data += str(hardware_count)
data += '@'
data += hardware_name
data += ','
data += hardware_type
data += ','
data += product_type
data += '&'
# sends discovery packet
sock.sendto(data, (addr[0], UDP_PORT))
elif header == "6&":
bridge.put('udp', data)
state_update = bridge.get('state_update')
sock.sendto(state_update, (addr[0], UDP_PORT))
elif header == "7&":
bridge.put('udp', data)
custom_array_update = bridge.get('custom_array_update')
sock.sendto(custom_array_update, (addr[0], UDP_PORT))
elif len(data) <= max_packet_size:
# puts data on arduino bridge
bridge.put('udp', data)
if should_echo:
sock.sendto(data, (addr[0], UDP_PORT))
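# For illustration (values hypothetical), a discovery reply echoes the
# request plus the bridge fields in the order built above, e.g.:
#   DISCOVERY_PACKET,2,3,1,0,512,1@MyYunSample,1,2&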
| {
"content_hash": "4c05afd2b8d6010b60a67b9c00b7b402",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 96,
"avg_line_length": 31.629032258064516,
"alnum_prop": 0.5915349311575727,
"repo_name": "timsee/RGB-LED-Routines",
"id": "eac4e9e9483b64252b92e907956b61d23dd5a743",
"size": "3941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/Corluma/yun/udp-server/yun-udp-server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "16753"
},
{
"name": "C++",
"bytes": "85740"
},
{
"name": "Shell",
"bytes": "9095"
}
],
"symlink_target": ""
} |
"""
This is the place that takes the basic configuration of the LaTeX
build project.
"""
LATEX_PROJECT = 'manual'
DEFAULT_TARGET = 'pdf'
IMAGES_DIRECTORY = 'images'
GENERATED_DIRECTORY = 'generated'
CHAPTER_DIRECTORY = 'chapters'
FILE_EXTENSIONS = {'eps': '.eps',
'pdf': '.pdf',
'png': '.png',
'jpg': '.jpg',
'gnuplot': '.gnuplot',
'dot': '.dot',
'msc': '.msc'}
MAKEINDEX_EXTENSIONS = ['.glg', '.glo', '.gls']
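# Example (illustrative): a build step could resolve a generated figure as
#   os.path.join(GENERATED_DIRECTORY, name + FILE_EXTENSIONS['pdf'])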
| {
"content_hash": "148736e207e7fcce71d267a66e943c12",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 65,
"avg_line_length": 24.045454545454547,
"alnum_prop": 0.499054820415879,
"repo_name": "zer0infinity/CuteTestForCoastTest",
"id": "bc575729ede98771f8a46ff2761b4d7410cbaba9",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manual/build_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "359712"
},
{
"name": "C++",
"bytes": "6391959"
},
{
"name": "HTML",
"bytes": "460716"
},
{
"name": "Objective-C",
"bytes": "3279"
},
{
"name": "PLSQL",
"bytes": "25209"
},
{
"name": "Python",
"bytes": "14181"
},
{
"name": "Scilab",
"bytes": "10"
},
{
"name": "Shell",
"bytes": "35113"
},
{
"name": "TeX",
"bytes": "24789"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
# Examples:
# url(r'^$', 'pganalytics.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
]
| {
"content_hash": "9658383e8fbe26ca676407e3a31cd4fc",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 56,
"avg_line_length": 25.9,
"alnum_prop": 0.637065637065637,
"repo_name": "fearofcode/nagini",
"id": "1ff5decd61f139495548e2d32675baac0165e75d",
"size": "259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pganalytics/pganalytics/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16568"
}
],
"symlink_target": ""
} |