text stringlengths 4 1.02M | meta dict |
|---|---|
# Server-wide feature flags.
# NOTE(review): usage sites are not visible here — presumably DEV toggles
# development-environment behaviour and DEBUG enables verbose output;
# confirm at the call sites before relying on these descriptions.
DEV = False
DEBUG = True
| {
"content_hash": "a9de7a28d726a2b74cf4f0405f9af0cf",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 12,
"avg_line_length": 12.5,
"alnum_prop": 0.68,
"repo_name": "iamsudip/he-clog",
"id": "ae41be76eeb500522621688ea0f3033ebce5afb1",
"size": "113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/constants.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "13016"
},
{
"name": "Shell",
"bytes": "110"
},
{
"name": "Thrift",
"bytes": "72"
}
],
"symlink_target": ""
} |
import os
import json
import signal
from ducktape.utils.util import wait_until
from ducktape.services.background_thread import BackgroundThreadService
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from ducktape.cluster.remoteaccount import RemoteCommandError
class TransactionalMessageCopier(KafkaPathResolverMixin, BackgroundThreadService):
    """This service wraps org.apache.kafka.tools.TransactionalMessageCopier for
    use in system testing.

    The Java tool copies records from an input partition to an output topic
    inside transactions; this wrapper starts it on a remote node, parses its
    JSON status lines to track progress, and supports clean/unclean shutdown.
    """

    # All artifacts produced by the copier live under this directory on the
    # remote worker node.
    PERSISTENT_ROOT = "/mnt/transactional_message_copier"
    STDOUT_CAPTURE = os.path.join(PERSISTENT_ROOT, "transactional_message_copier.stdout")
    STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "transactional_message_copier.stderr")
    LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs")
    LOG_FILE = os.path.join(LOG_DIR, "transactional_message_copier.log")
    LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")

    # Files that ducktape collects from the node when a test finishes.
    logs = {
        "transactional_message_copier_stdout": {
            "path": STDOUT_CAPTURE,
            "collect_default": True},
        "transactional_message_copier_stderr": {
            "path": STDERR_CAPTURE,
            "collect_default": True},
        "transactional_message_copier_log": {
            "path": LOG_FILE,
            "collect_default": True}
    }

    def __init__(self, context, num_nodes, kafka, transactional_id, consumer_group,
                 input_topic, input_partition, output_topic, max_messages=-1,
                 transaction_size=1000, transaction_timeout=None, enable_random_aborts=True,
                 use_group_metadata=False, group_mode=False):
        """Configure (but do not start) a copier service.

        :param kafka: the Kafka service whose cluster the copier talks to.
        :param transactional_id: transactional.id used by the tool's producer.
        :param consumer_group: group id used to consume the input topic.
        :param max_messages: stop after this many messages; -1 means unbounded.
        :param transaction_size: messages copied per transaction.
        :param transaction_timeout: producer transaction timeout (ms); omitted
            from the command line when None.
        :param enable_random_aborts: if True the tool randomly aborts
            transactions to exercise abort paths.
        """
        super(TransactionalMessageCopier, self).__init__(context, num_nodes)
        self.kafka = kafka
        self.transactional_id = transactional_id
        self.consumer_group = consumer_group
        self.transaction_size = transaction_size
        self.transaction_timeout = transaction_timeout
        self.input_topic = input_topic
        self.input_partition = input_partition
        self.output_topic = output_topic
        self.max_messages = max_messages
        self.message_copy_finished = False
        # Progress counters, updated by _worker under self.lock; -1 = unknown.
        # NOTE(review): self.lock is not assigned here — presumably provided by
        # BackgroundThreadService; confirm in the base class.
        self.consumed = -1
        self.remaining = -1
        self.stop_timeout_sec = 60
        self.enable_random_aborts = enable_random_aborts
        self.use_group_metadata = use_group_metadata
        self.group_mode = group_mode
        # Raise client-side log verbosity to aid transaction debugging.
        self.loggers = {
            "org.apache.kafka.clients.producer": "TRACE",
            "org.apache.kafka.clients.consumer": "TRACE"
        }

    def _worker(self, idx, node):
        """Background thread body: run the tool on `node` and parse its output.

        Each JSON status line updates the consumed/remaining counters; a
        'ShutdownComplete' stage with zero remaining marks the copy finished.
        """
        node.account.ssh("mkdir -p %s" % TransactionalMessageCopier.PERSISTENT_ROOT,
                         allow_fail=False)
        # Create and upload log properties
        log_config = self.render('tools_log4j.properties',
                                 log_file=TransactionalMessageCopier.LOG_FILE)
        node.account.create_file(TransactionalMessageCopier.LOG4J_CONFIG, log_config)
        # Configure security
        self.security_config = self.kafka.security_config.client_config(node=node)
        self.security_config.setup_node(node)
        cmd = self.start_cmd(node, idx)
        self.logger.debug("TransactionalMessageCopier %d command: %s" % (idx, cmd))
        try:
            for line in node.account.ssh_capture(cmd):
                line = line.strip()
                data = self.try_parse_json(line)
                if data is not None:
                    with self.lock:
                        self.remaining = int(data["remaining"])
                        self.consumed = int(data["consumed"])
                        self.logger.info("%s: consumed %d, remaining %d" %
                                         (self.transactional_id, self.consumed, self.remaining))
                        if data["stage"] == "ShutdownComplete":
                            if self.remaining == 0:
                                # We are only finished if the remaining
                                # messages at the time of shutdown is 0.
                                #
                                # Otherwise a clean shutdown would still print
                                # a 'shutdown complete' messages even though
                                # there are unprocessed messages, causing
                                # tests to fail.
                                self.logger.info("%s : Finished message copy" % self.transactional_id)
                                self.message_copy_finished = True
                            else:
                                self.logger.info("%s : Shut down without finishing message copy." %\
                                                 self.transactional_id)
        except RemoteCommandError as e:
            # Expected on hard kill: the remote process dies mid-stream.
            self.logger.debug("Got exception while reading output from copier, \
                probably because it was SIGKILL'd (exit code 137): %s" % str(e))

    def start_cmd(self, node, idx):
        """Build the shell command that launches the Java copier on `node`."""
        cmd = "export LOG_DIR=%s;" % TransactionalMessageCopier.LOG_DIR
        cmd += " export KAFKA_OPTS=%s;" % self.security_config.kafka_opts
        cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % TransactionalMessageCopier.LOG4J_CONFIG
        cmd += self.path.script("kafka-run-class.sh", node) + " org.apache.kafka.tools." + "TransactionalMessageCopier"
        cmd += " --broker-list %s" % self.kafka.bootstrap_servers(self.security_config.security_protocol)
        cmd += " --transactional-id %s" % self.transactional_id
        cmd += " --consumer-group %s" % self.consumer_group
        cmd += " --input-topic %s" % self.input_topic
        cmd += " --output-topic %s" % self.output_topic
        cmd += " --input-partition %s" % str(self.input_partition)
        cmd += " --transaction-size %s" % str(self.transaction_size)

        if self.transaction_timeout is not None:
            cmd += " --transaction-timeout %s" % str(self.transaction_timeout)

        if self.enable_random_aborts:
            cmd += " --enable-random-aborts"

        if self.use_group_metadata:
            cmd += " --use-group-metadata"

        if self.group_mode:
            cmd += " --group-mode"

        if self.max_messages > 0:
            cmd += " --max-messages %s" % str(self.max_messages)
        # Run in the background; stderr appended to a file, stdout both
        # captured on disk and streamed back for the _worker parser.
        cmd += " 2>> %s | tee -a %s &" % (TransactionalMessageCopier.STDERR_CAPTURE, TransactionalMessageCopier.STDOUT_CAPTURE)
        return cmd

    def clean_node(self, node):
        """Hard-kill the copier and remove all of its on-disk state."""
        self.kill_node(node, clean_shutdown=False)
        node.account.ssh("rm -rf " + self.PERSISTENT_ROOT, allow_fail=False)
        self.security_config.clean_node(node)

    def pids(self, node):
        """Return the remote JVM pids of running copier processes ([] if none)."""
        try:
            cmd = "jps | grep -i TransactionalMessageCopier | awk '{print $1}'"
            pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
            return pid_arr
        except (RemoteCommandError, ValueError) as e:
            self.logger.error("Could not list pids: %s" % str(e))
            return []

    def alive(self, node):
        """True when at least one copier process is running on `node`."""
        return len(self.pids(node)) > 0

    def start_node(self, node):
        """Start the background thread and wait for the JVM to appear."""
        BackgroundThreadService.start_node(self, node)
        wait_until(lambda: self.alive(node), timeout_sec=60, err_msg="Node %s: Message Copier failed to start" % str(node.account))

    def kill_node(self, node, clean_shutdown=True):
        """Signal the copier (SIGTERM if clean, else SIGKILL) and wait for exit."""
        pids = self.pids(node)
        sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
        for pid in pids:
            node.account.signal(pid, sig)
        wait_until(lambda: len(self.pids(node)) == 0, timeout_sec=60, err_msg="Node %s: Message Copier failed to stop" % str(node.account))

    def stop_node(self, node, clean_shutdown=True):
        """Stop the copier process and join the background thread."""
        self.kill_node(node, clean_shutdown)
        stopped = self.wait_node(node, timeout_sec=self.stop_timeout_sec)
        assert stopped, "Node %s: did not stop within the specified timeout of %s seconds" % \
            (str(node.account), str(self.stop_timeout_sec))

    def restart(self, clean_shutdown):
        """Stop and start the copier again unless the copy already finished."""
        if self.is_done:
            return
        node = self.nodes[0]
        with self.lock:
            # Reset progress so stale counters are not read during restart.
            self.consumed = -1
            self.remaining = -1
        self.stop_node(node, clean_shutdown)
        self.start_node(node)

    def try_parse_json(self, string):
        """Try to parse a string as json. Return None if not parseable."""
        try:
            record = json.loads(string)
            return record
        except ValueError:
            self.logger.debug("Could not parse as json: %s" % str(string))
            return None

    @property
    def is_done(self):
        # True once a clean shutdown was observed with zero messages remaining.
        return self.message_copy_finished

    def progress_percent(self):
        """Percentage of input copied so far; 0 while progress is unknown."""
        with self.lock:
            if self.remaining < 0:
                return 0
            if self.consumed + self.remaining == 0:
                return 100
            return (float(self.consumed)/float(self.consumed + self.remaining)) * 100
| {
"content_hash": "28c505e81a38d8bb8d2734bf8ff13784",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 139,
"avg_line_length": 46.36787564766839,
"alnum_prop": 0.5924684322270645,
"repo_name": "lindong28/kafka",
"id": "675c7d71531f3bc380587b949e82842f10e1ca18",
"size": "9730",
"binary": false,
"copies": "4",
"ref": "refs/heads/trunk",
"path": "tests/kafkatest/services/transactional_message_copier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "32046"
},
{
"name": "Dockerfile",
"bytes": "8910"
},
{
"name": "HTML",
"bytes": "3739"
},
{
"name": "Java",
"bytes": "32991308"
},
{
"name": "Python",
"bytes": "1135446"
},
{
"name": "Roff",
"bytes": "39396"
},
{
"name": "Scala",
"bytes": "9820349"
},
{
"name": "Shell",
"bytes": "103586"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
} |
# NOTE(review): this is Python 2 code (print statements, raw_input); it will
# not run under Python 3 without porting.
from sys import argv
from os.path import exists

# Expects exactly two command-line arguments: source and destination paths.
script, from_file, to_file = argv

print"Copying from %s to %s" % (from_file, to_file)

# we could do these two on one line too, how?
# NOTE(review): `input` shadows the builtin input(); harmless here but worth
# renaming in any port.
input= open(from_file)
indata = input.read()

print "The input file is %d bytes long" % len(indata)

print "Does the output file exist? %r" % exists(to_file)
print "Ready, hit RETURN to continue,CTRL-C to abort."
# Pause so the user can abort before the destination is overwritten.
raw_input()

output = open(to_file,'w')
output.write(indata)

print "Alright, all done."

output.close()
input.close()
| {
"content_hash": "e3e0bb1b972d19ceaafe480fee782db8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 56,
"avg_line_length": 21.458333333333332,
"alnum_prop": 0.7029126213592233,
"repo_name": "AisakaTiger/Learn-Python-The-Hard-Way",
"id": "dcfe0164c33018a2f5d9e1a58412172187c53b70",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex17.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "497"
},
{
"name": "Python",
"bytes": "43256"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.sparse
import logging
import modest
import matplotlib.pyplot as plt
try:
import petsc4py
petsc4py.init()
from petsc4py import PETSc
except ImportError:
print(
'could not import PETSc. '
'PETSc can be installed by following the instructions at '
'https://www.mcs.anl.gov/petsc. Interfacing with PETSc requires '
'petsc4py which can be found at https://bitbucket.org/petsc/petsc4py. '
'Installing the latest version of petsc4py can be done with the command\n\n'
' pip install https://bitbucket.org/petsc/petsc4py/get/master.tar.gz\n')
raise
logger = logging.getLogger(__name__)
def _monitor(solver, its, fgnorm):
    '''
    Per-iteration callback installed on a PETSc KSP solver: logs the
    preconditioned residual norm for iteration *its*.
    '''
    message = 'preconditioned residual norm at iteration %s: %.5e' % (its, fgnorm)
    logger.info(message)
def petsc_solve(G,d,ksp='lgmres',pc='jacobi',rtol=1e-6,atol=1e-6,maxiter=1000,view=False):
    '''
    Solves a linear system using PETSc

    Parameters
    ----------
    G: (N,N) CSR sparse matrix

    d: (N,) data vector

    ksp: solve the system with this PETSc
        routine. See PETSc documentation for a complete list of options.
        'preonly' means that the system is solved with just the
        preconditioner. This is done when the preconditioner is 'lu',
        which means that the system is directly solved with LU
        factorization. If the system is too large to allow for a direct
        solution then use an iterative solver such as 'lgmres' or
        'gmres'

    pc: type of preconditioner. See PETSc documentation
        for a complete list of options. 'jacobi' seems to work best for
        iterative solvers. Use 'lu' if the solver is 'preonly'

    rtol: relative tolerance for iterative solvers

    atol: absolute tolerance for iterative solvers

    maxiter: maximum number of iterations

    view: logs information about the solver and monitors its
        convergence

    Returns
    -------
    (N,) ndarray: the solution vector extracted from the PETSc Vec
    '''
    # Human-readable names for the KSPConvergedReason codes PETSc returns.
    # NOTE(review): an unlisted reason code would raise KeyError below —
    # confirm this table covers the PETSc build in use.
    converged_reason_lookup = {
        1:'KSP_CONVERGED_RTOL_NORMAL',
        9:'KSP_CONVERGED_ATOL_NORMAL',
        2:'KSP_CONVERGED_RTOL',
        3:'KSP_CONVERGED_ATOL',
        4:'KSP_CONVERGED_ITS',
        5:'KSP_CONVERGED_CG_NEG_CURVE',
        6:'KSP_CONVERGED_CG_CONSTRAINED',
        7:'KSP_CONVERGED_STEP_LENGTH',
        8:'KSP_CONVERGED_HAPPY_BREAKDOWN',
        -2:'KSP_DIVERGED_NULL',
        -3:'KSP_DIVERGED_ITS',
        -4:'KSP_DIVERGED_DTOL',
        -5:'KSP_DIVERGED_BREAKDOWN',
        -6:'KSP_DIVERGED_BREAKDOWN_BICG',
        -7:'KSP_DIVERGED_NONSYMMETRIC',
        -8:'KSP_DIVERGED_INDEFINITE_PC',
        -9:'KSP_DIVERGED_NANORINF',
        -10:'KSP_DIVERGED_INDEFINITE_MAT',
        -11:'KSP_DIVERGED_PCSETUP_FAILED',
        0:'KSP_CONVERGED_ITERATING'}

    if not scipy.sparse.isspmatrix(G):
        logger.info('system matrix is dense and will now be converted to a CSR sparse matrix')
        G = scipy.sparse.csr_matrix(G)

    #G += scipy.sparse.diags(1e-10*np.ones(G.shape[0]),0)
    # Ensure CSR layout so indptr/indices/data can be handed to PETSc below.
    G = G.tocsr()
    #fig,ax = plt.subplots()
    #ax.imshow(G.toarray(),interpolation='none')
    #plt.show()

    # instantiate LHS
    A = PETSc.Mat().createAIJ(size=G.shape,csr=(G.indptr,G.indices,G.data))
    # instantiate RHS
    d = PETSc.Vec().createWithArray(d)
    # create empty solution vector
    soln = np.zeros(G.shape[1])
    soln = PETSc.Vec().createWithArray(soln)
    # instantiate solver
    ksp_solver = PETSc.KSP()
    ksp_solver.create()
    ksp_solver.setType(ksp)
    ksp_solver.setOperators(A)
    ksp_solver.setTolerances(rtol=rtol,atol=atol,max_it=maxiter)
    pc_solver = ksp_solver.getPC()
    pc_solver.setType(pc)
    # set tolerance for zero pivot
    pc_solver.setFactorPivot(1e-100)

    # solve and get information
    if view:
        # Dump solver configuration and log each iteration via _monitor.
        ksp_solver.view()
        ksp_solver.setMonitor(_monitor)

    ksp_solver.solve(d,soln)
    conv_number = ksp_solver.getConvergedReason()
    conv_reason = converged_reason_lookup[conv_number]
    if conv_number > 0:
        logger.debug('KSP solver converged due to %s' % conv_reason)
    else:
        # Divergence is reported but not raised; the (possibly bad) iterate
        # is still returned to the caller.
        logger.warning('KSP solver diverged due to %s' % conv_reason)
        print('WARNING: KSP solver diverged due to %s' % conv_reason)

    return soln.getArray()
| {
"content_hash": "fb957bd9b54ebd50eb829ac7c68be52e",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 90,
"avg_line_length": 29.94814814814815,
"alnum_prop": 0.6871135295572595,
"repo_name": "treverhines/ModEst",
"id": "241cb9bb41e700f078c5b90d18f9484b6c64f567",
"size": "4105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modest/petsc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "FORTRAN",
"bytes": "43150"
},
{
"name": "Python",
"bytes": "153229"
}
],
"symlink_target": ""
} |
"""
Stomatogastric ganglion Central Pattern Generator:
generates pyloric rhythm of the lobster
Network model translated from:
Prinz, Bucher, Marder, Nature Neuroscience, 2004;
STG neuron models translated from:
Prinz, Billimoria, Marder, J.Neurophys., 2003.
Translated into MOOSE by Aditya Gilra, Bangalore, 2013, revised 2014.
"""
#import os
#os.environ['NUMPTHREADS'] = '1'
import sys
sys.path.extend(['../../../python','synapses'])
import moose
from moose.utils import *
from moose.neuroml.NeuroML import NeuroML
import matplotlib.pyplot as plt
import numpy as np
# Simulation and plotting time steps, and total simulated duration (seconds).
simdt = 25e-6 # s
plotdt = 25e-6 # s
runtime = 10.0 # s
cells_path = '/cells' # neuromlR.readNeuroMLFromFile creates cells in '/cells'
def loadRunSTGNeuroML_L123(filename):
    'Loads and runs the pyloric rhythm generator from NeuroML files.'
    # NOTE(review): Python 2 code (print statements) driving the MOOSE
    # simulator; it will not run under Python 3 without porting.
    # for graded synapses, else NeuroML event-based are used
    from load_synapses import load_synapses
    moose.Neutral('/library')
    # set graded to False to use event based synapses
    # if False, neuroml event-based synapses get searched for and loaded
    # True to load graded synapses
    graded_syn = True
    #graded_syn = False
    if graded_syn:
        load_synapses()

    neuromlR = NeuroML()
    ## readNeuroMLFromFile below returns:
    # This returns
    # populationDict = {
    #     'populationname1':('cellName',{('instanceid1'):moosecell, ... })
    #     , ...
    # }
    # (cellName and instanceid are strings, mooosecell is a moose.Neuron object instance)
    # and
    # projectionDict = {
    #     'projName1':('source','target',[('syn_name1','pre_seg_path','post_seg_path')
    #     ,...])
    #     , ...
    # }
    populationDict, projectionDict = \
        neuromlR.readNeuroMLFromFile(filename)
    # Record somatic Vm of one cell from each of the three populations.
    soma1_path = populationDict['AB_PD'][1][0].path+'/Soma_0'
    soma1Vm = setupTable('somaVm',moose.Compartment(soma1_path),'Vm')
    soma2_path = populationDict['LP'][1][0].path+'/Soma_0'
    soma2Vm = setupTable('somaVm',moose.Compartment(soma2_path),'Vm')
    soma3_path = populationDict['PY'][1][0].path+'/Soma_0'
    soma3Vm = setupTable('somaVm',moose.Compartment(soma3_path),'Vm')

    # monitor channel current
    channel_path = soma1_path + '/KCa_STG'
    channel_Ik = setupTable('KCa_Ik',moose.element(channel_path),'Ik')
    # monitor Ca
    capool_path = soma1_path + '/CaPool_STG'
    capool_Ca = setupTable('CaPool_Ca',moose.element(capool_path),'Ca')

    # monitor synaptic current
    soma2 = moose.element(soma2_path)
    print "Children of",soma2_path,"are:"
    for child in soma2.children:
        print child.className, child.path
    if graded_syn:
        # Graded synapses get a mangled per-connection element name.
        syn_path = soma2_path+'/DoubExpSyn_Ach__cells-0-_AB_PD_0-0-_Soma_0'
        syn = moose.element(syn_path)
    else:
        syn_path = soma2_path+'/DoubExpSyn_Ach'
        syn = moose.element(syn_path)
    syn_Ik = setupTable('DoubExpSyn_Ach_Ik',syn,'Ik')

    print "Reinit MOOSE ... "
    resetSim(['/elec',cells_path], simdt, plotdt, simmethod='ee')

    print "Using graded synapses? = ", graded_syn
    print "Running model filename = ",filename," ... "
    moose.start(runtime)
    tvec = np.arange(0.0,runtime+2*plotdt,plotdt)
    print soma1Vm.vector
    # Trim the time axis to the number of samples actually recorded.
    tvec = tvec[ : soma1Vm.vector.size ]

    # Figure 1: circuit schematic (left) and the three somatic Vm traces.
    fig = plt.figure(facecolor='w',figsize=(10,6))
    axA = plt.subplot2grid((3,2),(0,0),rowspan=3,colspan=1,frameon=False)
    img = plt.imread( 'STG.png' )
    imgplot = axA.imshow( img )
    for tick in axA.get_xticklines():
        tick.set_visible(False)
    for tick in axA.get_yticklines():
        tick.set_visible(False)
    axA.set_xticklabels([])
    axA.set_yticklabels([])
    ax = plt.subplot2grid((3,2),(0,1),rowspan=1,colspan=1)
    ax.plot(tvec,soma1Vm.vector*1000,label='AB_PD',color='g',linestyle='solid')
    ax.set_xticklabels([])
    ax.set_ylabel('AB_PD (mV)')
    ax = plt.subplot2grid((3,2),(1,1),rowspan=1,colspan=1)
    ax.plot(tvec,soma2Vm.vector*1000,label='LP',color='r',linestyle='solid')
    ax.set_xticklabels([])
    ax.set_ylabel('LP (mV)')
    ax = plt.subplot2grid((3,2),(2,1),rowspan=1,colspan=1)
    ax.plot(tvec,soma3Vm.vector*1000,label='PY',color='b',linestyle='solid')
    ax.set_ylabel('PY (mV)')
    ax.set_xlabel('time (s)')
    fig.tight_layout()

    # Figure 2: LP and PY Vm overlaid.
    fig = plt.figure(facecolor='w')
    plt.plot(tvec,soma2Vm.vector*1000,label='LP',color='r',linestyle='solid')
    plt.plot(tvec,soma3Vm.vector*1000,label='PY',color='b',linestyle='solid')
    plt.legend()
    plt.xlabel('time (s)')
    plt.ylabel('Soma Vm (mV)')

    # Figure 3: KCa current with Ca concentration on a twin y-axis.
    plt.figure(facecolor='w')
    plt.plot(tvec,channel_Ik.vector,color='b',linestyle='solid')
    plt.title('KCa current; Ca conc')
    plt.xlabel('time (s)')
    plt.ylabel('Ik (Amp)')
    plt.twinx()
    plt.plot(tvec,capool_Ca.vector,color='r',linestyle='solid')
    plt.ylabel('Ca (mol/m^3)')

    # Figure 4: acetylcholine synaptic current onto the LP soma.
    plt.figure(facecolor='w')
    plt.plot(tvec,syn_Ik.vector,color='b',linestyle='solid')
    plt.title('Ach syn current in '+soma2_path)
    plt.xlabel('time (s)')
    plt.ylabel('Isyn (S)')
    print "Showing plots ..."
    plt.show()
# Default NeuroML network file; may be overridden on the command line.
filename = "Generated.net.xml"

if __name__ == "__main__":
    # Inside the moose-examples/neuroml/lobster_ploric/ directory supplied
    # with MOOSE, run ``python STG_net.py`` (other channels and morph xml
    # files are already present in this same directory).  Read the pdf
    # documentation for a tutorial by Aditya Gilra.
    if len(sys.argv) >= 2:
        filename = sys.argv[1]
    loadRunSTGNeuroML_L123(filename)
| {
"content_hash": "d770c530d0220e5345ccc7d9eacec54e",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 89,
"avg_line_length": 35.464516129032255,
"alnum_prop": 0.6490813170820448,
"repo_name": "h-mayorquin/camp_india_2016",
"id": "a668fc136b50341597abf4f8d8693ac4af927fff",
"size": "5537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/chemical switches/moose/neuroml/lobster_pyloric/STG_net.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "33891"
},
{
"name": "C",
"bytes": "205445"
},
{
"name": "GAP",
"bytes": "71247"
},
{
"name": "Jupyter Notebook",
"bytes": "2211795"
},
{
"name": "OpenEdge ABL",
"bytes": "1723"
},
{
"name": "Python",
"bytes": "251481"
},
{
"name": "Shell",
"bytes": "564"
}
],
"symlink_target": ""
} |
"""
dyn_pages/signup.py
Author: Trey Stout
Date Added: ?
Give us yo money.
"""
## STD LIBS
## OUR LIBS
from zoto_base_page import zoto_base_page
## 3RD PARTY LIBS
class signup(zoto_base_page):
    """Signup page: declares the client-side JS bundle and page manager.

    NOTE(review): rendering and routing behaviour come from zoto_base_page
    (not visible here) — these attributes presumably configure which scripts
    the base page emits; confirm against the base class.
    """

    # Client-side libraries loaded for this page, in order.
    local_js_includes = [
        "select_box.lib.js",
        "lookahead.lib.js",
        "tag_cloud.lib.js",
        "globber.lib.js",
        "albums.lib.js",
        "album_modals.lib.js",
        "subscribe.lib.js",
        "states.js",
        "countries.js"
    ]
    # Page-specific controller script.
    page_manager_js = "managers/user_signup.js"

    def render_title(self, ctx, data):
        """Render the browser title for the signup page."""
        return ctx.tag["Zoto: Signup"]

    def locateChild(self, ctx, segments):
        """Route every child URL segment back to this page."""
        return self, []
| {
"content_hash": "c86b9c3757de5f30e5bcf0dfd1f3bbcb",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 44,
"avg_line_length": 15.81081081081081,
"alnum_prop": 0.6547008547008547,
"repo_name": "kordless/zoto-server",
"id": "3fd1816df9161533f19da113de024d6729b5e311",
"size": "585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aztk/web/signup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1489011"
},
{
"name": "PHP",
"bytes": "15394"
},
{
"name": "Python",
"bytes": "905967"
},
{
"name": "Shell",
"bytes": "1052"
}
],
"symlink_target": ""
} |
"""Support for the yandex speechkit tts service."""
import asyncio
from http import HTTPStatus
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Base URL of the Yandex SpeechKit HTTP TTS endpoint; query params are
# appended by async_get_tts_audio.
YANDEX_API_URL = "https://tts.voicetech.yandex.net/generate?"

# Values accepted by the service for language, audio codec and voice.
SUPPORT_LANGUAGES = ["ru-RU", "en-US", "tr-TR", "uk-UK"]
SUPPORT_CODECS = ["mp3", "wav", "opus"]
SUPPORT_VOICES = [
    "jane",
    "oksana",
    "alyss",
    "omazh",
    "zahar",
    "ermil",
    "levitan",
    "ermilov",
    "silaerkan",
    "kolya",
    "kostya",
    "nastya",
    "sasha",
    "nick",
    "erkanyavas",
    "zhenya",
    "tanya",
    "anton_samokhvalov",
    "tatyana_abramova",
    "voicesearch",
    "ermil_with_tuning",
    "robot",
    "dude",
    "zombie",
    "smoky",
]
SUPPORTED_EMOTION = ["good", "evil", "neutral"]

# Allowed playback speed range for the `speed` option.
MIN_SPEED = 0.1
MAX_SPEED = 3

# Configuration keys specific to this platform.
CONF_CODEC = "codec"
CONF_VOICE = "voice"
CONF_EMOTION = "emotion"
CONF_SPEED = "speed"

DEFAULT_LANG = "en-US"
DEFAULT_CODEC = "mp3"
DEFAULT_VOICE = "zahar"
DEFAULT_EMOTION = "neutral"
DEFAULT_SPEED = 1

# Voluptuous schema validating this platform's configuration entry.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
        vol.Optional(CONF_CODEC, default=DEFAULT_CODEC): vol.In(SUPPORT_CODECS),
        vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORT_VOICES),
        vol.Optional(CONF_EMOTION, default=DEFAULT_EMOTION): vol.In(SUPPORTED_EMOTION),
        vol.Optional(CONF_SPEED, default=DEFAULT_SPEED): vol.Range(
            min=MIN_SPEED, max=MAX_SPEED
        ),
    }
)

# Options callers may override per-call in tts service data.
SUPPORTED_OPTIONS = [CONF_CODEC, CONF_VOICE, CONF_EMOTION, CONF_SPEED]
async def async_get_engine(hass, config, discovery_info=None):
    """Set up Yandex SpeechKit speech component."""
    # (Docstring previously said "VoiceRSS" — a copy-paste from another
    # platform; this module implements the Yandex SpeechKit provider.)
    return YandexSpeechKitProvider(hass, config)
class YandexSpeechKitProvider(Provider):
    """Yandex SpeechKit speech API provider."""
    # (Class and __init__ docstrings previously said "VoiceRSS" — copy-paste
    # from another platform; corrected to Yandex SpeechKit.)

    def __init__(self, hass, conf):
        """Init Yandex SpeechKit TTS service from validated platform config."""
        self.hass = hass
        self._codec = conf.get(CONF_CODEC)
        self._key = conf.get(CONF_API_KEY)
        self._speaker = conf.get(CONF_VOICE)
        self._language = conf.get(CONF_LANG)
        self._emotion = conf.get(CONF_EMOTION)
        # Speed is sent as a string query parameter.
        self._speed = str(conf.get(CONF_SPEED))
        self.name = "YandexTTS"

    @property
    def default_language(self):
        """Return the default language."""
        return self._language

    @property
    def supported_languages(self):
        """Return list of supported languages."""
        return SUPPORT_LANGUAGES

    @property
    def supported_options(self):
        """Return list of supported options."""
        return SUPPORTED_OPTIONS

    async def async_get_tts_audio(self, message, language, options=None):
        """Load TTS from yandex.

        Returns (codec, audio_bytes) on success, (None, None) on any
        HTTP error, timeout or client error.
        """
        websession = async_get_clientsession(self.hass)
        actual_language = language
        options = options or {}

        try:
            # NOTE(review): newer async_timeout releases only support
            # ``async with``; confirm the pinned version still allows the
            # synchronous context manager used here.
            with async_timeout.timeout(10):
                # Per-call options override the configured defaults.
                url_param = {
                    "text": message,
                    "lang": actual_language,
                    "key": self._key,
                    "speaker": options.get(CONF_VOICE, self._speaker),
                    "format": options.get(CONF_CODEC, self._codec),
                    "emotion": options.get(CONF_EMOTION, self._emotion),
                    "speed": options.get(CONF_SPEED, self._speed),
                }

                request = await websession.get(YANDEX_API_URL, params=url_param)

                if request.status != HTTPStatus.OK:
                    _LOGGER.error(
                        "Error %d on load URL %s", request.status, request.url
                    )
                    return (None, None)
                data = await request.read()

        except (asyncio.TimeoutError, aiohttp.ClientError):
            _LOGGER.error("Timeout for yandex speech kit API")
            return (None, None)

        return (self._codec, data)
| {
"content_hash": "3defb5e74bdc9ee26af10ec03ff17cc9",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 87,
"avg_line_length": 28.75,
"alnum_prop": 0.6044653349001176,
"repo_name": "aronsky/home-assistant",
"id": "ec0868b244396eb2bfcd1fd7218bb6b3ad21ac15",
"size": "4255",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/yandextts/tts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from django import template
from django.urls import reverse
from django.utils.safestring import mark_safe
from bson.objectid import ObjectId
from mongoengine import Document
from mongoengine.fields import URLField
register = template.Library()
@register.simple_tag()
def get_document_value(document, key):
    '''
    Returns the display value of a field for a particular MongoDB document.

    ObjectIds are shown verbatim, URL fields become clickable links, and
    referenced documents link to their admin detail page.
    '''
    anchor = """<a href="{0}">{1}</a>"""
    val = getattr(document, key)

    if isinstance(val, ObjectId):
        return val

    if isinstance(document._fields.get(key), URLField):
        return mark_safe(anchor.format(val, val))

    if not isinstance(val, Document):
        # Plain value: render as-is.
        return val

    # Reference field: build a link to the referenced document's detail view.
    detail_url = reverse(
        "document_detail",
        kwargs={
            'app_label': val.__module__.replace(".models", ""),
            'document_name': val._class_name,
            'id': val.id,
        })
    return mark_safe(anchor.format(detail_url, val))
| {
"content_hash": "7eadbba7174c49debb1896fd7bf94bd6",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 31,
"alnum_prop": 0.6441837732160313,
"repo_name": "pydanny/django-mongonaut",
"id": "a77ef3784094304ec866162831261d07b14a51c0",
"size": "1048",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mongonaut/templatetags/mongonaut_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15054"
},
{
"name": "JavaScript",
"bytes": "7102"
},
{
"name": "Python",
"bytes": "64969"
}
],
"symlink_target": ""
} |
"""Entry point for ``python -m doppelbot``."""
import doppelbot.cli

if __name__ == "__main__":
    # Guard so that merely importing doppelbot.__main__ (e.g. by tooling or
    # test collectors) does not immediately run the CLI.
    doppelbot.cli.main()
| {
"content_hash": "4c46bf622033298bc7115bc9056639ad",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 20,
"avg_line_length": 21,
"alnum_prop": 0.8095238095238095,
"repo_name": "michaeljoseph/doppelbot",
"id": "cca0eba241457afc9b3cd063fdbcc28a74e29e79",
"size": "43",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doppelbot/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15684"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
from .certificate_verification_description import CertificateVerificationDescription
from .certificate_body_description import CertificateBodyDescription
from .certificate_properties import CertificateProperties
from .certificate_description import CertificateDescription
from .certificate_list_description import CertificateListDescription
from .certificate_properties_with_nonce import CertificatePropertiesWithNonce
from .certificate_with_nonce_description import CertificateWithNonceDescription
from .shared_access_signature_authorization_rule import SharedAccessSignatureAuthorizationRule
from .ip_filter_rule import IpFilterRule
from .event_hub_properties import EventHubProperties
from .routing_service_bus_queue_endpoint_properties import RoutingServiceBusQueueEndpointProperties
from .routing_service_bus_topic_endpoint_properties import RoutingServiceBusTopicEndpointProperties
from .routing_event_hub_properties import RoutingEventHubProperties
from .routing_storage_container_properties import RoutingStorageContainerProperties
from .routing_endpoints import RoutingEndpoints
from .route_properties import RouteProperties
from .fallback_route_properties import FallbackRouteProperties
from .routing_properties import RoutingProperties
from .storage_endpoint_properties import StorageEndpointProperties
from .messaging_endpoint_properties import MessagingEndpointProperties
from .feedback_properties import FeedbackProperties
from .cloud_to_device_properties import CloudToDeviceProperties
from .operations_monitoring_properties import OperationsMonitoringProperties
from .iot_hub_properties import IotHubProperties
from .iot_hub_sku_info import IotHubSkuInfo
from .iot_hub_description import IotHubDescription
from .resource import Resource
from .operation_display import OperationDisplay
from .operation import Operation
from .error_details import ErrorDetails, ErrorDetailsException
from .iot_hub_quota_metric_info import IotHubQuotaMetricInfo
from .registry_statistics import RegistryStatistics
from .job_response import JobResponse
from .iot_hub_capacity import IotHubCapacity
from .iot_hub_sku_description import IotHubSkuDescription
from .event_hub_consumer_group_info import EventHubConsumerGroupInfo
from .operation_inputs import OperationInputs
from .iot_hub_name_availability_info import IotHubNameAvailabilityInfo
from .export_devices_request import ExportDevicesRequest
from .import_devices_request import ImportDevicesRequest
from .operation_paged import OperationPaged
from .iot_hub_description_paged import IotHubDescriptionPaged
from .iot_hub_sku_description_paged import IotHubSkuDescriptionPaged
from .str_paged import StrPaged
from .job_response_paged import JobResponsePaged
from .iot_hub_quota_metric_info_paged import IotHubQuotaMetricInfoPaged
from .shared_access_signature_authorization_rule_paged import SharedAccessSignatureAuthorizationRulePaged
from .iot_hub_client_enums import (
AccessRights,
IpFilterActionType,
RoutingSource,
OperationMonitoringLevel,
Capabilities,
IotHubSku,
IotHubSkuTier,
JobType,
JobStatus,
IotHubScaleType,
IotHubNameUnavailabilityReason,
)
# Public API of this models package: every re-exported model class, paged
# collection and enum, kept in the same order as the imports above.
__all__ = [
    'CertificateVerificationDescription',
    'CertificateBodyDescription',
    'CertificateProperties',
    'CertificateDescription',
    'CertificateListDescription',
    'CertificatePropertiesWithNonce',
    'CertificateWithNonceDescription',
    'SharedAccessSignatureAuthorizationRule',
    'IpFilterRule',
    'EventHubProperties',
    'RoutingServiceBusQueueEndpointProperties',
    'RoutingServiceBusTopicEndpointProperties',
    'RoutingEventHubProperties',
    'RoutingStorageContainerProperties',
    'RoutingEndpoints',
    'RouteProperties',
    'FallbackRouteProperties',
    'RoutingProperties',
    'StorageEndpointProperties',
    'MessagingEndpointProperties',
    'FeedbackProperties',
    'CloudToDeviceProperties',
    'OperationsMonitoringProperties',
    'IotHubProperties',
    'IotHubSkuInfo',
    'IotHubDescription',
    'Resource',
    'OperationDisplay',
    'Operation',
    'ErrorDetails', 'ErrorDetailsException',
    'IotHubQuotaMetricInfo',
    'RegistryStatistics',
    'JobResponse',
    'IotHubCapacity',
    'IotHubSkuDescription',
    'EventHubConsumerGroupInfo',
    'OperationInputs',
    'IotHubNameAvailabilityInfo',
    'ExportDevicesRequest',
    'ImportDevicesRequest',
    'OperationPaged',
    'IotHubDescriptionPaged',
    'IotHubSkuDescriptionPaged',
    'StrPaged',
    'JobResponsePaged',
    'IotHubQuotaMetricInfoPaged',
    'SharedAccessSignatureAuthorizationRulePaged',
    'AccessRights',
    'IpFilterActionType',
    'RoutingSource',
    'OperationMonitoringLevel',
    'Capabilities',
    'IotHubSku',
    'IotHubSkuTier',
    'JobType',
    'JobStatus',
    'IotHubScaleType',
    'IotHubNameUnavailabilityReason',
]
| {
"content_hash": "abc2ede496811fa3dcb964ab80d4b6f3",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 105,
"avg_line_length": 40.01652892561984,
"alnum_prop": 0.8139198678232136,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "c0dc0016be9984ce2ebe44823f58813e2f47e5d1",
"size": "5316",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-iothub/azure/mgmt/iothub/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
"""
A lightweight wrapper around Python's sqlite3 database, with a dict-like interface
and multi-thread access support::
>>> mydict = SqliteDict('some.db', autocommit=True) # the mapping will be persisted to file `some.db`
>>> mydict['some_key'] = any_picklable_object
>>> print mydict['some_key']
>>> print len(mydict) # etc... all dict functions work
Pickle is used internally to serialize the values. Keys are strings.
If you don't use autocommit (default is no autocommit for performance), then
don't forget to call `mydict.commit()` when done with a transaction.
"""
import sqlite3
import os
import sys
import tempfile
import random
import logging
import traceback
from threading import Thread
# Best-effort package version lookup; falls back to '?' when sqlitedict is not
# installed as a distribution (e.g. this vendored copy) or pkg_resources is absent.
try:
    __version__ = __import__('pkg_resources').get_distribution('sqlitedict').version
except Exception:
    # A bare `except:` would also swallow SystemExit/KeyboardInterrupt; catching
    # Exception is the narrowest guard that still covers both ImportError and
    # pkg_resources.DistributionNotFound without importing pkg_resources eagerly.
    __version__ = '?'
# ---------------------------------------------------------------------------
# Python 2/3 compatibility: define reraise(tp, value, tb) so an exception can
# be re-raised with an explicit traceback under both major versions.
# ---------------------------------------------------------------------------
major_version = sys.version_info[0]
if major_version < 3:  # py <= 2.x
    if sys.version_info[1] < 5:  # py <= 2.4
        raise ImportError("sqlitedict requires python 2.5 or higher (python 3.3 or higher supported)")

    # necessary to use exec()_ as this would be a SyntaxError in python3.
    # this is an exact port of six.reraise():
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # default to the caller's globals/locals, like the exec statement
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    # `raise tp, value, tb` is py2-only syntax, so it must be hidden in a string
    # that py3's parser never sees.
    exec_("def reraise(tp, value, tb=None):\n"
          "    raise tp, value, tb\n")
else:
    def reraise(tp, value, tb=None):
        """Re-raise `value` (an instance of `tp`) with traceback `tb` attached."""
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
try:
from cPickle import dumps, loads, HIGHEST_PROTOCOL as PICKLE_PROTOCOL
except ImportError:
from pickle import dumps, loads, HIGHEST_PROTOCOL as PICKLE_PROTOCOL
# some Python 3 vs 2 imports
try:
from collections import UserDict as DictClass
except ImportError:
from UserDict import DictMixin as DictClass
try:
from queue import Queue
except ImportError:
from Queue import Queue
logger = logging.getLogger(__name__)
def open(*args, **kwargs):
    """Factory shortcut: forward all arguments to :class:`SqliteDict`.

    Note: deliberately shadows the builtin ``open`` at module level, mirroring
    the ``shelve.open``/``anydbm.open`` convention.
    """
    mapping = SqliteDict(*args, **kwargs)
    return mapping
def encode(obj):
    """Pickle ``obj`` and wrap the result so SQLite stores it as a BLOB."""
    pickled = dumps(obj, protocol=PICKLE_PROTOCOL)
    return sqlite3.Binary(pickled)
def decode(obj):
    """Unpickle a BLOB previously produced by :func:`encode`."""
    raw = bytes(obj)
    return loads(raw)
class SqliteDict(DictClass):
    """Persistent dict stored as one table of an SQLite database file.

    All database access is funnelled through a dedicated
    :class:`SqliteMultithread` worker thread, which gives multi-thread access
    support (see the module docstring). Keys are strings; values pass through
    the ``encode``/``decode`` callables (pickle by default).
    """
    # Accepted values for the `flag` constructor argument (documented below).
    VALID_FLAGS = ['c', 'r', 'w', 'n']

    def __init__(self, filename=None, tablename='unnamed', flag='c',
                 autocommit=False, journal_mode="DELETE", encode=encode, decode=decode):
        """
        Initialize a thread-safe sqlite-backed dictionary. The dictionary will
        be a table `tablename` in database file `filename`. A single file (=database)
        may contain multiple tables.

        If no `filename` is given, a random file in temp will be used (and deleted
        from temp once the dict is closed/deleted).

        If you enable `autocommit`, changes will be committed after each operation
        (more inefficient but safer). Otherwise, changes are committed on `self.commit()`,
        `self.clear()` and `self.close()`.

        Set `journal_mode` to 'OFF' if you're experiencing sqlite I/O problems
        or if you need performance and don't care about crash-consistency.

        The `flag` parameter. Exactly one of:
          'c': default mode, open for read/write, creating the db/table if necessary.
          'w': open for r/w, but drop `tablename` contents first (start with empty table)
          'r': open as read-only
          'n': create a new database (erasing any existing tables, not just `tablename`!).

        The `encode` and `decode` parameters are used to customize how the values
        are serialized and deserialized.
        The `encode` parameter must be a function that takes a single Python
        object and returns a serialized representation.
        The `decode` function must be a function that takes the serialized
        representation produced by `encode` and returns a deserialized Python
        object.
        The default is to use pickle.

        :raises RuntimeError: on an unrecognized flag or a missing parent directory
        :raises ValueError: if `tablename` contains a double quote
        """
        self.in_temp = filename is None
        if self.in_temp:
            # pick a random file name under the system temp directory
            randpart = hex(random.randint(0, 0xffffff))[2:]
            filename = os.path.join(tempfile.gettempdir(), 'sqldict' + randpart)
        if flag not in SqliteDict.VALID_FLAGS:
            raise RuntimeError("Unrecognized flag: %s" % flag)
        self.flag = flag
        if flag == 'n':
            # 'n' erases the whole database file, not just our table
            if os.path.exists(filename):
                os.remove(filename)
        dirname = os.path.dirname(filename)
        if dirname:
            if not os.path.exists(dirname):
                raise RuntimeError('Error! The directory does not exist, %s' % dirname)
        self.filename = filename
        if '"' in tablename:
            # the table name is interpolated into SQL inside double quotes below,
            # so an embedded quote would break every statement
            raise ValueError('Invalid tablename %r' % tablename)
        self.tablename = tablename
        self.autocommit = autocommit
        self.journal_mode = journal_mode
        self.encode = encode
        self.decode = decode
        logger.info("opening Sqlite table %r in %s" % (tablename, filename))
        MAKE_TABLE = 'CREATE TABLE IF NOT EXISTS "%s" (key TEXT PRIMARY KEY, value BLOB)' % self.tablename
        self.conn = self._new_conn()
        self.conn.execute(MAKE_TABLE)
        self.conn.commit()
        if flag == 'w':
            self.clear()

    def _new_conn(self):
        # One worker thread per SqliteDict instance serializes all DB access.
        return SqliteMultithread(self.filename, autocommit=self.autocommit, journal_mode=self.journal_mode)

    def __enter__(self):
        # Re-open the connection if this dict was previously close()d, so the
        # same instance can be reused in several `with` blocks.
        if not hasattr(self, 'conn') or self.conn is None:
            self.conn = self._new_conn()
        return self

    def __exit__(self, *exc_info):
        self.close()

    def __str__(self):
        return "SqliteDict(%s)" % (self.filename)

    def __repr__(self):
        return str(self)  # no need of something complex

    def __len__(self):
        # `select count (*)` is super slow in sqlite (does a linear scan!!)
        # As a result, len() is very slow too once the table size grows beyond trivial.
        # We could keep the total count of rows ourselves, by means of triggers,
        # but that seems too complicated and would slow down normal operation
        # (insert/delete etc).
        GET_LEN = 'SELECT COUNT(*) FROM "%s"' % self.tablename
        rows = self.conn.select_one(GET_LEN)[0]
        return rows if rows is not None else 0

    def __bool__(self):
        # No elements is False, otherwise True
        # MAX(ROWID) avoids the full-table scan that COUNT(*) would cost.
        GET_MAX = 'SELECT MAX(ROWID) FROM "%s"' % self.tablename
        m = self.conn.select_one(GET_MAX)[0]
        # Explicit better than implicit and bla bla
        return True if m is not None else False

    def iterkeys(self):
        """Yield keys in insertion (rowid) order."""
        GET_KEYS = 'SELECT key FROM "%s" ORDER BY rowid' % self.tablename
        for key in self.conn.select(GET_KEYS):
            yield key[0]

    def itervalues(self):
        """Yield decoded values in insertion (rowid) order."""
        GET_VALUES = 'SELECT value FROM "%s" ORDER BY rowid' % self.tablename
        for value in self.conn.select(GET_VALUES):
            yield self.decode(value[0])

    def iteritems(self):
        """Yield (key, decoded value) pairs in insertion (rowid) order."""
        GET_ITEMS = 'SELECT key, value FROM "%s" ORDER BY rowid' % self.tablename
        for key, value in self.conn.select(GET_ITEMS):
            yield key, self.decode(value)

    # On py3 these return lazy iterators (dict-view style); on py2, lists.
    def keys(self):
        return self.iterkeys() if major_version > 2 else list(self.iterkeys())

    def values(self):
        return self.itervalues() if major_version > 2 else list(self.itervalues())

    def items(self):
        return self.iteritems() if major_version > 2 else list(self.iteritems())

    def __contains__(self, key):
        HAS_ITEM = 'SELECT 1 FROM "%s" WHERE key = ?' % self.tablename
        return self.conn.select_one(HAS_ITEM, (key,)) is not None

    def __getitem__(self, key):
        GET_ITEM = 'SELECT value FROM "%s" WHERE key = ?' % self.tablename
        item = self.conn.select_one(GET_ITEM, (key,))
        if item is None:
            raise KeyError(key)
        return self.decode(item[0])

    def __setitem__(self, key, value):
        if self.flag == 'r':
            raise RuntimeError('Refusing to write to read-only SqliteDict')
        # REPLACE = insert-or-overwrite, so this handles both new and existing keys
        ADD_ITEM = 'REPLACE INTO "%s" (key, value) VALUES (?,?)' % self.tablename
        self.conn.execute(ADD_ITEM, (key, self.encode(value)))

    def __delitem__(self, key):
        if self.flag == 'r':
            raise RuntimeError('Refusing to delete from read-only SqliteDict')
        if key not in self:
            raise KeyError(key)
        DEL_ITEM = 'DELETE FROM "%s" WHERE key = ?' % self.tablename
        self.conn.execute(DEL_ITEM, (key,))

    def update(self, items=(), **kwds):
        """dict.update() equivalent; encodes values and batches the inserts."""
        if self.flag == 'r':
            raise RuntimeError('Refusing to update read-only SqliteDict')
        try:
            # accept a mapping as well as an iterable of (key, value) pairs
            items = items.items()
        except AttributeError:
            pass
        items = [(k, self.encode(v)) for k, v in items]
        UPDATE_ITEMS = 'REPLACE INTO "%s" (key, value) VALUES (?, ?)' % self.tablename
        self.conn.executemany(UPDATE_ITEMS, items)
        if kwds:
            self.update(kwds)

    def __iter__(self):
        return self.iterkeys()

    def clear(self):
        """Delete all rows from the table (commits before and after)."""
        if self.flag == 'r':
            raise RuntimeError('Refusing to clear read-only SqliteDict')
        CLEAR_ALL = 'DELETE FROM "%s";' % self.tablename  # avoid VACUUM, as it gives "OperationalError: database schema has changed"
        self.conn.commit()
        self.conn.execute(CLEAR_ALL)
        self.conn.commit()

    @staticmethod
    def get_tablenames(filename):
        """get the names of the tables in an sqlite db as a list"""
        if not os.path.isfile(filename):
            raise IOError('file %s does not exist' % (filename))
        GET_TABLENAMES = 'SELECT name FROM sqlite_master WHERE type="table"'
        with sqlite3.connect(filename) as conn:
            cursor = conn.execute(GET_TABLENAMES)
            res = cursor.fetchall()
        return [name[0] for name in res]

    def commit(self, blocking=True):
        """
        Persist all data to disk.

        When `blocking` is False, the commit command is queued, but the data is
        not guaranteed persisted (default implication when autocommit=True).
        """
        if self.conn is not None:
            self.conn.commit(blocking)
    sync = commit  # dbm/shelve-style alias

    def close(self, do_log=True, force=False):
        """Flush outstanding work, stop the worker thread, drop the connection.

        `force=True` skips the blocking handshake with the worker thread (used
        from __del__, when the worker may already be gone).
        """
        if do_log:
            logger.debug("closing %s" % self)
        if hasattr(self, 'conn') and self.conn is not None:
            if self.conn.autocommit and not force:
                # typically calls to commit are non-blocking when autocommit is
                # used.  However, we need to block on close() to ensure any
                # awaiting exceptions are handled and that all data is
                # persisted to disk before returning.
                self.conn.commit(blocking=True)
            self.conn.close(force=force)
            self.conn = None
        if self.in_temp:
            # best-effort cleanup of the auto-generated temp file
            try:
                os.remove(self.filename)
            except:
                pass

    def terminate(self):
        """Delete the underlying database file. Use with care."""
        if self.flag == 'r':
            raise RuntimeError('Refusing to terminate read-only SqliteDict')
        self.close()
        if self.filename == ':memory:':
            return
        logger.info("deleting %s" % self.filename)
        try:
            if os.path.isfile(self.filename):
                os.remove(self.filename)
        except (OSError, IOError):
            logger.exception("failed to delete %s" % (self.filename))

    def __del__(self):
        # like close(), but assume globals are gone by now (do not log!)
        try:
            self.close(do_log=False, force=True)
        except Exception:
            # prevent error log flood in case of multiple SqliteDicts
            # closed after connection lost (exceptions are always ignored
            # in __del__ method.
            pass
# Adding extra methods for python 2 compatibility (at import time)
if major_version == 2:
    # py2's truth protocol looks up __nonzero__, not __bool__, so alias it
    # and remove the py3-only name to avoid confusion.
    SqliteDict.__nonzero__ = SqliteDict.__bool__
    del SqliteDict.__bool__  # not needed and confusing
#endclass SqliteDict
class SqliteMultithread(Thread):
    """
    Wrap sqlite connection in a way that allows concurrent requests from multiple threads.

    This is done by internally queueing the requests and processing them sequentially
    in a separate thread (in the same order they arrived).

    Request protocol: tuples of (sql, args, result_queue_or_None, outer_stack)
    put on `self.reqs`; the sentinels '--commit--' and '--close--' are control
    commands, and '--no more--' marks the end of a result stream.
    """
    def __init__(self, filename, autocommit, journal_mode):
        super(SqliteMultithread, self).__init__()
        self.filename = filename
        self.autocommit = autocommit
        self.journal_mode = journal_mode
        # use request queue of unlimited size
        self.reqs = Queue()
        self.setDaemon(True)  # python2.5-compatible
        # sys.exc_info() of the last failed statement; re-raised lazily in the
        # *calling* thread by check_raise_error()
        self.exception = None
        self.log = logging.getLogger('sqlitedict.SqliteMultithread')
        self.start()

    def run(self):
        """Worker loop: owns the sqlite connection and drains the request queue."""
        if self.autocommit:
            # isolation_level=None puts the sqlite3 module in autocommit mode
            conn = sqlite3.connect(self.filename, isolation_level=None, check_same_thread=False)
        else:
            conn = sqlite3.connect(self.filename, check_same_thread=False)
        conn.execute('PRAGMA journal_mode = %s' % self.journal_mode)
        conn.text_factory = str
        cursor = conn.cursor()
        conn.commit()
        cursor.execute('PRAGMA synchronous=OFF')
        res = None
        while True:
            req, arg, res, outer_stack = self.reqs.get()
            if req == '--close--':
                assert res, ('--close-- without return queue', res)
                break
            elif req == '--commit--':
                conn.commit()
                if res:
                    res.put('--no more--')
            else:
                try:
                    cursor.execute(req, arg)
                except Exception as err:
                    # Retry once
                    # NOTE(review): this retry is unconditional; for
                    # non-transient failures (e.g. bad SQL) the second attempt
                    # fails identically and falls through to the handler below.
                    try:
                        cursor.execute(req, arg)
                    except Exception as err:
                        # record for deferred re-raise in the caller's thread
                        self.exception = (e_type, e_value, e_tb) = sys.exc_info()
                        inner_stack = traceback.extract_stack()
                        # An exception occurred in our thread, but we may not
                        # immediately able to throw it in our calling thread, if it has
                        # no return `res` queue: log as level ERROR both the inner and
                        # outer exception immediately.
                        #
                        # Any iteration of res.get() or any next call will detect the
                        # inner exception and re-raise it in the calling Thread; though
                        # it may be confusing to see an exception for an unrelated
                        # statement, an ERROR log statement from the 'sqlitedict.*'
                        # namespace contains the original outer stack location.
                        self.log.error('Inner exception:')
                        for item in traceback.format_list(inner_stack):
                            self.log.error(item)
                        self.log.error('')  # deliniate traceback & exception w/blank line
                        for item in traceback.format_exception_only(e_type, e_value):
                            self.log.error(item)
                        self.log.error('')  # exception & outer stack w/blank line
                        self.log.error('Outer stack:')
                        for item in traceback.format_list(outer_stack):
                            self.log.error(item)
                        self.log.error('Exception will be re-raised at next call.')
                if res:
                    # stream any rows produced by the statement to the caller
                    for rec in cursor:
                        res.put(rec)
                    res.put('--no more--')
                if self.autocommit:
                    conn.commit()
        # reached only via '--close--': ack on its result queue after closing
        self.log.debug('received: %s, send: --no more--', req)
        conn.close()
        res.put('--no more--')

    def check_raise_error(self):
        """
        Check for and raise exception for any previous sqlite query.

        For the `execute*` family of method calls, such calls are non-blocking and any
        exception raised in the thread cannot be handled by the calling Thread (usually
        MainThread).  This method is called on `close`, and prior to any subsequent
        calls to the `execute*` methods to check for and raise an exception in a
        previous call to the MainThread.
        """
        if self.exception:
            e_type, e_value, e_tb = self.exception
            # clear self.exception, if the caller decides to handle such
            # exception, we should not repeatedly re-raise it.
            self.exception = None
            self.log.error('An exception occurred from a previous statement, view '
                           'the logging namespace "sqlitedict" for outer stack.')
            # The third argument to raise is the traceback object, and it is
            # substituted instead of the current location as the place where
            # the exception occurred, this is so that when using debuggers such
            # as `pdb', or simply evaluating the naturally raised traceback, we
            # retain the original (inner) location of where the exception
            # occurred.
            reraise(e_type, e_value, e_tb)

    def execute(self, req, arg=None, res=None):
        """
        `execute` calls are non-blocking: just queue up the request and return immediately.
        """
        self.check_raise_error()
        # NOTE: This might be a lot of information to pump into an input
        # queue, affecting performance.  I've also seen earlier versions of
        # jython take a severe performance impact for throwing exceptions
        # so often.
        stack = traceback.extract_stack()[:-1]
        self.reqs.put((req, arg or tuple(), res, stack))

    def executemany(self, req, items):
        """Queue `req` once per item; re-check for deferred errors afterwards."""
        for item in items:
            self.execute(req, item)
        self.check_raise_error()

    def select(self, req, arg=None):
        """
        Unlike sqlite's native select, this select doesn't handle iteration efficiently.

        The result of `select` starts filling up with values as soon as the
        request is dequeued, and although you can iterate over the result normally
        (`for res in self.select(): ...`), the entire result will be in memory.
        """
        res = Queue()  # results of the select will appear as items in this queue
        self.execute(req, arg, res)
        while True:
            rec = res.get()
            self.check_raise_error()
            if rec == '--no more--':
                break
            yield rec

    def select_one(self, req, arg=None):
        """Return only the first row of the SELECT, or None if there are no matching rows."""
        try:
            return next(iter(self.select(req, arg)))
        except StopIteration:
            return None

    def commit(self, blocking=True):
        """Commit the current transaction; blocks for completion unless told not to."""
        if blocking:
            # by default, we await completion of commit() unless
            # blocking=False.  This ensures any available exceptions for any
            # previous statement are thrown before returning, and that the
            # data has actually persisted to disk!
            self.select_one('--commit--')
        else:
            # otherwise, we fire and forget as usual.
            self.execute('--commit--')

    def close(self, force=False):
        """Shut down the worker thread; `force=True` skips the completion handshake."""
        if force:
            # If a SqliteDict is being killed or garbage-collected, then select_one()
            # could hang forever because run() might already have exited and therefore
            # can't process the request. Instead, push the close command to the requests
            # queue directly. If run() is still alive, it will exit gracefully. If not,
            # then there's nothing we can do anyway.
            self.reqs.put(('--close--', None, Queue(), None))
        else:
            # we abuse 'select' to "iter" over a "--close--" statement so that we
            # can confirm the completion of close before joining the thread and
            # returning (by semaphore '--no more--'
            self.select_one('--close--')
            self.join()
#endclass SqliteMultithread
| {
"content_hash": "46d997449c8cd921ce8880623fb03dd9",
"timestamp": "",
"source": "github",
"line_count": 536,
"max_line_length": 133,
"avg_line_length": 38.58582089552239,
"alnum_prop": 0.591238758340586,
"repo_name": "naparuba/kunai",
"id": "39f7c7c82d3af733a09676c7f92eff77946e0224",
"size": "21037",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "opsbro/misc/sqlitedict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "487"
},
{
"name": "C",
"bytes": "345490"
},
{
"name": "C++",
"bytes": "29298"
},
{
"name": "CSS",
"bytes": "12718"
},
{
"name": "HTML",
"bytes": "12328"
},
{
"name": "JavaScript",
"bytes": "558040"
},
{
"name": "Makefile",
"bytes": "8523"
},
{
"name": "Python",
"bytes": "2180654"
},
{
"name": "Shell",
"bytes": "18255"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'dismalpy'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script thinks that the pyx files have changed relative to the C files
by comparing hashes stored in a database file.
Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in)
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
Originally written by Dag Sverre Seljebotn, and copied [to statsmodels] from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Later copied here from:
https://raw.github.com/statsmodels/statsmodels/master/tools/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
# On-disk database mapping each source path to its (input sha1, output sha1).
HASH_FILE = 'cythonize.dat'
# Directory scanned when no root_dir command-line argument is given.
DEFAULT_ROOT = 'dismalpy'

# WindowsError is not defined on unix systems
try:
    WindowsError
except NameError:
    WindowsError = None
#
# Rules
#
def process_pyx(fromfile, tofile):
    """Run Cython on `fromfile`, writing the generated C/C++ source to `tofile`.

    Raises OSError when Cython cannot be invoked at all, and a generic
    Exception when the compiler runs but fails.
    """
    # Version sanity check.  Deliberately skipped when Cython is not importable:
    # a `cython` executable may still exist on PATH even then.
    try:
        from Cython.Compiler.Version import version as cython_version
        from distutils.version import LooseVersion
        if LooseVersion(cython_version) < LooseVersion('0.19'):
            raise Exception('Building DismalPy requires Cython >= 0.19')
    except ImportError:
        pass

    flags = ['--fast-fail']
    if tofile.endswith('.cxx'):
        flags.append('--cplus')

    try:
        try:
            returncode = subprocess.call(['cython'] + flags + ["-o", tofile, fromfile])
            if returncode != 0:
                raise Exception('Cython failed')
        except OSError:
            # There are ways of installing Cython that don't result in a cython
            # executable on the path, see gh-2397; fall back to invoking the
            # compiler through the current interpreter.
            returncode = subprocess.call(
                [sys.executable, '-c',
                 'import sys; from Cython.Compiler.Main import '
                 'setuptools_main as main; sys.exit(main())'] + flags +
                ["-o", tofile, fromfile])
            if returncode != 0:
                raise Exception('Cython failed')
    except OSError:
        raise OSError('Cython needs to be installed')
def process_tempita_pyx(fromfile, tofile):
    """Expand a Tempita template (`*.pyx.in`) into a .pyx file, then cythonize it."""
    try:
        try:
            from Cython import Tempita as tempita  # copy bundled with Cython
        except ImportError:
            import tempita  # stand-alone installation
    except ImportError:
        raise Exception('Building DismalPy requires Tempita: '
                        'pip install --user Tempita')
    with open(fromfile, "r") as template_file:
        template_text = template_file.read()
    rendered = tempita.sub(template_text)
    assert fromfile.endswith('.pyx.in')
    pyxfile = fromfile[:-len('.pyx.in')] + '.pyx'
    with open(pyxfile, "w") as out:
        out.write(rendered)
    # hand the generated .pyx over to the normal Cython step
    process_pyx(pyxfile, tofile)
# Dispatch table: file-name suffix -> function that regenerates its C output.
rules = {
    # fromext : function
    '.pyx' : process_pyx,
    '.pyx.in' : process_tempita_pyx
}
#
# Hash db
#
def load_hashes(filename):
    """Load the hash DB: {source path: (sha1 of input, sha1 of output)}.

    Returns an empty dict when the DB file does not exist yet.
    """
    hashes = {}
    if os.path.isfile(filename):
        with open(filename, 'r') as db_file:
            for line in db_file:
                # each record is "path inhash outhash" separated by whitespace
                path, inhash, outhash = line.split()
                hashes[path] = (inhash, outhash)
    return hashes
def save_hashes(hash_db, filename):
    """Write the hash DB back to disk, one "path inhash outhash" line per entry."""
    records = ["%s %s %s\n" % (path, digests[0], digests[1])
               for path, digests in sorted(hash_db.items())]
    with open(filename, 'w') as db_file:
        db_file.writelines(records)
def sha1_of_file(filename):
    """Return the SHA-1 hex digest of the file's entire contents."""
    with open(filename, "rb") as f:
        contents = f.read()
    return hashlib.sha1(contents).hexdigest()
#
# Main program
#
def normpath(path):
    """Normalize a path to forward slashes and strip any leading './'."""
    path = path.replace(os.sep, '/')
    return path[2:] if path.startswith('./') else path
def get_hash(frompath, topath):
    """Return (sha1 of source, sha1 of target) — target hash is None if absent."""
    from_hash = sha1_of_file(frompath)
    if os.path.exists(topath):
        to_hash = sha1_of_file(topath)
    else:
        to_hash = None
    return (from_hash, to_hash)
def process(path, fromfile, tofile, processor_function, hash_db):
    """Regenerate `tofile` from `fromfile` when the hash DB says it changed.

    `processor_function` is one of the `rules` callbacks; it is invoked with
    the current working directory set to `path`. `hash_db` is updated in place.
    """
    fullfrompath = os.path.join(path, fromfile)
    fulltopath = os.path.join(path, tofile)
    if get_hash(fullfrompath, fulltopath) == hash_db.get(normpath(fullfrompath), None):
        print('%s has not changed' % fullfrompath)
        return
    orig_cwd = os.getcwd()
    try:
        # the processors expect to run from inside the source directory
        os.chdir(path)
        print('Processing %s' % fullfrompath)
        processor_function(fromfile, tofile)
    finally:
        os.chdir(orig_cwd)
    # changed target file, recompute hash and store it in the db
    hash_db[normpath(fullfrompath)] = get_hash(fullfrompath, fulltopath)
def find_process_files(root_dir):
    """Walk `root_dir` and regenerate C/C++ sources for every rule-matched file."""
    hash_db = load_hashes(HASH_FILE)
    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            # A foo.pyx generated from foo.pyx.in is rebuilt via the .pyx.in
            # rule, so skip the generated .pyx itself.
            in_file = os.path.join(cur_dir, filename + ".in")
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                continue
            for fromext, function in rules.items():
                if not filename.endswith(fromext):
                    continue
                # A `# distutils: language = c++` directive selects C++ output.
                toext = ".c"
                with open(os.path.join(cur_dir, filename), 'rb') as f:
                    data = f.read()
                    if re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M):
                        toext = ".cxx"
                tofile = filename[:-len(fromext)] + toext
                process(cur_dir, filename, tofile, function, hash_db)
    save_hashes(hash_db, HASH_FILE)
def main():
    """Entry point: cythonize everything under sys.argv[1] (default DEFAULT_ROOT)."""
    if len(sys.argv) > 1:
        root_dir = sys.argv[1]
    else:
        root_dir = DEFAULT_ROOT
    find_process_files(root_dir)
if __name__ == '__main__':
main() | {
"content_hash": "69679a60f6b4d5ec103b01573e9a065e",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 105,
"avg_line_length": 30.548223350253807,
"alnum_prop": 0.6010302426055167,
"repo_name": "dismalpy/dismalpy",
"id": "1d256f35bc18e927ef4bf95c0a42155ea4ba1a1e",
"size": "6040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/cythonize.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "381"
},
{
"name": "Matlab",
"bytes": "1383"
},
{
"name": "Python",
"bytes": "1252699"
},
{
"name": "R",
"bytes": "10458"
},
{
"name": "Stata",
"bytes": "22680"
}
],
"symlink_target": ""
} |
import unittest
from flask import url_for, session
from app import create_app, db
from app.models import Article, Comment, Tag
class TestAdmin(unittest.TestCase):
    """Integration tests for the admin blueprint: login and article management."""

    def setUp(self):
        # Fresh app, pushed app context, empty schema, and test client per test.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        self.client = self.app.test_client()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_login(self):
        """Posting valid credentials succeeds; invalid ones hit the fallback page."""
        self.client.get(url_for('admin.login_page'))
        response = self.client.post(url_for('admin.login_page'), data={
            'name': 'test',
            'password': '1234'
        }, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        # 'Bernie' is presumably part of the post-login page for the 'test'
        # fixture user -- TODO confirm against the admin templates.
        self.assertIn('Bernie', response.get_data(as_text=True))
        response = self.client.post(url_for('admin.login_page'), data={
            'name': 'alice',
            'password': 'wonderland'
        }, follow_redirects=True)
        # bad credentials land on the 'Under construction' page
        self.assertIn('Under construction', response.get_data(as_text=True))

    def test_new_article(self):
        """A logged-in admin can create an article and see its rendered page."""
        self.client.post(url_for('admin.login_page'), data={
            'name': 'test',
            'password': '1234'
        })
        response = self.client.post(url_for('admin.new_article'), data={
            'title': 'A Day To Celebrate',
            'subtitle': 'Holiday Special!',
            'formatted_title': '<strong>A</strong> Day To Celebrate',
            'tags': 'The Lion, the Witch, the Wardrobe',
            'content': '##This is a test\n\nyes it is',
            'image_url': 'https://upload.wikimedia.org/wikipedia/en/c/cb/The_Chronicles_of_Narnia_box_set_cover.jpg',
        }, follow_redirects=True)
        self.assertIn('<title>A Day To Celebrate - Will Skywalker\'s Ranch</title>', response.get_data(as_text=True))

    def test_manage_article(self):
        """Editing article #1 replaces its tags/title; editing requires login."""
        self.client.post(url_for('admin.login_page'), data={
            'name': 'test',
            'password': '1234'
        })
        # NOTE(review): this `response` is never asserted on; kept for parity
        # with test_new_article.
        response = self.client.post(url_for('admin.new_article'), data={
            'title': 'A Day To Celebrate',
            'subtitle': 'Holiday Special!',
            'tags': 'The Lion, the Witch, the Wardrobe',
            'content': '##This is a test\n\nyes it is',
            'image_url': 'https://upload.wikimedia.org/wikipedia/en/c/cb/The_Chronicles_of_Narnia_box_set_cover.jpg',
        })
        self.client.get(url_for('admin.manage_article', num=1))
        response = self.client.post(url_for('admin.manage_article', num=1), data={
            'title': 'Another Day To Celebrate',
            'formatted_title': '<em>Another</em> Day To Celebrate',
            'subtitle': 'Holiday Special!',
            'tags': 'The Lion, the Witch, PCMR',
            'content': '##This is a test\n\nyes it is',
            'image_url': 'https://upload.wikimedia.org/wikipedia/en/c/cb/The_Chronicles_of_Narnia_box_set_cover.jpg',
        }, follow_redirects=True)
        data = response.get_data(as_text=True)
        # the dropped tag is gone, the new tag and new title are rendered
        self.assertNotIn('<a href="/blog/tag/the Wardrobe">the Wardrobe</a>', data)
        self.assertIn('<a href="/blog/tag/PCMR">PCMR</a>', data)
        self.assertIn('<h2><em>Another</em> Day To Celebrate</h2>', data)
        # after a failed re-login, the manage page is no longer reachable
        self.client.post(url_for('admin.login_page'), data={
            'name': 'test',
            'password': 'baaaaad'
        })
        self.assertIn('Under construction',
                      self.client.get(url_for('admin.manage_article', num=1)).get_data(as_text=True))

    def test_manage_comment(self):
        # TODO: comment moderation is not covered yet.
        pass
| {
"content_hash": "d92022d3de53e697a09e463069efd155",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 117,
"avg_line_length": 39.215053763440864,
"alnum_prop": 0.5744447491088566,
"repo_name": "WillSkywalker/blog",
"id": "dd02d200d1d07583c4b4acfef96d7a9df1144c00",
"size": "3647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "98832"
},
{
"name": "HTML",
"bytes": "51893"
},
{
"name": "JavaScript",
"bytes": "15163"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "33385"
}
],
"symlink_target": ""
} |
"""Axis-related chart objects."""
from pptx.dml.chtfmt import ChartFormat
from pptx.enum.chart import (
XL_AXIS_CROSSES,
XL_CATEGORY_TYPE,
XL_TICK_LABEL_POSITION,
XL_TICK_MARK,
)
from pptx.oxml.ns import qn
from pptx.oxml.simpletypes import ST_Orientation
from pptx.shared import ElementProxy
from pptx.text.text import Font, TextFrame
from pptx.util import lazyproperty
class _BaseAxis(object):
    """Base class for chart axis objects. All axis objects share these properties."""

    def __init__(self, xAx):
        super(_BaseAxis, self).__init__()
        self._element = xAx  # axis element, c:catAx or c:valAx
        # NOTE(review): _xAx aliases _element; presumably kept as a more
        # descriptive name for subclass use -- confirm before removing.
        self._xAx = xAx

    @property
    def axis_title(self):
        """An |AxisTitle| object providing access to title properties.

        Calling this property is destructive in the sense that it adds an
        axis title element (`c:title`) to the axis XML if one is not already
        present. Use :attr:`has_title` to test for presence of axis title
        non-destructively.
        """
        return AxisTitle(self._element.get_or_add_title())

    @lazyproperty
    def format(self):
        """
        The |ChartFormat| object providing access to the shape formatting
        properties of this axis, such as its line color and fill.
        """
        return ChartFormat(self._element)

    @property
    def has_major_gridlines(self):
        """
        Read/write boolean value specifying whether this axis has gridlines
        at its major tick mark locations. Assigning |True| to this property
        causes major gridlines to be displayed. Assigning |False| causes them
        to be removed.
        """
        if self._element.majorGridlines is None:
            return False
        return True

    @has_major_gridlines.setter
    def has_major_gridlines(self, value):
        # presence/absence of the c:majorGridlines child encodes the flag
        if bool(value) is True:
            self._element.get_or_add_majorGridlines()
        else:
            self._element._remove_majorGridlines()

    @property
    def has_minor_gridlines(self):
        """
        Read/write boolean value specifying whether this axis has gridlines
        at its minor tick mark locations. Assigning |True| to this property
        causes minor gridlines to be displayed. Assigning |False| causes them
        to be removed.
        """
        if self._element.minorGridlines is None:
            return False
        return True

    @has_minor_gridlines.setter
    def has_minor_gridlines(self, value):
        if bool(value) is True:
            self._element.get_or_add_minorGridlines()
        else:
            self._element._remove_minorGridlines()

    @property
    def has_title(self):
        """Read/write boolean specifying whether this axis has a title.

        |True| if this axis has a title, |False| otherwise. Assigning |True|
        causes an axis title to be added if not already present. Assigning
        |False| causes any existing title to be deleted.
        """
        if self._element.title is None:
            return False
        return True

    @has_title.setter
    def has_title(self, value):
        if bool(value) is True:
            self._element.get_or_add_title()
        else:
            self._element._remove_title()

    @lazyproperty
    def major_gridlines(self):
        """
        The |MajorGridlines| object representing the major gridlines for
        this axis.
        """
        return MajorGridlines(self._element)

    @property
    def major_tick_mark(self):
        """
        Read/write :ref:`XlTickMark` value specifying the type of major tick
        mark to display on this axis.
        """
        majorTickMark = self._element.majorTickMark
        if majorTickMark is None:
            # a missing element means the default, CROSS
            return XL_TICK_MARK.CROSS
        return majorTickMark.val

    @major_tick_mark.setter
    def major_tick_mark(self, value):
        # remove, then re-add only when the value is not the default (CROSS)
        self._element._remove_majorTickMark()
        if value is XL_TICK_MARK.CROSS:
            return
        self._element._add_majorTickMark(val=value)

    @property
    def maximum_scale(self):
        """
        Read/write float value specifying the upper limit of the value range
        for this axis, the number at the top or right of the vertical or
        horizontal value scale, respectively. The value |None| indicates the
        upper limit should be determined automatically based on the range of
        data point values associated with the axis.
        """
        return self._element.scaling.maximum

    @maximum_scale.setter
    def maximum_scale(self, value):
        scaling = self._element.scaling
        scaling.maximum = value

    @property
    def minimum_scale(self):
        """
        Read/write float value specifying lower limit of value range, the
        number at the bottom or left of the value scale. |None| if no minimum
        scale has been set. The value |None| indicates the lower limit should
        be determined automatically based on the range of data point values
        associated with the axis.
        """
        return self._element.scaling.minimum

    @minimum_scale.setter
    def minimum_scale(self, value):
        scaling = self._element.scaling
        scaling.minimum = value

    @property
    def minor_tick_mark(self):
        """
        Read/write :ref:`XlTickMark` value specifying the type of minor tick
        mark for this axis.
        """
        minorTickMark = self._element.minorTickMark
        if minorTickMark is None:
            return XL_TICK_MARK.CROSS
        return minorTickMark.val

    @minor_tick_mark.setter
    def minor_tick_mark(self, value):
        self._element._remove_minorTickMark()
        if value is XL_TICK_MARK.CROSS:
            return
        self._element._add_minorTickMark(val=value)

    @property
    def reverse_order(self):
        """Read/write bool value specifying whether to reverse plotting order for axis.

        For a category axis, this reverses the order in which the categories are
        displayed. This may be desired, for example, on a (horizontal) bar-chart where
        by default the first category appears at the bottom. Since we read from
        top-to-bottom, many viewers may find it most natural for the first category to
        appear on top.

        For a value axis, it reverses the direction of increasing value from
        bottom-to-top to top-to-bottom.
        """
        return self._element.orientation == ST_Orientation.MAX_MIN

    @reverse_order.setter
    def reverse_order(self, value):
        self._element.orientation = (
            ST_Orientation.MAX_MIN if bool(value) is True else ST_Orientation.MIN_MAX
        )

    @lazyproperty
    def tick_labels(self):
        """
        The |TickLabels| instance providing access to axis tick label
        formatting properties. Tick labels are the numbers appearing on
        a value axis or the category names appearing on a category axis.
        """
        return TickLabels(self._element)

    @property
    def tick_label_position(self):
        """
        Read/write :ref:`XlTickLabelPosition` value specifying where the tick
        labels for this axis should appear.
        """
        tickLblPos = self._element.tickLblPos
        if tickLblPos is None:
            # no element at all -> default position
            return XL_TICK_LABEL_POSITION.NEXT_TO_AXIS
        if tickLblPos.val is None:
            # element present but no val attribute -> same default
            return XL_TICK_LABEL_POSITION.NEXT_TO_AXIS
        return tickLblPos.val

    @tick_label_position.setter
    def tick_label_position(self, value):
        tickLblPos = self._element.get_or_add_tickLblPos()
        tickLblPos.val = value

    @property
    def visible(self):
        """
        Read/write. |True| if axis is visible, |False| otherwise.
        """
        # visibility is stored inverted, as a c:delete element
        delete = self._element.delete_
        if delete is None:
            return False
        return False if delete.val else True

    @visible.setter
    def visible(self, value):
        if value not in (True, False):
            raise ValueError("assigned value must be True or False, got: %s" % value)
        delete = self._element.get_or_add_delete_()
        delete.val = not value
class AxisTitle(ElementProxy):
    """Provides properties for manipulating axis title."""

    def __init__(self, title):
        super(AxisTitle, self).__init__(title)
        self._title = title

    @lazyproperty
    def format(self):
        """|ChartFormat| proxy for this axis title's shape formatting.

        Provides access to shape formatting properties such as line color
        and fill.
        """
        return ChartFormat(self._element)

    @property
    def has_text_frame(self):
        """Read/write boolean: does this axis title contain a text frame?

        Assigning |True| adds a text frame if one is not already present;
        assigning |False| removes any existing text frame together with any
        text it contains.
        """
        return self._title.tx_rich is not None

    @has_text_frame.setter
    def has_text_frame(self, value):
        if bool(value):
            self._title.get_or_add_tx_rich()
        else:
            self._title._remove_tx()

    @property
    def text_frame(self):
        """|TextFrame| giving read/write access to this title's text.

        Note this property is destructive: accessing it adds a new text
        frame if one is not already present.
        """
        rich = self._title.get_or_add_tx_rich()
        return TextFrame(rich, self)
class CategoryAxis(_BaseAxis):
    """A category axis of a chart."""
    @property
    def category_type(self):
        """
        A member of :ref:`XlCategoryType` specifying the scale type of this
        axis. Unconditionally ``CATEGORY_SCALE`` for a |CategoryAxis| object.
        """
        # Fixed by axis type; DateAxis reports TIME_SCALE instead.
        return XL_CATEGORY_TYPE.CATEGORY_SCALE
class DateAxis(_BaseAxis):
    """A category axis with dates as its category labels.
    This axis-type has some special display behaviors such as making length of equal
    periods equal and normalizing month start dates despite unequal month lengths.
    """
    @property
    def category_type(self):
        """
        A member of :ref:`XlCategoryType` specifying the scale type of this
        axis. Unconditionally ``TIME_SCALE`` for a |DateAxis| object.
        """
        # Fixed by axis type; CategoryAxis reports CATEGORY_SCALE instead.
        return XL_CATEGORY_TYPE.TIME_SCALE
class MajorGridlines(ElementProxy):
    """Provides access to the properties of the major gridlines appearing on an axis."""
    def __init__(self, xAx):
        super(MajorGridlines, self).__init__(xAx)
        self._xAx = xAx  # axis element, catAx or valAx
    @lazyproperty
    def format(self):
        """
        The |ChartFormat| object providing access to the shape formatting
        properties of these gridlines, such as line and fill.
        """
        # Destructive on first access: adds a majorGridlines element if one
        # is not already present on the axis.
        majorGridlines = self._xAx.get_or_add_majorGridlines()
        return ChartFormat(majorGridlines)
class TickLabels(object):
    """A service class providing access to formatting of axis tick mark labels."""

    def __init__(self, xAx_elm):
        super(TickLabels, self).__init__()
        self._element = xAx_elm

    @lazyproperty
    def font(self):
        """|Font| object giving access to the text properties of these tick
        labels, such as bold, italic, etc.
        """
        return Font(self._element.defRPr)

    @property
    def number_format(self):
        """Read/write string specifying the number format for this axis.

        The format-code syntax (e.g. ``"$#,##0.00"``) is the same as in the
        PowerPoint or Excel UI. Returns ``'General'`` when no explicit
        format has been set. Note the format string has no effect on
        rendered tick labels while :meth:`number_format_is_linked` is
        |True|; assigning a format string automatically sets
        :meth:`number_format_is_linked` to |False|.
        """
        numFmt = self._element.numFmt
        return "General" if numFmt is None else numFmt.formatCode

    @number_format.setter
    def number_format(self, value):
        self._element.get_or_add_numFmt().formatCode = value
        # An explicitly assigned format should always take effect.
        self.number_format_is_linked = False

    @property
    def number_format_is_linked(self):
        """Read/write boolean: take the number format from the source
        spreadsheet rather than from :meth:`number_format`?
        """
        numFmt = self._element.numFmt
        if numFmt is None:
            return False
        sourceLinked = numFmt.sourceLinked
        # An absent sourceLinked attribute defaults to linked.
        return True if sourceLinked is None else sourceLinked

    @number_format_is_linked.setter
    def number_format_is_linked(self, value):
        self._element.get_or_add_numFmt().sourceLinked = value

    @property
    def offset(self):
        """Read/write int in range 0-1000: spacing between the tick-mark
        labels and the axis, as a percentage of the default distance.
        100 when no explicit label-offset setting is present.
        """
        lblOffset = self._element.lblOffset
        return 100 if lblOffset is None else lblOffset.val

    @offset.setter
    def offset(self, value):
        if self._element.tag != qn("c:catAx"):
            raise ValueError("only a category axis has an offset")
        # 100 is the default, represented by absence of the element.
        self._element._remove_lblOffset()
        if value != 100:
            self._element._add_lblOffset().val = value
class ValueAxis(_BaseAxis):
    """An axis having continuous (as opposed to discrete) values.
    The vertical axis is generally a value axis, however both axes of an XY-type chart
    are value axes.
    """
    @property
    def crosses(self):
        """
        Member of :ref:`XlAxisCrosses` enumeration specifying the point on
        this axis where the other axis crosses, such as auto/zero, minimum,
        or maximum. Returns `XL_AXIS_CROSSES.CUSTOM` when a specific numeric
        crossing point (e.g. 1.5) is defined.
        """
        # The crossing setting lives on the *other* (crossing) axis element;
        # absence of a crosses element implies a numeric crossesAt value,
        # i.e. a custom crossing point.
        crosses = self._cross_xAx.crosses
        if crosses is None:
            return XL_AXIS_CROSSES.CUSTOM
        return crosses.val
    @crosses.setter
    def crosses(self, value):
        cross_xAx = self._cross_xAx
        if value == XL_AXIS_CROSSES.CUSTOM:
            # Already custom: keep the existing numeric crossing point.
            if cross_xAx.crossesAt is not None:
                return
        # crosses and crossesAt are mutually exclusive; clear both before
        # adding the one matching *value*.
        cross_xAx._remove_crosses()
        cross_xAx._remove_crossesAt()
        if value == XL_AXIS_CROSSES.CUSTOM:
            # Custom with no prior value defaults the crossing point to 0.0.
            cross_xAx._add_crossesAt(val=0.0)
        else:
            cross_xAx._add_crosses(val=value)
    @property
    def crosses_at(self):
        """
        Numeric value on this axis at which the perpendicular axis crosses.
        Returns |None| if no crossing value is set.
        """
        crossesAt = self._cross_xAx.crossesAt
        if crossesAt is None:
            return None
        return crossesAt.val
    @crosses_at.setter
    def crosses_at(self, value):
        cross_xAx = self._cross_xAx
        cross_xAx._remove_crosses()
        cross_xAx._remove_crossesAt()
        # Assigning None removes both elements, i.e. automatic crossing.
        if value is None:
            return
        cross_xAx._add_crossesAt(val=value)
    @property
    def major_unit(self):
        """
        The float number of units between major tick marks on this value
        axis. |None| corresponds to the 'Auto' setting in the UI, and
        specifies the value should be calculated by PowerPoint based on the
        underlying chart data.
        """
        majorUnit = self._element.majorUnit
        if majorUnit is None:
            return None
        return majorUnit.val
    @major_unit.setter
    def major_unit(self, value):
        # 'Auto' (None) is represented by absence of the majorUnit element.
        self._element._remove_majorUnit()
        if value is None:
            return
        self._element._add_majorUnit(val=value)
    @property
    def minor_unit(self):
        """
        The float number of units between minor tick marks on this value
        axis. |None| corresponds to the 'Auto' setting in the UI, and
        specifies the value should be calculated by PowerPoint based on the
        underlying chart data.
        """
        minorUnit = self._element.minorUnit
        if minorUnit is None:
            return None
        return minorUnit.val
    @minor_unit.setter
    def minor_unit(self, value):
        # 'Auto' (None) is represented by absence of the minorUnit element.
        self._element._remove_minorUnit()
        if value is None:
            return
        self._element._add_minorUnit(val=value)
    @property
    def _cross_xAx(self):
        """
        The axis element in the same group (primary/secondary) that crosses
        this axis.
        """
        # Resolve the crossing axis by id: find the sibling cat/val/date
        # axis whose c:axId matches this axis's c:crossAx reference.
        crossAx_id = self._element.crossAx.val
        expr = '(../c:catAx | ../c:valAx | ../c:dateAx)/c:axId[@val="%d"]' % crossAx_id
        cross_axId = self._element.xpath(expr)[0]
        return cross_axId.getparent()
| {
"content_hash": "e6d0f0d764f96725cc764cbdba47973f",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 88,
"avg_line_length": 33.17658349328215,
"alnum_prop": 0.624703500144634,
"repo_name": "scanny/python-pptx",
"id": "66f325185d9f648f9dc36936ab8536df91b0bf86",
"size": "17304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pptx/chart/axis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "124592"
},
{
"name": "Makefile",
"bytes": "2055"
},
{
"name": "PLpgSQL",
"bytes": "48599"
},
{
"name": "Python",
"bytes": "2152173"
}
],
"symlink_target": ""
} |
"""
sentry.runner.commands.repair
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import os
import click
import six
from contextlib import contextmanager
from django.db import transaction
from sentry.runner.decorators import configuration
from sentry.utils.strings import iter_callsign_choices
class RollbackLocally(Exception):
    """Signal to roll back the current ``catchable_atomic`` transaction.

    Caught (and swallowed) by :func:`catchable_atomic`.
    """
    pass
@contextmanager
def catchable_atomic():
    """Context manager wrapping ``transaction.atomic()``.

    Raising :class:`RollbackLocally` inside the block rolls the transaction
    back silently; any other exception propagates normally.
    """
    try:
        with transaction.atomic():
            yield
    except RollbackLocally:
        pass
def get_callsigns(projects):
    """Return a mapping ``{project_id: callsign}`` for *projects*.

    Projects that already carry a callsign keep it; for each of the rest,
    the first candidate from ``iter_callsign_choices`` not already taken
    is used.
    """
    taken = {}
    for project in projects:
        if project.callsign is not None:
            taken[project.callsign] = project.id
            continue
        for candidate in iter_callsign_choices(project.name):
            if candidate not in taken:
                taken[candidate] = project.id
                break
    # Invert to id -> callsign for the caller.
    return dict((v, k) for k, v in six.iteritems(taken))
def sync_docs():
    """Synchronize embedded client documentation from the docs server.

    Skips (with a console message) when the documentation folder is not
    writable or does not exist; sync failures are reported, not raised.
    """
    click.echo('Forcing documentation sync')
    # NOTE: the imported name deliberately shadows this function; it is
    # only used within this local scope.
    from sentry.utils.integrationdocs import sync_docs, DOC_FOLDER
    if os.access(DOC_FOLDER, os.W_OK):
        try:
            sync_docs()
        except Exception as e:
            click.echo(' - skipping, failure: %s' % e)
    elif os.path.isdir(DOC_FOLDER):
        click.echo(' - skipping, path cannot be written to: %r' % DOC_FOLDER)
    else:
        click.echo(' - skipping, path does not exist: %r' % DOC_FOLDER)
def repair_callsigns():
    """Backfill missing project callsigns and group short IDs.

    For every organization: assign a callsign to each project lacking one,
    drop the corresponding 'reviewed-callsign' option, then give every
    group without a short_id the next value from the project counter.
    """
    from sentry.utils.query import RangeQuerySetWrapperWithProgressBar, \
        RangeQuerySetWrapper
    from sentry.models.counter import increment_project_counter
    from sentry.models import Organization, Group, Project, ProjectOption
    click.echo('Repairing callsigns')
    queryset = Organization.objects.all()
    for org in RangeQuerySetWrapperWithProgressBar(queryset):
        projects = list(org.project_set.all())
        callsigns = get_callsigns(projects)
        for project in projects:
            if project.callsign is None:
                # Re-filter on callsign=None so a value written by a
                # concurrent process is not clobbered.
                Project.objects.filter(
                    pk=project.id,
                    callsign=None
                ).update(callsign=callsigns[project.id])
                ProjectOption.objects.filter(
                    project=project,
                    key='sentry:reviewed-callsign'
                ).delete()
            q = Group.objects.filter(
                project=project,
                short_id=None,
            )
            for group in RangeQuerySetWrapper(q):
                with catchable_atomic():
                    pending_short_id = increment_project_counter(
                        project)
                    updated = Group.objects.filter(
                        pk=group.id,
                        short_id=None
                    ).update(short_id=pending_short_id)
                    # If another writer set the short_id meanwhile, roll
                    # back the counter increment instead of leaking it.
                    if updated == 0:
                        raise RollbackLocally()
def create_missing_dsns():
    """Create a default DSN (ProjectKey) for projects that have none."""
    from sentry.models import Project, ProjectKey
    click.echo('Creating missing DSNs')
    queryset = Project.objects.filter(key_set__isnull=True)
    for project in queryset:
        try:
            ProjectKey.objects.get_or_create(
                project=project,
            )
        except ProjectKey.MultipleObjectsReturned:
            # Raced with another writer; the project has keys now anyway.
            pass
def fix_group_counters():
    """Recompute ``Group.num_comments`` from stored NOTE activities."""
    from sentry.models import Activity
    from django.db import connection
    click.echo('Correcting Group.num_comments counter')
    cursor = connection.cursor()
    # Parameterized raw SQL; Activity.NOTE is bound by the driver.
    cursor.execute("""
        UPDATE sentry_groupedmessage SET num_comments = (
            SELECT COUNT(*) from sentry_activity
            WHERE type = %s and group_id = sentry_groupedmessage.id
        )
    """, [Activity.NOTE])
@click.command()
@click.option('--with-docs/--without-docs', default=False,
              help='Synchronize and repair embedded documentation. This '
                   'is disabled by default.')
@click.option('--with-callsigns/--without-callsigns', default=False,
              help='Repair and fill callsigns. This is disabled by default.')
@configuration
def repair(with_docs, with_callsigns):
    """Attempt to repair any invalid data.
    This by default will correct some common issues like projects missing
    DSNs or counters desynchronizing. Optionally it can also synchronize
    the current client documentation from the Sentry documentation server
    (--with-docs) and repair missing or broken callsigns and short IDs
    (--with-callsigns).
    """
    # Opt-in repairs first.
    if with_docs:
        sync_docs()
    if with_callsigns:
        repair_callsigns()
    # Always-on repairs.
    create_missing_dsns()
    fix_group_counters()
| {
"content_hash": "101a0096e909d017a407b021622ce144",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 77,
"avg_line_length": 30.947712418300654,
"alnum_prop": 0.6166842661034847,
"repo_name": "alexm92/sentry",
"id": "6e748b12fff5d89c594c44da9ff1aefdf8256679",
"size": "4735",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/sentry/runner/commands/repair.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "156715"
},
{
"name": "HTML",
"bytes": "191265"
},
{
"name": "JavaScript",
"bytes": "457236"
},
{
"name": "Makefile",
"bytes": "4689"
},
{
"name": "Python",
"bytes": "7262450"
}
],
"symlink_target": ""
} |
import BaseHTTPServer, SimpleHTTPServer
import ssl
import sys
from SimpleHTTPServer import SimpleHTTPRequestHandler
import base64
key = ""
class AuthHandler(SimpleHTTPRequestHandler):
    ''' Main class to present webpages and authentication. '''
    # NOTE: legacy Python 2 code (print statements, getheader()).
    def do_HEAD(self):
        # Plain 200 response with an HTML content type.
        print "send header"
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
    def do_AUTHHEAD(self):
        # 401 challenge asking the client for HTTP Basic credentials.
        print "send header"
        self.send_response(401)
        self.send_header('WWW-Authenticate', 'Basic realm=\"Test\"')
        self.send_header('Content-type', 'text/html')
        self.end_headers()
    def do_GET(self):
        global key
        ''' Present frontpage with user authentication. '''
        # NOTE(review): `== None` would conventionally be `is None`;
        # behavior is equivalent for the header value here.
        if self.headers.getheader('Authorization') == None:
            # No credentials supplied: challenge the client.
            self.do_AUTHHEAD()
            self.wfile.write('no auth header received')
            pass
        elif self.headers.getheader('Authorization') == 'Basic '+key:
            # Credentials match the expected base64 token: serve the file.
            SimpleHTTPRequestHandler.do_GET(self)
            pass
        else:
            # Wrong credentials: echo the header back and re-challenge.
            self.do_AUTHHEAD()
            self.wfile.write(self.headers.getheader('Authorization'))
            self.wfile.write('not authenticated')
            pass
# Usage: SimpleAuthServer.py [host:port] [username:password] [certfile]
if len(sys.argv) < 4:
    print "usage SimpleAuthServer.py [host:port] [username:password] [certfile]"
    sys.exit()
host = sys.argv[1].split(":")[0]
port = int(sys.argv[1].split(":")[1])
# Expected Basic credential token: base64("username:password").
key = base64.b64encode(sys.argv[2])
httpd = BaseHTTPServer.HTTPServer((host, port), AuthHandler)
# Wrap the listening socket in TLS using the supplied certificate file.
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=sys.argv[3], server_side=True)
httpd.serve_forever()
| {
"content_hash": "de1a267028245b070346f6455e561c2a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 84,
"avg_line_length": 33.326530612244895,
"alnum_prop": 0.6387017758726271,
"repo_name": "felipehuici/telerobot",
"id": "0bb4e2aa923e0c25bdc460f5ed27ce65078d64aa",
"size": "1633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_https_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "729"
},
{
"name": "Python",
"bytes": "26513"
}
],
"symlink_target": ""
} |
import os
from fab_deploy2 import functions
from fab_deploy2.tasks import ContextTask
from fabric.api import run, sudo
class HiRedisSetup(ContextTask):
    """
    Setup hiredis
    """
    context_name = 'hiredis'
    # Default context values; presumably overridable per-host via the
    # task context machinery — confirm against ContextTask.
    default_context = {
        'package_name' : 'libhiredis-dev'
    }
    name = "setup"
    def run(self):
        # Delegate the actual install to the generic package-install task.
        functions.execute_on_host('utils.install_package', package_name=self.package_name)
# Module-level task instance exposed for fabric task discovery.
setup = HiRedisSetup()
| {
"content_hash": "a6e936915aa8baf6bf61e7dd146f3214",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 90,
"avg_line_length": 20.227272727272727,
"alnum_prop": 0.6696629213483146,
"repo_name": "ff0000/red-fab-deploy2",
"id": "e19b0a94962eeecd39ef3dc5bc30022b7a54acc1",
"size": "445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fab_deploy2/operating_systems/ubuntu/hiredis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7546"
},
{
"name": "Python",
"bytes": "197452"
},
{
"name": "Shell",
"bytes": "62903"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
    # Default to the project's settings module unless the caller has
    # already exported DJANGO_SETTINGS_MODULE.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testapp.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| {
"content_hash": "5eb93d717d7562efb5b2cf627c874e27",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 71,
"avg_line_length": 22.9,
"alnum_prop": 0.7074235807860262,
"repo_name": "kevana/django-test",
"id": "e1e33e37ffac922e6e31f51e3f93d0b498efc6a2",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27656"
},
{
"name": "HTML",
"bytes": "4751"
},
{
"name": "Makefile",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "8736"
}
],
"symlink_target": ""
} |
import unittest
from twork import utils
from twork.web.server import TApplication
@utils.common.singleton
class _Base(object):
    # Singleton-decorated: repeated construction yields the same instance
    # (asserted in SingletonTest below).
    pass
class Base(object):
    # Undecorated baseline class.
    pass
class Derived(Base):
    # Subclass of the undecorated Base; each construction is distinct.
    pass
class WebApplication(TApplication):
    # Application subclass used to exercise the instance() accessor.
    pass
class SingletonTest(unittest.TestCase):
    """Checks singleton semantics of the decorator and TApplication."""
    def setUp(self):
        # No fixtures required.
        pass
    def test_singleton(self):
        # instance() should return a usable application object.
        assert WebApplication.instance() is not None
        # The @singleton decorator makes construction idempotent...
        assert _Base() is _Base()
        # ...while ordinary classes produce a fresh object per call.
        assert Derived() is not Derived()
| {
"content_hash": "eae94269d9ec8c548286da245c92791f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 52,
"avg_line_length": 15.64516129032258,
"alnum_prop": 0.6824742268041237,
"repo_name": "bufferx/twork",
"id": "27c299813e5647a44668db59578dfcbc3f59d90a",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1494"
},
{
"name": "Python",
"bytes": "56206"
},
{
"name": "Shell",
"bytes": "3773"
}
],
"symlink_target": ""
} |
import numpy as nm
from sfepy.base.base import assert_, OneTypeList, Container, Struct
class Functions(Container):
    """Container to hold all user-defined functions."""
    def from_conf(conf):
        """Build a Functions container from configuration *conf*.

        Each configuration item yields a non-constant Function with empty
        ``extra_args``. (Python 2 code: ``iteritems``.)
        """
        objs = OneTypeList(Function)
        for key, fc in conf.iteritems():
            fun = Function(name = fc.name,
                           function = fc.function,
                           is_constant = False,
                           extra_args = {})
            objs.append(fun)
        obj = Functions(objs)
        return obj
    from_conf = staticmethod(from_conf)
class Function(Struct):
    """Base class for user-defined functions."""

    def __init__(self, name, function, is_constant=False, extra_args=None):
        """Wrap *function* under *name*.

        ``extra_args`` are keyword arguments merged into every call.
        """
        Struct.__init__(self, name=name, function=function,
                        is_constant=is_constant)
        self.extra_args = {} if extra_args is None else extra_args

    def __call__(self, *args, **kwargs):
        # Merge the stored extra arguments into the per-call keywords;
        # extra_args take precedence over duplicates.
        call_kwargs = dict(kwargs)
        call_kwargs.update(self.extra_args)
        return self.function(*args, **call_kwargs)

    def set_function(self, function, is_constant=False):
        """Replace the wrapped callable."""
        self.function = function
        self.is_constant = is_constant

    def set_extra_args(self, **extra_args):
        """Set the keyword arguments merged into every call."""
        self.extra_args = extra_args
class ConstantFunction(Function):
    """Function with constant values."""
    def __init__(self, values):
        """Make a function out of a dictionary of constant values. When
        called with coors argument, the values are repeated for each
        coordinate."""
        name = '_'.join(['get_constants'] + values.keys())
        def get_constants(ts=None, coors=None, mode=None, **kwargs):
            # Closure over *values*; keys containing '.' denote "special"
            # entries addressed by the part after the dot.
            out = {}
            if mode == 'special':
                for key, val in values.iteritems():
                    if '.' in key:
                        vkey = key.split('.')[1]
                        out[vkey] = val
            elif (mode == 'qp'):
                # Quadrature-point mode: tile each constant once per
                # coordinate row, as a (n_coor, ...) float64 array.
                for key, val in values.iteritems():
                    if '.' in key: continue
                    val = nm.array(val, dtype=nm.float64, ndmin=3)
                    out[key] = nm.tile(val, (coors.shape[0], 1, 1))
            elif (mode == 'special_constant') or (mode is None):
                for key, val in values.iteritems():
                    if '.' in key: continue
                    out[key] = val
            else:
                raise ValueError('unknown function mode! (%s)' % mode)
            return out
        Function.__init__(self, name = name, function = get_constants,
                          is_constant = True)
class ConstantFunctionByRegion(Function):
    """
    Function with constant values in regions.
    """
    def __init__(self, values):
        """
        Make a function out of a dictionary of constant values per region. When
        called with coors argument, the values are repeated for each
        coordinate in each of the given regions.
        """
        name = '_'.join(['get_constants_by_region'] + values.keys())
        def get_constants(ts=None, coors=None, mode=None,
                          term=None, problem=None, **kwargs):
            out = {}
            if mode == 'qp':
                qps = term.get_physical_qps()
                # One row of quadrature points per coordinate row.
                assert_(qps.num == coors.shape[0])
                for key, val in values.iteritems():
                    if '.' in key: continue
                    # Use an arbitrary region's value only to fix the
                    # per-entry shape of the material data array.
                    rval = nm.array(val[val.keys()[0]], dtype=nm.float64,
                                    ndmin=3)
                    s0 = rval.shape[1:]
                    matdata = nm.zeros(qps.shape[:2] + s0, dtype=nm.float64)
                    for rkey, rval in val.iteritems():
                        # Fill the cells belonging to each region with
                        # that region's constant value.
                        region = problem.domain.regions[rkey]
                        rval = nm.array(rval, dtype=nm.float64, ndmin=3)
                        ii = term.region.get_cell_indices(region.cells)
                        matdata[ii] = rval
                    out[key] = matdata.reshape((-1,) + s0)
            return out
        Function.__init__(self, name=name, function=get_constants,
                          is_constant=True)
| {
"content_hash": "bea348d9e6ecae3934727964d0f851d2",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 79,
"avg_line_length": 34.53719008264463,
"alnum_prop": 0.5116056472840392,
"repo_name": "RexFuzzle/sfepy",
"id": "f4dda6e59fcdbd8b4cee22074b6436554165567e",
"size": "4179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sfepy/discrete/functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "443017"
},
{
"name": "C++",
"bytes": "2619"
},
{
"name": "GLSL",
"bytes": "6058"
},
{
"name": "Makefile",
"bytes": "184"
},
{
"name": "Python",
"bytes": "2420488"
},
{
"name": "Shell",
"bytes": "71"
}
],
"symlink_target": ""
} |
__all__ = ['exporter','serializer', 'dictionary', 'duolingo', 'yandex', 'user']
# Re-export the most commonly used classes at package level.
from memlingo.dictionary import WordDictionary
from memlingo.user import User
| {
"content_hash": "7ee1d16aa7d253ec5021c264adb5e16d",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 79,
"avg_line_length": 20.375,
"alnum_prop": 0.7177914110429447,
"repo_name": "pedroallenrevez/MemLinguo",
"id": "9ef2999f640979371ddd89e6548105089a5c2207",
"size": "163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "memlingo/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21064"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import hashlib
# Puzzle input (secret key); the MD5 state is pre-seeded once so each
# candidate suffix only needs a cheap copy() in find_tail.
prefix = 'yzbqklnj'
prefix_m = hashlib.md5(prefix)
def find_tail(startstring, hasher=None):
    """Return the smallest integer whose decimal suffix yields a matching digest.

    Counting up from 0, each candidate ``tail`` is appended (as its decimal
    string) to a copy of the seeded hash state; the first ``tail`` whose
    hex digest starts with *startstring* is returned.

    Args:
        startstring: required hex-digest prefix, e.g. ``'00000'``.
        hasher: optional pre-seeded hashlib object to search with;
            defaults to the module-level ``prefix_m``.
    """
    base = prefix_m if hasher is None else hasher
    tail = 0
    while True:
        curr_m = base.copy()
        # .encode() keeps this working on Python 3, where hash objects
        # accept only bytes; it is a no-op for ASCII digits on Python 2.
        curr_m.update(str(tail).encode())
        if curr_m.hexdigest()[:len(startstring)] == startstring:
            return tail
        tail += 1
# Part 1: five leading zeros; Part 2: six leading zeros.
print(find_tail('00000'))
print(find_tail('000000'))
| {
"content_hash": "d1340b8f03831a8f87de398a56ab20f9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 58,
"avg_line_length": 23.157894736842106,
"alnum_prop": 0.6022727272727273,
"repo_name": "v-ek/advofcode",
"id": "7d4e53fe3758d1a17c135a63ec8a44584cc0c373",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10684"
}
],
"symlink_target": ""
} |
'''
Class to cache songs into local storage.
'''
from singleton import Singleton
import threading
import subprocess
from const import Constant
from config import Config
import os
import logger
import signal
log = logger.getLogger(__name__)
class Cache(Singleton):
    def __init__(self):
        # Singleton: bail out if this instance was already initialized.
        if hasattr(self, '_init'):
            return
        self._init = True
        self.const = Constant()
        self.config = Config()
        # Serializes the download loop: only one worker drains the queue.
        self.download_lock = threading.Lock()
        # Guards all access to the `downloading` queue.
        self.check_lock = threading.Lock()
        self.downloading = []
        self.aria2c = None
        self.stop = False
        # Caching can be disabled entirely via configuration.
        self.enable = self.config.get_item("cache")
        self.aria2c_parameters = self.config.get_item("aria2c_parameters")
    def start_download(self):
        """Drain the download queue with aria2c; returns False if another
        thread is already downloading."""
        # Non-blocking acquire: do not queue a second worker.
        check = self.download_lock.acquire(False)
        if not check:
            return False
        while True:
            if self.stop:
                break
            if not self.enable:
                break
            self.check_lock.acquire()
            if len(self.downloading) <= 0:
                self.check_lock.release()
                break
            # LIFO: most recently queued song is fetched first.
            data = self.downloading.pop()
            self.check_lock.release()
            song_id = data[0]
            song_name = data[1]
            artist = data[2]
            url = data[3]
            onExit = data[4]
            output_path = Constant.download_dir
            output_file = str(artist) + " - " + str(song_name) + ".mp3"
            try:
                # Overwrite a stale partial file rather than auto-renaming.
                para = ['aria2c', '--auto-file-renaming=false', '--allow-overwrite=true', '-d', output_path, '-o',
                        output_file, url]
                # User-configured extra aria2c flags go right after argv[0].
                para[1:1] = self.aria2c_parameters
                self.aria2c = subprocess.Popen(para,
                                               stdin=subprocess.PIPE,
                                               stdout=subprocess.PIPE,
                                               stderr=subprocess.PIPE)
                self.aria2c.wait()
            except Exception:
                # NOTE(review): if Popen itself failed, self.aria2c may be
                # None (or a previous process) when returncode is read below.
                log.debug(str(song_id) + " Cache Error")
            if self.aria2c.returncode == 0:
                log.debug(str(song_id) + " Cache OK")
                # Notify the caller with the final on-disk path.
                onExit(song_id, output_path + "/" + output_file)
        self.download_lock.release()
    def add(self, song_id, song_name, artist, url, onExit):
        """Queue a song for the background downloader."""
        self.check_lock.acquire()
        self.downloading.append([song_id, song_name, artist, url, onExit])
        self.check_lock.release()
    def quit(self):
        """Ask the download loop to stop and kill any running aria2c."""
        self.stop = True
        try:
            os.kill(self.aria2c.pid, signal.SIGKILL)
        except:
            # aria2c may never have started or already exited.
            pass
| {
"content_hash": "03311f052ae806d7ece693ff58c409c1",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 114,
"avg_line_length": 30.819277108433734,
"alnum_prop": 0.5312744331508992,
"repo_name": "miuc/musicbox",
"id": "32c466e8aefb9b97a178f32ed29ccbaa53ee7671",
"size": "2625",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "NEMbox/cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106767"
}
],
"symlink_target": ""
} |
from pybrain.tools.shortcuts import buildNetwork
class Brain:
    """Thin wrapper around a small feed-forward network for movement."""

    def __init__(self):
        # 9 inputs -> 3 hidden -> 2 outputs, with bias units.
        self.move_network = buildNetwork(9, 3, 2, bias=True)

    def process_input(self, input):
        """Feed *input* through the network; return the two outputs as a pair."""
        out = self.move_network.activate(input)
        return out[0], out[1]
| {
"content_hash": "449b613471214fc2238826c75ee0f6ba",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 60,
"avg_line_length": 18.714285714285715,
"alnum_prop": 0.6297709923664122,
"repo_name": "nlowery/org-sandbox",
"id": "b1e56c171229bf14b9d585db47deb5de7b002ad5",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "organism/brain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21127"
}
],
"symlink_target": ""
} |
from .request import req
def follow(url_token):
    """Follow a user.

    Args:
        url_token: The url token (or slug) of the user.
    """
    return req.post('/api/v4/members/%s/followers' % url_token)
def unfollow(url_token):
    """Unfollow a user.

    Args:
        url_token: The url token (or slug) of the user.
    """
    return req.delete('/api/v4/members/%s/followers' % url_token)
def message(user_id, content):
    """Send message to a user.

    Args:
        user_id: Hash id of the user.
        content: Message content in string.
    """
    payload = {
        'content': content,
        'type': 'common',
        'receiver_hash': user_id,
    }
    return req.post('/api/v4/messages', json=payload)
def vote(answer_id, val):
    """Vote an answer.

    Args:
        answer_id: The id of the answer.
        val: Voting type in number. 1 for voting up, 0 for voting
            neutral, -1 for voting down.
    """
    if val > 0:
        direction = 'up'
    elif val < 0:
        direction = 'down'
    else:
        direction = 'neutral'
    url = '/api/v4/answers/%s/voters' % answer_id
    return req.post(url, json={'type': direction})
def vote_up(answer_id):
    """Vote up an answer.

    Args:
        answer_id: The id of the answer.
    """
    # Convenience wrapper around vote().
    return vote(answer_id, 1)
def vote_neutral(answer_id):
    """Vote neutral an answer.

    Args:
        answer_id: The id of the answer.
    """
    # Convenience wrapper around vote().
    return vote(answer_id, 0)
def vote_down(answer_id):
    """Vote down an answer.

    Args:
        answer_id: The id of the answer.
    """
    # Convenience wrapper around vote().
    return vote(answer_id, -1)
def report(resource_id, resource_type, reason_type):
    """Report a question, an answer, or a member.

    For a question, valid reason_type are:
    - personal
    - spam
    - TODO add more reason_types for question
    For an answer, valid reason_type are:
    - spam
    - TODO add more reason_types for answer
    For a user, valid reason_type are:
    - spam
    - TODO add more reason_types for user

    Args:
        resource_id: The id of the entity (question id, answer id, member name)
        resource_type: question, answer, member
        reason_type: spam, etc.
    """
    payload = {
        'resource_id': resource_id,
        'type': resource_type,
        'reason_type': reason_type,
        'source': 'web',
    }
    return req.post('/api/v4/reports', json=payload)
| {
"content_hash": "32af5f4f4b7af6f5de84ca9859978d42",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 94,
"avg_line_length": 22.5,
"alnum_prop": 0.5664646464646464,
"repo_name": "syaning/zhihuapi-py",
"id": "fe10a13f1cfbd814da4c00b970e4f20823943a9d",
"size": "2475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zhihuapi/action.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36369"
}
],
"symlink_target": ""
} |
from os.path import abspath, dirname, join
from shutil import which
from preggy import expect
from tests.base import TestCase
from thumbor.config import Config
from thumbor.context import RequestParameters, ServerParameters
from thumbor.engines.gif import Engine
from thumbor.importer import Importer
STORAGE_PATH = abspath(join(dirname(__file__), "../fixtures/images/"))
class GitEngineTestCase(TestCase):
def get_config(self):
return Config(
SECURITY_KEY="ACME-SEC",
ENGINE="thumbor.engines.gif",
IMAGE_METADATA_READ_FORMATS="exif,xmp",
LOADER="thumbor.loaders.file_loader",
FILE_LOADER_ROOT_PATH=STORAGE_PATH,
STORAGE="thumbor.storages.no_storage",
USE_GIFSICLE_ENGINE=True,
RESPECT_ORIENTATION=True,
)
def get_importer(self):
return Importer(self.config)
def get_server(self):
server = ServerParameters(8889, "localhost", "thumbor.conf", None, "info", None)
server.security_key = "ACME-SEC"
server.gifsicle_path = which("gifsicle")
return server
def get_context(self):
context = super().get_context()
req = RequestParameters(url="/foo/bar.gif")
context.request = req
return context
def test_create_engine(self):
engine = Engine(self.context)
expect(engine).to_be_instance_of(Engine)
def test_load_image(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, "animated.gif"), "rb") as image_file:
buffer = image_file.read()
image = engine.create_image(buffer)
expect(image.format).to_equal("GIF")
def test_errors_on_gifsicle_should_not_raises_errors_when_output(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, "SmallFullColourGIF.gif"), "rb") as image_file:
buffer = image_file.read()
engine.load(buffer, ".gif")
result = engine.run_gifsicle("--some-invalid-opt")
expect(result).Not.to_be_null()
def test_is_multiple_should_returns_true_if_gif_has_many_frames(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, "animated.gif"), "rb") as image_file:
buffer = image_file.read()
engine.load(buffer, ".gif")
expect(engine.is_multiple()).to_be_true()
def test_is_multiple_should_returns_false_if_gif_has_one_frame(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, "animated-one-frame.gif"), "rb") as image_file:
buffer = image_file.read()
engine.load(buffer, ".gif")
expect(engine.is_multiple()).to_be_false()
def test_convert_to_grayscale_should_update_image(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, "animated.gif"), "rb") as image_file:
buffer = image_file.read()
engine.load(buffer, ".gif")
buffer = engine.read()
engine.convert_to_grayscale()
expect(buffer).not_to_equal(engine.read())
def test_convert_to_grayscale_should_not_update_image(self):
    """Calling the conversion with False must leave the bytes untouched."""
    eng = Engine(self.context)
    with open(join(STORAGE_PATH, "animated.gif"), "rb") as image_file:
        eng.load(image_file.read(), ".gif")
    before = eng.read()
    eng.convert_to_grayscale(False)
    expect(before).to_equal(eng.read())
| {
"content_hash": "6a78563fe8ade90db8f56604f233e764",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 88,
"avg_line_length": 34.878787878787875,
"alnum_prop": 0.631624674196351,
"repo_name": "scorphus/thumbor",
"id": "0f78169a151762c05705c6240f65b555369f9a19",
"size": "3705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/engines/test_gif.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "58654"
},
{
"name": "JavaScript",
"bytes": "2514"
},
{
"name": "Makefile",
"bytes": "11518"
},
{
"name": "Python",
"bytes": "604965"
},
{
"name": "Shell",
"bytes": "331"
}
],
"symlink_target": ""
} |
"""Get status of disk."""
from status_base import mongo_collection
from status_base import mongo_database
from status_base import save
from status_base import schedule_log
from status_base import setup_environment
from status_base import get_parameters
from urllib.request import urlopen
setup_environment()
def status():
    """Check each configured URL and persist the reachability results.

    Fetches every URL returned by ``get_parameters``; a URL passes only when
    it answers with HTTP 200.  Any non-200 code or exception flips the
    overall flag to False.  The flag and per-URL results are stored via
    ``save``.
    """
    schedule_log("Starting URL checker")
    status = True
    output = ''
    data = {
        'results': []
    }
    urls = get_parameters()
    schedule_log('Got %s URLs' % len(urls))
    schedule_log('%s' % urls)
    for url in urls:
        schedule_log('Checking: %s' % url)
        try:
            # Use the response as a context manager so the underlying
            # socket is closed deterministically instead of leaking.
            with urlopen(url) as response:
                get_code = response.getcode()
            schedule_log('Got code: %s' % get_code)
            data['results'].append({
                'url': url,
                'status': get_code
            })
            if get_code != 200:
                status = False
        except Exception as ex:
            # Any network/HTTP failure marks the overall check as failed;
            # the exception text is recorded in place of a status code.
            status = False
            schedule_log('Exception: %s' % ex)
            data['results'].append({
                'url': url,
                'status': '%s' % ex
            })
    save(status, data, mongo_database(), mongo_collection(), output)
    schedule_log("Finished")
| {
"content_hash": "d3840e8b8bbaaba1942701fd3222b883",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 68,
"avg_line_length": 24.076923076923077,
"alnum_prop": 0.5503194888178914,
"repo_name": "CornerstoneLabs/service-dashboard",
"id": "827bbb6a78264c33a0c0a88e79a05b62de39a6cf",
"size": "1252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/fabric-plugins/status_url_check.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "182533"
},
{
"name": "HTML",
"bytes": "201419"
},
{
"name": "JavaScript",
"bytes": "46752"
},
{
"name": "Makefile",
"bytes": "104"
},
{
"name": "Python",
"bytes": "13011"
}
],
"symlink_target": ""
} |
from app import app
from app import manager
# Run the management CLI only when executed as a script (not on import).
if __name__ == "__main__":
    manager.run()
| {
"content_hash": "f4bfdab0250eb5d3845f7a1195cf4c39",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 26,
"avg_line_length": 15.166666666666666,
"alnum_prop": 0.6153846153846154,
"repo_name": "VincentTide/vincenttide",
"id": "84a5c8439938d832d5e4b8c1b92216bdf831ab40",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2688"
},
{
"name": "HTML",
"bytes": "33347"
},
{
"name": "JavaScript",
"bytes": "1784"
},
{
"name": "Python",
"bytes": "19595"
}
],
"symlink_target": ""
} |
"""Test the avoid_reuse and setwalletflag features."""
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_raises_rpc_error,
)
def reset_balance(node, discardaddr):
    """Drain the node's wallet to *discardaddr* so its balance returns to 0."""
    spendable = node.getbalance(avoid_reuse=False)
    if spendable <= 0.5:
        return
    node.sendtoaddress(address=discardaddr, amount=spendable,
                       subtractfeefromamount=True, avoid_reuse=False)
def count_unspent(node):
    """Tally the node's unspent outputs into total and reused buckets.

    "supported" records whether every inspected UTXO carried a "reused"
    field (i.e. the wallet exposes reuse tracking).
    """
    stats = {
        "total": {"count": 0, "sum": 0},
        "reused": {"count": 0, "sum": 0},
    }
    supports_reused = True
    for utxo in node.listunspent(minconf=0):
        stats["total"]["count"] += 1
        stats["total"]["sum"] += utxo["amount"]
        if not (supports_reused and "reused" in utxo):
            supports_reused = False
            continue
        if utxo["reused"]:
            stats["reused"]["count"] += 1
            stats["reused"]["sum"] += utxo["amount"]
    stats["reused"]["supported"] = supports_reused
    return stats
def assert_unspent(node, total_count=None, total_sum=None, reused_supported=None, reused_count=None, reused_sum=None, margin=0.001):
    """Assert that count_unspent(node) matches every expectation that is not None."""
    stats = count_unspent(node)
    total, reused = stats["total"], stats["reused"]
    if total_count is not None:
        assert_equal(total["count"], total_count)
    if total_sum is not None:
        assert_approx(total["sum"], total_sum, margin)
    if reused_supported is not None:
        assert_equal(reused["supported"], reused_supported)
    if reused_count is not None:
        assert_equal(reused["count"], reused_count)
    if reused_sum is not None:
        assert_approx(reused["sum"], reused_sum, margin)
def assert_balances(node, mine, margin=0.001):
    """Check that getbalances()['mine'] approximately contains every entry in *mine*."""
    actual = node.getbalances()["mine"]
    for field, expected in mine.items():
        assert_approx(actual[field], expected, margin)
class AvoidReuseTest(SyscoinTestFramework):
    """Functional tests for the wallet avoid_reuse flag and the setwalletflag RPC."""

    def set_test_params(self):
        # Two nodes: [0] funds transactions, [1] holds the wallet under test.
        self.num_nodes = 2
        # This test isn't testing txn relay/timing, so set whitelist on the
        # peers for instant txn relay. This speeds up the test run time 2-3x.
        self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes

    def skip_test_if_missing_module(self):
        # Requires wallet support compiled in.
        self.skip_if_no_wallet()

    def run_test(self):
        '''Set up initial chain and run tests defined below'''
        self.test_persistence()
        self.test_immutable()
        self.generate(self.nodes[0], 110)
        self.test_change_remains_change(self.nodes[1])
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_sending_from_reused_address_without_avoid_reuse()
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_sending_from_reused_address_fails("legacy")
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_sending_from_reused_address_fails("p2sh-segwit")
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_sending_from_reused_address_fails("bech32")
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_getbalances_used()
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_full_destination_group_is_preferred()
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_all_destination_groups_are_used()

    def test_persistence(self):
        '''Test that wallet files persist the avoid_reuse flag.'''
        self.log.info("Test wallet files persist avoid_reuse flag")
        # Configure node 1 to use avoid_reuse
        self.nodes[1].setwalletflag('avoid_reuse')
        # Flags should be node1.avoid_reuse=false, node2.avoid_reuse=true
        assert_equal(self.nodes[0].getwalletinfo()["avoid_reuse"], False)
        assert_equal(self.nodes[1].getwalletinfo()["avoid_reuse"], True)
        self.restart_node(1)
        self.connect_nodes(0, 1)
        # Flags should still be node1.avoid_reuse=false, node2.avoid_reuse=true
        assert_equal(self.nodes[0].getwalletinfo()["avoid_reuse"], False)
        assert_equal(self.nodes[1].getwalletinfo()["avoid_reuse"], True)
        # Attempting to set flag to its current state should throw
        assert_raises_rpc_error(-8, "Wallet flag is already set to false", self.nodes[0].setwalletflag, 'avoid_reuse', False)
        assert_raises_rpc_error(-8, "Wallet flag is already set to true", self.nodes[1].setwalletflag, 'avoid_reuse', True)

    def test_immutable(self):
        '''Test immutable wallet flags'''
        self.log.info("Test immutable wallet flags")
        # Attempt to set the disable_private_keys flag; this should not work
        assert_raises_rpc_error(-8, "Wallet flag is immutable", self.nodes[1].setwalletflag, 'disable_private_keys')
        tempwallet = ".wallet_avoidreuse.py_test_immutable_wallet.dat"
        # Create a wallet with disable_private_keys set; this should work
        self.nodes[1].createwallet(wallet_name=tempwallet, disable_private_keys=True)
        w = self.nodes[1].get_wallet_rpc(tempwallet)
        # Attempt to unset the disable_private_keys flag; this should not work
        assert_raises_rpc_error(-8, "Wallet flag is immutable", w.setwalletflag, 'disable_private_keys', False)
        # Unload temp wallet
        self.nodes[1].unloadwallet(tempwallet)

    def test_change_remains_change(self, node):
        self.log.info("Test that change doesn't turn into non-change when spent")
        reset_balance(node, node.getnewaddress())
        addr = node.getnewaddress()
        txid = node.sendtoaddress(addr, 1)
        out = node.listunspent(minconf=0, query_options={'minimumAmount': 2})
        assert_equal(len(out), 1)
        assert_equal(out[0]['txid'], txid)
        changeaddr = out[0]['address']
        # Make sure it's starting out as change as expected
        assert node.getaddressinfo(changeaddr)['ischange']
        for logical_tx in node.listtransactions():
            assert logical_tx.get('address') != changeaddr
        # Spend it
        reset_balance(node, node.getnewaddress())
        # It should still be change
        assert node.getaddressinfo(changeaddr)['ischange']
        for logical_tx in node.listtransactions():
            assert logical_tx.get('address') != changeaddr

    def test_sending_from_reused_address_without_avoid_reuse(self):
        '''
        Test the same as test_sending_from_reused_address_fails, except send the 10 SYS with
        the avoid_reuse flag set to false. This means the 10 SYS send should succeed,
        where it fails in test_sending_from_reused_address_fails.
        '''
        self.log.info("Test sending from reused address with avoid_reuse=false")
        fundaddr = self.nodes[1].getnewaddress()
        retaddr = self.nodes[0].getnewaddress()
        self.nodes[0].sendtoaddress(fundaddr, 10)
        self.generate(self.nodes[0], 1)
        # listunspent should show 1 single, unused 10 sys output
        assert_unspent(self.nodes[1], total_count=1, total_sum=10, reused_supported=True, reused_count=0)
        # getbalances should show no used, 10 sys trusted
        assert_balances(self.nodes[1], mine={"used": 0, "trusted": 10})
        # node 0 should not show a used entry, as it does not enable avoid_reuse
        assert("used" not in self.nodes[0].getbalances()["mine"])
        self.nodes[1].sendtoaddress(retaddr, 5)
        self.generate(self.nodes[0], 1)
        # listunspent should show 1 single, unused 5 sys output
        assert_unspent(self.nodes[1], total_count=1, total_sum=5, reused_supported=True, reused_count=0)
        # getbalances should show no used, 5 sys trusted
        assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5})
        self.nodes[0].sendtoaddress(fundaddr, 10)
        self.generate(self.nodes[0], 1)
        # listunspent should show 2 total outputs (5, 10 sys), one unused (5), one reused (10)
        assert_unspent(self.nodes[1], total_count=2, total_sum=15, reused_count=1, reused_sum=10)
        # getbalances should show 10 used, 5 sys trusted
        assert_balances(self.nodes[1], mine={"used": 10, "trusted": 5})
        self.nodes[1].sendtoaddress(address=retaddr, amount=10, avoid_reuse=False)
        # listunspent should show 1 total outputs (5 sys), unused
        assert_unspent(self.nodes[1], total_count=1, total_sum=5, reused_count=0)
        # getbalances should show no used, 5 sys trusted
        assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5})
        # node 1 should now have about 5 sys left (for both cases)
        assert_approx(self.nodes[1].getbalance(), 5, 0.001)
        assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 5, 0.001)

    def test_sending_from_reused_address_fails(self, second_addr_type):
        '''
        Test the simple case where [1] generates a new address A, then
        [0] sends 10 SYS to A.
        [1] spends 5 SYS from A. (leaving roughly 5 SYS useable)
        [0] sends 10 SYS to A again.
        [1] tries to spend 10 SYS (fails; dirty).
        [1] tries to spend 4 SYS (succeeds; change address sufficient)
        '''
        self.log.info("Test sending from reused {} address fails".format(second_addr_type))
        fundaddr = self.nodes[1].getnewaddress(label="", address_type="legacy")
        retaddr = self.nodes[0].getnewaddress()
        self.nodes[0].sendtoaddress(fundaddr, 10)
        self.generate(self.nodes[0], 1)
        # listunspent should show 1 single, unused 10 sys output
        assert_unspent(self.nodes[1], total_count=1, total_sum=10, reused_supported=True, reused_count=0)
        # getbalances should show no used, 10 sys trusted
        assert_balances(self.nodes[1], mine={"used": 0, "trusted": 10})
        self.nodes[1].sendtoaddress(retaddr, 5)
        self.generate(self.nodes[0], 1)
        # listunspent should show 1 single, unused 5 sys output
        assert_unspent(self.nodes[1], total_count=1, total_sum=5, reused_supported=True, reused_count=0)
        # getbalances should show no used, 5 sys trusted
        assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5})
        if not self.options.descriptors:
            # For the second send, we transmute it to a related single-key address
            # to make sure it's also detected as re-use
            fund_spk = self.nodes[0].getaddressinfo(fundaddr)["scriptPubKey"]
            fund_decoded = self.nodes[0].decodescript(fund_spk)
            if second_addr_type == "p2sh-segwit":
                new_fundaddr = fund_decoded["segwit"]["p2sh-segwit"]
            elif second_addr_type == "bech32":
                new_fundaddr = fund_decoded["segwit"]["address"]
            else:
                new_fundaddr = fundaddr
                assert_equal(second_addr_type, "legacy")
            self.nodes[0].sendtoaddress(new_fundaddr, 10)
            self.generate(self.nodes[0], 1)
            # listunspent should show 2 total outputs (5, 10 btc), one unused (5), one reused (10)
            assert_unspent(self.nodes[1], total_count=2, total_sum=15, reused_count=1, reused_sum=10)
            # getbalances should show 10 used, 5 btc trusted
            assert_balances(self.nodes[1], mine={"used": 10, "trusted": 5})
            # node 1 should now have a balance of 5 (no dirty) or 15 (including dirty)
            assert_approx(self.nodes[1].getbalance(), 5, 0.001)
            assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 15, 0.001)
            assert_raises_rpc_error(-6, "Insufficient funds", self.nodes[1].sendtoaddress, retaddr, 10)
            self.nodes[1].sendtoaddress(retaddr, 4)
            # listunspent should show 2 total outputs (1, 10 btc), one unused (1), one reused (10)
            assert_unspent(self.nodes[1], total_count=2, total_sum=11, reused_count=1, reused_sum=10)
            # getbalances should show 10 used, 1 btc trusted
            assert_balances(self.nodes[1], mine={"used": 10, "trusted": 1})
            # node 1 should now have about 1 btc left (no dirty) and 11 (including dirty)
            assert_approx(self.nodes[1].getbalance(), 1, 0.001)
            assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 11, 0.001)

    def test_getbalances_used(self):
        '''
        getbalances and listunspent should pick up on reused addresses
        immediately, even for address reusing outputs created before the first
        transaction was spending from that address
        '''
        self.log.info("Test getbalances used category")
        # node under test should be completely empty
        assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0)
        new_addr = self.nodes[1].getnewaddress()
        ret_addr = self.nodes[0].getnewaddress()
        # send multiple transactions, reusing one address
        for _ in range(101):
            self.nodes[0].sendtoaddress(new_addr, 1)
        self.generate(self.nodes[0], 1)
        # send transaction that should not use all the available outputs
        # per the current coin selection algorithm
        self.nodes[1].sendtoaddress(ret_addr, 5)
        # getbalances and listunspent should show the remaining outputs
        # in the reused address as used/reused
        assert_unspent(self.nodes[1], total_count=2, total_sum=96, reused_count=1, reused_sum=1, margin=0.01)
        assert_balances(self.nodes[1], mine={"used": 1, "trusted": 95}, margin=0.01)

    def test_full_destination_group_is_preferred(self):
        '''
        Test the case where [1] only has 101 outputs of 1 SYS in the same reused
        address and tries to send a small payment of 0.5 SYS. The wallet
        should use 100 outputs from the reused address as inputs and not a
        single 1 SYS input, in order to join several outputs from the reused
        address.
        '''
        self.log.info("Test that full destination groups are preferred in coin selection")
        # Node under test should be empty
        assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0)
        new_addr = self.nodes[1].getnewaddress()
        ret_addr = self.nodes[0].getnewaddress()
        # Send 101 outputs of 1 SYS to the same, reused address in the wallet
        for _ in range(101):
            self.nodes[0].sendtoaddress(new_addr, 1)
        self.generate(self.nodes[0], 1)
        # Sending a transaction that is smaller than each one of the
        # available outputs
        txid = self.nodes[1].sendtoaddress(address=ret_addr, amount=0.5)
        inputs = self.nodes[1].getrawtransaction(txid, 1)["vin"]
        # The transaction should use 100 inputs exactly
        assert_equal(len(inputs), 100)

    def test_all_destination_groups_are_used(self):
        '''
        Test the case where [1] only has 202 outputs of 1 SYS in the same reused
        address and tries to send a payment of 200.5 SYS. The wallet
        should use all 202 outputs from the reused address as inputs.
        '''
        self.log.info("Test that all destination groups are used")
        # Node under test should be empty
        assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0)
        new_addr = self.nodes[1].getnewaddress()
        ret_addr = self.nodes[0].getnewaddress()
        # Send 202 outputs of 1 SYS to the same, reused address in the wallet
        for _ in range(202):
            self.nodes[0].sendtoaddress(new_addr, 1)
        self.generate(self.nodes[0], 1)
        # Sending a transaction that needs to use the full groups
        # of 100 inputs but also the incomplete group of 2 inputs.
        txid = self.nodes[1].sendtoaddress(address=ret_addr, amount=200.5)
        inputs = self.nodes[1].getrawtransaction(txid, 1)["vin"]
        # The transaction should use 202 inputs exactly
        assert_equal(len(inputs), 202)
# Standard functional-test entry point.
if __name__ == '__main__':
    AvoidReuseTest().main()
| {
"content_hash": "d0aa3e3fa334916cde6a548e155e654c",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 132,
"avg_line_length": 44.43287671232877,
"alnum_prop": 0.6400295967443581,
"repo_name": "syscoin/syscoin",
"id": "738b5cadbb2c3a38dcc79cebcc4a47946fd5e55d",
"size": "16432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/wallet_avoidreuse.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1285088"
},
{
"name": "C++",
"bytes": "12653307"
},
{
"name": "CMake",
"bytes": "50978"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1721"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "30986"
},
{
"name": "JavaScript",
"bytes": "31802"
},
{
"name": "M4",
"bytes": "260893"
},
{
"name": "Makefile",
"bytes": "146223"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2965506"
},
{
"name": "QMake",
"bytes": "438"
},
{
"name": "Sage",
"bytes": "56850"
},
{
"name": "Scheme",
"bytes": "25953"
},
{
"name": "Shell",
"bytes": "212830"
},
{
"name": "TypeScript",
"bytes": "10706"
}
],
"symlink_target": ""
} |
import errno
import json
import logging
import os
import platform
import shutil
import stat
import socket
import subprocess
import time
import tempfile
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'wb')
from bottle import request
from mongo_orchestration.common import DEFAULT_BIND
from mongo_orchestration.compat import reraise
from mongo_orchestration.errors import TimeoutError, RequestError
from mongo_orchestration.singleton import Singleton
logger = logging.getLogger(__name__)
def _host():
    """Return the host portion of the current HTTP request's netloc.

    Falls back to DEFAULT_BIND when the netloc does not have exactly one
    ':' separator or the host part is empty.
    """
    netloc = request.urlparts[1]
    parts = netloc.split(':')
    if len(parts) != 2:
        # No port yet. Host defaults to '127.0.0.1' in bottle.request.
        return DEFAULT_BIND
    return parts[0] or DEFAULT_BIND
class PortPool(Singleton):
    """Singleton pool of TCP port numbers handed out to spawned processes."""

    __ports = set()    # ports currently available for allocation
    __closed = set()   # ports handed out (or known busy)
    __id = None        # set once to mark singleton initialization

    def __init__(self, min_port=1025, max_port=2000, port_sequence=None):
        """
        Args:
            min_port - min port number (ignored if 'port_sequence' is not None)
            max_port - max port number (ignored if 'port_sequence' is not None)
            port_sequence - iterable sequence which contains numbers of ports
        """
        if not self.__id:  # singleton checker: initialize the range only once
            self.__id = id(self)
            self.__init_range(min_port, max_port, port_sequence)

    def __init_range(self, min_port=1025, max_port=2000, port_sequence=None):
        if port_sequence:
            self.__ports = set(port_sequence)
        else:
            self.__ports = set(range(min_port, max_port + 1))
        self.__closed = set()
        self.refresh()

    def __check_port(self, port):
        """check port status
        return True if port is free, False else
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind((_host(), port))
            return True
        except socket.error:
            return False
        finally:
            s.close()

    def release_port(self, port):
        """release port"""
        if port in self.__closed:
            self.__closed.remove(port)
        self.__ports.add(port)

    def port(self, check=False):
        """return next opened port
        Args:
            check - check is port really free
        """
        if not self.__ports:  # refresh ports if sequence is empty
            self.refresh()
        try:
            port = self.__ports.pop()
            if check:
                while not self.__check_port(port):
                    self.release_port(port)
                    port = self.__ports.pop()
        except (IndexError, KeyError):
            raise IndexError("Could not find a free port,\nclosed ports: {closed}".format(closed=self.__closed))
        self.__closed.add(port)
        return port

    def refresh(self, only_closed=False):
        """refresh ports status
        Args:
            only_closed - check status only for closed ports
        """
        if only_closed:
            # BUGFIX: materialize the filter into a set. A bare filter()
            # iterator would be exhausted by difference() below and then
            # appear empty to union(), silently losing the freed ports.
            opened = set(filter(self.__check_port, self.__closed))
            self.__closed = self.__closed.difference(opened)
            self.__ports = self.__ports.union(opened)
        else:
            ports = self.__closed.union(self.__ports)
            self.__ports = set(filter(self.__check_port, ports))
            self.__closed = ports.difference(self.__ports)

    def change_range(self, min_port=1025, max_port=2000, port_sequence=None):
        """change Pool port range"""
        self.__init_range(min_port, max_port, port_sequence)
def wait_for(port_num, timeout):
    """Poll until something accepts TCP connections on port_num.

    Args:
        port_num - port number
        timeout - how long, in seconds, to keep trying
    Returns True once a connection succeeds, False on timeout.
    """
    logger.debug("wait for {port_num}".format(**locals()))
    deadline = time.time() + timeout
    retry_delay = 0.1
    while time.time() < deadline:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            try:
                sock.connect((_host(), port_num))
                return True
            except (IOError, socket.error):
                time.sleep(retry_delay)
        finally:
            sock.close()
    return False
def repair_mongo(name, dbpath):
    """Repair a mongod database after an unsafe shutdown.

    Runs ``name --dbpath dbpath --repair`` and waits up to 30 seconds for
    the completion marker or for the process to exit, whichever comes
    first.  The stdout pipe is always closed (the original leaked it and
    compared the marker against a bytes repr).
    """
    cmd = [name, "--dbpath", dbpath, "--repair"]
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            universal_newlines=True)
    try:
        timeout = 30
        t_start = time.time()
        while time.time() - t_start < timeout:
            line = proc.stdout.readline()
            if "dbexit: really exiting now" in line:
                return
            if not line and proc.poll() is not None:
                # Process exited without printing the marker; stop waiting.
                return
    finally:
        proc.stdout.close()
def mprocess(name, config_path, port=None, timeout=180, silence_stdout=True):
    """start 'name' process with params from config_path.
    Args:
        name - process name or path
        config_path - path to file where should be stored configuration
        port - process's port
        timeout - specify how long, in seconds, a command can take before times out.
            if timeout <=0 - doesn't wait for complete start process
        silence_stdout - if True (default), redirect stdout to /dev/null
    return tuple (Popen object, host) if process started, return (None, None) if not
    """
    logger.debug("mprocess({name}, {config_path}, {port}, {timeout})".format(**locals()))
    if not (config_path and isinstance(config_path, str) and os.path.exists(config_path)):
        raise OSError("can't find config file {config_path}".format(**locals()))
    cfg = read_config(config_path)
    cmd = [name, "--config", config_path]
    # Add an explicit --port only when the config file has none or the caller
    # pinned one; otherwise the config file's own port setting wins.
    if cfg.get('port', None) is None or port:
        port = port or PortPool().port(check=True)
        cmd.extend(['--port', str(port)])
    host = "{host}:{port}".format(host=_host(), port=port)
    try:
        logger.debug("execute process: {cmd}".format(**locals()))
        proc = subprocess.Popen(
            cmd,
            stdout=DEVNULL if silence_stdout else None,
            stderr=subprocess.STDOUT)
        # A process that exits immediately is treated as a start failure.
        if proc.poll() is not None:
            logger.debug("process is not alive")
            raise OSError("Process started, but died immediately.")
    except (OSError, TypeError) as err:
        message = "exception while executing process: {err}".format(err=err)
        logger.debug(message)
        raise OSError(message)
    if timeout > 0 and wait_for(port, timeout):
        # Port answered within the timeout: the process is considered up.
        logger.debug("process '{name}' has started: pid={proc.pid}, host={host}".format(**locals()))
        return (proc, host)
    elif timeout > 0:
        # Port never opened: tear the process down and report a timeout.
        logger.debug("hasn't connected to pid={proc.pid} with host={host} during timeout {timeout} ".format(**locals()))
        logger.debug("terminate process with pid={proc.pid}".format(**locals()))
        kill_mprocess(proc)
        proc_alive(proc) and time.sleep(3)  # wait while process stoped
        message = ("Could not connect to process during "
                   "{timeout} seconds".format(timeout=timeout))
        raise TimeoutError(errno.ETIMEDOUT, message)
    # timeout <= 0: return without waiting for the port to open.
    return (proc, host)
def kill_mprocess(process):
    """Terminate a Popen *process* if it is still running.

    Returns True when the process is no longer alive afterwards
    (a None/falsy process counts as already dead).
    """
    if not process:
        return True
    if proc_alive(process):
        process.terminate()
        process.communicate()
    return not proc_alive(process)
def cleanup_mprocess(config_path, cfg):
    """Remove every artifact a process left behind.

    Args:
        config_path - process's options file
        cfg - process's config dict (keyFile/logPath/dbpath entries are removed)
    """
    for key in ('keyFile', 'logPath', 'dbpath'):
        remove_path(cfg.get(key, None))
    if isinstance(config_path, str) and os.path.exists(config_path):
        remove_path(config_path)
def remove_path(path):
    """Delete *path* (file or directory tree) from the file system.

    A ``None`` or non-existent path is silently ignored.  Removal failures
    for files are logged, not raised.
    """
    if path is None or not os.path.exists(path):
        return
    if platform.system() == 'Windows':
        # Need to have write permission before deleting the file.
        os.chmod(path, stat.S_IWRITE)
    if os.path.isdir(path):
        shutil.rmtree(path)
    elif os.path.isfile(path):
        # elif: after rmtree the path is gone, so the extra stat is pointless.
        try:
            # os.remove, not shutil.os.remove: shutil.os is an accidental
            # alias of the os module, not part of shutil's API.
            os.remove(path)
        except OSError:
            logger.exception("Could not remove path: %s" % path)
def write_config(params, config_path=None):
    """Write a mongo* options file.

    Args:
        params - mapping of option name to value
        config_path - path for the config file; a temp file is created
            securely when None
    Returns config_path, the path of the written options file.
    """
    if config_path is None:
        # mkstemp instead of the deprecated, race-prone tempfile.mktemp:
        # the file is created atomically; we just close the fd and reuse
        # the path below.
        fd, config_path = tempfile.mkstemp(prefix="mongo-")
        os.close(fd)
    cfg = params.copy()
    if 'setParameter' in cfg:
        set_parameters = cfg.pop('setParameter')
        try:
            for key, value in set_parameters.items():
                cfg['setParameter = ' + key] = value
        except AttributeError:
            reraise(RequestError,
                    'Not a valid value for setParameter: %r '
                    'Expected "setParameter": {<param name> : value, ...}'
                    % set_parameters)
    # fix boolean value: JSON-encode so the config shows true/false.
    for key, value in cfg.items():
        if isinstance(value, bool):
            cfg[key] = json.dumps(value)
    with open(config_path, 'w') as fd:
        data = '\n'.join('%s=%s' % (key, item) for key, item in cfg.items())
        fd.write(data)
    return config_path
def read_config(config_path):
    """Parse a mongo* options file into a dict.

    Values are JSON-decoded when possible, otherwise kept as strings
    with the trailing newline stripped.  Lines without '=' are ignored.
    """
    options = {}
    with open(config_path, 'r') as fd:
        for line in fd.readlines():
            if '=' not in line:
                continue
            key, raw_value = line.split('=', 1)
            try:
                options[key] = json.loads(raw_value)
            except ValueError:
                options[key] = raw_value.rstrip('\n')
    return options
def proc_alive(process):
    """Return True when *process* is a live Popen; False for dead or falsy input."""
    if not process:
        return False
    return process.poll() is None
| {
"content_hash": "b94bd0d6beb9568e4ba0e67acf33eeda",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 120,
"avg_line_length": 33.593333333333334,
"alnum_prop": 0.5909902758483826,
"repo_name": "agilemobiledev/mongo-orchestration",
"id": "f50d2d1e4ccbae842e48986fe1da58262a886722",
"size": "10690",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mongo_orchestration/process.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "9908"
},
{
"name": "Python",
"bytes": "262650"
},
{
"name": "Shell",
"bytes": "9394"
}
],
"symlink_target": ""
} |
from sklearn.externals import joblib
from preprocessData import getDataXY, get_accuracy
# Load the train/validation/test splits once at import time.
trainX, trainY, testX, testY, validX, validY = getDataXY()
print type(trainY)
# Pre-trained classifier pickles to ensemble (filenames embed their accuracy).
models = ['Models/dt0.58.pkl', 'Models/NB0.59.pkl', 'Models/NC0.57.pkl',
          'Models/NNP0.61.pkl', 'Models/sgd0.54.pkl']
# Collect each model's predictions on the validation set, one list per model.
all_pre = []
for model in models:
    print model
    clf = joblib.load(model)
    predicY = clf.predict(validX)
    all_pre.append(predicY)
def voteIt(allResults):
    """Majority-vote between 'sing' and 'nosing' per sample across classifiers.

    allResults is a list of per-classifier prediction sequences of equal
    length.  A sample is labelled 'sing' only on a strict majority; ties go
    to 'nosing' (same rule as before).  Returns [] for empty input instead
    of raising IndexError on allResults[0].
    """
    votRes = []
    # zip(*...) transposes classifier-major results into per-sample tuples,
    # replacing the original nested index loops.
    for sample_votes in zip(*allResults):
        sample_votes = list(sample_votes)
        if sample_votes.count('sing') > sample_votes.count('nosing'):
            votRes.append('sing')
        else:
            votRes.append('nosing')
    return votRes
print get_accuracy(voteIt(all_pre), testY)
| {
"content_hash": "4066724a864ebea45d11322f69c0284c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 83,
"avg_line_length": 21.372093023255815,
"alnum_prop": 0.6235038084874864,
"repo_name": "TheaGao/SklearnModel",
"id": "f678b30434cfbcab5a3a8a898d0a71ec304355f1",
"size": "919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voteModels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38251"
}
],
"symlink_target": ""
} |
"""Test ACL."""
import datetime
import json
import webtest
from ceilometer.api import app
from ceilometer.api.controllers import v2 as v2_api
from ceilometer.openstack.common import timeutils
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer.tests import api as acl
from ceilometer.tests.api.v2 import FunctionalTest
from ceilometer.tests import db as tests_db
# Canned keystone token IDs recognized by FakeMemcache below.
VALID_TOKEN = '4562138218392831'
VALID_TOKEN2 = '4562138218392832'
class FakeMemcache(object):
    """Minimal memcache stub serving two pre-baked keystone token entries.

    The payload-building code was duplicated verbatim for both tokens;
    it is factored into _token_info so the two entries differ only in
    their user data.
    """

    @staticmethod
    def _token_info(token, user):
        """Build the (access-info, expiry) JSON payload for a cached token."""
        dt = timeutils.utcnow() + datetime.timedelta(minutes=5)
        return json.dumps(({'access': {
            'token': {'id': token},
            'user': user,
        }}, timeutils.isotime(dt)))

    @staticmethod
    def get(key):
        """Return the cached token document for the known tokens, else None."""
        if key == "tokens/%s" % VALID_TOKEN:
            return FakeMemcache._token_info(
                VALID_TOKEN,
                {'id': 'user_id1',
                 'name': 'user_name1',
                 'tenantId': '123i2910',
                 'tenantName': 'mytenant',
                 'roles': [
                     {'name': 'admin'},
                 ]})
        if key == "tokens/%s" % VALID_TOKEN2:
            return FakeMemcache._token_info(
                VALID_TOKEN2,
                {'id': 'user_id2',
                 'name': 'user-good',
                 'tenantId': 'project-good',
                 'tenantName': 'goodies',
                 'roles': [
                     {'name': 'Member'},
                 ]})

    @staticmethod
    def set(key, value, **kwargs):
        """No-op: writes to the fake cache are ignored."""
        pass
class TestAPIACL(FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
def setUp(self):
    """Record two metering samples so ACL tests have data to query."""
    super(TestAPIACL, self).setUp()
    # Injected into every request so the auth middleware uses FakeMemcache.
    self.environ = {'fake.cache': FakeMemcache()}
    for cnt in [
            sample.Sample(
                'meter.test',
                'cumulative',
                '',
                1,
                'user-good',
                'project-good',
                'resource-good',
                timestamp=datetime.datetime(2012, 7, 2, 10, 40),
                resource_metadata={'display_name': 'test-server',
                                   'tag': 'self.sample'},
                source='test_source'),
            sample.Sample(
                'meter.mine',
                'gauge',
                '',
                1,
                'user-fred',
                'project-good',
                'resource-56',
                timestamp=datetime.datetime(2012, 7, 2, 10, 43),
                resource_metadata={'display_name': 'test-server',
                                   'tag': 'self.sample4'},
                source='test_source')]:
        # Sign each sample with the configured metering secret before storing.
        msg = utils.meter_message_from_counter(
            cnt,
            self.CONF.publisher.metering_secret)
        self.conn.record_metering_data(msg)
def get_json(self, path, expect_errors=False, headers=None,
             q=None, **params):
    """GET *path* through the parent helper, injecting the fake-cache environ.

    q defaults to an empty query list; None is used as the sentinel to
    avoid the shared mutable-default-argument pitfall the original had
    (q=[]).  Callers passing q explicitly are unaffected.
    """
    return super(TestAPIACL, self).get_json(path,
                                            expect_errors=expect_errors,
                                            headers=headers,
                                            q=q if q is not None else [],
                                            extra_environ=self.environ,
                                            **params)
def _make_app(self):
    """Build the test WSGI app with the fake auth cache wired in."""
    self.CONF.set_override("cache", "fake.cache", group=acl.OPT_GROUP_NAME)
    paste_ini = self.path_get('etc/ceilometer/api_paste.ini')
    self.CONF.set_override("api_paste_config", paste_ini)
    return webtest.TestApp(app.load_app())
def test_non_authenticated(self):
    """Requests without credentials are rejected with 401."""
    resp = self.get_json('/meters', expect_errors=True)
    self.assertEqual(401, resp.status_int)
def test_authenticated_wrong_role(self):
    """A non-admin role may not list meters for the admin tenant."""
    headers = {
        "X-Roles": "Member",
        "X-Tenant-Name": "admin",
        "X-Project-Id": "bc23a9d531064583ace8f67dad60f6bb",
    }
    resp = self.get_json('/meters', expect_errors=True, headers=headers)
    self.assertEqual(401, resp.status_int)
# FIXME(dhellmann): This test is not properly looking at the tenant
# info. We do not correctly detect the improper tenant. That's
# really something the keystone middleware would have to do using
# the incoming token, which we aren't providing.
#
# def test_authenticated_wrong_tenant(self):
# response = self.get_json('/meters',
# expect_errors=True,
# headers={
# "X-Roles": "admin",
# "X-Tenant-Name": "achoo",
# "X-Project-Id": "bc23a9d531064583ace8f67dad60f6bb",
# })
# self.assertEqual(401, response.status_int)
def test_authenticated(self):
data = self.get_json('/meters',
headers={"X-Auth-Token": VALID_TOKEN,
"X-Roles": "admin",
"X-Tenant-Name": "admin",
"X-Project-Id":
"bc23a9d531064583ace8f67dad60f6bb",
})
ids = set(r['resource_id'] for r in data)
self.assertEqual(set(['resource-good', 'resource-56']), ids)
def test_with_non_admin_missing_project_query(self):
data = self.get_json('/meters',
headers={"X-Roles": "Member",
"X-Auth-Token": VALID_TOKEN2,
"X-Project-Id": "project-good"})
ids = set(r['resource_id'] for r in data)
self.assertEqual(set(['resource-good', 'resource-56']), ids)
def test_with_non_admin(self):
data = self.get_json('/meters',
headers={"X-Roles": "Member",
"X-Auth-Token": VALID_TOKEN2,
"X-Project-Id": "project-good"},
q=[{'field': 'project_id',
'value': 'project-good',
}])
ids = set(r['resource_id'] for r in data)
self.assertEqual(set(['resource-good', 'resource-56']), ids)
def test_non_admin_wrong_project(self):
data = self.get_json('/meters',
expect_errors=True,
headers={"X-Roles": "Member",
"X-Auth-Token": VALID_TOKEN2,
"X-Project-Id": "project-good"},
q=[{'field': 'project_id',
'value': 'project-wrong',
}])
self.assertEqual(401, data.status_int)
def test_non_admin_two_projects(self):
data = self.get_json('/meters',
expect_errors=True,
headers={"X-Roles": "Member",
"X-Auth-Token": VALID_TOKEN2,
"X-Project-Id": "project-good"},
q=[{'field': 'project_id',
'value': 'project-good',
},
{'field': 'project_id',
'value': 'project-naughty',
}])
self.assertEqual(401, data.status_int)
    def test_non_admin_get_events(self):
        """Non-admin access to /event_types must be denied with HTTP 401."""
        # NOTE(herndon): wsme does not handle the error that is being
        # raised by requires_admin due to the decorator ordering. wsme
        # does not play nice with other decorators, and so requires_admin
        # must call wsme.wsexpose, and not the other way around. The
        # implication is that I can't look at the status code in the
        # return value. Work around is to catch the exception here and
        # verify that the status code is correct.
        try:
            # Intentionally *not* using assertRaises here so I can look
            # at the status code of the exception.
            self.get_json('/event_types', expect_errors=True,
                          headers={"X-Roles": "Member",
                                   "X-Auth-Token": VALID_TOKEN2,
                                   "X-Project-Id": "project-good"})
        except v2_api.ClientSideError as ex:
            self.assertEqual(401, ex.code)
        else:
            # no exception at all means the ACL check never fired
            self.fail()
| {
"content_hash": "6b78446bf6490f81f60ad3a3182e48b4",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 79,
"avg_line_length": 42.051643192488264,
"alnum_prop": 0.4510438762978676,
"repo_name": "tanglei528/ceilometer",
"id": "0cd27b3ed00505ef65db09a613286b898496ad97",
"size": "9634",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ceilometer/tests/api/v2/test_acl_scenarios.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6284"
},
{
"name": "JavaScript",
"bytes": "64962"
},
{
"name": "Python",
"bytes": "2381636"
},
{
"name": "Shell",
"bytes": "995"
}
],
"symlink_target": ""
} |
try:
import progressbar
progressbar_loaded = True
except ImportError:
progressbar_loaded = False
import threading
class WrappedProgressBar:
"""Set up the command-line progress bar with max_value
Wraps the progressbar.py tool to avoid LGPL'ing this code
"""
def __init__(self, max_value):
self.max = max_value
self.done = 0
self.lock = threading.Lock()
if progressbar_loaded:
foo = [progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()]
self.bar = progressbar.ProgressBar(maxval=max_value, widgets=foo)
def increment(self):
with self.lock:
self.done += 1
if progressbar_loaded:
self.bar.update(self.done)
else:
print "\t", self.done, "/", self.max
self.update()
def finish(self):
with self.lock:
if progressbar_loaded:
self.bar.finish()
| {
"content_hash": "4fef1dedabf5749c851a338ed86af6d2",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 81,
"avg_line_length": 29.363636363636363,
"alnum_prop": 0.5675954592363261,
"repo_name": "timothy-king/telescope-exec",
"id": "52ae440babf2ad5f83c3fa8bae7fe76b5e55b34b",
"size": "969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/wrappedprogressbar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "OCaml",
"bytes": "2275"
},
{
"name": "Python",
"bytes": "65819"
},
{
"name": "Shell",
"bytes": "938"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import datetime
import errno
import os
import string
import subprocess
import sys
from json import dumps, loads
DEFAULT_DATA_TABLE_NAMES = ["mentalist_databases"]
def mentalist_download_enterobase( data_manager_dict, kmer_size, scheme, type, params, target_directory, data_table_names=DEFAULT_DATA_TABLE_NAMES ):
    """Build a MentaLiST database from Enterobase and register it.

    Runs `mentalist download_enterobase` in *target_directory* and, on
    success, appends a data-table entry for the new database to
    *data_manager_dict* under each name in *data_table_names*.
    Exits the process with the subprocess return code on failure.
    """
    scheme_to_organism = {
        'E': 'Escherichia/Shigella',
        'S': 'Salmonella',
        'Y': 'Yersinia'
    }
    organism = scheme_to_organism[scheme]
    # sanitize the organism name into a filesystem-safe base path
    punct_to_underscore = string.maketrans(string.punctuation, ("_" * 32))
    base_path = organism.lower().replace(" ", "_").translate(punct_to_underscore) + "_enterobase"
    today = datetime.date.today().isoformat()
    scheme_files_path = base_path + "_scheme_" + today
    database_path = base_path + "_k" + str(kmer_size) + "_" + today
    database_name = database_path + ".jld"
    display_name = organism + " k=" + str(kmer_size) + " (Enterobase) " + today
    cmd = [ 'mentalist', 'download_enterobase', '-s', scheme, '-t', type, '-k', str(kmer_size), '--db', database_name, '-o', scheme_files_path]
    proc = subprocess.Popen( args=cmd, shell=False, cwd=target_directory )
    return_code = proc.wait()
    if return_code:
        print("Error building database.", file=sys.stderr)
        sys.exit( return_code )
    entry = dict( value=database_path, dbkey='Enterobase', name=display_name, path=database_name )
    for table_name in data_table_names:
        _add_data_table_entry( data_manager_dict, table_name, entry )
def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
return data_manager_dict
def main():
    """Parse CLI args, build the Enterobase database, write data-manager JSON."""
    parser = argparse.ArgumentParser()
    parser.add_argument('params')
    parser.add_argument( '-s', '--scheme', dest='scheme', default=None, help="Scheme: ('E'=Escherichia/Shigella, 'S'=Salmonella, 'Y'=Yersinia)")
    parser.add_argument( '-k', '--kmer_size', dest='kmer_size', type=int, default=None, help='kmer Size' )
    parser.add_argument( '-t', '--type', dest='type', default=None, help="Type: ('cg'=cgMLST, 'wg'=wgMLST')")
    args = parser.parse_args()
    params = loads( open( args.params ).read() )
    target_directory = params[ 'output_data' ][0]['extra_files_path']
    # create the output directory, tolerating an already-existing one
    try:
        os.mkdir( target_directory )
    except OSError as exc:
        if not (exc.errno == errno.EEXIST and os.path.isdir( target_directory )):
            raise
    data_manager_dict = {}
    # build the index
    mentalist_download_enterobase( data_manager_dict, args.kmer_size, args.scheme, args.type, params, target_directory, DEFAULT_DATA_TABLE_NAMES )
    # save info to json file
    open( args.params, 'wb' ).write( dumps( data_manager_dict ) )
if __name__ == "__main__":
main()
| {
"content_hash": "f2eb21ceb9ce568a323400b35939df50",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 149,
"avg_line_length": 41.324675324675326,
"alnum_prop": 0.650534255185418,
"repo_name": "WGS-TB/MentaLiST",
"id": "59986ec079a4c7f858a46798d2e6aeb79e41243a",
"size": "3205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "galaxy/data_managers/data_manager_mentalist_download_enterobase/data_manager/mentalist_download_enterobase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Julia",
"bytes": "78744"
},
{
"name": "Python",
"bytes": "22586"
},
{
"name": "Shell",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""Dataset snapshot and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
COMPRESSION_GZIP = "GZIP"
COMPRESSION_NONE = None
class _SnapshotDataset(dataset_ops.UnaryUnchangedStructureDataset):
  """A Dataset that captures a snapshot or reads from a snapshot."""

  def __init__(self,
               input_dataset,
               path,
               compression=None,
               reader_path_prefix=None,
               writer_path_prefix=None,
               shard_size_bytes=None,
               pending_snapshot_expiry_seconds=None):
    # Substitute the documented defaults for any unset optional argument.
    self._compression = "" if compression is None else compression
    self._reader_path_prefix = ("" if reader_path_prefix is None
                                else reader_path_prefix)
    self._writer_path_prefix = ("" if writer_path_prefix is None
                                else writer_path_prefix)
    if shard_size_bytes is None:
      # default shard size: 10 GiB
      shard_size_bytes = 10 * 1024 * 1024 * 1024
    self._shard_size_bytes = shard_size_bytes
    if pending_snapshot_expiry_seconds is None:
      # default expiry for pending snapshots: one day, in seconds
      pending_snapshot_expiry_seconds = 86400
    self._pending_snapshot_expiry_seconds = pending_snapshot_expiry_seconds
    self._input_dataset = input_dataset
    self._path = ops.convert_to_tensor(path, dtype=dtypes.string, name="path")
    variant_tensor = ged_ops.snapshot_dataset(
        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
        path=self._path,
        compression=self._compression,
        reader_path_prefix=self._reader_path_prefix,
        writer_path_prefix=self._writer_path_prefix,
        shard_size_bytes=self._shard_size_bytes,
        pending_snapshot_expiry_seconds=self._pending_snapshot_expiry_seconds,
        **dataset_ops.flat_structure(self))
    super(_SnapshotDataset, self).__init__(input_dataset, variant_tensor)
def snapshot(path,
             compression=None,
             reader_path_prefix=None,
             writer_path_prefix=None,
             shard_size_bytes=None,
             pending_snapshot_expiry_seconds=None):
  """Writes to/reads from a snapshot of a dataset.

  When applied, this transformation checks whether a valid snapshot exists
  at `path` and reads from it if so; otherwise it runs the upstream
  pipeline as usual and writes a snapshot of the processed data for
  future runs.

  Args:
    path: A directory where we want to save our snapshots and/or read from a
      previously saved snapshot.
    compression: The type of compression to apply to the Dataset. Currently
      supports "GZIP" or None. Defaults to None (no compression).
    reader_path_prefix: A prefix to add to the path when reading from
      snapshots. Defaults to None.
    writer_path_prefix: A prefix to add to the path when writing to
      snapshots. Defaults to None.
    shard_size_bytes: The size of each shard to be written by the snapshot
      dataset op. Defaults to 10 GiB.
    pending_snapshot_expiry_seconds: How long to wait (in seconds) before
      the snapshot op considers a previously unfinished snapshot to be stale.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """
  def _apply_fn(dataset):
    # Construction is deferred to apply-time so each pipeline gets its
    # own dataset op instance.
    return _SnapshotDataset(dataset,
                            path,
                            compression=compression,
                            reader_path_prefix=reader_path_prefix,
                            writer_path_prefix=writer_path_prefix,
                            shard_size_bytes=shard_size_bytes,
                            pending_snapshot_expiry_seconds=(
                                pending_snapshot_expiry_seconds))
  return _apply_fn
| {
"content_hash": "e2fc046caf6e34c8a7b24ccbf927520e",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 80,
"avg_line_length": 39.95652173913044,
"alnum_prop": 0.6817192600652884,
"repo_name": "alsrgv/tensorflow",
"id": "9581f73748000a8e7285e9ad441b1859ce9f943e",
"size": "4365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/ops/snapshot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "755360"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "68001148"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1627121"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "842866"
},
{
"name": "Jupyter Notebook",
"bytes": "1665584"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101157"
},
{
"name": "Objective-C",
"bytes": "104061"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17570"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48843099"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "488241"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
} |
import os
from taxinet import TaxiNet, TaxiCombinerNet
import torch
import pandas as pd
import numpy as np
from datetime import datetime
from math import sin, cos, sqrt, atan2, radians
from torch.autograd import Variable
from random import random
from sklearn.decomposition import PCA
from sklearn.cluster import MiniBatchKMeans
import xgbhelpers as h
import xgboost as xgb
# TODO args feature
# Top-level training script: engineers features for the NYC taxi dataset,
# trains an ensemble of neural nets plus an XGBoost model, stacks them with
# a combiner net, and writes submission files + model checkpoints.
RUN_FEATURE_EXTRACTION = False
MAX_DISTANCE = 100 * 10**3 # 100 km
MAX_DURATION = 24 * 60 * 60 # 24 hours
ENSEMBLE_COUNT = 2
# ===============================
# Date extraction
# ===============================
if (RUN_FEATURE_EXTRACTION):
    # read data
    test = pd.read_csv('./data/test.csv')
    train = pd.read_csv('./data/train.csv')
    # label data
    test['set'] = 'test'
    train['set'] = 'train'
    # instantiate the loss column in the test set so that schemas match
    test['trip_duration'] = np.NaN
    # union `join='outer'` the train and test data so that encoding can be done holistically
    # and reset the index to be monotically increasing
    combined = pd.concat([test, train], join='outer')
    combined.set_index([list(range(0, combined.shape[0]))], inplace=True)
    # ===============================
    # Feature engineering and cleanup
    # ===============================
    # drop unneeded column(s)
    # store_and_fwd_flag looks meaningless and dropoff_datetime is only available
    # in the train, and is redundant with trip_duration - pickup_datetime
    combined.drop(['store_and_fwd_flag', 'dropoff_datetime'], axis=1, inplace=True)
    # segment datetime into year / month / day / hour columns
    # this should help training differentiate between e.g. weekends vs. weekdays
    # first encode string as datetime object
    combined['pickup_datetime'] = combined['pickup_datetime'].apply(
        lambda dt: datetime.strptime(dt, '%Y-%m-%d %H:%M:%S'))
    # then encode the important components as integers (ignoring seconds to keep the training simpler)
    combined['year'] = combined['pickup_datetime'].apply(lambda dt: dt.year)
    combined['month'] = combined['pickup_datetime'].apply(lambda dt: dt.month)
    combined['week'] = combined['pickup_datetime'].apply(lambda dt: dt.week)
    combined['weekday'] = combined['pickup_datetime'].apply(lambda dt: dt.weekday())
    combined['hour'] = combined['pickup_datetime'].apply(lambda dt: dt.hour)
    combined['minute'] = combined['pickup_datetime'].apply(lambda dt: dt.minute)
    # finally drop the datetime object which won't be useful for training
    combined.drop('pickup_datetime', axis=1, inplace=True)
    # round latitude/longitude to a practical precision. 5 decimal places == 1 meter
    # (should not affect loss estimate but should improve training)
    combined['dropoff_latitude'] = combined['dropoff_latitude'].apply(lambda loc: round(loc, 5))
    combined['dropoff_longitude'] = combined['dropoff_longitude'].apply(lambda loc: round(loc, 5))
    combined['pickup_latitude'] = combined['pickup_latitude'].apply(lambda loc: round(loc, 5))
    combined['pickup_longitude'] = combined['pickup_longitude'].apply(lambda loc: round(loc, 5))
    # 1 km clusters
    # (2 decimal places of lat/lon is roughly 1 km of resolution)
    combined['dropoff_latitude_cluster'] = combined['dropoff_latitude'].apply(lambda loc: round(loc, 2))
    combined['dropoff_longitude_cluster'] = combined['dropoff_longitude'].apply(lambda loc: round(loc, 2))
    combined['pickup_latitude_cluster'] = combined['pickup_latitude'].apply(lambda loc: round(loc, 2))
    combined['pickup_longitude_cluster'] = combined['pickup_longitude'].apply(lambda loc: round(loc, 2))
    # compute an "as the crow flies" curvature distance to nearest 10th of a meter
    # https://en.wikipedia.org/wiki/Haversine_formula
    R = 6373000 # approximate radius of earth in meters
    rad = lambda coor: radians(abs(coor))
    a = lambda lat1, lon1, lat2, lon2: \
        sin((rad(lat2) - rad(lat1)) / 2)**2 + \
        cos(rad(lat1)) * cos(rad(lat2)) * sin((rad(lon2) - rad(lon1)) / 2)**2
    c = lambda a: 2 * atan2(sqrt(a), sqrt(1 - a))
    distance = lambda lat1, long1, lat2, lon2: round(R * c(a(lat1, long1, lat2, lon2)), 1)
    combined['crows_distance'] = combined.apply(
        lambda row: distance(
            row['dropoff_latitude'],
            row['dropoff_longitude'],
            row['pickup_latitude'],
            row['pickup_longitude']),
        axis=1)
    # drop suspicious rows from the training data
    # (unreasonably long trips/distances are assumed to be data errors;
    # test rows are always kept so the submission stays complete)
    combined = combined[
        (combined['set'] == 'test') |
        ((combined['set'] == 'train') &
         (combined['crows_distance'] <= MAX_DISTANCE) &
         (combined['trip_duration'] <= MAX_DURATION))]
    # PCA looks pointless, but who knows.
    # (fits on pickup+dropoff coordinates stacked together)
    coords = np.vstack((combined[['pickup_latitude', 'pickup_longitude']].values,
                        combined[['dropoff_latitude', 'dropoff_longitude']].values))
    pca = PCA().fit(coords)
    combined['pickup_pca0'] = pca.transform(combined[['pickup_latitude', 'pickup_longitude']])[:, 0]
    combined['pickup_pca1'] = pca.transform(combined[['pickup_latitude', 'pickup_longitude']])[:, 1]
    combined['dropoff_pca0'] = pca.transform(combined[['dropoff_latitude', 'dropoff_longitude']])[:, 0]
    combined['dropoff_pca1'] = pca.transform(combined[['dropoff_latitude', 'dropoff_longitude']])[:, 1]
    combined['pca_manhattan'] = \
        np.abs(combined['dropoff_pca1'] - combined['pickup_pca1']) + \
        np.abs(combined['dropoff_pca0'] - combined['pickup_pca0'])
    # cluster the lat/lon using KMeans
    # (fit on a 500k random sample for speed, then predict on everything)
    sample_ind = np.random.permutation(len(coords))[:500000]
    kmeans = MiniBatchKMeans(n_clusters=100, batch_size=10000).fit(coords[sample_ind])
    combined['pickup_cluster'] = kmeans.predict(combined[['pickup_latitude', 'pickup_longitude']])
    combined['dropoff_cluster'] = kmeans.predict(combined[['dropoff_latitude', 'dropoff_longitude']])
    combined.to_csv('./data/combined.csv', sep=',', index=None)
else:
    # already done pre=processing
    combined = pd.read_csv('./data/combined.csv')
# take the log of the measure. this'll give a normal distribution as well as
# allow us to use RMSE as the loss function instead of RMSLE on the original
# (the max(0.01, t) clamp avoids log(0) on zero-duration rows)
combined['trip_duration'] = combined['trip_duration'].apply(lambda t: max(0.01, t))
combined['trip_duration'] = np.log(combined['trip_duration'].values)
# bring in external data about actual distance by road
# source: https://www.kaggle.com/oscarleo/new-york-city-taxi-with-osrm
usecols = ['id', 'total_distance', 'total_travel_time', 'number_of_steps']
fr1 = pd.read_csv('./data/osrm/fastest_routes_train_part_1.csv', usecols=usecols)
fr2 = pd.read_csv('./data/osrm/fastest_routes_train_part_2.csv', usecols=usecols)
test_street_info = pd.read_csv('./data/osrm/fastest_routes_test.csv', usecols=usecols)
train_street_info = pd.concat([fr1, fr2])
train = combined[combined['set'] == 'train'] # filter back down to train rows
train = train.merge(train_street_info, how='left', on='id')
train.dropna(inplace=True) # there was 1 null row introduced by the join
# ==============================================
# Train XGB model to estimate trip duration
# ==============================================
exclude = ['id', 'set'] # we won't use these columns for training
loss_column = 'trip_duration' # this is what we're trying to predict
features = [col for col in train.columns if col not in exclude and col != loss_column]
print('\nTraining and scoring XGBoost...')
baseline_params = h.get_params(algorithm='xgb', ptype='start', ver=3)
#alg = xgb.XGBClassifier( **baseline_params )
alg = xgb.XGBRegressor( **baseline_params )
# tune the model
xgb_model, importance = \
    h.fit_model(
        alg,
        train,
        features=features,
        loss=loss_column,
        useTrainCV=True,
        folds=5,
        metrics=['rmse'],
        chatty=2,
        show_report=True)
# ==============================================
# Produce estimates for XGB
# ==============================================
X = train[features]
# ntree_limit exists only when early stopping was triggered during CV
if hasattr(xgb_model, 'best_ntree_limit'):
    xgb_ytmp = xgb_model.predict(X, ntree_limit=xgb_model.best_ntree_limit)
else:
    xgb_ytmp = xgb_model.predict(X)
# reshape for tensor input
xgb_ytmp = xgb_ytmp.reshape(xgb_ytmp.shape[0], 1)
# ==============================================
# Train the neural net to estimate trip duration
# ==============================================
epochs = 40 # number of passes across the training data
batch_size = 2**9 # number of samples trained per pass
# (use big batches when using batchnorm)
lr_decay_factor = 0.5
lr_decay_epoch = max(1, round(lr_decay_factor * 0.6 * epochs))
early_stopping_rounds = 26
lr = 0.013
cv = 0.1
feature_count = len(features)
# instantiate the neural net(s)
# each ensemble member gets a slightly jittered learning rate for diversity
nets = [
    TaxiNet(
        feature_count,
        learn_rate=lr + (lr * (random() - 0.5) * 0.4), # decays over time (+- 40%)
        cuda=False,
        max_output=MAX_DURATION) for _ in range(ENSEMBLE_COUNT)
]
# train each neural net
trained_nets = []
estimates = []
_, train_x, _ = next(nets[0].get_batches(train, loss_column, batch_size=train.shape[0], exclude=exclude))
# NOTE(review): volatile looks like legacy (pre-0.4) PyTorch inference mode
# (disables autograd history) -- confirm the torch version in use
train_x.volatile=True
for ii, net in enumerate(nets):
    print("Training net {}.".format(ii))
    net.learn_loop(train, loss_column, epochs, batch_size, exclude,
                   lr_decay_factor, lr_decay_epoch, cv, early_stopping_rounds)
    estimate = net.forward(train_x)
    trained_nets.append(net)
    estimates.append(estimate.data.numpy())
# arrange the estimates of the ensemble as features into a new design matrix
# column order: net_0..net_{N-1}, xgb, crows_distance, loss (last)
estimates.append(xgb_ytmp)
estimates.append(train['crows_distance'].values.reshape(train['crows_distance'].values.shape[0], 1))
estimates.append(train[loss_column].values.reshape(train[loss_column].values.shape[0], 1))
new_features = np.hstack(estimates)
new_features = pd.DataFrame(new_features)
# train the stacked model
print("Training regressor net.")
exclude = []
# the stacked regressor will have the N neural nets + the XGB model as input
stacked_feature_count = (new_features.shape[1] - 1)
stacked_regressor = TaxiCombinerNet(stacked_feature_count, learn_rate=0.004, max_output=MAX_DURATION)
stacked_regressor.learn_loop(
    new_features,
    stacked_feature_count, # the last column is the loss column
    epochs,
    batch_size,
    exclude=[],
    lr_decay_factor=lr_decay_factor,
    lr_decay_epoch=lr_decay_epoch,
    cv=cv,
    early_stopping_rounds=early_stopping_rounds)
# ==============================================
# Produce estimates for the test set
# ==============================================
exclude = ['id', 'set'] # we won't use these columns for training
test_estimates = []
test = combined[combined['set'] == 'test'] # filter back down to test rows
test = test.merge(test_street_info, how='left', on='id')
_, test_x, test_y = next(stacked_regressor.get_batches(test, loss_column, batch_size=test.shape[0], exclude=exclude))
test_x.volatile = True
test_y.volatile = True
for ii, net in enumerate(trained_nets): # TODO : multiprocess
    print("Evaluating net {}.".format(ii))
    test_estimate = net.forward(test_x)
    test_estimates.append(test_estimate.data.numpy())
# predict model (check for early stopping rounds)
print('Evaluating XGB.')
X = test[features]
if hasattr(xgb_model, 'best_ntree_limit'):
    xgb_ytmp = xgb_model.predict(X, ntree_limit=xgb_model.best_ntree_limit)
else:
    xgb_ytmp = xgb_model.predict(X)
# reshape for tensor input
xgb_ytmp = xgb_ytmp.reshape(xgb_ytmp.shape[0], 1)
test_estimates.append(xgb_ytmp)
test_estimates.append(test['crows_distance'].values.reshape(test['crows_distance'].values.shape[0], 1))
print("Evaluating regressor.")
test_estimates = np.hstack(test_estimates)
test_estimates = Variable(torch.Tensor(test_estimates), volatile=True)
exclude = []
test_output = stacked_regressor(test_estimates)
if stacked_regressor.cuda:
    # NOTE(review): this branch looks wrong -- the non-cuda branch reads
    # test_output.data.numpy(), so this one presumably needs
    # test_output.cpu().data.numpy(); confirm against TaxiCombinerNet
    test[loss_column] = test_output.cpu().numpy()
else:
    test[loss_column] = test_output.data.numpy()
# convert from log space back to linear space for final estimates
test[loss_column] = np.exp(test[loss_column].values)
# column index 2 is the XGB estimate -- assumes ENSEMBLE_COUNT == 2; TODO
# derive the index from ENSEMBLE_COUNT instead of hard-coding it
test['trip_duration_xgb'] = np.exp(test_estimates.data.numpy()[:,2].reshape(test_estimates.data.shape[0], 1))
test_end_time = datetime.utcnow()
test_out = test[['id', loss_column]]
test_out_xgb = test[['id', 'trip_duration_xgb']]
test_out_xgb.columns = ['id', 'trip_duration']
# model artifacts directory named by timestamp + best CV loss
model_path = \
    './models/{}_{:.3}'.format(
        datetime.strftime(test_end_time,"%Y-%m-%d-%H-%M-%S"),
        stacked_regressor.best_cv_loss)
os.mkdir(model_path)
test_out.to_csv('{}/submission.csv'.format(model_path), sep=',', index=None)
test_out_xgb.to_csv('{}/submission_xgbonly.csv'.format(model_path), sep=',', index=None)
torch.save(stacked_regressor.state_dict(), '{}/regressor.nn'.format(model_path))
for ii, n in enumerate(trained_nets):
    torch.save(n.state_dict(), '{}/ensemble_{}.nn'.format(model_path, ii))
# save the XGB model and a standalone model submission
h.save_model(xgb_model, '{}/xgb.model'.format(model_path))
| {
"content_hash": "58dae4188e5d2a86eab87b52b6356f2b",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 117,
"avg_line_length": 42.07936507936508,
"alnum_prop": 0.6491889852885704,
"repo_name": "deo1/deo1",
"id": "1328f57366ebcaf29f8866c25fa8ddeba19ec9c5",
"size": "13648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "KaggleNycTaxi/load_train_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "80598"
},
{
"name": "Python",
"bytes": "183821"
}
],
"symlink_target": ""
} |
import os
import collections
import time
import datetime
import threading
import copy
import pickle
import traceback
if os.name=='nt': # Windows
import _winreg as winreg
elif os.name=='posix': # Linux
import glob
import json
from SmartMeshSDK import sdk_version
from SmartMeshSDK.utils import FormatUtils as u, \
SerialScanner
from SmartMeshSDK.IpMgrConnectorSerial import IpMgrConnectorSerial
from SmartMeshSDK.IpMgrConnectorMux import IpMgrSubscribe
from SmartMeshSDK.ApiException import APIError, \
ConnectionError
from SmartMeshSDK.protocols.Hr import HrParser
from SmartMeshSDK.protocols.oap import OAPDispatcher, \
OAPClient, \
OAPMessage, \
OAPDefines as oapdefs
#============================ helpers =========================================
def currentUtcTime():
    """Return the current UTC time, e.g. 'Mon, 01 Jan 2001 12:00:00 UTC'."""
    fmt = "%a, %d %b %Y %H:%M:%S UTC"
    return time.strftime(fmt, time.gmtime())
def logCrash(threadName,err):
output = []
output += ["============================================================="]
output += [currentUtcTime()]
output += [""]
output += ["CRASH in Thread {0}!".format(threadName)]
output += [""]
output += ["=== exception type ==="]
output += [str(type(err))]
output += [""]
output += ["=== traceback ==="]
output += [traceback.format_exc()]
output = '\n'.join(output)
print output
def reversedict(d):
    """Return a new dict mapping each value of *d* back to its key."""
    return {v: k for (k, v) in d.items()}
def stringifyMacIpAddresses(indict):
    '''
    Format raw MAC (8-byte) and IPv6 (16-byte) address lists as strings.

    in: {
        'field1': 123,
        'macAddress': [0,1,2,3,4,5,6,7],
    }
    out: {
        'field1': 123,
        'macAddress': '00-01-02-03-04-05-06-07',
    }
    Note: the dict is modified in place and also returned.
    '''
    conversions = [
        (['macAddress','source','dest'],  8, u.formatMacString),
        (['ipv6Address'],                16, u.formatIpString),
    ]
    for (fields, numBytes, formatter) in conversions:
        for field in fields:
            try:
                assert len(indict[field])==numBytes
                indict[field] = formatter(indict[field])
            except KeyError:
                pass
    return indict
def destringifyMacAddresses(d):
    '''
    Convert MAC address display strings in *d* back into byte lists.

    in: {
        'field1': 123,
        'macAddress': '00-01-02-03-04-05-06-07',
    }
    out: {
        'field1': 123,
        'macAddress': [0,1,2,3,4,5,6,7],
    }
    The dict is modified in place.

    returns whether at least one field was destringified
    '''
    # string types differ between Python 2 (str+unicode) and Python 3 (str);
    # compute once here so the check works under either interpreter
    try:
        stringTypes = (str, unicode)
    except NameError:
        stringTypes = (str,)
    wasDestringified = False
    for name in ['macAddress','source','dest']:
        try:
            # was: type(d[name]) in [str, unicode] -- isinstance is the
            # idiomatic, subclass-safe check
            if isinstance(d[name], stringTypes):
                d[name] = [int(b,16) for b in d[name].split('-')]
                wasDestringified = True
        except KeyError:
            pass
    return wasDestringified
#============================ classes =========================================
class ManagerHandler(threading.Thread):
    '''
    \brief Connects to the manager, re-connects automatically

    Owns the serial connection to one SmartMesh IP manager. The thread's
    run() loop connects, subscribes to notifications, then blocks until an
    error/finish notification signals reconnectEvent, after which it
    disconnects and reconnects. All notifications are forwarded to the
    notifHandler callback supplied by the creator.
    '''
    def __init__(self,serialport,notifHandler):
        # store params
        # serialport:   name of the serial port the manager is attached to
        # notifHandler: callable(serialport, notif_name, dust_notif)
        self.serialport = serialport
        self.notifHandler = notifHandler
        # local variables
        # reconnectEvent is set by _notifErrorFinish to trigger a reconnect
        self.reconnectEvent = threading.Event()
        self.dataLock = threading.RLock()
        self.connector = None
        self.goOn = True
        # start the thread
        threading.Thread.__init__(self)
        self.name = 'ManagerHandler@{0}'.format(self.serialport)
        self.start()
    def run(self):
        # Connect/subscribe loop; retries every second on failure and
        # reconnects whenever reconnectEvent fires.
        try:
            while self.goOn:
                try:
                    # connect to the manager
                    self.connector = IpMgrConnectorSerial.IpMgrConnectorSerial()
                    self.connector.connect({
                        'port': self.serialport,
                    })
                    # subscribe to notifications
                    # data notifications: unreliable (isRlbl=False) delivery
                    self.subscriber = IpMgrSubscribe.IpMgrSubscribe(self.connector)
                    self.subscriber.start()
                    self.subscriber.subscribe(
                        notifTypes =    [
                                            IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA,
                                        ],
                        fun =           self._notifAll,
                        isRlbl =        False,
                    )
                    # event/HR/ipdata/log notifications: reliable delivery
                    self.subscriber.subscribe(
                        notifTypes =    [
                                            IpMgrSubscribe.IpMgrSubscribe.NOTIFEVENT,
                                            IpMgrSubscribe.IpMgrSubscribe.NOTIFHEALTHREPORT,
                                            IpMgrSubscribe.IpMgrSubscribe.NOTIFIPDATA,
                                            IpMgrSubscribe.IpMgrSubscribe.NOTIFLOG,
                                        ],
                        fun =           self._notifAll,
                        isRlbl =        True,
                    )
                    # error/finish notifications drive the reconnect logic
                    self.subscriber.subscribe(
                        notifTypes =    [
                                            IpMgrSubscribe.IpMgrSubscribe.ERROR,
                                            IpMgrSubscribe.IpMgrSubscribe.FINISH,
                                        ],
                        fun =           self._notifErrorFinish,
                        isRlbl =        True,
                    )
                except Exception as err:
                    # connect/subscribe failed: tear down and retry shortly
                    try:
                        self.connector.disconnect()
                    except Exception:
                        pass
                    # wait to reconnect
                    time.sleep(1)
                else:
                    # connected: block until an error/finish notification
                    # requests a reconnect
                    self.reconnectEvent.clear()
                    self.reconnectEvent.wait()
                    try:
                        self.connector.disconnect()
                    except Exception:
                        pass
        except Exception as err:
            logCrash(self.name,err)
    #======================== public ==========================================
    def close(self):
        # Stop the thread: drop the connection and clear the run-loop flag.
        try:
            self.connector.disconnect()
        except Exception:
            pass
        self.goOn = False
    def isConnected(self):
        # True while the underlying serial connector reports connected;
        # False before the first successful connect (connector is None).
        try:
            return self.connector.isConnected
        except AttributeError:
            return False
    #======================== private =========================================
    #=== Dust API notifications
    def _notifAll(self, notif_name, dust_notif):
        # Forward every manager notification to the creator's handler,
        # tagged with the serial port it arrived on.
        try:
            self.notifHandler(
                self.serialport,
                notif_name,
                dust_notif,
            )
        except Exception as err:
            logCrash(self.name,err)
    def _notifErrorFinish(self,notifName,dust_notif):
        # Called on ERROR/FINISH; wakes run() so it reconnects.
        try:
            assert notifName in [
                IpMgrSubscribe.IpMgrSubscribe.ERROR,
                IpMgrSubscribe.IpMgrSubscribe.FINISH,
            ]
            if not self.reconnectEvent.isSet():
                self.reconnectEvent.set()
        except Exception as err:
            logCrash(self.name,err)
class SnapshotThread(threading.Thread):
    '''
    \brief One instance per JsonManager. Waits to be triggered, then takes a
    full network snapshot of one manager and publishes it through notifCb.
    '''
    def __init__(self,raw_POST,notifCb):
        """
        \param raw_POST function used to issue raw API commands to a manager
        \param notifCb  callback used to publish the 'snapshot' notification
        """
        # store params
        self.raw_POST        = raw_POST
        self.notifCb         = notifCb
        # local variable
        self.dataLock        = threading.RLock()       # protects snapshotsTodo and lastsnapshots
        self.snapshotsTodo   = []                      # queue of (manager, correlationID) pairs
        self.snapshotnowSem  = threading.Semaphore(0)  # released once per queued snapshot
        self.lastsnapshots   = {}                      # last completed snapshot, per manager
        # start the thread
        threading.Thread.__init__(self)
        self.name            = 'SnapshotThread'
        self.daemon          = True
        self.start()
    def run(self):
        """Main loop: wait for doSnapshot() triggers and process them one by one.

        For each trigger, issues a sequence of raw API commands (system,
        network, per-mote and per-path info) against the target manager,
        stores the result in lastsnapshots, and posts a 'snapshot'
        notification (valid=False with the error text on failure).
        """
        try:
            while True:
                # wait for trigger
                self.snapshotnowSem.acquire()
                # get the manager and correlationID
                with self.dataLock:
                    (manager,correlationID) = self.snapshotsTodo.pop(0)
                # do the snapshot
                try:
                    snapshot = {}
                    # timestamp_start
                    snapshot['timestamp_start'] = currentUtcTime()
                    # getSystemInfo()
                    resp = self.raw_POST(
                        commandArray = ["getSystemInfo"],
                        fields       = {},
                        manager      = manager,
                    )
                    snapshot['getSystemInfo'] = stringifyMacIpAddresses(resp)
                    # getNetworkConfig()
                    resp = self.raw_POST(
                        commandArray = ["getNetworkConfig"],
                        fields       = {},
                        manager      = manager,
                    )
                    snapshot['getNetworkConfig'] = stringifyMacIpAddresses(resp)
                    # getNetworkInfo()
                    resp = self.raw_POST(
                        commandArray = ["getNetworkInfo"],
                        fields       = {},
                        manager      = manager,
                    )
                    snapshot['getNetworkInfo'] = stringifyMacIpAddresses(resp)
                    # getMoteConfig() on all motes
                    # iterate with "next"=True starting from the all-zero MAC
                    # until the manager returns a non-zero RC (end of list)
                    snapshot['getMoteConfig'] = {}
                    macs = []
                    currentMac = [0]*8
                    while True:
                        resp = self.raw_POST(
                            commandArray = ["getMoteConfig"],
                            fields       = {
                                "macAddress": currentMac,
                                "next": True
                            },
                            manager      = manager,
                        )
                        if resp['RC'] != 0:
                            break
                        mac = resp['macAddress']
                        macString = u.formatMacString(mac)
                        snapshot['getMoteConfig'][macString] = stringifyMacIpAddresses(resp)
                        macs += [mac]
                        currentMac = mac
                    # getMoteInfo() on all motes
                    snapshot['getMoteInfo'] = {}
                    for mac in macs:
                        resp = self.raw_POST(
                            commandArray = ["getMoteInfo"],
                            fields       = {
                                "macAddress": mac,
                            },
                            manager      = manager,
                        )
                        macString = u.formatMacString(mac)
                        snapshot['getMoteInfo'][macString] = stringifyMacIpAddresses(resp)
                    # getPathInfo() on all paths on all motes
                    snapshot['getPathInfo'] = {}
                    for mac in macs:
                        macString = u.formatMacString(mac)
                        snapshot['getPathInfo'][macString] = {}
                        currentPathId = 0
                        while True:
                            resp = self.raw_POST(
                                commandArray = ["getNextPathInfo"],
                                fields       = {
                                    "macAddress": mac,
                                    "filter":     0,
                                    "pathId":     currentPathId
                                },
                                manager      = manager,
                            )
                            if resp["RC"] != 0:
                                break
                            snapshot['getPathInfo'][macString][currentPathId] = stringifyMacIpAddresses(resp)
                            currentPathId = resp["pathId"]
                    # getMoteLinks() on all paths on all motes
                    snapshot['getMoteLinks'] = {}
                    for mac in macs:
                        macString = u.formatMacString(mac)
                        snapshot['getMoteLinks'][macString] = {}
                        snapshot['getMoteLinks'][macString]['links'] = []
                        currentidx = 0
                        while True:
                            resp = self.raw_POST(
                                commandArray = ["getMoteLinks"],
                                fields       = {
                                    "macAddress": mac,
                                    "idx":        currentidx,
                                },
                                manager      = manager,
                            )
                            if resp["RC"] != 0:
                                break
                            # add all "metadata" fields, i.e. everything before the list of links
                            for (k,v) in resp.items():
                                if ("_" not in k) and (k not in ['numLinks','idx']):
                                    snapshot['getMoteLinks'][macString][k] = v
                            # populate list of links
                            # per-link fields come back flattened as name_1, name_2, ...;
                            # regroup them into one dict per link
                            for i in range(resp['numLinks']):
                                thisLink = {}
                                suffix = '_{0}'.format(i+1)
                                for (k,v) in resp.items():
                                    if k.endswith(suffix):
                                        name = k[:-len(suffix)]
                                        thisLink[name] = v
                                snapshot['getMoteLinks'][macString]['links'] += [thisLink]
                            currentidx += resp['numLinks']
                    # timestamp_stop
                    snapshot['timestamp_stop'] = currentUtcTime()
                    # epoch_stop
                    snapshot['epoch_stop'] = time.time()
                    # remember the last snapshot for each manager
                    with self.dataLock:
                        self.lastsnapshots[manager] = snapshot
                except Exception as err:
                    notifJson = {
                        'valid':    False,
                        'err':      str(err),
                    }
                else:
                    notifJson = {
                        'valid':    True,
                        'snapshot': snapshot,
                    }
                finally:
                    notifJson['manager'] = manager
                    if correlationID:
                        notifJson['correlationID'] = correlationID
                    self.notifCb(
                        notifName = 'snapshot',
                        notifJson = notifJson,
                    )
        except Exception as err:
            logCrash(self.name,err)
    #======================== public ==========================================
    def doSnapshot(self,manager,correlationID):
        """Queue a snapshot of `manager` and wake the worker thread."""
        with self.dataLock:
            self.snapshotsTodo += [(manager,correlationID)]
        self.snapshotnowSem.release()
    def getLastsnapshots(self):
        """Return a deep copy of the last snapshot per manager, with an
        'age_seconds' field added to each."""
        with self.dataLock:
            returnVal = copy.deepcopy(self.lastsnapshots)
        now = time.time()
        for m in returnVal.keys():
            returnVal[m]['age_seconds'] = int(now-returnVal[m]['epoch_stop'])
        return returnVal
class DeleMgrThread(threading.Thread):
    """Housekeeping daemon: periodically removes disconnected managers
    from the JsonManager it watches."""
    HOUSEKEEPING_PERIOD = 10
    def __init__(self,jsonManager):
        # remember which JsonManager we clean up for
        self.jsonManager = jsonManager
        # configure and launch the daemon thread
        threading.Thread.__init__(self)
        self.name        = 'DeleMgrThread'
        self.daemon      = True
        self.start()
    def run(self):
        """Every HOUSEKEEPING_PERIOD seconds, delete managers whose state
        is reported as 'disconnected'."""
        while True:
            managerStates = self.jsonManager.status_GET()['managers']
            for (serialport,state) in managerStates.items():
                if state=='disconnected':
                    self.jsonManager.managers_DELETE([serialport])
            time.sleep(self.HOUSEKEEPING_PERIOD)
class JsonManager(object):
    """Single entry point of the JSON API.

    Manages the connections to one or more SmartMesh IP managers, proxies
    raw and OAP commands to them, keeps a persistent configuration, and
    publishes all notifications through notifCb.
    """

    OAP_TIMEOUT  = 30.000   # seconds to wait for an OAP response

    def __init__(self, autoaddmgr, autodeletemgr, serialport, notifCb, configfilename=None):
        """
        \param autoaddmgr     when True, scan serial ports and auto-connect managers
        \param autodeletemgr  when True, periodically delete disconnected managers
        \param serialport     serial port of the initial manager (may be None)
        \param notifCb        callback(notifName=...,notifJson=...) for all notifications
        \param configfilename optional pickle file used to persist self.config
        """
        # store params
        self.autoaddmgr       = autoaddmgr
        self.autodeletemgr    = autodeletemgr
        self.serialport       = serialport
        self.notifCb          = notifCb
        self.configfilename   = configfilename
        # local variables
        self.startTime        = time.time()
        self.dataLock         = threading.RLock()
        self._loadConfig() # populates the self.config dictionary
        self.managerHandlers  = {}
        self.oapDispatch      = OAPDispatcher.OAPDispatcher()
        self.oapDispatch.register_notif_handler(self._manager_oap_notif_handler)
        self.oapClients       = {}
        self.snapshotThread   = SnapshotThread(
            self.raw_POST,
            self.notifCb,
        )
        self.outstandingEvents = {}
        self.responses        = {}
        self.hrParser         = HrParser.HrParser()
        # connect to managers (if any)
        self._syncManagers()
        # if autoaddmgr, have SerialScanner look for managers
        if self.autoaddmgr:
            self.serialScanner = SerialScanner.SerialScanner()
            self.serialScanner.availableManagerNotifier(
                cb     = self._availablemanagers_cb,
                period = 60,
            )
        # if autodeletemgr, start DeleMgrThread
        if self.autodeletemgr:
            DeleMgrThread(self)

    #======================== public ==========================================

    #=== status

    def status_GET(self):
        """Return a dict describing the application: version, uptime, threads
        and per-manager connection state."""
        return {
            'SmartMesh SDK version': '.'.join([str(b) for b in sdk_version.VERSION]),
            'current time':          self._formatTime(),
            'running since':         '{0} ({1} ago)'.format(
                self._formatTime(self.startTime),
                datetime.timedelta(seconds=time.time()-self.startTime)
            ),
            'threads running':       [t.getName() for t in threading.enumerate()],
            'managers':              self._formatManagersStatus(),
        }

    #=== raw

    def raw_POST(self, commandArray, fields, manager):
        """Send a raw API command to a manager and return its response fields.

        \param manager serial port name, or integer index into the sorted
                       list of connected managers.
        MAC addresses in fields may be 'xx-xx-...' strings; they are
        converted to byte lists before sending and back in the response.
        On an APIError the return value is a dict with only an 'RC' key.
        """
        if type(manager)==int:
            manager = sorted(self.managerHandlers.keys())[manager]
        # mac addresses: '00-01-02-03-04-05-06-07' -> [0,1,2,3,4,5,6,7]
        wasDestringified = destringifyMacAddresses(fields)
        with self.dataLock:
            try:
                returnVal = self.managerHandlers[manager].connector.send(
                    commandArray = commandArray,
                    fields       = fields,
                )
            except APIError as err:
                returnVal = {
                    'RC': err.rc,
                }
        if wasDestringified:
            # mac addresses: [0,1,2,3,4,5,6,7] -> '00-01-02-03-04-05-06-07'
            stringifyMacIpAddresses(returnVal)
        return returnVal

    #=== oap

    # /info
    def oap_info_GET(self,mac):
        """Read the OAP /info resource of mote `mac`."""
        return self._oap_send_and_wait_for_reply(
            mac         = mac,
            method      = OAPMessage.CmdType.GET,
            resource    = 'info',
        )
    # /main
    def oap_main_GET(self,mac):
        """Read the OAP /main resource of mote `mac`."""
        return self._oap_send_and_wait_for_reply(
            mac         = mac,
            method      = OAPMessage.CmdType.GET,
            resource    = 'main',
        )
    def oap_main_PUT(self,mac,body):
        """Write the OAP /main resource of mote `mac`."""
        return self._oap_send_and_wait_for_reply(
            mac         = mac,
            method      = OAPMessage.CmdType.PUT,
            resource    = 'main',
            body        = body,
        )
    # /digital_in
    def oap_digital_in_GET(self,mac,pin):
        """Read OAP /digital_in/<pin> of mote `mac`."""
        return self._oap_send_and_wait_for_reply(
            mac         = mac,
            method      = OAPMessage.CmdType.GET,
            resource    = 'digital_in',
            subresource = pin,
        )
    def oap_digital_in_PUT(self,mac,pin,body):
        """Write OAP /digital_in/<pin> of mote `mac`."""
        return self._oap_send_and_wait_for_reply(
            mac         = mac,
            method      = OAPMessage.CmdType.PUT,
            resource    = 'digital_in',
            subresource = pin,
            body        = body,
        )
    # /digital_out
    def oap_digital_out_PUT(self,mac,pin,body):
        """Write OAP /digital_out/<pin> of mote `mac`."""
        return self._oap_send_and_wait_for_reply(
            mac         = mac,
            method      = OAPMessage.CmdType.PUT,
            resource    = 'digital_out',
            subresource = pin,
            body        = body,
        )
    # /analog
    def oap_analog_GET(self,mac,pin):
        """Read OAP /analog/<pin> of mote `mac`."""
        return self._oap_send_and_wait_for_reply(
            mac         = mac,
            method      = OAPMessage.CmdType.GET,
            resource    = 'analog',
            subresource = pin,
        )
    def oap_analog_PUT(self,mac,pin,body):
        """Write OAP /analog/<pin> of mote `mac`."""
        return self._oap_send_and_wait_for_reply(
            mac         = mac,
            method      = OAPMessage.CmdType.PUT,
            resource    = 'analog',
            subresource = pin,
            body        = body,
        )
    # /temperature
    def oap_temperature_GET(self,mac):
        """Read the OAP /temperature resource of mote `mac`."""
        return self._oap_send_and_wait_for_reply(
            mac         = mac,
            method      = OAPMessage.CmdType.GET,
            resource    = 'temperature',
        )
    def oap_temperature_PUT(self,mac,body):
        """Write the OAP /temperature resource of mote `mac`."""
        return self._oap_send_and_wait_for_reply(
            mac         = mac,
            method      = OAPMessage.CmdType.PUT,
            resource    = 'temperature',
            body        = body,
        )
    # /pkgen
    def oap_pkgen_echo_GET(self,mac):
        """Read OAP /pkgen/0 (echo) of mote `mac`."""
        return self._oap_send_and_wait_for_reply(
            mac         = mac,
            method      = OAPMessage.CmdType.GET,
            resource    = 'pkgen',
            subresource = 0,
        )
    def oap_pkgen_PUT(self,mac,body):
        """Write the OAP /pkgen resource of mote `mac`."""
        return self._oap_send_and_wait_for_reply(
            mac         = mac,
            method      = OAPMessage.CmdType.PUT,
            resource    = 'pkgen',
            body        = body,
        )

    #=== helpers

    def serialports_GET(self):
        """Return {'serialports': [...]} listing the serial ports of this computer."""
        try:
            serialports = []
            if os.name=='nt':
                key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'HARDWARE\\DEVICEMAP\\SERIALCOMM')
                for i in range(winreg.QueryInfoKey(key)[1]):
                    try:
                        val = winreg.EnumValue(key,i)
                    except:
                        pass
                    else:
                        # only keep Virtual COM Ports (FTDI-style managers)
                        if val[0].find('VCP')>-1:
                            serialports.append(str(val[1]))
            elif os.name=='posix':
                serialports = glob.glob('/dev/ttyUSB*')
            serialports.sort()
            return {
                'serialports': serialports
            }
        except Exception as err:
            return ['Could not scan for serial port. Error={0}'.format(err)]
    def motes_GET(self):
        """Return {manager: [macString, ...]} for all connected managers."""
        with self.dataLock:
            returnVal = {m:self._list_motes_per_manager(m) for m in self.managerHandlers.keys()}
        return returnVal
    def oapmotes_GET(self):
        """Return the MACs of the motes for which an OAPClient exists."""
        with self.dataLock:
            return {'oapmotes': self.oapClients.keys(),}
    def snapshot_POST(self,manager,correlationID=None):
        """Trigger an asynchronous snapshot of `manager`."""
        self.snapshotThread.doSnapshot(manager,correlationID)
    def snapshot_GET(self):
        """Return the last snapshot taken of each manager."""
        return self.snapshotThread.getLastsnapshots()
    def managers_PUT(self,newmanagers):
        """Add (and connect to) the managers listed in `newmanagers`."""
        with self.dataLock:
            for m in newmanagers:
                if m not in self.config['managers']:
                    self.config['managers'] += [m]
        self._saveConfig()
        self._syncManagers()
    def managers_DELETE(self,oldmanagers):
        """Remove (and disconnect from) the managers listed in `oldmanagers`."""
        with self.dataLock:
            for m in oldmanagers:
                try:
                    self.config['managers'].remove(m)
                except ValueError:
                    pass # happens when manager doesn't exist
        self._saveConfig()
        self._syncManagers()

    #=== config

    def config_GET(self):
        """Return a deep copy of the current configuration."""
        with self.dataLock:
            return copy.deepcopy(self.config)
    def config_POST(self,newconfig):
        """Recursively merge `newconfig` into the configuration, persist it,
        re-sync managers, and return the resulting configuration."""
        with self.dataLock:
            self.config = self._recursive_dict_update(
                self.config,
                newconfig
            )
        self._saveConfig()
        self._syncManagers()
        with self.dataLock:
            return copy.deepcopy(self.config)

    #=== close

    def close(self):
        """Disconnect from all managers (best-effort)."""
        for (k,v) in self.managerHandlers.items():
            try:
                v.close()
            except:
                pass

    #======================== private =========================================

    #=== api

    # status
    def _formatManagersStatus(self):
        """Return {manager: 'connected'|'disconnected'}."""
        returnVal = {}
        with self.dataLock:
            for (k,v) in self.managerHandlers.items():
                if v.isConnected():
                    returnVal[k] = 'connected'
                else:
                    returnVal[k] = 'disconnected'
        return returnVal

    # oap
    def _oap_send_and_wait_for_reply(self, mac, method, resource, subresource=None, body=None):
        """Send an OAP request to mote `mac` and block for the response.

        Returns the formatted response dict. On timeout, sets a 504 status
        on the bottle response and returns a JSON error body.
        Raises SystemError if a request to the same mote is already pending.
        """
        # avoid a mutable default argument; body is only read below
        if body is None:
            body = {}
        # use only lowercase
        mac = mac.lower()
        # add an oapClient, if needed
        self._oap_add_client_if_needed(mac)
        # create data_tags
        data_tags = []
        if method==OAPMessage.CmdType.PUT:
            # serialize each writable ('w') field of the resource, per its type
            for (i,(n,t,d)) in enumerate(oapdefs.FIELDS[resource]):
                if d.count('w'):
                    if t=='INT8U':
                        data_tags += [OAPMessage.TLVByte(  t=i,v=body[n])]
                    elif t=='INT16U':
                        data_tags += [OAPMessage.TLVShort( t=i,v=body[n])]
                    elif t=='INT32U':
                        data_tags += [OAPMessage.TLVLong(  t=i,v=body[n])]
                    elif t=='INT8U[16]':
                        # hex string -> raw byte string
                        temp = body[n]
                        temp = ''.join([chr(int(b,16)) for b in [temp[2*j:2*j+2] for j in range(len(temp)/2)]])
                        data_tags += [OAPMessage.TLVString( t=i,v=temp)]
        # send OAP request
        addr = [b for b in oapdefs.ADDRESS[resource]]
        if subresource!=None:
            addr += [subresource]
        self.oapClients[mac].send(
            cmd_type   = method,
            addr       = addr,
            data_tags  = data_tags,
            cb         = self._oap_handle_response,
        )
        # wait for and handle OAP response
        with self.dataLock:
            if mac in self.outstandingEvents:
                raise SystemError('busy waiting for response')
            event = threading.Event()
            self.outstandingEvents[mac] = event
        if event.wait(self.OAP_TIMEOUT):
            # received response
            with self.dataLock:
                response = self.responses[mac]
                del self.responses[mac]
            return self._oap_format_response(resource,response)
        else:
            # timeout
            with self.dataLock:
                del self.outstandingEvents[mac]
            bottle.response.status       = 504 # Gateway Timeout
            bottle.response.content_type = 'application/json'
            return json.dumps({
                'body': 'timeout!',
            })
    def _oap_handle_response(self, mac, oap_resp):
        """OAPClient callback: store the response and wake the waiting request."""
        macString = u.formatMacString(mac)
        with self.dataLock:
            if macString in self.responses:
                raise SystemError('response unread')
            if macString in self.outstandingEvents:
                self.responses[macString] = oap_resp
                self.outstandingEvents[macString].set()
                del self.outstandingEvents[macString]
            else:
                # received a response I'm not waiting for anymore (increase timeout?)
                pass
    def _oap_format_response(self,resource,response):
        '''
        Convert a raw OAP response into a JSON-friendly dict, e.g.
        {
            'command': 1,
            'result':  0,
            'tags': [
                (255, 1, array('B', [0])),
                (0,   1, array('B', [1])),
                (1,   1, array('B', [3])),
                (2,   1, array('B', [0])),
                (3,   2, array('B', [0, 24])),
                (4,   2, array('B', [0, 1])),
                (5,   4, array('B', [0, 0, 0, 0])),
                (6,   4, array('B', [0, 0, 0, 0]))
            ]
        }
        becomes
        {
            'resource': [0],
            'method':   1,
            'status':   0,
            'fields': {
                0: [1],
                1: [3],
                2: [0],
                3: [0, 24],
                4: [0, 1],
                5: [0, 0, 0, 0],
                6: [0, 0, 0, 0]
            }
        }
        '''
        tags = {k:v.tolist() for (k,_,v) in response['tags']}
        returnVal = {}
        # status
        returnVal['status']   = reversedict(oapdefs.RC)[response['result']]
        # resource (tag 255 carries the address, then is consumed)
        returnVal['resource'] = reversedict(oapdefs.ADDRESS)[tuple(tags[255])]
        del tags[255]
        # method
        returnVal['method']   = reversedict(oapdefs.COMMAND)[response['command']]
        # fields: decode each remaining tag per the resource's field descriptors
        desc = oapdefs.FIELDS[resource]
        fields = {}
        for (k,v) in tags.items():
            (n,t,d) = desc[k]
            if t=='INT8U[16]':
                fields[n] = ''.join(["%.2x"%i for i in v])
            elif v==[]:
                fields[n] = v
            else:
                # big-endian integer from the byte list
                fields[n] = int(''.join([chr(b) for b in v]).encode('hex'), 16)
        returnVal['fields'] = fields
        return returnVal
    def _oap_add_client_if_needed(self,mac):
        """Ensure an OAPClient exists for mote `mac` (byte list or 'xx-xx-...' string)."""
        if type(mac)==str:
            macString = mac
            mac       = [int(b,16) for b in mac.split('-')]
        else:
            macString = u.formatMacString(mac)
        with self.dataLock:
            if macString not in self.oapClients:
                # find which manager knows this mote
                # BUGFIX: this used to test `macString in manager`, i.e. the MAC
                # against the manager's serial-port *name*, which never matches;
                # the mote list is what must be searched.
                for (manager,motes) in self.motes_GET().items():
                    if macString in motes:
                        break
                # create OAPClient
                self.oapClients[macString] = OAPClient.OAPClient(
                    mac,
                    self.managerHandlers[manager].connector.dn_sendData,
                    self.oapDispatch,
                )

    # helpers
    def _list_motes_per_manager(self,manager):
        """Return the MAC strings of the operational (state 4) non-AP motes of `manager`."""
        returnVal = []
        try:
            currentMac     = (0,0,0,0,0,0,0,0) # start getMoteConfig() iteration with the 0 MAC address
            continueAsking = True
            while continueAsking:
                try:
                    with self.dataLock:
                        res = self.managerHandlers[manager].connector.dn_getMoteConfig(currentMac,True)
                except APIError:
                    continueAsking = False
                else:
                    if ((not res.isAP) and (res.state in [4,])):
                        returnVal.append(u.formatMacString(res.macAddress))
                    currentMac = res.macAddress
        except ConnectionError as err:
            pass # happens when manager is disconnected
        return returnVal

    # manager management
    def _syncManagers(self):
        """Align managerHandlers with self.config['managers']: add new ones,
        close and drop removed ones."""
        with self.dataLock:
            # add
            for m in self.config['managers']:
                if m not in self.managerHandlers:
                    self.managerHandlers[m] = ManagerHandler(m,self._manager_raw_notif_handler)
            # remove (iterate over a copy: entries are deleted as we go)
            for m in list(self.managerHandlers.keys()):
                if m not in self.config['managers']:
                    self.managerHandlers[m].close()
                    del self.managerHandlers[m]
    def _availablemanagers_cb(self,serialport):
        """SerialScanner callback: auto-add a newly discovered manager."""
        self.managers_PUT([serialport])

    # config
    def _loadConfig(self):
        """Load self.config from the pickle file; fall back to (and persist)
        a default configuration when no file is available."""
        with self.dataLock:
            try:
                if self.configfilename:
                    self.config = pickle.load(open(self.configfilename,"rb"))
                else:
                    raise IOError
            except IOError as err:
                if self.serialport:
                    managers = [self.serialport]
                else:
                    managers = []
                self.config = {
                    'managers': managers,
                    'notification_urls': {
                        'event': [
                            'http://127.0.0.1:1880/event',
                            'http://127.0.0.1:8081/event',
                        ],
                        'notifLog': [
                            'http://127.0.0.1:1880/notifLog',
                        ],
                        'notifData': [
                            'http://127.0.0.1:1880/notifData',
                        ],
                        'notifIpData': [
                            'http://127.0.0.1:1880/notifIpData',
                        ],
                        'notifHealthReport':[
                            'http://127.0.0.1:1880/notifHealthReport',
                        ],
                        'oap': [
                            'http://127.0.0.1:1880/oap',
                        ],
                        'hr': [
                            'http://127.0.0.1:1880/hr',
                        ],
                        'snapshot': [
                            'http://127.0.0.1:1880/snapshot',
                            'http://127.0.0.1:8081/snapshot',
                        ],
                    }
                }
                self._saveConfig()
    def _saveConfig(self):
        """Persist self.config to the pickle file (no-op without a filename)."""
        if self.configfilename:
            with self.dataLock:
                pickle.dump(self.config, open(self.configfilename,"wb"))

    #=== notifications

    def _manager_raw_notif_handler(self,manager,notifName,notif):
        """Handle a raw notification from one manager.

        Data notifications are fed to the OAP dispatcher; health reports are
        parsed and published as 'hr'; every notification is also published raw
        (event* notifications are grouped under the 'event' name).
        """
        # parse further
        if   notifName==IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA:
            # try to parse data notifications as OAP (fails if not OAP payload, no problem)
            self.oapDispatch.dispatch_pkt(notifName,notif)
        elif notifName==IpMgrSubscribe.IpMgrSubscribe.NOTIFHEALTHREPORT:
            hr = self.hrParser.parseHr(notif.payload)
            # POST HR to some URL
            self.notifCb(
                notifName = 'hr',
                notifJson = {
                    'name':   'hr',
                    'mac':    u.formatMacString(notif.macAddress),
                    'hr':     hr,
                },
            )
        # POST raw notification to some URL
        if notifName.startswith('event'):
            nm = 'event'
        else:
            nm = notifName
        fields = stringifyMacIpAddresses(notif._asdict())
        self.notifCb(
            notifName = nm,
            notifJson = {
                'manager': manager,
                'name':    notifName,
                'fields':  fields,
            },
        )
    def _manager_oap_notif_handler(self,mac,notif):
        """Handle an OAP notification: ensure an OAPClient exists and publish it."""
        macString = u.formatMacString(mac)
        # add an oapClient, if needed
        self._oap_add_client_if_needed(mac)
        # POST OAP notification to some URLs
        fields = stringifyMacIpAddresses(notif._asdict())
        self.notifCb(
            notifName = 'oap',
            notifJson = {
                'name':   'oap',
                'mac':    macString,
                'fields': fields,
            },
        )

    #=== formatting

    def _formatTime(self,ts=None):
        """Format epoch `ts` (default: now) as a local-time string."""
        return time.strftime("%m/%d/%Y %H:%M:%S",time.localtime(ts))

    #=== helpers

    def _recursive_dict_update(self,d,u):
        """Recursively merge mapping `u` into dict `d` (in place) and return `d`."""
        # collections.Mapping moved to collections.abc in Python 3
        try:
            mapping_type = collections.abc.Mapping
        except AttributeError:
            mapping_type = collections.Mapping
        for k, v in u.items():
            if isinstance(v, mapping_type):
                r = self._recursive_dict_update(d.get(k, {}), v)
                d[k] = r
            else:
                d[k] = u[k]
        return d
| {
"content_hash": "f2cc197dc781d5cf12da34471be283a9",
"timestamp": "",
"source": "github",
"line_count": 1063,
"max_line_length": 111,
"avg_line_length": 35.12605832549389,
"alnum_prop": 0.4304614478159565,
"repo_name": "realms-team/solmanager",
"id": "a6ec75398ca688b85753b2fcfece7df7508bb32e",
"size": "37520",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libs/smartmeshsdk-REL-1.3.0.1/libs/SmartMeshSDK/utils/JsonManager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3408"
},
{
"name": "CSS",
"bytes": "1148"
},
{
"name": "HTML",
"bytes": "1568"
},
{
"name": "JavaScript",
"bytes": "1430296"
},
{
"name": "Makefile",
"bytes": "8195"
},
{
"name": "Python",
"bytes": "3428922"
},
{
"name": "Smarty",
"bytes": "5800"
}
],
"symlink_target": ""
} |
from django.db import models
from paigow.models import PGGame
from pgtile import PGTile
class PGDeal( models.Model ):
  """One shuffled deck of 32 tiles for a given deal within a PGGame.

  The deck is persisted as a 32-character string, one character per tile
  (each PGTile is identified by a unique tile_char).
  """

  # make sure the DB table name is what we want
  class Meta:
    app_label = 'paigow'

  # This 32-character string defines the deal (one tile_char per tile)
  deck = models.CharField( max_length = 32 )

  # it's always part of some game, and some deal number
  game = models.ForeignKey( PGGame )
  deal_number = models.PositiveSmallIntegerField()

  # The deal shows as the ordering
  def __unicode__( self ):
    # BUGFIX: a space was missing before "in game" ("Deal 3in game ...")
    return "Deal " + str( self.deal_number ) + " in game " + str( self.game )

  # Create it with an array of tiles and the game/deal#
  @classmethod
  def create( cls, tiles, game, deal_number ):
    """Return an unsaved PGDeal whose deck records `tiles` in order."""
    deck_vals = "".join( tile.tile_char for tile in tiles )
    return cls( deck = deck_vals, game = game, deal_number = deal_number )

  # return the tile for any given offset
  def tile( self, offset ):
    """Return the PGTile at 0-based `offset` in the deck, or None if out of range."""
    # sanity check
    if ( offset < 0 or offset > 31 ):
      return None
    # return the appropriate tile
    return PGTile.with_char( self.deck[offset] )
# ----------------------------------------------------
# Test PGDeal class
from django.test import TestCase
class PGDealTest( TestCase ):
  """Unit tests for PGDeal creation, persistence and tile lookup."""

  fixtures = [ 'pgtile.json' ]

  def test_basic( self ):
    """An unsaved deal returns a tile for offsets 0-31 and None outside."""
    from pggame import PGGame
    all_tiles = PGTile.objects.all()
    game = PGGame.create( "Test for PGDeal" )
    deal = PGDeal.create( all_tiles, game, 0 )
    self.assertIsNone( deal.tile( -1 ) )
    for offset in range(32):
      self.assertIsNotNone( deal.tile( offset ) )
    self.assertIsNone( deal.tile( 32 ) )

  def test_save_get( self ):
    """A saved deal can be queried back by game and deal number."""
    from pggame import PGGame
    from pgtile import PGTile
    game = PGGame.create( 'test for deal' )
    game.save()
    saved = PGDeal.create( PGTile.objects.all(), game, 1 )
    saved.save()
    fetched = PGDeal.objects.filter( game = game, deal_number = 1 )
    self.assertIsNotNone( fetched )

  def test_get_tiles( self ):
    """Exactly one deal comes back, and every one of its 32 tiles is present."""
    from pggame import PGGame
    from pgtile import PGTile
    game = PGGame.create( 'test for deal' )
    game.save()
    saved = PGDeal.create( PGTile.objects.all(), game, 1 )
    saved.save()
    matches = PGDeal.objects.filter( game = game, deal_number = 1 )
    self.assertEqual( len(matches), 1 )
    for offset in range(32):
      self.assertIsNotNone( matches[0].tile(offset) )
# run the test when invoked as a test (this is boilerplate
# code at the bottom of every python file that has unit
# tests in it).
if __name__ == '__main__':
  # BUGFIX: unittest was referenced here without ever being imported,
  # so running this module directly raised NameError.
  import unittest
  unittest.main()
| {
"content_hash": "7064e3eb12fa8badc79ab087634180ba",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 76,
"avg_line_length": 26.454545454545453,
"alnum_prop": 0.6269568537609774,
"repo_name": "rudisherry666/paigow",
"id": "a677642b2e5bab50ced408d855218890e68c6704",
"size": "3467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paigow/models/pgdeal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "116905"
},
{
"name": "Perl",
"bytes": "10172"
},
{
"name": "Python",
"bytes": "133508"
}
],
"symlink_target": ""
} |
"""Contains main loop for the AI"""
from board_functions import pp
from board_functions import make_board
from board_functions import num_empty
from board_functions import rand_tile
from board_functions import move
from brain import get_dir
BOARD = make_board()
#Main loop
while True:
pp(BOARD)
EMPTY = num_empty(BOARD)
MAX_DEPTH = 1
if EMPTY > 3:
MAX_DEPTH = 1
elif EMPTY > 1:
MAX_DEPTH = 2
if not dir:
print "Game Over"
break
BOARD = rand_tile(move(BOARD, get_dir(BOARD, MAX_DEPTH)).shift())
print '\n'
| {
"content_hash": "ca5813ec1e0cbb7769b1bde25ac84b6d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 69,
"avg_line_length": 17.575757575757574,
"alnum_prop": 0.646551724137931,
"repo_name": "DavidGrey/2048_Python-AI",
"id": "f034028c266842c32caba8b896860e923bbf1617",
"size": "580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2048_snake-AI/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8607"
}
],
"symlink_target": ""
} |
import hycohanz as hfss
raw_input('Press "Enter" to connect to HFSS.>')

[oAnsoftApp, oDesktop] = hfss.setup_interface()

raw_input('Press "Enter" to create a new project.>')

oProject = hfss.new_project(oDesktop)

raw_input('Press "Enter" to insert a new DrivenModal design named HFSSDesign1.>')

oDesign = hfss.insert_design(oProject, "HFSSDesign1", "DrivenModal")

raw_input('Press "Enter" to set the active editor to "3D Modeler" (The default and only known correct value).>')

oEditor = hfss.set_active_editor(oDesign)

raw_input('Press "Enter" to insert some circle properties into the design.>')

hfss.add_property(oDesign, "xcenter", hfss.Expression("1m"))
hfss.add_property(oDesign, "ycenter", hfss.Expression("2m"))
hfss.add_property(oDesign, "zcenter", hfss.Expression("3m"))
hfss.add_property(oDesign, "diam", hfss.Expression("1m"))

raw_input('Press "Enter" to draw a circle using the properties.>')

# BUGFIX: the design property created above is named "diam"; the original
# code referenced an undefined property "diameter" here, which HFSS would
# reject when evaluating the radius expression.
hfss.create_circle(oEditor, hfss.Expression("xcenter"),
                   hfss.Expression("ycenter"),
                   hfss.Expression("zcenter"),
                   hfss.Expression("diam")/2)

raw_input('Press "Enter" to quit HFSS.>')

hfss.quit_application(oDesktop)

del oEditor
del oDesign
del oProject
del oDesktop
del oAnsoftApp
| {
"content_hash": "1decab2b9e682a83c397de6fb34000b7",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 112,
"avg_line_length": 30.853658536585368,
"alnum_prop": 0.7051383399209487,
"repo_name": "Dr-Drive/hycohanz",
"id": "c5a13d6fd1c76a8fe3a1992b8225bc609be92630",
"size": "1265",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "examples/create_circle.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "194707"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.ops.image_ops."""
import colorsys
import contextlib
import functools
import itertools
import math
import os
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import get_single_element
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config as tf_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
  """Tests for the rgb_to_hsv / hsv_to_rgb conversion ops."""

  def testBatch(self):
    """Batched conversion must equal per-image conversion, and round-trip."""
    # Build an arbitrary RGB image
    np.random.seed(7)
    batch_size = 5
    shape = (batch_size, 2, 7, 3)

    for nptype in [np.float32, np.float64]:
      inp = np.random.rand(*shape).astype(nptype)

      # Convert to HSV and back, as a batch and individually
      with self.cached_session():
        batch0 = constant_op.constant(inp)
        batch1 = image_ops.rgb_to_hsv(batch0)
        batch2 = image_ops.hsv_to_rgb(batch1)
        split0 = array_ops.unstack(batch0)
        split1 = list(map(image_ops.rgb_to_hsv, split0))
        split2 = list(map(image_ops.hsv_to_rgb, split1))
        join1 = array_ops.stack(split1)
        join2 = array_ops.stack(split2)
        batch1, batch2, join1, join2 = self.evaluate(
            [batch1, batch2, join1, join2])

      # Verify that processing batch elements together is the same as separate
      self.assertAllClose(batch1, join1)
      self.assertAllClose(batch2, join2)
      self.assertAllClose(batch2, inp)

  def testRGBToHSVRoundTrip(self):
    """rgb -> hsv -> rgb must reproduce the input for float32/float64."""
    data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    for nptype in [np.float32, np.float64]:
      rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
      with self.cached_session():
        hsv = image_ops.rgb_to_hsv(rgb_np)
        rgb = image_ops.hsv_to_rgb(hsv)
        rgb_tf = self.evaluate(rgb)
      self.assertAllClose(rgb_tf, rgb_np)

  def testRGBToHSVDataTypes(self):
    """Round-trip must also work for float16/bfloat16 (CPU).

    Test case for GitHub issue 54855.
    """
    data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    for dtype in [
        dtypes.float32, dtypes.float64, dtypes.float16, dtypes.bfloat16
    ]:
      with self.cached_session(use_gpu=False):
        rgb = math_ops.cast(
            np.array(data, np.float32).reshape([2, 2, 3]) / 255., dtype=dtype)
        hsv = image_ops.rgb_to_hsv(rgb)
        val = image_ops.hsv_to_rgb(hsv)
        out = self.evaluate(val)
        # loose tolerance: half-precision types lose accuracy
        self.assertAllClose(rgb, out, atol=1e-2)
class RGBToYIQTest(test_util.TensorFlowTestCase):
  """Tests for the rgb_to_yiq / yiq_to_rgb conversion ops."""

  @test_util.run_without_tensor_float_32(
      "Calls rgb_to_yiq and yiq_to_rgb, which use matmul")
  def testBatch(self):
    """Batched conversion must equal per-image conversion, and round-trip."""
    # Build an arbitrary RGB image
    np.random.seed(7)
    batch_size = 5
    shape = (batch_size, 2, 7, 3)

    for nptype in [np.float32, np.float64]:
      inp = np.random.rand(*shape).astype(nptype)

      # Convert to YIQ and back, as a batch and individually
      with self.cached_session():
        batch0 = constant_op.constant(inp)
        batch1 = image_ops.rgb_to_yiq(batch0)
        batch2 = image_ops.yiq_to_rgb(batch1)
        split0 = array_ops.unstack(batch0)
        split1 = list(map(image_ops.rgb_to_yiq, split0))
        split2 = list(map(image_ops.yiq_to_rgb, split1))
        join1 = array_ops.stack(split1)
        join2 = array_ops.stack(split2)
        batch1, batch2, join1, join2 = self.evaluate(
            [batch1, batch2, join1, join2])

      # Verify that processing batch elements together is the same as separate
      self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
      self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
      self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class RGBToYUVTest(test_util.TensorFlowTestCase):
  """Tests for the rgb_to_yuv / yuv_to_rgb conversion ops."""

  @test_util.run_without_tensor_float_32(
      "Calls rgb_to_yuv and yuv_to_rgb, which use matmul")
  def testBatch(self):
    """Batched conversion must equal per-image conversion, and round-trip."""
    # Build an arbitrary RGB image
    np.random.seed(7)
    batch_size = 5
    shape = (batch_size, 2, 7, 3)

    for nptype in [np.float32, np.float64]:
      inp = np.random.rand(*shape).astype(nptype)

      # Convert to YUV and back, as a batch and individually
      with self.cached_session():
        batch0 = constant_op.constant(inp)
        batch1 = image_ops.rgb_to_yuv(batch0)
        batch2 = image_ops.yuv_to_rgb(batch1)
        split0 = array_ops.unstack(batch0)
        split1 = list(map(image_ops.rgb_to_yuv, split0))
        split2 = list(map(image_ops.yuv_to_rgb, split1))
        join1 = array_ops.stack(split1)
        join2 = array_ops.stack(split2)
        batch1, batch2, join1, join2 = self.evaluate(
            [batch1, batch2, join1, join2])

      # Verify that processing batch elements together is the same as separate
      self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
      self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
      self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in range(images.shape[0]):
for y in range(images.shape[1]):
for x in range(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testGrayscaleToRGBInputValidation(self):
# tests whether the grayscale_to_rgb function raises
# an exception if the input images' last dimension is
# not of size 1, i.e. the images have shape
# [batch size, height, width] or [height, width]
# tests if an exception is raised if a three dimensional
# input is used, i.e. the images have shape [batch size, height, width]
with self.cached_session():
# 3-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "Last dimension of a grayscale image should be size 1"
with self.assertRaisesRegex(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
# tests if an exception is raised if a two dimensional
# input is used, i.e. the images have shape [height, width]
with self.cached_session():
# 1-D input without batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "must be at least two-dimensional"
with self.assertRaisesRegex(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
  def testShapeInference(self):
    """Static shape inference for rgb_to_grayscale / grayscale_to_rgb.

    Checks that known (possibly partial) input shapes propagate to the
    expected output shapes, and that fully-unknown shapes do not break
    the shape functions.
    """
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      # Shape inference works and produces expected output where possible.
      # Note the None in dim 1: partially-known shapes must still propagate.
      rgb_shape = [7, None, 19, 3]
      gray_shape = rgb_shape[:-1] + [1]
      with self.cached_session():
        rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
        gray = image_ops.rgb_to_grayscale(rgb_tf)
        self.assertEqual(gray_shape, gray.get_shape().as_list())
      with self.cached_session():
        gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
        rgb = image_ops.grayscale_to_rgb(gray_tf)
        self.assertEqual(rgb_shape, rgb.get_shape().as_list())
      # Shape inference does not break for unknown shapes.
      # An unknown TensorShape is falsy, hence assertFalse below.
      with self.cached_session():
        rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
        gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
        self.assertFalse(gray_unknown.get_shape())
      with self.cached_session():
        gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
        rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
        self.assertFalse(rgb_unknown.get_shape())
class AdjustGamma(test_util.TensorFlowTestCase):
  """Tests for image_ops.adjust_gamma on float32 and uint8 images."""

  def test_adjust_gamma_less_zero_float32(self):
    """A negative gamma must be rejected for float32 images."""
    with self.cached_session():
      x_data = np.random.uniform(0, 1.0, (8, 8))
      x_np = np.array(x_data, dtype=np.float32)
      x = constant_op.constant(x_np, shape=x_np.shape)
      err_msg = "Gamma should be a non-negative real number"
      with self.assertRaisesRegex(
          (ValueError, errors.InvalidArgumentError), err_msg):
        image_ops.adjust_gamma(x, gamma=-1)

  def test_adjust_gamma_less_zero_uint8(self):
    """A negative gamma must be rejected for uint8 images."""
    with self.cached_session():
      x_data = np.random.uniform(0, 255, (8, 8))
      x_np = np.array(x_data, dtype=np.uint8)
      x = constant_op.constant(x_np, shape=x_np.shape)
      err_msg = "Gamma should be a non-negative real number"
      with self.assertRaisesRegex(
          (ValueError, errors.InvalidArgumentError), err_msg):
        image_ops.adjust_gamma(x, gamma=-1)

  def test_adjust_gamma_less_zero_tensor(self):
    """A negative gamma passed as a tensor must be rejected."""
    with self.cached_session():
      x_data = np.random.uniform(0, 1.0, (8, 8))
      x_np = np.array(x_data, dtype=np.float32)
      x = constant_op.constant(x_np, shape=x_np.shape)
      y = constant_op.constant(-1.0, dtype=dtypes.float32)
      err_msg = "Gamma should be a non-negative real number"
      with self.assertRaisesRegex(
          (ValueError, errors.InvalidArgumentError), err_msg):
        # With a tensor gamma the check may only fire at execution time,
        # so the result must be evaluated inside the assert context.
        image = image_ops.adjust_gamma(x, gamma=y)
        self.evaluate(image)

  def _test_adjust_gamma_uint8(self, gamma):
    """Compares adjust_gamma against a numpy reference for uint8 images.

    Args:
      gamma: non-negative gamma value to apply.
    """
    with self.cached_session():
      x_np = np.random.uniform(0, 255, (8, 8)).astype(np.uint8)
      x = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.adjust_gamma(x, gamma=gamma)
      y_tf = np.trunc(self.evaluate(y))

      # calculate gamma correction using numpy
      # firstly, transform uint8 to float representation
      # then perform correction
      y_np = np.power(x_np / 255.0, gamma)
      # convert correct numpy image back to uint8 type; the 255.5 factor
      # adds the half-unit needed so trunc() rounds to nearest.
      y_np = np.trunc(np.clip(y_np * 255.5, 0, 255.0))

      self.assertAllClose(y_tf, y_np, 1e-6)

  def _test_adjust_gamma_float32(self, gamma):
    """Compares adjust_gamma against a numpy reference for float32 images.

    Args:
      gamma: non-negative gamma value to apply.
    """
    with self.cached_session():
      x_np = np.random.uniform(0, 1.0, (8, 8))
      x = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.adjust_gamma(x, gamma=gamma)
      y_tf = self.evaluate(y)
      y_np = np.clip(np.power(x_np, gamma), 0, 1.0)
      self.assertAllClose(y_tf, y_np, 1e-6)

  def test_adjust_gamma_one_float32(self):
    """Same image should be returned for gamma equal to one."""
    self._test_adjust_gamma_float32(1.0)

  def test_adjust_gamma_one_uint8(self):
    """Same image should be returned for gamma equal to one."""
    self._test_adjust_gamma_uint8(1.0)

  def test_adjust_gamma_zero_uint8(self):
    """White image should be returned for gamma equal
    to zero for uint8 images
    """
    self._test_adjust_gamma_uint8(gamma=0.0)

  def test_adjust_gamma_less_one_uint8(self):
    """Verifying the output with expected results for gamma
    correction with gamma equal to half for uint8 images
    """
    self._test_adjust_gamma_uint8(gamma=0.5)

  def test_adjust_gamma_greater_one_uint8(self):
    """Verifying the output with expected results for gamma
    correction with gamma equal to two for uint8 images
    """
    # Fixed: previously passed gamma=1.0, which duplicated
    # test_adjust_gamma_one_uint8 and never exercised gamma > 1.
    self._test_adjust_gamma_uint8(gamma=2.0)

  def test_adjust_gamma_less_one_float32(self):
    """Verifying the output with expected results for gamma
    correction with gamma equal to half for float32 images
    """
    self._test_adjust_gamma_float32(0.5)

  def test_adjust_gamma_greater_one_float32(self):
    """Verifying the output with expected results for gamma
    correction with gamma equal to two for float32 images
    """
    # Fixed: previously passed gamma=1.0, contradicting this docstring and
    # duplicating test_adjust_gamma_one_float32.
    self._test_adjust_gamma_float32(2.0)

  def test_adjust_gamma_zero_float32(self):
    """White image should be returned for gamma equal
    to zero for float32 images
    """
    self._test_adjust_gamma_float32(0.0)
class AdjustHueTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.adjust_hue against a colorsys-based reference."""

  def testAdjustNegativeHue(self):
    """Hue shift by -0.25 matches precomputed expected pixel values."""
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)

    delta = -0.25
    y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

    with self.cached_session():
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.adjust_hue(x, delta)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def testAdjustPositiveHue(self):
    """Hue shift by +0.25 matches precomputed expected pixel values."""
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)

    delta = 0.25
    y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

    with self.cached_session():
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.adjust_hue(x, delta)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def testBatchAdjustHue(self):
    """Same pixel data as testAdjustPositiveHue, but with a batch dim."""
    x_shape = [2, 1, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)

    delta = 0.25
    y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

    with self.cached_session():
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.adjust_hue(x, delta)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def _adjustHueNp(self, x_np, delta_h):
    """Reference hue adjustment computed per pixel with colorsys."""
    self.assertEqual(x_np.shape[-1], 3)
    x_v = x_np.reshape([-1, 3])
    y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
    channel_count = x_v.shape[0]
    for i in range(channel_count):
      r = x_v[i][0]
      g = x_v[i][1]
      b = x_v[i][2]
      h, s, v = colorsys.rgb_to_hsv(r, g, b)
      h += delta_h
      # Wrap hue into [0, 1); the +10.0 keeps the fmod argument positive
      # for the delta range used in these tests.
      h = math.fmod(h + 10.0, 1.0)
      r, g, b = colorsys.hsv_to_rgb(h, s, v)
      y_v[i][0] = r
      y_v[i][1] = g
      y_v[i][2] = b
    return y_v.reshape(x_np.shape)

  def _adjustHueTf(self, x_np, delta_h):
    """Runs image_ops.adjust_hue and returns the evaluated result."""
    with self.cached_session():
      x = constant_op.constant(x_np)
      y = image_ops.adjust_hue(x, delta_h)
      y_tf = self.evaluate(y)
    return y_tf

  def testAdjustRandomHue(self):
    """Random images/deltas agree with the colorsys reference.

    The test styles duplicate channels to exercise the degenerate HSV
    cases (equal channel pairs and pure gray).
    """
    x_shapes = [
        [2, 2, 3],
        [4, 2, 3],
        [2, 4, 3],
        [2, 5, 3],
        [1000, 1, 3],
    ]
    test_styles = [
        "all_random",
        "rg_same",
        "rb_same",
        "gb_same",
        "rgb_same",
    ]
    for x_shape in x_shapes:
      for test_style in test_styles:
        x_np = np.random.rand(*x_shape) * 255.
        delta_h = np.random.rand() * 2.0 - 1.0
        if test_style == "all_random":
          pass
        elif test_style == "rg_same":
          x_np[..., 1] = x_np[..., 0]
        elif test_style == "rb_same":
          x_np[..., 2] = x_np[..., 0]
        elif test_style == "gb_same":
          x_np[..., 2] = x_np[..., 1]
        elif test_style == "rgb_same":
          x_np[..., 1] = x_np[..., 0]
          x_np[..., 2] = x_np[..., 0]
        else:
          raise AssertionError("Invalid test style: %s" % (test_style))
        y_np = self._adjustHueNp(x_np, delta_h)
        y_tf = self._adjustHueTf(x_np, delta_h)
        self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)

  def testInvalidShapes(self):
    """Rank-<3 and wrong-channel inputs should be rejected.

    Currently disabled by the `fused` guard below.
    """
    fused = False
    if not fused:
      # The tests are known to pass with the fused adjust_hue. We will enable
      # them when the fused implementation is the default.
      return
    # NOTE(review): everything below is unreachable while the guard above
    # returns early; the second `fused = False` assignment is dead code.
    x_np = np.random.rand(2, 3) * 255.
    delta_h = np.random.rand() * 2.0 - 1.0
    fused = False
    with self.assertRaisesRegex(ValueError, "Shape must be at least rank 3"):
      self._adjustHueTf(x_np, delta_h)
    x_np = np.random.rand(4, 2, 4) * 255.
    delta_h = np.random.rand() * 2.0 - 1.0
    with self.assertRaisesOpError("input must have 3 channels"):
      self._adjustHueTf(x_np, delta_h)

  def testInvalidDeltaValue(self):
    """Delta value must be in the interval [-1, 1]."""
    # The eager-only range check is tested here; in graph mode the check
    # does not fire at op-construction time.
    if not context.executing_eagerly():
      self.skipTest("Eager mode only")
    else:
      with self.cached_session():
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)

        x = constant_op.constant(x_np, shape=x_np.shape)
        err_msg = r"delta must be in the interval \[-1, 1\]"
        with self.assertRaisesRegex(
            (ValueError, errors.InvalidArgumentError), err_msg):
          image_ops.adjust_hue(x, delta=1.5)
class FlipImageBenchmark(test.Benchmark):
  """Benchmarks for flip_left_right and random_flip_left_right."""

  def _benchmarkFlipLeftRight(self, device, cpu_count):
    """Times flip_left_right on a single 299x299x3 image.

    Args:
      device: device string to place the op on (e.g. "/cpu:0").
      cpu_count: intra-op thread count, or None for the default.
    """
    image_shape = [299, 299, 3]
    warmup_rounds = 100
    benchmark_rounds = 1000
    config = config_pb2.ConfigProto()
    if cpu_count is not None:
      config.inter_op_parallelism_threads = 1
      config.intra_op_parallelism_threads = cpu_count
    with session.Session("", graph=ops.Graph(), config=config) as sess:
      with ops.device(device):
        inputs = variables.Variable(
            random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
            trainable=False,
            dtype=dtypes.float32)
        run_op = image_ops.flip_left_right(inputs)
        self.evaluate(variables.global_variables_initializer())
        # The timer starts on the iteration right after the warmup rounds,
        # so only the final benchmark_rounds evaluations are measured.
        for i in range(warmup_rounds + benchmark_rounds):
          if i == warmup_rounds:
            start = time.time()
          self.evaluate(run_op)
    end = time.time()
    step_time = (end - start) / benchmark_rounds
    # NOTE(review): with cpu_count=None the tag becomes e.g. "/cpu:0__all"
    # (double underscore) — presumably intentional, kept for continuity of
    # reported benchmark names.
    tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
    print("benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us" %
          (tag, step_time * 1e6))
    self.report_benchmark(
        name="benchmarkFlipLeftRight_299_299_3_%s" % (tag),
        iters=benchmark_rounds,
        wall_time=step_time)

  def _benchmarkRandomFlipLeftRight(self, device, cpu_count):
    """Times random_flip_left_right on a single 299x299x3 image."""
    image_shape = [299, 299, 3]
    warmup_rounds = 100
    benchmark_rounds = 1000
    config = config_pb2.ConfigProto()
    if cpu_count is not None:
      config.inter_op_parallelism_threads = 1
      config.intra_op_parallelism_threads = cpu_count
    with session.Session("", graph=ops.Graph(), config=config) as sess:
      with ops.device(device):
        inputs = variables.Variable(
            random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
            trainable=False,
            dtype=dtypes.float32)
        run_op = image_ops.random_flip_left_right(inputs)
        self.evaluate(variables.global_variables_initializer())
        # Timer starts after the warmup rounds (see _benchmarkFlipLeftRight).
        for i in range(warmup_rounds + benchmark_rounds):
          if i == warmup_rounds:
            start = time.time()
          self.evaluate(run_op)
    end = time.time()
    step_time = (end - start) / benchmark_rounds
    tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
    print("benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us" %
          (tag, step_time * 1e6))
    self.report_benchmark(
        name="benchmarkRandomFlipLeftRight_299_299_3_%s" % (tag),
        iters=benchmark_rounds,
        wall_time=step_time)

  def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count):
    """Times random_flip_left_right on a batch of 16 299x299x3 images."""
    image_shape = [16, 299, 299, 3]
    warmup_rounds = 100
    benchmark_rounds = 1000
    config = config_pb2.ConfigProto()
    if cpu_count is not None:
      config.inter_op_parallelism_threads = 1
      config.intra_op_parallelism_threads = cpu_count
    with session.Session("", graph=ops.Graph(), config=config) as sess:
      with ops.device(device):
        inputs = variables.Variable(
            random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
            trainable=False,
            dtype=dtypes.float32)
        run_op = image_ops.random_flip_left_right(inputs)
        self.evaluate(variables.global_variables_initializer())
        # Timer starts after the warmup rounds (see _benchmarkFlipLeftRight).
        for i in range(warmup_rounds + benchmark_rounds):
          if i == warmup_rounds:
            start = time.time()
          self.evaluate(run_op)
    end = time.time()
    step_time = (end - start) / benchmark_rounds
    tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
    print("benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: "
          "%.2f us" %
          (tag, step_time * 1e6))
    self.report_benchmark(
        name="benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s" % (tag),
        iters=benchmark_rounds,
        wall_time=step_time)

  def benchmarkFlipLeftRightCpu1(self):
    self._benchmarkFlipLeftRight("/cpu:0", 1)

  def benchmarkFlipLeftRightCpuAll(self):
    self._benchmarkFlipLeftRight("/cpu:0", None)

  def benchmarkFlipLeftRightGpu(self):
    self._benchmarkFlipLeftRight(test.gpu_device_name(), None)

  def benchmarkRandomFlipLeftRightCpu1(self):
    self._benchmarkRandomFlipLeftRight("/cpu:0", 1)

  def benchmarkRandomFlipLeftRightCpuAll(self):
    self._benchmarkRandomFlipLeftRight("/cpu:0", None)

  def benchmarkRandomFlipLeftRightGpu(self):
    self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)

  def benchmarkBatchedRandomFlipLeftRightCpu1(self):
    self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", 1)

  def benchmarkBatchedRandomFlipLeftRightCpuAll(self):
    self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", None)

  def benchmarkBatchedRandomFlipLeftRightGpu(self):
    self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None)
class AdjustHueBenchmark(test.Benchmark):
  """Benchmarks for image_ops.adjust_hue."""

  def _benchmarkAdjustHue(self, device, cpu_count):
    """Times adjust_hue on a single 299x299x3 image.

    Args:
      device: device string the benchmark session targets.
      cpu_count: intra-op thread count, or None for the default.
    """
    image_shape = [299, 299, 3]
    warmup_rounds = 100
    benchmark_rounds = 1000
    config = config_pb2.ConfigProto()
    if cpu_count is not None:
      config.inter_op_parallelism_threads = 1
      config.intra_op_parallelism_threads = cpu_count
    with self.benchmark_session(config=config, device=device) as sess:
      inputs = variables.Variable(
          random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
          trainable=False,
          dtype=dtypes.float32)
      delta = constant_op.constant(0.1, dtype=dtypes.float32)
      outputs = image_ops.adjust_hue(inputs, delta)
      # group() discards the output value so only op execution is timed.
      run_op = control_flow_ops.group(outputs)
      self.evaluate(variables.global_variables_initializer())
      # The timer starts on the iteration right after the warmup rounds.
      for i in range(warmup_rounds + benchmark_rounds):
        if i == warmup_rounds:
          start = time.time()
        self.evaluate(run_op)
    end = time.time()
    step_time = (end - start) / benchmark_rounds
    tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
    print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" %
          (tag, step_time * 1e6))
    self.report_benchmark(
        name="benchmarkAdjustHue_299_299_3_%s" % (tag),
        iters=benchmark_rounds,
        wall_time=step_time)

  def benchmarkAdjustHueCpu1(self):
    self._benchmarkAdjustHue("/cpu:0", 1)

  def benchmarkAdjustHueCpuAll(self):
    self._benchmarkAdjustHue("/cpu:0", None)

  def benchmarkAdjustHueGpu(self):
    self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
  """Benchmarks for image_ops.adjust_saturation."""

  def _benchmarkAdjustSaturation(self, device, cpu_count):
    """Times adjust_saturation on a single 299x299x3 image.

    Args:
      device: device string the benchmark session targets.
      cpu_count: intra-op thread count, or None for the default.
    """
    image_shape = [299, 299, 3]
    warmup_rounds = 100
    benchmark_rounds = 1000
    config = config_pb2.ConfigProto()
    if cpu_count is not None:
      config.inter_op_parallelism_threads = 1
      config.intra_op_parallelism_threads = cpu_count
    with self.benchmark_session(config=config, device=device) as sess:
      inputs = variables.Variable(
          random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
          trainable=False,
          dtype=dtypes.float32)
      delta = constant_op.constant(0.1, dtype=dtypes.float32)
      outputs = image_ops.adjust_saturation(inputs, delta)
      # group() discards the output value so only op execution is timed.
      run_op = control_flow_ops.group(outputs)
      self.evaluate(variables.global_variables_initializer())
      # Unlike AdjustHueBenchmark, warmup and timing use separate loops;
      # the measured work is the same benchmark_rounds evaluations.
      for _ in range(warmup_rounds):
        self.evaluate(run_op)
      start = time.time()
      for _ in range(benchmark_rounds):
        self.evaluate(run_op)
    end = time.time()
    step_time = (end - start) / benchmark_rounds
    tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
    print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" %
          (tag, step_time * 1e6))
    self.report_benchmark(
        name="benchmarkAdjustSaturation_299_299_3_%s" % (tag),
        iters=benchmark_rounds,
        wall_time=step_time)

  def benchmarkAdjustSaturationCpu1(self):
    self._benchmarkAdjustSaturation("/cpu:0", 1)

  def benchmarkAdjustSaturationCpuAll(self):
    self._benchmarkAdjustSaturation("/cpu:0", None)

  def benchmarkAdjustSaturationGpu(self):
    self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
  """Benchmarks resize_bilinear from various source sizes to 299x299."""

  def _benchmarkResize(self, image_size, num_channels):
    """Builds a serialized chain of resize ops and times one run.

    Args:
      image_size: (height, width) of the source image.
      num_channels: number of channels in the source image.
    """
    batch_size = 1
    num_ops = 1000
    img = variables.Variable(
        random_ops.random_normal(
            [batch_size, image_size[0], image_size[1], num_channels]),
        name="img")

    deps = []
    # Chain each resize after the previous one via control_dependencies so
    # all num_ops resizes execute sequentially in a single session run.
    for _ in range(num_ops):
      with ops.control_dependencies(deps):
        resize_op = image_ops.resize_bilinear(
            img, [299, 299], align_corners=False)
        deps = [resize_op]
      benchmark_op = control_flow_ops.group(*deps)

    with self.benchmark_session() as sess:
      self.evaluate(variables.global_variables_initializer())
      results = self.run_op_benchmark(
          sess,
          benchmark_op,
          name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1],
                                              num_channels)))
      print("%s : %.2f ms/img" %
            (results["name"],
             1000 * results["wall_time"] / (batch_size * num_ops)))

  def benchmarkSimilar3Channel(self):
    self._benchmarkResize((183, 229), 3)

  def benchmarkScaleUp3Channel(self):
    self._benchmarkResize((141, 186), 3)

  def benchmarkScaleDown3Channel(self):
    self._benchmarkResize((749, 603), 3)

  def benchmarkSimilar1Channel(self):
    self._benchmarkResize((183, 229), 1)

  def benchmarkScaleUp1Channel(self):
    self._benchmarkResize((141, 186), 1)

  def benchmarkScaleDown1Channel(self):
    self._benchmarkResize((749, 603), 1)
class ResizeBicubicBenchmark(test.Benchmark):
  """Benchmarks resize_bicubic from various source sizes to 299x299."""

  def _benchmarkResize(self, image_size, num_channels):
    """Builds a serialized chain of resize ops and times one run.

    Args:
      image_size: (height, width) of the source image.
      num_channels: number of channels in the source image.
    """
    batch_size = 1
    num_ops = 1000
    img = variables.Variable(
        random_ops.random_normal(
            [batch_size, image_size[0], image_size[1], num_channels]),
        name="img")

    deps = []
    # Chain each resize after the previous one via control_dependencies so
    # all num_ops resizes execute sequentially in a single session run.
    for _ in range(num_ops):
      with ops.control_dependencies(deps):
        resize_op = image_ops.resize_bicubic(
            img, [299, 299], align_corners=False)
        deps = [resize_op]
      benchmark_op = control_flow_ops.group(*deps)

    with self.benchmark_session() as sess:
      self.evaluate(variables.global_variables_initializer())
      results = self.run_op_benchmark(
          sess,
          benchmark_op,
          min_iters=20,
          name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
                                             num_channels)))
      print("%s : %.2f ms/img" %
            (results["name"],
             1000 * results["wall_time"] / (batch_size * num_ops)))

  def benchmarkSimilar3Channel(self):
    self._benchmarkResize((183, 229), 3)

  def benchmarkScaleUp3Channel(self):
    self._benchmarkResize((141, 186), 3)

  def benchmarkScaleDown3Channel(self):
    self._benchmarkResize((749, 603), 3)

  def benchmarkSimilar1Channel(self):
    self._benchmarkResize((183, 229), 1)

  def benchmarkScaleUp1Channel(self):
    self._benchmarkResize((141, 186), 1)

  def benchmarkScaleDown1Channel(self):
    self._benchmarkResize((749, 603), 1)

  def benchmarkSimilar4Channel(self):
    self._benchmarkResize((183, 229), 4)

  def benchmarkScaleUp4Channel(self):
    self._benchmarkResize((141, 186), 4)

  def benchmarkScaleDown4Channel(self):
    self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
  """Benchmarks resize_area from various source sizes to 299x299."""

  def _benchmarkResize(self, image_size, num_channels):
    """Builds a serialized chain of resize ops and times one run.

    Args:
      image_size: (height, width) of the source image.
      num_channels: number of channels in the source image.
    """
    batch_size = 1
    num_ops = 1000
    img = variables.Variable(
        random_ops.random_normal(
            [batch_size, image_size[0], image_size[1], num_channels]),
        name="img")

    deps = []
    # Chain each resize after the previous one via control_dependencies so
    # all num_ops resizes execute sequentially in a single session run.
    for _ in range(num_ops):
      with ops.control_dependencies(deps):
        resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
        deps = [resize_op]
      benchmark_op = control_flow_ops.group(*deps)

    with self.benchmark_session() as sess:
      self.evaluate(variables.global_variables_initializer())
      results = self.run_op_benchmark(
          sess,
          benchmark_op,
          name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1],
                                          num_channels)))
      print("%s : %.2f ms/img" %
            (results["name"],
             1000 * results["wall_time"] / (batch_size * num_ops)))

  def benchmarkSimilar3Channel(self):
    self._benchmarkResize((183, 229), 3)

  def benchmarkScaleUp3Channel(self):
    self._benchmarkResize((141, 186), 3)

  def benchmarkScaleDown3Channel(self):
    self._benchmarkResize((749, 603), 3)

  def benchmarkSimilar1Channel(self):
    self._benchmarkResize((183, 229), 1)

  def benchmarkScaleUp1Channel(self):
    self._benchmarkResize((141, 186), 1)

  def benchmarkScaleDown1Channel(self):
    self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.adjust_saturation against a colorsys reference."""

  def testHalfSaturation(self):
    """Saturation factor 0.5 matches precomputed expected pixel values."""
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)

    saturation_factor = 0.5
    y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

    with self.cached_session():
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.adjust_saturation(x, saturation_factor)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def testTwiceSaturation(self):
    """Saturation factor 2.0 matches precomputed expected pixel values."""
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)

    saturation_factor = 2.0
    y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

    with self.cached_session():
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.adjust_saturation(x, saturation_factor)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def testBatchSaturation(self):
    """Same pixel data as testHalfSaturation, but with a batch dim."""
    x_shape = [2, 1, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)

    saturation_factor = 0.5
    y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

    with self.cached_session():
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.adjust_saturation(x, saturation_factor)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)

  def _adjustSaturationNp(self, x_np, scale):
    """Reference saturation adjustment computed per pixel with colorsys."""
    self.assertEqual(x_np.shape[-1], 3)
    x_v = x_np.reshape([-1, 3])
    y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
    channel_count = x_v.shape[0]
    for i in range(channel_count):
      r = x_v[i][0]
      g = x_v[i][1]
      b = x_v[i][2]
      h, s, v = colorsys.rgb_to_hsv(r, g, b)
      s *= scale
      # Clamp the scaled saturation back into the valid [0, 1] range.
      s = min(1.0, max(0.0, s))
      r, g, b = colorsys.hsv_to_rgb(h, s, v)
      y_v[i][0] = r
      y_v[i][1] = g
      y_v[i][2] = b
    return y_v.reshape(x_np.shape)

  def testAdjustRandomSaturation(self):
    """Random images/scales agree with the colorsys reference.

    The test styles duplicate channels to exercise the degenerate HSV
    cases (equal channel pairs and pure gray).
    """
    x_shapes = [
        [2, 2, 3],
        [4, 2, 3],
        [2, 4, 3],
        [2, 5, 3],
        [1000, 1, 3],
    ]
    test_styles = [
        "all_random",
        "rg_same",
        "rb_same",
        "gb_same",
        "rgb_same",
    ]
    with self.cached_session():
      for x_shape in x_shapes:
        for test_style in test_styles:
          x_np = np.random.rand(*x_shape) * 255.
          scale = np.random.rand()
          if test_style == "all_random":
            pass
          elif test_style == "rg_same":
            x_np[..., 1] = x_np[..., 0]
          elif test_style == "rb_same":
            x_np[..., 2] = x_np[..., 0]
          elif test_style == "gb_same":
            x_np[..., 2] = x_np[..., 1]
          elif test_style == "rgb_same":
            x_np[..., 1] = x_np[..., 0]
            x_np[..., 2] = x_np[..., 0]
          else:
            raise AssertionError("Invalid test style: %s" % (test_style))
          y_baseline = self._adjustSaturationNp(x_np, scale)
          y_fused = self.evaluate(image_ops.adjust_saturation(x_np, scale))
          self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
  def testInvolutionLeftRight(self):
    """Flipping left-right twice returns the original image."""
    x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
    with self.cached_session():
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, x_np)
  def testInvolutionLeftRightWithBatch(self):
    """Flipping left-right twice is the identity on a batched input."""
    x_np = np.array(
        [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    with self.cached_session():
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, x_np)
  def testLeftRight(self):
    """flip_left_right reverses each row of a 3-D image."""
    x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
    y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
    with self.cached_session():
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_left_right(x_tf)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
  def testLeftRightWithBatch(self):
    """flip_left_right reverses each row of every image in a batch."""
    x_np = np.array(
        [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    y_np = np.array(
        [[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    with self.cached_session():
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_left_right(x_tf)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
  def testRandomFlipLeftRightStateful(self):
    """Stateful random flip produces both outcomes with plausible frequency."""
    # Test random flip with single seed (stateful).
    with ops.Graph().as_default():
      x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
      y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
      seed = 42

      with self.cached_session():
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y = image_ops.random_flip_left_right(x_tf, seed=seed)
        self.assertTrue(y.op.name.startswith("random_flip_left_right"))

        count_flipped = 0
        count_unflipped = 0
        # Each evaluation re-samples the flip decision from the stateful RNG;
        # the first pixel (1 vs 3) identifies which outcome occurred.
        for _ in range(100):
          y_tf = self.evaluate(y)
          if y_tf[0][0] == 1:
            self.assertAllEqual(y_tf, x_np)
            count_unflipped += 1
          else:
            self.assertAllEqual(y_tf, y_np)
            count_flipped += 1

        # 100 trials
        # Mean: 50
        # Std Dev: ~5
        # Six Sigma: 50 - (5 * 6) = 20
        self.assertGreaterEqual(count_flipped, 20)
        self.assertGreaterEqual(count_unflipped, 20)
  def testRandomFlipLeftRight(self):
    """Seeded random flips yield a deterministic flip/no-flip split."""
    x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
    y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
    with self.cached_session():
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      count_flipped = 0
      count_unflipped = 0
      # One evaluation per seed; the exact counts below are deterministic
      # because each iteration is fully determined by its seed.
      for seed in range(100):
        y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed))
        if y_tf[0][0] == 1:
          self.assertAllEqual(y_tf, x_np)
          count_unflipped += 1
        else:
          self.assertAllEqual(y_tf, y_np)
          count_flipped += 1

      self.assertEqual(count_flipped, 45)
      self.assertEqual(count_unflipped, 55)
  # TODO(b/162345082): stateless random op generates different random number
  # with xla_gpu. Update tests such that there is a single ground truth result
  # to test against.
  @parameterized.named_parameters(
      ("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right),
      ("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down),
  )
  def testRandomFlipStateless(self, func):
    """Stateless random flips are deterministic for a fixed seed."""
    with test_util.use_gpu():
      x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
      y_np = np.array([[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([2, 3, 1])
      # The parameterized test name (via self.id()) selects the expected
      # flipped image for the up-down variant.
      if "RandomFlipUpDown" in self.id():
        y_np = np.array(
            [[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])

      x_tf = constant_op.constant(x_np, shape=x_np.shape)

      iterations = 2
      flip_counts = [None for _ in range(iterations)]
      flip_sequences = ["" for _ in range(iterations)]
      test_seed = (1, 2)
      split_seeds = stateless_random_ops.split(test_seed, 10)
      seeds_list = self.evaluate(split_seeds)
      # Run the same seed list twice; flip decisions must repeat exactly.
      for i in range(iterations):
        count_flipped = 0
        count_unflipped = 0
        flip_seq = ""
        for seed in seeds_list:
          y_tf = func(x_tf, seed=seed)
          y_tf_eval = self.evaluate(y_tf)
          if y_tf_eval[0][0] == 1:
            self.assertAllEqual(y_tf_eval, x_np)
            count_unflipped += 1
            flip_seq += "U"
          else:
            self.assertAllEqual(y_tf_eval, y_np)
            count_flipped += 1
            flip_seq += "F"

        flip_counts[i] = (count_flipped, count_unflipped)
        flip_sequences[i] = flip_seq

      # Verify that results are deterministic.
      for i in range(1, iterations):
        self.assertAllEqual(flip_counts[0], flip_counts[i])
        self.assertAllEqual(flip_sequences[0], flip_sequences[i])
  # TODO(b/162345082): stateless random op generates different random number
  # with xla_gpu. Update tests such that there is a single ground truth result
  # to test against.
  @parameterized.named_parameters(
      ("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right),
      ("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down)
  )
  def testRandomFlipStatelessWithBatch(self, func):
    """Batched stateless random flips are deterministic for a fixed seed."""
    with test_util.use_gpu():
      batch_size = 16

      # create single item of test data
      x_np_raw = np.array(
          [[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([1, 2, 3, 1])
      y_np_raw = np.array(
          [[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([1, 2, 3, 1])
      # The parameterized test name (via self.id()) selects the expected
      # flipped image for the up-down variant.
      if "RandomFlipUpDown" in self.id():
        y_np_raw = np.array(
            [[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([1, 2, 3, 1])

      # create batched test data
      x_np = np.vstack([x_np_raw for _ in range(batch_size)])
      y_np = np.vstack([y_np_raw for _ in range(batch_size)])

      x_tf = constant_op.constant(x_np, shape=x_np.shape)

      iterations = 2
      flip_counts = [None for _ in range(iterations)]
      flip_sequences = ["" for _ in range(iterations)]
      test_seed = (1, 2)
      split_seeds = stateless_random_ops.split(test_seed, 10)
      seeds_list = self.evaluate(split_seeds)
      # Run the same seed list twice; per-element flip decisions must repeat.
      for i in range(iterations):
        count_flipped = 0
        count_unflipped = 0
        flip_seq = ""
        for seed in seeds_list:
          y_tf = func(x_tf, seed=seed)
          y_tf_eval = self.evaluate(y_tf)
          for j in range(batch_size):
            if y_tf_eval[j][0][0] == 1:
              self.assertAllEqual(y_tf_eval[j], x_np[j])
              count_unflipped += 1
              flip_seq += "U"
            else:
              self.assertAllEqual(y_tf_eval[j], y_np[j])
              count_flipped += 1
              flip_seq += "F"

        flip_counts[i] = (count_flipped, count_unflipped)
        flip_sequences[i] = flip_seq

      for i in range(1, iterations):
        self.assertAllEqual(flip_counts[0], flip_counts[i])
        self.assertAllEqual(flip_sequences[0], flip_sequences[i])
def testRandomFlipLeftRightWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [3, 2, 1]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed))
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
self.assertEqual(count_flipped, 772)
self.assertEqual(count_unflipped, 828)
  def testInvolutionUpDown(self):
    """Flipping up-down twice returns the original image."""
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
    with self.cached_session():
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, x_np)
  def testInvolutionUpDownWithBatch(self):
    """Flipping up-down twice is the identity on a batched input."""
    x_np = np.array(
        [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    with self.cached_session():
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, x_np)
  def testUpDown(self):
    """flip_up_down reverses the row order of a 3-D image."""
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
    y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
    with self.cached_session():
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_up_down(x_tf)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
  def testUpDownWithBatch(self):
    """flip_up_down reverses the row order of every image in a batch."""
    x_np = np.array(
        [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    y_np = np.array(
        [[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
        dtype=np.uint8).reshape([2, 2, 3, 1])
    with self.cached_session():
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.flip_up_down(x_tf)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
  def testRandomFlipUpDownStateful(self):
    """Stateful random up-down flip yields both outcomes with plausible frequency."""
    # Test random flip with single seed (stateful).
    with ops.Graph().as_default():
      x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
      y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
      seed = 42

      with self.cached_session():
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y = image_ops.random_flip_up_down(x_tf, seed=seed)
        self.assertTrue(y.op.name.startswith("random_flip_up_down"))
        count_flipped = 0
        count_unflipped = 0
        # Each evaluation re-samples the flip decision from the stateful RNG;
        # the first pixel (1 vs 4) identifies which outcome occurred.
        for _ in range(100):
          y_tf = self.evaluate(y)
          if y_tf[0][0] == 1:
            self.assertAllEqual(y_tf, x_np)
            count_unflipped += 1
          else:
            self.assertAllEqual(y_tf, y_np)
            count_flipped += 1

        # 100 trials
        # Mean: 50
        # Std Dev: ~5
        # Six Sigma: 50 - (5 * 6) = 20
        self.assertGreaterEqual(count_flipped, 20)
        self.assertGreaterEqual(count_unflipped, 20)
  def testRandomFlipUpDown(self):
    """Seeded random flips over seeds 0..99 yield exact, fixed counts."""
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
    y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
    with self.cached_session():
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      count_flipped = 0
      count_unflipped = 0
      for seed in range(100):
        y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed))
        # The top-left pixel (1 vs 4) distinguishes unflipped from flipped.
        if y_tf[0][0] == 1:
          self.assertAllEqual(y_tf, x_np)
          count_unflipped += 1
        else:
          self.assertAllEqual(y_tf, y_np)
          count_flipped += 1
      # The flip decision is a deterministic function of the seed, so the
      # totals over seeds 0..99 are exact rather than statistical.
      self.assertEqual(count_flipped, 45)
      self.assertEqual(count_unflipped, 55)
  def testRandomFlipUpDownWithBatch(self):
    """Random flip makes an independent decision for each batch element."""
    batch_size = 16
    # NOTE(review): `seed` is immediately shadowed by the loop variable
    # below, so this initial value of 42 is never used.
    seed = 42
    # create single item of test data
    x_np_raw = np.array(
        [[1, 2, 3], [4, 5, 6]], dtype=np.uint8
    ).reshape([1, 2, 3, 1])
    y_np_raw = np.array(
        [[4, 5, 6], [1, 2, 3]], dtype=np.uint8
    ).reshape([1, 2, 3, 1])
    # create batched test data
    x_np = np.vstack([x_np_raw for _ in range(batch_size)])
    y_np = np.vstack([y_np_raw for _ in range(batch_size)])
    with self.cached_session():
      x_tf = constant_op.constant(x_np, shape=x_np.shape)
      count_flipped = 0
      count_unflipped = 0
      for seed in range(100):
        y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed))
        # check every element of the batch
        for i in range(batch_size):
          if y_tf[i][0][0] == 1:
            self.assertAllEqual(y_tf[i], x_np[i])
            count_unflipped += 1
          else:
            self.assertAllEqual(y_tf[i], y_np[i])
            count_flipped += 1
      # Decisions are deterministic per (seed, batch index), so the totals
      # over 100 seeds x 16 elements are exact.
      self.assertEqual(count_flipped, 772)
      self.assertEqual(count_unflipped, 828)
def testInvolutionTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
dtype=np.uint8).reshape([2, 3, 2, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
  def testPartialShapes(self):
    """Shape inference for flip/transpose/rot90 with partially known shapes."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      p_unknown_rank = array_ops.placeholder(dtypes.uint8)
      p_unknown_dims_3 = array_ops.placeholder(
          dtypes.uint8, shape=[None, None, None])
      p_unknown_dims_4 = array_ops.placeholder(
          dtypes.uint8, shape=[None, None, None, None])
      p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
      p_unknown_batch = array_ops.placeholder(
          dtypes.uint8, shape=[None, 64, 64, 3])
      p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
      p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
      # Ops that support 3-D input.
      for op in [
          image_ops.flip_left_right, image_ops.flip_up_down,
          image_ops.random_flip_left_right, image_ops.random_flip_up_down,
          image_ops.transpose, image_ops.rot90
      ]:
        transformed_unknown_rank = op(p_unknown_rank)
        self.assertIsNone(transformed_unknown_rank.get_shape().ndims)
        transformed_unknown_dims_3 = op(p_unknown_dims_3)
        self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)
        transformed_unknown_width = op(p_unknown_width)
        self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
        # A statically-known zero-sized dimension is rejected at graph time.
        with self.assertRaisesRegex(ValueError, "must be > 0"):
          op(p_zero_dim)
      # Ops that support 4-D (batched) input.
      for op in [
          image_ops.flip_left_right, image_ops.flip_up_down,
          image_ops.random_flip_left_right, image_ops.random_flip_up_down,
          image_ops.transpose, image_ops.rot90
      ]:
        transformed_unknown_dims_4 = op(p_unknown_dims_4)
        self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
        transformed_unknown_batch = op(p_unknown_batch)
        self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
        # Rank-2 input is neither a single image nor a batch.
        with self.assertRaisesRegex(ValueError,
                                    "must be at least three-dimensional"):
          op(p_wrong_rank)
def testRot90GroupOrder(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session():
rotated = image
for _ in range(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90GroupOrderWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session():
rotated = image
for _ in range(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session():
for k in range(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(
y_np, self.evaluate(image_ops.rot90(image, k)))
def testRot90NumpyEquivalenceWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session():
for k in range(4):
y_np = np.rot90(image, k=k, axes=(1, 2))
self.assertAllEqual(
y_np, self.evaluate(image_ops.rot90(image, k)))
  def testFlipImageUnknownShape(self):
    """flip_left_right works inside tf.data.map, where shapes are dynamic."""
    expected_output = constant_op.constant([[[[3, 4, 5], [0, 1, 2]],
                                             [[9, 10, 11], [6, 7, 8]]]])
    def generator():
      image_input = np.array(
          [[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], np.int32)
      yield image_input
    # Route the image through a dataset so the op sees a dynamic shape.
    dataset = dataset_ops.Dataset.from_generator(
        generator,
        output_types=dtypes.int32,
        output_shapes=tensor_shape.TensorShape([1, 2, 2, 3]))
    dataset = dataset.map(image_ops.flip_left_right)
    image_flipped_via_dataset_map = get_single_element.get_single_element(
        dataset.take(1))
    self.assertAllEqual(image_flipped_via_dataset_map, expected_output)
class AdjustContrastTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.adjust_contrast."""

  def _testContrast(self, x_np, y_np, contrast_factor):
    """Asserts adjust_contrast(x_np, contrast_factor) is close to y_np."""
    with self.cached_session():
      x = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.adjust_contrast(x, contrast_factor)
      y_tf = self.evaluate(y)
      self.assertAllClose(y_tf, y_np, 1e-6)

  def testDoubleContrastUint8(self):
    """Doubling contrast on uint8 input clamps results to [0, 255]."""
    x_shape = [1, 2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    self._testContrast(x_np, y_np, contrast_factor=2.0)

  def testDoubleContrastFloat(self):
    """Doubling contrast on float input is not clamped (values can leave [0, 1])."""
    x_shape = [1, 2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.float64).reshape(x_shape) / 255.
    y_data = [
        -45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
        134.75, 409.25, -116.5
    ]
    y_np = np.array(y_data, dtype=np.float64).reshape(x_shape) / 255.
    self._testContrast(x_np, y_np, contrast_factor=2.0)

  def testHalfContrastUint8(self):
    """Halving contrast pulls uint8 pixel values toward the mean."""
    x_shape = [1, 2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    self._testContrast(x_np, y_np, contrast_factor=0.5)

  def testBatchDoubleContrast(self):
    """Contrast is adjusted per image within a batch, not over the batch."""
    x_shape = [2, 1, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    self._testContrast(x_np, y_np, contrast_factor=2.0)

  def _adjustContrastNp(self, x_np, contrast_factor):
    """Reference implementation: scale deviations from the spatial mean."""
    mean = np.mean(x_np, (1, 2), keepdims=True)
    y_np = mean + contrast_factor * (x_np - mean)
    return y_np

  def _adjustContrastTf(self, x_np, contrast_factor):
    """Runs image_ops.adjust_contrast and returns the evaluated array."""
    with self.cached_session():
      x = constant_op.constant(x_np)
      y = image_ops.adjust_contrast(x, contrast_factor)
      y_tf = self.evaluate(y)
      return y_tf

  def testRandomContrast(self):
    """TF output matches the NumPy reference on random data and factors."""
    x_shapes = [
        [1, 2, 2, 3],
        [2, 1, 2, 3],
        [1, 2, 2, 3],
        [2, 5, 5, 3],
        [2, 1, 1, 3],
    ]
    for x_shape in x_shapes:
      x_np = np.random.rand(*x_shape) * 255.
      contrast_factor = np.random.rand() * 2.0 + 0.1
      y_np = self._adjustContrastNp(x_np, contrast_factor)
      y_tf = self._adjustContrastTf(x_np, contrast_factor)
      self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)

  def testContrastFactorShape(self):
    """A non-scalar contrast_factor is rejected in eager or graph mode."""
    x_shape = [1, 2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                                "contrast_factor must be scalar|"
                                "Shape must be rank 0 but is rank 1"):
      image_ops.adjust_contrast(x_np, [2.0])

  @test_util.run_in_graph_and_eager_modes
  def testDeterminismUnimplementedExceptionThrowing(self):
    """Test d9m-unimplemented exception-throwing when op-determinism is enabled.

    This test depends upon other tests, tests which do not enable
    op-determinism, to ensure that determinism-unimplemented exceptions are not
    erroneously thrown when op-determinism is not enabled.
    """
    if test_util.is_xla_enabled():
      self.skipTest('XLA implementation does not raise exception')
    with self.session(), test_util.deterministic_ops():
      input_shape = (1, 2, 2, 1)
      on_gpu = len(tf_config.list_physical_devices("GPU"))
      # AdjustContrast seems to now be inaccessible via the Python API.
      # AdjustContrastv2 only supports float16 and float32 on GPU, and other
      # types are converted to and from float32 at the Python level before
      # AdjustContrastv2 is called.
      dtypes_to_test = [
          dtypes.uint8, dtypes.int8, dtypes.int16, dtypes.int32, dtypes.float32,
          dtypes.float64
      ]
      if on_gpu:
        dtypes_to_test.append(dtypes.float16)
        ctx_mgr = self.assertRaisesRegex(
            errors.UnimplementedError,
            "A deterministic GPU implementation of AdjustContrastv2 is not" +
            " currently available.")
      else:
        # On CPU the op is deterministic, so no exception is expected.
        ctx_mgr = contextlib.suppress()
      for dtype in dtypes_to_test:
        input_images = array_ops.zeros(input_shape, dtype=dtype)
        contrast_factor = 1.
        with ctx_mgr:
          output_images = image_ops.adjust_contrast(input_images,
                                                    contrast_factor)
          self.evaluate(output_images)
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.adjust_brightness."""

  def _testBrightness(self, x_np, y_np, delta, tol=1e-6):
    """Asserts adjust_brightness(x_np, delta) is close to y_np."""
    with self.cached_session():
      image = constant_op.constant(x_np, shape=x_np.shape)
      adjusted = image_ops.adjust_brightness(image, delta)
      self.assertAllClose(self.evaluate(adjusted), y_np, tol)

  def testPositiveDeltaUint8(self):
    """uint8 pixels brightened by 10/255 saturate at 255."""
    shape = [2, 2, 3]
    src = np.array([0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1],
                   dtype=np.uint8).reshape(shape)
    want = np.array([10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11],
                    dtype=np.uint8).reshape(shape)
    self._testBrightness(src, want, delta=10. / 255.)

  def testPositiveDeltaFloat32(self):
    """float32 pixels are brightened without clamping (255 becomes 265)."""
    shape = [2, 2, 3]
    src = np.array([0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1],
                   dtype=np.float32).reshape(shape) / 255.
    want = np.array([10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11],
                    dtype=np.float32).reshape(shape) / 255.
    self._testBrightness(src, want, delta=10. / 255.)

  def testPositiveDeltaFloat16(self):
    """float16 behaves like float32, checked with a looser tolerance."""
    shape = [2, 2, 3]
    src = np.array([0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1],
                   dtype=np.float16).reshape(shape) / 255.
    want = np.array([10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11],
                    dtype=np.float16).reshape(shape) / 255.
    self._testBrightness(src, want, delta=10. / 255., tol=1e-3)

  def testNegativeDelta(self):
    """uint8 pixels darkened by 10/255 clamp at 0."""
    shape = [2, 2, 3]
    src = np.array([0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1],
                   dtype=np.uint8).reshape(shape)
    want = np.array([0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0],
                    dtype=np.uint8).reshape(shape)
    self._testBrightness(src, want, delta=-10. / 255.)
class PerImageWhiteningTest(test_util.TensorFlowTestCase,
                            parameterized.TestCase):
  """Tests for image_ops.per_image_standardization."""

  def _NumpyPerImageWhitening(self, x):
    """Reference implementation: zero mean, unit (lower-capped) stddev."""
    num_pixels = np.prod(x.shape)
    mn = np.mean(x)
    std = np.std(x)
    # Cap the divisor from below so uniform images do not divide by ~0.
    stddev = max(std, 1.0 / math.sqrt(num_pixels))
    y = x.astype(np.float32)
    y -= mn
    y /= stddev
    return y

  @parameterized.named_parameters([("_int8", np.int8), ("_int16", np.int16),
                                   ("_int32", np.int32), ("_int64", np.int64),
                                   ("_uint8", np.uint8), ("_uint16", np.uint16),
                                   ("_uint32", np.uint32),
                                   ("_uint64", np.uint64),
                                   ("_float32", np.float32)])
  def testBasic(self, data_type):
    """Matches the NumPy reference for a range of input dtypes."""
    x_shape = [13, 9, 3]
    x_np = np.arange(0, np.prod(x_shape), dtype=data_type).reshape(x_shape)
    y_np = self._NumpyPerImageWhitening(x_np)
    with self.cached_session():
      x = constant_op.constant(x_np, dtype=data_type, shape=x_shape)
      y = image_ops.per_image_standardization(x)
      y_tf = self.evaluate(y)
      self.assertAllClose(y_tf, y_np, atol=1e-4)

  def testUniformImage(self):
    """A constant image must not produce NaNs (stddev is capped)."""
    im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
    im = constant_op.constant(im_np)
    whiten = image_ops.per_image_standardization(im)
    with self.cached_session():
      whiten_np = self.evaluate(whiten)
      self.assertFalse(np.any(np.isnan(whiten_np)))

  def testBatchWhitening(self):
    """Standardization is applied independently to each image in a batch."""
    imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3])
    whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np]
    with self.cached_session():
      imgs = constant_op.constant(imgs_np)
      whiten = image_ops.per_image_standardization(imgs)
      whiten_tf = self.evaluate(whiten)
      for w_tf, w_np in zip(whiten_tf, whiten_np):
        self.assertAllClose(w_tf, w_np, atol=1e-4)
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.crop_to_bounding_box."""

  def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
                         target_width, use_tensor_inputs):
    """Runs crop_to_bounding_box, optionally converting args to tensors."""
    if use_tensor_inputs:
      offset_height = ops.convert_to_tensor(offset_height)
      offset_width = ops.convert_to_tensor(offset_width)
      target_height = ops.convert_to_tensor(target_height)
      target_width = ops.convert_to_tensor(target_width)
      x_tensor = ops.convert_to_tensor(x)
    else:
      x_tensor = x
    y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
                                       target_height, target_width)
    with self.cached_session():
      return self.evaluate(y)

  def _assertReturns(self,
                     x,
                     x_shape,
                     offset_height,
                     offset_width,
                     y,
                     y_shape,
                     use_tensor_inputs_options=None):
    """Asserts cropping x yields y, for both Python and tensor inputs."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    # The crop size is implied by the expected output shape.
    target_height, target_width, _ = y_shape
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
                                     target_height, target_width,
                                     use_tensor_inputs)
      self.assertAllClose(y, y_tf)

  def _assertRaises(self,
                    x,
                    x_shape,
                    offset_height,
                    offset_width,
                    target_height,
                    target_width,
                    err_msg,
                    use_tensor_inputs_options=None):
    """Asserts the crop raises with a message matching err_msg."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    x = np.array(x).reshape(x_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      with self.assertRaisesRegex(
          (ValueError, errors.InvalidArgumentError), err_msg):
        self._CropToBoundingBox(x, offset_height, offset_width, target_height,
                                target_width, use_tensor_inputs)

  def _assertShapeInference(self, pre_shape, height, width, post_shape):
    """Asserts the statically inferred output shape equals post_shape."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
    self.assertEqual(y.get_shape().as_list(), post_shape)

  def testNoOp(self):
    """Cropping to the full extent returns the input unchanged."""
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    self._assertReturns(x, x_shape, 0, 0, x, x_shape)

  def testCrop(self):
    """Crops with various offsets and target sizes on a 3x3 image."""
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    x_shape = [3, 3, 1]
    # Drop the first row.
    offset_height, offset_width = [1, 0]
    y_shape = [2, 3, 1]
    y = [4, 5, 6, 7, 8, 9]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    # Drop the first column.
    offset_height, offset_width = [0, 1]
    y_shape = [3, 2, 1]
    y = [2, 3, 5, 6, 8, 9]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    # Keep the top two rows.
    offset_height, offset_width = [0, 0]
    y_shape = [2, 3, 1]
    y = [1, 2, 3, 4, 5, 6]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    # Keep the left two columns.
    offset_height, offset_width = [0, 0]
    y_shape = [3, 2, 1]
    y = [1, 2, 4, 5, 7, 8]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)

  def testShapeInference(self):
    """Static shape inference for various partially known input shapes."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
      self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
      self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
      self._assertShapeInference(None, 55, 66, [55, 66, None])

  def testNon3DInput(self):
    # Input image is not 3D
    x = [0] * 15
    offset_height, offset_width = [0, 0]
    target_height, target_width = [2, 2]
    for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
      self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
                         target_width,
                         "must have either 3 or 4 dimensions.")

  def testZeroLengthInput(self):
    # Input image has 0-length dimension(s).
    # Each line is a test configuration:
    #   x_shape, target_height, target_width
    test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
                   ([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
    offset_height, offset_width = [0, 0]
    x = []
    for x_shape, target_height, target_width in test_config:
      self._assertRaises(
          x,
          x_shape,
          offset_height,
          offset_width,
          target_height,
          target_width,
          "inner 3 dims of 'image.shape' must be > 0",
          use_tensor_inputs_options=[False])
      # Multiple assertion could fail, but the evaluation order is arbitrary.
      # Match against generic pattern.
      self._assertRaises(
          x,
          x_shape,
          offset_height,
          offset_width,
          target_height,
          target_width,
          "inner 3 dims of 'image.shape' must be > 0",
          use_tensor_inputs_options=[True])

  def testBadParams(self):
    """Out-of-range offsets or target sizes raise with specific messages."""
    x_shape = [4, 4, 1]
    x = np.zeros(x_shape)
    # Each line is a test configuration:
    #   (offset_height, offset_width, target_height, target_width), err_msg
    test_config = (
        ([-1, 0, 3, 3], "offset_height must be >= 0"),
        ([0, -1, 3, 3], "offset_width must be >= 0"),
        ([0, 0, 0, 3], "target_height must be > 0"),
        ([0, 0, 3, 0], "target_width must be > 0"),
        ([2, 0, 3, 3], r"height must be >= target \+ offset"),
        ([0, 2, 3, 3], r"width must be >= target \+ offset"))
    for params, err_msg in test_config:
      self._assertRaises(x, x_shape, *params, err_msg=err_msg)

  def testNameScope(self):
    # Testing name scope requires a graph.
    with ops.Graph().as_default():
      image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
      y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)
      self.assertTrue(y.name.startswith("crop_to_bounding_box"))
class CentralCropTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.central_crop."""

  def _assertShapeInference(self, pre_shape, fraction, post_shape):
    """Asserts the statically inferred crop shape equals post_shape."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.central_crop(image, fraction)
    if post_shape is None:
      self.assertEqual(y.get_shape().dims, None)
    else:
      self.assertEqual(y.get_shape().as_list(), post_shape)

  def testNoOp(self):
    """central_crop with fraction 1.0 returns the input unchanged."""
    x_shapes = [[13, 9, 3], [5, 13, 9, 3]]
    for x_shape in x_shapes:
      x_np = np.ones(x_shape, dtype=np.float32)
      for use_gpu in [True, False]:
        with self.cached_session(use_gpu=use_gpu):
          x = constant_op.constant(x_np, shape=x_shape)
          y = image_ops.central_crop(x, 1.0)
          y_tf = self.evaluate(y)
          self.assertAllEqual(y_tf, x_np)

  def testCropping(self):
    """Fraction 0.5 keeps the central half in each spatial dimension."""
    x_shape = [4, 8, 1]
    x_np = np.array(
        [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
         [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
        dtype=np.int32).reshape(x_shape)
    y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
    for use_gpu in [True, False]:
      with self.cached_session(use_gpu=use_gpu):
        x = constant_op.constant(x_np, shape=x_shape)
        y = image_ops.central_crop(x, 0.5)
        y_tf = self.evaluate(y)
        self.assertAllEqual(y_tf, y_np)
        self.assertAllEqual(y_tf.shape, y_np.shape)
    # Same crop applied per image on a batched (4-D) input.
    x_shape = [2, 4, 8, 1]
    x_np = np.array(
        [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
         [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
         [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
         [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],
        dtype=np.int32).reshape(x_shape)
    y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],
                     [[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])
    with self.cached_session():
      x = constant_op.constant(x_np, shape=x_shape)
      y = image_ops.central_crop(x, 0.5)
      y_tf = self.evaluate(y)
      self.assertAllEqual(y_tf, y_np)
      self.assertAllEqual(y_tf.shape, y_np.shape)

  def testCropping2(self):
    """Regression test: fractional crop sizes round consistently."""
    # Test case for GitHub issue 10315.
    x_shapes = [[240, 320, 3], [5, 240, 320, 3]]
    expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]
    for x_shape, y_shape in zip(x_shapes, expected_y_shapes):
      x_np = np.zeros(x_shape, dtype=np.int32)
      y_np = np.zeros(y_shape, dtype=np.int32)
      for use_gpu in [True, False]:
        with self.cached_session(use_gpu=use_gpu):
          y_tf = self.evaluate(image_ops.central_crop(x_np, 0.33))
          self.assertAllEqual(y_tf, y_np)
          self.assertAllEqual(y_tf.shape, y_np.shape)

  def testShapeInference(self):
    """Static shape inference for 3-D and 4-D inputs at two fractions."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      # Test fraction=1.0 (identity crop), with 3-D tensors.
      self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
      self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
      self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
      self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
      self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
      self._assertShapeInference([None, None, None], 1.0, [None, None, None])
      # Test fraction=0.5, with 3-D tensors.
      self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])
      self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
      self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])
      self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
      self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])
      self._assertShapeInference([None, None, None], 0.5, [None, None, None])
      # Test fraction=1.0 (identity crop), with 4-D tensors.
      self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])
      self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])
      self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])
      self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3])
      self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])
      self._assertShapeInference([5, None, None, None], 1.0,
                                 [5, None, None, None])
      self._assertShapeInference([None, None, None, None], 1.0,
                                 [None, None, None, None])
      # Test fraction=0.5, with 4-D tensors.
      self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])
      self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])
      self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])
      self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])
      self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])
      self._assertShapeInference([5, None, None, None], 0.5,
                                 [5, None, None, None])
      self._assertShapeInference([None, None, None, None], 0.5,
                                 [None, None, None, None])

  def testErrorOnInvalidCentralCropFractionValues(self):
    """Fractions outside (0, 1] are rejected."""
    x_shape = [13, 9, 3]
    x_np = np.ones(x_shape, dtype=np.float32)
    for use_gpu in [True, False]:
      with self.cached_session(use_gpu=use_gpu):
        x = constant_op.constant(x_np, shape=x_shape)
        with self.assertRaises(ValueError):
          _ = image_ops.central_crop(x, 0.0)
        with self.assertRaises(ValueError):
          _ = image_ops.central_crop(x, 1.01)

  def testErrorOnInvalidShapes(self):
    """Inputs that are not rank 3 or 4 are rejected."""
    x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]]
    for x_shape in x_shapes:
      x_np = np.ones(x_shape, dtype=np.float32)
      for use_gpu in [True, False]:
        with self.cached_session(use_gpu=use_gpu):
          x = constant_op.constant(x_np, shape=x_shape)
          with self.assertRaises(ValueError):
            _ = image_ops.central_crop(x, 0.5)

  def testNameScope(self):
    # Testing name scope requires a graph.
    with ops.Graph().as_default():
      x_shape = [13, 9, 3]
      x_np = np.ones(x_shape, dtype=np.float32)
      for use_gpu in [True, False]:
        with self.cached_session(use_gpu=use_gpu):
          y = image_ops.central_crop(x_np, 1.0)
          self.assertTrue(y.op.name.startswith("central_crop"))

  def testCentralFractionTensor(self):
    """central_fraction may be a tensor inside a tf.function."""
    # Test case for GitHub issue 45324.
    x_shape = [240, 320, 3]
    y_shape = [80, 106, 3]

    @def_function.function(autograph=False)
    def f(x, central_fraction):
      return image_ops.central_crop(x, central_fraction)

    x_np = np.zeros(x_shape, dtype=np.int32)
    y_np = np.zeros(y_shape, dtype=np.int32)
    y_tf = self.evaluate(f(x_np, constant_op.constant(0.33)))
    self.assertAllEqual(y_tf, y_np)
    self.assertAllEqual(y_tf.shape, y_np.shape)
class PadToBoundingBoxTest(test_util.TensorFlowTestCase,
                           parameterized.TestCase):
  """Tests for image_ops.pad_to_bounding_box."""

  def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
                        target_width, use_tensor_inputs):
    """Runs pad_to_bounding_box (inside tf.function), optionally on tensors."""
    if use_tensor_inputs:
      offset_height = ops.convert_to_tensor(offset_height)
      offset_width = ops.convert_to_tensor(offset_width)
      target_height = ops.convert_to_tensor(target_height)
      target_width = ops.convert_to_tensor(target_width)
      x_tensor = ops.convert_to_tensor(x)
    else:
      x_tensor = x

    # Wrap in a tf.function so validation paths for traced tensors are hit.
    @def_function.function
    def pad_bbox(*args):
      return image_ops.pad_to_bounding_box(*args)

    with self.cached_session():
      return self.evaluate(pad_bbox(x_tensor, offset_height, offset_width,
                                    target_height, target_width))

  def _assertReturns(self,
                     x,
                     x_shape,
                     offset_height,
                     offset_width,
                     y,
                     y_shape,
                     use_tensor_inputs_options=None):
    """Asserts padding x yields y, for both Python and tensor inputs."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    # The padded size is implied by the expected output shape.
    target_height, target_width, _ = y_shape
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
                                    target_height, target_width,
                                    use_tensor_inputs)
      self.assertAllClose(y, y_tf)

  def _assertRaises(self,
                    x,
                    x_shape,
                    offset_height,
                    offset_width,
                    target_height,
                    target_width,
                    err_msg,
                    use_tensor_inputs_options=None):
    """Asserts the pad raises with a message matching err_msg."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    x = np.array(x).reshape(x_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      with self.assertRaisesRegex(
          (ValueError, errors.InvalidArgumentError), err_msg):
        self._PadToBoundingBox(x, offset_height, offset_width, target_height,
                               target_width, use_tensor_inputs)

  def _assertShapeInference(self, pre_shape, height, width, post_shape):
    """Asserts the statically inferred output shape equals post_shape."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
    self.assertEqual(y.get_shape().as_list(), post_shape)

  def testInt64(self):
    """int64 offsets and targets are accepted."""
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    x_shape = [3, 3, 1]
    y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    y_shape = [4, 3, 1]
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
    y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])
    with self.cached_session():
      self.assertAllClose(y, self.evaluate(y_tf))

  def testNoOp(self):
    """Padding to the input's own size returns the input unchanged."""
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    offset_height, offset_width = [0, 0]
    self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)

  def testPadding(self):
    """Zero-padding is placed according to the offsets."""
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    x_shape = [3, 3, 1]
    # Pad one row of zeros on top.
    offset_height, offset_width = [1, 0]
    y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    y_shape = [4, 3, 1]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    # Pad one column of zeros on the left.
    offset_height, offset_width = [0, 1]
    y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
    y_shape = [3, 4, 1]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    # Pad one row of zeros at the bottom.
    offset_height, offset_width = [0, 0]
    y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
    y_shape = [4, 3, 1]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    # Pad one column of zeros on the right.
    offset_height, offset_width = [0, 0]
    y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
    y_shape = [3, 4, 1]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)

  def testShapeInference(self):
    """Static shape inference for various partially known input shapes."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
      self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
      self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
      self._assertShapeInference(None, 55, 66, [55, 66, None])

  def testNon3DInput(self):
    # Input image is not 3D
    x = [0] * 15
    offset_height, offset_width = [0, 0]
    target_height, target_width = [2, 2]
    for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
      self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
                         target_width,
                         "must have either 3 or 4 dimensions.")

  def testZeroLengthInput(self):
    # Input image has 0-length dimension(s).
    # Each line is a test configuration:
    #   x_shape, target_height, target_width
    test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))
    offset_height, offset_width = [0, 0]
    x = []
    for x_shape, target_height, target_width in test_config:
      self._assertRaises(
          x,
          x_shape,
          offset_height,
          offset_width,
          target_height,
          target_width,
          "inner 3 dims of 'image.shape' must be > 0",
          use_tensor_inputs_options=[False])
      # The original error message does not contain back slashes. However, they
      # are added by either the assert op or the runtime. If this behavior
      # changes in the future, the match string will also needs to be changed.
      self._assertRaises(
          x,
          x_shape,
          offset_height,
          offset_width,
          target_height,
          target_width,
          "inner 3 dims of \\'image.shape\\' must be > 0",
          use_tensor_inputs_options=[True])

  def testBadParamsScalarInputs(self):
    # In this test, inputs do not get converted to tensors before calling the
    # tf.function. The error message here is raised in python
    # since the python function has direct access to the scalars.
    x_shape = [3, 3, 1]
    x = np.zeros(x_shape)
    # Each line is a test configuration:
    #   offset_height, offset_width, target_height, target_width, err_msg
    test_config = (
        (-1, 0, 4, 4,
         "offset_height must be >= 0"),
        (0, -1, 4, 4,
         "offset_width must be >= 0"),
        (2, 0, 4, 4,
         "height must be <= target - offset"),
        (0, 2, 4, 4,
         "width must be <= target - offset"))
    for config_item in test_config:
      self._assertRaises(
          x, x_shape, *config_item, use_tensor_inputs_options=[False])

  def testBadParamsTensorInputsEager(self):
    # In this test inputs get converted to EagerTensors before calling the
    # tf.function. The error message here is raised in python
    # since the python function has direct access to the tensor's values.
    with context.eager_mode():
      x_shape = [3, 3, 1]
      x = np.zeros(x_shape)
      # Each line is a test configuration:
      #   offset_height, offset_width, target_height, target_width, err_msg
      test_config = (
          (-1, 0, 4, 4,
           "offset_height must be >= 0"),
          (0, -1, 4, 4,
           "offset_width must be >= 0"),
          (2, 0, 4, 4,
           "height must be <= target - offset"),
          (0, 2, 4, 4,
           "width must be <= target - offset"))
      for config_item in test_config:
        self._assertRaises(
            x, x_shape, *config_item, use_tensor_inputs_options=[True])

  @parameterized.named_parameters([("OffsetHeight", (-1, 0, 4, 4)),
                                   ("OffsetWidth", (0, -1, 4, 4)),
                                   ("Height", (2, 0, 4, 4)),
                                   ("Width", (0, 2, 4, 4))])
  def testBadParamsTensorInputsGraph(self, config):
    # In this test inputs get converted to tensors before calling the
    # tf.function. The error message here is raised during shape inference.
    with context.graph_mode():
      x_shape = [3, 3, 1]
      x = np.zeros(x_shape)
      self._assertRaises(
          x,
          x_shape,
          *config,
          "Paddings must be non-negative",
          use_tensor_inputs_options=[True])

  def testNameScope(self):
    # Testing name scope requires a graph.
    with ops.Graph().as_default():
      image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
      y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)
      self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))

  def testInvalidInput(self):
    """Huge target sizes must error rather than overflow."""
    # Test case for GitHub issue 46890.
    if test_util.is_xla_enabled():
      # TODO(b/200850176): test fails with XLA.
      return
    with self.session():
      with self.assertRaises(errors_impl.InvalidArgumentError):
        v = image_ops.pad_to_bounding_box(
            image=np.ones((1, 1, 1)),
            target_height=5191549470,
            target_width=5191549470,
            offset_height=1,
            offset_width=1)
        self.evaluate(v)
class ImageProjectiveTransformV2(test_util.TensorFlowTestCase):
  """Tests for the raw ImageProjectiveTransformV2 kernel."""

  def testShapeTooLarge(self):
    """Output shapes whose element count overflows must be rejected."""
    image_batch = constant_op.constant(
        0.184634328, shape=[2, 5, 8, 3], dtype=dtypes.float32)
    projective_transforms = constant_op.constant(
        0.378575385, shape=[2, 8], dtype=dtypes.float32)
    # 1879048192 x 1879048192 overflows when the kernel multiplies the output
    # dimensions together.
    requested_shape = constant_op.constant([1879048192, 1879048192],
                                           shape=[2],
                                           dtype=dtypes.int32)
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                r"Encountered overflow when multiplying"):
      self.evaluate(
          gen_image_ops.ImageProjectiveTransformV2(
              images=image_batch,
              transforms=projective_transforms,
              output_shape=requested_shape,
              interpolation="BILINEAR",
              fill_mode="REFLECT"))
class InternalPadToBoundingBoxTest(test_util.TensorFlowTestCase,
                                   parameterized.TestCase):
  """Tests for `pad_to_bounding_box_internal` called with `check_dims=False`."""

  def _InternalPadToBoundingBox(self, x, offset_height, offset_width,
                                target_height, target_width, use_tensor_inputs):
    """Pads `x` via the internal entry point inside a tf.function.

    When `use_tensor_inputs` is True, every argument is converted to a tensor
    first so the padding parameters flow through graph construction instead of
    being Python scalars.
    """
    if use_tensor_inputs:
      offset_height = ops.convert_to_tensor(offset_height)
      offset_width = ops.convert_to_tensor(offset_width)
      target_height = ops.convert_to_tensor(target_height)
      target_width = ops.convert_to_tensor(target_width)
      x_tensor = ops.convert_to_tensor(x)
    else:
      x_tensor = x

    @def_function.function
    def pad_bbox(*args):
      # check_dims=False selects the internal variant that skips the runtime
      # dimension-validation assertions.
      return image_ops.pad_to_bounding_box_internal(*args, check_dims=False)

    with self.cached_session():
      return self.evaluate(
          pad_bbox(x_tensor, offset_height, offset_width, target_height,
                   target_width))

  def _assertReturns(self,
                     x,
                     x_shape,
                     offset_height,
                     offset_width,
                     y,
                     y_shape,
                     use_tensor_inputs_options=None):
    """Asserts padding `x` yields `y` for each scalar/tensor input mode."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    target_height, target_width, _ = y_shape
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._InternalPadToBoundingBox(x, offset_height, offset_width,
                                            target_height, target_width,
                                            use_tensor_inputs)
      self.assertAllClose(y, y_tf)

  def _assertShapeInference(self, pre_shape, height, width, post_shape):
    """Asserts the statically inferred output shape for one input shape."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.pad_to_bounding_box_internal(
        image, 0, 0, height, width, check_dims=False)
    self.assertEqual(y.get_shape().as_list(), post_shape)

  def testInt64(self):
    """int64 offset/target parameters are accepted."""
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    x_shape = [3, 3, 1]
    y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    y_shape = [4, 3, 1]
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
    y_tf = image_ops.pad_to_bounding_box_internal(
        x, i[0], i[1], i[2], i[3], check_dims=False)
    with self.cached_session():
      self.assertAllClose(y, self.evaluate(y_tf))

  def testNoOp(self):
    """Padding to the image's own size returns the image unchanged."""
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    offset_height, offset_width = [0, 0]
    self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)

  def testPadding(self):
    """One-row/column zero pads on each side produce the expected layouts."""
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    x_shape = [3, 3, 1]
    # One row of zeros on top.
    offset_height, offset_width = [1, 0]
    y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    y_shape = [4, 3, 1]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    # One column of zeros on the left.
    offset_height, offset_width = [0, 1]
    y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
    y_shape = [3, 4, 1]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    # One row of zeros on the bottom.
    offset_height, offset_width = [0, 0]
    y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
    y_shape = [4, 3, 1]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
    # One column of zeros on the right.
    offset_height, offset_width = [0, 0]
    y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
    y_shape = [3, 4, 1]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)

  def testShapeInference(self):
    """Shape inference handles known, partially known, and unknown shapes."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
      self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
      self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
      self._assertShapeInference(None, 55, 66, [55, 66, None])

  def testNameScope(self):
    """Ops are created under the "pad_to_bounding_box" name scope."""
    # Testing name scope requires a graph.
    with ops.Graph().as_default():
      image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
      y = image_ops.pad_to_bounding_box_internal(
          image, 0, 0, 55, 66, check_dims=False)
      self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
  """Tests for sample_distorted_bounding_box and its stateless variant."""

  def _testSampleDistortedBoundingBox(self, image, bounding_box,
                                      min_object_covered, aspect_ratio_range,
                                      area_range):
    """Samples many crops and checks their area ratios stay within bounds.

    Runs the op twice: once with `min_object_covered` as a Python value and
    once converted to a tensor; both sets of samples are pooled before the
    range assertions.
    """
    original_area = float(np.prod(image.shape))
    # bounding_box is (ymin, xmin, ymax, xmax) in relative coordinates.
    bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
                              (bounding_box[2] - bounding_box[0]))
    image_size_np = np.array(image.shape, dtype=np.int32)
    bounding_box_np = (
        np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
    aspect_ratios = []
    area_ratios = []
    fraction_object_covered = []
    num_iter = 1000
    with self.cached_session():
      image_tf = constant_op.constant(image, shape=image.shape)
      image_size_tf = constant_op.constant(
          image_size_np, shape=image_size_np.shape)
      bounding_box_tf = constant_op.constant(
          bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
      begin, size, _ = image_ops.sample_distorted_bounding_box(
          image_size=image_size_tf,
          bounding_boxes=bounding_box_tf,
          min_object_covered=min_object_covered,
          aspect_ratio_range=aspect_ratio_range,
          area_range=area_range)
      y = array_ops.strided_slice(image_tf, begin, begin + size)
      for _ in range(num_iter):
        # Each evaluation re-runs the stateful sampling op.
        y_tf = self.evaluate(y)
        crop_height = y_tf.shape[0]
        crop_width = y_tf.shape[1]
        aspect_ratio = float(crop_width) / float(crop_height)
        area = float(crop_width * crop_height)
        aspect_ratios.append(aspect_ratio)
        area_ratios.append(area / original_area)
        fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
      # min_object_covered as tensor
      min_object_covered_t = ops.convert_to_tensor(min_object_covered)
      begin, size, _ = image_ops.sample_distorted_bounding_box(
          image_size=image_size_tf,
          bounding_boxes=bounding_box_tf,
          min_object_covered=min_object_covered_t,
          aspect_ratio_range=aspect_ratio_range,
          area_range=area_range)
      y = array_ops.strided_slice(image_tf, begin, begin + size)
      for _ in range(num_iter):
        y_tf = self.evaluate(y)
        crop_height = y_tf.shape[0]
        crop_width = y_tf.shape[1]
        aspect_ratio = float(crop_width) / float(crop_height)
        area = float(crop_width * crop_height)
        aspect_ratios.append(aspect_ratio)
        area_ratios.append(area / original_area)
        fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
    # Ensure that each entry is observed within 3 standard deviations.
    # num_bins = 10
    # aspect_ratio_hist, _ = np.histogram(aspect_ratios,
    #                                     bins=num_bins,
    #                                     range=aspect_ratio_range)
    # mean = np.mean(aspect_ratio_hist)
    # stddev = np.sqrt(mean)
    # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
    # TODO(irving): Since the rejection probability is not independent of the
    # aspect ratio, the aspect_ratio random value is not exactly uniformly
    # distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
    # fixed to reflect the true statistical property, then tightened to enforce
    # a stricter bound. Or, ideally, the sample_distorted_bounding_box Op
    # be fixed to not use rejection sampling and generate correctly uniform
    # aspect ratios.
    # self.assertAllClose(aspect_ratio_hist,
    #                     [mean] * num_bins, atol=3.6 * stddev)

    # The resulting crop will not be uniformly distributed in area. In practice,
    # we find that the area skews towards the small sizes. Instead, we perform
    # a weaker test to ensure that the area ratios are merely within the
    # specified bounds.
    self.assertLessEqual(max(area_ratios), area_range[1])
    self.assertGreaterEqual(min(area_ratios), area_range[0])

    # For reference, here is what the distribution of area ratios look like.
    area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
    print("area_ratio_hist ", area_ratio_hist)

    # Ensure that fraction_object_covered is satisfied.
    # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
    # self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)

  def testWholeImageBoundingBox(self):
    """Sampling with a bounding box covering the whole image succeeds."""
    height = 40
    width = 50
    image_size = [height, width, 1]
    bounding_box = [0.0, 0.0, 1.0, 1.0]
    image = np.arange(
        0, np.prod(image_size), dtype=np.int32).reshape(image_size)
    self._testSampleDistortedBoundingBox(
        image,
        bounding_box,
        min_object_covered=0.1,
        aspect_ratio_range=(0.75, 1.33),
        area_range=(0.05, 1.0))

  def testWithBoundingBox(self):
    """Sampling with a small interior bounding box succeeds."""
    height = 40
    width = 50
    x_shape = [height, width, 1]
    image = np.zeros(x_shape, dtype=np.int32)

    # Create an object with 1's in a region with area A and require that
    # the total pixel values >= 0.1 * A.
    min_object_covered = 0.1

    xmin = 2
    ymin = 3
    xmax = 12
    ymax = 13
    for x in np.arange(xmin, xmax + 1, 1):
      for y in np.arange(ymin, ymax + 1, 1):
        image[x, y] = 1

    # Bounding box is specified as (ymin, xmin, ymax, xmax) in
    # relative coordinates.
    bounding_box = (float(ymin) / height, float(xmin) / width,
                    float(ymax) / height, float(xmax) / width)

    self._testSampleDistortedBoundingBox(
        image,
        bounding_box=bounding_box,
        min_object_covered=min_object_covered,
        aspect_ratio_range=(0.75, 1.33),
        area_range=(0.05, 1.0))

  def testSampleDistortedBoundingBoxShape(self):
    """Output shapes are correct, with scalar and placeholder coverage."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      with self.cached_session():
        image_size = constant_op.constant(
            [40, 50, 1], shape=[3], dtype=dtypes.int32)
        bounding_box = constant_op.constant(
            [[[0.0, 0.0, 1.0, 1.0]]],
            shape=[1, 1, 4],
            dtype=dtypes.float32,
        )
        begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
            image_size=image_size,
            bounding_boxes=bounding_box,
            min_object_covered=0.1,
            aspect_ratio_range=(0.75, 1.33),
            area_range=(0.05, 1.0))

        # Test that the shapes are correct.
        self.assertAllEqual([3], begin.get_shape().as_list())
        self.assertAllEqual([3], end.get_shape().as_list())
        self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
        # Actual run to make sure shape is correct inside Compute().
        begin = self.evaluate(begin)
        end = self.evaluate(end)
        bbox_for_drawing = self.evaluate(bbox_for_drawing)

        begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
            image_size=image_size,
            bounding_boxes=bounding_box,
            min_object_covered=array_ops.placeholder(dtypes.float32),
            aspect_ratio_range=(0.75, 1.33),
            area_range=(0.05, 1.0))

        # Test that the shapes are correct.
        self.assertAllEqual([3], begin.get_shape().as_list())
        self.assertAllEqual([3], end.get_shape().as_list())
        self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())

  def testDefaultMinObjectCovered(self):
    """Omitting min_object_covered falls back to the documented default."""
    # By default min_object_covered=0.1 if not provided
    with self.cached_session():
      image_size = constant_op.constant(
          [40, 50, 1], shape=[3], dtype=dtypes.int32)
      bounding_box = constant_op.constant(
          [[[0.0, 0.0, 1.0, 1.0]]],
          shape=[1, 1, 4],
          dtype=dtypes.float32,
      )
      begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
          image_size=image_size,
          bounding_boxes=bounding_box,
          aspect_ratio_range=(0.75, 1.33),
          area_range=(0.05, 1.0))

      self.assertAllEqual([3], begin.get_shape().as_list())
      self.assertAllEqual([3], end.get_shape().as_list())
      self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
      # Actual run to make sure shape is correct inside Compute().
      begin = self.evaluate(begin)
      end = self.evaluate(end)
      bbox_for_drawing = self.evaluate(bbox_for_drawing)

  def _testStatelessSampleDistortedBoundingBox(self, image, bounding_box,
                                               min_object_covered,
                                               aspect_ratio_range, area_range):
    """Checks the stateless variant is deterministic for a fixed seed.

    For each seed, the op is run `iterations` times; every measured quantity
    must be identical across runs, and the area ratio must lie in range.
    """
    with test_util.use_gpu():
      original_area = float(np.prod(image.shape))
      bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
                                (bounding_box[2] - bounding_box[0]))
      image_size_np = np.array(image.shape, dtype=np.int32)
      bounding_box_np = (
          np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))

      iterations = 2
      test_seeds = [(1, 2), (3, 4), (5, 6)]

      for seed in test_seeds:
        aspect_ratios = []
        area_ratios = []
        fraction_object_covered = []
        for _ in range(iterations):
          image_tf = constant_op.constant(image, shape=image.shape)
          image_size_tf = constant_op.constant(
              image_size_np, shape=image_size_np.shape)
          bounding_box_tf = constant_op.constant(bounding_box_np,
                                                 dtype=dtypes.float32,
                                                 shape=bounding_box_np.shape)
          begin, size, _ = image_ops.stateless_sample_distorted_bounding_box(
              image_size=image_size_tf,
              bounding_boxes=bounding_box_tf,
              seed=seed,
              min_object_covered=min_object_covered,
              aspect_ratio_range=aspect_ratio_range,
              area_range=area_range)
          y = array_ops.strided_slice(image_tf, begin, begin + size)
          y_tf = self.evaluate(y)
          crop_height = y_tf.shape[0]
          crop_width = y_tf.shape[1]
          aspect_ratio = float(crop_width) / float(crop_height)
          area = float(crop_width * crop_height)
          aspect_ratios.append(aspect_ratio)
          area_ratio = area / original_area
          area_ratios.append(area_ratio)
          fraction_object_covered.append(
              float(np.sum(y_tf)) / bounding_box_area)

        # Check that `area_ratio` is within valid range.
        self.assertLessEqual(area_ratio, area_range[1])
        self.assertGreaterEqual(area_ratio, area_range[0])

        # Each array should consist of one value just repeated `iteration` times
        # because the same seed is used.
        self.assertEqual(len(set(aspect_ratios)), 1)
        self.assertEqual(len(set(area_ratios)), 1)
        self.assertEqual(len(set(fraction_object_covered)), 1)

  # TODO(b/162345082): stateless random op generates different random number
  # with xla_gpu. Update tests such that there is a single ground truth result
  # to test against.
  def testWholeImageBoundingBoxStateless(self):
    """Stateless sampling with a whole-image bounding box is deterministic."""
    height = 40
    width = 50
    image_size = [height, width, 1]
    bounding_box = [0.0, 0.0, 1.0, 1.0]
    image = np.arange(
        0, np.prod(image_size), dtype=np.int32).reshape(image_size)
    for min_obj_covered in [0.1, constant_op.constant(0.1)]:
      self._testStatelessSampleDistortedBoundingBox(
          image,
          bounding_box,
          min_object_covered=min_obj_covered,
          aspect_ratio_range=(0.75, 1.33),
          area_range=(0.05, 1.0))

  # TODO(b/162345082): stateless random op generates different random number
  # with xla_gpu. Update tests such that there is a single ground truth result
  # to test against.
  def testWithBoundingBoxStateless(self):
    """Stateless sampling with an interior bounding box is deterministic."""
    height = 40
    width = 50
    x_shape = [height, width, 1]
    image = np.zeros(x_shape, dtype=np.int32)

    xmin = 2
    ymin = 3
    xmax = 12
    ymax = 13
    for x in np.arange(xmin, xmax + 1, 1):
      for y in np.arange(ymin, ymax + 1, 1):
        image[x, y] = 1

    # Bounding box is specified as (ymin, xmin, ymax, xmax) in
    # relative coordinates.
    bounding_box = (float(ymin) / height, float(xmin) / width,
                    float(ymax) / height, float(xmax) / width)

    # Test both scalar and tensor input for `min_object_covered`.
    for min_obj_covered in [0.1, constant_op.constant(0.1)]:
      self._testStatelessSampleDistortedBoundingBox(
          image,
          bounding_box=bounding_box,
          min_object_covered=min_obj_covered,
          aspect_ratio_range=(0.75, 1.33),
          area_range=(0.05, 1.0))

  def testSampleDistortedBoundingBoxShapeStateless(self):
    """Stateless variant validates seed shape and returns correct shapes."""
    with test_util.use_gpu():
      image_size = constant_op.constant(
          [40, 50, 1], shape=[3], dtype=dtypes.int32)
      bounding_box = constant_op.constant(
          [[[0.0, 0.0, 1.0, 1.0]]],
          shape=[1, 1, 4],
          dtype=dtypes.float32,
      )

      bbox_func = functools.partial(
          image_ops.stateless_sample_distorted_bounding_box,
          image_size=image_size,
          bounding_boxes=bounding_box,
          min_object_covered=0.1,
          aspect_ratio_range=(0.75, 1.33),
          area_range=(0.05, 1.0))

      # Check error is raised with wrong seed shapes.
      for seed in [1, (1, 2, 3)]:
        with self.assertRaises((ValueError, errors.InvalidArgumentError)):
          begin, end, bbox_for_drawing = bbox_func(seed=seed)

      test_seed = (1, 2)
      begin, end, bbox_for_drawing = bbox_func(seed=test_seed)

      # Test that the shapes are correct.
      self.assertAllEqual([3], begin.get_shape().as_list())
      self.assertAllEqual([3], end.get_shape().as_list())
      self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())

      # Actual run to make sure shape is correct inside Compute().
      begin = self.evaluate(begin)
      end = self.evaluate(end)
      bbox_for_drawing = self.evaluate(bbox_for_drawing)
      self.assertAllEqual([3], begin.shape)
      self.assertAllEqual([3], end.shape)
      self.assertAllEqual([1, 1, 4], bbox_for_drawing.shape)

  def testDeterminismExceptionThrowing(self):
    """With determinism enabled, seedless sampling must raise; seeded is OK."""
    with test_util.deterministic_ops():
      with self.assertRaisesRegex(
          ValueError, "requires a non-zero seed to be passed in when "
          "determinism is enabled"):
        image_ops_impl.sample_distorted_bounding_box_v2(
            image_size=[50, 50, 1],
            bounding_boxes=[[[0., 0., 1., 1.]]],
        )
      image_ops_impl.sample_distorted_bounding_box_v2(
          image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]], seed=1)

      with self.assertRaisesRegex(
          ValueError, 'requires "seed" or "seed2" to be non-zero when '
          "determinism is enabled"):
        image_ops_impl.sample_distorted_bounding_box(
            image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]])
      image_ops_impl.sample_distorted_bounding_box(
          image_size=[50, 50, 1], bounding_boxes=[[[0., 0., 1., 1.]]], seed=1)
class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5,
image_ops.ResizeMethod.GAUSSIAN, image_ops.ResizeMethod.MITCHELLCUBIC
]
# Some resize methods, such as Gaussian, are non-interpolating in that they
# change the image even if there is no scale change, for some test, we only
# check the value on the value preserving methods.
INTERPOLATING_METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images_v2(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, method, nptype):
if (method == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
  @test_util.disable_xla("align_corners=False not supported by XLA")
  def testNoOp(self):
    """Resizing to the input's own size keeps the shape; interpolating
    methods also preserve the pixel values."""
    img_shape = [1, 6, 4, 1]
    single_shape = [6, 4, 1]
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [
        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
        50, 50, 100, 100, 50, 50, 100, 100
    ]
    target_height = 6
    target_width = 4

    for nptype in self.TYPES:
      img_np = np.array(data, dtype=nptype).reshape(img_shape)

      for method in self.METHODS:
        with self.cached_session():
          image = constant_op.constant(img_np, shape=img_shape)
          y = image_ops.resize_images_v2(image, [target_height, target_width],
                                         method)
          yshape = array_ops.shape(y)
          resized, newshape = self.evaluate([y, yshape])
          self.assertAllEqual(img_shape, newshape)
          if method in self.INTERPOLATING_METHODS:
            self.assertAllClose(resized, img_np, atol=1e-5)

      # Resizing with a single image must leave the shape unchanged also.
      with self.cached_session():
        img_single = img_np.reshape(single_shape)
        image = constant_op.constant(img_single, shape=single_shape)
        y = image_ops.resize_images_v2(image, [target_height, target_width],
                                       self.METHODS[0])
        yshape = array_ops.shape(y)
        newshape = self.evaluate(yshape)
        self.assertAllEqual(single_shape, newshape)
  # half_pixel_centers unsupported in ResizeBilinear
  @test_util.disable_xla("b/127616992")
  def testTensorArguments(self):
    """Size may be given as tensors; malformed sizes/dtypes raise ValueError."""
    img_shape = [1, 6, 4, 1]
    single_shape = [6, 4, 1]
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [
        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
        50, 50, 100, 100, 50, 50, 100, 100
    ]

    def resize_func(t, new_size, method):
      return image_ops.resize_images_v2(t, new_size, method)

    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)

    for method in self.METHODS:
      with self.cached_session():
        image = constant_op.constant(img_np, shape=img_shape)
        y = resize_func(image, [6, 4], method)
        yshape = array_ops.shape(y)
        resized, newshape = self.evaluate([y, yshape])
        self.assertAllEqual(img_shape, newshape)
        if method in self.INTERPOLATING_METHODS:
          self.assertAllClose(resized, img_np, atol=1e-5)

    # Resizing with a single image must leave the shape unchanged also.
    with self.cached_session():
      img_single = img_np.reshape(single_shape)
      image = constant_op.constant(img_single, shape=single_shape)
      y = resize_func(image, [6, 4], self.METHODS[0])
      yshape = array_ops.shape(y)
      resized, newshape = self.evaluate([y, yshape])
      self.assertAllEqual(single_shape, newshape)
      # NOTE(review): `method` here is the leftover loop variable (the last
      # entry of self.METHODS), not self.METHODS[0] used above.
      if method in self.INTERPOLATING_METHODS:
        self.assertAllClose(resized, img_single, atol=1e-5)

    # Incorrect shape.
    with self.assertRaises(ValueError):
      new_size = constant_op.constant(4)
      _ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
    with self.assertRaises(ValueError):
      new_size = constant_op.constant([4])
      _ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
    with self.assertRaises(ValueError):
      new_size = constant_op.constant([1, 2, 3])
      _ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)

    # Incorrect dtypes.
    with self.assertRaises(ValueError):
      new_size = constant_op.constant([6.0, 4])
      _ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
    with self.assertRaises(ValueError):
      _ = resize_func(image, [6, 4.0], image_ops.ResizeMethod.BILINEAR)
    with self.assertRaises(ValueError):
      _ = resize_func(image, [None, 4], image_ops.ResizeMethod.BILINEAR)
    with self.assertRaises(ValueError):
      _ = resize_func(image, [6, None], image_ops.ResizeMethod.BILINEAR)
  def testReturnDtypeV1(self):
    """NEAREST_NEIGHBOR keeps the input dtype; other methods yield float32."""
    # Shape inference in V1.
    with ops.Graph().as_default():
      target_shapes = [[6, 4], [3, 2],
                       [
                           array_ops.placeholder(dtypes.int32),
                           array_ops.placeholder(dtypes.int32)
                       ]]
      for nptype in self.TYPES:
        image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
        for method in self.METHODS:
          for target_shape in target_shapes:
            y = image_ops.resize_images_v2(image, target_shape, method)
            if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
              expected_dtype = image.dtype
            else:
              expected_dtype = dtypes.float32
            self.assertEqual(y.dtype, expected_dtype)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
def testReturnDtypeV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def test_dtype(image, target_shape, target_method):
y = image_ops.resize_images_v2(image, target_shape, target_method)
if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
target_shapes = [[6, 4],
[3, 2],
[tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]]
for nptype in self.TYPES:
image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype)
for method in self.METHODS:
for target_shape in target_shapes:
with test_util.run_functions_eagerly(run_func_eagerly):
test_dtype.get_concrete_function(image, target_shape, method)
  # half_pixel_centers not supported by XLA
  @test_util.disable_xla("b/127616992")
  def testSumTensor(self):
    """The target size may be a computed tensor (sum of two tensors)."""
    img_shape = [1, 6, 4, 1]
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [
        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
        50, 50, 100, 100, 50, 50, 100, 100
    ]
    # Test size where width is specified as a tensor which is a sum
    # of two tensors.
    width_1 = constant_op.constant(1)
    width_2 = constant_op.constant(3)
    width = math_ops.add(width_1, width_2)
    height = constant_op.constant(6)

    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)

    for method in self.METHODS:
      with self.cached_session():
        image = constant_op.constant(img_np, shape=img_shape)
        # 6x4 equals the input size, so shape must be unchanged.
        y = image_ops.resize_images_v2(image, [height, width], method)
        yshape = array_ops.shape(y)
        resized, newshape = self.evaluate([y, yshape])
        self.assertAllEqual(img_shape, newshape)
        if method in self.INTERPOLATING_METHODS:
          self.assertAllClose(resized, img_np, atol=1e-5)
  @test_util.disable_xla("align_corners=False not supported by XLA")
  def testResizeDown(self):
    """Downsampling 6x4 -> 3x2 yields the expected pixels (GPU combos only).

    The body only executes for method/dtype pairs where shouldRunOnGPU is
    True and a GPU is available; otherwise each iteration is a no-op.
    """
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [
        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
        50, 50, 100, 100, 50, 50, 100, 100
    ]
    expected_data = [127, 64, 64, 127, 50, 100]
    target_height = 3
    target_width = 2

    # Test out 3-D and 4-D image shapes.
    img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
    target_shapes = [[1, target_height, target_width, 1],
                     [target_height, target_width, 1]]

    for target_shape, img_shape in zip(target_shapes, img_shapes):

      for nptype in self.TYPES:
        img_np = np.array(data, dtype=nptype).reshape(img_shape)

        for method in self.METHODS:
          if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
            with self.cached_session():
              image = constant_op.constant(img_np, shape=img_shape)
              y = image_ops.resize_images_v2(
                  image, [target_height, target_width], method)
              expected = np.array(expected_data).reshape(target_shape)
              resized = self.evaluate(y)
              self.assertAllClose(resized, expected, atol=1e-5)
  @test_util.disable_xla("align_corners=False not supported by XLA")
  def testResizeUp(self):
    """Upsampling 3x2 -> 6x4 matches golden values for every resize method."""
    img_shape = [1, 3, 2, 1]
    data = [64, 32, 32, 64, 50, 100]
    target_height = 6
    target_width = 4
    # Golden outputs, one table per resize method.
    expected_data = {}
    expected_data[image_ops.ResizeMethod.BILINEAR] = [
        64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0,
        36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5,
        87.5, 100.0
    ]
    expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
        64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
        32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
        100.0
    ]
    expected_data[image_ops.ResizeMethod.AREA] = [
        64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
        32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
        100.0
    ]
    expected_data[image_ops.ResizeMethod.LANCZOS3] = [
        75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964,
        35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045,
        35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413
    ]
    expected_data[image_ops.ResizeMethod.LANCZOS5] = [
        77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709,
        35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658,
        32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109
    ]
    expected_data[image_ops.ResizeMethod.GAUSSIAN] = [
        61.1087, 54.6926, 41.3074, 34.8913, 54.6926, 51.4168, 44.5832, 41.3074,
        41.696, 45.2456, 52.6508, 56.2004, 39.4273, 47.0526, 62.9602, 70.5855,
        47.3008, 57.3042, 78.173, 88.1764, 51.4771, 62.3638, 85.0752, 95.9619
    ]
    expected_data[image_ops.ResizeMethod.BICUBIC] = [
        70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981,
        36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731,
        41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284
    ]
    expected_data[image_ops.ResizeMethod.MITCHELLCUBIC] = [
        66.0382, 56.6079, 39.3921, 29.9618, 56.7255, 51.9603, 43.2611, 38.4959,
        39.1828, 43.4664, 51.2864, 55.57, 34.6287, 45.1812, 64.4458, 74.9983,
        43.8523, 56.8078, 80.4594, 93.4149, 48.9943, 63.026, 88.6422, 102.6739
    ]

    for nptype in self.TYPES:
      for method in expected_data:
        with self.cached_session():
          img_np = np.array(data, dtype=nptype).reshape(img_shape)
          image = constant_op.constant(img_np, shape=img_shape)
          y = image_ops.resize_images_v2(image, [target_height, target_width],
                                         method)
          resized = self.evaluate(y)
          expected = np.array(expected_data[method]).reshape(
              [1, target_height, target_width, 1])
          self.assertAllClose(resized, expected, atol=1e-04)
  # XLA doesn't implement half_pixel_centers
  @test_util.disable_xla("b/127616992")
  def testLegacyBicubicMethodsMatchNewMethods(self):
    """Legacy resize kernels match scale_and_translate with the same kernel.

    Pairs the legacy ops with their scale_and_translate kernel names
    (bilinear -> "triangle", bicubic -> "keyscubic") and checks the outputs
    agree to 1e-4.
    """
    img_shape = [1, 3, 2, 1]
    data = [64, 32, 32, 64, 50, 100]
    target_height = 6
    target_width = 4
    methods_to_test = ((gen_image_ops.resize_bilinear, "triangle"),
                       (gen_image_ops.resize_bicubic, "keyscubic"))
    for legacy_method, new_method in methods_to_test:
      with self.cached_session():
        img_np = np.array(data, dtype=np.float32).reshape(img_shape)
        image = constant_op.constant(img_np, shape=img_shape)
        legacy_result = legacy_method(
            image,
            constant_op.constant([target_height, target_width],
                                 dtype=dtypes.int32),
            half_pixel_centers=True)
        # Scale factors relative to the input's spatial dims.
        scale = (
            constant_op.constant([target_height, target_width],
                                 dtype=dtypes.float32) /
            math_ops.cast(array_ops.shape(image)[1:3], dtype=dtypes.float32))
        new_result = gen_image_ops.scale_and_translate(
            image,
            constant_op.constant([target_height, target_width],
                                 dtype=dtypes.int32),
            scale,
            array_ops.zeros([2]),
            kernel_type=new_method,
            antialias=False)
        self.assertAllClose(
            self.evaluate(legacy_result), self.evaluate(new_result), atol=1e-04)
  def testResizeDownArea(self):
    """AREA downsampling 6x6 -> 4x4 matches golden values (uint8 input)."""
    img_shape = [1, 6, 6, 1]
    data = [
        128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
        10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
    ]
    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)

    target_height = 4
    target_width = 4
    expected_data = [
        73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
    ]

    with self.cached_session():
      image = constant_op.constant(img_np, shape=img_shape)
      y = image_ops.resize_images_v2(image, [target_height, target_width],
                                     image_ops.ResizeMethod.AREA)
      expected = np.array(expected_data).reshape(
          [1, target_height, target_width, 1])
      resized = self.evaluate(y)
      # atol=1 allows for rounding differences against the integer goldens.
      self.assertAllClose(resized, expected, atol=1)
  def testCompareNearestNeighbor(self):
    """NEAREST_NEIGHBOR results agree between GPU and CPU (if GPU present)."""
    if test.is_gpu_available():
      input_shape = [1, 5, 6, 3]
      target_height = 8
      target_width = 12
      for nptype in [np.float32, np.float64]:
        img_np = np.arange(
            0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
        # GPU pass.
        with self.cached_session():
          image = constant_op.constant(img_np, shape=input_shape)
          new_size = constant_op.constant([target_height, target_width])
          out_op = image_ops.resize_images_v2(
              image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
          gpu_val = self.evaluate(out_op)
        # CPU pass.
        with self.cached_session(use_gpu=False):
          image = constant_op.constant(img_np, shape=input_shape)
          new_size = constant_op.constant([target_height, target_width])
          out_op = image_ops.resize_images_v2(
              image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
          cpu_val = self.evaluate(out_op)
        self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testBfloat16MultipleOps(self):
target_height = 8
target_width = 12
img = np.random.uniform(0, 100, size=(30, 10, 2)).astype(np.float32)
img_bf16 = ops.convert_to_tensor(img, dtype="bfloat16")
new_size = constant_op.constant([target_height, target_width])
img_methods = [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.BICUBIC,
image_ops.ResizeMethod.AREA
]
for method in img_methods:
out_op_bf16 = image_ops.resize_images_v2(img_bf16, new_size, method)
out_op_f32 = image_ops.resize_images_v2(img, new_size, method)
bf16_val = self.evaluate(out_op_bf16)
f32_val = self.evaluate(out_op_f32)
self.assertAllClose(bf16_val, f32_val, rtol=1e-2, atol=1e-2)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = ops.convert_to_tensor(x)
else:
target_max = (max_h, max_w)
x_tensor = x
def resize_func(t,
target_max=target_max,
preserve_aspect_ratio=preserve_aspect_ratio):
return image_ops.resize_images(
t, ops.convert_to_tensor(target_max),
preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session():
return self.evaluate(resize_func(x_tensor))
def _assertResizeEqual(self,
x,
x_shape,
y,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self,
x,
x_shape,
target_shape,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 80, 10]
x = np.random.uniform(size=x_shape)
for preserve_aspect_ratio in [True, False]:
with self.subTest(preserve_aspect_ratio=preserve_aspect_ratio):
expect_shape = [10, 250, 200, 10] if preserve_aspect_ratio \
else [10, 250, 250, 10]
self._assertResizeCheckShape(
x,
x_shape, [250, 250],
expect_shape,
preserve_aspect_ratio=preserve_aspect_ratio)
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
def testLargeDim(self):
with self.session():
with self.assertRaises(errors.InvalidArgumentError):
x = np.ones((5, 1, 1, 2))
v = image_ops.resize_images_v2(x, [1610637938, 1610637938],
image_ops.ResizeMethod.BILINEAR)
_ = self.evaluate(v)
class ResizeImagesTest(test_util.TensorFlowTestCase,
                       parameterized.TestCase):
  """Tests for the V1 `image_ops.resize_images` API (`ResizeMethodV1`).

  Fixes relative to the previous revision:
  * `testReturnDtypeV2`: the traced function compared against `method`, a
    closure over the *outer loop variable* (bound only later), instead of
    its own `target_method` parameter; it worked by accident because the
    caller always passed the same value. It now uses the parameter.
  * `testNameScope`: removed an unused local variable.
  """

  # Resize methods exercised by most tests below.
  METHODS = [
      image_ops.ResizeMethodV1.BILINEAR,
      image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
      image_ops.ResizeMethodV1.BICUBIC, image_ops.ResizeMethodV1.AREA
  ]

  # Input dtypes exercised by most tests below.
  TYPES = [
      np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
      np.float32, np.float64
  ]

  def _assertShapeInference(self, pre_shape, size, post_shape):
    """Checks static shape inference for single images and batches."""
    # Try single image resize
    single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.resize_images(single_image, size)
    self.assertEqual(y.get_shape().as_list(), post_shape)
    # Try batch images resize with known batch size
    images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
    y = image_ops.resize_images(images, size)
    self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
    # Try batch images resize with unknown batch size
    images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
    y = image_ops.resize_images(images, size)
    self.assertEqual(y.get_shape().as_list(), [None] + post_shape)

  def shouldRunOnGPU(self, method, nptype):
    """True when the method/dtype combination has a GPU kernel to test."""
    if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR and
        nptype in [np.float32, np.float64]):
      return True
    else:
      return False

  @test_util.disable_xla("align_corners=False not supported by XLA")
  def testNoOp(self):
    """Resizing to the input size leaves data and shape unchanged."""
    img_shape = [1, 6, 4, 1]
    single_shape = [6, 4, 1]
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [
        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
        50, 50, 100, 100, 50, 50, 100, 100
    ]
    target_height = 6
    target_width = 4
    for nptype in self.TYPES:
      img_np = np.array(data, dtype=nptype).reshape(img_shape)
      for method in self.METHODS:
        with self.cached_session():
          image = constant_op.constant(img_np, shape=img_shape)
          y = image_ops.resize_images(image, [target_height, target_width],
                                      method)
          yshape = array_ops.shape(y)
          resized, newshape = self.evaluate([y, yshape])
          self.assertAllEqual(img_shape, newshape)
          self.assertAllClose(resized, img_np, atol=1e-5)
      # Resizing with a single image must leave the shape unchanged also.
      with self.cached_session():
        img_single = img_np.reshape(single_shape)
        image = constant_op.constant(img_single, shape=single_shape)
        y = image_ops.resize_images(image, [target_height, target_width],
                                    self.METHODS[0])
        yshape = array_ops.shape(y)
        newshape = self.evaluate(yshape)
        self.assertAllEqual(single_shape, newshape)

  def testTensorArguments(self):
    """Accepts tensor sizes; rejects malformed or non-integer sizes."""
    img_shape = [1, 6, 4, 1]
    single_shape = [6, 4, 1]
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [
        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
        50, 50, 100, 100, 50, 50, 100, 100
    ]

    def resize_func(t, new_size, method):
      return image_ops.resize_images(t, new_size, method)

    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
    for method in self.METHODS:
      with self.cached_session():
        image = constant_op.constant(img_np, shape=img_shape)
        y = resize_func(image, [6, 4], method)
        yshape = array_ops.shape(y)
        resized, newshape = self.evaluate([y, yshape])
        self.assertAllEqual(img_shape, newshape)
        self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing with a single image must leave the shape unchanged also.
    with self.cached_session():
      img_single = img_np.reshape(single_shape)
      image = constant_op.constant(img_single, shape=single_shape)
      y = resize_func(image, [6, 4], self.METHODS[0])
      yshape = array_ops.shape(y)
      resized, newshape = self.evaluate([y, yshape])
      self.assertAllEqual(single_shape, newshape)
      self.assertAllClose(resized, img_single, atol=1e-5)
    # Incorrect shape.
    with self.assertRaises(ValueError):
      new_size = constant_op.constant(4)
      _ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
    with self.assertRaises(ValueError):
      new_size = constant_op.constant([4])
      _ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
    with self.assertRaises(ValueError):
      new_size = constant_op.constant([1, 2, 3])
      _ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
    # Incorrect dtypes.
    with self.assertRaises(ValueError):
      new_size = constant_op.constant([6.0, 4])
      _ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
    with self.assertRaises(ValueError):
      _ = resize_func(image, [6, 4.0], image_ops.ResizeMethodV1.BILINEAR)
    with self.assertRaises(ValueError):
      _ = resize_func(image, [None, 4], image_ops.ResizeMethodV1.BILINEAR)
    with self.assertRaises(ValueError):
      _ = resize_func(image, [6, None], image_ops.ResizeMethodV1.BILINEAR)

  def testReturnDtypeV1(self):
    """Output dtype: preserved for NN or no-op size; float32 otherwise."""
    # Shape inference in V1.
    with ops.Graph().as_default():
      target_shapes = [[6, 4], [3, 2], [
          array_ops.placeholder(dtypes.int32),
          array_ops.placeholder(dtypes.int32)
      ]]
      for nptype in self.TYPES:
        image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
        for method in self.METHODS:
          for target_shape in target_shapes:
            y = image_ops.resize_images(image, target_shape, method)
            if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
                target_shape == image.shape[1:3]):
              expected_dtype = image.dtype
            else:
              expected_dtype = dtypes.float32
            self.assertEqual(y.dtype, expected_dtype)

  @parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
  def testReturnDtypeV2(self, run_func_eagerly):
    """Output dtype rules also hold when tracing through tf.function."""
    if not context.executing_eagerly() and run_func_eagerly:
      # Skip running tf.function eagerly in V1 mode.
      self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
    else:

      @def_function.function
      def test_dtype(image, target_shape, target_method):
        y = image_ops.resize_images(image, target_shape, target_method)
        # Bug fix: compare against the `target_method` parameter, not the
        # enclosing loop variable `method` captured by closure.
        if (target_method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
            target_shape == image.shape[1:3]):
          expected_dtype = image.dtype
        else:
          expected_dtype = dtypes.float32
        self.assertEqual(y.dtype, expected_dtype)

      target_shapes = [[6, 4],
                       [3, 2],
                       [tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
                        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]]

      for nptype in self.TYPES:
        image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype)
        for method in self.METHODS:
          for target_shape in target_shapes:
            with test_util.run_functions_eagerly(run_func_eagerly):
              test_dtype.get_concrete_function(image, target_shape, method)

  @test_util.disable_xla("align_corners=False not supported by XLA")
  def testSumTensor(self):
    """Target size built from a sum of tensors still works."""
    img_shape = [1, 6, 4, 1]
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [
        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
        50, 50, 100, 100, 50, 50, 100, 100
    ]
    # Test size where width is specified as a tensor which is a sum
    # of two tensors.
    width_1 = constant_op.constant(1)
    width_2 = constant_op.constant(3)
    width = math_ops.add(width_1, width_2)
    height = constant_op.constant(6)
    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
    for method in self.METHODS:
      with self.cached_session() as sess:
        image = constant_op.constant(img_np, shape=img_shape)
        y = image_ops.resize_images(image, [height, width], method)
        yshape = array_ops.shape(y)
        resized, newshape = self.evaluate([y, yshape])
        self.assertAllEqual(img_shape, newshape)
        self.assertAllClose(resized, img_np, atol=1e-5)

  @test_util.disable_xla("align_corners=False not supported by XLA")
  def testResizeDown(self):
    """Downsampling matches golden data for 3-D and 4-D inputs."""
    # This test is also conducted with int8, so 127 is the maximum
    # value that can be used.
    data = [
        127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
        50, 50, 100, 100, 50, 50, 100, 100
    ]
    expected_data = [127, 64, 64, 127, 50, 100]
    target_height = 3
    target_width = 2
    # Test out 3-D and 4-D image shapes.
    img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
    target_shapes = [[1, target_height, target_width, 1],
                     [target_height, target_width, 1]]
    for target_shape, img_shape in zip(target_shapes, img_shapes):
      for nptype in self.TYPES:
        img_np = np.array(data, dtype=nptype).reshape(img_shape)
        for method in self.METHODS:
          if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
            with self.cached_session():
              image = constant_op.constant(img_np, shape=img_shape)
              y = image_ops.resize_images(image, [target_height, target_width],
                                          method)
              expected = np.array(expected_data).reshape(target_shape)
              resized = self.evaluate(y)
              self.assertAllClose(resized, expected, atol=1e-5)

  @test_util.disable_xla("align_corners=False not supported by XLA")
  def testResizeUpAlignCornersFalse(self):
    """Upsampling with align_corners=False matches per-method golden data."""
    img_shape = [1, 3, 2, 1]
    data = [64, 32, 32, 64, 50, 100]
    target_height = 6
    target_width = 4
    expected_data = {}
    expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
        64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,
        41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,
        100.0
    ]
    expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
        64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
        32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
        100.0
    ]
    expected_data[image_ops.ResizeMethodV1.AREA] = [
        64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
        32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
        100.0
    ]
    for nptype in self.TYPES:
      for method in [
          image_ops.ResizeMethodV1.BILINEAR,
          image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
          image_ops.ResizeMethodV1.AREA
      ]:
        with self.cached_session():
          img_np = np.array(data, dtype=nptype).reshape(img_shape)
          image = constant_op.constant(img_np, shape=img_shape)
          y = image_ops.resize_images(
              image, [target_height, target_width], method, align_corners=False)
          resized = self.evaluate(y)
          expected = np.array(expected_data[method]).reshape(
              [1, target_height, target_width, 1])
          self.assertAllClose(resized, expected, atol=1e-05)

  def testResizeUpAlignCornersTrue(self):
    """Upsampling with align_corners=True matches per-method golden data."""
    img_shape = [1, 3, 2, 1]
    data = [6, 3, 3, 6, 6, 9]
    target_height = 5
    target_width = 4
    expected_data = {}
    expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
        6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,
        6.5, 7.5, 6.0, 7.0, 8.0, 9.0
    ]
    expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
        6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,
        9.0, 9.0, 6.0, 6.0, 9.0, 9.0
    ]
    # TODO(b/37749740): Improve alignment of ResizeMethodV1.AREA when
    # align_corners=True.
    expected_data[image_ops.ResizeMethodV1.AREA] = [
        6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,
        3.0, 6.0, 6.0, 6.0, 6.0, 9.0
    ]
    for nptype in self.TYPES:
      for method in [
          image_ops.ResizeMethodV1.BILINEAR,
          image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
          image_ops.ResizeMethodV1.AREA
      ]:
        with self.cached_session():
          img_np = np.array(data, dtype=nptype).reshape(img_shape)
          image = constant_op.constant(img_np, shape=img_shape)
          y = image_ops.resize_images(
              image, [target_height, target_width], method, align_corners=True)
          resized = self.evaluate(y)
          expected = np.array(expected_data[method]).reshape(
              [1, target_height, target_width, 1])
          self.assertAllClose(resized, expected, atol=1e-05)

  def testResizeUpBicubic(self):
    """Bicubic upsampling from 6x6 to 8x8 matches golden data."""
    img_shape = [1, 6, 6, 1]
    data = [
        128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,
        50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,
        50, 50, 100, 100
    ]
    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
    target_height = 8
    target_width = 8
    expected_data = [
        128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,
        55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,
        105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,
        75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105
    ]
    with self.cached_session():
      image = constant_op.constant(img_np, shape=img_shape)
      y = image_ops.resize_images(image, [target_height, target_width],
                                  image_ops.ResizeMethodV1.BICUBIC)
      resized = self.evaluate(y)
      expected = np.array(expected_data).reshape(
          [1, target_height, target_width, 1])
      self.assertAllClose(resized, expected, atol=1)

  def testResizeDownArea(self):
    """AREA downsampling from 6x6 to 4x4 matches golden data."""
    img_shape = [1, 6, 6, 1]
    data = [
        128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
        10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
    ]
    img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
    target_height = 4
    target_width = 4
    expected_data = [
        73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
    ]
    with self.cached_session():
      image = constant_op.constant(img_np, shape=img_shape)
      y = image_ops.resize_images(image, [target_height, target_width],
                                  image_ops.ResizeMethodV1.AREA)
      expected = np.array(expected_data).reshape(
          [1, target_height, target_width, 1])
      resized = self.evaluate(y)
      self.assertAllClose(resized, expected, atol=1)

  def testCompareNearestNeighbor(self):
    """GPU and CPU nearest-neighbor results agree for both alignments."""
    if test.is_gpu_available():
      input_shape = [1, 5, 6, 3]
      target_height = 8
      target_width = 12
      for nptype in [np.float32, np.float64]:
        for align_corners in [True, False]:
          img_np = np.arange(
              0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
          with self.cached_session():
            image = constant_op.constant(img_np, shape=input_shape)
            new_size = constant_op.constant([target_height, target_width])
            out_op = image_ops.resize_images(
                image,
                new_size,
                image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
                align_corners=align_corners)
            gpu_val = self.evaluate(out_op)
          with self.cached_session(use_gpu=False):
            image = constant_op.constant(img_np, shape=input_shape)
            new_size = constant_op.constant([target_height, target_width])
            out_op = image_ops.resize_images(
                image,
                new_size,
                image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
                align_corners=align_corners)
            cpu_val = self.evaluate(out_op)
          self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)

  def testCompareBilinear(self):
    """GPU and CPU bilinear results agree for both alignments."""
    if test.is_gpu_available():
      input_shape = [1, 5, 6, 3]
      target_height = 8
      target_width = 12
      for nptype in [np.float32, np.float64]:
        for align_corners in [True, False]:
          img_np = np.arange(
              0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
          value = {}
          for use_gpu in [True, False]:
            with self.cached_session(use_gpu=use_gpu):
              image = constant_op.constant(img_np, shape=input_shape)
              new_size = constant_op.constant([target_height, target_width])
              out_op = image_ops.resize_images(
                  image,
                  new_size,
                  image_ops.ResizeMethodV1.BILINEAR,
                  align_corners=align_corners)
              value[use_gpu] = self.evaluate(out_op)
          self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)

  def testShapeInference(self):
    """Static shape inference fills in height/width and keeps channels."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
      self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
      self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
      self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
      self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
      self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
      self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
      self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
      self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
      self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
      self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
      self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
      self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
      self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
      self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
      self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
      self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
      self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])

  def testNameScope(self):
    """Resize ops are created under a name that starts with "resize"."""
    # Testing name scope requires placeholders and a graph.
    with ops.Graph().as_default():
      with self.cached_session():
        single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
        y = image_ops.resize_images(single_image, [55, 66])
        self.assertTrue(y.op.name.startswith("resize"))

  def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
                       use_tensor_inputs):
    """Resizes `x` toward (max_h, max_w) and returns the evaluated result."""
    if use_tensor_inputs:
      target_max = ops.convert_to_tensor([max_h, max_w])
      x_tensor = ops.convert_to_tensor(x)
    else:
      target_max = [max_h, max_w]
      x_tensor = x
    y = image_ops.resize_images(
        x_tensor, target_max, preserve_aspect_ratio=preserve_aspect_ratio)
    with self.cached_session():
      return self.evaluate(y)

  def _assertResizeEqual(self, x, x_shape, y, y_shape,
                         preserve_aspect_ratio=True,
                         use_tensor_inputs_options=None):
    """Asserts resizing `x` yields `y` for tensor and non-tensor inputs."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    target_height, target_width, _ = y_shape
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._ResizeImageCall(x, target_height, target_width,
                                   preserve_aspect_ratio, use_tensor_inputs)
      self.assertAllClose(y, y_tf)

  def _assertResizeCheckShape(self, x, x_shape, target_shape,
                              y_shape, preserve_aspect_ratio=True,
                              use_tensor_inputs_options=None):
    """Asserts the resized output has shape `y_shape` (values ignored)."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    target_height, target_width = target_shape
    x = np.array(x).reshape(x_shape)
    y = np.zeros(y_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._ResizeImageCall(x, target_height, target_width,
                                   preserve_aspect_ratio, use_tensor_inputs)
      self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))

  def testPreserveAspectRatioMultipleImages(self):
    """Batch resize without aspect-ratio preservation hits the exact size."""
    x_shape = [10, 100, 100, 10]
    x = np.random.uniform(size=x_shape)
    self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10],
                                 preserve_aspect_ratio=False)

  def testPreserveAspectRatioNoOp(self):
    """Same-size target is an identity resize."""
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    self._assertResizeEqual(x, x_shape, x, x_shape)

  def testPreserveAspectRatioSmaller(self):
    """Shrinking: the smaller target dimension bounds both sides."""
    x_shape = [100, 100, 10]
    x = np.random.uniform(size=x_shape)
    self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])

  def testPreserveAspectRatioSmallerMultipleImages(self):
    """Shrinking a batch preserves the batch dimension."""
    x_shape = [10, 100, 100, 10]
    x = np.random.uniform(size=x_shape)
    self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])

  def testPreserveAspectRatioLarger(self):
    """Enlarging: the smaller target dimension bounds both sides."""
    x_shape = [100, 100, 10]
    x = np.random.uniform(size=x_shape)
    self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])

  def testPreserveAspectRatioSameRatio(self):
    """Target with the same aspect ratio is reached exactly."""
    x_shape = [1920, 1080, 3]
    x = np.random.uniform(size=x_shape)
    self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])

  def testPreserveAspectRatioSquare(self):
    """Square input resized to a square target keeps both dimensions."""
    x_shape = [299, 299, 3]
    x = np.random.uniform(size=x_shape)
    self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
class ResizeImageWithPadV1Test(test_util.TensorFlowTestCase):
  """Tests for the V1 `image_ops.resize_image_with_pad_v1` API."""
  def _ResizeImageWithPad(self, x, target_height, target_width,
                          use_tensor_inputs):
    # Runs resize_image_with_pad_v1 on `x` and returns the evaluated result.
    # When `use_tensor_inputs` is true, the image and target size are fed as
    # tensors; otherwise plain Python values are passed through.
    if use_tensor_inputs:
      target_height = ops.convert_to_tensor(target_height)
      target_width = ops.convert_to_tensor(target_width)
      x_tensor = ops.convert_to_tensor(x)
    else:
      x_tensor = x
    with self.cached_session():
      return self.evaluate(
          image_ops.resize_image_with_pad_v1(x_tensor, target_height,
                                             target_width))
  def _assertReturns(self,
                     x,
                     x_shape,
                     y,
                     y_shape,
                     use_tensor_inputs_options=None):
    """Asserts resize-with-pad of `x` equals `y` for both input styles."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    # The target size is implied by the expected output shape.
    target_height, target_width, _ = y_shape
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._ResizeImageWithPad(x, target_height, target_width,
                                      use_tensor_inputs)
      self.assertAllClose(y, y_tf)
  def _assertRaises(self,
                    x,
                    x_shape,
                    target_height,
                    target_width,
                    err_msg,
                    use_tensor_inputs_options=None):
    """Asserts resize-with-pad of `x` fails with a message matching err_msg."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    x = np.array(x).reshape(x_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      # Graph mode raises ValueError at construction; eager/runtime raises
      # InvalidArgumentError, so both are accepted.
      with self.assertRaisesRegex(
          (ValueError, errors.InvalidArgumentError), err_msg):
        self._ResizeImageWithPad(x, target_height, target_width,
                                 use_tensor_inputs)
  def _assertShapeInference(self, pre_shape, height, width, post_shape):
    """Checks the statically inferred output shape for a placeholder input."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.resize_image_with_pad_v1(image, height, width)
    self.assertEqual(y.get_shape().as_list(), post_shape)
  def testShapeInference(self):
    """Static output shape is [55, 66] (plus batch) regardless of input."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      # Test with 3-D tensors.
      self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
      self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
      self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
      self._assertShapeInference(None, 55, 66, [55, 66, None])
      # Test with 4-D tensors.
      self._assertShapeInference([5, 55, 66, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, 50, 60, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, None, 66, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, None, 60, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, 55, None, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, 50, None, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, None, None, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, 55, 66, None], 55, 66, [5, 55, 66, None])
      self._assertShapeInference([5, 50, 60, None], 55, 66, [5, 55, 66, None])
      self._assertShapeInference([5, None, None, None], 55, 66,
                                 [5, 55, 66, None])
      self._assertShapeInference([None, None, None, None], 55, 66,
                                 [None, 55, 66, None])
  def testNoOp(self):
    """Same-size target leaves the image unchanged."""
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    self._assertReturns(x, x_shape, x, x_shape)
  def testPad(self):
    """Shrinking one dimension resizes then zero-pads the other."""
    # Reduce vertical dimension
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 1, 3, 0]
    y_shape = [1, 4, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Reduce horizontal dimension
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [1, 3, 0, 0]
    y_shape = [2, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Reduce both dimensions; no padding is needed.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [1, 3]
    y_shape = [1, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)
# half_pixel_centers not supported by XLA
@test_util.for_all_test_methods(test_util.disable_xla, "b/127616992")
class ResizeImageWithPadV2Test(test_util.TensorFlowTestCase):
  """Tests for the V2 `image_ops.resize_image_with_pad_v2` API.

  Fix relative to the previous revision: `_assertShapeInference` called the
  *V1* function (`resize_image_with_pad_v1`), a copy-paste from the V1 test
  class, so `testShapeInference` never exercised the v2 API. It now calls
  `resize_image_with_pad_v2`; the static-shape contract is identical, so the
  expected shapes are unchanged.
  """

  def _ResizeImageWithPad(self, x, target_height, target_width,
                          use_tensor_inputs):
    """Runs resize_image_with_pad_v2 on `x` and returns the result.

    When `use_tensor_inputs` is true, the image and target size are fed as
    tensors; otherwise plain Python values are passed through.
    """
    if use_tensor_inputs:
      target_height = ops.convert_to_tensor(target_height)
      target_width = ops.convert_to_tensor(target_width)
      x_tensor = ops.convert_to_tensor(x)
    else:
      x_tensor = x
    with self.cached_session():
      return self.evaluate(
          image_ops.resize_image_with_pad_v2(x_tensor, target_height,
                                             target_width))

  def _assertReturns(self,
                     x,
                     x_shape,
                     y,
                     y_shape,
                     use_tensor_inputs_options=None):
    """Asserts resize-with-pad of `x` equals `y` for both input styles."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    # The target size is implied by the expected output shape.
    target_height, target_width, _ = y_shape
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._ResizeImageWithPad(x, target_height, target_width,
                                      use_tensor_inputs)
      self.assertAllClose(y, y_tf)

  def _assertRaises(self,
                    x,
                    x_shape,
                    target_height,
                    target_width,
                    err_msg,
                    use_tensor_inputs_options=None):
    """Asserts resize-with-pad of `x` fails with a matching error message."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    x = np.array(x).reshape(x_shape)
    for use_tensor_inputs in use_tensor_inputs_options:
      # Graph mode raises ValueError at construction; eager/runtime raises
      # InvalidArgumentError, so both are accepted.
      with self.assertRaisesRegex(
          (ValueError, errors.InvalidArgumentError), err_msg):
        self._ResizeImageWithPad(x, target_height, target_width,
                                 use_tensor_inputs)

  def _assertShapeInference(self, pre_shape, height, width, post_shape):
    """Checks the statically inferred output shape for a placeholder input."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    # Fixed: test the v2 API (was resize_image_with_pad_v1, a copy-paste
    # from the V1 test class above).
    y = image_ops.resize_image_with_pad_v2(image, height, width)
    self.assertEqual(y.get_shape().as_list(), post_shape)

  def testShapeInference(self):
    """Static output shape is [55, 66] (plus batch) regardless of input."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      # Test with 3-D tensors.
      self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
      self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
      self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
      self._assertShapeInference(None, 55, 66, [55, 66, None])
      # Test with 4-D tensors.
      self._assertShapeInference([5, 55, 66, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, 50, 60, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, None, 66, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, None, 60, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, 55, None, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, 50, None, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, None, None, 3], 55, 66, [5, 55, 66, 3])
      self._assertShapeInference([5, 55, 66, None], 55, 66, [5, 55, 66, None])
      self._assertShapeInference([5, 50, 60, None], 55, 66, [5, 55, 66, None])
      self._assertShapeInference([5, None, None, None], 55, 66,
                                 [5, 55, 66, None])
      self._assertShapeInference([None, None, None, None], 55, 66,
                                 [None, 55, 66, None])

  def testNoOp(self):
    """Same-size target leaves the image unchanged."""
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    self._assertReturns(x, x_shape, x, x_shape)

  def testPad(self):
    """Shrinking one dimension resizes (half-pixel centers) then pads."""
    # Reduce vertical dimension
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 3.5, 5.5, 0]
    y_shape = [1, 4, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Reduce horizontal dimension
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [3.5, 5.5, 0, 0]
    y_shape = [2, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)
    # Reduce both dimensions; no padding is needed.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [3.5, 5.5]
    y_shape = [1, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)
class ResizeNearestNeighborGrad(test_util.TensorFlowTestCase):
  """Tests for the raw ResizeNearestNeighborGrad op."""

  def testSizeTooLarge(self):
    """An output size whose product overflows must raise, not crash."""
    grads = constant_op.constant(1, shape=[1, 8, 16, 3], dtype=dtypes.float16)
    size = constant_op.constant([1879048192, 1879048192],
                                shape=[2],
                                dtype=dtypes.int32)
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                r"Encountered overflow when multiplying"):
      self.evaluate(
          gen_image_ops.ResizeNearestNeighborGrad(
              grads=grads,
              size=size,
              align_corners=True,
              half_pixel_centers=False))
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.resize_image_with_crop_or_pad."""

  def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
                                use_tensor_inputs):
    """Run resize_image_with_crop_or_pad inside a tf.function and evaluate it.

    When use_tensor_inputs is True, all arguments are first converted to
    tensors so the op is exercised with tensor-valued sizes as well.
    """
    if use_tensor_inputs:
      target_height = ops.convert_to_tensor(target_height)
      target_width = ops.convert_to_tensor(target_width)
      x_tensor = ops.convert_to_tensor(x)
    else:
      x_tensor = x

    @def_function.function
    def resize_crop_or_pad(*args):
      return image_ops.resize_image_with_crop_or_pad(*args)

    with self.cached_session():
      return self.evaluate(
          resize_crop_or_pad(x_tensor, target_height, target_width))

  def _assertReturns(self,
                     x,
                     x_shape,
                     y,
                     y_shape,
                     use_tensor_inputs_options=None):
    """Assert the op maps x (reshaped to x_shape) to y (reshaped to y_shape).

    The target size is taken from y_shape. Both python-number and tensor
    inputs are checked unless use_tensor_inputs_options restricts this.
    """
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    target_height, target_width, _ = y_shape
    x = np.array(x).reshape(x_shape)
    y = np.array(y).reshape(y_shape)

    for use_tensor_inputs in use_tensor_inputs_options:
      y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
                                            use_tensor_inputs)
      self.assertAllClose(y, y_tf)

  def _assertRaises(self,
                    x,
                    x_shape,
                    target_height,
                    target_width,
                    err_msg,
                    use_tensor_inputs_options=None):
    """Assert the op raises ValueError/InvalidArgumentError matching err_msg."""
    use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
    x = np.array(x).reshape(x_shape)

    for use_tensor_inputs in use_tensor_inputs_options:
      with self.assertRaisesRegex(
          (ValueError, errors.InvalidArgumentError), err_msg):
        self._ResizeImageWithCropOrPad(x, target_height, target_width,
                                       use_tensor_inputs)

  def _assertShapeInference(self, pre_shape, height, width, post_shape):
    """Assert static shape inference maps pre_shape to post_shape."""
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.resize_image_with_crop_or_pad(image, height, width)
    self.assertEqual(y.get_shape().as_list(), post_shape)

  def testNoOp(self):
    """Target size equal to input size returns the input unchanged."""
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    self._assertReturns(x, x_shape, x, x_shape)

  def testPad(self):
    """Targets larger than the input are zero-padded symmetrically."""
    # Pad even along col.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]
    y_shape = [2, 6, 1]
    self._assertReturns(x, x_shape, y, y_shape)

    # Pad odd along col.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0]
    y_shape = [2, 7, 1]
    self._assertReturns(x, x_shape, y, y_shape)

    # Pad even along row.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]
    y_shape = [4, 4, 1]
    self._assertReturns(x, x_shape, y, y_shape)

    # Pad odd along row.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
    y_shape = [5, 4, 1]
    self._assertReturns(x, x_shape, y, y_shape)

  def testCrop(self):
    """Targets smaller than the input are center-cropped."""
    # Crop even along col.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [2, 3, 6, 7]
    y_shape = [2, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)

    # Crop odd along col.
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    x_shape = [2, 6, 1]
    y = [2, 3, 4, 8, 9, 10]
    y_shape = [2, 3, 1]
    self._assertReturns(x, x_shape, y, y_shape)

    # Crop even along row.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [4, 2, 1]
    y = [3, 4, 5, 6]
    y_shape = [2, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)

    # Crop odd along row.
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
    x_shape = [8, 2, 1]
    y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    y_shape = [5, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)

  def testCropAndPad(self):
    """Mixed targets crop one dimension while padding the other."""
    # Pad along row but crop along col.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [2, 4, 1]
    y = [0, 0, 2, 3, 6, 7, 0, 0]
    y_shape = [4, 2, 1]
    self._assertReturns(x, x_shape, y, y_shape)

    # Crop along row but pad along col.
    x = [1, 2, 3, 4, 5, 6, 7, 8]
    x_shape = [4, 2, 1]
    y = [0, 3, 4, 0, 0, 5, 6, 0]
    y_shape = [2, 4, 1]
    self._assertReturns(x, x_shape, y, y_shape)

  def testShapeInference(self):
    """Static output shape is always target size; channels pass through."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
      self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
      self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
      self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
      self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
      self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
      self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
      self._assertShapeInference(None, 55, 66, [55, 66, None])

  def testNon3DInput(self):
    """Inputs that are neither 3-D nor 4-D are rejected."""
    # Input image is not 3D
    x = [0] * 15
    target_height, target_width = [4, 4]

    for x_shape in ([3, 5],):
      self._assertRaises(x, x_shape, target_height, target_width,
                         "must have either 3 or 4 dimensions.")

    for x_shape in ([1, 3, 5, 1, 1],):
      self._assertRaises(x, x_shape, target_height, target_width,
                         "must have either 3 or 4 dimensions.")

  def testZeroLengthInput(self):
    """Zero-sized spatial or channel dimensions are rejected."""
    # Input image has 0-length dimension(s).
    target_height, target_width = [1, 1]
    x = []

    for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
      self._assertRaises(
          x,
          x_shape,
          target_height,
          target_width,
          "inner 3 dims of 'image.shape' must be > 0",
          use_tensor_inputs_options=[False])

      # The original error message does not contain back slashes. However, they
      # are added by either the assert op or the runtime. If this behavior
      # changes in the future, the match string will also needs to be changed.
      self._assertRaises(
          x,
          x_shape,
          target_height,
          target_width,
          "inner 3 dims of \\'image.shape\\' must be > 0",
          use_tensor_inputs_options=[True])

  def testBadParams(self):
    """Non-positive target sizes are rejected."""
    x_shape = [4, 4, 1]
    x = np.zeros(x_shape)

    # target_height <= 0
    target_height, target_width = [0, 5]
    self._assertRaises(x, x_shape, target_height, target_width,
                       "target_height must be > 0")

    # target_width <= 0
    target_height, target_width = [5, 0]
    self._assertRaises(x, x_shape, target_height, target_width,
                       "target_width must be > 0")

  def testNameScope(self):
    """The op's output name carries the expected name scope."""
    # Testing name scope requires placeholders and a graph.
    with ops.Graph().as_default():
      image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
      y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)
      self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad"))
def simple_color_ramp():
  """Build a simple color ramp RGB image.

  Returns:
    A (200, 256, 3) uint8 array where red ramps down the rows, green ramps
    across the columns, and blue is the integer mean of the two.
  """
  width, height = 256, 200
  rows = np.arange(height)[:, None]
  cols = np.arange(width)
  ramp = np.empty((height, width, 3), dtype=np.uint8)
  ramp[:, :, 0] = rows
  ramp[:, :, 1] = cols
  ramp[:, :, 2] = (rows + cols) >> 1
  return ramp
class JpegTest(test_util.TensorFlowTestCase):
  """Tests for JPEG decode/encode and related image ops."""

  # TODO(irving): Add self.assertAverageLess or similar to test_util
  def averageError(self, image0, image1):
    """Mean absolute per-element difference between two same-shape images."""
    self.assertEqual(image0.shape, image1.shape)
    image0 = image0.astype(int)  # Avoid overflow
    return np.abs(image0 - image1).sum() / np.prod(image0.shape)

  def testExisting(self):
    # Read a real jpeg and verify shape
    path = ("tensorflow/core/lib/jpeg/testdata/"
            "jpeg_merge_test1.jpg")
    with self.cached_session():
      jpeg0 = io_ops.read_file(path)
      image0 = image_ops.decode_jpeg(jpeg0)
      image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
      jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1])
      self.assertEqual(len(jpeg0), 3771)
      self.assertEqual(image0.shape, (256, 128, 3))
      # Re-encoding is lossy; only require the round trip to stay close.
      self.assertLess(self.averageError(image0, image1), 1.4)

  def testCmyk(self):
    # Confirm that CMYK reads in as RGB
    base = "tensorflow/core/lib/jpeg/testdata"
    rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
    cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
    shape = 256, 128, 3
    for channels in 3, 0:
      with self.cached_session():
        rgb = image_ops.decode_jpeg(
            io_ops.read_file(rgb_path), channels=channels)
        cmyk = image_ops.decode_jpeg(
            io_ops.read_file(cmyk_path), channels=channels)
        rgb, cmyk = self.evaluate([rgb, cmyk])
        self.assertEqual(rgb.shape, shape)
        self.assertEqual(cmyk.shape, shape)
        error = self.averageError(rgb, cmyk)
        self.assertLess(error, 4)

  def testCropAndDecodeJpeg(self):
    """decode_and_crop_jpeg must match decode_jpeg + crop_to_bounding_box."""
    with self.cached_session() as sess:
      # Encode it, then decode it, then encode it
      base = "tensorflow/core/lib/jpeg/testdata"
      jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
      h, w, _ = 256, 128, 3
      crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],
                      [h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]
      for crop_window in crop_windows:
        # Explicit two stages: decode + crop.
        image1 = image_ops.decode_jpeg(jpeg0)
        y, x, h, w = crop_window
        image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)

        # Combined decode+crop.
        image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window, channels=3)

        # Combined decode+crop should have the same shape inference on image
        # sizes.
        image1_shape = image1_crop.get_shape().as_list()
        image2_shape = image2.get_shape().as_list()
        self.assertAllEqual(image1_shape, image2_shape)

        # CropAndDecode should be equal to DecodeJpeg+Crop.
        image1_crop, image2 = self.evaluate([image1_crop, image2])
        self.assertAllEqual(image1_crop, image2)

  def testCropAndDecodeJpegWithInvalidCropWindow(self):
    """Out-of-bounds or empty crop windows must raise an error."""
    with self.cached_session() as sess:
      # Encode it, then decode it, then encode it
      base = "tensorflow/core/lib/jpeg/testdata"
      jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
      h, w, _ = 256, 128, 3
      # Invalid crop windows.
      crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],
                      [11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],
                      [0, 0, h + 1, w], [0, 0, h, w + 1]]
      for crop_window in crop_windows:
        with self.assertRaisesRegex(
            (ValueError, errors.InvalidArgumentError),
            "Invalid JPEG data or crop window"):
          result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
          self.evaluate(result)

  def testSynthetic(self):
    with self.cached_session():
      # Encode it, then decode it, then encode it
      image0 = constant_op.constant(simple_color_ramp())
      jpeg0 = image_ops.encode_jpeg(image0)
      image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
      image2 = image_ops.decode_jpeg(
          image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
      jpeg0, image0, image1, image2 = self.evaluate(
          [jpeg0, image0, image1, image2])

      # The decoded-encoded image should be similar to the input
      self.assertLess(self.averageError(image0, image1), 0.6)

      # We should be very close to a fixpoint
      self.assertLess(self.averageError(image1, image2), 0.02)

      # Smooth ramps compress well (input size is 153600)
      self.assertGreaterEqual(len(jpeg0), 5000)
      self.assertLessEqual(len(jpeg0), 6000)

  def testSyntheticFasterAlgorithm(self):
    with self.cached_session():
      # Encode it, then decode it, then encode it
      image0 = constant_op.constant(simple_color_ramp())
      jpeg0 = image_ops.encode_jpeg(image0)
      image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
      image2 = image_ops.decode_jpeg(
          image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
      jpeg0, image0, image1, image2 = self.evaluate(
          [jpeg0, image0, image1, image2])

      # The decoded-encoded image should be similar to the input, but
      # note this is worse than the slower algorithm because it is
      # less accurate.
      self.assertLess(self.averageError(image0, image1), 0.95)

      # Repeated compression / decompression will have a higher error
      # with a lossier algorithm.
      self.assertLess(self.averageError(image1, image2), 1.05)

      # Smooth ramps compress well (input size is 153600)
      self.assertGreaterEqual(len(jpeg0), 5000)
      self.assertLessEqual(len(jpeg0), 6000)

  def testDefaultDCTMethodIsIntegerFast(self):
    with self.cached_session():
      # Compare decoding with both dct_option=INTEGER_FAST and
      # default. They should be the same.
      image0 = constant_op.constant(simple_color_ramp())
      jpeg0 = image_ops.encode_jpeg(image0)
      image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
      image2 = image_ops.decode_jpeg(jpeg0)
      image1, image2 = self.evaluate([image1, image2])

      # The images should be the same.
      self.assertAllClose(image1, image2)

  def testShape(self):
    """Static shape of decode_jpeg output reflects only the channel count."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      with self.cached_session():
        jpeg = constant_op.constant("nonsense")
        for channels in 0, 1, 3:
          image = image_ops.decode_jpeg(jpeg, channels=channels)
          self.assertEqual(image.get_shape().as_list(),
                           [None, None, channels or None])

  def testExtractJpegShape(self):
    # Read a real jpeg and verify shape.
    path = ("tensorflow/core/lib/jpeg/testdata/"
            "jpeg_merge_test1.jpg")
    with self.cached_session():
      jpeg = io_ops.read_file(path)
      # Extract shape without decoding.
      image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg))
      self.assertAllEqual(image_shape, [256, 128, 3])

  def testExtractJpegShapeforCmyk(self):
    # Read a cmyk jpeg image, and verify its shape.
    path = ("tensorflow/core/lib/jpeg/testdata/"
            "jpeg_merge_test1_cmyk.jpg")
    with self.cached_session():
      jpeg = io_ops.read_file(path)
      image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg))
      # Cmyk jpeg image has 4 channels.
      self.assertAllEqual(image_shape, [256, 128, 4])

  def testRandomJpegQuality(self):
    # Previous implementation of random_jpeg_quality had a bug.
    # This unit test tests the fixed version, but due to forward compatibility
    # this test can only be done when fixed version is used.
    # Test jpeg quality dynamic randomization.
    with ops.Graph().as_default(), self.test_session():
      np.random.seed(7)
      path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
      jpeg = io_ops.read_file(path)
      image = image_ops.decode_jpeg(jpeg)
      random_jpeg_image = image_ops.random_jpeg_quality(image, 40, 100)
      with self.cached_session() as sess:
        # Test randomization.
        random_jpeg_images = [sess.run(random_jpeg_image) for _ in range(5)]
        are_images_equal = []
        for i in range(1, len(random_jpeg_images)):
          # Most of them should be different if randomization is occurring
          # correctly.
          are_images_equal.append(
              np.array_equal(random_jpeg_images[0], random_jpeg_images[i]))
        self.assertFalse(all(are_images_equal))

  # TODO(b/162345082): stateless random op generates different random number
  # with xla_gpu. Update tests such that there is a single ground truth result
  # to test against.
  def testStatelessRandomJpegQuality(self):
    # Test deterministic randomness in jpeg quality by checking that the same
    # sequence of jpeg quality adjustments are returned each round given the
    # same seed.
    with test_util.use_gpu():
      path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
      jpeg = io_ops.read_file(path)
      image = image_ops.decode_jpeg(jpeg)
      jpeg_quality = (40, 100)
      seeds_list = [(1, 2), (3, 4)]

      iterations = 2
      random_jpeg_images_all = [[] for _ in range(iterations)]
      for random_jpeg_images in random_jpeg_images_all:
        for seed in seeds_list:
          distorted_jpeg = image_ops.stateless_random_jpeg_quality(
              image, jpeg_quality[0], jpeg_quality[1], seed=seed)
          # Verify that the random jpeg image is different from the original
          # jpeg image.
          self.assertNotAllEqual(image, distorted_jpeg)
          random_jpeg_images.append(self.evaluate(distorted_jpeg))

      # Verify that the results are identical given the same seed.
      for i in range(1, iterations):
        self.assertAllEqual(random_jpeg_images_all[0],
                            random_jpeg_images_all[i])

  def testAdjustJpegQuality(self):
    # Test if image_ops.adjust_jpeg_quality works when jpeg quality
    # is an int (not tensor) for backward compatibility.
    with ops.Graph().as_default(), self.test_session():
      np.random.seed(7)
      jpeg_quality = np.random.randint(40, 100)
      path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
      jpeg = io_ops.read_file(path)
      image = image_ops.decode_jpeg(jpeg)
      adjust_jpeg_quality_image = image_ops.adjust_jpeg_quality(
          image, jpeg_quality)
      with self.cached_session() as sess:
        sess.run(adjust_jpeg_quality_image)

  def testAdjustJpegQualityShape(self):
    """adjust_jpeg_quality preserves a channel-compatible static shape."""
    with self.cached_session():
      image = constant_op.constant(
          np.arange(24, dtype=np.uint8).reshape([2, 4, 3]))
      adjusted_image = image_ops.adjust_jpeg_quality(image, 80)
      adjusted_image.shape.assert_is_compatible_with([None, None, 3])
class PngTest(test_util.TensorFlowTestCase):
  """Tests for PNG decode/encode ops."""

  def testExisting(self):
    """Decode real PNGs with various requested channel counts."""
    # Read some real PNGs, converting to different channel numbers
    prefix = "tensorflow/core/lib/png/testdata/"
    inputs = ((1, "lena_gray.png"), (4, "lena_rgba.png"),
              (3, "lena_palette.png"), (4, "lena_palette_trns.png"))
    for channels_in, filename in inputs:
      for channels in 0, 1, 3, 4:
        with self.cached_session():
          png0 = io_ops.read_file(prefix + filename)
          image0 = image_ops.decode_png(png0, channels=channels)
          png0, image0 = self.evaluate([png0, image0])
          # channels=0 means "keep the file's native channel count".
          self.assertEqual(image0.shape, (26, 51, channels or channels_in))
          if channels == channels_in:
            # Round-tripping through encode_png must be lossless.
            image1 = image_ops.decode_png(image_ops.encode_png(image0))
            self.assertAllEqual(image0, self.evaluate(image1))

  def testSynthetic(self):
    with self.cached_session():
      # Encode it, then decode it
      image0 = constant_op.constant(simple_color_ramp())
      png0 = image_ops.encode_png(image0, compression=7)
      image1 = image_ops.decode_png(png0)
      png0, image0, image1 = self.evaluate([png0, image0, image1])

      # PNG is lossless
      self.assertAllEqual(image0, image1)

      # Smooth ramps compress well, but not too well
      self.assertGreaterEqual(len(png0), 400)
      self.assertLessEqual(len(png0), 750)

  def testSyntheticUint16(self):
    with self.cached_session():
      # Encode it, then decode it
      image0 = constant_op.constant(simple_color_ramp(), dtype=dtypes.uint16)
      png0 = image_ops.encode_png(image0, compression=7)
      image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
      png0, image0, image1 = self.evaluate([png0, image0, image1])

      # PNG is lossless
      self.assertAllEqual(image0, image1)

      # Smooth ramps compress well, but not too well
      self.assertGreaterEqual(len(png0), 800)
      self.assertLessEqual(len(png0), 1500)

  def testSyntheticTwoChannel(self):
    with self.cached_session():
      # Strip the b channel from an rgb image to get a two-channel image.
      gray_alpha = simple_color_ramp()[:, :, 0:2]
      image0 = constant_op.constant(gray_alpha)
      png0 = image_ops.encode_png(image0, compression=7)
      image1 = image_ops.decode_png(png0)
      png0, image0, image1 = self.evaluate([png0, image0, image1])
      self.assertEqual(2, image0.shape[-1])
      self.assertAllEqual(image0, image1)

  def testSyntheticTwoChannelUint16(self):
    with self.cached_session():
      # Strip the b channel from an rgb image to get a two-channel image.
      gray_alpha = simple_color_ramp()[:, :, 0:2]
      image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
      png0 = image_ops.encode_png(image0, compression=7)
      image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
      png0, image0, image1 = self.evaluate([png0, image0, image1])
      self.assertEqual(2, image0.shape[-1])
      self.assertAllEqual(image0, image1)

  def testShape(self):
    """Static shape of decode_png output reflects only the channel count."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      with self.cached_session():
        png = constant_op.constant("nonsense")
        for channels in 0, 1, 3:
          image = image_ops.decode_png(png, channels=channels)
          self.assertEqual(image.get_shape().as_list(),
                           [None, None, channels or None])
class GifTest(test_util.TensorFlowTestCase):
  """Tests for GIF decoding."""

  def _testValid(self, filename):
    """Decode a 12-frame test GIF and compare every frame to ground truth.

    The ground-truth construction below encodes the expected content: a
    white bar STRIDE pixels wide that advances each frame, drawn as a
    vertical stripe until it passes the right edge and as a horizontal
    stripe afterwards.
    """
    # Read some real GIFs
    prefix = "tensorflow/core/lib/gif/testdata/"
    WIDTH = 20
    HEIGHT = 40
    STRIDE = 5
    shape = (12, HEIGHT, WIDTH, 3)

    with self.cached_session():
      gif0 = io_ops.read_file(prefix + filename)
      image0 = image_ops.decode_gif(gif0)
      gif0, image0 = self.evaluate([gif0, image0])
      self.assertEqual(image0.shape, shape)
      for frame_idx, frame in enumerate(image0):
        gt = np.zeros(shape[1:], dtype=np.uint8)
        start = frame_idx * STRIDE
        end = (frame_idx + 1) * STRIDE
        # NOTE: removed a leftover debug print(frame_idx) here; it only
        # added noise to the test log.
        if end <= WIDTH:
          gt[:, start:end, :] = 255
        else:
          start -= WIDTH
          end -= WIDTH
          gt[start:end, :, :] = 255
        self.assertAllClose(frame, gt)

  def testValid(self):
    self._testValid("scan.gif")
    self._testValid("optimized.gif")

  def testShape(self):
    """decode_gif infers shape [frames, height, width, 3] statically."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      with self.cached_session():
        gif = constant_op.constant("nonsense")
        image = image_ops.decode_gif(gif)
        self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])

  def testAnimatedGif(self):
    # Test if all frames in the animated GIF file is properly decoded.
    with self.cached_session():
      base = "tensorflow/core/lib/gif/testdata"
      gif = io_ops.read_file(os.path.join(base, "pendulum_sm.gif"))
      gt_frame0 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame0.png"))
      gt_frame1 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame1.png"))
      gt_frame2 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame2.png"))
      image = image_ops.decode_gif(gif)
      frame0 = image_ops.decode_png(gt_frame0)
      frame1 = image_ops.decode_png(gt_frame1)
      frame2 = image_ops.decode_png(gt_frame2)
      image, frame0, frame1, frame2 = self.evaluate([image, frame0, frame1,
                                                     frame2])
      # Compare decoded gif frames with ground-truth data.
      self.assertAllEqual(image[0], frame0)
      self.assertAllEqual(image[1], frame1)
      self.assertAllEqual(image[2], frame2)
class ConvertImageTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.convert_image_dtype."""

  def _convert(self, original, original_dtype, output_dtype, expected):
    """Convert `original` from original_dtype to output_dtype; check result.

    Also exercises the saturating variant for output dtypes where
    saturate=True is checked (float32/float64/int32/int64).
    """
    x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
    y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())

    with self.cached_session():
      image = constant_op.constant(x_np)
      y = image_ops.convert_image_dtype(image, output_dtype)
      # assertEqual (not assertTrue(a == b)) so a failure reports both values.
      self.assertEqual(y.dtype, output_dtype)
      self.assertAllClose(y, y_np, atol=1e-5)
      if output_dtype in [
          dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
      ]:
        y_saturate = image_ops.convert_image_dtype(
            image, output_dtype, saturate=True)
        self.assertEqual(y_saturate.dtype, output_dtype)
        self.assertAllClose(y_saturate, y_np, atol=1e-5)

  def testNoConvert(self):
    """Converting to the same dtype must create only an identity op."""
    # Tests with Tensor.op requires a graph.
    with ops.Graph().as_default():
      # Make sure converting to the same data type creates only an identity op
      with self.cached_session():
        image = constant_op.constant([1], dtype=dtypes.uint8)
        image_ops.convert_image_dtype(image, dtypes.uint8)
        y = image_ops.convert_image_dtype(image, dtypes.uint8)
        self.assertEqual(y.op.type, "Identity")
        self.assertEqual(y.op.inputs[0], image)

  def testConvertBetweenInteger(self):
    # Make sure converting to between integer types scales appropriately
    with self.cached_session():
      self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
      self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
      self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
      self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])

  def testConvertBetweenFloat(self):
    # Make sure converting to between float types does nothing interesting
    with self.cached_session():
      self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
                    [-1.0, 0, 1.0, 200000])
      self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
                    [-1.0, 0, 1.0, 200000])

  def testConvertBetweenIntegerAndFloat(self):
    # Make sure converting from and to a float type scales appropriately
    with self.cached_session():
      self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
                    [0, 1.0 / 255.0, 1])
      self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
                    [0, 1, 255])

  def testConvertBetweenInt16AndInt8(self):
    with self.cached_session():
      # uint8, uint16
      self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
      self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
      # int8, uint16
      self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
      self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
      # int16, uint16
      self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])
      self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])
class TotalVariationTest(test_util.TensorFlowTestCase):
  """Tests the function total_variation() in image_ops.

  We test a few small handmade examples, as well as
  some larger examples using an equivalent numpy
  implementation of the total_variation() function.

  We do NOT test for overflows and invalid / edge-case arguments.
  """

  def _test(self, x_np, y_np):
    """Test that the TensorFlow implementation of
    total_variation(x_np) calculates the values in y_np.

    Note that these may be float-numbers so we only test
    for approximate equality within some narrow error-bound.
    """
    # Create a TensorFlow session.
    with self.cached_session():
      # Add a constant to the TensorFlow graph that holds the input.
      x_tf = constant_op.constant(x_np, shape=x_np.shape)

      # Add ops for calculating the total variation using TensorFlow.
      y = image_ops.total_variation(images=x_tf)

      # Run the TensorFlow session to calculate the result.
      y_tf = self.evaluate(y)

      # Assert that the results are as expected within
      # some small error-bound in case they are float-values.
      self.assertAllClose(y_tf, y_np)

  def _total_variation_np(self, x_np):
    """Calculate the total variation of x_np using numpy.
    This implements the same function as TensorFlow but
    using numpy instead.

    Args:
      x_np: Numpy array with 3 or 4 dimensions.
    """
    dim = len(x_np.shape)
    if dim == 3:
      # Calculate differences for neighboring pixel-values using slices.
      dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
      dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]

      # Sum for all axis.
      sum_axis = None
    elif dim == 4:
      # Calculate differences for neighboring pixel-values using slices.
      dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
      dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]

      # Only sum for the last 3 axis.
      sum_axis = (1, 2, 3)
    else:
      # This should not occur in this test-code.
      pass

    tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
              np.sum(np.abs(dif2), axis=sum_axis)

    return tot_var

  def _test_tensorflow_vs_numpy(self, x_np):
    """Test the TensorFlow implementation against a numpy implementation.

    Args:
      x_np: Numpy array with 3 or 4 dimensions.
    """
    # Calculate the y-values using the numpy implementation.
    y_np = self._total_variation_np(x_np)

    self._test(x_np, y_np)

  def _generateArray(self, shape):
    """Generate an array of the given shape for use in testing.
    The numbers are calculated as the cumulative sum, which
    causes the difference between neighboring numbers to vary."""
    # Flattened length of the array.
    flat_len = np.prod(shape)

    a = np.array(range(flat_len), dtype=int)
    a = np.cumsum(a)
    a = a.reshape(shape)

    return a

  # TODO(b/133851381): re-enable this test.
  def disabledtestTotalVariationNumpy(self):
    """Test the TensorFlow implementation against a numpy implementation.
    The two implementations are very similar so it is possible that both
    have the same bug, which would not be detected by this test. It is
    therefore necessary to test with manually crafted data as well."""

    # Generate a test-array.
    # This is an 'image' with 100x80 pixels and 3 color channels.
    a = self._generateArray(shape=(100, 80, 3))

    # Test the TensorFlow implementation vs. numpy implementation.
    # We use a numpy implementation to check the results that are
    # calculated using TensorFlow are correct.
    self._test_tensorflow_vs_numpy(a)
    self._test_tensorflow_vs_numpy(a + 1)
    self._test_tensorflow_vs_numpy(-a)
    self._test_tensorflow_vs_numpy(1.1 * a)

    # Expand to a 4-dim array.
    b = a[np.newaxis, :]

    # Combine several variations of the image into a single 4-dim array.
    multi = np.vstack((b, b + 1, -b, 1.1 * b))

    # Test that the TensorFlow function can also handle 4-dim arrays.
    self._test_tensorflow_vs_numpy(multi)

  def testTotalVariationHandmade(self):
    """Test the total variation for a few handmade examples."""

    # We create an image that is 2x2 pixels with 3 color channels.
    # The image is very small so we can check the result by hand.

    # Red color channel.
    # The following are the sum of absolute differences between the pixels.
    # sum row dif = (4-1) + (7-2) = 3 + 5 = 8
    # sum col dif = (2-1) + (7-4) = 1 + 3 = 4
    r = [[1, 2], [4, 7]]

    # Blue color channel.
    # sum row dif = 18 + 29 = 47
    # sum col dif = 7 + 18 = 25
    g = [[11, 18], [29, 47]]

    # Green color channel.
    # sum row dif = 120 + 193 = 313
    # sum col dif = 47 + 120 = 167
    b = [[73, 120], [193, 313]]

    # Combine the 3 color channels into a single 3-dim array.
    # The shape is (2, 2, 3) corresponding to (height, width and color).
    a = np.dstack((r, g, b))

    # Total variation for this image.
    # Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
    tot_var = 564

    # Calculate the total variation using TensorFlow and assert it is correct.
    self._test(a, tot_var)

    # If we add 1 to all pixel-values then the total variation is unchanged.
    self._test(a + 1, tot_var)

    # If we negate all pixel-values then the total variation is unchanged.
    self._test(-a, tot_var)  # pylint: disable=invalid-unary-operand-type

    # Scale the pixel-values by a float. This scales the total variation as
    # well.
    b = 1.1 * a
    self._test(b, 1.1 * tot_var)

    # Scale by another float.
    c = 1.2 * a
    self._test(c, 1.2 * tot_var)

    # Combine these 3 images into a single array of shape (3, 2, 2, 3)
    # where the first dimension is for the image-number.
    multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))

    # Check that TensorFlow correctly calculates the total variation
    # for each image individually and returns the correct array.
    self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
class FormatTest(test_util.TensorFlowTestCase):
  """Cross-format decoding consistency tests."""

  def testFormats(self):
    """The same test picture decoded via PNG/JPEG/GIF must match pixelwise."""
    prefix = "tensorflow/core/lib"
    paths = ("png/testdata/lena_gray.png", "jpeg/testdata/jpeg_merge_test1.jpg",
             "gif/testdata/lena.gif")
    decoders = {
        "jpeg": functools.partial(image_ops.decode_jpeg, channels=3),
        "png": functools.partial(image_ops.decode_png, channels=3),
        "gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),
    }
    with self.cached_session():
      for path in paths:
        contents = self.evaluate(io_ops.read_file(os.path.join(prefix, path)))
        images = {}
        for name, decode in decoders.items():
          image = self.evaluate(decode(contents))
          self.assertEqual(image.ndim, 3)
          # Log the comparison before asserting so a failure is easy to place.
          for prev_name, prev in images.items():
            print("path %s, names %s %s, shapes %s %s" %
                  (path, name, prev_name, image.shape, prev.shape))
            self.assertAllEqual(image, prev)
          images[name] = image

  def testError(self):
    """Feeding a multi-frame GIF to the JPEG/PNG decoders must fail."""
    path = "tensorflow/core/lib/gif/testdata/scan.gif"
    with self.cached_session():
      for decode in image_ops.decode_jpeg, image_ops.decode_png:
        with self.assertRaisesOpError(r"Got 12 frames"):
          decode(io_ops.read_file(path)).eval()
class CombinedNonMaxSuppressionTest(test_util.TensorFlowTestCase):
  """Input-validation tests for combined_non_max_suppression."""

  # NOTE(b/142795960): parameterized tests do not work well with tf.tensor
  # inputs. Due to failures, creating another test `testInvalidTensorInput`
  # which is identical to this one except that the input here is a scalar as
  # opposed to a tensor.
  def testInvalidPyInput(self):
    """A python-int max_total_size that exceeds int32 range is rejected."""
    boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                  [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]]
    scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
    max_output_size_per_class = 5
    # 2**31 does not fit in int32, so the op must reject it.
    max_total_size = 2**31
    with self.assertRaisesRegex(
        (TypeError, ValueError),
        "type int64 that does not match expected type of int32|"
        "Tensor conversion requested dtype int32 for Tensor with dtype int64"):
      image_ops.combined_non_max_suppression(
          boxes=boxes_np,
          scores=scores_np,
          max_output_size_per_class=max_output_size_per_class,
          max_total_size=max_total_size)

  # NOTE(b/142795960): parameterized tests do not work well with tf.tensor
  # inputs. Due to failures, creating another this test which is identical to
  # `testInvalidPyInput` except that the input is a tensor here as opposed
  # to a scalar.
  def testInvalidTensorInput(self):
    """An int64 tensor max_total_size is rejected (int32 expected)."""
    boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                  [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]]
    scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
    max_output_size_per_class = 5
    max_total_size = ops.convert_to_tensor(2**31)
    with self.assertRaisesRegex(
        (TypeError, ValueError),
        "type int64 that does not match expected type of int32|"
        "Tensor conversion requested dtype int32 for Tensor with dtype int64"):
      image_ops.combined_non_max_suppression(
          boxes=boxes_np,
          scores=scores_np,
          max_output_size_per_class=max_output_size_per_class,
          max_total_size=max_total_size)
class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
  """Tests for non_max_suppression and the generated v2-v5 op variants."""
  def testNonMaxSuppression(self):
    """Greedy NMS keeps the top-scoring box of each overlapping cluster."""
    # Three clusters: boxes {0, 1, 2}, {3, 4} and {5}; by score, the
    # representatives are 3 (0.95), 0 (0.9) and 5 (0.3).
    boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
    scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
    max_output_size_np = 3
    iou_threshold_np = 0.5
    with self.cached_session():
      boxes = constant_op.constant(boxes_np)
      scores = constant_op.constant(scores_np)
      max_output_size = constant_op.constant(max_output_size_np)
      iou_threshold = constant_op.constant(iou_threshold_np)
      selected_indices = image_ops.non_max_suppression(
          boxes, scores, max_output_size, iou_threshold)
      self.assertAllClose(selected_indices, [3, 0, 5])
  def testInvalidShape(self):
    """Malformed boxes/scores/threshold shapes must raise an error."""
    def nms_func(box, score, max_output_size, iou_thres):
      # Thin wrapper so each bad-shape case below is a single call.
      return image_ops.non_max_suppression(box, score, max_output_size,
                                           iou_thres)
    max_output_size = 3
    iou_thres = 0.5
    # The boxes should be 2D of shape [num_boxes, 4].
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
      scores = constant_op.constant([0.9])
      nms_func(boxes, scores, max_output_size, iou_thres)
    # Dimensions must be 4 (but is 3)
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      boxes = constant_op.constant([[0.0, 0, 1.0]])
      scores = constant_op.constant([0.9])
      nms_func(boxes, scores, max_output_size, iou_thres)
    # The boxes is of shape [num_boxes, 4], and the scores is
    # of shape [num_boxes]. So an error will be thrown bc 1 != 2.
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]])
      scores = constant_op.constant([0.9])
      nms_func(boxes, scores, max_output_size, iou_thres)
    # The scores should be 1D of shape [num_boxes].
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
      scores = constant_op.constant([[0.9]])
      nms_func(boxes, scores, max_output_size, iou_thres)
    # The max output size should be a scalar (0-D).
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
      scores = constant_op.constant([0.9])
      nms_func(boxes, scores, [[max_output_size]], iou_thres)
    # The iou_threshold should be a scalar (0-D).
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
      scores = constant_op.constant([0.9])
      nms_func(boxes, scores, max_output_size, [[iou_thres]])
  @test_util.xla_allow_fallback(
      "non_max_suppression with dynamic output shape unsupported.")
  def testTensors(self):
    """non_max_suppression_v5 reports the count of surviving boxes."""
    with context.eager_mode():
      # Three heavily-overlapping boxes, so only one should survive.
      boxes_tensor = constant_op.constant([[6.625, 6.688, 272., 158.5],
                                           [6.625, 6.75, 270.5, 158.4],
                                           [5.375, 5., 272., 157.5]])
      scores_tensor = constant_op.constant([0.84, 0.7944, 0.7715])
      max_output_size = 100
      iou_threshold = 0.5
      score_threshold = 0.3
      soft_nms_sigma = 0.25
      pad_to_max_output_size = False
      # gen_image_ops.non_max_suppression_v5.
      for dtype in [np.float16, np.float32]:
        boxes = math_ops.cast(boxes_tensor, dtype=dtype)
        scores = math_ops.cast(scores_tensor, dtype=dtype)
        _, _, num_selected = gen_image_ops.non_max_suppression_v5(
            boxes, scores, max_output_size, iou_threshold, score_threshold,
            soft_nms_sigma, pad_to_max_output_size)
        self.assertEqual(num_selected.numpy(), 1)
  @test_util.xla_allow_fallback(
      "non_max_suppression with dynamic output shape unsupported.")
  def testDataTypes(self):
    """All op variants accept float16/float32 boxes and thresholds."""
    # Test case for GitHub issue 20199.
    boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
    scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
    max_output_size_np = 3
    iou_threshold_np = 0.5
    # -inf score threshold keeps all boxes eligible, matching v2 behavior.
    score_threshold_np = float("-inf")
    # Note: There are multiple versions of non_max_suppression v2, v3, v4.
    # gen_image_ops.non_max_suppression_v2:
    for input_dtype in [np.float16, np.float32]:
      for threshold_dtype in [np.float16, np.float32]:
        with self.cached_session():
          boxes = constant_op.constant(boxes_np, dtype=input_dtype)
          scores = constant_op.constant(scores_np, dtype=input_dtype)
          max_output_size = constant_op.constant(max_output_size_np)
          iou_threshold = constant_op.constant(
              iou_threshold_np, dtype=threshold_dtype)
          selected_indices = gen_image_ops.non_max_suppression_v2(
              boxes, scores, max_output_size, iou_threshold)
          selected_indices = self.evaluate(selected_indices)
          self.assertAllClose(selected_indices, [3, 0, 5])
    # gen_image_ops.non_max_suppression_v3
    for input_dtype in [np.float16, np.float32]:
      for threshold_dtype in [np.float16, np.float32]:
        # XLA currently requires dtypes to be equal.
        if input_dtype == threshold_dtype or not test_util.is_xla_enabled():
          with self.cached_session():
            boxes = constant_op.constant(boxes_np, dtype=input_dtype)
            scores = constant_op.constant(scores_np, dtype=input_dtype)
            max_output_size = constant_op.constant(max_output_size_np)
            iou_threshold = constant_op.constant(
                iou_threshold_np, dtype=threshold_dtype)
            score_threshold = constant_op.constant(
                score_threshold_np, dtype=threshold_dtype)
            selected_indices = gen_image_ops.non_max_suppression_v3(
                boxes, scores, max_output_size, iou_threshold, score_threshold)
            selected_indices = self.evaluate(selected_indices)
            self.assertAllClose(selected_indices, [3, 0, 5])
    # gen_image_ops.non_max_suppression_v4.
    for input_dtype in [np.float16, np.float32]:
      for threshold_dtype in [np.float16, np.float32]:
        with self.cached_session():
          boxes = constant_op.constant(boxes_np, dtype=input_dtype)
          scores = constant_op.constant(scores_np, dtype=input_dtype)
          max_output_size = constant_op.constant(max_output_size_np)
          iou_threshold = constant_op.constant(
              iou_threshold_np, dtype=threshold_dtype)
          score_threshold = constant_op.constant(
              score_threshold_np, dtype=threshold_dtype)
          selected_indices, _ = gen_image_ops.non_max_suppression_v4(
              boxes, scores, max_output_size, iou_threshold, score_threshold)
          selected_indices = self.evaluate(selected_indices)
          self.assertAllClose(selected_indices, [3, 0, 5])
    # gen_image_ops.non_max_suppression_v5.
    # Sigma of 0 disables Soft-NMS decay, so v5 matches the hard-NMS result.
    soft_nms_sigma_np = float(0.0)
    for dtype in [np.float16, np.float32]:
      with self.cached_session():
        boxes = constant_op.constant(boxes_np, dtype=dtype)
        scores = constant_op.constant(scores_np, dtype=dtype)
        max_output_size = constant_op.constant(max_output_size_np)
        iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
        score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
        soft_nms_sigma = constant_op.constant(soft_nms_sigma_np, dtype=dtype)
        selected_indices, _, _ = gen_image_ops.non_max_suppression_v5(
            boxes, scores, max_output_size, iou_threshold, score_threshold,
            soft_nms_sigma)
        selected_indices = self.evaluate(selected_indices)
        self.assertAllClose(selected_indices, [3, 0, 5])
  def testZeroIOUThreshold(self):
    """With IOU threshold 0 and tied scores, earlier boxes win ties."""
    boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
    scores_np = [1., 1., 1., 1., 1., 1.]
    max_output_size_np = 3
    iou_threshold_np = 0.0
    with self.cached_session():
      boxes = constant_op.constant(boxes_np)
      scores = constant_op.constant(scores_np)
      max_output_size = constant_op.constant(max_output_size_np)
      iou_threshold = constant_op.constant(iou_threshold_np)
      selected_indices = image_ops.non_max_suppression(
          boxes, scores, max_output_size, iou_threshold)
      self.assertAllClose(selected_indices, [0, 3, 5])
class NonMaxSuppressionWithScoresTest(test_util.TensorFlowTestCase):
  """Tests for non_max_suppression_with_scores (Soft-NMS)."""

  @test_util.xla_allow_fallback(
      "non_max_suppression with dynamic output shape unsupported.")
  def testSelectFromThreeClustersWithSoftNMS(self):
    """Soft-NMS keeps every box but decays the scores of overlapping ones."""
    boxes = constant_op.constant(
        [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
         [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]])
    scores = constant_op.constant([0.9, 0.75, 0.6, 0.95, 0.5, 0.3])
    max_output_size = constant_op.constant(6)
    iou_threshold = constant_op.constant(0.5)
    score_threshold = constant_op.constant(0.0)
    soft_nms_sigma = constant_op.constant(0.5)
    indices_op, scores_op = image_ops.non_max_suppression_with_scores(
        boxes,
        scores,
        max_output_size,
        iou_threshold,
        score_threshold,
        soft_nms_sigma)
    selected_indices, selected_scores = self.evaluate(
        [indices_op, scores_op])
    # All six boxes survive, ordered by (decayed) score.
    self.assertAllClose(selected_indices, [3, 0, 1, 5, 4, 2])
    self.assertAllClose(selected_scores,
                        [0.95, 0.9, 0.384, 0.3, 0.256, 0.197],
                        rtol=1e-2, atol=1e-2)
class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase,
                                  parameterized.TestCase):
  """Tests for non_max_suppression_padded in graph (V1) and tf.function (V2)."""

  @test_util.disable_xla(
      "b/141236442: "
      "non_max_suppression with dynamic output shape unsupported.")
  def testSelectFromThreeClustersV1(self):
    """Padded and unpadded modes agree on selections; padding fills with 0."""
    with ops.Graph().as_default():
      boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                  [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
      scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
      max_output_size_np = 5
      iou_threshold_np = 0.5
      boxes = constant_op.constant(boxes_np)
      scores = constant_op.constant(scores_np)
      max_output_size = constant_op.constant(max_output_size_np)
      iou_threshold = constant_op.constant(iou_threshold_np)
      selected_indices_padded, num_valid_padded = \
          image_ops.non_max_suppression_padded(
              boxes,
              scores,
              max_output_size,
              iou_threshold,
              pad_to_max_output_size=True)
      selected_indices, num_valid = image_ops.non_max_suppression_padded(
          boxes,
          scores,
          max_output_size,
          iou_threshold,
          pad_to_max_output_size=False)
      # The output shape of the padded operation must be fully defined.
      self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)
      self.assertEqual(selected_indices.shape.is_fully_defined(), False)
      with self.cached_session():
        self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
        self.assertEqual(num_valid_padded.eval(), 3)
        self.assertAllClose(selected_indices, [3, 0, 5])
        self.assertEqual(num_valid.eval(), 3)

  @parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
  @test_util.disable_xla(
      "b/141236442: "
      "non_max_suppression with dynamic output shape unsupported.")
  def testSelectFromThreeClustersV2(self, run_func_eagerly):
    """Same check as V1 but through a tf.function, optionally run eagerly."""
    if not context.executing_eagerly() and run_func_eagerly:
      # Skip running tf.function eagerly in V1 mode.
      self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
    else:

      @def_function.function
      def func(boxes, scores, max_output_size, iou_threshold):
        # Convert the function's own arguments. The previous version
        # re-captured the module-level *_np values via closure, shadowing and
        # silently ignoring all four parameters; this now mirrors func() in
        # testSelectFromContinuousOverLapV2 below. The same values are passed
        # in, so the expected results are unchanged.
        boxes = constant_op.constant(boxes)
        scores = constant_op.constant(scores)
        max_output_size = constant_op.constant(max_output_size)
        iou_threshold = constant_op.constant(iou_threshold)
        yp, nvp = image_ops.non_max_suppression_padded(
            boxes,
            scores,
            max_output_size,
            iou_threshold,
            pad_to_max_output_size=True)
        y, n = image_ops.non_max_suppression_padded(
            boxes,
            scores,
            max_output_size,
            iou_threshold,
            pad_to_max_output_size=False)
        # The output shape of the padded operation must be fully defined.
        self.assertEqual(yp.shape.is_fully_defined(), True)
        self.assertEqual(y.shape.is_fully_defined(), False)
        return yp, nvp, y, n

      boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                  [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
      scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
      max_output_size_np = 5
      iou_threshold_np = 0.5
      selected_indices_padded, num_valid_padded, selected_indices, num_valid = \
          func(boxes_np, scores_np, max_output_size_np, iou_threshold_np)
      with self.cached_session():
        with test_util.run_functions_eagerly(run_func_eagerly):
          self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
          self.assertEqual(self.evaluate(num_valid_padded), 3)
          self.assertAllClose(selected_indices, [3, 0, 5])
          self.assertEqual(self.evaluate(num_valid), 3)

  @test_util.xla_allow_fallback(
      "non_max_suppression with dynamic output shape unsupported.")
  def testSelectFromContinuousOverLapV1(self):
    """Chained overlaps: each kept box suppresses only its direct neighbor."""
    with ops.Graph().as_default():
      boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
                  [0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
      scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
      max_output_size_np = 3
      iou_threshold_np = 0.5
      score_threshold_np = 0.1
      boxes = constant_op.constant(boxes_np)
      scores = constant_op.constant(scores_np)
      max_output_size = constant_op.constant(max_output_size_np)
      iou_threshold = constant_op.constant(iou_threshold_np)
      score_threshold = constant_op.constant(score_threshold_np)
      selected_indices, num_valid = image_ops.non_max_suppression_padded(
          boxes,
          scores,
          max_output_size,
          iou_threshold,
          score_threshold)
      # The output shape of the padded operation must be fully defined.
      self.assertEqual(selected_indices.shape.is_fully_defined(), False)
      with self.cached_session():
        self.assertAllClose(selected_indices, [0, 2, 4])
        self.assertEqual(num_valid.eval(), 3)

  @parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
  @test_util.xla_allow_fallback(
      "non_max_suppression with dynamic output shape unsupported.")
  def testSelectFromContinuousOverLapV2(self, run_func_eagerly):
    """Same check as the V1 variant but through a tf.function."""
    if not context.executing_eagerly() and run_func_eagerly:
      # Skip running tf.function eagerly in V1 mode.
      self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
    else:

      @def_function.function
      def func(boxes, scores, max_output_size, iou_threshold, score_threshold):
        boxes = constant_op.constant(boxes)
        scores = constant_op.constant(scores)
        max_output_size = constant_op.constant(max_output_size)
        iou_threshold = constant_op.constant(iou_threshold)
        score_threshold = constant_op.constant(score_threshold)
        y, nv = image_ops.non_max_suppression_padded(
            boxes, scores, max_output_size, iou_threshold, score_threshold)
        # The output shape of the padded operation must be fully defined.
        self.assertEqual(y.shape.is_fully_defined(), False)
        return y, nv

      boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
                  [0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
      scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
      max_output_size_np = 3
      iou_threshold_np = 0.5
      score_threshold_np = 0.1
      selected_indices, num_valid = func(boxes_np, scores_np,
                                         max_output_size_np, iou_threshold_np,
                                         score_threshold_np)
      with self.cached_session():
        with test_util.run_functions_eagerly(run_func_eagerly):
          self.assertAllClose(selected_indices, [0, 2, 4])
          self.assertEqual(self.evaluate(num_valid), 3)

  def testInvalidDtype(self):
    """A max_output_size of 2**31 (int64-only) must be rejected."""
    boxes_np = [[4.0, 6.0, 3.0, 6.0],
                [2.0, 1.0, 5.0, 4.0],
                [9.0, 0.0, 9.0, 9.0]]
    scores = [5.0, 6.0, 5.0]
    max_output_size = 2**31
    with self.assertRaisesRegex(
        (TypeError, ValueError), "type int64 that does not match type int32"):
      boxes = constant_op.constant(boxes_np)
      image_ops.non_max_suppression_padded(boxes, scores, max_output_size)
class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase):
  """Tests for non_max_suppression_with_overlaps."""

  def testSelectOneFromThree(self):
    """Box 1 wins: it tops the scores, suppresses box 0, and box 2 fails the score threshold."""
    overlap_matrix = [
        [1.0, 0.7, 0.2],
        [0.7, 1.0, 0.0],
        [0.2, 0.0, 1.0],
    ]
    overlaps = constant_op.constant(overlap_matrix)
    scores = constant_op.constant([0.7, 0.9, 0.1])
    max_output_size = constant_op.constant(3)
    overlap_threshold = 0.6
    score_threshold = 0.4
    selected = image_ops.non_max_suppression_with_overlaps(
        overlaps, scores, max_output_size, overlap_threshold, score_threshold)
    with self.cached_session():
      self.assertAllClose(selected, [1])
class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):
  """Tests utility function used by ssim() and psnr()."""

  def testWrongDims(self):
    """Feeding an array of insufficient rank trips the runtime checks."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      img = array_ops.placeholder(dtype=dtypes.float32)
      with self.cached_session() as sess:
        _, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img)
        with self.assertRaises(errors.InvalidArgumentError):
          sess.run(checks, {img: np.array((2, 2))})

  def testShapeMismatch(self):
    """Feeding two arrays with different shapes trips the runtime checks."""
    # Shape function requires placeholders and a graph.
    with ops.Graph().as_default():
      img1 = array_ops.placeholder(dtype=dtypes.float32)
      img2 = array_ops.placeholder(dtype=dtypes.float32)
      with self.cached_session() as sess:
        _, _, checks = image_ops_impl._verify_compatible_image_shapes(
            img1, img2)
        with self.assertRaises(errors.InvalidArgumentError):
          sess.run(checks, {img1: np.array([1, 2, 2, 1]),
                            img2: np.array([1, 3, 3, 1])})
class PSNRTest(test_util.TensorFlowTestCase):
  """Tests for PSNR."""
  def _LoadTestImage(self, sess, filename):
    """Decodes a test JPEG into a [1, H, W, C] float32 numpy array."""
    content = io_ops.read_file(os.path.join(
        "tensorflow/core/lib/psnr/testdata", filename))
    im = image_ops.decode_jpeg(content, dct_method="INTEGER_ACCURATE")
    im = image_ops.convert_image_dtype(im, dtypes.float32)
    im, = self.evaluate([im])
    # Prepend a batch dimension.
    return np.expand_dims(im, axis=0)
  def _LoadTestImages(self):
    """Loads the same cat photo saved at three JPEG quality levels."""
    with self.cached_session() as sess:
      q20 = self._LoadTestImage(sess, "cat_q20.jpg")
      q72 = self._LoadTestImage(sess, "cat_q72.jpg")
      q95 = self._LoadTestImage(sess, "cat_q95.jpg")
      return q20, q72, q95
  def _PSNR_NumPy(self, orig, target, max_value):
    """Numpy implementation of PSNR."""
    # MSE over the trailing (height, width, channel) axes, so batched inputs
    # yield one PSNR value per image.
    mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1))
    return 20 * np.log10(max_value) - 10 * np.log10(mse)
  def _RandomImage(self, shape, max_val):
    """Returns an image or image batch with given shape."""
    return np.random.rand(*shape).astype(np.float32) * max_val
  def testPSNRSingleImage(self):
    """image_ops.psnr matches the numpy reference on a single image."""
    image1 = self._RandomImage((8, 8, 1), 1)
    image2 = self._RandomImage((8, 8, 1), 1)
    psnr = self._PSNR_NumPy(image1, image2, 1)
    with self.cached_session():
      tf_image1 = constant_op.constant(image1, shape=image1.shape,
                                       dtype=dtypes.float32)
      tf_image2 = constant_op.constant(image2, shape=image2.shape,
                                       dtype=dtypes.float32)
      tf_psnr = self.evaluate(image_ops.psnr(tf_image1, tf_image2, 1.0, "psnr"))
      self.assertAllClose(psnr, tf_psnr, atol=0.001)
  def testPSNRMultiImage(self):
    """image_ops.psnr matches the numpy reference on a batch of images."""
    image1 = self._RandomImage((10, 8, 8, 1), 1)
    image2 = self._RandomImage((10, 8, 8, 1), 1)
    psnr = self._PSNR_NumPy(image1, image2, 1)
    with self.cached_session():
      tf_image1 = constant_op.constant(image1, shape=image1.shape,
                                       dtype=dtypes.float32)
      tf_image2 = constant_op.constant(image2, shape=image2.shape,
                                       dtype=dtypes.float32)
      tf_psnr = self.evaluate(image_ops.psnr(tf_image1, tf_image2, 1, "psnr"))
      self.assertAllClose(psnr, tf_psnr, atol=0.001)
  def testGoldenPSNR(self):
    """Both the numpy reference and image_ops.psnr match golden values."""
    q20, q72, q95 = self._LoadTestImages()
    # Verify NumPy implementation first.
    # Golden values are generated using GNU Octave's psnr() function.
    psnr1 = self._PSNR_NumPy(q20, q72, 1)
    self.assertNear(30.321, psnr1, 0.001, msg="q20.dtype=" + str(q20.dtype))
    psnr2 = self._PSNR_NumPy(q20, q95, 1)
    self.assertNear(29.994, psnr2, 0.001)
    psnr3 = self._PSNR_NumPy(q72, q95, 1)
    self.assertNear(35.302, psnr3, 0.001)
    # Test TensorFlow implementation.
    with self.cached_session():
      tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
      tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)
      tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)
      tf_psnr1 = self.evaluate(image_ops.psnr(tf_q20, tf_q72, 1, "psnr1"))
      tf_psnr2 = self.evaluate(image_ops.psnr(tf_q20, tf_q95, 1, "psnr2"))
      tf_psnr3 = self.evaluate(image_ops.psnr(tf_q72, tf_q95, 1, "psnr3"))
      self.assertAllClose(psnr1, tf_psnr1, atol=0.001)
      self.assertAllClose(psnr2, tf_psnr2, atol=0.001)
      self.assertAllClose(psnr3, tf_psnr3, atol=0.001)
  def testInfinity(self):
    """PSNR of an image with itself (MSE 0) agrees with the numpy reference."""
    q20, _, _ = self._LoadTestImages()
    psnr = self._PSNR_NumPy(q20, q20, 1)
    with self.cached_session():
      tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
      tf_psnr = self.evaluate(image_ops.psnr(tf_q20, tf_q20, 1, "psnr"))
      self.assertAllClose(psnr, tf_psnr, atol=0.001)
  def testInt(self):
    """PSNR of uint8 images matches PSNR of their float32 conversion."""
    img1 = self._RandomImage((10, 8, 8, 1), 255)
    img2 = self._RandomImage((10, 8, 8, 1), 255)
    img1 = constant_op.constant(img1, dtypes.uint8)
    img2 = constant_op.constant(img2, dtypes.uint8)
    psnr_uint8 = image_ops.psnr(img1, img2, 255)
    img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
    img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
    psnr_float32 = image_ops.psnr(img1, img2, 1.0)
    with self.cached_session():
      self.assertAllClose(
          self.evaluate(psnr_uint8), self.evaluate(psnr_float32), atol=0.001)
class SSIMTest(test_util.TensorFlowTestCase):
  """Tests for SSIM."""
  # Checkerboard fixtures compared pairwise against the golden matrix below.
  _filenames = ["checkerboard1.png",
                "checkerboard2.png",
                "checkerboard3.png",]
  # Symmetric golden SSIM scores for every (i, j) pair of the fixtures;
  # the diagonal is 1.0 (an image compared with itself).
  _ssim = np.asarray([[1.000000, 0.230880, 0.231153],
                      [0.230880, 1.000000, 0.996828],
                      [0.231153, 0.996828, 1.000000]])
  def _LoadTestImage(self, sess, filename):
    """Decodes a test PNG into a [1, H, W, C] float32 numpy array."""
    content = io_ops.read_file(os.path.join(
        "tensorflow/core/lib/ssim/testdata", filename))
    im = image_ops.decode_png(content)
    im = image_ops.convert_image_dtype(im, dtypes.float32)
    im, = self.evaluate([im])
    # Prepend a batch dimension.
    return np.expand_dims(im, axis=0)
  def _LoadTestImages(self):
    """Loads all checkerboard fixtures listed in _filenames."""
    with self.cached_session() as sess:
      return [self._LoadTestImage(sess, f) for f in self._filenames]
  def _RandomImage(self, shape, max_val):
    """Returns an image or image batch with given shape."""
    return np.random.rand(*shape).astype(np.float32) * max_val
  def testAgainstMatlab(self):
    """Tests against values produced by Matlab."""
    img = self._LoadTestImages()
    # Upper triangle (with diagonal) covers every unordered pair once.
    expected = self._ssim[np.triu_indices(3)]
    def ssim_func(x):
      return image_ops.ssim(
          *x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    with self.cached_session():
      scores = [
          self.evaluate(ssim_func(t))
          for t in itertools.combinations_with_replacement(img, 2)
      ]
    self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
  def testBatch(self):
    """Batched SSIM equals the per-pair golden scores."""
    img = self._LoadTestImages()
    # k=1 excludes the diagonal: only distinct pairs are batched.
    expected = self._ssim[np.triu_indices(3, k=1)]
    img1, img2 = zip(*itertools.combinations(img, 2))
    img1 = np.concatenate(img1)
    img2 = np.concatenate(img2)
    ssim = image_ops.ssim(
        constant_op.constant(img1),
        constant_op.constant(img2),
        1.0,
        filter_size=11,
        filter_sigma=1.5,
        k1=0.01,
        k2=0.03)
    with self.cached_session():
      self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
  def testBatchNumpyInputs(self):
    """Same as testBatch but with numpy arrays rather than tensors."""
    img = self._LoadTestImages()
    expected = self._ssim[np.triu_indices(3, k=1)]
    img1, img2 = zip(*itertools.combinations(img, 2))
    img1 = np.concatenate(img1)
    img2 = np.concatenate(img2)
    with self.cached_session():
      img1 = self.evaluate(constant_op.constant(img1))
      img2 = self.evaluate(constant_op.constant(img2))
    ssim = image_ops.ssim(
        img1,
        img2,
        1.0,
        filter_size=11,
        filter_sigma=1.5,
        k1=0.01,
        k2=0.03)
    with self.cached_session():
      self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
  def testBroadcast(self):
    """SSIM broadcasts across mismatched leading batch dimensions."""
    img = self._LoadTestImages()[:2]
    expected = self._ssim[:2, :2]
    img = constant_op.constant(np.concatenate(img))
    img1 = array_ops.expand_dims(img, axis=0)  # batch dims: 1, 2.
    img2 = array_ops.expand_dims(img, axis=1)  # batch dims: 2, 1.
    ssim = image_ops.ssim(
        img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    with self.cached_session():
      self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
  def testNegative(self):
    """Tests against negative SSIM index."""
    # A gradient strip versus its mirror image is structurally anti-correlated.
    step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)
    img1 = np.tile(step, (16, 1))
    img2 = np.fliplr(img1)
    img1 = img1.reshape((1, 16, 16, 1))
    img2 = img2.reshape((1, 16, 16, 1))
    ssim = image_ops.ssim(
        constant_op.constant(img1),
        constant_op.constant(img2),
        255,
        filter_size=11,
        filter_sigma=1.5,
        k1=0.01,
        k2=0.03)
    with self.cached_session():
      self.assertLess(self.evaluate(ssim), 0)
  def testInt(self):
    """SSIM of uint8 images matches SSIM of their float32 conversion."""
    img1 = self._RandomImage((1, 16, 16, 3), 255)
    img2 = self._RandomImage((1, 16, 16, 3), 255)
    img1 = constant_op.constant(img1, dtypes.uint8)
    img2 = constant_op.constant(img2, dtypes.uint8)
    ssim_uint8 = image_ops.ssim(
        img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
    img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
    ssim_float32 = image_ops.ssim(
        img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    with self.cached_session():
      self.assertAllClose(
          self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
  def testWithIndexMap(self):
    """return_index_map=True yields a local map whose mean is the global SSIM."""
    img1 = self._RandomImage((1, 16, 16, 3), 255)
    img2 = self._RandomImage((1, 16, 16, 3), 255)
    ssim_locals = image_ops.ssim(
        img1,
        img2,
        1.0,
        filter_size=11,
        filter_sigma=1.5,
        k1=0.01,
        k2=0.03,
        return_index_map=True)
    self.assertEqual(ssim_locals.shape, (1, 6, 6))
    ssim_global = image_ops.ssim(
        img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    axes = constant_op.constant([-2, -1], dtype=dtypes.int32)
    self.assertAllClose(ssim_global, math_ops.reduce_mean(ssim_locals, axes))
class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
  """Tests for MS-SSIM."""
  # Checkerboard fixtures compared pairwise against the golden matrix below.
  _filenames = ["checkerboard1.png",
                "checkerboard2.png",
                "checkerboard3.png",]
  # Symmetric golden MS-SSIM scores for every (i, j) pair of the fixtures.
  _msssim = np.asarray([[1.000000, 0.091016, 0.091025],
                        [0.091016, 1.000000, 0.999567],
                        [0.091025, 0.999567, 1.000000]])
  def _LoadTestImage(self, sess, filename):
    """Decodes a test PNG into a [1, H, W, C] float32 numpy array."""
    content = io_ops.read_file(os.path.join(
        "tensorflow/core/lib/ssim/testdata", filename))
    im = image_ops.decode_png(content)
    im = image_ops.convert_image_dtype(im, dtypes.float32)
    im, = self.evaluate([im])
    # Prepend a batch dimension.
    return np.expand_dims(im, axis=0)
  def _LoadTestImages(self):
    """Loads all checkerboard fixtures listed in _filenames."""
    with self.cached_session() as sess:
      return [self._LoadTestImage(sess, f) for f in self._filenames]
  def _RandomImage(self, shape, max_val):
    """Returns an image or image batch with given shape."""
    return np.random.rand(*shape).astype(np.float32) * max_val
  def testAgainstMatlab(self):
    """Tests against MS-SSIM computed with Matlab implementation.
    For color images, MS-SSIM scores are averaged over color channels.
    """
    img = self._LoadTestImages()
    # Upper triangle (with diagonal) covers every unordered pair once.
    expected = self._msssim[np.triu_indices(3)]
    def ssim_func(x):
      return image_ops.ssim_multiscale(
          *x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    with self.cached_session():
      scores = [
          self.evaluate(ssim_func(t))
          for t in itertools.combinations_with_replacement(img, 2)
      ]
    self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
  def testUnweightedIsDifferentiable(self):
    """Gradients through uniformly-weighted MS-SSIM are finite."""
    img = self._LoadTestImages()
    @def_function.function
    def msssim_func(x1, x2, scalar):
      # Uniform power_factors avoid the default scale weighting.
      return image_ops.ssim_multiscale(
          x1 * scalar,
          x2 * scalar,
          max_val=1.0,
          power_factors=(1, 1, 1, 1, 1),
          filter_size=11,
          filter_sigma=1.5,
          k1=0.01,
          k2=0.03)
    scalar = constant_op.constant(1.0, dtype=dtypes.float32)
    with backprop.GradientTape() as tape:
      tape.watch(scalar)
      y = msssim_func(img[0], img[1], scalar)
    grad = tape.gradient(y, scalar)
    np_grads = self.evaluate(grad)
    self.assertTrue(np.isfinite(np_grads).all())
  def testUnweightedIsDifferentiableEager(self):
    """Same as testUnweightedIsDifferentiable, without tf.function wrapping."""
    if not context.executing_eagerly():
      self.skipTest("Eager mode only")
    img = self._LoadTestImages()
    def msssim_func(x1, x2, scalar):
      return image_ops.ssim_multiscale(
          x1 * scalar,
          x2 * scalar,
          max_val=1.0,
          power_factors=(1, 1, 1, 1, 1),
          filter_size=11,
          filter_sigma=1.5,
          k1=0.01,
          k2=0.03)
    scalar = constant_op.constant(1.0, dtype=dtypes.float32)
    with backprop.GradientTape() as tape:
      tape.watch(scalar)
      y = msssim_func(img[0], img[1], scalar)
    grad = tape.gradient(y, scalar)
    np_grads = self.evaluate(grad)
    self.assertTrue(np.isfinite(np_grads).all())
  def testBatch(self):
    """Tests MS-SSIM computed in batch."""
    img = self._LoadTestImages()
    # k=1 excludes the diagonal: only distinct pairs are batched.
    expected = self._msssim[np.triu_indices(3, k=1)]
    img1, img2 = zip(*itertools.combinations(img, 2))
    img1 = np.concatenate(img1)
    img2 = np.concatenate(img2)
    msssim = image_ops.ssim_multiscale(
        constant_op.constant(img1),
        constant_op.constant(img2),
        1.0,
        filter_size=11,
        filter_sigma=1.5,
        k1=0.01,
        k2=0.03)
    with self.cached_session():
      self.assertAllClose(expected, self.evaluate(msssim), 1e-4)
  def testBroadcast(self):
    """Tests MS-SSIM broadcasting."""
    img = self._LoadTestImages()[:2]
    expected = self._msssim[:2, :2]
    img = constant_op.constant(np.concatenate(img))
    img1 = array_ops.expand_dims(img, axis=0)  # batch dims: 1, 2.
    img2 = array_ops.expand_dims(img, axis=1)  # batch dims: 2, 1.
    score_tensor = image_ops.ssim_multiscale(
        img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    with self.cached_session():
      self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4)
  def testRange(self):
    """Tests against low MS-SSIM score.
    MS-SSIM is a geometric mean of SSIM and CS scores of various scales.
    If any of the value is negative so that the geometric mean is not
    well-defined, then treat the MS-SSIM score as zero.
    """
    with self.cached_session() as sess:
      img1 = self._LoadTestImage(sess, "checkerboard1.png")
      img2 = self._LoadTestImage(sess, "checkerboard3.png")
      # Include flat black and flat white extremes to stress the score range.
      images = [img1, img2, np.zeros_like(img1),
                np.full_like(img1, fill_value=255)]
      images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]
      msssim_ops = [
          image_ops.ssim_multiscale(
              x, y, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
          for x, y in itertools.combinations(images, 2)
      ]
      msssim = self.evaluate(msssim_ops)
      msssim = np.squeeze(msssim)
    self.assertTrue(np.all(msssim >= 0.0))
    self.assertTrue(np.all(msssim <= 1.0))
  def testInt(self):
    """MS-SSIM of uint8 images matches that of their float32 conversion."""
    img1 = self._RandomImage((1, 180, 240, 3), 255)
    img2 = self._RandomImage((1, 180, 240, 3), 255)
    img1 = constant_op.constant(img1, dtypes.uint8)
    img2 = constant_op.constant(img2, dtypes.uint8)
    ssim_uint8 = image_ops.ssim_multiscale(
        img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
    img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
    ssim_float32 = image_ops.ssim_multiscale(
        img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    with self.cached_session():
      self.assertAllClose(
          self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
  def testNumpyInput(self):
    """Test case for GitHub issue 28241."""
    image = np.random.random([512, 512, 1])
    score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0)
    with self.cached_session():
      _ = self.evaluate(score_tensor)
class ImageGradientsTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.image_gradients."""

  def testImageGradients(self):
    """Forward differences of a 1x2x4x1 image; last row/column are zero."""
    shape = [1, 2, 4, 1]
    img = array_ops.reshape(
        constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]]), shape)
    dy, dx = image_ops.image_gradients(img)
    with self.cached_session():
      self.assertAllClose(np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape),
                          self.evaluate(dy))
      self.assertAllClose(np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape),
                          self.evaluate(dx))

  def testImageGradientsMultiChannelBatch(self):
    """Gradients are computed independently per batch item and channel."""
    batch = constant_op.constant([[[[1, 2], [2, 5], [3, 3]],
                                   [[8, 4], [5, 1], [9, 8]]],
                                  [[[5, 3], [7, 9], [1, 6]],
                                   [[1, 2], [6, 3], [6, 3]]]])
    assert batch.get_shape().as_list() == [2, 2, 3, 2]
    expected_dy = [[[[7, 2], [3, -4], [6, 5]],
                    [[0, 0], [0, 0], [0, 0]]],
                   [[[-4, -1], [-1, -6], [5, -3]],
                    [[0, 0], [0, 0], [0, 0]]]]
    expected_dx = [[[[1, 3], [1, -2], [0, 0]],
                    [[-3, -3], [4, 7], [0, 0]]],
                   [[[2, 6], [-6, -3], [0, 0]],
                    [[5, 1], [0, 0], [0, 0]]]]
    dy, dx = image_ops.image_gradients(batch)
    with self.cached_session():
      self.assertAllClose(expected_dy, self.evaluate(dy))
      self.assertAllClose(expected_dx, self.evaluate(dx))

  def testImageGradientsBadShape(self):
    """A rank-2 input is rejected."""
    # [2 x 4] image but missing batch and depth dimensions.
    img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
    with self.assertRaises(ValueError):
      image_ops.image_gradients(img)
class SobelEdgesTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.sobel_edges."""

  def disabled_testSobelEdges1x2x3x1(self):
    """Sobel edges of a tiny single-channel image (currently disabled)."""
    img = constant_op.constant([[1, 3, 6], [4, 1, 5]],
                               dtype=dtypes.float32, shape=[1, 2, 3, 1])
    expected = np.reshape([[[0, 0], [0, 12], [0, 0]],
                           [[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])
    with self.cached_session():
      self.assertAllClose(expected, self.evaluate(image_ops.sobel_edges(img)))

  def testSobelEdges5x3x4x2(self):
    """Sobel output replicates per-plane results across batch and channels."""
    batch_size = 5
    plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]],
                       [1, 3, 4, 1])
    # Duplicate the plane over two channels, then over the batch dimension.
    two_channel = np.concatenate([plane] * 2, axis=3)
    img = constant_op.constant(
        np.concatenate([two_channel] * batch_size, axis=0),
        dtype=dtypes.float32, shape=[batch_size, 3, 4, 2])
    expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]],
                                 [[6, 0], [0, 6], [-6, 10], [-6, 0]],
                                 [[0, 0], [0, 0], [0, 10], [0, 0]]],
                                [1, 3, 4, 1, 2])
    expected_batch = np.concatenate(
        [np.concatenate([expected_plane] * 2, axis=3)] * batch_size, axis=0)
    with self.cached_session():
      self.assertAllClose(expected_batch,
                          self.evaluate(image_ops.sobel_edges(img)))
@test_util.run_all_in_graph_and_eager_modes
class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  """Tests for decode_image and the per-format decode ops (BMP/JPEG/PNG/GIF).

  Most cases compare `decode_image` output against the matching
  format-specific op, and exercise channel-count and dtype handling.
  """

  # Each test repeats under several horizons so both current and future
  # forward-compatible decode behavior are exercised.
  _FORWARD_COMPATIBILITY_HORIZONS = [
      (2020, 1, 1),
      (2020, 7, 14),
      (2525, 1, 1),  # future behavior
  ]

  def testBmpChannels(self):
    for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
      with compat.forward_compatibility_horizon(*horizon):
        with test_util.use_gpu():
          base = "tensorflow/core/lib/bmp/testdata"
          # `rgba_transparent.bmp` has 4 channels with transparent pixels.
          # Test consistency between `decode_image` and `decode_bmp` functions.
          bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp"))
          image0 = image_ops.decode_image(bmp0, channels=4)
          image1 = image_ops.decode_bmp(bmp0, channels=4)
          image0, image1 = self.evaluate([image0, image1])
          self.assertAllEqual(image0, image1)

          # Test that 3 channels is returned with user request of `channels=3`
          # even though image has 4 channels.
          # Note that this operation simply drops 4th channel information. This
          # is the same behavior as `decode_png`.
          # e.g. pixel values [25, 25, 25, 100] becomes [25, 25, 25].
          bmp1 = io_ops.read_file(os.path.join(base, "rgb_small.bmp"))
          image2 = image_ops.decode_bmp(bmp0, channels=3)
          image3 = image_ops.decode_bmp(bmp1)
          image2, image3 = self.evaluate([image2, image3])
          self.assertAllEqual(image2, image3)

          # Test that 4 channels is returned with user request of `channels=4`
          # even though image has 3 channels. Alpha channel should be set to
          # UINT8_MAX.
          bmp3 = io_ops.read_file(os.path.join(base, "rgb_small_255.bmp"))
          bmp4 = io_ops.read_file(os.path.join(base, "rgba_small_255.bmp"))
          image4 = image_ops.decode_bmp(bmp3, channels=4)
          image5 = image_ops.decode_bmp(bmp4)
          image4, image5 = self.evaluate([image4, image5])
          self.assertAllEqual(image4, image5)

          # Test that 3 channels is returned with user request of `channels=3`
          # even though image has 1 channel (grayscale).
          bmp6 = io_ops.read_file(os.path.join(base, "grayscale_small.bmp"))
          bmp7 = io_ops.read_file(
              os.path.join(base, "grayscale_small_3channels.bmp"))
          image6 = image_ops.decode_bmp(bmp6, channels=3)
          image7 = image_ops.decode_bmp(bmp7)
          image6, image7 = self.evaluate([image6, image7])
          self.assertAllEqual(image6, image7)

          # Test that 4 channels is returned with user request of `channels=4`
          # even though image has 1 channel (grayscale). Alpha channel should be
          # set to UINT8_MAX.
          bmp9 = io_ops.read_file(
              os.path.join(base, "grayscale_small_4channels.bmp"))
          image8 = image_ops.decode_bmp(bmp6, channels=4)
          image9 = image_ops.decode_bmp(bmp9)
          image8, image9 = self.evaluate([image8, image9])
          self.assertAllEqual(image8, image9)

  def testJpegUint16(self):
    # decode_image with dtype=uint16 must match decode_jpeg + dtype conversion.
    for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
      with compat.forward_compatibility_horizon(*horizon):
        with self.cached_session():
          base = "tensorflow/core/lib/jpeg/testdata"
          jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
          image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
          image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
                                                 dtypes.uint16)
          image0, image1 = self.evaluate([image0, image1])
          self.assertAllEqual(image0, image1)

  def testPngUint16(self):
    # decode_image with dtype=uint16 must match decode_png(uint16).
    for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
      with compat.forward_compatibility_horizon(*horizon):
        with self.cached_session():
          base = "tensorflow/core/lib/png/testdata"
          png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
          image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
          image1 = image_ops.convert_image_dtype(
              image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)
          image0, image1 = self.evaluate([image0, image1])
          self.assertAllEqual(image0, image1)

          # Round-trip a raw NumPy uint16 array through encode_png /
          # decode_image; NumPy conversions should happen before.
          x = np.random.randint(256, size=(4, 4, 3), dtype=np.uint16)
          x_str = image_ops_impl.encode_png(x)
          x_dec = image_ops_impl.decode_image(
              x_str, channels=3, dtype=dtypes.uint16)
          self.assertAllEqual(x, x_dec)

  def testGifUint16(self):
    for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
      with compat.forward_compatibility_horizon(*horizon):
        with self.cached_session():
          base = "tensorflow/core/lib/gif/testdata"
          gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
          image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
          image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
                                                 dtypes.uint16)
          image0, image1 = self.evaluate([image0, image1])
          self.assertAllEqual(image0, image1)

  def testBmpUint16(self):
    for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
      with compat.forward_compatibility_horizon(*horizon):
        with self.cached_session():
          base = "tensorflow/core/lib/bmp/testdata"
          bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
          image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
          image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
                                                 dtypes.uint16)
          image0, image1 = self.evaluate([image0, image1])
          self.assertAllEqual(image0, image1)

  def testJpegFloat32(self):
    for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
      with compat.forward_compatibility_horizon(*horizon):
        with self.cached_session():
          base = "tensorflow/core/lib/jpeg/testdata"
          jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
          image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
          image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
                                                 dtypes.float32)
          image0, image1 = self.evaluate([image0, image1])
          self.assertAllEqual(image0, image1)

  def testPngFloat32(self):
    for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
      with compat.forward_compatibility_horizon(*horizon):
        with self.cached_session():
          base = "tensorflow/core/lib/png/testdata"
          png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
          image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
          image1 = image_ops.convert_image_dtype(
              image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)
          image0, image1 = self.evaluate([image0, image1])
          self.assertAllEqual(image0, image1)

  def testGifFloat32(self):
    for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
      with compat.forward_compatibility_horizon(*horizon):
        with self.cached_session():
          base = "tensorflow/core/lib/gif/testdata"
          gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
          image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
          image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
                                                 dtypes.float32)
          image0, image1 = self.evaluate([image0, image1])
          self.assertAllEqual(image0, image1)

  def testBmpFloat32(self):
    for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
      with compat.forward_compatibility_horizon(*horizon):
        with self.cached_session():
          base = "tensorflow/core/lib/bmp/testdata"
          bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
          image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)
          image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
                                                 dtypes.float32)
          image0, image1 = self.evaluate([image0, image1])
          self.assertAllEqual(image0, image1)

  def testExpandAnimations(self):
    for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
      with compat.forward_compatibility_horizon(*horizon):
        with self.cached_session():
          base = "tensorflow/core/lib/gif/testdata"
          gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))

          # Test `expand_animations=False` case: only the first frame is
          # returned as a 3-D tensor.
          image0 = image_ops.decode_image(
              gif0, dtype=dtypes.float32, expand_animations=False)
          # image_ops.decode_png() handles GIFs and returns 3D tensors
          animation = image_ops.decode_gif(gif0)
          first_frame = array_ops.gather(animation, 0)
          image1 = image_ops.convert_image_dtype(first_frame, dtypes.float32)
          image0, image1 = self.evaluate([image0, image1])
          self.assertLen(image0.shape, 3)
          self.assertAllEqual(list(image0.shape), [40, 20, 3])
          self.assertAllEqual(image0, image1)

          # Test `expand_animations=True` case: all frames are returned as a
          # 4-D tensor.
          image2 = image_ops.decode_image(gif0, dtype=dtypes.float32)
          image3 = image_ops.convert_image_dtype(animation, dtypes.float32)
          image2, image3 = self.evaluate([image2, image3])
          self.assertLen(image2.shape, 4)
          self.assertAllEqual(list(image2.shape), [12, 40, 20, 3])
          self.assertAllEqual(image2, image3)

  def testImageCropAndResize(self):
    # Non-finite box coordinates are rejected on CPU; on GPU the kernel
    # does not validate them, so the op is only expected to run.
    if test_util.is_gpu_available():
      op = image_ops_impl.crop_and_resize_v2(
          image=array_ops.zeros((2, 1, 1, 1)),
          boxes=[[1.0e+40, 0, 0, 0]],
          box_indices=[1],
          crop_size=[1, 1])
      self.evaluate(op)
    else:
      message = "Boxes contains at least one element that is not finite"
      with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
                                  message):
        op = image_ops_impl.crop_and_resize_v2(
            image=array_ops.zeros((2, 1, 1, 1)),
            boxes=[[1.0e+40, 0, 0, 0]],
            box_indices=[1],
            crop_size=[1, 1])
        self.evaluate(op)

  def testImageCropAndResizeWithInvalidInput(self):
    # A huge crop_size must fail rather than attempt the allocation.
    with self.session():
      with self.assertRaises((errors.InvalidArgumentError, ValueError)):
        op = image_ops_impl.crop_and_resize_v2(
            image=np.ones((1, 1, 1, 1)),
            boxes=np.ones((11, 4)),
            box_indices=np.ones((11)),
            crop_size=[2065374891, 1145309325])
        self.evaluate(op)

  def testImageCropAndResizeWithNon1DBoxes(self):
    # box_indices must be rank 1; a (0, 1) shape is rejected.
    with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
                                "must be rank 1"):
      op = image_ops_impl.crop_and_resize_v2(
          image=np.ones((2, 2, 2, 2)),
          boxes=np.ones((0, 4)),
          box_indices=np.ones((0, 1)),
          crop_size=[1, 1])
      self.evaluate(op)

  @parameterized.named_parameters(
      ("_jpeg", "JPEG", "jpeg_merge_test1.jpg"),
      ("_png", "PNG", "lena_rgba.png"),
      ("_gif", "GIF", "scan.gif"),
  )
  def testWrongOpBmp(self, img_format, filename):
    # decode_bmp must reject non-BMP inputs with a clear error.
    base_folder = "tensorflow/core/lib"
    base_path = os.path.join(base_folder, img_format.lower(), "testdata")
    err_msg = "Trying to decode " + img_format + " format using DecodeBmp op"
    with self.assertRaisesRegex(
        (ValueError, errors.InvalidArgumentError), err_msg):
      img_bytes = io_ops.read_file(os.path.join(base_path, filename))
      img = image_ops.decode_bmp(img_bytes)
      self.evaluate(img)

  @parameterized.named_parameters(
      ("_jpeg", image_ops.decode_jpeg, "DecodeJpeg"),
      ("_png", image_ops.decode_png, "DecodePng"),
      ("_gif", image_ops.decode_gif, "DecodeGif"),
  )
  def testWrongOp(self, decode_op, op_used):
    # Non-BMP decode ops must reject BMP inputs with a clear error.
    base = "tensorflow/core/lib/bmp/testdata"
    bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp"))
    err_msg = ("Trying to decode BMP format using a wrong op. Use `decode_bmp` "
               "or `decode_image` instead. Op used: ") + op_used
    with self.assertRaisesRegex(
        (ValueError, errors.InvalidArgumentError), err_msg):
      img = decode_op(bmp0)
      self.evaluate(img)

  @parameterized.named_parameters(
      ("_png", "PNG", "lena_rgba.png"),
      ("_gif", "GIF", "scan.gif"),
      ("_bmp", "BMP", "rgba_small.bmp"),
  )
  def testWrongOpJpeg(self, img_format, filename):
    # decode_and_crop_jpeg must reject non-JPEG inputs with a clear error.
    base_folder = "tensorflow/core/lib"
    base_path = os.path.join(base_folder, img_format.lower(), "testdata")
    err_msg = ("DecodeAndCropJpeg operation can run on JPEG only, but "
               "detected ") + img_format
    with self.assertRaisesRegex(
        (ValueError, errors.InvalidArgumentError), err_msg):
      img_bytes = io_ops.read_file(os.path.join(base_path, filename))
      img = image_ops.decode_and_crop_jpeg(img_bytes, [1, 1, 2, 2])
      self.evaluate(img)

  def testGifFramesWithDiffSize(self):
    """Test decoding an animated GIF.

    This test verifies that `decode_image` op can decode animated GIFs whose
    first frame does not fill the canvas. The unoccupied areas should be filled
    with zeros (black).

    `squares.gif` is animated with two images of different sizes. It
    alternates between a smaller image of size 10 x 10 and a larger image of
    size 16 x 16. Because it starts animating with the smaller image, the first
    frame does not fill the canvas. (Canvas size is equal to max frame width x
    max frame height.)

    `red_black.gif` has just a single image in a GIF format. It is the same
    image as the smaller image (size 10 x 10) of the two images in
    `squares.gif`. The only difference is that its background (canvas - smaller
    image) is pre-filled with zeros (black); it is the groundtruth.
    """
    base = "tensorflow/core/lib/gif/testdata"
    gif_bytes0 = io_ops.read_file(os.path.join(base, "squares.gif"))
    image0 = image_ops.decode_image(gif_bytes0, dtype=dtypes.float32,
                                    expand_animations=False)
    gif_bytes1 = io_ops.read_file(os.path.join(base, "red_black.gif"))
    image1 = image_ops.decode_image(gif_bytes1, dtype=dtypes.float32)
    image1_0 = array_ops.gather(image1, 0)
    image0, image1_0 = self.evaluate([image0, image1_0])
    self.assertAllEqual(image0, image1_0)
# Standard test-module entry point: run every test case defined above.
if __name__ == "__main__":
  googletest.main()
| {
"content_hash": "fd9c442da543fde452d167307ae1f7dc",
"timestamp": "",
"source": "github",
"line_count": 6405,
"max_line_length": 80,
"avg_line_length": 39.546604215456675,
"alnum_prop": 0.6046167329922304,
"repo_name": "tensorflow/tensorflow-experimental_link_static_libraries_once",
"id": "e229dd6525334d3dcf4c22cfa25298605da981c8",
"size": "253985",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/image_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1343737"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "123969891"
},
{
"name": "CMake",
"bytes": "182027"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416070"
},
{
"name": "Go",
"bytes": "2095490"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074471"
},
{
"name": "Jupyter Notebook",
"bytes": "789401"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11067751"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "169288"
},
{
"name": "Objective-C++",
"bytes": "294177"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42585406"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "620507"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14577"
},
{
"name": "Starlark",
"bytes": "7486225"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""
"""
import json
import pdb
from policy_util import PolicyError, HostStruct, HostAddr, PolicyAppConnectionMgr, is_ipv6_enabled
"""
Entity implementing the business logic of user connection/access policy.
"""
#
#
class PolicyKeys(object):
    """
    String constants used as keys throughout the policy engine:
    config-file key words, compiled-ruleset keys, and statistics keys.
    """
    # Common key words shared by several entity types
    KW_IGNORED_NAME = "name"
    KW_IGNORED_IDENTITY = "identity"
    KW_IGNORED_TYPE = "type"
    KW_VHOST_NAME = "id"

    # Policy ruleset key words
    KW_MAXCONN = "maxConnections"
    KW_MAXCONNPERHOST = "maxConnectionsPerHost"
    KW_MAXCONNPERUSER = "maxConnectionsPerUser"
    KW_CONNECTION_ALLOW_DEFAULT = "allowUnknownUser"
    KW_GROUPS = "groups"

    # Policy settings key words (per user-group connection settings)
    KW_USERS = "users"
    KW_REMOTE_HOSTS = "remoteHosts"
    KW_MAX_FRAME_SIZE = "maxFrameSize"
    KW_MAX_MESSAGE_SIZE = "maxMessageSize"
    KW_MAX_SESSION_WINDOW = "maxSessionWindow"
    KW_MAX_SESSIONS = "maxSessions"
    KW_MAX_SENDERS = "maxSenders"
    KW_MAX_RECEIVERS = "maxReceivers"
    KW_ALLOW_DYNAMIC_SRC = "allowDynamicSource"
    KW_ALLOW_ANONYMOUS_SENDER = "allowAnonymousSender"
    KW_ALLOW_USERID_PROXY = "allowUserIdProxy"
    KW_SOURCES = "sources"
    KW_TARGETS = "targets"

    # Policy stats key words (management attribute names)
    KW_CONNECTIONS_APPROVED = "connectionsApproved"
    KW_CONNECTIONS_DENIED = "connectionsDenied"
    KW_CONNECTIONS_CURRENT = "connectionsCurrent"
    KW_PER_USER_STATE = "perUserState"
    KW_PER_HOST_STATE = "perHostState"

    # What settings does a user get when allowed to connect but
    # not restricted by a user group?
    KW_DEFAULT_SETTINGS = "$default"

    # Config file separator character for two IP addresses in a range
    KC_CONFIG_IP_SEP = "-"

    # Config file separator character for names in a list
    KC_CONFIG_LIST_SEP = ","

    # user-to-group computed map in compiled ruleset
    RULESET_U2G_MAP = "U2G"

    # policy stats controlled by C code but referenced by settings
    KW_CSTATS = "denialCounts"
#
#
class PolicyCompiler(object):
    """
    Validate incoming configuration for legal schema.
    - Warn about section options that go unused.
    - Disallow negative max connection numbers.
    - Check that connectionOrigins resolve to IP hosts.
    - Enforce internal consistency.
    """

    # Option names accepted in a vhost ruleset; anything else draws a warning.
    allowed_ruleset_options = [
        PolicyKeys.KW_IGNORED_NAME,
        PolicyKeys.KW_IGNORED_IDENTITY,
        PolicyKeys.KW_IGNORED_TYPE,
        PolicyKeys.KW_VHOST_NAME,
        PolicyKeys.KW_MAXCONN,
        PolicyKeys.KW_MAXCONNPERHOST,
        PolicyKeys.KW_MAXCONNPERUSER,
        PolicyKeys.KW_CONNECTION_ALLOW_DEFAULT,
        PolicyKeys.KW_GROUPS
    ]

    # Option names accepted in a user-group settings map.
    allowed_settings_options = [
        PolicyKeys.KW_USERS,
        PolicyKeys.KW_REMOTE_HOSTS,
        PolicyKeys.KW_MAX_FRAME_SIZE,
        PolicyKeys.KW_MAX_MESSAGE_SIZE,
        PolicyKeys.KW_MAX_SESSION_WINDOW,
        PolicyKeys.KW_MAX_SESSIONS,
        PolicyKeys.KW_MAX_SENDERS,
        PolicyKeys.KW_MAX_RECEIVERS,
        PolicyKeys.KW_ALLOW_DYNAMIC_SRC,
        PolicyKeys.KW_ALLOW_ANONYMOUS_SENDER,
        PolicyKeys.KW_ALLOW_USERID_PROXY,
        PolicyKeys.KW_SOURCES,
        PolicyKeys.KW_TARGETS
    ]

    def __init__(self):
        """
        Create a validator
        """
        pass

    def validateNumber(self, val, v_min, v_max, errors):
        """
        Range check a numeric int policy value
        @param[in] val policy value to check
        @param[in] v_min minimum value
        @param[in] v_max maximum value. zero disables the upper-bound check
        @param[out] errors failure message
        @return v_min <= val <= v_max
        """
        try:
            v_int = int(val)
        except Exception:
            errors.append("Value '%s' does not resolve to an integer." % val)
            return False
        if v_int < v_min:
            errors.append("Value '%s' is below minimum '%s'." % (val, v_min))
            return False
        if v_max > 0 and v_int > v_max:
            errors.append("Value '%s' is above maximum '%s'." % (val, v_max))
            return False
        return True

    def compile_connection_group(self, vhostname, groupname, val, list_out, warnings, errors):
        """
        Handle an ingressHostGroups submap.
        Each origin value is verified. On a successful run the submap
        is replaced parsed lists of HostAddr objects.
        @param[in] vhostname vhost name
        @param[in] groupname vhost/group name
        @param[in] val origin list as text string
        @param[out] list_out user inputs replaced with HostAddr objects
        @param[out] warnings nonfatal irregularities observed
        @param[out] errors descriptions of failure
        @return - origins is usable. If True then warnings[] may contain useful
                  information about fields that are ignored. If False then
                  warnings[] may contain info and errors[0] will hold the
                  description of why the origin was rejected.
        """
        key = PolicyKeys.KW_REMOTE_HOSTS
        # convert val string to list of host specs
        if type(val) is str:
            # 'abc, def, mytarget'
            val = [x.strip(' ') for x in val.split(PolicyKeys.KC_CONFIG_LIST_SEP)]
        elif type(val) is list:
            # ['abc', 'def', 'mytarget']
            pass
        elif type(val) is unicode:
            # u'abc, def, mytarget'
            val = [x.strip(' ') for x in str(val).split(PolicyKeys.KC_CONFIG_LIST_SEP)]
        else:
            errors.append(
                "Policy vhost '%s' user group '%s' option '%s' has illegal value '%s'. Type must be 'str' or 'list' but is '%s'." %
                (vhostname, groupname, key, val, type(val)))
            return False
        for coname in val:
            try:
                coha = HostAddr(coname, PolicyKeys.KC_CONFIG_IP_SEP)
                list_out.append(coha)
            except Exception as e:
                errors.append("Policy vhost '%s' user group '%s' option '%s' connectionOption '%s' failed to translate: '%s'." %
                              (vhostname, groupname, key, coname, e))
                return False
        return True

    def compile_app_settings(self, vhostname, usergroup, policy_in, policy_out, warnings, errors):
        """
        Compile a user-group settings map from processed json format to local
        internal format.
        @param[in] vhostname vhost name
        @param[in] usergroup user group name
        @param[in] policy_in user config settings
        @param[out] policy_out validated internal format
        @param[out] warnings nonfatal irregularities observed
        @param[out] errors descriptions of failure
        @return - settings are usable. If True then warnings[] may contain useful
                  information about fields that are ignored. If False then
                  warnings[] may contain info and errors[0] will hold the
                  description of why the policy was rejected.
        """
        # Settings may not come through standard config so create permissive
        # defaults first; config values overwrite them below.
        policy_out[PolicyKeys.KW_USERS] = ''
        policy_out[PolicyKeys.KW_REMOTE_HOSTS] = ''
        policy_out[PolicyKeys.KW_MAX_FRAME_SIZE] = 2147483647
        policy_out[PolicyKeys.KW_MAX_MESSAGE_SIZE] = 0
        policy_out[PolicyKeys.KW_MAX_SESSION_WINDOW] = 2147483647
        policy_out[PolicyKeys.KW_MAX_SESSIONS] = 65536
        policy_out[PolicyKeys.KW_MAX_SENDERS] = 2147483647
        policy_out[PolicyKeys.KW_MAX_RECEIVERS] = 2147483647
        policy_out[PolicyKeys.KW_ALLOW_DYNAMIC_SRC] = False
        policy_out[PolicyKeys.KW_ALLOW_ANONYMOUS_SENDER] = False
        policy_out[PolicyKeys.KW_ALLOW_USERID_PROXY] = False
        policy_out[PolicyKeys.KW_SOURCES] = ''
        policy_out[PolicyKeys.KW_TARGETS] = ''

        cerror = []
        for key, val in policy_in.iteritems():
            if key not in self.allowed_settings_options:
                warnings.append("Policy vhost '%s' user group '%s' option '%s' is ignored." %
                                (vhostname, usergroup, key))
            if key in [PolicyKeys.KW_MAX_FRAME_SIZE,
                       PolicyKeys.KW_MAX_MESSAGE_SIZE,
                       PolicyKeys.KW_MAX_RECEIVERS,
                       PolicyKeys.KW_MAX_SENDERS,
                       PolicyKeys.KW_MAX_SESSION_WINDOW,
                       PolicyKeys.KW_MAX_SESSIONS
                       ]:
                if not self.validateNumber(val, 0, 0, cerror):
                    errors.append("Policy vhost '%s' user group '%s' option '%s' has error '%s'." %
                                  (vhostname, usergroup, key, cerror[0]))
                    return False
                policy_out[key] = val
            elif key == PolicyKeys.KW_REMOTE_HOSTS:
                # Connection groups are lists of IP addresses that need to be
                # converted into binary structures for comparisons.
                val_out = []
                if not self.compile_connection_group(vhostname, usergroup, val, val_out, warnings, errors):
                    return False
                policy_out[key] = val_out
            elif key in [PolicyKeys.KW_ALLOW_ANONYMOUS_SENDER,
                         PolicyKeys.KW_ALLOW_DYNAMIC_SRC,
                         PolicyKeys.KW_ALLOW_USERID_PROXY
                         ]:
                if not type(val) is bool:
                    errors.append("Policy vhost '%s' user group '%s' option '%s' has illegal boolean value '%s'." %
                                  (vhostname, usergroup, key, val))
                    return False
                policy_out[key] = val
            elif key in [PolicyKeys.KW_USERS,
                         PolicyKeys.KW_SOURCES,
                         PolicyKeys.KW_TARGETS
                         ]:
                # accept a string or list
                if type(val) is str:
                    # 'abc, def, mytarget'
                    val = [x.strip(' ') for x in val.split(PolicyKeys.KC_CONFIG_LIST_SEP)]
                elif type(val) is list:
                    # ['abc', 'def', 'mytarget']
                    pass
                elif type(val) is unicode:
                    # u'abc, def, mytarget'
                    val = [x.strip(' ') for x in str(val).split(PolicyKeys.KC_CONFIG_LIST_SEP)]
                else:
                    errors.append("Policy vhost '%s' user group '%s' option '%s' has illegal value '%s'. Type must be 'str' or 'list' but is '%s'." %
                                  (vhostname, usergroup, key, val, type(val)))
                    # BUG FIX: previously fell through without returning and
                    # then attempted list(set(val)) on the illegal value.
                    return False
                # deduplicate address lists
                val = list(set(val))
                # output result is CSV string with no white space between values: 'abc,def,mytarget'
                policy_out[key] = ','.join(val)
        return True

    def compile_access_ruleset(self, name, policy_in, policy_out, warnings, errors):
        """
        Compile a vhost ruleset from processed json format to local internal
        format.
        @param[in] name vhost name
        @param[in] policy_in raw policy to be validated
        @param[out] policy_out validated internal format
        @param[out] warnings nonfatal irregularities observed
        @param[out] errors descriptions of failure
        @return - policy is usable. If True then warnings[] may contain useful
                  information about fields that are ignored. If False then
                  warnings[] may contain info and errors[0] will hold the
                  description of why the policy was rejected.
        """
        cerror = []
        # rulesets may not come through standard config so make nice defaults
        policy_out[PolicyKeys.KW_MAXCONN] = 65535
        policy_out[PolicyKeys.KW_MAXCONNPERHOST] = 65535
        policy_out[PolicyKeys.KW_MAXCONNPERUSER] = 65535
        policy_out[PolicyKeys.KW_CONNECTION_ALLOW_DEFAULT] = False
        policy_out[PolicyKeys.KW_GROUPS] = {}

        # validate the options
        for key, val in policy_in.iteritems():
            if key not in self.allowed_ruleset_options:
                warnings.append("Policy vhost '%s' option '%s' is ignored." %
                                (name, key))
            if key in [PolicyKeys.KW_MAXCONN,
                       PolicyKeys.KW_MAXCONNPERHOST,
                       PolicyKeys.KW_MAXCONNPERUSER
                       ]:
                if not self.validateNumber(val, 0, 65535, cerror):
                    msg = ("Policy vhost '%s' option '%s' has error '%s'." %
                           (name, key, cerror[0]))
                    errors.append(msg)
                    return False
                policy_out[key] = val
            elif key in [PolicyKeys.KW_CONNECTION_ALLOW_DEFAULT]:
                if not type(val) is bool:
                    errors.append("Policy vhost '%s' option '%s' must be of type 'bool' but is '%s'" %
                                  (name, key, type(val)))
                    return False
                policy_out[key] = val
            elif key in [PolicyKeys.KW_GROUPS]:
                if not type(val) is dict:
                    errors.append("Policy vhost '%s' option '%s' must be of type 'dict' but is '%s'" %
                                  (name, key, type(val)))
                    return False
                for skey, sval in val.iteritems():
                    newsettings = {}
                    if not self.compile_app_settings(name, skey, sval, newsettings, warnings, errors):
                        return False
                    policy_out[key][skey] = {}
                    policy_out[key][skey].update(newsettings)

        # Verify that each user is in only one group.
        # Create user-to-group map for looking up user's group
        policy_out[PolicyKeys.RULESET_U2G_MAP] = {}
        if PolicyKeys.KW_GROUPS in policy_out:
            for group, groupsettings in policy_out[PolicyKeys.KW_GROUPS].iteritems():
                if PolicyKeys.KW_USERS in groupsettings:
                    users = [x.strip(' ') for x in groupsettings[PolicyKeys.KW_USERS].split(PolicyKeys.KC_CONFIG_LIST_SEP)]
                    for user in users:
                        if user in policy_out[PolicyKeys.RULESET_U2G_MAP]:
                            errors.append("Policy vhost '%s' user '%s' is in multiple user groups '%s' and '%s'" %
                                          (name, user, policy_out[PolicyKeys.RULESET_U2G_MAP][user], group))
                            return False
                        else:
                            policy_out[PolicyKeys.RULESET_U2G_MAP][user] = group
                else:
                    warnings.append("Policy vhost '%s' user group '%s' has no defined users. This policy has no effect" % (name, group))

        # Default connections require a default settings group
        if policy_out[PolicyKeys.KW_CONNECTION_ALLOW_DEFAULT]:
            if not PolicyKeys.KW_DEFAULT_SETTINGS in policy_out[PolicyKeys.KW_GROUPS]:
                errors.append("Policy vhost '%s' allows connections by default but default settings are not defined" %
                              (name))
                return False
        return True
#
#
class AppStats(object):
    """
    Maintain live connection-count state and statistics for a single vhost.
    """

    def __init__(self, id, manager, ruleset):
        self.my_id = id
        self._manager = manager
        # Connection manager receives the limits in the order:
        # (total, per-user, per-host).
        self.conn_mgr = PolicyAppConnectionMgr(
            ruleset[PolicyKeys.KW_MAXCONN],
            ruleset[PolicyKeys.KW_MAXCONNPERUSER],
            ruleset[PolicyKeys.KW_MAXCONNPERHOST])
        # C-side denial counters; merged into management attributes in
        # refresh_entity.
        self._cstats = self._manager.get_agent().qd.qd_dispatch_policy_c_counts_alloc()
        self._manager.get_agent().add_implementation(self, "vhostStats")

    def update_ruleset(self, ruleset):
        """
        The parent ruleset has changed.
        Propagate settings into the connection manager.
        @param ruleset: new ruleset
        @return:
        """
        # BUG FIX: pass the limits in the same (total, per-user, per-host)
        # order used by the constructor; previously the per-host and per-user
        # values were swapped here, silently exchanging the two limits on
        # every ruleset update.
        # NOTE(review): assumes PolicyAppConnectionMgr.update mirrors its
        # constructor's argument order -- confirm against policy_util.
        self.conn_mgr.update(
            ruleset[PolicyKeys.KW_MAXCONN],
            ruleset[PolicyKeys.KW_MAXCONNPERUSER],
            ruleset[PolicyKeys.KW_MAXCONNPERHOST])

    def refresh_entity(self, attributes):
        """Refresh management attributes"""
        entitymap = {}
        entitymap[PolicyKeys.KW_VHOST_NAME] = self.my_id
        entitymap[PolicyKeys.KW_CONNECTIONS_APPROVED] = self.conn_mgr.connections_approved
        entitymap[PolicyKeys.KW_CONNECTIONS_DENIED] = self.conn_mgr.connections_denied
        entitymap[PolicyKeys.KW_CONNECTIONS_CURRENT] = self.conn_mgr.connections_active
        entitymap[PolicyKeys.KW_PER_USER_STATE] = self.conn_mgr.per_user_state
        entitymap[PolicyKeys.KW_PER_HOST_STATE] = self.conn_mgr.per_host_state
        self._manager.get_agent().qd.qd_dispatch_policy_c_counts_refresh(self._cstats, entitymap)
        attributes.update(entitymap)

    def can_connect(self, conn_id, user, host, diags):
        """Delegate a connection-admission decision to the connection manager."""
        return self.conn_mgr.can_connect(conn_id, user, host, diags)

    def disconnect(self, conn_id, user, host):
        """Unregister a closed connection from the live counts."""
        self.conn_mgr.disconnect(conn_id, user, host)

    def count_other_denial(self):
        """Count a denial that is not a per-limit rejection."""
        self.conn_mgr.count_other_denial()

    def get_cstats(self):
        """Return the opaque C-side denial-counts handle."""
        return self._cstats
#
#
class ConnectionFacts:
    """Value object recording the facts of one open connection."""

    def __init__(self, user, host, app, conn_name):
        """
        @param user: connection authId
        @param host: remote host address string
        @param app: vhost the connection targets
        @param conn_name: connection name used for tracking
        """
        self.conn_name = conn_name
        self.app = app
        self.host = host
        self.user = user
#
#
class PolicyLocal(object):
"""
The local policy database.
"""
    def __init__(self, manager):
        """
        Create instance
        @param manager policy manager object providing access to the
               dispatch system functions (logging, agent access)
        """
        # manager is a class
        #  It provides access to the dispatch system functions
        self._manager = manager
        # rulesetdb is a map
        #  key : vhost name
        #  val : ruleset for this app
        # created by configuration
        # augmented by policy compiler
        self.rulesetdb = {}
        # settingsdb is a map
        #  key : <vhost name>
        #  val : a map
        #   key : <user group name>
        #   val : settings to use for user's connection
        # created by configuration
        self.settingsdb = {}
        # statsdb is a map
        #  key : <vhost name>
        #  val : AppStats object
        self.statsdb = {}
        # _policy_compiler is a function
        #  validates incoming policy and readies it for internal use
        self._policy_compiler = PolicyCompiler()
        # _connections is a map
        #  key : numeric connection id
        #  val : ConnectionFacts
        # Entries created as connection AMQP Opens arrive
        # Entries destroyed as sockets closed
        self._connections = {}
        # _default_vhost is a string
        #  holds the name of the vhost to use when the
        #  open.hostname is not found in the rulesetdb
        self._default_vhost = ""
#
# Service interfaces
#
def create_ruleset(self, attributes):
"""
Create or update named policy ruleset.
@param[in] attributes: from config
"""
warnings = []
diag = []
candidate = {}
name = attributes[PolicyKeys.KW_VHOST_NAME]
result = self._policy_compiler.compile_access_ruleset(name, attributes, candidate, warnings, diag)
if not result:
raise PolicyError("Policy '%s' is invalid: %s" % (name, diag[0]))
if len(warnings) > 0:
for warning in warnings:
self._manager.log_warning(warning)
if name not in self.rulesetdb:
if name not in self.statsdb:
self.statsdb[name] = AppStats(name, self._manager, candidate)
self._manager.log_info("Created policy rules for vhost %s" % name)
else:
self.statsdb[name].update_ruleset(candidate)
self._manager.log_info("Updated policy rules for vhost %s" % name)
# TODO: ruleset lock
self.rulesetdb[name] = {}
self.rulesetdb[name].update(candidate)
def policy_delete(self, name):
"""
Delete named policy
@param[in] name vhost name
"""
if name not in self.rulesetdb:
raise PolicyError("Policy '%s' does not exist" % name)
# TODO: ruleset lock
del self.rulesetdb[name]
#
# db enumerator
#
    def policy_db_get_names(self):
        """
        Return a list of vhost names in this policy.
        (Python 2 dict.keys() returns a list.)
        """
        return self.rulesetdb.keys()
def set_default_vhost(self, name):
"""
Set the default vhost name.
@param name: the name of the default vhost
@return: none
"""
self._default_vhost = name
self._manager.log_info("Policy fallback defaultVhost is defined: '%s'" % name)
def default_vhost_enabled(self):
"""
The default vhost is enabled if the name is not blank and
the vhost is defined in rulesetdb.
@return:
"""
return not self._default_vhost == "" and self._default_vhost in self.rulesetdb
#
# Runtime query interface
#
def lookup_user(self, user, rhost, vhost_in, conn_name, conn_id):
"""
Lookup function called from C.
Determine if a user on host accessing vhost through AMQP Open is allowed
according to the policy access rules.
If allowed then return the policy vhost settings name. If stats.can_connect
returns true then it has registered and counted the connection.
@param[in] user connection authId
@param[in] rhost connection remote host numeric IP address as string
@param[in] vhost_in vhost user is accessing
@param[in] conn_name connection name used for tracking reports
@param[in] conn_id internal connection id
@return settings user-group name if allowed; "" if not allowed
"""
try:
# choose rule set based on incoming vhost or default vhost
vhost = vhost_in
if vhost_in not in self.rulesetdb:
if self.default_vhost_enabled():
vhost = self._default_vhost
else:
self._manager.log_info(
"DENY AMQP Open for user '%s', rhost '%s', vhost '%s': "
"No policy defined for vhost" % (user, rhost, vhost))
return ""
ruleset = self.rulesetdb[vhost]
# look up the stats
if vhost not in self.statsdb:
msg = (
"DENY AMQP Open for user '%s', rhost '%s', vhost '%s': "
"INTERNAL: Policy is defined but stats are missing" % (user, rhost, vhost))
raise PolicyError(msg)
stats = self.statsdb[vhost]
# Get settings for user in a user group or in default
if user in ruleset[PolicyKeys.RULESET_U2G_MAP]:
usergroup = ruleset[PolicyKeys.RULESET_U2G_MAP][user]
elif "*" in ruleset[PolicyKeys.RULESET_U2G_MAP]:
usergroup = ruleset[PolicyKeys.RULESET_U2G_MAP]["*"]
else:
if ruleset[PolicyKeys.KW_CONNECTION_ALLOW_DEFAULT]:
usergroup = PolicyKeys.KW_DEFAULT_SETTINGS
else:
self._manager.log_info(
"DENY AMQP Open for user '%s', rhost '%s', vhost '%s': "
"User is not in a user group and unknown users are denied" % (user, rhost, vhost))
stats.count_other_denial()
return ""
groupsettings = ruleset[PolicyKeys.KW_GROUPS][usergroup]
# User in usergroup allowed to connect from rhost?
allowed = False
if PolicyKeys.KW_REMOTE_HOSTS in groupsettings:
# Users are restricted to connecting from a rhost
# defined by the group's remoteHost list
cglist = groupsettings[PolicyKeys.KW_REMOTE_HOSTS]
uhs = HostStruct(rhost)
for cohost in cglist:
if cohost.match_bin(uhs):
allowed = True
break
if not allowed:
self._manager.log_info(
"DENY AMQP Open for user '%s', rhost '%s', vhost '%s': "
"User is not allowed to connect from this network host" % (user, rhost, vhost))
stats.count_other_denial()
return ""
# This user passes administrative approval.
# Now check live connection counts
diags = []
if not stats.can_connect(conn_name, user, rhost, diags):
for diag in diags:
self._manager.log_info(
"DENY AMQP Open for user '%s', rhost '%s', vhost '%s': "
"%s" % (user, rhost, vhost, diag))
return ""
# Record facts about this connection to use during teardown
facts = ConnectionFacts(user, rhost, vhost, conn_name)
self._connections[conn_id] = facts
# Return success
return usergroup
except Exception, e:
self._manager.log_info(
"DENY AMQP Open lookup_user failed for user '%s', rhost '%s', vhost '%s': "
"Internal error: %s" % (user, rhost, vhost, e))
# return failure
return ""
def lookup_settings(self, vhost_in, groupname, upolicy):
"""
Given a settings name, return the aggregated policy blob.
@param[in] vhost_in: vhost user is accessing
@param[in] groupname: user group name
@param[out] upolicy: dict holding policy values - the settings blob
TODO: make this a c struct
@return if lookup worked
# Note: the upolicy output is a non-nested dict with settings of interest
"""
try:
vhost = vhost_in
if vhost not in self.rulesetdb:
if self.default_vhost_enabled():
vhost = self._default_vhost
if vhost not in self.rulesetdb:
self._manager.log_info(
"lookup_settings fail for vhost '%s', user group '%s': "
"No policy defined for this vhost" % (vhost, groupname))
return False
ruleset = self.rulesetdb[vhost]
if groupname not in ruleset[PolicyKeys.KW_GROUPS]:
self._manager.log_trace(
"lookup_settings fail for vhost '%s', user group '%s': "
"This vhost has no settings for the user group" % (vhost, groupname))
return False
upolicy.update(ruleset[PolicyKeys.KW_GROUPS][groupname])
upolicy[PolicyKeys.KW_CSTATS] = self.statsdb[vhost].get_cstats()
return True
except Exception, e:
return False
def close_connection(self, conn_id):
"""
Close the connection.
@param conn_id:
@return:
"""
try:
if conn_id in self._connections:
facts = self._connections[conn_id]
stats = self.statsdb[facts.app]
stats.disconnect(facts.conn_name, facts.user, facts.host)
del self._connections[conn_id]
except Exception, e:
self._manager.log_trace(
"Policy internal error closing connection id %s. %s" % (conn_id, str(e)))
#
#
    def test_load_config(self):
        """
        Test function to load a policy.
        Builds a JSON ruleset string for vhost 'photoserver' with several
        user groups and installs it via create_ruleset.
        @return:
        """
        # The fixture is assembled as a JSON string and parsed below; the
        # string fragments are the expected test data and must stay verbatim.
        ruleset_str = '["vhost", {"id": "photoserver", "maxConnections": 50, "maxConnectionsPerUser": 5, "maxConnectionsPerHost": 20, "allowUnknownUser": true,'
        ruleset_str += '"groups": {'
        ruleset_str += '"anonymous": { "users": "anonymous", "remoteHosts": "*", "maxFrameSize": 111111, "maxMessageSize": 111111, "maxSessionWindow": 111111, "maxSessions": 1, "maxSenders": 11, "maxReceivers": 11, "allowDynamicSource": false, "allowAnonymousSender": false, "sources": "public", "targets": "" },'
        ruleset_str += '"users": { "users": "u1, u2", "remoteHosts": "*", "maxFrameSize": 222222, "maxMessageSize": 222222, "maxSessionWindow": 222222, "maxSessions": 2, "maxSenders": 22, "maxReceivers": 22, "allowDynamicSource": false, "allowAnonymousSender": false, "sources": "public, private", "targets": "public" },'
        ruleset_str += '"paidsubscribers": { "users": "p1, p2", "remoteHosts": "*", "maxFrameSize": 333333, "maxMessageSize": 333333, "maxSessionWindow": 333333, "maxSessions": 3, "maxSenders": 33, "maxReceivers": 33, "allowDynamicSource": true, "allowAnonymousSender": false, "sources": "public, private", "targets": "public, private" },'
        ruleset_str += '"test": { "users": "zeke, ynot", "remoteHosts": "10.48.0.0-10.48.255.255, 192.168.100.0-192.168.100.255", "maxFrameSize": 444444, "maxMessageSize": 444444, "maxSessionWindow": 444444, "maxSessions": 4, "maxSenders": 44, "maxReceivers": 44, "allowDynamicSource": true, "allowAnonymousSender": true, "sources": "private", "targets": "private" },'
        # The admin/superuser groups include the IPv6 loopback (::1) only when
        # the host supports IPv6.
        if is_ipv6_enabled():
            ruleset_str += '"admin": { "users": "alice, bob", "remoteHosts": "10.48.0.0-10.48.255.255, 192.168.100.0-192.168.100.255, 10.18.0.0-10.18.255.255, 127.0.0.1, ::1", "maxFrameSize": 555555, "maxMessageSize": 555555, "maxSessionWindow": 555555, "maxSessions": 5, "maxSenders": 55, "maxReceivers": 55, "allowDynamicSource": true, "allowAnonymousSender": true, "sources": "public, private, management", "targets": "public, private, management" },'
            ruleset_str += '"superuser": { "users": "ellen", "remoteHosts": "72.135.2.9, 127.0.0.1, ::1", "maxFrameSize": 666666, "maxMessageSize": 666666, "maxSessionWindow": 666666, "maxSessions": 6, "maxSenders": 66, "maxReceivers": 66, "allowDynamicSource": false, "allowAnonymousSender": false, "sources": "public, private, management, root", "targets": "public, private, management, root" },'
        else:
            ruleset_str += '"admin": { "users": "alice, bob", "remoteHosts": "10.48.0.0-10.48.255.255, 192.168.100.0-192.168.100.255, 10.18.0.0-10.18.255.255, 127.0.0.1", "maxFrameSize": 555555, "maxMessageSize": 555555, "maxSessionWindow": 555555, "maxSessions": 5, "maxSenders": 55, "maxReceivers": 55, "allowDynamicSource": true, "allowAnonymousSender": true, "sources": "public, private, management", "targets": "public, private, management" },'
            ruleset_str += '"superuser": { "users": "ellen", "remoteHosts": "72.135.2.9, 127.0.0.1", "maxFrameSize": 666666, "maxMessageSize": 666666, "maxSessionWindow": 666666, "maxSessions": 6, "maxSenders": 66, "maxReceivers": 66, "allowDynamicSource": false, "allowAnonymousSender": false, "sources": "public, private, management, root", "targets": "public, private, management, root" },'
        ruleset_str += '"$default": { "remoteHosts": "*", "maxFrameSize": 222222, "maxMessageSize": 222222, "maxSessionWindow": 222222, "maxSessions": 2, "maxSenders": 22, "maxReceivers": 22, "allowDynamicSource": false, "allowAnonymousSender": false, "sources": "public, private", "targets": "public" }'
        ruleset_str += '}}]'
        ruleset = json.loads(ruleset_str)
        # Index 1 is the attribute map of the ["vhost", {...}] pair.
        self.create_ruleset(ruleset[1])
| {
"content_hash": "e6abd2046b357c5293581730a5d5e719",
"timestamp": "",
"source": "github",
"line_count": 699,
"max_line_length": 464,
"avg_line_length": 45.43919885550787,
"alnum_prop": 0.5688873496631195,
"repo_name": "lulf/qpid-dispatch",
"id": "4f176e0ce55c3fc67e1eb000253464707b555a70",
"size": "32551",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/qpid_dispatch_internal/policy/policy_local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1237842"
},
{
"name": "C++",
"bytes": "33945"
},
{
"name": "CMake",
"bytes": "26159"
},
{
"name": "CSS",
"bytes": "151847"
},
{
"name": "HTML",
"bytes": "105127"
},
{
"name": "Java",
"bytes": "1940"
},
{
"name": "JavaScript",
"bytes": "1948607"
},
{
"name": "Objective-C",
"bytes": "8049"
},
{
"name": "Python",
"bytes": "935562"
},
{
"name": "Shell",
"bytes": "22812"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask import render_template
# Flask application instance.  DEBUG enables the interactive debugger and
# verbose errors -- should not be left on for production traffic.
app = Flask(__name__)
app.config['DEBUG'] = True
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
@app.route('/')
def main():
    """Render the landing page with an empty post set."""
    context = {}
    return render_template('index.html', posts=context)
@app.route('/controller/')
def controller():
    """Render the controller page with an empty post set."""
    context = {}
    return render_template('controller.html', posts=context)
@app.errorhandler(404)
def page_not_found(e):
    """Return a custom 404 error."""
    message = 'Sorry, nothing at this URL.'
    return message, 404
| {
"content_hash": "01d583100143ef0021af2753656ba31f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 24.59090909090909,
"alnum_prop": 0.6913123844731978,
"repo_name": "ikeralbeniz/leafletcast",
"id": "e4538208b72cdb6279a898d0ad28e25aab61c799",
"size": "541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "8715"
},
{
"name": "Python",
"bytes": "3600"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import numpy as np

# Enumerate all (roll, pitch, yaw) orientations in 10-degree steps.
#
# Fixes vs. the original script:
#  * the original multiplied the loop variables by 10 *inside* the innermost
#    loop (`row *= 10; pitch *= 10; yaw *= 10`), clobbering the iteration
#    counters and compounding the scaling on every pass; compute the scaled
#    values without mutating the counters instead.
#  * `angles` was a plain Python list, so `angles[0,:]` raised TypeError;
#    convert to a NumPy array before slicing.
#  * print is called as a function for Python 3 compatibility.
angles = []
for roll in range(36):
    for pitch in range(36):
        for yaw in range(36):
            angles.append([roll * 10, pitch * 10, yaw * 10])
angles = np.array(angles)

print(angles[0])

# Plot roll against pitch for every sample.  (The original sliced rows
# `angles[0,:]` vs `angles[1,:]`; plotting the roll/pitch columns appears to
# be the intent -- confirm with the author.)
plt.plot(angles[:, 0], angles[:, 1])
plt.show()
| {
"content_hash": "6fc9171b60c294c7d09f802ea69ad056",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 45,
"avg_line_length": 22.5,
"alnum_prop": 0.5968253968253968,
"repo_name": "nickvazz/Trick-Identifier",
"id": "cdf32cd55cd454aa2345d25c651bb8d2c8bc072a",
"size": "315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rowPitchYaw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10977"
}
],
"symlink_target": ""
} |
import rope.base.codeanalyze
import rope.base.evaluate
import rope.base.builtins
import rope.base.oi.soi
import rope.base.pyscopes
from rope.base import (pynamesdef as pynames, exceptions, ast,
astutils, pyobjects, fscommands, arguments, utils)
from rope.base.pyobjects import *
class PyFunction(pyobjects.PyFunction):
    """Concrete function/method object built from a FunctionDef AST node.

    Parameter and return objects are inferred lazily through
    `rope.base.oi.soi` and cached in `pynames._Inferred` holders.
    """
    def __init__(self, pycore, ast_node, parent):
        AbstractFunction.__init__(self)
        PyDefinedObject.__init__(self, pycore, ast_node, parent)
        self.arguments = self.ast_node.args
        # Lazily-inferred parameter objects, cached per enclosing module.
        self.parameter_pyobjects = pynames._Inferred(
            self._infer_parameters, self.get_module()._get_concluded_data())
        self.returned = pynames._Inferred(self._infer_returned)
        self.parameter_pynames = None
    def _create_structural_attributes(self):
        # Functions expose no structural attributes.
        return {}
    def _create_concluded_attributes(self):
        return {}
    def _create_scope(self):
        return rope.base.pyscopes.FunctionScope(self.pycore, self,
                                                _FunctionVisitor)
    def _infer_parameters(self):
        pyobjects = rope.base.oi.soi.infer_parameter_objects(self)
        self._handle_special_args(pyobjects)
        return pyobjects
    def _infer_returned(self, args=None):
        return rope.base.oi.soi.infer_returned_object(self, args)
    def _handle_special_args(self, pyobjects):
        # Append implicit objects for *args (a list) and **kwargs (a dict)
        # when inference produced only the named parameters.
        if len(pyobjects) == len(self.arguments.args):
            if self.arguments.vararg:
                pyobjects.append(rope.base.builtins.get_list())
            if self.arguments.kwarg:
                pyobjects.append(rope.base.builtins.get_dict())
    def _set_parameter_pyobjects(self, pyobjects):
        if pyobjects is not None:
            self._handle_special_args(pyobjects)
        self.parameter_pyobjects.set(pyobjects)
    def get_parameters(self):
        # name -> ParameterName map, built once and cached.
        if self.parameter_pynames is None:
            result = {}
            for index, name in enumerate(self.get_param_names()):
                # TODO: handle tuple parameters
                result[name] = pynames.ParameterName(self, index)
            self.parameter_pynames = result
        return self.parameter_pynames
    def get_parameter(self, index):
        # Returns None implicitly for out-of-range indices.
        if index < len(self.parameter_pyobjects.get()):
            return self.parameter_pyobjects.get()[index]
    def get_returned_object(self, args):
        return self.returned.get(args)
    def get_name(self):
        return self.get_ast().name
    def get_param_names(self, special_args=True):
        """Return parameter names; include vararg/kwarg when special_args."""
        # TODO: handle tuple parameters
        result = [node.id for node in self.arguments.args
                  if isinstance(node, ast.Name)]
        if special_args:
            if self.arguments.vararg:
                result.append(self.arguments.vararg)
            if self.arguments.kwarg:
                result.append(self.arguments.kwarg)
        return result
    def get_kind(self):
        """Get function type
        It returns one of 'function', 'method', 'staticmethod' or
        'classmethod' strs.
        """
        scope = self.parent.get_scope()
        if isinstance(self.parent, PyClass):
            # Evaluate decorators to distinguish static/class methods.
            for decorator in self.decorators:
                pyname = rope.base.evaluate.eval_node(scope, decorator)
                if pyname == rope.base.builtins.builtins['staticmethod']:
                    return 'staticmethod'
                if pyname == rope.base.builtins.builtins['classmethod']:
                    return 'classmethod'
            return 'method'
        return 'function'
    @property
    def decorators(self):
        # `decorator_list` on newer ASTs, with a fallback to the legacy
        # `decorators` attribute.
        try:
            return getattr(self.ast_node, 'decorator_list')
        except AttributeError:
            return getattr(self.ast_node, 'decorators', None)
class PyClass(pyobjects.PyClass):
    """Concrete class object built from a ClassDef AST node."""
    def __init__(self, pycore, ast_node, parent):
        self.visitor_class = _ClassVisitor
        AbstractClass.__init__(self)
        PyDefinedObject.__init__(self, pycore, ast_node, parent)
        self.parent = parent
        # Superclasses are evaluated lazily and cached per module.
        self._superclasses = self.get_module()._get_concluded_data()
    def get_superclasses(self):
        if self._superclasses.get() is None:
            self._superclasses.set(self._get_bases())
        return self._superclasses.get()
    def get_name(self):
        return self.get_ast().name
    def _create_concluded_attributes(self):
        # Later (nearer) bases override earlier ones, hence reversed().
        result = {}
        for base in reversed(self.get_superclasses()):
            result.update(base.get_attributes())
        return result
    def _get_bases(self):
        # Evaluate each base expression in the defining scope; keep only
        # those that resolve to class objects.
        result = []
        for base_name in self.ast_node.bases:
            base = rope.base.evaluate.eval_node(self.parent.get_scope(),
                                                base_name)
            if base is not None and \
               base.get_object().get_type() == get_base_type('Type'):
                result.append(base.get_object())
        return result
    def _create_scope(self):
        return rope.base.pyscopes.ClassScope(self.pycore, self)
class PyModule(pyobjects.PyModule):
    """A parsed Python module.

    Parses `source` (or the contents of `resource`) into an AST.  Syntax
    errors either propagate as `ModuleSyntaxError` or -- when the project's
    `ignore_syntax_errors` preference is set and `force_errors` is false --
    yield an empty module with `has_errors` set to True.
    """
    def __init__(self, pycore, source=None,
                 resource=None, force_errors=False):
        ignore = pycore.project.prefs.get('ignore_syntax_errors', False)
        syntax_errors = force_errors or not ignore
        self.has_errors = False
        try:
            source, node = self._init_source(pycore, source, resource)
        except exceptions.ModuleSyntaxError:
            self.has_errors = True
            if syntax_errors:
                raise
            else:
                # Fall back to an empty module so analysis can continue.
                source = '\n'
                node = ast.parse('\n')
        self.source_code = source
        self.star_imports = []
        self.visitor_class = _GlobalVisitor
        self.coding = fscommands.read_str_coding(self.source_code)
        super(PyModule, self).__init__(pycore, node, resource)
    def _init_source(self, pycore, source_code, resource):
        """Return (source, AST); raise ModuleSyntaxError on bad input."""
        filename = 'string'
        if resource:
            filename = resource.path
        try:
            if source_code is None:
                source_bytes = resource.read_bytes()
                source_code = fscommands.file_data_to_unicode(source_bytes)
            else:
                # Python 2 `unicode`: text sources are re-encoded, byte
                # sources are parsed as-is.
                if isinstance(source_code, unicode):
                    source_bytes = fscommands.unicode_to_file_data(source_code)
                else:
                    source_bytes = source_code
            ast_node = ast.parse(source_bytes, filename=filename)
        # `except E as e` replaces the Python2-only `except E, e` syntax
        # (valid on Python 2.6+ and Python 3).
        except SyntaxError as e:
            raise exceptions.ModuleSyntaxError(filename, e.lineno, e.msg)
        except UnicodeDecodeError as e:
            raise exceptions.ModuleSyntaxError(filename, 1, '%s' % (e.reason))
        return source_code, ast_node
    @utils.prevent_recursion(lambda: {})
    def _create_concluded_attributes(self):
        # Names contributed by `from x import *` statements.
        result = {}
        for star_import in self.star_imports:
            result.update(star_import.get_names())
        return result
    def _create_scope(self):
        return rope.base.pyscopes.GlobalScope(self.pycore, self)
    @property
    @utils.saveit
    def lines(self):
        """A `SourceLinesAdapter`"""
        return rope.base.codeanalyze.SourceLinesAdapter(self.source_code)
    @property
    @utils.saveit
    def logical_lines(self):
        """A `LogicalLinesFinder`"""
        return rope.base.codeanalyze.CachingLogicalLineFinder(self.lines)
class PyPackage(pyobjects.PyPackage):
    """A package directory; its AST is that of __init__.py when present."""
    def __init__(self, pycore, resource=None, force_errors=False):
        self.resource = resource
        init_dot_py = self._get_init_dot_py()
        if init_dot_py is not None:
            ast_node = pycore.resource_to_pyobject(
                init_dot_py, force_errors=force_errors).get_ast()
        else:
            # No __init__.py: behave like an empty module.
            ast_node = ast.parse('\n')
        super(PyPackage, self).__init__(pycore, ast_node, resource)
    def _create_structural_attributes(self):
        result = {}
        modname = self.pycore.modname(self.resource)
        extension_submodules = self.pycore._builtin_submodules(modname)
        # .items() instead of the Python2-only .iteritems(); it iterates the
        # same pairs on Python 2 and also works on Python 3.
        for name, module in extension_submodules.items():
            result[name] = rope.base.builtins.BuiltinName(module)
        if self.resource is None:
            return result
        for name, resource in self._get_child_resources().items():
            result[name] = pynames.ImportedModule(self, resource=resource)
        return result
    def _create_concluded_attributes(self):
        # Attributes defined by the package's __init__.py, if any.
        result = {}
        init_dot_py = self._get_init_dot_py()
        if init_dot_py:
            init_object = self.pycore.resource_to_pyobject(init_dot_py)
            result.update(init_object.get_attributes())
        return result
    def _get_child_resources(self):
        """Map child name -> resource for subfolders and *.py files."""
        result = {}
        for child in self.resource.get_children():
            if child.is_folder():
                result[child.name] = child
            elif child.name.endswith('.py') and \
                 child.name != '__init__.py':
                name = child.name[:-3]
                result[name] = child
        return result
    def _get_init_dot_py(self):
        if self.resource is not None and self.resource.has_child('__init__.py'):
            return self.resource.get_child('__init__.py')
        else:
            return None
    def _create_scope(self):
        return self.get_module().get_scope()
    def get_module(self):
        # Delegate to the __init__.py module when it exists.
        init_dot_py = self._get_init_dot_py()
        if init_dot_py:
            return self.pycore.resource_to_pyobject(init_dot_py)
        return self
class _AssignVisitor(object):
def __init__(self, scope_visitor):
self.scope_visitor = scope_visitor
self.assigned_ast = None
def _Assign(self, node):
self.assigned_ast = node.value
for child_node in node.targets:
ast.walk(child_node, self)
def _assigned(self, name, assignment=None):
self.scope_visitor._assigned(name, assignment)
def _Name(self, node):
assignment = None
if self.assigned_ast is not None:
assignment = pynames.AssignmentValue(self.assigned_ast)
self._assigned(node.id, assignment)
def _Tuple(self, node):
names = astutils.get_name_levels(node)
for name, levels in names:
assignment = None
if self.assigned_ast is not None:
assignment = pynames.AssignmentValue(self.assigned_ast, levels)
self._assigned(name, assignment)
def _Attribute(self, node):
pass
def _Subscript(self, node):
pass
def _Slice(self, node):
pass
class _ScopeVisitor(object):
    """Base AST visitor that collects the names defined in one scope.

    `names` maps name -> pyname; `defineds` lists nested defined objects.
    Subclasses specialize module, class and function scopes.
    """
    def __init__(self, pycore, owner_object):
        self.pycore = pycore
        self.owner_object = owner_object
        self.names = {}
        self.defineds = []
    def get_module(self):
        if self.owner_object is not None:
            return self.owner_object.get_module()
        else:
            return None
    def _ClassDef(self, node):
        pyclass = PyClass(self.pycore, node, self.owner_object)
        self.names[node.name] = pynames.DefinedName(pyclass)
        self.defineds.append(pyclass)
    def _FunctionDef(self, node):
        pyfunction = PyFunction(self.pycore, node, self.owner_object)
        # A method decorated with @property inside a class body is recorded
        # as an evaluated name (the property's value), not a plain function;
        # the for/else falls through to DefinedName when no such decorator
        # matches.
        for decorator in pyfunction.decorators:
            if isinstance(decorator, ast.Name) and decorator.id == 'property':
                if isinstance(self, _ClassVisitor):
                    type_ = rope.base.builtins.Property(pyfunction)
                    arg = pynames.UnboundName(PyObject(self.owner_object))
                    def _eval(type_=type_, arg=arg):
                        return type_.get_property_object(
                            arguments.ObjectArguments([arg]))
                    self.names[node.name] = pynames.EvaluatedName(
                        _eval, module=self.get_module(), lineno=node.lineno)
                    break
        else:
            self.names[node.name] = pynames.DefinedName(pyfunction)
        self.defineds.append(pyfunction)
    def _Assign(self, node):
        ast.walk(node, _AssignVisitor(self))
    def _AugAssign(self, node):
        # Augmented assignment does not introduce a new name.
        pass
    def _For(self, node):
        # The loop variable is given the element type of the iterated value.
        names = self._update_evaluated(node.target, node.iter,
                                       '.__iter__().next()')
        for child in node.body + node.orelse:
            ast.walk(child, self)
    def _assigned(self, name, assignment):
        pyname = self.names.get(name, None)
        if pyname is None:
            pyname = pynames.AssignedName(module=self.get_module())
        if isinstance(pyname, pynames.AssignedName):
            if assignment is not None:
                pyname.assignments.append(assignment)
        self.names[name] = pyname
    def _update_evaluated(self, targets, assigned,
                          evaluation= '', eval_type=False):
        # Bind every name in `targets` to `assigned` (optionally transformed
        # by the `evaluation` suffix expression).
        result = {}
        names = astutils.get_name_levels(targets)
        for name, levels in names:
            assignment = pynames.AssignmentValue(assigned, levels,
                                                 evaluation, eval_type)
            self._assigned(name, assignment)
        return result
    def _With(self, node):
        # `with expr as var`: var gets the value of expr.__enter__().
        if node.optional_vars:
            self._update_evaluated(node.optional_vars,
                                   node.context_expr, '.__enter__()')
        for child in node.body:
            ast.walk(child, self)
    def _excepthandler(self, node):
        # `except SomeError, name:` binds `name` to the exception type.
        if node.name is not None and isinstance(node.name, ast.Name):
            type_node = node.type
            if isinstance(node.type, ast.Tuple) and type_node.elts:
                type_node = type_node.elts[0]
            self._update_evaluated(node.name, type_node, eval_type=True)
        for child in node.body:
            ast.walk(child, self)
    def _ExceptHandler(self, node):
        self._excepthandler(node)
    def _Import(self, node):
        # `import a.b` binds `a`; `import a.b as c` binds `c`.
        for import_pair in node.names:
            module_name = import_pair.name
            alias = import_pair.asname
            first_package = module_name.split('.')[0]
            if alias is not None:
                imported = pynames.ImportedModule(self.get_module(),
                                                  module_name)
                if not self._is_ignored_import(imported):
                    self.names[alias] = imported
            else:
                imported = pynames.ImportedModule(self.get_module(),
                                                  first_package)
                if not self._is_ignored_import(imported):
                    self.names[first_package] = imported
    def _ImportFrom(self, node):
        level = 0
        if node.level:
            level = node.level
        imported_module = pynames.ImportedModule(self.get_module(),
                                                 node.module, level)
        if self._is_ignored_import(imported_module):
            return
        if len(node.names) == 1 and node.names[0].name == '*':
            # `from x import *` is remembered on the module for lazy
            # expansion of the imported names.
            if isinstance(self.owner_object, PyModule):
                self.owner_object.star_imports.append(
                    StarImport(imported_module))
        else:
            for imported_name in node.names:
                imported = imported_name.name
                alias = imported_name.asname
                if alias is not None:
                    imported = alias
                self.names[imported] = pynames.ImportedName(imported_module,
                                                            imported_name.name)
    def _is_ignored_import(self, imported_module):
        # Honors the `ignore_bad_imports` project preference.
        if not self.pycore.project.prefs.get('ignore_bad_imports', False):
            return False
        return not isinstance(imported_module.get_object(), AbstractModule)
    def _Global(self, node):
        # `global name`: resolve against the module scope when possible.
        module = self.get_module()
        for name in node.names:
            if module is not None:
                try:
                    pyname = module[name]
                except exceptions.AttributeNotFoundError:
                    pyname = pynames.AssignedName(node.lineno)
                self.names[name] = pyname
class _GlobalVisitor(_ScopeVisitor):
    """Scope visitor for module-level (global) scopes.

    Adds nothing beyond the generic `_ScopeVisitor` behavior; it exists so
    module scopes have a distinct visitor type.
    """
    def __init__(self, pycore, owner_object):
        _ScopeVisitor.__init__(self, pycore, owner_object)
class _ClassVisitor(_ScopeVisitor):
    """Scope visitor for class bodies; also harvests self-attribute bindings."""
    def __init__(self, pycore, owner_object):
        super(_ClassVisitor, self).__init__(pycore, owner_object)
    def _FunctionDef(self, node):
        _ScopeVisitor._FunctionDef(self, node)
        # Scan the method body for assignments to attributes of the first
        # parameter (conventionally `self`) to discover instance attributes.
        if len(node.args.args) > 0:
            first = node.args.args[0]
            if isinstance(first, ast.Name):
                new_visitor = _ClassInitVisitor(self, first.id)
                for child in ast.get_child_nodes(node):
                    ast.walk(child, new_visitor)
class _FunctionVisitor(_ScopeVisitor):
    """Collects the expressions returned or yielded by a function body."""
    def __init__(self, pycore, owner_object):
        super(_FunctionVisitor, self).__init__(pycore, owner_object)
        self.returned_asts = []
        self.generator = False
    def _Return(self, node):
        value = node.value
        if value is not None:
            self.returned_asts.append(value)
    def _Yield(self, node):
        value = node.value
        if value is not None:
            self.returned_asts.append(value)
        # Any yield -- even a bare one -- marks the function as a generator.
        self.generator = True
class _ClassInitVisitor(_AssignVisitor):
    """Collects `self.attr = ...` bindings inside a method body.

    `self_name` is the actual name of the method's first parameter.
    """
    def __init__(self, scope_visitor, self_name):
        super(_ClassInitVisitor, self).__init__(scope_visitor)
        self.self_name = self_name
    def _Attribute(self, node):
        # Only assignments (Store context) to `<self_name>.attr` count as
        # instance attribute definitions.
        if not isinstance(node.ctx, ast.Store):
            return
        if isinstance(node.value, ast.Name) and \
           node.value.id == self.self_name:
            if node.attr not in self.scope_visitor.names:
                self.scope_visitor.names[node.attr] = pynames.AssignedName(
                    lineno=node.lineno, module=self.scope_visitor.get_module())
            if self.assigned_ast is not None:
                pyname = self.scope_visitor.names[node.attr]
                if isinstance(pyname, pynames.AssignedName):
                    pyname.assignments.append(
                        pynames.AssignmentValue(self.assigned_ast))
    def _Tuple(self, node):
        # Descend into tuple targets so `self.a, self.b = ...` is seen.
        if not isinstance(node.ctx, ast.Store):
            return
        for child in ast.get_child_nodes(node):
            ast.walk(child, self)
    def _Name(self, node):
        # Plain local names are not instance attributes; ignore.
        pass
    def _FunctionDef(self, node):
        # Do not descend into nested scopes.
        pass
    def _ClassDef(self, node):
        pass
    def _For(self, node):
        pass
    def _With(self, node):
        pass
class StarImport(object):
    """Represents a `from module import *` and resolves the names it adds."""
    def __init__(self, imported_module):
        self.imported_module = imported_module
    def get_names(self):
        """Return {name: ImportedName} for each public name in the module."""
        names = {}
        module = self.imported_module.get_object()
        for name in module:
            if name.startswith('_'):
                continue
            names[name] = pynames.ImportedName(self.imported_module, name)
        return names
| {
"content_hash": "166dd7d5867928376f7f3c03276e0e72",
"timestamp": "",
"source": "github",
"line_count": 537,
"max_line_length": 80,
"avg_line_length": 35.26070763500931,
"alnum_prop": 0.57818853974122,
"repo_name": "wezhang/vim-setup",
"id": "50b243607b45f45f77768fa74609270ae5618066",
"size": "18935",
"binary": false,
"copies": "47",
"ref": "refs/heads/master",
"path": "bundle/python-mode/pymode/libs2/rope/base/pyobjectsdef.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "BlitzBasic",
"bytes": "652"
},
{
"name": "C",
"bytes": "40106"
},
{
"name": "CoffeeScript",
"bytes": "1402"
},
{
"name": "Erlang",
"bytes": "2441"
},
{
"name": "HTML",
"bytes": "134"
},
{
"name": "JavaScript",
"bytes": "9357"
},
{
"name": "Lua",
"bytes": "400"
},
{
"name": "Makefile",
"bytes": "4808"
},
{
"name": "Python",
"bytes": "2832932"
},
{
"name": "Ruby",
"bytes": "139183"
},
{
"name": "Scala",
"bytes": "14131"
},
{
"name": "Shell",
"bytes": "25789"
},
{
"name": "VimL",
"bytes": "2285718"
}
],
"symlink_target": ""
} |
import functools
import logging
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.conf import settings
import requests
logger = logging.getLogger(__name__)
def api_token_required(f):
    """View decorator requiring a logged-in user with a stored API token.

    Users without an access token on their profile are shown the
    "access token needed" page instead of the wrapped view.
    """
    @functools.wraps(f)
    @login_required
    def wrapper(request, *args, **kwargs):
        token = request.user.profile.access_token
        if token:
            return f(request, *args, **kwargs)
        return render(request, "client/access_token_needed.html")
    return wrapper
def catch_expired_token_and_retry(f):
    """Tokens expire rather quickly, but we generally have a refresh token
    to get new ones as necessary.
    Try to get a new token and retry an invalid-token-failed call once.
    """
    @functools.wraps(f)
    def wrapper(request, *args, **kwargs):
        resp = f(request, *args, **kwargs)
        resp_json = resp.json()
        # error_code 3 appears to denote an expired/invalid access token in
        # the LBTC API -- confirm against the API documentation.
        if "error" in resp_json and resp_json["error"]["error_code"] == 3:
            # expired/invalid token, try to get a new one with our refresh token
            logger.info("Invalid access token, trying to get a new one")
            results = requests.post(
                settings.LBTC_URL + "/oauth2/access_token/",
                data={"grant_type": "refresh_token",
                      "client_id": settings.LBTC_CLIENT_ID,
                      "client_secret": settings.LBTC_CLIENT_SECRET,
                      "refresh_token": request.user.profile.access_token_refresh_token,})
            if results.status_code != 200:
                # just return the original error
                return resp
            request.user.profile.set_access_token(**results.json())
            request.user.profile.save()
            # Retry the original call exactly once with the fresh token.
            return f(request, *args, **kwargs)
        return resp
    return wrapper
@catch_expired_token_and_retry
def api_get(request, path, params=None):
    """GET LBTC_URL+path as request.user; the OAuth token is sent both as a
    Bearer header and as an access_token query parameter."""
    token = request.user.profile.access_token
    headers = {"Authorization": "Bearer " + token}
    if not params:
        params = {}
    params["access_token"] = token
    return requests.get(settings.LBTC_URL + path, params=params, headers=headers)
@catch_expired_token_and_retry
def api_post(request, path, data=None):
    """POST to LBTC_URL+path as request.user; the OAuth token is sent both as
    a Bearer header and as an access_token form field."""
    token = request.user.profile.access_token
    headers = {"Authorization": "Bearer " + token}
    if not data:
        data = {}
    data["access_token"] = token
    return requests.post(settings.LBTC_URL + path, data=data, headers=headers)
| {
"content_hash": "fc879b74d78cf40a06f9dacf488dcab7",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 89,
"avg_line_length": 32.4078947368421,
"alnum_prop": 0.6447421843280552,
"repo_name": "LocalBitcoins/lbtcex",
"id": "c622f9ea783a874c53112926bc8b9809717d232a",
"size": "2463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lbtcex/client/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11606"
}
],
"symlink_target": ""
} |
import calendar
# Recipe modules this recipe depends on; resolved by the recipe engine.
DEPS = [
  'recipe_engine/file',
  'recipe_engine/json',
  'recipe_engine/path',
  'recipe_engine/properties',
  'recipe_engine/step',
  'recipe_engine/time',
  'gsutil',
  'vars',
]
# Output file names produced by DM that this recipe uploads.
DM_JSON = 'dm.json'
VERBOSE_LOG = 'verbose.log'
def RunSteps(api):
  """Upload DM images and the dm.json/verbose.log results to Google Storage."""
  api.vars.setup()
  revision = api.properties['revision']
  results_dir = api.path['start_dir'].join('test')
  # Upload the images. It is *vital* that the images are uploaded first
  # so they exist whenever the json is processed.
  image_dest_path = 'gs://%s/dm-images-v1' % api.properties['gs_bucket']
  for ext in ['.png', '.pdf']:
    files_to_upload = api.file.glob_paths(
        'find %s images' % ext,
        results_dir,
        '*%s' % ext,
        test_data=['someimage.png'])
    # For some reason, glob returns results_dir when it should return nothing.
    files_to_upload = [f for f in files_to_upload if str(f).endswith(ext)]
    if len(files_to_upload) > 0:
      api.gsutil.cp('%s images' % ext, results_dir.join('*%s' % ext),
                    image_dest_path, multithread=True)
  # Compute the directory to upload results to: a UTC date/hour hierarchy
  # ending with the revision, builder name, and an epoch timestamp.
  now = api.time.utcnow()
  summary_dest_path = '/'.join([
      'dm-json-v1',
      str(now.year ).zfill(4),
      str(now.month).zfill(2),
      str(now.day ).zfill(2),
      str(now.hour ).zfill(2),
      revision,
      api.vars.builder_name,
      str(int(calendar.timegm(now.utctimetuple())))])
  # Trybot results are further siloed by issue/patchset.
  if api.vars.is_trybot:
    summary_dest_path = '/'.join(('trybot', summary_dest_path,
                                  str(api.vars.issue), str(api.vars.patchset)))
  summary_dest_path = 'gs://%s/%s' % (api.properties['gs_bucket'],
                                      summary_dest_path)
  # Directly upload dm.json and verbose.log if it exists
  # (-Z asks gsutil to gzip-compress the uploaded objects).
  json_file = results_dir.join(DM_JSON)
  log_file = results_dir.join(VERBOSE_LOG)
  api.gsutil.cp('dm.json', json_file,
                summary_dest_path + '/' + DM_JSON, extra_args=['-Z'])
  files = api.file.listdir('check for optional verbose.log file',
                           results_dir, test_data=['dm.json', 'verbose.log'])
  # NOTE(review): assumes listdir yields Paths comparable with log_file --
  # confirm against the recipe_engine file module.
  if log_file in files:
    api.gsutil.cp('verbose.log', log_file,
                  summary_dest_path + '/' + VERBOSE_LOG, extra_args=['-Z'])
def GenTests(api):
  """Simulation cases: normal run, alternate bucket, retried/failed uploads,
  and a trybot run with gerrit properties."""
  builder = 'Upload-Test-Debian9-GCC-GCE-CPU-AVX2-x86_64-Debug-All'
  yield (
    api.test('normal_bot') +
    api.properties(buildername=builder,
                   gs_bucket='skia-infra-gm',
                   revision='abc123',
                   path_config='kitchen')
  )
  yield (
    api.test('alternate_bucket') +
    api.properties(buildername=builder,
                   gs_bucket='skia-infra-gm-alt',
                   revision='abc123',
                   path_config='kitchen')
  )
  # One failed upload attempt: the retry machinery should recover.
  yield (
    api.test('failed_once') +
    api.properties(buildername=builder,
                   gs_bucket='skia-infra-gm',
                   revision='abc123',
                   path_config='kitchen') +
    api.step_data('upload .png images', retcode=1)
  )
  # All five attempts fail: the recipe should surface the failure.
  yield (
    api.test('failed_all') +
    api.properties(buildername=builder,
                   gs_bucket='skia-infra-gm',
                   revision='abc123',
                   path_config='kitchen') +
    api.step_data('upload .png images', retcode=1) +
    api.step_data('upload .png images (attempt 2)', retcode=1) +
    api.step_data('upload .png images (attempt 3)', retcode=1) +
    api.step_data('upload .png images (attempt 4)', retcode=1) +
    api.step_data('upload .png images (attempt 5)', retcode=1)
  )
  yield (
    api.test('trybot') +
    api.properties.tryserver(
        gerrit_project='skia',
        gerrit_url='https://skia-review.googlesource.com/',
    ) +
    api.properties(
        buildername=builder,
        gs_bucket='skia-infra-gm',
        revision='abc123',
        path_config='kitchen')
  )
| {
"content_hash": "947b63ef44dc4ad23ff753026c13e6a5",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 79,
"avg_line_length": 31.38095238095238,
"alnum_prop": 0.5768841679312089,
"repo_name": "youtube/cobalt_sandbox",
"id": "3e8bfe4474079d944d1fd1724f88fad679c38b3e",
"size": "4155",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "third_party/skia/infra/bots/recipes/upload_dm_results.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
from abc import *
from . import build
import pynja.tc
class CppTask(build.BuildTask):
    """One C/C++ (or CUDA) compilation of a single source file.

    The attributes set in ``__init__`` are options that the project's
    toolchain reads when ``emit`` is called; the values here are the
    defaults.  Toolchain-specific attributes are grouped by comment.
    """
    def __init__(self, project, sourcePath, outputPath, workingDir):
        super().__init__(project)
        self.sourcePath = sourcePath
        self.outputPath = outputPath
        self.workingDir = workingDir
        # common compiler options
        self.extraOptions = []
        self.optLevel = 3
        self.debugLevel = 2
        self.warnLevel = 3
        self.warningsAsErrors = False
        self.includePaths = []
        self.defines = []
        self.createPCH = False
        self.usePCH = None # point this at a PCH file
        # gcc-specific
        self.addressModel = None # = {"-m32", "-m64"}
        self.std = None # see option -std within "C Dialect Options"
        self.lto = None
        # msvc-specific
        self.dynamicCRT = True
        self.asyncExceptionHandling = False
        self.externCNoThrow = True
        # nvcc-specific
        self.relocatableDeviceCode = True
        self.deviceDebugLevel = 1 # {0 = none, 1 = lineinfo, 2 = full [disables optimization]}
        # internal state tracking
        self._creatingPDB = False
    def emit(self):
        """Ask the project's toolchain to write the build rules for this compile."""
        project = self.project
        toolchain = project.toolchain
        toolchain.emit_cpp_compile(project, self)
        if self.phonyTarget:
            # also expose the output under a human-friendly alias
            project.projectMan.add_phony_target(self.phonyTarget, self.outputPath)
# Precompiled headers are always force-included via the commandline.
# If a toolchain does not support precompiled headers, this dummy task is
# created instead, and the source header itself is force-included.
# Toolchains must also detect when the 'usePCH' attribute points at a header
# and handle it specially.
class DummyPchTask(CppTask):
    """Stand-in PCH task for toolchains without precompiled-header support.

    The "output" is simply the source header, so downstream code can
    force-include it as though a real PCH had been produced.
    """
    def __init__(self, project, sourcePath, workingDir):
        super().__init__(project, sourcePath, sourcePath, workingDir)

    def emit(self):
        """Null override: no build rules are written for the dummy task."""
        pass
class StaticLibTask(build.BuildTask):
    """Archive the project's accumulated object files into a static library."""
    def __init__(self, project, outputPath, workingDir):
        super().__init__(project)
        self.outputPath = outputPath
        self.workingDir = workingDir
        self.inputs = []

    def emit(self):
        """Write the build rules that create the archive."""
        proj = self.project
        # pull in every object file the project has registered so far
        self.inputs.extend(proj._inputs)
        proj.toolchain.emit_static_lib(proj, self)
        if self.phonyTarget:
            proj.projectMan.add_phony_target(self.phonyTarget, self.outputPath)
class LinkTask(build.BuildTask):
    """Link the project's objects and libraries into an executable or shared library."""
    def __init__(self, project, outputPath, workingDir):
        super().__init__(project)
        self.extraOptions = []
        self.outputPath = outputPath
        self.outputLibraryPath = None
        self.workingDir = workingDir
        self.makeExecutable = True # if False, make shared library instead
        self.inputs = []
        self.keepDebugInfo = True
        # gcc-specific
        self.addressModel = None
        self.lto = None
        self.noUndefined = True

    def emit(self):
        """Write the build rules that perform the link."""
        proj = self.project
        # the project's object files, followed by the libraries it links against
        self.inputs.extend(proj._inputs)
        self.inputs.extend(proj._inputLibs)
        proj.toolchain.emit_link(proj, self)
        if self.phonyTarget:
            proj.projectMan.add_phony_target(self.phonyTarget, self.outputPath)
def _is_toolchain_msvc(toolchain):
    """Return True when *toolchain* behaves like MSVC: either a native
    MSVC toolchain, or an nvcc toolchain hosted by an msvc compiler."""
    if isinstance(toolchain, pynja.MsvcToolChain):
        return True
    return isinstance(toolchain, pynja.NvccToolChain) and 'msvc' in toolchain.hostCompiler
class CppProject(build.Project):
    """Base class for C/C++ projects.

    Accumulates per-file compile tasks, object files and libraries, and
    offers helpers to create static libraries, shared libraries and
    executables.  Subclasses must implement ``get_toolchain``.
    """

    def __init__(self, projectMan, variant):
        super().__init__(projectMan, variant)
        self.outputPath = None
        self.toolchain = self.get_toolchain()
        self.defines = [] # project-level defines affect all compilations; tasks that use preprocessor may also consume this
        self.includePaths = [] # ""
        self.linkLibraries = []
        self._inputs = []
        self._inputLibs = []
        # for situations where we want to aggregate implicit dependencies for cpp_compiles
        self._forcedDeps = set()

    @abstractmethod
    def get_toolchain(self):
        """Return the toolchain that compiles/links this project's tasks."""
        pass

    # add input to library or linker commandline
    def add_input(self, filePath):
        self._inputs.append(filePath)

    def add_input_lib(self, filePath):
        self._inputLibs.append(filePath)
        if filePath.endswith('.so'):
            # a shared object is needed at runtime as well as at link time
            self.add_runtime_dependency(filePath)

    def add_input_libs(self, filePaths):
        self._inputLibs.extend(filePaths)

    def add_lib_dependency(self, project):
        """Link against another project's libraries and track its runtime deps."""
        self._inputLibs.extend(project.linkLibraries)
        self.add_runtime_dependency_project(project)
        self.add_cb_project_reference(project)

    # preprocessor-like tasks
    def set_include_paths_and_defines(self, task):
        """Can be overridden to add more .includePaths and .defines to any compatible task."""
        task.defines.extend(self.defines)
        task.includePaths.extend(self.includePaths)

    # precompiled header
    # For convenience, you can disable PCH creation by setting reallyCreatePCH = False.
    # In that case, the source header will be force-included instead. (and no other
    # modifications will be necessary to client code)
    def make_pch(self, sourcePath, reallyCreatePCH = True):
        with self.make_pch_ex(sourcePath, reallyCreatePCH) as task:
            pass
        return task

    def make_pch_ex(self, sourcePath, reallyCreatePCH = True):
        """Create a PCH task (or a DummyPchTask when PCH is unsupported/disabled)."""
        sourcePath = os.path.normpath(sourcePath)
        if self.toolchain.supportsPCH and reallyCreatePCH:
            if os.path.isabs(sourcePath):
                outputPath = os.path.join(self.builtDir, os.path.basename(sourcePath) + self.toolchain.pchFileExt)
            else:
                outputPath = os.path.join(self.builtDir, sourcePath + self.toolchain.pchFileExt)
                sourcePath = os.path.join(self.projectDir, sourcePath)
            task = CppTask(self, sourcePath, outputPath, self.projectDir)
            task.createPCH = True
            self.set_cpp_compile_options(task)
            if _is_toolchain_msvc(self.toolchain):
                # MSVC-style toolchains also produce an object file for the PCH;
                # it must be fed to the librarian/linker.
                self.add_input(outputPath + self.toolchain.objectFileExt)
            return task
        else:
            if not os.path.isabs(sourcePath):
                sourcePath = os.path.join(self.projectDir, sourcePath)
            task = DummyPchTask(self, sourcePath, self.projectDir)
            return task

    # C++ compile
    def _cpp_compile_one(self, sourcePath):
        # Map the source file to an object path under builtDir and register it.
        sourcePath = os.path.normpath(sourcePath)
        if os.path.isabs(sourcePath):
            outputPath = os.path.join(self.builtDir, os.path.basename(sourcePath) + self.toolchain.objectFileExt)
        else:
            outputPath = os.path.join(self.builtDir, sourcePath + self.toolchain.objectFileExt)
            sourcePath = os.path.join(self.projectDir, sourcePath)
        task = CppTask(self, sourcePath, outputPath, self.projectDir)
        self.set_cpp_compile_options(task)
        self.add_input(outputPath)
        return task

    def cpp_compile(self, filePaths):
        with self.cpp_compile_ex(filePaths) as tasks:
            pass
        return tasks # this could be either a scalar CppTask or an iterable BuildTasks

    def cpp_compile_ex(self, filePaths):
        """Compile one path (str) or an iterable of paths; see cpp_compile."""
        if isinstance(filePaths, str):
            return self._cpp_compile_one(filePaths)
        else:
            taskList = []
            for filePath in filePaths:
                task = self._cpp_compile_one(filePath)
                taskList.append(task)
            tasks = pynja.BuildTasks(taskList)
            return tasks

    def set_cpp_compile_options(self, task):
        """Can be overridden to apply common compiler options to CppTask created by cpp_compile*."""
        self.set_include_paths_and_defines(task)

    # static lib creation
    def make_static_lib_abs_ex(self, outputPath):
        if self.outputPath:
            raise Exception("outputPath already selected: " + self.outputPath)
        self.outputPath = outputPath
        self.libraryPath = outputPath
        self.linkLibraries.append(self.libraryPath)
        self.linkLibraries.extend(self._inputLibs)
        task = StaticLibTask(self, self.outputPath, self.projectDir)
        self.set_static_lib_options(task)
        return task

    def make_static_lib_ex(self, name):
        name = os.path.normpath(name)
        # MSVC-style: name.lib; otherwise the Unix libname.a convention.
        if _is_toolchain_msvc(self.toolchain):
            outputPath = os.path.join(self.builtDir, name + ".lib")
        else:
            outputPath = os.path.join(self.builtDir, "lib" + name + ".a")
        task = self.make_static_lib_abs_ex(outputPath)
        task.phonyTarget = name
        return task

    def make_static_lib(self, name):
        with self.make_static_lib_ex(name) as task:
            return task

    def set_static_lib_options(self, task):
        """Can be overridden to apply options to StaticLibTask created by make_static_lib."""
        pass

    # shared lib creation
    def make_shared_lib_abs_ex(self, outputPath, libraryPath):
        if self.outputPath:
            raise Exception("outputPath already selected: " + self.outputPath)
        self.outputPath = outputPath
        self.libraryPath = libraryPath
        self.linkLibraries.append(self.libraryPath)
        self.add_runtime_dependency(self.outputPath)
        if isinstance(self.toolchain, pynja.AndroidGccToolChain):
            self._android_link_stl()
        task = LinkTask(self, self.outputPath, self.projectDir)
        task.outputLibraryPath = libraryPath
        task.makeExecutable = False
        self.set_shared_lib_options(task)
        return task

    def make_shared_lib_ex(self, name):
        name = os.path.normpath(name)
        if self.toolchain.isTargetWindows:
            outputPath = os.path.join(self.builtDir, name + ".dll")
            if _is_toolchain_msvc(self.toolchain):
                libraryPath = os.path.join(self.builtDir, name + ".lib")
            else:
                libraryPath = outputPath # mingw can link directly against DLLs -- no implib needed
        else:
            outputPath = os.path.join(self.builtDir, "lib" + name + ".so")
            libraryPath = outputPath
        task = self.make_shared_lib_abs_ex(outputPath, libraryPath)
        task.phonyTarget = name
        return task

    def make_shared_lib(self, name):
        with self.make_shared_lib_ex(name) as task:
            return task

    def set_shared_lib_options(self, task):
        """Can be overridden to apply options to LinkTask created by make_shared_lib."""
        pass

    # executable creation
    def make_executable_abs_ex(self, outputPath):
        if self.outputPath:
            raise Exception("outputPath already selected: " + self.outputPath)
        self.outputPath = outputPath
        self.add_runtime_dependency(self.outputPath)
        if isinstance(self.toolchain, pynja.AndroidGccToolChain):
            self._android_link_stl()
        task = LinkTask(self, self.outputPath, self.projectDir)
        task.makeExecutable = True
        self.set_executable_options(task)
        return task

    def make_executable_ex(self, name):
        name = os.path.normpath(name)
        if self.toolchain.isTargetWindows:
            outputPath = os.path.join(self.builtDir, name + ".exe")
        else:
            outputPath = os.path.join(self.builtDir, name)
        task = self.make_executable_abs_ex(outputPath)
        task.phonyTarget = name
        if self.variant.config == 'rel':
            # enable link-time optimization in release builds when supported
            task.lto = self.toolchain.ltoSupport
        return task

    def make_executable(self, name):
        with self.make_executable_ex(name) as task:
            return task

    def set_executable_options(self, task):
        """Can be overridden to apply options to LinkTask created by make_executable."""
        pass

    # for pseudo-projects
    def propagate_lib_dependencies(self):
        self.linkLibraries.extend(self._inputLibs)

    # android-specific
    def android_select_stl(self, android_stl, linkDynamic=False):
        """Selects an STL: None, "gnu-libstdc++", "stlport"."""
        self.android_stl = android_stl
        self.android_stl_link_dynamic = linkDynamic
        if self.android_stl is None:
            pass
        elif self.android_stl == 'gnu-libstdc++':
            basedir = '%s/sources/cxx-stl/gnu-libstdc++/%s' % (self.toolchain.ndkDirEsc, self.toolchain.gccVersionStr)
            self.includePaths.append('%s/include' % (basedir))
            self.includePaths.append('%s/libs/%s/include' % (basedir, self.toolchain.archStr))
        elif self.android_stl == 'stlport':
            basedir = '%s/sources/cxx-stl/stlport' % (self.toolchain.ndkDirEsc)
            self.includePaths.append('%s/include' % (basedir))
        else:
            # bugfix: this previously referenced the undefined name 'task.androidSTL',
            # which raised NameError instead of the intended message
            raise Exception("TODO: unhandled androidSTL=%s" % (self.android_stl,))

    def _android_link_stl(self):
        # Add the libraries for the STL chosen via android_select_stl().
        if self.android_stl is None:
            pass
        elif self.android_stl == 'gnu-libstdc++':
            basedir = '%s/sources/cxx-stl/gnu-libstdc++/%s' % (self.toolchain.ndkDirEsc, self.toolchain.gccVersionStr)
            if self.android_stl_link_dynamic:
                self.add_input_lib('%s/libs/%s/libgnustl_shared.so' % (basedir, self.toolchain.archStr))
            else:
                self.add_input_lib('%s/libs/%s/libgnustl_static.a' % (basedir, self.toolchain.archStr))
                self.add_input_lib('%s/libs/%s/libsupc++.a' % (basedir, self.toolchain.archStr))
        elif self.android_stl == 'stlport':
            basedir = '%s/sources/cxx-stl/stlport' % (self.toolchain.ndkDirEsc)
            if self.android_stl_link_dynamic:
                self.add_input_lib('%s/libs/%s/libstlport_shared.so' % (basedir, self.toolchain.archStr))
            else:
                self.add_input_lib('%s/libs/%s/libstlport_static.a' % (basedir, self.toolchain.archStr))
        else:
            # bugfix: same undefined-name problem as android_select_stl
            raise Exception("TODO: unhandled androidSTL=%s" % (self.android_stl,))
| {
"content_hash": "7e995570ea5c635392953c0a501b2c9b",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 134,
"avg_line_length": 37.713903743315505,
"alnum_prop": 0.6321871676710387,
"repo_name": "fifoforlifo/pynja",
"id": "760a9469817f7ca0d6b2dbf41aff2899a61de040",
"size": "14105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/pynja/cpp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1431"
},
{
"name": "C++",
"bytes": "2971018"
},
{
"name": "Java",
"bytes": "701"
},
{
"name": "Protocol Buffer",
"bytes": "353085"
},
{
"name": "Python",
"bytes": "190328"
},
{
"name": "Shell",
"bytes": "5634"
}
],
"symlink_target": ""
} |
# Query-string parameter names and search configuration.
VALID_QUERY_PARAMS = "query"
SEARCH_TYPE = "type"
SEARCH_DATABASE = "d"
SEARCH_FORMULA = "f"
SEARCH_TEXT = "t"
SEARCH_LIMIT = 50

# User-facing status messages for admin/database operations.
AUTHENTICATION_FAIL = "You must be an admin to perform database manipulation."

# Formula CRUD messages.
FORMULA_CREATION_SUCCESS = "Formula creation is successful!"
FORMULA_CREATION_FAIL = "Formula creation failed"
FORMULA_CREATION_EXIST = "Formula already exist. Adds the new question " \
                         "IDs to the existing formula"
FORMULA_UPDATE_SUCCESS = "Formula update is successful!"
FORMULA_UPDATE_FAIL = "Formula update failed"
FORMULA_DELETION_SUCCESS = "Formula deletion is successful!"
FORMULA_DELETION_FAIL = "Formula deletion failed"
FORMULA_DB_CRUD_FAIL = "Fails to manipulate formula database"
FORMULA_INDEXING_SUCCESS = "Formula and formula index table has been " \
                           "reindexed successfully."
FORMULA_INDEXING_FAIL = "Unable to reindex the formula and formula index " \
                        "table."

# Question / solution CRUD messages.
QUESTION_UPDATE_SUCCESS = "Question update is successful!"
QUESTION_UPDATE_FAIL = "Question update failed"
QUESTION_DB_CRUD_FAIL = "Fails to manipulate question database"
SOLUTION_UPDATE_SUCCESS = "Solution update is successful!"
SOLUTION_UPDATE_FAIL = "Solution update failed"
SOLUTION_DB_CRUD_FAIL = "Fails to manipulate solution database"

# Search messages.
SEARCH_NOT_FOUND = "Unable to find related questions for the query."
FORMULA_SEARCH_NO_QUERY = "Unable to search: query param is unavailable"
FORMULA_SEARCH_NOT_FOUND = "Unable to find related formulas."
FORMULA_SEARCH_NOT_ALLOWED = "Only formula, database, and text search are " \
                             "allowed"

# Test-formula CRUD messages.
FORMULA_TEST_CREATION_SUCCESS = "Test Formula creation is successful!"
FORMULA_TEST_CREATION_FAIL = "Test Formula creation failed"
FORMULA_TEST_CREATION_EXIST = "Test Formula already exist. Adds the new " \
                              "question IDs to the existing formula"
# Fixed copy-paste bug: this message previously said "creation" instead of
# "update" (compare FORMULA_UPDATE_SUCCESS above).
FORMULA_TEST_UPDATE_SUCCESS = "Test Formula update is successful!"
# Fixed grammar bug: was "Test Formula updated failed".
FORMULA_TEST_UPDATE_FAIL = "Test Formula update failed"
FORMULA_TEST_DELETION_SUCCESS = "Test Formula deletion is successful!"
FORMULA_TEST_DELETION_FAIL = "Test Formula deletion failed"
FORMULA_TEST_DB_CRUD_FAIL = "Fails to manipulate test formula database"
FORMULA_TEST_INDEXING_SUCCESS = "Test formula and test formula index table " \
                                "has been reindexed successfully."
FORMULA_TEST_INDEXING_FAIL = "Unable to reindex the test formula and " \
"test formula index table." | {
"content_hash": "d03ab45f7c3b23c06edf8028d89072f0",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 46.574074074074076,
"alnum_prop": 0.7137176938369781,
"repo_name": "deka108/mathqa-server",
"id": "9636cd837c4323197fc7ed8b6704cf70d5f45ec8",
"size": "2515",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apiv2/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "120893"
},
{
"name": "HTML",
"bytes": "500139"
},
{
"name": "JavaScript",
"bytes": "1112441"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "2233286"
},
{
"name": "Python",
"bytes": "350711"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from kite.version import get_version
# Package metadata for kite-string.  All user-visible strings are kept
# verbatim; keyword order is cosmetic and irrelevant to setuptools.
setup(
    name='kite-string',
    version=get_version(),
    description='command-line HTTP request wrapper for takosan .',
    url='https://github.com/laughk/kite-string',
    author='Kei Iwasaki',
    author_email='me@laughk.org',
    license='MIT License',
    packages=find_packages(),
    include_package_data=True,
    # runtime dependencies
    install_requires=[
        'requests',
        'Click',
    ],
    # installs the `kite` console command
    entry_points='''
    [console_scripts]
    kite=kite.cli:main
    ''',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
)
| {
"content_hash": "9491c4c2b4f4a5727c470b9b7bd4546f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 66,
"avg_line_length": 25.258064516129032,
"alnum_prop": 0.6015325670498084,
"repo_name": "laughk/kite-string",
"id": "41aab1accc686202a128a05b5172dddd0f6f582d",
"size": "829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5883"
},
{
"name": "Shell",
"bytes": "239"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration.

    Adds five link-related columns (page_link, anchor, mailto, phone,
    target) to the Boostrap3ButtonPlugin table.

    NOTE(review): generated code -- the ``models`` freeze below mirrors
    the ORM state at generation time and should not be edited by hand.
    """

    def forwards(self, orm):
        # Apply the migration: create the five new columns.
        # Adding field 'Boostrap3ButtonPlugin.page_link'
        db.add_column(u'aldryn_bootstrap3_boostrap3buttonplugin', 'page_link',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Page'], null=True, on_delete=models.SET_NULL, blank=True),
                      keep_default=False)

        # Adding field 'Boostrap3ButtonPlugin.anchor'
        db.add_column(u'aldryn_bootstrap3_boostrap3buttonplugin', 'anchor',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=128, blank=True),
                      keep_default=False)

        # Adding field 'Boostrap3ButtonPlugin.mailto'
        db.add_column(u'aldryn_bootstrap3_boostrap3buttonplugin', 'mailto',
                      self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True),
                      keep_default=False)

        # Adding field 'Boostrap3ButtonPlugin.phone'
        db.add_column(u'aldryn_bootstrap3_boostrap3buttonplugin', 'phone',
                      self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True),
                      keep_default=False)

        # Adding field 'Boostrap3ButtonPlugin.target'
        db.add_column(u'aldryn_bootstrap3_boostrap3buttonplugin', 'target',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Roll back: drop the five columns added in forwards().
        # Deleting field 'Boostrap3ButtonPlugin.page_link'
        db.delete_column(u'aldryn_bootstrap3_boostrap3buttonplugin', 'page_link_id')

        # Deleting field 'Boostrap3ButtonPlugin.anchor'
        db.delete_column(u'aldryn_bootstrap3_boostrap3buttonplugin', 'anchor')

        # Deleting field 'Boostrap3ButtonPlugin.mailto'
        db.delete_column(u'aldryn_bootstrap3_boostrap3buttonplugin', 'mailto')

        # Deleting field 'Boostrap3ButtonPlugin.phone'
        db.delete_column(u'aldryn_bootstrap3_boostrap3buttonplugin', 'phone')

        # Deleting field 'Boostrap3ButtonPlugin.target'
        db.delete_column(u'aldryn_bootstrap3_boostrap3buttonplugin', 'target')

    # Frozen ORM state used by South to build the `orm` object above.
    models = {
        u'aldryn_bootstrap3.boostrap3blockquoteplugin': {
            'Meta': {'object_name': 'Boostrap3BlockquotePlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
            'reverse': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'aldryn_bootstrap3.boostrap3buttonplugin': {
            'Meta': {'object_name': 'Boostrap3ButtonPlugin', '_ormbases': ['cms.CMSPlugin']},
            'anchor': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
            'context': (u'django.db.models.fields.CharField', [], {'default': "u'link'", 'max_length': '255'}),
            'icon_left': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
            'icon_right': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '256', 'blank': 'True'}),
            'mailto': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'page_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'size': (u'django.db.models.fields.CharField', [], {'default': "u'md'", 'max_length': '255', 'blank': 'True'}),
            'target': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'default': "u''", 'max_length': '200', 'blank': 'True'})
        },
        u'aldryn_bootstrap3.bootstrap3columnplugin': {
            'Meta': {'object_name': 'Bootstrap3ColumnPlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            u'lg_offset': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            u'lg_size': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
            u'md_offset': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            u'md_size': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
            u'sm_offset': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            u'sm_size': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
            u'xs_offset': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            u'xs_size': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'})
        },
        u'aldryn_bootstrap3.bootstrap3rowplugin': {
            'Meta': {'object_name': 'Bootstrap3RowPlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.page': {
            'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
            'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
            'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
            'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
            'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
            'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        },
        u'sites.site': {
            'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['aldryn_bootstrap3']
"content_hash": "4226af6efcdc8eca2a0b449585358686",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 196,
"avg_line_length": 80.8258064516129,
"alnum_prop": 0.5743135376756067,
"repo_name": "intip/aldryn-bootstrap3",
"id": "928c3ffce98e83e9bb90e5fa32f8875184df147b",
"size": "12552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aldryn_bootstrap3/south_migrations/0007_auto__add_field_boostrap3buttonplugin_page_link__add_field_boostrap3bu.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "53946"
},
{
"name": "Python",
"bytes": "836773"
}
],
"symlink_target": ""
} |
import unittest
import chainer
import numpy
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
@testing.parameterize(*testing.product({
    'shape': [(3, 2), ()],
    'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
class TestMinimum(unittest.TestCase):
    """Forward, backward and double-backward checks for functions.minimum,
    parameterized over shape (matrix and scalar) and float dtype."""

    def setUp(self):
        shape = self.shape
        self.gy = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        self.ggx1 = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        self.ggx2 = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        self.check_forward_options = {}
        self.check_backward_options = {'dtype': numpy.float64}
        self.check_double_backward_options = {'dtype': numpy.float64}
        if self.dtype == numpy.float16:
            # fp16 needs a larger finite-difference step and looser tolerances
            eps = 2 ** -3
            self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
            self.check_backward_options = {'atol': 1e-2, 'rtol': 1e-1}
            self.check_double_backward_options = {'atol': 1e-2, 'rtol': 1e-1}
        else:
            eps = 1e-2
        self.check_backward_options['eps'] = eps
        self.check_double_backward_options['eps'] = eps
        self.x1 = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        self.x2 = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        # Avoid close values for stability in numerical gradient.
        # minimum() is non-differentiable where x1 == x2, so inputs within
        # 2*eps of each other are pushed apart to fixed values.
        idx = abs(self.x1 - self.x2) < 2 * eps
        self.x1[idx] = -0.5
        self.x2[idx] = 0.5
        self.y_expected = numpy.minimum(self.x1, self.x2)

    def check_forward(self, x1_data, x2_data, y_expected):
        # Forward result must match numpy.minimum elementwise.
        x1 = chainer.Variable(x1_data)
        x2 = chainer.Variable(x2_data)
        y = functions.minimum(x1, x2)
        self.assertEqual(y.data.dtype, self.dtype)
        testing.assert_allclose(
            y_expected, y.data, **self.check_forward_options)

    def test_forward_cpu(self):
        self.check_forward(self.x1, self.x2, self.y_expected)

    @attr.gpu
    def test_forward_gpu(self):
        x1 = cuda.to_gpu(self.x1)
        x2 = cuda.to_gpu(self.x2)
        self.check_forward(x1, x2, self.y_expected)

    def check_backward(self, x1_data, x2_data, y_grad):
        # Analytic gradient vs numerical gradient.
        func = functions.minimum
        x = (x1_data, x2_data)
        gradient_check.check_backward(
            func, x, y_grad, **self.check_backward_options)

    def test_backward_cpu(self):
        self.check_backward(self.x1, self.x2, self.gy)

    @attr.gpu
    def test_backward_gpu(self):
        x1 = cuda.to_gpu(self.x1)
        x2 = cuda.to_gpu(self.x2)
        gy = cuda.to_gpu(self.gy)
        self.check_backward(x1, x2, gy)

    def check_double_backward(self, x1, x2, gy, ggx1, ggx2):
        # NOTE(review): no_grads has three entries for a two-element input
        # tuple -- verify against gradient_check.check_double_backward's API.
        gradient_check.check_double_backward(
            functions.minimum, (x1, x2), gy, (ggx1, ggx2),
            no_grads=[True, True, False],
            **self.check_double_backward_options)

    def test_double_backward_cpu(self):
        self.check_double_backward(
            self.x1, self.x2, self.gy, self.ggx1, self.ggx2)

    @attr.gpu
    def test_double_backward_gpu(self):
        x1 = cuda.to_gpu(self.x1)
        x2 = cuda.to_gpu(self.x2)
        gy = cuda.to_gpu(self.gy)
        ggx1 = cuda.to_gpu(self.ggx1)
        ggx2 = cuda.to_gpu(self.ggx2)
        self.check_double_backward(x1, x2, gy, ggx1, ggx2)
@testing.parameterize(*testing.product({
    'dtype1': [numpy.float16, numpy.float32, numpy.float64],
    'dtype2': [numpy.float16, numpy.float32, numpy.float64]
}))
class TestMinimumInconsistentTypes(unittest.TestCase):
    """functions.minimum must reject operand pairs whose dtypes differ."""

    def test_minimum_inconsistent_types(self):
        if self.dtype1 == self.dtype2:
            # Matching dtypes are valid input; nothing to verify here.
            return
        lhs = chainer.Variable(
            numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype1))
        rhs = chainer.Variable(
            numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype2))
        with self.assertRaises(type_check.InvalidType):
            functions.minimum(lhs, rhs)
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
class TestMinimumInconsistentShapes(unittest.TestCase):
    """functions.minimum must reject operand pairs whose shapes differ."""

    def test_minimum_inconsistent_shapes(self):
        lhs = chainer.Variable(
            numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype))
        rhs = chainer.Variable(
            numpy.random.uniform(-1, 1, (2, 3)).astype(self.dtype))
        with self.assertRaises(type_check.InvalidType):
            functions.minimum(lhs, rhs)
testing.run_module(__name__, __file__)
| {
"content_hash": "87a2c091308ef25e3822aa5325c72b48",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 77,
"avg_line_length": 35.20454545454545,
"alnum_prop": 0.6214762212179901,
"repo_name": "anaruse/chainer",
"id": "ad968eec8b789a1627723f1e12ef6aa8b7cc6baf",
"size": "4647",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/functions_tests/math_tests/test_minimum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3723858"
}
],
"symlink_target": ""
} |
from direct.task.Task import Task
from panda3d.core import *
from panda3d.direct import *
from DistributedNPCToonBaseAI import *
from toontown.quest import Quests
class DistributedNPCSpecialQuestGiverAI(DistributedNPCToonBaseAI):
    """AI-side NPC that offers, assigns, completes, and cancels quests.

    Drives the client quest-movie state machine via 'setMovie' updates and
    mediates between the interacting avatar and the shard's questManager.

    Bug fix vs. the previous revision: chooseQuest no longer calls
    avatarChoseQuest a second time after the validation loop with the stale
    loop variable, which granted a quest that was never offered.
    """

    def __init__(self, air, npcId, questCallback = None, hq = 0):
        DistributedNPCToonBaseAI.__init__(self, air, npcId, questCallback)
        self.hq = hq
        self.tutorial = 0
        # Avatar we are currently awaiting a quest/track answer from.
        self.pendingAvId = None
        return

    def getTutorial(self):
        return self.tutorial

    def setTutorial(self, val):
        self.tutorial = val

    def getHq(self):
        return self.hq

    def avatarEnter(self):
        """An avatar walked up to this NPC; hand the interaction to the quest manager."""
        avId = self.air.getAvatarIdFromSender()
        self.notify.debug('avatar enter ' + str(avId))
        self.air.questManager.requestInteract(avId, self)
        DistributedNPCToonBaseAI.avatarEnter(self)

    def chooseQuest(self, questId, quest = None):
        """Handle the avatar's quest pick; only accept a quest that was offered."""
        avId = self.air.getAvatarIdFromSender()
        self.notify.debug('chooseQuest: avatar %s choseQuest %s' % (avId, questId))
        if not self.pendingAvId:
            self.notify.warning('chooseQuest: not expecting an answer from any avatar: %s' % avId)
            return
        if self.pendingAvId != avId:
            self.notify.warning('chooseQuest: not expecting an answer from this avatar: %s' % avId)
            return
        if questId == 0:
            # Avatar backed out of the quest dialog.
            self.pendingAvId = None
            self.pendingQuests = None
            self.air.questManager.avatarCancelled(avId)
            self.cancelChoseQuest(avId)
            return
        for quest in self.pendingQuests:
            if questId == quest[0]:
                self.pendingAvId = None
                self.pendingQuests = None
                self.air.questManager.avatarChoseQuest(avId, self, *quest)
                return
        # Fix: the previous revision also called avatarChoseQuest here with
        # the stale loop variable, assigning a quest that was never offered.
        self.notify.warning('chooseQuest: avatar: %s chose a quest not offered: %s' % (avId, questId))
        self.pendingAvId = None
        self.pendingQuests = None
        return

    def chooseTrack(self, trackId):
        """Handle the avatar's track pick; only accept a track that was offered."""
        avId = self.air.getAvatarIdFromSender()
        self.notify.debug('chooseTrack: avatar %s choseTrack %s' % (avId, trackId))
        if not self.pendingAvId:
            self.notify.warning('chooseTrack: not expecting an answer from any avatar: %s' % avId)
            return
        if self.pendingAvId != avId:
            self.notify.warning('chooseTrack: not expecting an answer from this avatar: %s' % avId)
            return
        if trackId == -1:
            # Avatar backed out of the track dialog.
            self.pendingAvId = None
            self.pendingTracks = None
            self.pendingTrackQuest = None
            self.air.questManager.avatarCancelled(avId)
            self.cancelChoseTrack(avId)
            return
        for track in self.pendingTracks:
            if trackId == track:
                self.air.questManager.avatarChoseTrack(avId, self, self.pendingTrackQuest, trackId)
                self.pendingAvId = None
                self.pendingTracks = None
                self.pendingTrackQuest = None
                return
        self.notify.warning('chooseTrack: avatar: %s chose a track not offered: %s' % (avId, trackId))
        self.pendingAvId = None
        self.pendingTracks = None
        self.pendingTrackQuest = None
        return

    def sendTimeoutMovie(self, task):
        """The avatar took too long to answer; show the timeout movie and reset."""
        self.pendingAvId = None
        self.pendingQuests = None
        self.pendingTracks = None
        self.pendingTrackQuest = None
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIMEOUT,
         self.npcId,
         self.busy,
         [],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        self.sendClearMovie(None)
        self.busy = 0
        return Task.done

    def sendClearMovie(self, task):
        """Reset all pending state and tell clients to clear the movie."""
        self.pendingAvId = None
        self.pendingQuests = None
        self.pendingTracks = None
        self.pendingTrackQuest = None
        self.busy = 0
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_CLEAR,
         self.npcId,
         0,
         [],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        return Task.done

    def rejectAvatar(self, avId):
        """Show the generic rejection movie to the avatar."""
        self.busy = avId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_REJECT,
         self.npcId,
         avId,
         [],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))

    def rejectAvatarTierNotDone(self, avId):
        """Reject the avatar because their current quest tier is unfinished."""
        self.busy = avId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIER_NOT_DONE,
         self.npcId,
         avId,
         [],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))

    def completeQuest(self, avId, questId, rewardId):
        """Play the quest-completed movie for avId."""
        self.busy = avId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_COMPLETE,
         self.npcId,
         avId,
         [questId, rewardId, 0],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def incompleteQuest(self, avId, questId, completeStatus, toNpcId):
        """Play the quest-incomplete movie (quest not yet finished)."""
        self.busy = avId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_INCOMPLETE,
         self.npcId,
         avId,
         [questId, completeStatus, toNpcId],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def assignQuest(self, avId, questId, rewardId, toNpcId):
        """Assign a quest to avId and play the assignment movie."""
        self.busy = avId
        if self.questCallback:
            self.questCallback()
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_ASSIGN,
         self.npcId,
         avId,
         [questId, rewardId, toNpcId],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def presentQuestChoice(self, avId, quests):
        """Offer a list of quests to avId and await chooseQuest()."""
        self.busy = avId
        self.pendingAvId = avId
        self.pendingQuests = quests
        # The wire format wants one flat list, not a list of quest tuples.
        flatQuests = []
        for quest in quests:
            flatQuests.extend(quest)
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE,
         self.npcId,
         avId,
         flatQuests,
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def presentTrackChoice(self, avId, questId, tracks):
        """Offer a list of gag tracks to avId and await chooseTrack()."""
        self.busy = avId
        self.pendingAvId = avId
        self.pendingTracks = tracks
        self.pendingTrackQuest = questId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE,
         self.npcId,
         avId,
         tracks,
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def cancelChoseQuest(self, avId):
        """Acknowledge that avId cancelled out of the quest-choice dialog."""
        self.busy = avId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE_CANCEL,
         self.npcId,
         avId,
         [],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def cancelChoseTrack(self, avId):
        """Acknowledge that avId cancelled out of the track-choice dialog."""
        self.busy = avId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE_CANCEL,
         self.npcId,
         avId,
         [],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def setMovieDone(self):
        """Client reports the movie finished; clear state if it was ours."""
        avId = self.air.getAvatarIdFromSender()
        self.notify.debug('setMovieDone busy: %s avId: %s' % (self.busy, avId))
        if self.busy == avId:
            taskMgr.remove(self.uniqueName('clearMovie'))
            self.sendClearMovie(None)
        elif self.busy:
            self.air.writeServerEvent('suspicious', avId, 'DistributedNPCToonAI.setMovieDone busy with %s' % self.busy)
            self.notify.warning('somebody called setMovieDone that I was not busy with! avId: %s' % avId)
        return
| {
"content_hash": "42e22a3b0151dee2b228cd631b063432",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 119,
"avg_line_length": 37.97345132743363,
"alnum_prop": 0.6243299930086227,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "ca1f76929d08ea72ab2a3ee35b0ec38647fc045e",
"size": "8582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/toon/DistributedNPCSpecialQuestGiverAI.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
} |
from qpid.messaging import *
from qpid.tests.messaging import Base
from qpidtoollibs.broker import EventHelper
import math
class EventTests (Base):
    """
    Test various qmf events
    """

    def setup_connection(self):
        return Connection.establish(self.broker, **self.connection_options())

    def setup_session(self):
        return self.conn.session()

    def test_queue_declare(self):
        """A queue declaration must raise a queueDeclare QMF event."""
        helper = EventHelper()
        # subscribe for queue declare events
        rcv = self.ssn.receiver(helper.eventAddress("org.apache.qpid.broker", "queueDeclare"))
        # create a queue
        snd = self.ssn.sender("myq; {create:always, delete:always}")
        # ensure we got an event
        event = helper.event(rcv.fetch(timeout=1))
        # Fix: the original used ``assert expr, "expected"`` which only tests
        # truthiness and treats the expected value as the failure message, so
        # the comparison was never made.  Compare explicitly instead.
        assert event.name == "org_apache_qpid_broker:queueDeclare"
        assert event.qName == "myq"

    def test_queue_delete(self):
        """Deleting a queue must raise a queueDelete QMF event."""
        helper = EventHelper()
        rcv = self.ssn.receiver(helper.eventAddress("org.apache.qpid.broker", "queueDelete"))
        snd = self.ssn.sender("myq; {create:always, delete:always}")
        snd.close()
        event = helper.event(rcv.fetch(timeout=1))
        assert event.name == "org_apache_qpid_broker:queueDelete"
        assert event.qName == "myq"

    def test_queue_autodelete_exclusive(self):
        """Closing the session owning an exclusive auto-delete queue raises queueDelete."""
        helper = EventHelper()
        rcv = self.ssn.receiver(helper.eventAddress("org.apache.qpid.broker", "queueDelete"))
        # create new session
        ssn2 = self.setup_session()
        snd = ssn2.sender("myq; {create:always, node:{x-declare:{auto-delete:True, exclusive:True}}}")
        ssn2.close()
        event = helper.event(rcv.fetch(timeout=5))
        assert event.name == "org_apache_qpid_broker:queueDelete"
        assert event.qName == "myq"

    def test_queue_autodelete_shared(self):
        """Closing the last consumer of a shared auto-delete queue raises queueDelete."""
        helper = EventHelper()
        rcv = self.ssn.receiver(helper.eventAddress("org.apache.qpid.broker", "queueDelete"))
        rcv2 = self.ssn.receiver("myq; {create:always, node:{x-declare:{auto-delete:True}}}")
        rcv2.close()
        event = helper.event(rcv.fetch(timeout=5))
        assert event.name == "org_apache_qpid_broker:queueDelete"
        assert event.qName == "myq"
| {
"content_hash": "22ef6f51d25248ed8a84c89a25f0a87c",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 102,
"avg_line_length": 34.40625,
"alnum_prop": 0.6475930971843779,
"repo_name": "irinabov/debian-qpid-python",
"id": "7ab7b0a1ac80919668517598c4e4be6f01db8dfe",
"size": "2994",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qpid_tests/broker_0_10/qmf_events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "43"
},
{
"name": "Python",
"bytes": "999393"
}
],
"symlink_target": ""
} |
from typing import Any, cast, Dict, Optional
from pyre_extensions import none_throws
from backend.common.consts.award_type import AwardType
from backend.common.consts.notification_type import NotificationType
from backend.common.models.event import Event
from backend.common.models.notifications.notification import Notification
from backend.common.models.team import Team
class AwardsNotification(Notification):
    """Notification payloads for awards posted at an event.

    When *team* is supplied, the notification is personalized to that
    team's awards; otherwise it announces the event-wide award list.
    """

    def __init__(self, event: Event, team: Optional[Team] = None) -> None:
        self.event = event
        self.team = team
        if team:
            self.team_awards = event.team_awards().get(team.key, [])
        else:
            self.team_awards = []

    @classmethod
    def _type(cls) -> NotificationType:
        from backend.common.consts.notification_type import NotificationType

        return NotificationType.AWARDS

    @property
    def fcm_notification(self) -> Optional[Any]:
        from firebase_admin import messaging

        if self.team:
            # Team-specific payload.
            award_count = len(self.team_awards)
            if award_count == 1:
                award = self.team_awards[0]
                # For WINNER/FINALIST, change our verbage
                verb = (
                    "is the"
                    if award.award_type_enum in [AwardType.WINNER, AwardType.FINALIST]
                    else "won the"
                )
                body = "{} {}".format(verb, award.name_str)
            else:
                body = "won {} awards".format(award_count)
            team_number = none_throws(self.team).team_number
            return messaging.Notification(
                title="Team {} Awards".format(team_number),
                body="Team {} {} at the {} {}.".format(
                    team_number,
                    body,
                    self.event.year,
                    self.event.normalized_name,
                ),
            )

        # Event-wide payload.
        return messaging.Notification(
            title="{} Awards".format(self.event.event_short.upper()),
            body="{} {} awards have been posted.".format(
                self.event.year, self.event.normalized_name
            ),
        )

    @property
    def data_payload(self) -> Optional[Dict[str, str]]:
        data = {"event_key": self.event.key_name}
        if self.team:
            data["team_key"] = self.team.key_name
        return data

    @property
    def webhook_message_data(self) -> Optional[Dict[str, Any]]:
        data = cast(Dict[str, Any], none_throws(self.data_payload))
        data["event_name"] = self.event.name

        from backend.common.helpers.award_helper import AwardHelper
        from backend.common.queries.dict_converters.award_converter import (
            AwardConverter,
        )

        # Team notifications serialize only that team's awards; event
        # notifications serialize every award at the event.
        source_awards = self.team_awards if self.team else self.event.awards
        data["awards"] = [
            AwardConverter.awardConverter_v3(award)
            for award in AwardHelper.organize_awards(source_awards)
        ]
        return data
| {
"content_hash": "f9567def54062d7f5b6028470e824eef",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 83,
"avg_line_length": 35.68181818181818,
"alnum_prop": 0.5872611464968153,
"repo_name": "the-blue-alliance/the-blue-alliance",
"id": "09497b2d06b68ce68b8ec746f073a7a34f6baf2f",
"size": "3140",
"binary": false,
"copies": "1",
"ref": "refs/heads/py3",
"path": "src/backend/common/models/notifications/awards.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "359032"
},
{
"name": "Dockerfile",
"bytes": "2503"
},
{
"name": "HTML",
"bytes": "5877313"
},
{
"name": "JavaScript",
"bytes": "755910"
},
{
"name": "Less",
"bytes": "244218"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Pug",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "4321885"
},
{
"name": "Ruby",
"bytes": "4677"
},
{
"name": "Shell",
"bytes": "27698"
}
],
"symlink_target": ""
} |
'''
Test image-metric alarms and custom SNS text templates delivered via email.

@author: Ronghao.zhou
'''
import time as time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.zwatch_operations as zwt_ops
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
import os
test_stub = test_lib.lib_get_test_stub()
test_dict = test_state.TestStateDict()
# Resource handles created by test(); kept at module scope so that
# error_cleanup() can tear them down if test() raises midway.
email_platform_uuid = None
email_endpoint_uuid = None
my_sns_topic_uuid = None
event_template_uuid = None
alarm_template_uuid = None
alarm_uuid_list = []
def test():
    """End-to-end ZWatch check: ZStack/Image metric alarms fire emails,
    and custom SNS text templates are honored for alarm notifications.
    """
    global my_sns_topic_uuid, email_endpoint_uuid, email_platform_uuid, event_template_uuid, \
        alarm_template_uuid, alarm_uuid_list, test_dict
    # Mail server settings are supplied through the job environment.
    smtp_server = os.environ.get('smtpServer')
    pop_server = os.environ.get('popServer')
    smtp_port = os.environ.get('smtpPort')
    username = os.environ.get('mailUsername')
    password = os.environ.get('mailPassword')
    email_platform_name = 'Alarm_email'
    # Create and validate the SNS email platform, then wire an endpoint
    # for `username` into a fresh topic.
    email_platform = zwt_ops.create_sns_email_platform(smtp_server, smtp_port,
                                                       email_platform_name, username, password)
    email_platform_uuid = email_platform.uuid
    try:
        zwt_ops.validate_sns_email_platform(email_platform_uuid)
    except:
        test_util.test_fail(
            'Validate SNS Email Platform Failed, Email Plarform: %s' % email_platform_uuid)
    email_endpoint_uuid = zwt_ops.create_sns_email_endpoint(username, 'test_qa',
                                                            email_platform_uuid).uuid
    my_sns_topic = zwt_ops.create_sns_topic('my_sns_topic')
    my_sns_topic_uuid = my_sns_topic.uuid
    zwt_ops.subscribe_sns_topic(my_sns_topic_uuid, email_endpoint_uuid)
    # create alarm
    # One alarm per ZStack/Image metric; each fires into the SNS topic above.
    namespace = 'ZStack/Image'
    greater_than_or_equal_to = 'GreaterThanOrEqualTo'
    greater_than = 'GreaterThan'
    actions = [{"actionUuid": my_sns_topic_uuid, "actionType": "sns"}]
    period = 10
    threshold_1 = 1
    threshold_3 = 3
    # NOTE(review): threshold_10 and threshold_50 are never used below.
    threshold_10 = 10
    threshold_50 = 50
    total_image_count = 'TotalImageCount'
    total_image_count_alarm_uuid = zwt_ops.create_alarm(greater_than_or_equal_to, period,
                                                        threshold_3, namespace,
                                                        total_image_count,
                                                        name='total-count-image',
                                                        repeat_interval=600,
                                                        actions=actions).uuid
    alarm_uuid_list.append(total_image_count_alarm_uuid)
    ready_image_count = 'ReadyImageCount'
    ready_image_count_alarm_uuid = zwt_ops.create_alarm(greater_than_or_equal_to, period,
                                                        threshold_3, namespace,
                                                        ready_image_count,
                                                        name='ready_image_count',
                                                        repeat_interval=600,
                                                        actions=actions).uuid
    alarm_uuid_list.append(ready_image_count_alarm_uuid)
    ready_image_in_percent = 'ReadyImageInPercent'
    ready_image_in_percent_alarm_uuid = zwt_ops.create_alarm(greater_than_or_equal_to, period,
                                                             threshold_1, namespace,
                                                             ready_image_in_percent,
                                                             name='ready_image_in_percent',
                                                             repeat_interval=600,
                                                             actions=actions).uuid
    alarm_uuid_list.append(ready_image_in_percent_alarm_uuid)
    root_volume_template_count = 'RootVolumeTemplateCount'
    root_volume_template_count_alarm_uuid = zwt_ops.create_alarm(greater_than_or_equal_to,
                                                                 period,
                                                                 threshold_3, namespace,
                                                                 root_volume_template_count,
                                                                 name='root_volume_template_count',
                                                                 repeat_interval=600,
                                                                 actions=actions, ).uuid
    alarm_uuid_list.append(root_volume_template_count_alarm_uuid)
    root_volume_template_in_percent = 'RootVolumeTemplateInPercent'
    root_volume_template_in_percent_alarm_uuid = zwt_ops.create_alarm(greater_than, period,
                                                                      threshold_1, namespace,
                                                                      root_volume_template_in_percent,
                                                                      name='root_volume_template_in_percent',
                                                                      repeat_interval=600,
                                                                      actions=actions).uuid
    alarm_uuid_list.append(root_volume_template_in_percent_alarm_uuid)
    data_volume_template_count = 'DataVolumeTemplateCount'
    data_volume_template_count_alarm_uuid = zwt_ops.create_alarm(greater_than_or_equal_to,
                                                                 period,
                                                                 threshold_3, namespace,
                                                                 data_volume_template_count,
                                                                 name='data_volume_template_count',
                                                                 repeat_interval=600,
                                                                 actions=actions).uuid
    alarm_uuid_list.append(data_volume_template_count_alarm_uuid)
    data_volume_template_in_percent = 'DataVolumeTemplateInPercent'
    data_volume_template_in_percent_alarm_uuid = zwt_ops.create_alarm(greater_than, period,
                                                                      threshold_1, namespace,
                                                                      data_volume_template_in_percent,
                                                                      name='data_volume_template_in_percent',
                                                                      repeat_interval=600,
                                                                      actions=actions).uuid
    alarm_uuid_list.append(data_volume_template_in_percent_alarm_uuid)
    iso_count = 'ISOCount'
    iso_count_alarm_uuid = zwt_ops.create_alarm(greater_than_or_equal_to, period, threshold_3,
                                                namespace, iso_count, name='iso_count',
                                                repeat_interval=600, actions=actions).uuid
    alarm_uuid_list.append(iso_count_alarm_uuid)
    iso_in_percent = 'ISOInPercent'
    iso_in_percent_alarm_uuid = zwt_ops.create_alarm(greater_than, period, threshold_1,
                                                     namespace,
                                                     iso_in_percent, name='iso_in_percent',
                                                     repeat_interval=600, actions=actions).uuid
    alarm_uuid_list.append(iso_in_percent_alarm_uuid)
    # create Image
    # Spin up a VM with an attached volume, then create enough templates
    # and ISOs to push each metric past its alarm threshold.
    image_name = os.environ.get('imageName_s')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm_name = 'multihost_basic_vm'
    vm = test_stub.create_vm(vm_name, image_name, l3_name)
    test_dict.add_vm(vm)
    volume = test_stub.create_volume()
    test_dict.add_volume(volume)
    volume.attach(vm)
    zone_uuid = vm.get_vm().zoneUuid
    root_volume_uuid = test_lib.lib_get_root_volume_uuid(vm.get_vm())
    bs_uuid_list = test_lib.lib_get_backup_storage_uuid_list_by_zone(zone_uuid)
    image_option = test_util.ImageOption()
    image_option.set_root_volume_uuid(root_volume_uuid)
    image_option.set_format('qcow2')
    image_option.set_backup_storage_uuid_list(bs_uuid_list)
    # image_option.set_mediaType('ISO')
    for i in range(threshold_3):
        image_option.set_name('root_volume_template_for_test_' + str(i))
        root_volume_template = zstack_image_header.ZstackTestImage()
        root_volume_template.set_creation_option(image_option)
        root_volume_template.create()
        test_dict.add_image(root_volume_template)
        iso = test_stub.add_test_minimal_iso("iso_for_test_" + str(i))
        test_dict.add_image(iso)
    # Give the alarms one evaluation period plus delivery time.
    time.sleep(30)
    # before change template
    # Each image metric should now have produced a default-template email.
    # NOTE(review): the two DataVolumeTemplate* alarms are not checked here;
    # they are verified at the end after data-volume templates are created.
    flag = zwt_ops.check_sns_email(pop_server, username, password, total_image_count,
                                   total_image_count_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    flag = zwt_ops.check_sns_email(pop_server, username, password, ready_image_count,
                                   ready_image_count_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    flag = zwt_ops.check_sns_email(pop_server, username, password, ready_image_in_percent,
                                   ready_image_in_percent_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    flag = zwt_ops.check_sns_email(pop_server, username, password, root_volume_template_count,
                                   root_volume_template_count_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    flag = zwt_ops.check_sns_email(pop_server, username, password,
                                   root_volume_template_in_percent,
                                   root_volume_template_in_percent_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    flag = zwt_ops.check_sns_email(pop_server, username, password, iso_count,
                                   iso_count_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    flag = zwt_ops.check_sns_email(pop_server, username, password, iso_in_percent,
                                   iso_in_percent_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    # The custom-template keyword must not appear in any mail before the
    # custom template is installed.
    alarm_keywords = 'TemplateForAlarmOn'
    if zwt_ops.check_keywords_in_email(pop_server, username, password, alarm_keywords,
                                       total_image_count_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password, alarm_keywords,
                                       ready_image_count_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password, alarm_keywords,
                                       ready_image_in_percent_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password, alarm_keywords,
                                       root_volume_template_count_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password, alarm_keywords,
                                       root_volume_template_in_percent_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password, alarm_keywords,
                                       data_volume_template_count_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password, alarm_keywords,
                                       data_volume_template_in_percent_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password, alarm_keywords,
                                       iso_count_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password, alarm_keywords,
                                       iso_in_percent_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    # Install custom alarm/event text templates carrying known keywords.
    application_platform_type = 'Email'
    alarm_template_name = 'my-alarm-template'
    alarm_template = '${ALARM_NAME} Change status to ${ALARM_CURRENT_STATUS}' \
                     'ALARM_UUID:${ALARM_UUID}' \
                     'keyword1:ThisWordIsKeyWord' \
                     'keyword2:TemplateForAlarmOn' \
                     '(Using for template changes email check)'
    alarm_template_uuid = zwt_ops.create_sns_text_template(alarm_template_name,
                                                           application_platform_type,
                                                           alarm_template,
                                                           default_template=False).uuid
    event_template_name = 'my-event-template'
    event_keywords = 'TemplateForEventHappened'
    event_template = '${EVENT_NAME} IN ${EVENT_NAMESPACE}' \
                     'keyword1:ThisWordIsKeyWord' \
                     'keyword2:TemplateForEventHappened' \
                     'keyword3{PARAM_EVENT_SUBSCRIPTION_UUID}' \
                     '(Using for template changes email check)'
    event_template_uuid = zwt_ops.create_sns_text_template(event_template_name,
                                                           application_platform_type,
                                                           event_template,
                                                           default_template=True).uuid
    # test update text template
    # Promote the alarm template to default so subsequent alarm mails use it.
    zwt_ops.update_sns_text_template(alarm_template_uuid, description='this is a new description',
                                     default_template=True)
    cond = res_ops.gen_query_conditions('uuid', '=', alarm_template_uuid)
    inv = res_ops.query_resource(res_ops.SNS_TEXT_TEMPLATE, cond)[0]
    if inv.defaultTemplate == False or inv.description != 'this is a new description':
        test_util.test_fail('change template fail')
    # Trigger the data-volume-template alarms; their mails should now be
    # rendered with the custom template.
    for i in range(threshold_3):
        data_volume_template = volume.create_template(bs_uuid_list,
                                                      name="vol_temp_for_volume_test_" + str(i))
        test_dict.add_image(data_volume_template)
    # wait for reboot and send email
    time.sleep(30)
    # Tear everything down before the final mailbox verification.
    test_lib.lib_robot_cleanup(test_dict)
    zwt_ops.delete_sns_text_template(alarm_template_uuid)
    zwt_ops.delete_sns_text_template(event_template_uuid)
    for alarm_uuid in alarm_uuid_list:
        zwt_ops.delete_alarm(alarm_uuid)
    zwt_ops.delete_sns_topic(my_sns_topic_uuid)
    zwt_ops.delete_sns_application_endpoint(email_endpoint_uuid)
    zwt_ops.delete_sns_application_platform(email_platform_uuid)
    if zwt_ops.check_keywords_in_email(pop_server, username, password, alarm_keywords,
                                       data_volume_template_count_alarm_uuid) and zwt_ops.check_keywords_in_email(
            pop_server, username, password, alarm_keywords,
            data_volume_template_in_percent_alarm_uuid):
        test_util.test_pass('success check all keywords in the email')
    else:
        test_util.test_fail('cannt check all mail')
# Will be called only if exception happens in test().
def error_cleanup():
    """Tear down any resources created by test() when it fails midway.

    Invoked by the harness only if test() raised.  Each guard checks
    whether the corresponding resource was actually created (non-None /
    non-empty) before attempting deletion.

    Fix: the previous revision deleted event_template_uuid a second time
    at the end, attempting to remove an already-deleted template.
    """
    global test_dict, my_sns_topic_uuid, email_endpoint_uuid, email_platform_uuid, event_template_uuid, \
        alarm_template_uuid, alarm_uuid_list
    test_lib.lib_error_cleanup(test_dict)
    if alarm_uuid_list:
        for alarm_uuid in alarm_uuid_list:
            zwt_ops.delete_alarm(alarm_uuid)
    if event_template_uuid:
        zwt_ops.delete_sns_text_template(event_template_uuid)
    if alarm_template_uuid:
        zwt_ops.delete_sns_text_template(alarm_template_uuid)
    if my_sns_topic_uuid:
        zwt_ops.delete_sns_topic(my_sns_topic_uuid)
    if email_endpoint_uuid:
        zwt_ops.delete_sns_application_endpoint(email_endpoint_uuid)
    if email_platform_uuid:
        zwt_ops.delete_sns_application_platform(email_platform_uuid)
| {
"content_hash": "d6611edbba72337519ba69986eec4378",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 116,
"avg_line_length": 54.15483870967742,
"alnum_prop": 0.5450917321896592,
"repo_name": "zstackorg/zstack-woodpecker",
"id": "5d39155b84cd5be893522e2c7dcfb1c149fab983",
"size": "16788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/multihosts/zwatch/test_sns_text_template_with_email.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "46522"
},
{
"name": "Makefile",
"bytes": "692"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "2891030"
},
{
"name": "Shell",
"bytes": "54266"
}
],
"symlink_target": ""
} |
from datatypes import Record
from datatypes import Zone
from request import Request
from helpers import parse
class Database:
    """Wrapper over one CloudKit Web Services database ('public' or
    'private') within a container; builds payloads and delegates HTTP
    work to Request.perform_request."""

    __logger = None  # optional logger forwarded to Request.perform_request
    container = None  # CloudKit container this database belongs to
    database_type = None  # database scope string, e.g. 'public' or 'private'
def __init__(
self,
container,
database_type,
logger=None
):
self.__logger = logger
self.container = container
self.database_type = database_type
# Accessing Records
def save_records(
self,
records,
auto_fetch=False,
force=False,
options=None
):
"""Save records to the database."""
# https://developer.apple.com/library/ios/documentation/DataManagement/Conceptual/CloutKitWebServicesReference/ModifyRecords/ModifyRecords.html#//apple_ref/doc/uid/TP40015240-CH2-SW9
operations = self.__create_modify_operations(
records,
auto_fetch,
force
)
payload = {
'operations': operations,
}
if options is not None:
payload.update(options)
result = Request.perform_request(
'POST',
self.container,
self.database_type,
'records/modify',
payload,
logger=self.__logger
)
if result.is_success is True:
objects = []
objects_json = parse(result.value, 'records')
for object_json in objects_json:
objects.append(Record(object_json))
result.value = objects
return result
    def __create_modify_operations(self, records, auto_fetch, force):
        """Build the JSON operations list for a records/modify request.

        Per record the operation type is: 'forceUpdate' when *force* is set,
        'update' when the record already carries a change tag, 'create' when
        it has a record type, else (with *auto_fetch*) an existence lookup
        decides.  Records resolving to no operation type are skipped.
        """
        operations = []
        for record in records:
            operation_type = None
            if force is True:
                operation_type = 'forceUpdate'
            elif record.record_change_tag is not None:
                operation_type = 'update'
            elif record.record_type is not None:
                operation_type = 'create'
            elif auto_fetch is True:
                # Fetch, and if record is returned,
                # append the change tag and update
                fetch_record_dict = {
                    'recordName': record.record_name,
                    'desiredKeys': None
                }
                # NOTE(review): fetch_records() appears to return a request
                # result object (and expects Record instances, not a raw
                # dict), so reading .record_change_tag directly off it looks
                # wrong and the `is not None` check seems to never fail --
                # confirm against Request/Record and fix.
                fetched_record = self.fetch_records([fetch_record_dict])
                if fetched_record is not None:
                    operation_type = 'update'
                    record.record_change_tag = fetched_record.record_change_tag
                else:
                    print """Record doesn't already exist and is
                        missing a record type!"""
            if operation_type is not None:
                operation = {
                    'operationType': operation_type,
                    'record': record.json()
                }
                operations.append(operation)
        return operations
def fetch_records(
self,
records=None,
record_type=None,
record_names=None,
references=None,
options=None
):
"""Fetch one or more records."""
# https://developer.apple.com/library/ios/documentation/DataManagement/Conceptual/CloutKitWebServicesReference/LookupRecords/LookupRecords.html#//apple_ref/doc/uid/TP40015240-CH6-SW2
json_records = self.__create_fetch_json_records(
records,
record_type,
record_names,
references
)
payload = {
'records': json_records,
}
if options is not None:
payload.update(options)
result = Request.perform_request(
'POST',
self.container,
self.database_type,
'records/lookup',
payload,
logger=self.__logger
)
if result.is_success is True:
objects = []
objects_json = parse(result.value, 'records')
for object_json in objects_json:
objects.append(Record(object_json))
result.value = objects
return result
def __create_fetch_json_records(
self,
records=None,
record_type=None,
record_names=None,
references=None
):
json_records = []
# Create JSON for records
if records is not None:
for record in records:
json_records.append(record.json())
# Create JSON for record names
if record_names is not None:
for record_name in record_names:
record = Record()
record.record_type = record_type
record.record_name = record_name
json_records.append(record.json())
# Create JSON for record names
if references is not None:
for reference in references:
record = Record()
record_name = reference.record_name
if record_name is not None and len(record_name) > 0:
record.record_name = record_name
json_records.append(record.json())
return json_records
def delete_records(self, records, force=False, options=None):
"""Delete one or more records."""
# https://developer.apple.com/library/ios/documentation/DataManagement/Conceptual/CloutKitWebServicesReference/ModifyRecords/ModifyRecords.html#//apple_ref/doc/uid/TP40015240-CH2-SW9
operation_type = 'delete'
if force is True:
operation_type = 'forceDelete'
operations = []
for record in records:
operation = {
'operationType': operation_type,
'record': record.json()
}
operations.append(operation)
payload = {
'operations': operations,
}
if options is not None:
payload.update(options)
result = Request.perform_request(
'POST',
self.container,
self.database_type,
'records/modify',
payload,
logger=self.__logger
)
if result.is_success is True:
objects = []
objects_json = parse(result.value, 'records')
for object_json in objects_json:
objects.append(Record(object_json))
result.value = objects
return result
def perform_query(self, query, continuation_marker=None, options=None):
"""Fetch records using a query."""
# https://developer.apple.com/library/ios/documentation/DataManagement/Conceptual/CloutKitWebServicesReference/QueryingRecords/QueryingRecords.html#//apple_ref/doc/uid/TP40015240-CH5-SW4
payload = {
'query': query.json(),
}
if continuation_marker is not None:
if options is None:
options = {}
options['continuationMarker'] = continuation_marker
if options is not None:
payload.update(options)
result = Request.perform_request(
'POST',
self.container,
self.database_type,
'records/query',
payload,
logger=self.__logger
)
if result.is_success is True:
objects = []
objects_json = parse(result.value, 'records')
for object_json in objects_json:
objects.append(Record(object_json))
continuation_marker = parse(result.value, 'continuationMarker')
result.value = (objects, continuation_marker)
return result
# Syncing Records
def fetch_changed_records(self, zone_id, options=None):
"""Fetch changed records in a given custom zone."""
# https://developer.apple.com/library/ios/documentation/DataManagement/Conceptual/CloutKitWebServicesReference/ChangeRecords/ChangeRecords.html#//apple_ref/doc/uid/TP40015240-CH7-SW1
if self.database_type == 'private':
return None
payload = {
'zoneID': zone_id,
}
if options is not None:
payload.update(options)
result = Request.perform_request(
'POST',
self.container,
'private',
'records/changes',
payload,
logger=self.__logger
)
if result.is_success is True:
objects = []
objects_json = parse(result.value, 'records')
for object_json in objects_json:
objects.append(Record(object_json))
more_coming = parse(result.value, 'moreComing')
reverse = parse(result.value, 'reverse')
sync_token = parse(result.value, 'syncToken')
result.value = (objects, more_coming, reverse, sync_token)
return result
# Accessing Record Zones
def save_record_zones(self, zone_ids):
"""Create one or more zones in the database."""
# https://developer.apple.com/library/ios/documentation/DataManagement/Conceptual/CloutKitWebServicesReference/ModifyZones/ModifyZones.html#//apple_ref/doc/uid/TP40015240-CH10-SW1
return self.__modify_record_zones(zone_ids, 'create')
def fetch_record_zones(self, zones):
"""Fetch one or more zones."""
# https://developer.apple.com/library/ios/documentation/DataManagement/Conceptual/CloutKitWebServicesReference/GettingZonesbyIdentifier/GettingZonesbyIdentifier.html#//apple_ref/doc/uid/TP40015240-CH22-SW1
zones_json = []
for zone in zones:
zones_json.append(zone.json())
payload = {
'zones': zones_json,
}
result = Request.perform_request(
'POST',
self.container,
self.database_type,
'zones/lookup',
payload,
logger=self.__logger
)
if result.is_success is True:
objects = []
objects_json = parse(result.value, 'zones')
for object_json in objects_json:
objects.append(Zone(object_json))
result.value = objects
return result
def fetch_all_record_zones(self):
"""Fetch all zones in the database."""
# https://developer.apple.com/library/ios/documentation/DataManagement/Conceptual/CloutKitWebServicesReference/GettingAllZones/GettingAllZones.html#//apple_ref/doc/uid/TP40015240-CH21-SW3
result = Request.perform_request(
'GET',
self.container,
self.database_type,
'zones/list',
logger=self.__logger
)
if result.is_success is True:
zones = []
objects_json = parse(result.value, 'zones')
for object_json in objects_json:
zones.append(Zone(object_json))
if len(zones) > 0:
result.value = self.fetch_record_zones(zones)
else:
result.value = []
return result
def delete_record_zones(self, zone_ids):
"""Delete the specified zones."""
# https://developer.apple.com/library/ios/documentation/DataManagement/Conceptual/CloutKitWebServicesReference/ModifyZones/ModifyZones.html#//apple_ref/doc/uid/TP40015240-CH10-SW1
return self.__modify_record_zones(zone_ids, 'delete')
def __modify_record_zones(self, zone_ids, operation_type):
# https://developer.apple.com/library/ios/documentation/DataManagement/Conceptual/CloutKitWebServicesReference/ModifyZones/ModifyZones.html#//apple_ref/doc/uid/TP40015240-CH10-SW1
operations = []
for zone_id in zone_ids:
operation = {
'operationType': operation_type,
'zone': zone_id.json()
}
operations.append(operation)
payload = {
'operations': operations,
}
result = Request.perform_request(
'POST',
self.container,
self.database_type,
'zones/modify',
payload,
logger=self.__logger
)
if result.is_success is True:
objects = []
objects_json = parse(result.value, 'zones')
for object_json in objects_json:
objects.append(Zone(object_json))
result.value = objects
return result
| {
"content_hash": "61709dd6bab57d97d13cc55124eb8610",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 213,
"avg_line_length": 32.212987012987014,
"alnum_prop": 0.5617642315755523,
"repo_name": "Baza207/CloudKitPy",
"id": "482867baab02aac256fb34f0a704ba7d16b1e967",
"size": "12564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudkitpy/database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78993"
}
],
"symlink_target": ""
} |
from google.cloud import notebooks_v1
async def sample_register_instance():
    """Register a notebook instance and wait for the long-running
    operation to finish, printing the response."""
    # Create a client
    client = notebooks_v1.NotebookServiceAsyncClient()
    # Initialize request argument(s)
    request = notebooks_v1.RegisterInstanceRequest(
        parent="parent_value",
        instance_id="instance_id_value",
    )
    # Make the request.
    # BUG FIX: the async client's method is a coroutine and must be
    # awaited to obtain the AsyncOperation -- the previous code called
    # .result() on the bare coroutine object.
    operation = await client.register_instance(request=request)
    print("Waiting for operation to complete...")
    # AsyncOperation.result() is itself a coroutine.
    response = await operation.result()
    # Handle the response
    print(response)
# [END notebooks_v1_generated_NotebookService_RegisterInstance_async]
| {
"content_hash": "627736d465fed342dbcea96416f2c4b5",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 25.625,
"alnum_prop": 0.7105691056910569,
"repo_name": "googleapis/python-notebooks",
"id": "c638b8d882ad28914c00b7df2ef73e460b5cd97c",
"size": "2013",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/notebooks_v1_generated_notebook_service_register_instance_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1752787"
},
{
"name": "Shell",
"bytes": "30669"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.http import HttpResponseBadRequest
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from debug_toolbar.panels.sql.forms import SQLSelectForm
@csrf_exempt
def sql_select(request):
    """Returns the output of the SQL SELECT statement"""
    form = SQLSelectForm(request.POST or None)
    # Guard clause: reject anything that does not validate.
    if not form.is_valid():
        return HttpResponseBadRequest("Form errors")
    raw_sql = form.cleaned_data["raw_sql"]
    query_params = form.cleaned_data["params"]
    cursor = form.cursor
    cursor.execute(raw_sql, query_params)
    columns = [col[0] for col in cursor.description]
    rows = cursor.fetchall()
    cursor.close()
    context = {
        "result": rows,
        "sql": form.reformat_sql(),
        "duration": form.cleaned_data["duration"],
        "headers": columns,
        "alias": form.cleaned_data["alias"],
    }
    # Using render_to_response avoids running global context processors.
    return render_to_response("debug_toolbar/panels/sql_select.html", context)
@csrf_exempt
def sql_explain(request):
    """Returns the output of the SQL EXPLAIN on the given query"""
    form = SQLSelectForm(request.POST or None)
    # Guard clause: reject anything that does not validate.
    if not form.is_valid():
        return HttpResponseBadRequest("Form errors")
    raw_sql = form.cleaned_data["raw_sql"]
    query_params = form.cleaned_data["params"]
    cursor = form.cursor
    if form.connection.vendor == "postgresql":
        # PostgreSQL supports ANALYZE, which runs the query for real timings.
        cursor.execute("EXPLAIN ANALYZE %s" % (raw_sql,), query_params)
    else:
        cursor.execute("EXPLAIN %s" % (raw_sql,), query_params)
    columns = [col[0] for col in cursor.description]
    rows = cursor.fetchall()
    cursor.close()
    context = {
        "result": rows,
        "sql": form.reformat_sql(),
        "duration": form.cleaned_data["duration"],
        "headers": columns,
        "alias": form.cleaned_data["alias"],
    }
    # Using render_to_response avoids running global context processors.
    return render_to_response("debug_toolbar/panels/sql_explain.html", context)
@csrf_exempt
def sql_profile(request):
    """Returns the output of running the SQL and getting the profiling statistics"""
    # NOTE(review): relies on SET PROFILING and
    # information_schema.profiling, which look MySQL-specific; other
    # backends fall into the except branch below -- confirm.
    form = SQLSelectForm(request.POST or None)
    if form.is_valid():
        sql = form.cleaned_data["raw_sql"]
        params = form.cleaned_data["params"]
        cursor = form.cursor
        result = None
        headers = None
        result_error = None
        try:
            cursor.execute("SET PROFILING=1")  # Enable profiling
            cursor.execute(sql, params)  # Execute SELECT
            cursor.execute("SET PROFILING=0")  # Disable profiling
            # The Query ID should always be 1 here but I'll subselect to get
            # the last one just in case...
            cursor.execute(
                """
                SELECT *
                FROM information_schema.profiling
                WHERE query_id = (
                    SELECT query_id
                    FROM information_schema.profiling
                    ORDER BY query_id DESC
                    LIMIT 1
                )
                """
            )
            headers = [d[0] for d in cursor.description]
            result = cursor.fetchall()
        except Exception:
            # Deliberate broad catch: any failure (unsupported backend,
            # missing privileges, bad SQL) degrades to an error message
            # in the panel instead of a 500.
            result_error = "Profiling is either not available or not supported by your database."
        cursor.close()
        context = {
            "result": result,
            "result_error": result_error,
            "sql": form.reformat_sql(),
            "duration": form.cleaned_data["duration"],
            "headers": headers,
            "alias": form.cleaned_data["alias"],
        }
        # Using render_to_response avoids running global context processors.
        return render_to_response("debug_toolbar/panels/sql_profile.html", context)
    return HttpResponseBadRequest("Form errors")
| {
"content_hash": "b40e0a8dde26c8ec2d159c9acaea81d4",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 97,
"avg_line_length": 35.4054054054054,
"alnum_prop": 0.6040712468193384,
"repo_name": "mvaled/sentry",
"id": "f91f0c7b8e689d66ff3d2eab9b6497ca2b127145",
"size": "3930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/debug_toolbar/panels/sql/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
"""Support for the Swedish weather institute weather service."""
import asyncio
from datetime import timedelta
import logging
from typing import Dict, List
import aiohttp
import async_timeout
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION, ATTR_FORECAST_PRECIPITATION, ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW, ATTR_FORECAST_TIME, WeatherEntity)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from homeassistant.util import Throttle, slugify
from .const import ATTR_SMHI_CLOUDINESS, ENTITY_ID_SENSOR_FORMAT
# This platform requires the smhi component to be set up first.
DEPENDENCIES = ['smhi']
_LOGGER = logging.getLogger(__name__)
# Used to map condition from API results
# (Home Assistant condition name -> list of SMHI symbol codes).
CONDITION_CLASSES = {
    'cloudy': [5, 6],
    'fog': [7],
    'hail': [],
    'lightning': [21],
    'lightning-rainy': [11],
    'partlycloudy': [3, 4],
    'pouring': [10, 20],
    'rainy': [8, 9, 18, 19],
    'snowy': [15, 16, 17, 25, 26, 27],
    'snowy-rainy': [12, 13, 14, 22, 23, 24],
    'sunny': [1, 2],
    'windy': [],
    'windy-variant': [],
    'exceptional': [],
}
# 5 minutes between retrying connect to API again
RETRY_TIMEOUT = 5*60
# Minimum refresh interval enforced by @Throttle on async_update.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=31)
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Do nothing; smhi is set up via config entries only.

    This legacy platform entry point exists solely so that an accidental
    ``smhi`` platform mention in the YAML config is ignored instead of
    raising an error.
    """
async def async_setup_entry(
        hass: HomeAssistant, config_entry: ConfigEntry,
        config_entries) -> bool:
    """Add a weather entity from map location."""
    # NOTE(review): despite its name, ``config_entries`` is invoked
    # below as the add-entities callback supplied by Home Assistant --
    # confirm against the caller.
    location = config_entry.data
    # Slugified name becomes part of the entity_id.
    name = slugify(location[CONF_NAME])
    session = aiohttp_client.async_get_clientsession(hass)
    entity = SmhiWeather(
        location[CONF_NAME], location[CONF_LATITUDE], location[CONF_LONGITUDE],
        session=session)
    entity.entity_id = ENTITY_ID_SENSOR_FORMAT.format(name)
    # Register the entity; True requests an immediate first update.
    config_entries([entity], True)
    return True
class SmhiWeather(WeatherEntity):
    """Representation of a weather entity backed by the SMHI API."""

    def __init__(self, name: str, latitude: str,
                 longitude: str,
                 session: aiohttp.ClientSession = None) -> None:
        """Initialize the SMHI weather entity.

        name -- friendly name for the entity
        latitude/longitude -- position used for the forecast lookup
        session -- shared aiohttp session supplied by Home Assistant
        """
        from smhi import Smhi
        self._name = name
        self._latitude = latitude
        self._longitude = longitude
        # List of forecasts; index 0 is the current conditions.
        self._forecasts = None
        # Consecutive failed update attempts (resets on success).
        self._fail_count = 0
        # The smhi library takes longitude first, then latitude.
        self._smhi_api = Smhi(self._longitude, self._latitude, session=session)

    @property
    def unique_id(self) -> str:
        """Return a unique id built from the coordinates."""
        return '{}, {}'.format(self._latitude, self._longitude)

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def async_update(self) -> None:
        """Refresh the forecast data from SMHI weather API."""
        from smhi.smhi_lib import SmhiForecastException

        def fail():
            """Postpone updates: schedule a retry, giving up after 3 tries."""
            self._fail_count += 1
            if self._fail_count < 3:
                # BUG FIX: pass the callback itself.  Previously
                # ``self.retry_update()`` was evaluated here, handing
                # async_call_later an already-created (and never
                # awaited) coroutine, so the retry never executed.
                self.hass.helpers.event.async_call_later(
                    RETRY_TIMEOUT, self.retry_update)
        try:
            with async_timeout.timeout(10, loop=self.hass.loop):
                self._forecasts = await self.get_weather_forecast()
                self._fail_count = 0
        except (asyncio.TimeoutError, SmhiForecastException):
            _LOGGER.error(
                "Failed to connect to SMHI API, retry in 5 minutes")
            fail()

    async def retry_update(self, _now=None) -> None:
        """Retry refresh weather forecast.

        ``_now`` is the timestamp that ``async_call_later`` passes to
        its callback; it is unused.
        """
        # BUG FIX: the coroutine must be awaited (it was previously
        # created and discarded), and the throttle must be bypassed --
        # otherwise the retry is silently skipped because the failed
        # attempt already ran within MIN_TIME_BETWEEN_UPDATES.
        await self.async_update(no_throttle=True)

    async def get_weather_forecast(self) -> List:
        """Return the current forecasts from SMHI API."""
        return await self._smhi_api.async_get_forecast()

    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return self._name

    @property
    def temperature(self) -> int:
        """Return the temperature."""
        if self._forecasts is not None:
            return self._forecasts[0].temperature
        return None

    @property
    def temperature_unit(self) -> str:
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def humidity(self) -> int:
        """Return the humidity."""
        if self._forecasts is not None:
            return self._forecasts[0].humidity
        return None

    @property
    def wind_speed(self) -> float:
        """Return the wind speed."""
        if self._forecasts is not None:
            # Convert from m/s to km/h
            return round(self._forecasts[0].wind_speed * 18 / 5)
        return None

    @property
    def wind_bearing(self) -> int:
        """Return the wind bearing."""
        if self._forecasts is not None:
            return self._forecasts[0].wind_direction
        return None

    @property
    def visibility(self) -> float:
        """Return the visibility."""
        if self._forecasts is not None:
            return self._forecasts[0].horizontal_visibility
        return None

    @property
    def pressure(self) -> int:
        """Return the pressure."""
        if self._forecasts is not None:
            return self._forecasts[0].pressure
        return None

    @property
    def cloudiness(self) -> int:
        """Return the cloudiness."""
        if self._forecasts is not None:
            return self._forecasts[0].cloudiness
        return None

    @property
    def condition(self) -> str:
        """Return the weather condition mapped from the SMHI symbol."""
        if self._forecasts is None:
            return None
        return next((
            k for k, v in CONDITION_CLASSES.items()
            if self._forecasts[0].symbol in v), None)

    @property
    def attribution(self) -> str:
        """Return the attribution."""
        return 'Swedish weather institute (SMHI)'

    @property
    def forecast(self) -> List:
        """Return the forecast, skipping the current-conditions entry."""
        if self._forecasts is None or len(self._forecasts) < 2:
            return None
        data = []
        for forecast in self._forecasts[1:]:
            condition = next((
                k for k, v in CONDITION_CLASSES.items()
                if forecast.symbol in v), None)
            data.append({
                ATTR_FORECAST_TIME: forecast.valid_time.isoformat(),
                ATTR_FORECAST_TEMP: forecast.temperature_max,
                ATTR_FORECAST_TEMP_LOW: forecast.temperature_min,
                ATTR_FORECAST_PRECIPITATION:
                    round(forecast.total_precipitation, 1),
                ATTR_FORECAST_CONDITION: condition,
            })
        return data

    @property
    def device_state_attributes(self) -> Dict:
        """Return SMHI specific attributes."""
        # BUG FIX: compare against None so a cloudiness of 0 percent is
        # still reported (0 is falsy).
        if self.cloudiness is not None:
            return {ATTR_SMHI_CLOUDINESS: self.cloudiness}
| {
"content_hash": "fd08b7377f581bcd21d71f92d6435b1d",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 79,
"avg_line_length": 30.32034632034632,
"alnum_prop": 0.6045117075956596,
"repo_name": "jamespcole/home-assistant",
"id": "fc3399f755ccc5bdbc32ca45f85fc6c2873f9f92",
"size": "7004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/smhi/weather.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14822074"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class EffectiveNetworkSecurityGroup(Model):
"""Effective network security group.
:param network_security_group: The ID of network security group that is
applied.
:type network_security_group: :class:`SubResource
<azure.mgmt.network.v2017_06_01.models.SubResource>`
:param association:
:type association: :class:`EffectiveNetworkSecurityGroupAssociation
<azure.mgmt.network.v2017_06_01.models.EffectiveNetworkSecurityGroupAssociation>`
:param effective_security_rules: A collection of effective security rules.
:type effective_security_rules: list of
:class:`EffectiveNetworkSecurityRule
<azure.mgmt.network.v2017_06_01.models.EffectiveNetworkSecurityRule>`
"""
_attribute_map = {
'network_security_group': {'key': 'networkSecurityGroup', 'type': 'SubResource'},
'association': {'key': 'association', 'type': 'EffectiveNetworkSecurityGroupAssociation'},
'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'},
}
def __init__(self, network_security_group=None, association=None, effective_security_rules=None):
self.network_security_group = network_security_group
self.association = association
self.effective_security_rules = effective_security_rules
| {
"content_hash": "7cb76877fb1994c7b96c577350d92938",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 112,
"avg_line_length": 46.89655172413793,
"alnum_prop": 0.7308823529411764,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "09ab5b1a9ffe9d92d73b0f619163b84ee3ff6da5",
"size": "1834",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/effective_network_security_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
from xml.sax.saxutils import unescape
import html5lib
from html5lib.constants import namespaces
from html5lib.filters import sanitizer
from html5lib.serializer import HTMLSerializer
from bleach.encoding import force_unicode
from bleach.utils import alphabetize_attributes
#: List of allowed tags
ALLOWED_TAGS = [
'a',
'abbr',
'acronym',
'b',
'blockquote',
'code',
'em',
'i',
'li',
'ol',
'strong',
'ul',
]
#: Map of allowed attributes by tag
ALLOWED_ATTRIBUTES = {
'a': ['href', 'title'],
'abbr': ['title'],
'acronym': ['title'],
}
#: List of allowed styles
ALLOWED_STYLES = []
#: List of allowed protocols
ALLOWED_PROTOCOLS = ['http', 'https', 'mailto']
class Cleaner(object):
    """Cleaner for cleaning HTML fragments of malicious content

    This cleaner is a security-focused function whose sole purpose is to remove
    malicious content from a string such that it can be displayed as content in
    a web page.

    This cleaner is not designed to use to transform content to be used in
    non-web-page contexts.

    To use::

        from bleach.sanitizer import Cleaner

        cleaner = Cleaner()

        for text in all_the_yucky_things:
            sanitized = cleaner.clean(text)

    """
    def __init__(self, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES,
                 styles=ALLOWED_STYLES, protocols=ALLOWED_PROTOCOLS, strip=False,
                 strip_comments=True, filters=None):
        """Initializes a Cleaner

        :arg list tags: allowed list of tags; defaults to
            ``bleach.sanitizer.ALLOWED_TAGS``
        :arg dict attributes: allowed attributes; can be a callable, list or dict;
            defaults to ``bleach.sanitizer.ALLOWED_ATTRIBUTES``
        :arg list styles: allowed list of css styles; defaults to
            ``bleach.sanitizer.ALLOWED_STYLES``
        :arg list protocols: allowed list of protocols for links; defaults
            to ``bleach.sanitizer.ALLOWED_PROTOCOLS``
        :arg bool strip: whether or not to strip disallowed elements
        :arg bool strip_comments: whether or not to strip HTML comments
        :arg list filters: list of html5lib Filter classes to pass streamed content through

        .. seealso:: http://html5lib.readthedocs.io/en/latest/movingparts.html#filters

        .. Warning::

           Using filters changes the output of ``bleach.Cleaner.clean``.
           Make sure the way the filters change the output are secure.

        """
        self.tags = tags
        self.attributes = attributes
        self.styles = styles
        self.protocols = protocols
        self.strip = strip
        self.strip_comments = strip_comments
        self.filters = filters or []
        # Parse without namespaces so tag names compare directly against
        # the plain names in ``tags``.
        self.parser = html5lib.HTMLParser(namespaceHTMLElements=False)
        self.walker = html5lib.getTreeWalker('etree')
        self.serializer = HTMLSerializer(
            quote_attr_values='always',
            omit_optional_tags=False,
            # Bleach has its own sanitizer, so don't use the html5lib one
            sanitize=False,
            # Bleach sanitizer alphabetizes already, so don't use the html5lib one
            alphabetical_attributes=False,
        )
    def clean(self, text):
        """Cleans text and returns sanitized result as unicode

        :arg str text: text to be cleaned

        :returns: sanitized text as unicode

        """
        # Empty/None input short-circuits to an empty unicode string.
        if not text:
            return u''
        text = force_unicode(text)
        dom = self.parser.parseFragment(text)
        # The sanitizing filter wraps the tree walker; serialization below
        # pulls tokens through it.
        filtered = BleachSanitizerFilter(
            source=self.walker(dom),
            # Bleach-sanitizer-specific things
            attributes=self.attributes,
            strip_disallowed_elements=self.strip,
            strip_html_comments=self.strip_comments,
            # html5lib-sanitizer things
            allowed_elements=self.tags,
            allowed_css_properties=self.styles,
            allowed_protocols=self.protocols,
            allowed_svg_properties=[],
        )
        # Apply any filters after the BleachSanitizerFilter
        for filter_class in self.filters:
            filtered = filter_class(source=filtered)
        return self.serializer.render(filtered)
def attribute_filter_factory(attributes):
    """Generates attribute filter function for the given attributes value

    The attributes value can take one of several shapes. This returns a filter
    function appropriate to the attributes value. One nice thing about this is
    that there's less if/then shenanigans in the ``allow_token`` method.

    """
    # A callable is already a filter function; hand it back unchanged.
    if callable(attributes):
        return attributes

    # A plain list allows the same attribute names on every tag.
    if isinstance(attributes, list):
        def _attr_filter(tag, attr, value):
            return attr in attributes
        return _attr_filter

    # A dict maps tag -> rule, where each rule is a callable or a list of
    # allowed attribute names; '*' is a wildcard fallback for all tags.
    if isinstance(attributes, dict):
        def _attr_filter(tag, attr, value):
            if tag in attributes:
                rule = attributes[tag]
                if callable(rule):
                    return rule(tag, attr, value)
                if attr in rule:
                    return True
            if '*' in attributes:
                rule = attributes['*']
                if callable(rule):
                    return rule(tag, attr, value)
                return attr in rule
            return False
        return _attr_filter

    raise ValueError('attributes needs to be a callable, a list or a dict')
class BleachSanitizerFilter(sanitizer.Filter):
    """html5lib Filter that sanitizes text

    This filter can be used anywhere html5lib filters can be used.

    The only code change relative to the previous revision is that the
    regular-expression literals in ``sanitize_css`` are now raw strings:
    the non-raw versions relied on Python passing unrecognized escapes
    (``"\\s"``, ``"\\w"``, ...) through unchanged, which raises a
    DeprecationWarning on Python 3.6+ and is slated to become a
    SyntaxError.  The compiled patterns are byte-for-byte identical.
    """
    def __init__(self, source, attributes=ALLOWED_ATTRIBUTES,
                 strip_disallowed_elements=False, strip_html_comments=True,
                 **kwargs):
        """Creates a BleachSanitizerFilter instance

        :arg Treewalker source: stream
        :arg dict attributes: allowed attributes; can be a callable, list or dict;
            defaults to ``bleach.sanitizer.ALLOWED_ATTRIBUTES``
        :arg bool strip_disallowed_elements: whether or not to strip disallowed
            elements
        :arg bool strip_html_comments: whether or not to strip HTML comments
        :arg kwargs: remaining arguments (``allowed_elements``,
            ``allowed_css_properties``, ``allowed_protocols``, ...) are
            forwarded to ``html5lib.filters.sanitizer.Filter``

        """
        self.attr_filter = attribute_filter_factory(attributes)
        self.strip_disallowed_elements = strip_disallowed_elements
        self.strip_html_comments = strip_html_comments
        return super(BleachSanitizerFilter, self).__init__(source, **kwargs)
    def sanitize_token(self, token):
        """Sanitize a token either by HTML-encoding or dropping.

        Unlike sanitizer.Filter, allowed_attributes can be a dict of {'tag':
        ['attribute', 'pairs'], 'tag': callable}.

        Here callable is a function with two arguments of attribute name and
        value. It should return true of false.

        Also gives the option to strip tags instead of encoding.

        """
        token_type = token['type']
        if token_type in ['StartTag', 'EndTag', 'EmptyTag']:
            if token['name'] in self.allowed_elements:
                return self.allow_token(token)
            elif self.strip_disallowed_elements:
                # Drop the disallowed tag entirely.
                pass
            else:
                if 'data' in token:
                    # Alphabetize the attributes before calling .disallowed_token()
                    # so that the resulting string is stable
                    token['data'] = alphabetize_attributes(token['data'])
                return self.disallowed_token(token)
        elif token_type == 'Comment':
            if not self.strip_html_comments:
                return token
        else:
            # Character/SpaceCharacters tokens etc. pass through untouched.
            return token
    def allow_token(self, token):
        """Handles the case where we're allowing the tag"""
        if 'data' in token:
            # Loop through all the attributes and drop the ones that are not
            # allowed, are unsafe or break other rules. Additionally, fix
            # attribute values that need fixing.
            #
            # At the end of this loop, we have the final set of attributes
            # we're keeping.
            attrs = {}
            for namespaced_name, val in token['data'].items():
                namespace, name = namespaced_name
                # Drop attributes that are not explicitly allowed
                #
                # NOTE(willkg): We pass in the attribute name--not a namespaced
                # name.
                if not self.attr_filter(token['name'], name, val):
                    continue
                # Look at attributes that have uri values
                if namespaced_name in self.attr_val_is_uri:
                    # Strip control characters and whitespace before checking
                    # the protocol (the octal escapes below are interpreted
                    # identically by Python and the regex engine).
                    val_unescaped = re.sub(
                        "[`\000-\040\177-\240\s]+",
                        '',
                        unescape(val)).lower()
                    # Remove replacement characters from unescaped characters.
                    val_unescaped = val_unescaped.replace("\ufffd", "")
                    # Drop attributes with uri values that have protocols that
                    # aren't allowed
                    if (re.match(r'^[a-z0-9][-+.a-z0-9]*:', val_unescaped) and
                            (val_unescaped.split(':')[0] not in self.allowed_protocols)):
                        continue
                # Drop values in svg attrs with non-local IRIs
                if namespaced_name in self.svg_attr_val_allows_ref:
                    new_val = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                     ' ',
                                     unescape(val))
                    new_val = new_val.strip()
                    if not new_val:
                        continue
                    else:
                        # Replace the val with the unescaped version because
                        # it's a iri
                        val = new_val
                # Drop href and xlink:href attr for svg elements with non-local IRIs
                if (None, token['name']) in self.svg_allow_local_href:
                    if namespaced_name in [(None, 'href'), (namespaces['xlink'], 'href')]:
                        if re.search(r'^\s*[^#\s]', val):
                            continue
                # If it's a style attribute, sanitize it
                if namespaced_name == (None, u'style'):
                    val = self.sanitize_css(val)
                # At this point, we want to keep the attribute, so add it in
                attrs[namespaced_name] = val
            token['data'] = alphabetize_attributes(attrs)
        return token
    def sanitize_css(self, style):
        """Sanitizes css in style tags"""
        # disallow urls
        style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
        # gauntlet
        # Validate the css in the style tag and if it's not valid, then drop
        # the whole thing.
        parts = style.split(';')
        gauntlet = re.compile(
            r"""^([-/:,#%.'"\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'\s*|"[\s\w]+"|\([\d,%\.\s]+\))*$"""
        )
        for part in parts:
            if not gauntlet.match(part):
                return ''
        if not re.match(r"^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
            return ''
        clean = []
        for prop, value in re.findall(r'([-\w]+)\s*:\s*([^:;]*)', style):
            if not value:
                continue
            if prop.lower() in self.allowed_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.lower() in self.allowed_svg_properties:
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
| {
"content_hash": "9be675790304376caaf6e50e4429282a",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 92,
"avg_line_length": 32.90217391304348,
"alnum_prop": 0.5678064089857945,
"repo_name": "nitin-cherian/LifeLongLearning",
"id": "539711ac10faf92384fa04776bd16f1d5c5c5558",
"size": "12108",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/bleach/sanitizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32365"
},
{
"name": "CSS",
"bytes": "10259"
},
{
"name": "HTML",
"bytes": "55977"
},
{
"name": "JavaScript",
"bytes": "7368910"
},
{
"name": "Jupyter Notebook",
"bytes": "768879"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "17502534"
},
{
"name": "Shell",
"bytes": "7751"
},
{
"name": "Smarty",
"bytes": "30663"
}
],
"symlink_target": ""
} |
import MySQLdb
import papercut.settings
settings = papercut.settings.CONF()
class Papercut_Auth:
    """
    Authentication backend interface
    """
    def __init__(self):
        # One MySQL connection per backend instance; credentials come
        # from the papercut settings module.
        self.conn = MySQLdb.connect(host=settings.dbhost, db=settings.dbname, user=settings.dbuser, passwd=settings.dbpass)
        self.cursor = self.conn.cursor()
    def is_valid_user(self, username, password):
        """Return 1 when username/password match a row in
        papercut_groups_auth, else 0 (logging the failure reason)."""
        # SECURITY FIX: the username was previously interpolated
        # directly into the SQL string with %-formatting, allowing SQL
        # injection.  Use a parameterized query so the driver escapes
        # the value.
        stmt = """
            SELECT
                password
            FROM
                papercut_groups_auth
            WHERE
                username=%s
            """
        num_rows = self.cursor.execute(stmt, (username,))
        if num_rows == 0 or num_rows is None:
            settings.logEvent('Error - Authentication failed for username \'%s\' (user not found)' % (username))
            return 0
        db_password = self.cursor.fetchone()[0]
        # NOTE(review): passwords appear to be stored and compared in
        # plain text -- consider hashing; behavior left unchanged here.
        if db_password != password:
            settings.logEvent('Error - Authentication failed for username \'%s\' (incorrect password)' % (username))
            return 0
        else:
            return 1
| {
"content_hash": "c46cf6ae90d87ef188745977505e103c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 123,
"avg_line_length": 32.26470588235294,
"alnum_prop": 0.5587967183226983,
"repo_name": "jgrassler/papercut",
"id": "26913858e40f35678c631d07b3f94bc6be1c1db0",
"size": "1178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "papercut/auth/mysql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "248023"
},
{
"name": "Shell",
"bytes": "1892"
}
],
"symlink_target": ""
} |
import socket
import logging as log
import hashlib
import struct
# Hixie-era frame delimiters: each text frame is sent as
# 0x00 <payload> 0xFF.
STARTBYTE = '\x00'
ENDBYTE = '\xff'
# Pieces of the HTTP handshake response; the %s slots are filled with
# the client-supplied Origin / Host values in WebSocket.__init__.
HTTP_101_RESPONSE = 'HTTP/1.1 101 Web Socket Protocol Handshake\x0D\x0A'
HTTP_UPGRADE = 'Upgrade: WebSocket\x0D\x0A'
HTTP_CONNECTION = 'Connection: Upgrade\x0D\x0A'
# Draft-75 style headers (no security handshake).
HTTP_ORIGIN = 'WebSocket-Origin: %s\x0D\x0A'
HTTP_LOCATION = 'WebSocket-Location: ws://%s\x0D\x0A'
HTTP_PROTOCOL = 'WebSocket-Protocol: sample\x0D\x0A'
# Draft-76 ("Sec-") style headers used when the client sends
# Sec-WebSocket-Key1/Key2.
HTTP_SEC_ORIGIN = 'Sec-WebSocket-Origin: %s\x0D\x0A'
HTTP_SEC_LOCATION = 'Sec-WebSocket-Location: ws://%s\x0D\x0A'
HTTP_SEC_PROTOCOL = 'Sec-WebSocket-Protocol: sample\x0D\x0A'
HTTP_CRLF = '\x0D\x0A'
HTTP_CRLF_x2 = '\x0D\x0A\x0D\x0A'
class WebSocket:
    def __init__(self, socket):
        # Performs the legacy (hixie draft) WebSocket opening handshake:
        # reads the client's HTTP upgrade request from *socket*, parses
        # path/Host/Origin (and the optional Sec-WebSocket-Key* fields),
        # then sends the 101 response.  On any failure the socket is
        # closed and the exception re-raised.
        self.Socket = socket
        self.WebSocketBuffer = ''
        self.ApplicationPath = '/'
        self.Host = None
        self.Origin = None
        self.SecurityResponse = ''
        # Set to True by ParseHttpHeader when the client sent
        # Sec-WebSocket-Key1/Key2 (draft-76 handshake).
        self.WebSocketSecurityRequired = False
        try:
            httpHeader = self.Socket.recv(4096)
            print httpHeader
            self.ParseHttpHeader(httpHeader)
            if self.Origin != None and self.Host != None:
                #WebSocket-Origin = Origin parameter of HTTP request
                #WebSocket-Location = Host + resource request parameters of HTTP request
                log.info("WebSocket sending HTTP response to client")
                if self.WebSocketSecurityRequired:
                    # Draft-76: Sec-* headers plus the 16-byte challenge
                    # response computed in ParseHttpHeader.
                    self.Socket.send(HTTP_101_RESPONSE)
                    self.Socket.send(HTTP_UPGRADE)
                    self.Socket.send(HTTP_CONNECTION)
                    self.Socket.send(HTTP_SEC_ORIGIN % self.Origin)
                    self.Socket.send(HTTP_SEC_LOCATION % (self.Host + self.ApplicationPath))
                    self.Socket.send(HTTP_SEC_PROTOCOL)
                    self.Socket.send(HTTP_CRLF)
                    self.Socket.send( self.SecurityResponse )
                else:
                    # Draft-75: plain WebSocket-* headers, no challenge.
                    self.Socket.send(HTTP_101_RESPONSE)
                    self.Socket.send(HTTP_UPGRADE)
                    self.Socket.send(HTTP_CONNECTION)
                    self.Socket.send(HTTP_ORIGIN % self.Origin)
                    self.Socket.send(HTTP_LOCATION % (self.Host + self.ApplicationPath))
                    self.Socket.send(HTTP_PROTOCOL)
                    self.Socket.send(HTTP_CRLF)
            else:
                log.info("WebSocket could not parse HTTP header")
                raise Exception("WebSocket could not parse HTTP header")
        except Exception as ex:
            log.info("WebSocket could not complete the HTTP handshake to establish a web socket connection")
            log.info(ex)
            self.Close()
            raise ex
            #raise Exception("WebSocket could not complete the HTTP handshake to establish a web socket connection")
def ParseHttpHeader(self, header):
appNameStartIndex = header.find("GET /")
if appNameStartIndex != -1:
appNameEndIndex = header.find(" HTTP/1.")
if appNameEndIndex != -1:
appPath = header[appNameStartIndex + 4:appNameEndIndex]
self.ApplicationPath = appPath
log.info("Application Path requested by WebSocket connection: %s" % (appPath))
hostStartIndex = header.find("Host: ")
if hostStartIndex != -1:
hostEndIndex = header.find("\r", hostStartIndex)
if hostEndIndex != -1:
host = header[hostStartIndex + 6 : hostEndIndex]
self.Host = host
log.info("Host requested by WebSocket connection: %s" % (host))
originStartIndex = header.find("Origin: ")
if originStartIndex != -1:
originEndIndex = header.find("\r", originStartIndex)
if originEndIndex != -1:
origin = header[originStartIndex + 8 : originEndIndex]
self.Origin = origin
log.info("Origin requested by WebSocket connection: %s" % (origin))
#Web Socket Security protocol
securityKey1 = self._ExtractField(header, "Sec-WebSocket-Key1: ")
if securityKey1 != None:
log.info("Sec-Websocket present, need to create Web Socket security response")
self.WebSocketSecurityRequired = True
securityKey2 = self._ExtractField(header, "Sec-WebSocket-Key2: ")
securityCode = header[-8:] #Last 8 bytes (64 bits) not including the terminating HTTP \r\n's
print 'Security Request: ', securityCode
self.SecurityResponse = self._CreateSecurityResponse(securityKey1, securityKey2, securityCode)
log.info("Created security response!")
def _ExtractField(self, header, name):
startIndex = header.find(name)
if startIndex != -1:
endIndex = header.find("\r", startIndex)
if endIndex != -1:
retVal = header[startIndex + len(name) : endIndex]
return retVal
return None
def _CreateSecurityResponse(self, key1, key2, code):
secKey1 = self._GetSecKeyValue(key1)
secKey2 = self._GetSecKeyValue(key2)
val = ""
val += struct.pack('>ii', secKey1, secKey2)
val += code
response = hashlib.md5(val).digest()
return response
def _GetSecKeyValue(self, key):
secKeyInts = '0'
spaceCount = 0
for char in key:
ordinal = ord(char)
if ordinal == 32:
spaceCount += 1
elif ordinal >= 48 and ordinal <= 57:
secKeyInts += char
secKeyInts = int(secKeyInts)
secKeyValue = 0
if spaceCount > 0:
secKeyValue = secKeyInts/spaceCount
print 'debug key: '
print key
print spaceCount
print secKeyInts
print secKeyValue
return secKeyValue
def Send(self, msg):
log.info(u'WebSocket sending data to client: %s' % (repr(msg)))
self.Socket.send(STARTBYTE + str(msg) + ENDBYTE)
#Will return a (possibly empty) list of commands,
#or None if the connection is suspected to be closed or an error occurs
def Recv(self):
webSocketCommands = []
try:
log.info('WebSocket waiting to receive data from client')
data = self.Socket.recv(4096)
if not data:
raise Exception("WebSocket client connection closed")
#Buffer incoming data
self.WebSocketBuffer += data
#Parse as many commands as we can from the data we've received, based on web socket protocol
bufferIndex = self.WebSocketBuffer.find(ENDBYTE)
while bufferIndex != -1:
command = self.WebSocketBuffer[:bufferIndex+1]
#will become '' if index+1 is out of range
self.WebSocketBuffer = self.WebSocketBuffer[bufferIndex+1:]
if command.find(STARTBYTE) == 0:
#strip protocol bytes from front and end of string
command = command[1:-1]
webSocketCommands.append(command)
log.info(u'WebSocket got command from client: %s' % (repr(command)))
else:
log.info(u'WebSocket got incorrectly formatted data from client: %s' % (repr(command)))
bufferIndex = self.WebSocketBuffer.find(ENDBYTE)
log.info('WebSocket got data from client: %s' % (repr(webSocketCommands)))
return webSocketCommands
except Exception as ex:
log.info("WebSocket got an exception while trying to receive from client socket")
return None
def Close(self):
log.info('WebSocket closing client socket')
self.Socket.close()
#Needed for use with select()
def fileno(self):
return self.Socket.fileno()
| {
"content_hash": "9170d5b47a10078a5f6ad18d9e4bc775",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 116,
"avg_line_length": 39.43127962085308,
"alnum_prop": 0.5598557692307692,
"repo_name": "fos/fos-legacy",
"id": "9c9d0fc2fcb37f074c6d0cf9ebab313268e82b5c",
"size": "8320",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scratch/very_scratch/server/example7/webSocket.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "7294"
},
{
"name": "Erlang",
"bytes": "2662"
},
{
"name": "Haskell",
"bytes": "1973"
},
{
"name": "JavaScript",
"bytes": "432354"
},
{
"name": "Python",
"bytes": "1231025"
}
],
"symlink_target": ""
} |
import logging
class Game(object):
    """Record describing one registered game server instance."""

    def __init__(self, stype, sid, sip, sport):
        # Constructor keeps the caller-side s-prefixed names while
        # exposing the plain attribute names used by GameManager.
        self.type, self.id = stype, sid
        self.ip, self.port = sip, sport
class GameManager(object):
    """Registry of running game servers, grouped by server type."""

    def __init__(self):
        # Maps server type -> list of Game records.
        self.gamemap = {}

    def AddGame(self, stype, sid, sip, sport):
        """Register a game server of type ``stype`` with the given id/ip/port."""
        logging.debug(u"AddGame() type:%d id:%d ip:%s port:%d", stype, sid, sip, sport)
        try:
            game = Game(stype, sid, sip, sport)
            # setdefault replaces the explicit existence check and
            # creates the per-type list on first registration.
            self.gamemap.setdefault(stype, []).append(game)
        except Exception as e:
            # BUG FIX: ``e.message`` was removed in Python 3 (and was
            # already deprecated in 2.6+); str(e) works everywhere.
            logging.error(u"AddGame() reason:%s", str(e))

    def RemoveGame(self, stype, sid):
        """Unregister the first server of type ``stype`` whose id is ``sid``."""
        # logging.warn is a deprecated alias of logging.warning.
        logging.warning(u"RemoveGame() type:%d id:%d ", stype, sid)
        gslist = self.gamemap.get(stype)
        if gslist:
            for index, gs in enumerate(gslist):
                if gs.id == sid:
                    gslist.pop(index)
                    break

    def GetGameMap(self):
        """Return the internal type -> [Game] mapping (not a copy)."""
        return self.gamemap
# Module-level singleton shared by importers of this module.
instance = GameManager()
| {
"content_hash": "3ec451f6598d9b74d326357c5b2e0ca1",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 87,
"avg_line_length": 27.435897435897434,
"alnum_prop": 0.5373831775700935,
"repo_name": "xiexiangwei/xGame",
"id": "c2359d6169c53cfc6ccb474b3ecd1ceaf4f31fba",
"size": "1086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gamecenter/gamemanager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "124202"
}
],
"symlink_target": ""
} |
from twisted.internet.defer import inlineCallbacks
from twisted.names import client
from twisted.python.modules import getModule
from twisted.trial import unittest
from twistedcaldav.stdconfig import config
from txdav.caldav.datastore.scheduling.ischedule import utils
from txdav.caldav.datastore.scheduling.ischedule.delivery import ScheduleViaISchedule
class CalDAV (unittest.TestCase):
    """
    txdav.caldav.datastore.scheduling.caldav tests
    """
    def tearDown(self):
        """
        By setting the resolver to None, it will be recreated next time a name
        lookup is done.
        """
        # Both twisted.names' global resolver and this package's debug
        # resolver are module-level state; reset them between tests.
        client.theResolver = None
        utils.DebugResolver = None
    @inlineCallbacks
    def test_matchCalendarUserAddress(self):
        """
        Make sure we do an exact comparison on EmailDomain
        """
        self.patch(config.Scheduling.iSchedule, "Enabled", True)
        self.patch(config.Scheduling.iSchedule, "RemoteServers", "")
        # Only mailtos:
        # Non-mailto calendar user addresses must never match.
        result = yield ScheduleViaISchedule.matchCalendarUserAddress("http://example.com/principal/user")
        self.assertFalse(result)
        # Need to setup a fake resolver
        # NOTE(review): the zone file "data/db.example.com" presumably
        # declares iSchedule records for example.com only -- verify the
        # fixture if these expectations change.
        module = getModule(__name__)
        dataPath = module.filePath.sibling("data")
        bindPath = dataPath.child("db.example.com")
        self.patch(config.Scheduling.iSchedule, "DNSDebug", bindPath.path)
        utils.DebugResolver = None
        utils._initResolver()
        result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user@example.com")
        self.assertTrue(result)
        result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user@example.org")
        self.assertFalse(result)
        result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user@example.org?subject=foobar")
        self.assertFalse(result)
        # A mailto with no domain part cannot match any server.
        result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user")
        self.assertFalse(result)
        # Test when not enabled
        # Clear the cached per-domain lookups so the disabled setting is
        # actually consulted again rather than served from cache.
        ScheduleViaISchedule.domainServerMap = {}
        self.patch(config.Scheduling.iSchedule, "Enabled", False)
        result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user@example.com")
        self.assertFalse(result)
| {
"content_hash": "29be20348a0ef5d2b33daa7bbaf461ed",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 110,
"avg_line_length": 39.40350877192982,
"alnum_prop": 0.7061442564559216,
"repo_name": "red-hood/calendarserver",
"id": "56c286d7c31a7779f98a521e16bd3d29b73a7ac7",
"size": "2853",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "txdav/caldav/datastore/scheduling/ischedule/test/test_delivery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1482"
},
{
"name": "CSS",
"bytes": "4214"
},
{
"name": "DIGITAL Command Language",
"bytes": "1234"
},
{
"name": "DTrace",
"bytes": "13143"
},
{
"name": "HTML",
"bytes": "36120"
},
{
"name": "JavaScript",
"bytes": "80248"
},
{
"name": "Makefile",
"bytes": "14429"
},
{
"name": "PLSQL",
"bytes": "12719"
},
{
"name": "PLpgSQL",
"bytes": "291431"
},
{
"name": "Python",
"bytes": "10537612"
},
{
"name": "R",
"bytes": "1091"
},
{
"name": "SQLPL",
"bytes": "6430"
},
{
"name": "Shell",
"bytes": "96975"
}
],
"symlink_target": ""
} |
"""
This is my module brief line.
This is a more complete paragraph documenting my module.
- A list item.
- Another list item.
This section can use any reST syntax.
"""
# Public constants of this example module; each is documented by the
# bare-string "attribute docstring" that follows its assignment.
A_CONSTANT = 1000
"""This is an important constant."""
YET_ANOTHER = {
    'this': 'that',
    'jam': 'eggs',
    'yet': {
        'things': [1, 2, 3, 'a'],
        'tuples': (A_CONSTANT, 4)
    }
}
"""Yet another public constant variable"""
def a_function(my_arg, another):
    """
    Multiply *my_arg* by the number of elements in *another*.

    This fuller description can include doctest, code blocks or any
    other reST structure.

    >>> a_function(10, [MyClass('a'), MyClass('b')])
    20

    :param int my_arg: The multiplier.
    :param another: A sequence whose length scales the result.
    :type another: A list of :class:`MyClass`
    :return: ``my_arg`` times ``len(another)``.
    :rtype: int
    """
    return len(another) * my_arg
class MyClass(object):
    """
    This is the brief of my main class.

    A more general description of what the class does.

    :param int param1: The first parameter of my class.
    :param param2: The second one.
    :type param2: int or float
    :var my_attribute: Just an instance attribute.
    :raises TypeError: if param2 is not None.
    """

    class_attribute = 625
    """This is a class attribute."""

    def __init__(self, param1, param2=None):
        self.param1 = param1
        # As documented: any non-None second argument is rejected.
        if param2 is not None:
            raise TypeError()
        self.param2 = param2
        self.my_attribute = 100

    def my_method(self, param1, param2):
        """
        The brief of this method.

        :param int param1: The value to repeat.
        :param list param2: Its length sets the number of repetitions.
        :return: ``param1`` repeated ``len(param2)`` times.
        :rtype: list of int
        """
        return [param1 for _ in param2]
class AnotherClass(MyClass):
    """
    A trivial subclass of :class:`MyClass`.

    Its purpose is to show the generated inheritance diagram.
    See :class:`MyClass`.
    """
class MyException(Exception):
    """
    This is my custom exception.

    A more complete description of what the exception represents can go
    here, using as much detail as needed.
    """
# Explicit public API of this example module.
__all__ = [
    'A_CONSTANT',
    'YET_ANOTHER',
    'a_function',
    'MyClass',
    'AnotherClass',
    'MyException'
]
| {
"content_hash": "a8c0a728b50e91ec46e1b246ebb61f76",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 77,
"avg_line_length": 22.513513513513512,
"alnum_prop": 0.6198479391756703,
"repo_name": "carlos-jenkins/autoapi",
"id": "3889f0eee287e55150f13f5d96f58db0850d0be6",
"size": "2523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/documented.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23537"
}
],
"symlink_target": ""
} |
"""
Tests for the backported class:`str` class.
"""
from __future__ import absolute_import, unicode_literals, print_function
from future.builtins import *
from future import utils
from future.tests.base import unittest, expectedFailurePY2
import os
TEST_UNICODE_STR = u'ℝεα∂@ßʟ℮ ☂ℯṧт υηḯ¢☺ḓ℮'
class TestStr(unittest.TestCase):
def test_str(self):
self.assertFalse(str is bytes)
self.assertEqual(str('blah'), u'blah') # u'' prefix: Py3.3 and Py2 only
self.assertEqual(str(b'1234'), "b'1234'")
def test_bool_str(self):
s1 = str(u'abc')
s2 = u'abc'
s3 = str(u'')
s4 = u''
self.assertEqual(bool(s1), bool(s2))
self.assertEqual(bool(s3), bool(s4))
def test_os_path_join(self):
"""
Issue #15: can't os.path.join(u'abc', str(u'def'))
"""
self.assertEqual(os.path.join(u'abc', str(u'def')),
u'abc{0}def'.format(os.sep))
def test_str_encode_utf8(self):
b = str(TEST_UNICODE_STR).encode('utf-8')
self.assertTrue(isinstance(b, bytes))
self.assertFalse(isinstance(b, str))
s = b.decode('utf-8')
self.assertTrue(isinstance(s, str))
self.assertEqual(s, TEST_UNICODE_STR)
def test_str_encode_cp1251(self):
b1 = b'\xcd\xeb\xff'
s1 = str(b1, 'cp1251')
self.assertEqual(s1, u'Нля')
b2 = bytes(b'\xcd\xeb\xff')
s2 = str(b2, 'cp1251')
self.assertEqual(s2, u'Нля')
def test_str_encode_decode_with_py2_str_arg(self):
# Try passing a standard Py2 string (as if unicode_literals weren't imported)
b = str(TEST_UNICODE_STR).encode(utils.bytes_to_native_str(b'utf-8'))
self.assertTrue(isinstance(b, bytes))
self.assertFalse(isinstance(b, str))
s = b.decode(utils.bytes_to_native_str(b'utf-8'))
self.assertTrue(isinstance(s, str))
self.assertEqual(s, TEST_UNICODE_STR)
def test_str_encode_decode_big5(self):
a = u'Unicode string: \u5b54\u5b50'
self.assertEqual(str(a), a.encode('big5').decode('big5'))
def test_str_empty(self):
"""
str() -> u''
"""
self.assertEqual(str(), u'')
def test_str_iterable_of_ints(self):
self.assertEqual(str([65, 66, 67]), '[65, 66, 67]')
self.assertNotEqual(str([65, 66, 67]), 'ABC')
def test_str_str(self):
self.assertEqual(str('ABC'), u'ABC')
self.assertEqual(str('ABC'), 'ABC')
def test_str_is_str(self):
s = str(u'ABC')
self.assertTrue(str(s) is s)
self.assertEqual(repr(str(s)), "'ABC'")
def test_str_fromhex(self):
self.assertFalse(hasattr(str, 'fromhex'))
def test_str_hasattr_decode(self):
"""
This test tests whether hasattr(s, 'decode') is False, like it is on Py3.
Sometimes code (such as http.client in Py3.3) checks hasattr(mystring,
'decode') to determine if a string-like thing needs encoding. It would
be nice to have this return False so the string can be treated on Py2
like a Py3 string.
"""
s = str(u'abcd')
self.assertFalse(hasattr(s, 'decode'))
self.assertTrue(hasattr(s, 'encode'))
def test_isinstance_str(self):
self.assertTrue(isinstance(str('blah'), str))
def test_isinstance_str_subclass(self):
"""
Issue #89
"""
value = str(u'abc')
class Magic(str):
pass
self.assertTrue(isinstance(value, str))
self.assertFalse(isinstance(value, Magic))
def test_str_getitem(self):
s = str('ABCD')
self.assertNotEqual(s[0], 65)
self.assertEqual(s[0], 'A')
self.assertEqual(s[-1], 'D')
self.assertEqual(s[0:1], 'A')
self.assertEqual(s[:], u'ABCD')
@unittest.expectedFailure
def test_u_literal_creates_newstr_object(self):
"""
It would nice if the u'' or '' literal syntax could be coaxed
into producing our new str objects somehow ...
"""
s = u'ABCD'
self.assertTrue(isinstance(s, str))
self.assertFalse(repr(b).startswith('b'))
def test_repr(self):
s = str('ABCD')
self.assertFalse(repr(s).startswith('b'))
def test_str(self):
b = str('ABCD')
self.assertTrue(str(b), 'ABCD')
def test_str_setitem(self):
s = 'ABCD'
with self.assertRaises(TypeError):
s[0] = b'B'
def test_str_iteration(self):
s = str('ABCD')
for item in s:
self.assertFalse(isinstance(item, int))
self.assertTrue(isinstance(item, str))
self.assertNotEqual(list(s), [65, 66, 67, 68])
self.assertEqual(list(s), ['A', 'B', 'C', 'D'])
def test_str_plus_bytes(self):
s = str(u'ABCD')
b = b'EFGH'
# We allow this now:
# with self.assertRaises(TypeError):
# s + b
# str objects don't have an __radd__ method, so the following
# does not raise a TypeError. Is this a problem?
# with self.assertRaises(TypeError):
# b + s
# Now with our custom bytes object:
b2 = bytes(b'EFGH')
with self.assertRaises(TypeError):
s + b2
with self.assertRaises(TypeError):
b2 + s
def test_str_plus_str(self):
s1 = str('ABCD')
s2 = s1 + s1
self.assertEqual(s2, u'ABCDABCD')
self.assertTrue(isinstance(s2, str))
s3 = s1 + u'ZYXW'
self.assertEqual(s3, 'ABCDZYXW')
self.assertTrue(isinstance(s3, str))
s4 = 'ZYXW' + s1
self.assertEqual(s4, 'ZYXWABCD')
self.assertTrue(isinstance(s4, str))
def test_str_join_str(self):
s = str(' * ')
strings = ['AB', 'EFGH', 'IJKL', TEST_UNICODE_STR]
result = s.join(strings)
self.assertEqual(result, 'AB * EFGH * IJKL * ' + TEST_UNICODE_STR)
self.assertTrue(isinstance(result, str))
def test_str_join_bytes(self):
s = str('ABCD')
byte_strings1 = [b'EFGH', u'IJKL']
# We allow this on Python 2 for compatibility with old libraries:
if utils.PY2:
self.assertEqual(s.join(byte_strings1), u'EFGHABCDIJKL')
byte_strings2 = [bytes(b'EFGH'), u'IJKL']
with self.assertRaises(TypeError):
s.join(byte_strings2)
def test_str_join_staticmethod(self):
"""
Issue #33
"""
c = str.join('-', ['a', 'b'])
self.assertEqual(c, 'a-b')
self.assertEqual(type(c), str)
def test_str_join_staticmethod_workaround_1(self):
"""
Issue #33
"""
c = str('-').join(['a', 'b'])
self.assertEqual(c, 'a-b')
self.assertEqual(type(c), str)
def test_str_join_staticmethod_workaround_2(self):
"""
Issue #33
"""
c = str.join(str('-'), ['a', 'b'])
self.assertEqual(c, 'a-b')
self.assertEqual(type(c), str)
def test_str_replace(self):
s = str('ABCD')
c = s.replace('A', 'F')
self.assertEqual(c, 'FBCD')
self.assertTrue(isinstance(c, str))
with self.assertRaises(TypeError):
s.replace(bytes(b'A'), u'F')
with self.assertRaises(TypeError):
s.replace(u'A', bytes(b'F'))
def test_str_partition(self):
s1 = str('ABCD')
parts = s1.partition('B')
self.assertEqual(parts, ('A', 'B', 'CD'))
self.assertTrue(all([isinstance(p, str) for p in parts]))
s2 = str('ABCDABCD')
parts = s2.partition('B')
self.assertEqual(parts, ('A', 'B', 'CDABCD'))
def test_str_rpartition(self):
s2 = str('ABCDABCD')
parts = s2.rpartition('B')
self.assertEqual(parts, ('ABCDA', 'B', 'CD'))
self.assertTrue(all([isinstance(p, str) for p in parts]))
def test_str_contains_something(self):
s = str('ABCD')
self.assertTrue('A' in s)
if utils.PY2:
self.assertTrue(b'A' in s)
with self.assertRaises(TypeError):
bytes(b'A') in s
with self.assertRaises(TypeError):
65 in s # unlike bytes
self.assertTrue('AB' in s)
self.assertFalse(str([65, 66]) in s) # unlike bytes
self.assertFalse('AC' in s)
self.assertFalse('Z' in s)
def test_str_index(self):
s = str('ABCD')
self.assertEqual(s.index('B'), 1)
with self.assertRaises(TypeError):
s.index(67)
with self.assertRaises(TypeError):
s.index(bytes(b'C'))
def test_startswith(self):
s = str('abcd')
self.assertTrue(s.startswith('a'))
self.assertTrue(s.startswith(('a', 'd')))
self.assertTrue(s.startswith(str('ab')))
if utils.PY2:
# We allow this, because e.g. Python 2 os.path.join concatenates
# its arg with a byte-string '/' indiscriminately.
self.assertFalse(s.startswith(b'A'))
self.assertTrue(s.startswith(b'a'))
with self.assertRaises(TypeError) as cm:
self.assertFalse(s.startswith(bytes(b'A')))
with self.assertRaises(TypeError) as cm:
s.startswith((bytes(b'A'), bytes(b'B')))
with self.assertRaises(TypeError) as cm:
s.startswith(65)
def test_join(self):
sep = str('-')
self.assertEqual(sep.join('abcd'), 'a-b-c-d')
if utils.PY2:
sep.join(b'abcd')
with self.assertRaises(TypeError) as cm:
sep.join(bytes(b'abcd'))
def test_endswith(self):
s = str('abcd')
self.assertTrue(s.endswith('d'))
self.assertTrue(s.endswith(('b', 'd')))
self.assertTrue(s.endswith(str('cd')))
self.assertFalse(s.endswith(('A', 'B')))
if utils.PY2:
self.assertFalse(s.endswith(b'D'))
self.assertTrue(s.endswith((b'D', b'd')))
with self.assertRaises(TypeError) as cm:
s.endswith(65)
with self.assertRaises(TypeError) as cm:
s.endswith((bytes(b'D'),))
def test_split(self):
s = str('ABCD')
self.assertEqual(s.split('B'), ['A', 'CD'])
if utils.PY2:
self.assertEqual(s.split(b'B'), ['A', 'CD'])
with self.assertRaises(TypeError) as cm:
s.split(bytes(b'B'))
def test_rsplit(self):
s = str('ABCD')
self.assertEqual(s.rsplit('B'), ['A', 'CD'])
if utils.PY2:
self.assertEqual(s.rsplit(b'B'), ['A', 'CD'])
with self.assertRaises(TypeError) as cm:
s.rsplit(bytes(b'B'))
def test_eq_bytes(self):
s = str('ABCD')
b = bytes(b'ABCD')
self.assertNotEqual(s, b)
self.assertNotEqual(str(''), bytes(b''))
native_s = 'ABCD'
native_b = b'ABCD'
self.assertFalse(b == native_s)
self.assertTrue(b != native_s)
# Fails on Py2:
# self.assertNotEqual(native_s, b)
# with no obvious way to change this.
# For backward compatibility with broken string-handling code in
# Py2 libraries, we allow the following:
if utils.PY2:
self.assertTrue(native_b == s)
self.assertFalse(s != native_b)
def test_eq(self):
s = str('ABCD')
self.assertEqual('ABCD', s)
self.assertEqual(s, 'ABCD')
self.assertEqual(s, s)
self.assertTrue(u'ABCD' == s)
if utils.PY2:
self.assertTrue(b'ABCD' == s)
else:
self.assertFalse(b'ABCD' == s)
self.assertFalse(bytes(b'ABCD') == s)
def test_ne(self):
s = str('ABCD')
self.assertNotEqual('A', s)
self.assertNotEqual(s, 'A')
self.assertNotEqual(s, 5)
self.assertNotEqual(2.7, s)
self.assertNotEqual(s, ['A', 'B', 'C', 'D'])
if utils.PY2:
self.assertFalse(b'ABCD' != s)
else:
self.assertTrue(b'ABCD' != s)
self.assertTrue(bytes(b'ABCD') != s)
def test_cmp(self):
s = str(u'ABC')
with self.assertRaises(TypeError):
s > 3
with self.assertRaises(TypeError):
s < 1000
with self.assertRaises(TypeError):
s > b'XYZ'
with self.assertRaises(TypeError):
s < b'XYZ'
with self.assertRaises(TypeError):
s <= 3
with self.assertRaises(TypeError):
s >= int(3)
with self.assertRaises(TypeError):
s < 3.3
with self.assertRaises(TypeError):
s > (3.3 + 3j)
with self.assertRaises(TypeError):
s >= (1, 2)
with self.assertRaises(TypeError):
s <= [1, 2]
def test_mul(self):
s = str(u'ABC')
c = s * 4
self.assertTrue(isinstance(c, str))
self.assertEqual(c, u'ABCABCABCABC')
d = s * int(4)
self.assertTrue(isinstance(d, str))
self.assertEqual(d, u'ABCABCABCABC')
if utils.PY2:
e = s * long(4)
self.assertTrue(isinstance(e, str))
self.assertEqual(e, u'ABCABCABCABC')
with self.assertRaises(TypeError):
s * 3.3
with self.assertRaises(TypeError):
s * (3.3 + 3j)
def test_rmul(self):
s = str(u'XYZ')
c = 3 * s
self.assertTrue(isinstance(c, str))
self.assertEqual(c, u'XYZXYZXYZ')
d = s * int(3)
self.assertTrue(isinstance(d, str))
self.assertEqual(d, u'XYZXYZXYZ')
if utils.PY2:
e = long(3) * s
self.assertTrue(isinstance(e, str))
self.assertEqual(e, u'XYZXYZXYZ')
with self.assertRaises(TypeError):
3.3 * s
with self.assertRaises(TypeError):
(3.3 + 3j) * s
@unittest.skip('Fails on Python <= 2.7.6 due to string subclass slicing bug')
def test_slice(self):
"""
Do slices return newstr objects?
"""
s = str(u'abcd')
self.assertEqual(s[:2], u'ab')
self.assertEqual(type(s[:2]), str)
self.assertEqual(s[-2:], u'cd')
self.assertEqual(type(s[-2:]), str)
@unittest.skip('Fails on Python <= 2.7.6 due to string subclass slicing bug')
def test_subclassing(self):
"""
Can newstr be subclassed and do str methods then return instances of
the same class? (This is the Py3 behaviour).
"""
class SubClass(str):
pass
s = SubClass(u'abcd')
self.assertEqual(type(s), SubClass)
self.assertEqual(type(s + s), str)
self.assertEqual(type(s[0]), str)
self.assertEqual(type(s[:2]), str)
self.assertEqual(type(s.join([u'_', u'_', u'_'])), str)
def test_subclassing_2(self):
"""
Tests __new__ method in subclasses. Fails in versions <= 0.11.4
"""
class SubClass(str):
def __new__(cls, *args, **kwargs):
self = str.__new__(cls, *args, **kwargs)
assert type(self) == SubClass
return self
s = SubClass(u'abcd')
self.assertTrue(True)
# From Python 3.3: test_unicode.py
def checkequalnofix(self, result, object, methodname, *args):
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(type(realresult) is type(result))
# if the original is returned make sure that
# this doesn't happen with subclasses
if realresult is object:
class usub(str):
def __repr__(self):
return 'usub(%r)' % str.__repr__(self)
object = usub(object)
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(object is not realresult)
type2test = str
def test_maketrans_translate(self):
# these work with plain translate()
self.checkequalnofix('bbbc', 'abababc', 'translate',
{ord('a'): None})
self.checkequalnofix('iiic', 'abababc', 'translate',
{ord('a'): None, ord('b'): ord('i')})
self.checkequalnofix('iiix', 'abababc', 'translate',
{ord('a'): None, ord('b'): ord('i'), ord('c'): 'x'})
self.checkequalnofix('c', 'abababc', 'translate',
{ord('a'): None, ord('b'): ''})
self.checkequalnofix('xyyx', 'xzx', 'translate',
{ord('z'): 'yy'})
# this needs maketrans()
self.checkequalnofix('abababc', 'abababc', 'translate',
{'b': '<i>'})
tbl = self.type2test.maketrans({'a': None, 'b': '<i>'})
self.checkequalnofix('<i><i><i>c', 'abababc', 'translate', tbl)
# test alternative way of calling maketrans()
tbl = self.type2test.maketrans('abc', 'xyz', 'd')
self.checkequalnofix('xyzzy', 'abdcdcbdddd', 'translate', tbl)
self.assertRaises(TypeError, self.type2test.maketrans)
self.assertRaises(ValueError, self.type2test.maketrans, 'abc', 'defg')
self.assertRaises(TypeError, self.type2test.maketrans, 2, 'def')
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 2)
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def', 2)
self.assertRaises(ValueError, self.type2test.maketrans, {'xy': 2})
self.assertRaises(TypeError, self.type2test.maketrans, {(1,): 2})
self.assertRaises(TypeError, 'hello'.translate)
self.assertRaises(TypeError, 'abababc'.translate, 'abc', 'xyz')
@expectedFailurePY2
def test_multiple_inheritance(self):
"""
Issue #96 (for newstr instead of newobject)
"""
import collections
class Base(str):
pass
class Foo(Base, collections.Container):
def __contains__(self, item):
return False
@expectedFailurePY2
def test_with_metaclass_and_str(self):
"""
Issue #91 (for newstr instead of newobject)
"""
from future.utils import with_metaclass
class MetaClass(type):
pass
class TestClass(with_metaclass(MetaClass, str)):
pass
def test_surrogateescape_encoding(self):
"""
Tests whether surrogateescape encoding works correctly.
"""
pairs = [(u'\udcc3', b'\xc3'),
(u'\udcff', b'\xff')]
for (s, b) in pairs:
encoded = str(s).encode('utf-8', 'surrogateescape')
self.assertEqual(b, encoded)
self.assertTrue(isinstance(encoded, bytes))
self.assertEqual(s, encoded.decode('utf-8', 'surrogateescape'))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "9f74bc2d12ef1ef8ab0e0c2b967f9da2",
"timestamp": "",
"source": "github",
"line_count": 568,
"max_line_length": 85,
"avg_line_length": 33.53697183098591,
"alnum_prop": 0.5465903721980157,
"repo_name": "NcLang/vimrc",
"id": "7e37a62fca6cd5bbbb111055d5ee3a73071e38d7",
"size": "19107",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "sources_non_forked/YouCompleteMe/third_party/ycmd/third_party/python-future/tests/test_future/test_str.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "568"
},
{
"name": "CSS",
"bytes": "6320"
},
{
"name": "CoffeeScript",
"bytes": "1402"
},
{
"name": "Erlang",
"bytes": "3232"
},
{
"name": "GCC Machine Description",
"bytes": "525"
},
{
"name": "Go",
"bytes": "2239"
},
{
"name": "HTML",
"bytes": "134"
},
{
"name": "JavaScript",
"bytes": "1064"
},
{
"name": "Makefile",
"bytes": "8657"
},
{
"name": "Perl",
"bytes": "2705"
},
{
"name": "Python",
"bytes": "704814"
},
{
"name": "Ruby",
"bytes": "33390"
},
{
"name": "Shell",
"bytes": "9370"
},
{
"name": "TeX",
"bytes": "6193"
},
{
"name": "VimL",
"bytes": "3170590"
},
{
"name": "XSLT",
"bytes": "4217"
}
],
"symlink_target": ""
} |
"""
WSGI config for printer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
# Ensure Django can locate the project settings before the WSGI
# application object is created (no-op if already set by the server).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "printer.settings")
from django.core.wsgi import get_wsgi_application
# The WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| {
"content_hash": "94a0c6582a40d8360da9c865b504af99",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.785714285714285,
"alnum_prop": 0.7737789203084833,
"repo_name": "3DRPP/printer",
"id": "9c9157422a85e56ffe1b001f024e02e6bdbd1be0",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "printer/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "414299"
},
{
"name": "HTML",
"bytes": "79236"
},
{
"name": "JavaScript",
"bytes": "182918"
},
{
"name": "Python",
"bytes": "43362"
},
{
"name": "Shell",
"bytes": "1479"
}
],
"symlink_target": ""
} |
from .features import no_op, gradient, gaussian_filter, daisy, dsift
from .predefined import fast_daisy, fast_dsift
from .base import ndfeature, imgfeature
| {
"content_hash": "e96808692ca58ebca4423c5091c1aa2c",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 68,
"avg_line_length": 52,
"alnum_prop": 0.8012820512820513,
"repo_name": "jalabort/menpofast",
"id": "1850f7f0030770b575b66428c9db6a2570b849cd",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "menpofast/feature/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "2747"
},
{
"name": "Makefile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "118852"
}
],
"symlink_target": ""
} |
import sys
import bisect
import contextlib
import warnings
import pkg_resources
import numpy
from PyQt4.QtGui import (
QWidget, QButtonGroup, QGroupBox, QRadioButton, QSlider,
QDoubleSpinBox, QComboBox, QSpinBox, QListView,
QVBoxLayout, QHBoxLayout, QFormLayout, QSpacerItem, QSizePolicy,
QCursor, QIcon, QStandardItemModel, QStandardItem, QStyle,
QStylePainter, QStyleOptionFrame, QPixmap,
QApplication, QDrag
)
from PyQt4 import QtGui
from PyQt4.QtCore import (
Qt, QObject, QEvent, QSize, QModelIndex, QMimeData, QTimer
)
from PyQt4.QtCore import pyqtSignal as Signal, pyqtSlot as Slot
import Orange.data
from Orange import preprocess
from Orange.statistics import distribution
from Orange.preprocess import Continuize, ProjectPCA, \
ProjectCUR, Randomize as Random
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils.overlay import OverlayWidget
from Orange.widgets.utils.sql import check_sql_input
@contextlib.contextmanager
def blocked(qobj):
    """
    Context manager that blocks *qobj*'s Qt signals for the duration of
    the ``with`` block, restoring the previous blocking state on exit
    (even if the body raises). Yields *qobj* itself.
    """
    previous = qobj.signalsBlocked()
    qobj.blockSignals(True)
    try:
        yield qobj
    finally:
        qobj.blockSignals(previous)
class BaseEditor(QWidget):
    """
    Abstract base widget for editing a preprocessor's parameters.

    Subclasses must implement :func:`setParameters`, :func:`parameters`
    and the static :func:`createinstance`.
    """
    #: Emitted when parameters have changed.
    changed = Signal()
    #: Emitted when parameters were edited/changed as a result of
    #: user interaction.
    edited = Signal()

    def setParameters(self, parameters):
        """
        Apply the given parameters.

        Parameters
        ----------
        parameters : dict
            Parameters as a dictionary; how the contents are interpreted
            is entirely up to the subclass.
        """
        raise NotImplementedError

    def parameters(self):
        """Return the current parameters as a dictionary."""
        raise NotImplementedError

    @staticmethod
    def createinstance(params):
        """
        Build the Preprocessor described by ``params``.

        Parameters
        ----------
        params : dict
            Parameters as returned by `parameters`.
        """
        raise NotImplementedError
class _NoneDisc(preprocess.discretize.Discretization):
    """
    A discretization that maps every variable to None.

    Used together with preprocess.Discretize to drop all continuous
    features from the domain (the "Remove continuous attributes"
    option of DiscretizeEditor).
    """
    def __call__(self, data, variable):
        return None
class DiscretizeEditor(BaseEditor):
    """
    Editor for preprocess.Discretize.

    Lets the user choose a discretization method and, for the equal
    width/frequency methods, the number of intervals.
    """
    #: Discretize method identifiers
    NoDisc, EqualWidth, EqualFreq, Drop, EntropyMDL = 0, 1, 2, 3, 4
    #: Maps a method id to its (constructor, default keyword args) pair;
    #: a None constructor means "no discretization".
    Discretizers = {
        NoDisc: (None, {}),
        EqualWidth: (preprocess.discretize.EqualWidth, {"n": 4}),
        EqualFreq: (preprocess.discretize.EqualFreq, {"n": 4}),
        Drop: (_NoneDisc, {}),
        EntropyMDL: (preprocess.discretize.EntropyMDL, {"force": False})
    }
    #: Human readable method names (used as radio button labels)
    Names = {
        NoDisc: "None",
        EqualWidth: "Equal width discretization",
        EqualFreq: "Equal frequency discretization",
        Drop: "Remove continuous attributes",
        EntropyMDL: "Entropy-MDL discretization"
    }

    def __init__(self, parent=None, **kwargs):
        BaseEditor.__init__(self, parent, **kwargs)
        self.__method = DiscretizeEditor.EqualFreq
        self.__nintervals = 4

        layout = QVBoxLayout()
        self.setLayout(layout)
        self.__group = group = QButtonGroup(self, exclusive=True)

        # NoDisc is deliberately not offered in the UI.
        for method in [self.EntropyMDL, self.EqualFreq, self.EqualWidth,
                       self.Drop]:
            rb = QRadioButton(
                self, text=self.Names[method],
                checked=self.__method == method
            )
            layout.addWidget(rb)
            group.addButton(rb, method)

        group.buttonClicked.connect(self.__on_buttonClicked)

        self.__slbox = slbox = QGroupBox(
            # FIX: the title previously lacked the closing parenthesis.
            title="Number of intervals (for equal width/frequency)",
            flat=True
        )
        slbox.setLayout(QVBoxLayout())
        self.__slider = slider = QSlider(
            orientation=Qt.Horizontal,
            minimum=2, maximum=10, value=self.__nintervals,
            enabled=self.__method in [self.EqualFreq, self.EqualWidth],
        )
        slider.valueChanged.connect(self.__on_valueChanged)
        slbox.layout().addWidget(slider)

        container = QHBoxLayout()
        container.setContentsMargins(13, 0, 0, 0)
        container.addWidget(slbox)
        self.layout().insertLayout(3, container)
        self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)

    def setMethod(self, method):
        """Set the discretization method (updates buttons/slider state)."""
        if self.__method != method:
            self.__method = method
            b = self.__group.button(method)
            b.setChecked(True)
            # The interval slider is meaningful only for these two methods.
            self.__slider.setEnabled(
                method in [self.EqualFreq, self.EqualWidth]
            )
            self.changed.emit()

    def method(self):
        """Return the current method id."""
        return self.__method

    def intervals(self):
        """Return the current number of intervals."""
        return self.__nintervals

    def setIntervals(self, n):
        """Set the number of intervals, clipped to the slider's range."""
        n = numpy.clip(n, self.__slider.minimum(), self.__slider.maximum())
        n = int(n)
        if self.__nintervals != n:
            self.__nintervals = n
            # blocking signals in order to differentiate between
            # changed by user (notified through __on_valueChanged) or
            # changed programmatically (this)
            with blocked(self.__slider):
                self.__slider.setValue(n)
            self.changed.emit()

    def setParameters(self, params):
        method = params.get("method", self.EqualFreq)
        # NOTE(review): the fallback of 5 differs from the 4 used in
        # __init__ and in Discretizers' defaults — confirm if intentional.
        nintervals = params.get("n", 5)
        self.setMethod(method)
        if method in [self.EqualFreq, self.EqualWidth]:
            self.setIntervals(nintervals)

    def parameters(self):
        # "n" is only meaningful for the interval-based methods.
        if self.__method in [self.EqualFreq, self.EqualWidth]:
            return {"method": self.__method, "n": self.__nintervals}
        else:
            return {"method": self.__method}

    def __on_buttonClicked(self):
        # on user 'method' button click
        method = self.__group.checkedId()
        if method != self.__method:
            self.setMethod(self.__group.checkedId())
            self.edited.emit()

    def __on_valueChanged(self):
        # on user n intervals slider change.
        self.__nintervals = self.__slider.value()
        self.changed.emit()
        self.edited.emit()

    @staticmethod
    def createinstance(params):
        """
        Create a preprocess.Discretize instance from `params`.

        Returns None when the stored method maps to no discretizer.
        """
        params = dict(params)
        method = params.pop("method", DiscretizeEditor.EqualFreq)
        method, defaults = DiscretizeEditor.Discretizers[method]

        if method is None:
            return None

        # BUG FIX: `resolved` was previously computed but the constructor
        # was called with the raw `params`, silently discarding the
        # per-method default arguments (e.g. "n", "force"). Only keys the
        # discretizer actually declares in its defaults are forwarded.
        resolved = dict(defaults)
        resolved.update({k: v for k, v in params.items() if k in defaults})
        return preprocess.Discretize(method(**resolved), remove_const=False)
class ContinuizeEditor(BaseEditor):
    """Editor for the continuization preprocessor (Continuize)."""
    #: (label, Continuize treatment flag) pairs, in display order.
    Continuizers = [
        ("Most frequent is base", Continuize.FrequentAsBase),
        ("One attribute per value", Continuize.Indicators),
        ("Remove multinomial attributes", Continuize.RemoveMultinomial),
        ("Remove all discrete attributes", Continuize.Remove),
        ("Treat as ordinal", Continuize.AsOrdinal),
        ("Divide by number of values", Continuize.AsNormalizedOrdinal)]

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
        self.setLayout(QVBoxLayout())
        self.__treatment = Continuize.Indicators
        self.__group = group = QButtonGroup(exclusive=True)
        group.buttonClicked.connect(self.__on_buttonClicked)

        for label, flag in ContinuizeEditor.Continuizers:
            button = QRadioButton(
                text=label,
                checked=self.__treatment == flag)
            group.addButton(button, int(flag))
            self.layout().addWidget(button)

        self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)

    def setTreatment(self, treatment):
        """Select `treatment`; silently ignored if no matching button."""
        button = self.__group.button(treatment)
        if button is not None:
            button.setChecked(True)
            self.__treatment = treatment
            self.changed.emit()

    def treatment(self):
        """Return the currently selected multinomial treatment."""
        return self.__treatment

    def setParameters(self, params):
        self.setTreatment(
            params.get("multinomial_treatment", Continuize.Indicators))

    def parameters(self):
        return {"multinomial_treatment": self.__treatment}

    def __on_buttonClicked(self):
        # User selected a different treatment.
        self.__treatment = self.__group.checkedId()
        self.changed.emit()
        self.edited.emit()

    @staticmethod
    def createinstance(params):
        """Build a Continuize preprocessor from the stored parameters."""
        treatment = dict(params).pop("multinomial_treatment",
                                     Continuize.Indicators)
        return Continuize(multinomial_treatment=treatment)
class _RemoveNaNRows(preprocess.preprocess.Preprocess):
    """Preprocessor dropping every row that contains at least one NaN."""
    def __call__(self, data):
        keep = ~numpy.any(numpy.isnan(data.X), axis=1)
        return data[keep]
class ImputeEditor(BaseEditor):
    """
    Editor for missing-value imputation (preprocess.Impute).
    """
    #: Imputation method identifiers
    (NoImputation, Constant, Average,
     Model, Random, DropRows, DropColumns) = 0, 1, 2, 3, 4, 5, 6
    #: Fallback (method instance, default args) lookup used by
    #: `createinstance` for ids without a dedicated branch.
    Imputers = {
        NoImputation: (None, {}),
        # Constant: (None, {"value": 0})
        Average: (preprocess.impute.Average(), {}),
        # Model: (preprocess.impute.Model, {}),
        Random: (preprocess.impute.Random(), {}),
        DropRows: (None, {})
    }
    #: Human readable method names (radio button labels)
    Names = {
        NoImputation: "Don't impute.",
        Constant: "Replace with constant",
        Average: "Average/Most frequent",
        Model: "Model based imputer",
        Random: "Replace with random value",
        DropRows: "Remove rows with missing values.",
    }

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
        self.setLayout(QVBoxLayout())
        self.__method = ImputeEditor.Average
        self.__group = group = QButtonGroup(self, exclusive=True)
        group.buttonClicked.connect(self.__on_buttonClicked)
        # Only a subset of the declared methods is exposed in the UI.
        for methodid in [self.Average, self.Random, self.DropRows]:
            text = self.Names[methodid]
            rb = QRadioButton(text=text, checked=self.__method == methodid)
            group.addButton(rb, methodid)
            self.layout().addWidget(rb)

    def setMethod(self, method):
        """Set the imputation method (syncs the radio buttons)."""
        b = self.__group.button(method)
        if b is not None:
            b.setChecked(True)
            self.__method = method
            self.changed.emit()

    def setParameters(self, params):
        method = params.get("method", ImputeEditor.Average)
        self.setMethod(method)

    def parameters(self):
        return {"method": self.__method}

    def __on_buttonClicked(self):
        # User picked a different imputation method.
        self.__method = self.__group.checkedId()
        self.changed.emit()
        self.edited.emit()

    @staticmethod
    def createinstance(params):
        """
        Create the imputation preprocessor described by `params`.

        Returns None for NoImputation.
        """
        params = dict(params)
        method = params.pop("method", ImputeEditor.Average)
        if method == ImputeEditor.NoImputation:
            return None
        elif method == ImputeEditor.Average:
            return preprocess.Impute()
        elif method == ImputeEditor.Model:
            return preprocess.Impute(method=preprocess.impute.Model())
        elif method == ImputeEditor.DropRows:
            return _RemoveNaNRows()
        elif method == ImputeEditor.DropColumns:
            return preprocess.RemoveNaNColumns()
        else:
            # Fall back to the Imputers table (e.g. Random). The entries'
            # default-argument dicts are empty, so the previous dead
            # `defaults.update(params)` merge (whose result was never
            # passed on) has been removed.
            # NOTE(review): Constant has no Imputers entry and would raise
            # KeyError here; it is not reachable from the UI.
            method, _defaults = ImputeEditor.Imputers[method]
            return preprocess.Impute(method=method)
class UnivariateFeatureSelect(QWidget):
    """
    Composite widget for configuring univariate feature selection.

    The user picks a scoring function (combo box filled via `setItems`)
    and a selection strategy: a fixed number of features `k` or a
    percentile `p` (the percentile controls are built but disabled;
    FDR/FPR/FWE are declared but have no UI).
    """
    #: Emitted when the configuration changes (by user or programmatically).
    changed = Signal()
    #: Emitted when the user interactively edits the configuration.
    edited = Signal()
    #: Strategy
    Fixed, Percentile, FDR, FPR, FWE = 1, 2, 3, 4, 5
    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
        self.setLayout(QVBoxLayout())
        # Current state: score combo index, strategy id and its arguments.
        self.__scoreidx = 0
        self.__strategy = UnivariateFeatureSelect.Fixed
        self.__k = 10
        self.__p = 75.0
        box = QGroupBox(title="Score", flat=True)
        box.setLayout(QVBoxLayout())
        self.__cb = cb = QComboBox(self, )
        self.__cb.currentIndexChanged.connect(self.setScoreIndex)
        self.__cb.activated.connect(self.edited)
        box.layout().addWidget(cb)
        self.layout().addWidget(box)
        box = QGroupBox(title="Strategy", flat=True)
        self.__group = group = QButtonGroup(self, exclusive=True)
        # Maps a strategy id to the spin box holding its argument, so the
        # spins can be enabled/disabled when the strategy changes.
        self.__spins = {}
        form = QFormLayout()
        fixedrb = QRadioButton("Fixed", checked=True)
        group.addButton(fixedrb, UnivariateFeatureSelect.Fixed)
        kspin = QSpinBox(
            minimum=1, value=self.__k,
            enabled=self.__strategy == UnivariateFeatureSelect.Fixed
        )
        kspin.valueChanged[int].connect(self.setK)
        kspin.editingFinished.connect(self.edited)
        self.__spins[UnivariateFeatureSelect.Fixed] = kspin
        form.addRow(fixedrb, kspin)
        percrb = QRadioButton("Percentile")
        group.addButton(percrb, UnivariateFeatureSelect.Percentile)
        pspin = QDoubleSpinBox(
            minimum=0.0, maximum=100.0, singleStep=0.5,
            value=self.__p, suffix="%",
            enabled=self.__strategy == UnivariateFeatureSelect.Percentile
        )
        pspin.valueChanged[float].connect(self.setP)
        pspin.editingFinished.connect(self.edited)
        self.__spins[UnivariateFeatureSelect.Percentile] = pspin
        # Percentile controls disabled for now.
        pspin.setEnabled(False)
        percrb.setEnabled(False)
        form.addRow(percrb, pspin)
        # form.addRow(QRadioButton("FDR"), QDoubleSpinBox())
        # form.addRow(QRadioButton("FPR"), QDoubleSpinBox())
        # form.addRow(QRadioButton("FWE"), QDoubleSpinBox())
        self.__group.buttonClicked.connect(self.__on_buttonClicked)
        box.setLayout(form)
        self.layout().addWidget(box)
    def setScoreIndex(self, scoreindex):
        """Set the score combo index; emits `changed` on change."""
        if self.__scoreidx != scoreindex:
            self.__scoreidx = scoreindex
            self.__cb.setCurrentIndex(scoreindex)
            self.changed.emit()
    def scoreIndex(self):
        """Return the current score combo index."""
        return self.__scoreidx
    def setStrategy(self, strategy):
        """Set the strategy; enables only the matching argument spin box."""
        if self.__strategy != strategy:
            self.__strategy = strategy
            b = self.__group.button(strategy)
            b.setChecked(True)
            for st, rb in self.__spins.items():
                rb.setEnabled(st == strategy)
            self.changed.emit()
    def setK(self, k):
        """Set the fixed feature count; `changed` only if Fixed is active."""
        if self.__k != k:
            self.__k = k
            spin = self.__spins[UnivariateFeatureSelect.Fixed]
            spin.setValue(k)
            if self.__strategy == UnivariateFeatureSelect.Fixed:
                self.changed.emit()
    def setP(self, p):
        """Set the percentile; `changed` only if Percentile is active."""
        if self.__p != p:
            self.__p = p
            spin = self.__spins[UnivariateFeatureSelect.Percentile]
            spin.setValue(p)
            if self.__strategy == UnivariateFeatureSelect.Percentile:
                self.changed.emit()
    def setItems(self, itemlist):
        """Fill the score combo; only each item's "text" key is used."""
        for item in itemlist:
            self.__cb.addItem(item["text"])
    def __on_buttonClicked(self):
        # User selected a different strategy radio button.
        strategy = self.__group.checkedId()
        self.setStrategy(strategy)
        self.edited.emit()
    def setParameters(self, params):
        """Restore state from a parameters dict (score/strategy/k/p)."""
        score = params.get("score", 0)
        strategy = params.get("strategy", UnivariateFeatureSelect.Fixed)
        self.setScoreIndex(score)
        self.setStrategy(strategy)
        if strategy == UnivariateFeatureSelect.Fixed:
            self.setK(params.get("k", 10))
        else:
            self.setP(params.get("p", 75))
    def parameters(self):
        """Return the full state (both k and p are always included)."""
        score = self.__scoreidx
        strategy = self.__strategy
        p = self.__p
        k = self.__k
        return {"score": score, "strategy": strategy, "p": p, "k": k}
class FeatureSelectEditor(BaseEditor):
    """
    Editor for univariate feature selection (preprocess.fss).
    """
    #: Available (name, scorer) pairs; the index into this list is the
    #: "score" value stored in the parameters.
    MEASURES = [
        ("Information Gain", preprocess.score.InfoGain),
        ("Gain ratio", preprocess.score.GainRatio),
        ("Gini index", preprocess.score.Gini),
    ]

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setLayout(QVBoxLayout())
        self.layout().setContentsMargins(0, 0, 0, 0)
        # FIX: removed the unused (and misspelled) private attributes
        # __score / __selecionidx — all state lives in the embedded
        # UnivariateFeatureSelect widget.
        self.__uni_fs = UnivariateFeatureSelect()
        self.__uni_fs.setItems(
            [{"text": "Information gain", "tooltip": ""},
             {"text": "Gain ratio"},
             {"text": "Gini index"}
            ]
        )
        self.layout().addWidget(self.__uni_fs)
        self.__uni_fs.changed.connect(self.changed)
        self.__uni_fs.edited.connect(self.edited)

    def setParameters(self, params):
        # Delegate the whole parameter round-trip to the embedded widget.
        self.__uni_fs.setParameters(params)

    def parameters(self):
        return self.__uni_fs.parameters()

    @staticmethod
    def createinstance(params):
        """
        Create a SelectBestFeatures preprocessor from `params`.

        Raises NotImplementedError for strategies other than Fixed.
        """
        params = dict(params)
        score = params.pop("score", 0)
        score = FeatureSelectEditor.MEASURES[score][1]
        strategy = params.get("strategy", UnivariateFeatureSelect.Fixed)
        k = params.get("k", 10)
        if strategy == UnivariateFeatureSelect.Fixed:
            return preprocess.fss.SelectBestFeatures(score, k=k)
        else:
            # TODO: implement top percentile selection
            raise NotImplementedError
# TODO: Model based FS (random forest variable importance, ...), RFE
# Unsupervised (min variance, constant, ...)??
class _Scaling(preprocess.preprocess.Preprocess):
    """
    Scale data preprocessor.

    Centers and/or scales every continuous attribute using statistics
    computed from its value distribution. `center` and `scale` are
    callables taking a distribution (assumed to be a 2-row array of
    values/counts, as unpacked below — TODO confirm against
    Orange.statistics.distribution) and returning a scalar.

    NOTE(review): the `center=mean, scale=std` defaults bind the
    staticmethod descriptor objects from the class body; on Python < 3.10
    those are not directly callable, so callers (e.g. Scale.createinstance)
    always pass `_Scaling.mean` etc. explicitly — verify before relying on
    the defaults.
    """
    @staticmethod
    def mean(dist):
        # Weighted mean of the distribution's values, weighted by counts.
        values, counts = numpy.array(dist)
        return numpy.average(values, weights=counts)
    @staticmethod
    def median(dist):
        # Median via the (normalized) cumulative count distribution.
        values, counts = numpy.array(dist)
        cumdist = numpy.cumsum(counts)
        if cumdist[-1] > 0:
            cumdist /= cumdist[-1]
        return numpy.interp(0.5, cumdist, values)
    @staticmethod
    def span(dist):
        # Range (max - min) of the distribution's values (first row).
        values = numpy.array(dist[0])
        minval = numpy.min(values)
        maxval = numpy.max(values)
        return maxval - minval
    @staticmethod
    def std(dist):
        # Population standard deviation, count-weighted.
        values, counts = numpy.array(dist)
        mean = numpy.average(values, weights=counts)
        diff = values - mean
        return numpy.sqrt(numpy.average(diff ** 2, weights=counts))
    def __init__(self, center=mean, scale=std):
        # Either callable may be None to skip that step entirely.
        self.center = center
        self.scale = scale
    def __call__(self, data):
        if self.center is None and self.scale is None:
            return data
        def transform(var):
            # Build a Normalizer compute_value for one continuous variable.
            dist = distribution.get_distribution(data, var)
            if self.center:
                c = self.center(dist)
                # Shift the distribution in place so `scale` sees
                # centered values.
                dist[0, :] -= c
            else:
                c = 0
            if self.scale:
                s = self.scale(dist)
                # Guard against (near-)zero spread: fall back to no scaling.
                if s < 1e-15:
                    s = 1
            else:
                s = 1
            factor = 1 / s
            return var.copy(compute_value=preprocess.transformation.Normalizer(var, c, factor))
        newvars = []
        # Only continuous attributes are transformed; others pass through.
        for var in data.domain.attributes:
            if var.is_continuous:
                newvars.append(transform(var))
            else:
                newvars.append(var)
        domain = Orange.data.Domain(newvars, data.domain.class_vars,
                                    data.domain.metas)
        return data.from_table(domain, data)
class Scale(BaseEditor):
    """Editor for the centering/scaling preprocessor (_Scaling)."""
    #: Centering options (combo box indices)
    NoCentering, CenterMean, CenterMedian = 0, 1, 2
    #: Scaling options (combo box indices)
    NoScaling, ScaleBySD, ScaleBySpan = 0, 1, 2

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
        self.setLayout(QVBoxLayout())
        rows = QFormLayout()
        self.__centercb = QComboBox()
        self.__centercb.addItems(["No centering", "Center by mean",
                                  "Center by median"])
        self.__scalecb = QComboBox()
        self.__scalecb.addItems(["No scaling", "Scale by std",
                                 "Scale by span"])
        rows.addRow("Center", self.__centercb)
        rows.addRow("Scale", self.__scalecb)
        self.layout().addLayout(rows)
        self.__centercb.currentIndexChanged.connect(self.changed)
        self.__scalecb.currentIndexChanged.connect(self.changed)
        self.__centercb.activated.connect(self.edited)
        self.__scalecb.activated.connect(self.edited)

    def setParameters(self, params):
        """Restore the two combo boxes from a parameters dict."""
        self.__centercb.setCurrentIndex(params.get("center", Scale.CenterMean))
        self.__scalecb.setCurrentIndex(params.get("scale", Scale.ScaleBySD))

    def parameters(self):
        """Return the current combo box selections."""
        return {"center": self.__centercb.currentIndex(),
                "scale": self.__scalecb.currentIndex()}

    @staticmethod
    def createinstance(params):
        """Build a _Scaling preprocessor from the stored parameters."""
        center_key = params.get("center", Scale.CenterMean)
        scale_key = params.get("scale", Scale.ScaleBySD)
        center_funcs = {Scale.NoCentering: None,
                        Scale.CenterMean: _Scaling.mean,
                        Scale.CenterMedian: _Scaling.median}
        scale_funcs = {Scale.NoScaling: None,
                       Scale.ScaleBySD: _Scaling.std,
                       Scale.ScaleBySpan: _Scaling.span}
        # Unknown indices are a programming error (same contract as the
        # original if/elif chain ending in `assert False`).
        assert center_key in center_funcs
        assert scale_key in scale_funcs
        return _Scaling(center=center_funcs[center_key],
                        scale=scale_funcs[scale_key])
class _Randomize(preprocess.preprocess.Preprocess):
    """
    Randomize data preprocessor: applies Orange's Random preprocessor
    with the configured randomization type.
    """
    def __init__(self, rand_type=Random.RandomizeClasses):
        self.rand_type = rand_type

    def __call__(self, data):
        return Random(rand_type=self.rand_type)(data)
class Randomize(BaseEditor):
    """Editor for the data randomization preprocessor (_Randomize)."""
    RandomizeClasses, RandomizeAttributes, RandomizeMetas = Random.RandTypes

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
        self.setLayout(QVBoxLayout())
        rows = QFormLayout()
        self.__rand_type_cb = QComboBox()
        self.__rand_type_cb.addItems(["Classes",
                                      "Features",
                                      "Meta data"])
        rows.addRow("Randomize", self.__rand_type_cb)
        self.layout().addLayout(rows)
        self.__rand_type_cb.currentIndexChanged.connect(self.changed)
        self.__rand_type_cb.activated.connect(self.edited)

    def setParameters(self, params):
        """Restore the randomization-type selection."""
        self.__rand_type_cb.setCurrentIndex(
            params.get("rand_type", Randomize.RandomizeClasses))

    def parameters(self):
        """Return the selected randomization type."""
        return {"rand_type": self.__rand_type_cb.currentIndex()}

    @staticmethod
    def createinstance(params):
        """Build a _Randomize preprocessor from the stored parameters."""
        return _Randomize(
            rand_type=params.get("rand_type", Randomize.RandomizeClasses))
class PCA(BaseEditor):
    """
    Editor for the PCA projection preprocessor (ProjectPCA).
    """
    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
        self.setLayout(QVBoxLayout())
        self.n_components = 10

        form = QFormLayout()
        self.cspin = QSpinBox(minimum=1, value=self.n_components)
        self.cspin.valueChanged[int].connect(self.setC)
        self.cspin.editingFinished.connect(self.edited)

        form.addRow("Components", self.cspin)
        self.layout().addLayout(form)

    def setParameters(self, params):
        # BUG FIX: previously assigned self.n_components directly, which
        # left the spin box out of sync with restored parameters. Go
        # through the setter (as CUR.setParameters does) so the UI updates.
        self.setC(params.get("n_components", 10))

    def parameters(self):
        return {"n_components": self.n_components}

    def setC(self, n_components):
        """Set the number of components (syncs the spin box)."""
        if self.n_components != n_components:
            self.n_components = n_components
            self.cspin.setValue(n_components)
            self.changed.emit()

    @staticmethod
    def createinstance(params):
        """Build a ProjectPCA preprocessor from the stored parameters."""
        n_components = params.get("n_components", 10)
        return ProjectPCA(n_components=n_components)
class CUR(BaseEditor):
    """Editor for the CUR matrix decomposition preprocessor (ProjectCUR)."""

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
        self.setLayout(QVBoxLayout())
        self.rank = 10
        self.max_error = 1
        rows = QFormLayout()
        self.rspin = QSpinBox(minimum=2, value=self.rank)
        self.rspin.valueChanged[int].connect(self.setR)
        self.rspin.editingFinished.connect(self.edited)
        self.espin = QDoubleSpinBox(
            minimum=0.1, maximum=100.0, singleStep=0.1,
            value=self.max_error)
        self.espin.valueChanged[float].connect(self.setE)
        self.espin.editingFinished.connect(self.edited)
        rows.addRow("Rank", self.rspin)
        rows.addRow("Relative error", self.espin)
        self.layout().addLayout(rows)

    def setParameters(self, params):
        """Restore editor state from a parameters dict."""
        self.setR(params.get("rank", 10))
        self.setE(params.get("max_error", 1))

    def parameters(self):
        """Return the current state as a parameters dict."""
        return {"rank": self.rank, "max_error": self.max_error}

    def setR(self, rank):
        """Set the decomposition rank (syncs the spin box)."""
        if rank != self.rank:
            self.rank = rank
            self.rspin.setValue(rank)
            self.changed.emit()

    def setE(self, max_error):
        """Set the relative error bound (syncs the spin box)."""
        if max_error != self.max_error:
            self.max_error = max_error
            self.espin.setValue(max_error)
            self.changed.emit()

    @staticmethod
    def createinstance(params):
        """Build a ProjectCUR preprocessor from the stored parameters."""
        return ProjectCUR(rank=params.get("rank", 10),
                          max_error=params.get("max_error", 1))
# This is intended for future improvements.
# I.e. it should be possible to add/register preprocessor actions
# through entry points (for use by add-ons). Maybe it should be a
# general framework (this is not the only place where such
# functionality is desired; for instance, in Orange v2.* the Rank widget
# already defines its own entry point).
class Description:
    """
    A description of an action/function.

    A plain record of display/help metadata: `title` is required; all
    other fields default to None and are stored unchanged.
    """
    def __init__(self, title, icon=None, summary=None, input=None, output=None,
                 requires=None, note=None, related=None, keywords=None,
                 helptopic=None):
        # Presentation metadata.
        self.title = title
        self.icon = icon
        self.summary = summary
        # Input/output specification.
        self.input = input
        self.output = output
        self.requires = requires
        # Auxiliary annotations (help, search, cross references).
        self.note = note
        self.related = related
        self.keywords = keywords
        self.helptopic = helptopic
class PreprocessAction:
    """
    Pairs a preprocessor's identity (`name`, unique `qualname`,
    `category`), its display `description` and the editor widget class
    (`viewclass`) used to configure it.
    """
    def __init__(self, name, qualname, category, description, viewclass):
        self.qualname = qualname
        self.name = name
        self.category = category
        self.description = description
        self.viewclass = viewclass
def icon_path(basename):
    """Return the filesystem path of icon `basename` bundled with this package."""
    return pkg_resources.resource_filename(__name__, "icons/" + basename)
#: Built-in preprocessor actions offered in the widget. Each entry pairs a
#: unique qualified name with the editor class used to configure it.
PREPROCESSORS = [
    PreprocessAction(
        "Discretize", "orange.preprocess.discretize", "Discretization",
        Description("Discretize Continuous Variables",
                    icon_path("Discretize.svg")),
        DiscretizeEditor
    ),
    PreprocessAction(
        "Continuize", "orange.preprocess.continuize", "Continuization",
        Description("Continuize Discrete Variables",
                    icon_path("Continuize.svg")),
        ContinuizeEditor
    ),
    PreprocessAction(
        "Impute", "orange.preprocess.impute", "Impute",
        Description("Impute Missing Values",
                    icon_path("Impute.svg")),
        ImputeEditor
    ),
    PreprocessAction(
        "Feature Selection", "orange.preprocess.fss", "Feature Selection",
        Description("Select Relevant Features",
                    icon_path("SelectColumns.svg")),
        FeatureSelectEditor
    ),
    PreprocessAction(
        "Normalize", "orange.preprocess.scale", "Scaling",
        Description("Normalize Features",
                    icon_path("Normalize.svg")),
        Scale
    ),
    PreprocessAction(
        "Randomize", "orange.preprocess.randomize", "Randomization",
        Description("Randomize",
                    icon_path("Random.svg")),
        Randomize
    ),
    PreprocessAction(
        "PCA", "orange.preprocess.pca", "PCA",
        Description("Principal Component Analysis",
                    icon_path("PCA.svg")),
        PCA
    ),
    PreprocessAction(
        "CUR", "orange.preprocess.cur", "CUR",
        Description("CUR Matrix Decomposition",
                    icon_path("SelectColumns.svg")),
        CUR
    )
]
# TODO: Extend with entry points here
# PREPROCESSORS += iter_entry_points("Orange.widgets.data.owpreprocess")
# ####
# The actual owwidget (with helper classes)
# ####
# Note:
# The preprocessors are drag/dropped onto a sequence widget, where
# they can be reordered/removed/edited.
#
# Model <-> Adapter/Controller <-> View
#
# * `Model`: the current constructed preprocessor model.
# * the source (of drag/drop) is an item model displayed in a list
# view (source list).
# * the drag/drop is controlled by the controller/adapter.
def list_model_move_row_helper(model, parent, src, dst):
    """
    Move row `src` in front of index `dst` (insert-before semantics) by a
    remove/insert/setItemData sequence. Returns True on success. A move
    onto itself (`dst == src` or `dst == src + 1`) is a caller error.
    """
    assert src != dst and src != dst - 1
    payload = model.itemData(model.index(src, 0, parent))
    if not model.removeRow(src, parent):
        return False
    # After removal, targets past the source shift down by one.
    target = dst - 1 if dst > src else dst
    if not model.insertRow(target, parent):
        return False
    return model.setItemData(model.index(target, 0, parent), payload)
def list_model_move_rows_helper(model, parent, src, count, dst):
    """
    Move `count` rows starting at `src` in front of index `dst`
    (insert-before semantics) by a removeRows/insertRows/setItemData
    sequence. Returns True only if every step succeeded. Destinations
    inside (or immediately after) the moved range are caller errors.
    """
    assert not (src <= dst < src + count + 1)
    payload = [model.itemData(model.index(src + i, 0, parent))
               for i in range(count)]
    if not model.removeRows(src, count, parent):
        return False
    # After removal, targets past the source shift down by `count`.
    target = dst - count if dst > src else dst
    if not model.insertRows(target, count, parent):
        return False
    ok = True
    for offset, data in enumerate(payload):
        # Attempt every row even after a failure, mirroring removeRows'
        # all-or-report-False contract.
        ok = model.setItemData(model.index(target + offset, 0, parent),
                               data) and ok
    return ok
class StandardItemModel(QtGui.QStandardItemModel):
    """
    A QStandardItemModel improving support for internal row moves.

    The QStandardItemModel is missing support for explicitly moving
    rows internally. Therefore to move a row it is first removed, then
    reinserted as an empty row, and its data repopulated.
    This triggers rowsRemoved/rowsInserted and dataChanged signals.
    If an observer is monitoring the model state it would see all the model
    changes. By using moveRow[s] only one `rowsMoved` signal is emitted
    coalescing all the updates.

    .. note:: The semantics follow Qt5's QAbstractItemModel.moveRow[s]
    """
    def moveRow(self, sourceParent, sourceRow, destParent, destRow):
        """
        Move sourceRow from sourceParent to destRow under destParent.

        Returns True if the row was successfully moved; otherwise
        returns False.

        .. note:: Only moves within the same parent are currently supported
        """
        if not sourceParent == destParent:
            return False
        if not self.beginMoveRows(sourceParent, sourceRow, sourceRow,
                                  destParent, destRow):
            return False
        # block so rowsRemoved/Inserted and dataChanged signals
        # are not emitted during the move. Instead the rowsMoved
        # signal will be emitted from self.endMoveRows().
        # I am mostly sure this is safe (a possible problem would be if the
        # base class itself would connect to the rowsInserted, ... to monitor
        # ensure internal invariants)
        with blocked(self):
            didmove = list_model_move_row_helper(
                self, sourceParent, sourceRow, destRow)
        self.endMoveRows()

        if not didmove:
            warnings.warn(
                "`moveRow` did not succeed! Data model might be "
                "in an inconsistent state.",
                RuntimeWarning)
        return didmove

    def moveRows(self, sourceParent, sourceRow, count,
                 destParent, destRow):
        """
        Move `count` rows starting with the given sourceRow under parent
        sourceParent to row destRow under parent destParent.

        Return True if the rows were successfully moved; otherwise
        returns False.

        .. note:: Only moves within the same parent are currently supported
        """
        # Consistency fix: reject cross-parent moves like moveRow does
        # (list_model_move_rows_helper only operates within one parent).
        if not sourceParent == destParent:
            return False
        # BUG FIX: beginMoveRows takes the *inclusive* last source row;
        # the previous sourceRow + count announced one row too many.
        if not self.beginMoveRows(sourceParent, sourceRow,
                                  sourceRow + count - 1,
                                  destParent, destRow):
            return False
        # block so rowsRemoved/Inserted and dataChanged signals
        # are not emitted during the move. Instead the rowsMoved
        # signal will be emitted from self.endMoveRows().
        with blocked(self):
            didmove = list_model_move_rows_helper(
                self, sourceParent, sourceRow, count, destRow)
        self.endMoveRows()

        if not didmove:
            warnings.warn(
                "`moveRows` did not succeed! Data model might be "
                "in an inconsistent state.",
                RuntimeWarning)
        return didmove
#: Qt.ItemDataRole holding the PreprocessAction instance describing an item
DescriptionRole = Qt.UserRole
#: Qt.ItemDataRole storing the preprocessor's parameters dict
#: (as produced by BaseEditor.parameters)
ParametersRole = Qt.UserRole + 1
class Controller(QObject):
    """
    Controller for displaying/editing QAbstractItemModel using SequenceFlow.

    It creates/deletes/updates the widgets in the view when the model
    changes, as well as interprets drop events (with appropriate mime data)
    onto the view, modifying the model appropriately.

    Parameters
    ----------
    view : SequenceFlow
        The view to control (required).
    model : QAbstractItemModel
        A list model
    parent : QObject
        The controller's parent.
    """
    #: Mime type accepted on drag/drop onto the view.
    MimeType = "application/x-qwidget-ref"
    def __init__(self, view, model=None, parent=None):
        super().__init__(parent)
        self._model = None
        self.view = view
        view.installEventFilter(self)
        view.widgetCloseRequested.connect(self._closeRequested)
        view.widgetMoved.connect(self._widgetMoved)
        # gruesome: reach into the view's name-mangled private helpers
        self._setDropIndicatorAt = view._SequenceFlow__setDropIndicatorAt
        self._insertIndexAt = view._SequenceFlow__insertIndexAt
        if model is not None:
            self.setModel(model)
    def __connect(self, model):
        # Subscribe to all model change notifications we mirror in the view.
        model.dataChanged.connect(self._dataChanged)
        model.rowsInserted.connect(self._rowsInserted)
        model.rowsRemoved.connect(self._rowsRemoved)
        model.rowsMoved.connect(self._rowsMoved)
    def __disconnect(self, model):
        # Inverse of __connect.
        model.dataChanged.disconnect(self._dataChanged)
        model.rowsInserted.disconnect(self._rowsInserted)
        model.rowsRemoved.disconnect(self._rowsRemoved)
        model.rowsMoved.disconnect(self._rowsMoved)
    def setModel(self, model):
        """Set the model for the view.

        :type model: QAbstractItemModel.
        """
        if self._model is model:
            return
        if self._model is not None:
            self.__disconnect(self._model)
        self._clear()
        self._model = model
        if self._model is not None:
            self._initialize(model)
            self.__connect(model)
    def model(self):
        """Return the model.
        """
        return self._model
    def _initialize(self, model):
        # Create a view widget for every existing model row.
        for i in range(model.rowCount()):
            index = model.index(i, 0)
            self._insertWidgetFor(i, index)
    def _clear(self):
        self.view.clear()
    def dragEnterEvent(self, event):
        """Accept drags carrying our mime type; returns True if handled."""
        if event.mimeData().hasFormat(self.MimeType) and \
                self.model() is not None:
            event.setDropAction(Qt.CopyAction)
            event.accept()
            return True
        else:
            return False
    def dragMoveEvent(self, event):
        """Track the drag and show the drop indicator at the cursor."""
        if event.mimeData().hasFormat(self.MimeType) and \
                self.model() is not None:
            event.accept()
            self._setDropIndicatorAt(event.pos())
            return True
        else:
            return False
    def dragLeaveEvent(self, event):
        return False
        # TODO: Remember if we have seen enter with the proper data
        # (leave event does not have mimeData)
        # if event.mimeData().hasFormat(self.MimeType) and \
        #         event.proposedAction() == Qt.CopyAction:
        #     event.accept()
        #     self._setDropIndicatorAt(None)
        #     return True
        # else:
        #     return False
    def dropEvent(self, event):
        """Handle a drop: ask the model to insert the dropped data."""
        if event.mimeData().hasFormat(self.MimeType) and \
                self.model() is not None:
            # Create and insert appropriate widget.
            self._setDropIndicatorAt(None)
            row = self._insertIndexAt(event.pos())
            model = self.model()
            diddrop = model.dropMimeData(
                event.mimeData(), Qt.CopyAction, row, 0, QModelIndex())
            if diddrop:
                event.accept()
            return True
        else:
            return False
    def eventFilter(self, view, event):
        # Route the view's drag/drop events through the handlers above.
        if view is not self.view:
            return False
        if event.type() == QEvent.DragEnter:
            return self.dragEnterEvent(event)
        elif event.type() == QEvent.DragMove:
            return self.dragMoveEvent(event)
        elif event.type() == QEvent.DragLeave:
            return self.dragLeaveEvent(event)
        elif event.type() == QEvent.Drop:
            return self.dropEvent(event)
        else:
            return super().eventFilter(view, event)
    def _dataChanged(self, topleft, bottomright):
        # Push model data changes into the affected view widgets.
        model = self.model()
        widgets = self.view.widgets()
        top, left = topleft.row(), topleft.column()
        bottom, right = bottomright.row(), bottomright.column()
        assert left == 0 and right == 0
        for row in range(top, bottom + 1):
            self.setWidgetData(widgets[row], model.index(row, 0))
    def _rowsInserted(self, parent, start, end):
        # Mirror newly inserted model rows as view widgets.
        model = self.model()
        for row in range(start, end + 1):
            index = model.index(row, 0, parent)
            self._insertWidgetFor(row, index)
    def _rowsRemoved(self, parent, start, end):
        # Remove widgets back-to-front so earlier indices stay valid.
        for row in reversed(range(start, end + 1)):
            self._removeWidgetFor(row, None)
    def _rowsMoved(self, srcparetn, srcstart, srcend,
                   dstparent, dststart, dstend):
        raise NotImplementedError
    def _closeRequested(self, row):
        # User clicked a widget frame's close button: drop the model row.
        model = self.model()
        assert 0 <= row < model.rowCount()
        model.removeRows(row, 1, QModelIndex())
    def _widgetMoved(self, from_, to):
        # The widget in the view were already swapped, so
        # we must disconnect from the model when moving the rows.
        # It would be better if this class would also filter and
        # handle internal widget moves.
        model = self.model()
        self.__disconnect(model)
        try:
            model.moveRow
        except AttributeError:
            # Fallback for models without moveRow: remove/insert/repopulate.
            data = model.itemData(model.index(from_, 0))
            removed = model.removeRow(from_, QModelIndex())
            inserted = model.insertRow(to, QModelIndex())
            model.setItemData(model.index(to, 0), data)
            assert removed and inserted
            assert model.rowCount() == len(self.view.widgets())
        else:
            if to > from_:
                to = to + 1
            didmove = model.moveRow(QModelIndex(), from_, QModelIndex(), to)
            assert didmove
        finally:
            self.__connect(model)
    def _insertWidgetFor(self, row, index):
        # Build the editor widget for `index` and splice it into the view.
        widget = self.createWidgetFor(index)
        self.view.insertWidget(row, widget, title=index.data(Qt.DisplayRole))
        self.view.setIcon(row, index.data(Qt.DecorationRole))
        self.setWidgetData(widget, index)
        widget.edited.connect(self.__edited)
    def _removeWidgetFor(self, row, index):
        # Tear down the editor widget mirroring a removed model row.
        widget = self.view.widgets()[row]
        self.view.removeWidget(widget)
        widget.edited.disconnect(self.__edited)
        widget.deleteLater()
    def createWidgetFor(self, index):
        """
        Create a QWidget instance for the index (:class:`QModelIndex`)
        """
        definition = index.data(DescriptionRole)
        widget = definition.viewclass()
        return widget
    def setWidgetData(self, widget, index):
        """
        Set/update the widget state from the model at index.
        """
        params = index.data(ParametersRole)
        if not isinstance(params, dict):
            params = {}
        widget.setParameters(params)
    def setModelData(self, widget, index):
        """
        Get the data from the widget state and set/update the model at index.
        """
        params = widget.parameters()
        assert isinstance(params, dict)
        self._model.setData(index, params, ParametersRole)
    @Slot()
    def __edited(self,):
        # A widget's `edited` signal fired: write its state back to the model.
        widget = self.sender()
        row = self.view.indexOf(widget)
        index = self.model().index(row, 0)
        self.setModelData(widget, index)
class SequenceFlow(QWidget):
"""
A re-orderable list of widgets.
"""
#: Emitted when the user clicks the Close button in the header
widgetCloseRequested = Signal(int)
#: Emitted when the user moves/drags a widget to a new location.
widgetMoved = Signal(int, int)
    class Frame(QtGui.QDockWidget):
        """
        Widget frame with a handle.

        Hosts a single editor widget inside the flow; supports focus
        highlighting, a Delete shortcut and a close request signal.
        """
        #: Emitted when the frame's close button or Delete action fires.
        closeRequested = Signal()
        def __init__(self, parent=None, widget=None, title=None, **kwargs):
            super().__init__(parent, **kwargs)
            self.setFeatures(QtGui.QDockWidget.DockWidgetClosable)
            # Not dockable anywhere; only used as a framed child widget.
            self.setAllowedAreas(Qt.NoDockWidgetArea)
            self.__title = ""
            self.__icon = ""
            self.__focusframe = None
            # Delete shortcut is only enabled while the frame has focus.
            self.__deleteaction = QtGui.QAction(
                "Remove", self, shortcut=QtGui.QKeySequence.Delete,
                enabled=False, triggered=self.closeRequested
            )
            self.addAction(self.__deleteaction)
            if widget is not None:
                self.setWidget(widget)
                self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
            if title:
                self.setTitle(title)
            self.setFocusPolicy(Qt.ClickFocus | Qt.TabFocus)
        def setTitle(self, title):
            # Keep the cached title and window title in sync.
            if self.__title != title:
                self.__title = title
                self.setWindowTitle(title)
                self.update()
        def setIcon(self, icon):
            icon = QIcon(icon)
            # NOTE(review): QIcon equality is identity-like here — the
            # comparison presumably always differs for fresh QIcons; confirm.
            if self.__icon != icon:
                self.__icon = icon
                self.setWindowIcon(icon)
                self.update()
        def paintEvent(self, event):
            # Draw the dock-widget frame primitive, then default painting.
            painter = QStylePainter(self)
            opt = QStyleOptionFrame()
            opt.init(self)
            painter.drawPrimitive(QStyle.PE_FrameDockWidget, opt)
            painter.end()
            super().paintEvent(event)
        def focusInEvent(self, event):
            # Show a focus frame and arm the Delete action.
            event.accept()
            self.__focusframe = QtGui.QFocusFrame(self)
            self.__focusframe.setWidget(self)
            self.__deleteaction.setEnabled(True)
        def focusOutEvent(self, event):
            # Remove the focus frame and disarm the Delete action.
            event.accept()
            self.__focusframe.deleteLater()
            self.__focusframe = None
            self.__deleteaction.setEnabled(False)
        def closeEvent(self, event):
            # Intercept close: keep the frame alive and let the owner
            # decide via closeRequested.
            super().closeEvent(event)
            event.ignore()
            self.closeRequested.emit()
def __init__(self, parent=None, **kwargs):
super().__init__(parent, **kwargs)
self.__dropindicator = QSpacerItem(
16, 16, QSizePolicy.Expanding, QSizePolicy.Fixed
)
self.__dragstart = (None, None, None)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.__flowlayout = QVBoxLayout()
layout.addLayout(self.__flowlayout)
layout.addSpacerItem(
QSpacerItem(1, 1, QSizePolicy.Expanding, QSizePolicy.Expanding))
self.setLayout(layout)
self.setAcceptDrops(True)
def sizeHint(self):
"""Reimplemented."""
if self.widgets():
return super().sizeHint()
else:
return QSize(250, 350)
def addWidget(self, widget, title):
"""Add `widget` with `title` to list of widgets (in the last position).
Parameters
----------
widget : QWidget
Widget instance.
title : str
Widget title.
"""
index = len(self.widgets())
self.insertWidget(index, widget, title)
def insertWidget(self, index, widget, title):
"""Insert `widget` with `title` at `index`.
Parameters
----------
index : int
Position at which the widget should be inserted.
widget : QWidget
Widget instance.
title : str
Widget title.
"""
# TODO: Check if widget is already inserted.
frame = SequenceFlow.Frame(widget=widget, title=title)
frame.closeRequested.connect(self.__closeRequested)
layout = self.__flowlayout
frames = [item.widget() for item in self.layout_iter(layout)
if item.widget()]
if 0 < index < len(frames):
# find the layout index of a widget occupying the current
# index'th slot.
insert_index = layout.indexOf(frames[index])
elif index == 0:
insert_index = 0
elif index < 0 or index >= len(frames):
insert_index = layout.count()
else:
assert False
layout.insertWidget(insert_index, frame)
frame.installEventFilter(self)
def removeWidget(self, widget):
"""Remove widget from the list.
Parameters
----------
widget : QWidget
Widget instance to remove.
"""
layout = self.__flowlayout
frame = self.__widgetFrame(widget)
if frame is not None:
frame.setWidget(None)
widget.setVisible(False)
widget.setParent(None)
layout.takeAt(layout.indexOf(frame))
frame.hide()
frame.deleteLater()
def clear(self):
"""Clear the list (remove all widgets).
"""
for w in reversed(self.widgets()):
self.removeWidget(w)
def widgets(self):
"""Return a list of all `widgets`.
"""
layout = self.__flowlayout
items = (layout.itemAt(i) for i in range(layout.count()))
return [item.widget().widget()
for item in items if item.widget() is not None]
def indexOf(self, widget):
"""Return the index (logical position) of `widget`
"""
widgets = self.widgets()
return widgets.index(widget)
def setTitle(self, index, title):
"""Set title for `widget` at `index`.
"""
widget = self.widgets()[index]
frame = self.__widgetFrame(widget)
frame.setTitle(title)
def setIcon(self, index, icon):
widget = self.widgets()[index]
frame = self.__widgetFrame(widget)
frame.setIcon(icon)
def dropEvent(self, event):
"""Reimplemented."""
layout = self.__flowlayout
index = self.__insertIndexAt(self.mapFromGlobal(QCursor.pos()))
if event.mimeData().hasFormat("application/x-internal-move") and \
event.source() is self:
# Complete the internal move
frame, oldindex, _ = self.__dragstart
# Remove the drop indicator spacer item before re-inserting
# the frame
self.__setDropIndicatorAt(None)
if index > oldindex:
index = index - 1
if index != oldindex:
item = layout.takeAt(oldindex)
assert item.widget() is frame
layout.insertWidget(index, frame)
self.widgetMoved.emit(oldindex, index)
event.accept()
self.__dragstart = None, None, None
def dragEnterEvent(self, event):
"""Reimplemented."""
if event.mimeData().hasFormat("application/x-internal-move") and \
event.source() is self:
assert self.__dragstart[0] is not None
event.acceptProposedAction()
def dragMoveEvent(self, event):
"""Reimplemented."""
pos = self.mapFromGlobal(QCursor.pos())
self.__setDropIndicatorAt(pos)
def dragLeaveEvent(self, event):
"""Reimplemented."""
self.__setDropIndicatorAt(None)
def eventFilter(self, obj, event):
"""Reimplemented."""
if isinstance(obj, SequenceFlow.Frame) and obj.parent() is self:
etype = event.type()
if etype == QEvent.MouseButtonPress and \
event.button() == Qt.LeftButton:
# Is the mouse press on the dock title bar
# (assume everything above obj.widget is a title bar)
# TODO: Get the proper title bar geometry.
if event.pos().y() < obj.widget().y():
index = self.indexOf(obj.widget())
self.__dragstart = (obj, index, event.pos())
elif etype == QEvent.MouseMove and \
event.buttons() & Qt.LeftButton and \
obj is self.__dragstart[0]:
_, _, down = self.__dragstart
if (down - event.pos()).manhattanLength() >= \
QApplication.startDragDistance():
self.__startInternalDrag(obj, event.pos())
self.__dragstart = None, None, None
return True
elif etype == QEvent.MouseButtonRelease and \
event.button() == Qt.LeftButton and \
self.__dragstart[0] is obj:
self.__dragstart = None, None, None
return super().eventFilter(obj, event)
def __setDropIndicatorAt(self, pos):
# find the index where drop at pos would insert.
index = -1
layout = self.__flowlayout
if pos is not None:
index = self.__insertIndexAt(pos)
spacer = self.__dropindicator
currentindex = self.layout_index_of(layout, spacer)
if currentindex != -1:
item = layout.takeAt(currentindex)
assert item is spacer
if currentindex < index:
index -= 1
if index != -1:
layout.insertItem(index, spacer)
def __insertIndexAt(self, pos):
y = pos.y()
midpoints = [item.widget().geometry().center().y()
for item in self.layout_iter(self.__flowlayout)
if item.widget() is not None]
index = bisect.bisect_left(midpoints, y)
return index
def __startInternalDrag(self, frame, hotSpot=None):
drag = QDrag(self)
pixmap = QPixmap(frame.size())
frame.render(pixmap)
transparent = QPixmap(pixmap.size())
transparent.fill(Qt.transparent)
painter = QtGui.QPainter(transparent)
painter.setOpacity(0.35)
painter.drawPixmap(0, 0, pixmap.width(), pixmap.height(), pixmap)
painter.end()
drag.setPixmap(transparent)
if hotSpot is not None:
drag.setHotSpot(hotSpot)
mime = QMimeData()
mime.setData("application/x-internal-move", "")
drag.setMimeData(mime)
return drag.exec_(Qt.MoveAction)
def __widgetFrame(self, widget):
layout = self.__flowlayout
for item in self.layout_iter(layout):
if item.widget() is not None and \
isinstance(item.widget(), SequenceFlow.Frame) and \
item.widget().widget() is widget:
return item.widget()
else:
return None
def __closeRequested(self):
frame = self.sender()
index = self.indexOf(frame.widget())
self.widgetCloseRequested.emit(index)
@staticmethod
def layout_iter(layout):
return (layout.itemAt(i) for i in range(layout.count()))
@staticmethod
def layout_index_of(layout, item):
for i, item1 in enumerate(SequenceFlow.layout_iter(layout)):
if item == item1:
return i
return -1
class OWPreprocess(widget.OWWidget):
    """Widget for building a data preprocessing pipeline from a palette of
    available preprocessor actions and applying it to the input data.
    """
    name = "Preprocess"
    description = "Construct a data preprocessing pipeline."
    icon = "icons/Preprocess.svg"
    priority = 2105
    inputs = [("Data", Orange.data.Table, "set_data")]
    outputs = [("Preprocessor", preprocess.preprocess.Preprocess),
               ("Preprocessed Data", Orange.data.Table)]
    # Serialized preprocessor pipeline (see `save`/`load`).
    storedsettings = settings.Setting({})
    autocommit = settings.Setting(False)

    def __init__(self):
        super().__init__()
        self.data = None
        # True while a deferred `apply` is pending (see commit/customEvent).
        self._invalidated = False
        # List of available preprocessors (DescriptionRole : Description)
        self.preprocessors = QStandardItemModel()

        def mimeData(indexlist):
            # Encode the dragged preprocessor's qualified name so a drop
            # onto the pipeline model can recreate it.
            assert len(indexlist) == 1
            index = indexlist[0]
            qname = index.data(DescriptionRole).qualname
            m = QMimeData()
            m.setData("application/x-qwidget-ref", qname)
            return m
        # TODO: Fix this (subclass even if just to pass a function
        # for mimeData delegate)
        self.preprocessors.mimeData = mimeData
        box = gui.widgetBox(self.controlArea, "Preprocessors")
        self.preprocessorsView = view = QListView(
            selectionMode=QListView.SingleSelection,
            dragEnabled=True,
            dragDropMode=QListView.DragOnly
        )
        view.setModel(self.preprocessors)
        view.activated.connect(self.__activated)
        box.layout().addWidget(view)
        ####
        self._qname2ppdef = {ppdef.qualname: ppdef for ppdef in PREPROCESSORS}
        # List of 'selected' preprocessors and their parameters.
        self.preprocessormodel = None
        self.flow_view = SequenceFlow()
        self.controler = Controller(self.flow_view, parent=self)
        # Overlay shown over the (empty) flow view with a usage hint.
        self.overlay = OverlayWidget(self)
        self.overlay.setAttribute(Qt.WA_TransparentForMouseEvents)
        self.overlay.setWidget(self.flow_view)
        self.overlay.setLayout(QVBoxLayout())
        self.overlay.layout().addWidget(
            QtGui.QLabel("Drag items from the list on the left",
                         wordWrap=True))
        self.scroll_area = QtGui.QScrollArea(
            verticalScrollBarPolicy=Qt.ScrollBarAlwaysOn
        )
        self.scroll_area.viewport().setAcceptDrops(True)
        self.scroll_area.setWidget(self.flow_view)
        self.scroll_area.setWidgetResizable(True)
        self.mainArea.layout().addWidget(self.scroll_area)
        # Watch the flow view for layout changes (see eventFilter).
        self.flow_view.installEventFilter(self)
        box = gui.widgetBox(self.controlArea, "Output")
        gui.auto_commit(box, self, "autocommit", "Commit", box=False)
        self._initialize()

    def _initialize(self):
        # Populate the palette of available preprocessors.
        for pp_def in PREPROCESSORS:
            description = pp_def.description
            if description.icon:
                icon = QIcon(description.icon)
            else:
                icon = QIcon()
            item = QStandardItem(icon, description.title)
            item.setToolTip(description.summary or "")
            item.setData(pp_def, DescriptionRole)
            item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable |
                          Qt.ItemIsDragEnabled)
            self.preprocessors.appendRow([item])
        # Restore the saved pipeline; fall back to an empty one on any
        # deserialization error.
        try:
            model = self.load(self.storedsettings)
        except Exception:
            model = self.load({})
        self.set_model(model)
        if not model.rowCount():
            # enforce default width constraint if no preprocessors
            # are instantiated (if the model is not empty the constraints
            # will be triggered by LayoutRequest event on the `flow_view`)
            self.__update_size_constraint()
        self.apply()

    def load(self, saved):
        """Load a preprocessor list from a dict."""
        # NOTE(review): `name` is read but not used further here.
        name = saved.get("name", "")
        preprocessors = saved.get("preprocessors", [])
        model = StandardItemModel()

        def dropMimeData(data, action, row, column, parent):
            # Accept drops of palette items (qualified preprocessor names)
            # and instantiate a fresh pipeline entry with empty parameters.
            if data.hasFormat("application/x-qwidget-ref") and \
                    action == Qt.CopyAction:
                qname = bytes(data.data("application/x-qwidget-ref")).decode()
                ppdef = self._qname2ppdef[qname]
                item = QStandardItem(ppdef.description.title)
                item.setData({}, ParametersRole)
                item.setData(ppdef.description.title, Qt.DisplayRole)
                item.setData(ppdef, DescriptionRole)
                self.preprocessormodel.insertRow(row, [item])
                return True
            else:
                return False
        model.dropMimeData = dropMimeData
        # Rebuild one model item per saved (qualname, params) pair.
        for qualname, params in preprocessors:
            pp_def = self._qname2ppdef[qualname]
            description = pp_def.description
            item = QStandardItem(description.title)
            if description.icon:
                icon = QIcon(description.icon)
            else:
                icon = QIcon()
            item.setIcon(icon)
            item.setToolTip(description.summary)
            item.setData(pp_def, DescriptionRole)
            item.setData(params, ParametersRole)
            model.appendRow(item)
        return model

    def save(self, model):
        """Save the preprocessor list to a dict."""
        d = {"name": ""}
        preprocessors = []
        for i in range(model.rowCount()):
            item = model.item(i)
            pp_def = item.data(DescriptionRole)
            params = item.data(ParametersRole)
            preprocessors.append((pp_def.qualname, params))
        d["preprocessors"] = preprocessors
        return d

    def set_model(self, ppmodel):
        # Replace the current pipeline model, moving signal connections
        # from the old model (if any) to the new one.
        if self.preprocessormodel:
            self.preprocessormodel.dataChanged.disconnect(self.__on_modelchanged)
            self.preprocessormodel.rowsInserted.disconnect(self.__on_modelchanged)
            self.preprocessormodel.rowsRemoved.disconnect(self.__on_modelchanged)
            self.preprocessormodel.rowsMoved.disconnect(self.__on_modelchanged)
            self.preprocessormodel.deleteLater()
        self.preprocessormodel = ppmodel
        self.controler.setModel(ppmodel)
        if ppmodel is not None:
            self.preprocessormodel.dataChanged.connect(self.__on_modelchanged)
            self.preprocessormodel.rowsInserted.connect(self.__on_modelchanged)
            self.preprocessormodel.rowsRemoved.connect(self.__on_modelchanged)
            self.preprocessormodel.rowsMoved.connect(self.__on_modelchanged)
        self.__update_overlay()

    def __update_overlay(self):
        # Show the hint overlay only while the pipeline is empty.
        if self.preprocessormodel is None or \
                self.preprocessormodel.rowCount() == 0:
            self.overlay.setWidget(self.flow_view)
            self.overlay.show()
        else:
            self.overlay.setWidget(None)
            self.overlay.hide()

    def __on_modelchanged(self):
        # Any pipeline change updates the overlay and schedules a re-apply.
        self.__update_overlay()
        self.commit()

    @check_sql_input
    def set_data(self, data=None):
        """Set the input data set."""
        self.data = data

    def handleNewSignals(self):
        self.apply()

    def __activated(self, index):
        # Double-click/Enter on a palette item: append a new pipeline entry
        # with default (empty) parameters.
        item = self.preprocessors.itemFromIndex(index)
        action = item.data(DescriptionRole)
        item = QStandardItem()
        item.setData({}, ParametersRole)
        item.setData(action.description.title, Qt.DisplayRole)
        item.setData(action, DescriptionRole)
        self.preprocessormodel.appendRow([item])

    def buildpreproc(self):
        # Instantiate the pipeline: a single preprocessor, or a
        # PreprocessorList wrapping all of them (also when empty).
        plist = []
        for i in range(self.preprocessormodel.rowCount()):
            item = self.preprocessormodel.item(i)
            desc = item.data(DescriptionRole)
            params = item.data(ParametersRole)
            if not isinstance(params, dict):
                params = {}
            create = desc.viewclass.createinstance
            plist.append(create(params))
        if len(plist) == 1:
            return plist[0]
        else:
            return preprocess.preprocess.PreprocessorList(plist)

    def apply(self):
        # Sync the model into storedsettings on every apply.
        self.storeSpecificSettings()
        preprocessor = self.buildpreproc()
        if self.data is not None:
            self.error(0)
            try:
                data = preprocessor(self.data)
            except ValueError as e:
                # Surface the preprocessing error on the widget and abort
                # without sending any outputs.
                self.error(0, str(e))
                return
        else:
            data = None
        self.send("Preprocessor", preprocessor)
        self.send("Preprocessed Data", data)

    def commit(self):
        # Coalesce rapid successive changes into one deferred `apply`
        # via a posted User event (handled in customEvent).
        if not self._invalidated:
            self._invalidated = True
            QApplication.postEvent(self, QEvent(QEvent.User))

    def customEvent(self, event):
        if event.type() == QEvent.User and self._invalidated:
            self._invalidated = False
            self.apply()

    def eventFilter(self, receiver, event):
        # Re-evaluate the width constraint after the flow view re-layouts.
        if receiver is self.flow_view and event.type() == QEvent.LayoutRequest:
            QTimer.singleShot(0, self.__update_size_constraint)
        return super().eventFilter(receiver, event)

    def storeSpecificSettings(self):
        """Reimplemented."""
        self.storedsettings = self.save(self.preprocessormodel)
        super().storeSpecificSettings()

    def saveSettings(self):
        """Reimplemented."""
        self.storedsettings = self.save(self.preprocessormodel)
        super().saveSettings()

    def onDeleteWidget(self):
        self.data = None
        self.set_model(None)
        super().onDeleteWidget()

    @Slot()
    def __update_size_constraint(self):
        # Update minimum width constraint on the scroll area containing
        # the 'instantiated' preprocessor list (to avoid the horizontal
        # scroll bar).
        sh = self.flow_view.minimumSizeHint()
        scroll_width = self.scroll_area.verticalScrollBar().width()
        self.scroll_area.setMinimumWidth(
            min(max(sh.width() + scroll_width + 2, self.controlArea.width()),
                520))

    def sizeHint(self):
        # Prefer at least 500px of height.
        sh = super().sizeHint()
        return sh.expandedTo(QSize(sh.width(), 500))
def test_main(argv=sys.argv):
    """Run the OWPreprocess widget as a standalone application.

    An optional dataset name/path may be given as the first command line
    argument; otherwise the "brown-selected" dataset is used. Returns the
    Qt application's exit code.
    """
    argv = list(argv)
    app = QtGui.QApplication(argv)
    filename = argv[1] if len(argv) > 1 else "brown-selected"
    ow = OWPreprocess()
    ow.set_data(Orange.data.Table(filename))
    ow.show()
    ow.raise_()
    rval = app.exec_()
    # Tear down: clear inputs, persist settings, release resources.
    ow.set_data(None)
    ow.saveSettings()
    ow.onDeleteWidget()
    return rval
# Allow running this module directly as a quick manual test.
if __name__ == "__main__":
    sys.exit(test_main())
| {
"content_hash": "d314889994aece5dae6470e76bef5e82",
"timestamp": "",
"source": "github",
"line_count": 1922,
"max_line_length": 95,
"avg_line_length": 32.89542143600416,
"alnum_prop": 0.5947647291419533,
"repo_name": "kwikadi/orange3",
"id": "722e4d78d23a761f637a24a2e9e686c95eb94d01",
"size": "63225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Orange/widgets/data/owpreprocess.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "1985"
},
{
"name": "JavaScript",
"bytes": "3025"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "19900"
},
{
"name": "Python",
"bytes": "4052846"
},
{
"name": "Shell",
"bytes": "39117"
}
],
"symlink_target": ""
} |
def task(*fn, **kwargs):
    """Mark a function as a task by attaching a ``task_metadata`` dict.

    Usable both bare (``@task`` — metadata is empty) and parameterized
    (``@task(input=..., output=...)`` — metadata holds the keyword
    arguments). Returns the decorated function unchanged either way.
    """
    if fn:
        # Bare usage: @task — fn[0] is the decorated function itself.
        decorated = fn[0]
        decorated.task_metadata = {}
        return decorated

    # Parameterized usage: @task(...) — return a decorator that records
    # the keyword arguments as the function's metadata.
    def wrap(function):
        function.task_metadata = kwargs
        return function

    return wrap
@task
def simple():
    """Simplest possible task: bare decorator, no metadata, just prints."""
    print("thats all folks")
@task(output=['my_input.txt'])
def pre(to_create):
    """Write placeholder content to the first declared output file."""
    with open(to_create[0], 'w') as fp:
        fp.write('foo')
@task(output=['out1.txt', 'out2.txt'])
def create(to_be_created):
    """Demo task: only announces the files it is supposed to create."""
    print("I should create these files: %s" % " ".join(to_be_created))
@task(input=['my_input.txt'], output=['my_output_result.txt'])
def process(in_, out_):
    """Demo task: announces which input it reads and which output it writes."""
    print("processing %s" % in_[0])
    print("creating %s" % out_[0])
| {
"content_hash": "9c5dacff0825b92571bac21a9a5f50ef",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 70,
"avg_line_length": 21.742857142857144,
"alnum_prop": 0.6005256241787122,
"repo_name": "pydoit/doit",
"id": "a33448a7c8406c38cf82749fec19c97dd7e13b1d",
"size": "761",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "doc/samples/my_tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "442"
},
{
"name": "Python",
"bytes": "561336"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
@click.command()
@click.argument(
    "packages",
    metavar="PACKAGE...",
    nargs=-1,
    required=True,
    autocompletion=click_util.completion_filename,
)
@click.option(
    "-U",
    "--upgrade",
    help="Upgrade specified packages to the newest available version.",
    is_flag=True,
)
@click.option(
    "--reinstall",
    # Fixed typo in the user-facing help text ("Resinstall" -> "Reinstall").
    help="Reinstall the package if it's already installed. Implies --upgrade.",
    is_flag=True,
)
@click.option("--no-cache", help="Don't use cached packages.", is_flag=True)
@click.option("--no-deps", help="Don't install dependencies.", is_flag=True)
@click.option("--pre", help="Install pre-release versions.", is_flag=True)
@click.option(
    "-t",
    "--target",
    metavar="DIR",
    help="Install package and requirements in DIR.",
    autocompletion=click_util.completion_dir,
)
@click_util.use_args
def install(args):
    """Install one or more packages."""
    # Import lazily so the CLI starts fast and avoids import cycles.
    from . import packages_impl

    packages_impl.install_packages(args)
| {
"content_hash": "72e3773cd8a24d4b01568e22cc5cb34a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 25.3953488372093,
"alnum_prop": 0.674908424908425,
"repo_name": "guildai/guild",
"id": "93bf3518add5a04100ba2cdcfa56b808b7e78e29",
"size": "1673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guild/commands/install.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "416"
},
{
"name": "JavaScript",
"bytes": "29682"
},
{
"name": "Makefile",
"bytes": "2621"
},
{
"name": "Python",
"bytes": "736181"
},
{
"name": "Shell",
"bytes": "1074"
},
{
"name": "Vue",
"bytes": "48469"
}
],
"symlink_target": ""
} |
import altair as alt
from . import plotting, themes
from ._core import FramePlotMethods, SeriesPlotMethods
from .plotting import scatter_matrix, andrews_curves, parallel_coordinates, lag_plot
# Package version string (PEP 440 development release).
__version__ = '0.2.01.dev0'
| {
"content_hash": "2fcf6ea59eb325d1d376939bbf9ec42d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 84,
"avg_line_length": 36.833333333333336,
"alnum_prop": 0.7828054298642534,
"repo_name": "jakevdp/pdvega",
"id": "7e5fa9c8ae14f4c267c8f0abc285fcf5b93c7de6",
"size": "236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdvega/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "311"
},
{
"name": "Python",
"bytes": "77892"
}
],
"symlink_target": ""
} |
import asyncio
import atexit
from contextlib import suppress
import logging
import gc
import os
import signal
import sys
import warnings
import click
import dask
from dask.system import CPU_COUNT
from distributed import Nanny, Security
from distributed.cli.utils import check_python_3, install_signal_handlers
from distributed.comm import get_address_host_port
from distributed.preloading import validate_preload_argv
from distributed.proctitle import (
enable_proctitle_on_children,
enable_proctitle_on_current,
)
from distributed.utils import deserialize_for_cli, import_term
from tlz import valmap
from tornado.ioloop import IOLoop, TimeoutError
# Module-level logger for the dask-worker CLI.
logger = logging.getLogger("distributed.dask_worker")
# Click parameter type for PEM files: path must exist, resolved to absolute.
pem_file_option_type = click.Path(exists=True, resolve_path=True)
@click.command(context_settings=dict(ignore_unknown_options=True))
@click.argument("scheduler", type=str, required=False)
@click.option(
    "--tls-ca-file",
    type=pem_file_option_type,
    default=None,
    help="CA cert(s) file for TLS (in PEM format)",
)
@click.option(
    "--tls-cert",
    type=pem_file_option_type,
    default=None,
    help="certificate file for TLS (in PEM format)",
)
@click.option(
    "--tls-key",
    type=pem_file_option_type,
    default=None,
    help="private key file for TLS (in PEM format)",
)
@click.option(
    "--worker-port",
    default=None,
    help="Serving computation port, defaults to random. "
    "When creating multiple workers with --nprocs, a sequential range of "
    "worker ports may be used by specifying the first and last available "
    "ports like <first-port>:<last-port>. For example, --worker-port=3000:3026 "
    "will use ports 3000, 3001, ..., 3025, 3026.",
)
@click.option(
    "--nanny-port",
    default=None,
    help="Serving nanny port, defaults to random. "
    "When creating multiple nannies with --nprocs, a sequential range of "
    "nanny ports may be used by specifying the first and last available "
    "ports like <first-port>:<last-port>. For example, --nanny-port=3000:3026 "
    "will use ports 3000, 3001, ..., 3025, 3026.",
)
@click.option(
    "--bokeh-port", type=int, default=None, help="Deprecated.  See --dashboard-address"
)
@click.option(
    "--dashboard-address",
    type=str,
    default=":0",
    help="Address on which to listen for diagnostics dashboard",
)
@click.option(
    "--dashboard/--no-dashboard",
    "dashboard",
    default=True,
    required=False,
    help="Launch the Dashboard [default: --dashboard]",
)
@click.option(
    "--bokeh/--no-bokeh",
    "bokeh",
    default=None,
    help="Deprecated.  See --dashboard/--no-dashboard.",
    required=False,
)
@click.option(
    "--listen-address",
    type=str,
    default=None,
    help="The address to which the worker binds. Example: tcp://0.0.0.0:9000",
)
@click.option(
    "--contact-address",
    type=str,
    default=None,
    help="The address the worker advertises to the scheduler for "
    "communication with it and other workers. "
    "Example: tcp://127.0.0.1:9000",
)
@click.option(
    "--host",
    type=str,
    default=None,
    help="Serving host. Should be an ip address that is"
    " visible to the scheduler and other workers. "
    "See --listen-address and --contact-address if you "
    "need different listen and contact addresses. "
    "See --interface.",
)
@click.option(
    "--interface", type=str, default=None, help="Network interface like 'eth0' or 'ib0'"
)
@click.option(
    "--protocol", type=str, default=None, help="Protocol like tcp, tls, or ucx"
)
@click.option("--nthreads", type=int, default=0, help="Number of threads per process.")
@click.option(
    "--nprocs",
    type=int,
    default=1,
    show_default=True,
    help="Number of worker processes to launch.",
)
@click.option(
    "--name",
    type=str,
    default=None,
    help="A unique name for this worker like 'worker-1'. "
    "If used with --nprocs then the process number "
    "will be appended like name-0, name-1, name-2, ...",
)
@click.option(
    "--memory-limit",
    default="auto",
    show_default=True,
    help="Bytes of memory per process that the worker can use. "
    "This can be an integer (bytes), "
    "float (fraction of total system memory), "
    "string (like 5GB or 5000M), "
    "'auto', or zero for no memory management",
)
@click.option(
    "--reconnect/--no-reconnect",
    default=True,
    help="Reconnect to scheduler if disconnected [default: --reconnect]",
)
@click.option(
    "--nanny/--no-nanny",
    default=True,
    help="Start workers in nanny process for management [default: --nanny]",
)
@click.option("--pid-file", type=str, default="", help="File to write the process PID")
@click.option(
    "--local-directory", default=None, type=str, help="Directory to place worker files"
)
@click.option(
    "--resources",
    type=str,
    default=None,
    help='Resources for task constraints like "GPU=2 MEM=10e9". '
    "Resources are applied separately to each worker process "
    "(only relevant when starting multiple worker processes with '--nprocs').",
)
@click.option(
    "--scheduler-file",
    type=str,
    default=None,
    help="Filename to JSON encoded scheduler information. "
    "Use with dask-scheduler --scheduler-file",
)
@click.option(
    "--death-timeout",
    type=str,
    default=None,
    help="Seconds to wait for a scheduler before closing",
)
@click.option(
    "--dashboard-prefix", type=str, default="", help="Prefix for the dashboard"
)
@click.option(
    "--lifetime",
    type=str,
    default=None,
    help="If provided, shut down the worker after this duration.",
)
@click.option(
    "--lifetime-stagger",
    type=str,
    default="0 seconds",
    show_default=True,
    help="Random amount by which to stagger lifetime values",
)
@click.option(
    "--worker-class",
    type=str,
    default="dask.distributed.Worker",
    show_default=True,
    help="Worker class used to instantiate workers from.",
)
@click.option(
    "--lifetime-restart/--no-lifetime-restart",
    "lifetime_restart",
    default=False,
    show_default=True,
    required=False,
    help="Whether or not to restart the worker after the lifetime lapses. "
    "This assumes that you are using the --lifetime and --nanny keywords",
)
@click.option(
    "--preload",
    type=str,
    multiple=True,
    is_eager=True,
    help="Module that should be loaded by each worker process "
    'like "foo.bar" or "/path/to/foo.py"',
)
@click.argument(
    "preload_argv", nargs=-1, type=click.UNPROCESSED, callback=validate_preload_argv
)
@click.option(
    "--preload-nanny",
    type=str,
    multiple=True,
    is_eager=True,
    help="Module that should be loaded by each nanny "
    'like "foo.bar" or "/path/to/foo.py"',
)
@click.version_option()
def main(
    scheduler,
    host,
    worker_port,
    listen_address,
    contact_address,
    nanny_port,
    nthreads,
    nprocs,
    nanny,
    name,
    pid_file,
    resources,
    dashboard,
    bokeh,
    bokeh_port,
    scheduler_file,
    dashboard_prefix,
    tls_ca_file,
    tls_cert,
    tls_key,
    dashboard_address,
    worker_class,
    preload_nanny,
    **kwargs
):
    # NOTE: no docstring on purpose -- click would surface it as the
    # command's --help text.
    # Relax the GC thresholds to reduce collection overhead.
    g0, g1, g2 = gc.get_threshold()  # https://github.com/dask/distributed/issues/1653
    gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)
    enable_proctitle_on_current()
    enable_proctitle_on_children()
    # Map deprecated --bokeh* options onto their current equivalents.
    if bokeh_port is not None:
        warnings.warn(
            "The --bokeh-port flag has been renamed to --dashboard-address. "
            "Consider adding ``--dashboard-address :%d`` " % bokeh_port
        )
        dashboard_address = bokeh_port
    if bokeh is not None:
        warnings.warn(
            "The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. "
        )
        dashboard = bokeh
    # Build the TLS security configuration from whichever options were given.
    sec = Security(
        **{
            k: v
            for k, v in [
                ("tls_ca_file", tls_ca_file),
                ("tls_worker_cert", tls_cert),
                ("tls_worker_key", tls_key),
            ]
            if v is not None
        }
    )
    # Validate mutually-incompatible option combinations; exit(1) on misuse.
    if nprocs > 1 and not nanny:
        logger.error(
            "Failed to launch worker.  You cannot use the --no-nanny argument when nprocs > 1."
        )
        sys.exit(1)
    if contact_address and not listen_address:
        logger.error(
            "Failed to launch worker. "
            "Must specify --listen-address when --contact-address is given"
        )
        sys.exit(1)
    if nprocs > 1 and listen_address:
        logger.error(
            "Failed to launch worker. "
            "You cannot specify --listen-address when nprocs > 1."
        )
        sys.exit(1)
    if (worker_port or host) and listen_address:
        logger.error(
            "Failed to launch worker. "
            "You cannot specify --listen-address when --worker-port or --host is given."
        )
        sys.exit(1)
    try:
        if listen_address:
            (host, worker_port) = get_address_host_port(listen_address, strict=True)
        if contact_address:
            # we only need this to verify it is getting parsed
            (_, _) = get_address_host_port(contact_address, strict=True)
        else:
            # if contact address is not present we use the listen_address for contact
            contact_address = listen_address
    except ValueError as e:
        logger.error("Failed to launch worker. " + str(e))
        sys.exit(1)
    # With a nanny, the nanny owns the listening port; otherwise the worker.
    if nanny:
        port = nanny_port
    else:
        port = worker_port
    if not nthreads:
        # Default: divide available CPUs among the worker processes.
        nthreads = CPU_COUNT // nprocs
    if pid_file:
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            # Best-effort removal of the PID file on interpreter exit.
            if os.path.exists(pid_file):
                os.remove(pid_file)

        atexit.register(del_pid_file)
    # Parse "GPU=2 MEM=10e9"-style resource declarations into {name: float}.
    if resources:
        resources = resources.replace(",", " ").split()
        resources = dict(pair.split("=") for pair in resources)
        resources = valmap(float, resources)
    else:
        resources = None
    loop = IOLoop.current()
    worker_class = import_term(worker_class)
    if nanny:
        kwargs["worker_class"] = worker_class
        kwargs["preload_nanny"] = preload_nanny
    if nanny:
        kwargs.update({"worker_port": worker_port, "listen_address": listen_address})
        t = Nanny
    else:
        if nanny_port:
            kwargs["service_ports"] = {"nanny": nanny_port}
        t = worker_class
    # A scheduler must be reachable one way or another (CLI arg, file, config).
    if (
        not scheduler
        and not scheduler_file
        and dask.config.get("scheduler-address", None) is None
    ):
        raise ValueError(
            "Need to provide scheduler address like\n"
            "dask-worker SCHEDULER_ADDRESS:8786"
        )
    # Allow numeric worker names to be passed as ints.
    with suppress(TypeError, ValueError):
        name = int(name)
    if "DASK_INTERNAL_INHERIT_CONFIG" in os.environ:
        config = deserialize_for_cli(os.environ["DASK_INTERNAL_INHERIT_CONFIG"])
        # Update the global config given priority to the existing global config
        dask.config.update(dask.config.global_config, config, priority="old")
    # Instantiate one Nanny (or worker) per requested process; names get a
    # "-<i>" suffix when several processes share a user-provided name.
    nannies = [
        t(
            scheduler,
            scheduler_file=scheduler_file,
            nthreads=nthreads,
            loop=loop,
            resources=resources,
            security=sec,
            contact_address=contact_address,
            host=host,
            port=port,
            dashboard=dashboard,
            dashboard_address=dashboard_address,
            name=name
            if nprocs == 1 or name is None or name == ""
            else str(name) + "-" + str(i),
            **kwargs
        )
        for i in range(nprocs)
    ]

    async def close_all():
        # Unregister all workers from scheduler
        if nanny:
            await asyncio.gather(*[n.close(timeout=2) for n in nannies])

    signal_fired = False

    def on_signal(signum):
        # Signal handler: remember we were signalled (suppresses the
        # startup-timeout message below) and shut everything down.
        nonlocal signal_fired
        signal_fired = True
        if signum != signal.SIGINT:
            logger.info("Exiting on signal %d", signum)
        return asyncio.ensure_future(close_all())

    async def run():
        # Start all nannies/workers, then block until they have finished.
        await asyncio.gather(*nannies)
        await asyncio.gather(*[n.finished() for n in nannies])

    install_signal_handlers(loop, cleanup=on_signal)
    try:
        loop.run_sync(run)
    except TimeoutError:
        # We already log the exception in nanny / worker. Don't do it again.
        if not signal_fired:
            logger.info("Timed out starting worker")
        sys.exit(1)
    except KeyboardInterrupt:
        pass
    finally:
        logger.info("End worker")
def go():
    """Entry point for the ``dask-worker`` console script."""
    check_python_3()
    main()
# Support running the module directly (python -m / script invocation).
if __name__ == "__main__":
    go()
| {
"content_hash": "b8fb4fd6895e916ee9f7786deab3cdca",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 95,
"avg_line_length": 28.008849557522122,
"alnum_prop": 0.6191943127962085,
"repo_name": "blaze/distributed",
"id": "fcb10b510922562a3c57f8793f08b9cf5e3fd9f0",
"size": "12660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distributed/cli/dask_worker.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "511624"
},
{
"name": "Shell",
"bytes": "1120"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from future.builtins import str
from future.utils import with_metaclass
from json import loads
try:
from urllib.request import urlopen
from urllib.parse import urlencode
except ImportError:
from urllib import urlopen, urlencode
from django.contrib.contenttypes.generic import GenericForeignKey
from django.db import models
from django.db.models.base import ModelBase
from django.db.models.signals import post_save
from django.template.defaultfilters import truncatewords_html
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import strip_tags
from django.utils.timesince import timesince
from django.utils.timezone import now
from django.utils.translation import ugettext, ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.managers import DisplayableManager, CurrentSiteManager
from mezzanine.generic.fields import KeywordsField
from mezzanine.utils.html import TagCloser
from mezzanine.utils.models import base_concrete_model, get_user_model_name
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import admin_url, slugify, unique_slug
user_model_name = get_user_model_name()
class SiteRelated(models.Model):
    """
    Abstract model for all things site-related. Adds a foreignkey to
    Django's ``Site`` model, and filters by site with all querysets.
    See ``mezzanine.utils.sites.current_site_id`` for implementation
    details.
    """
    # Default manager restricts every queryset to the current site.
    objects = CurrentSiteManager()
    class Meta:
        abstract = True
    # Hidden from forms: the value is assigned automatically in ``save``.
    site = models.ForeignKey("sites.Site", editable=False)
    def save(self, update_site=False, *args, **kwargs):
        """
        Set the site to the current site when the record is first
        created, or the ``update_site`` argument is explicitly set
        to ``True``.
        """
        # ``self.id`` is falsy for unsaved records, so new instances
        # always receive the current site.
        if update_site or not self.id:
            self.site_id = current_site_id()
        super(SiteRelated, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Slugged(SiteRelated):
    """
    Abstract model that handles auto-generating slugs. Each slugged
    object is also affiliated with a specific site object.
    """
    title = models.CharField(_("Title"), max_length=500)
    slug = models.CharField(_("URL"), max_length=2000, blank=True, null=True,
        help_text=_("Leave blank to have the URL auto-generated from "
                    "the title."))
    class Meta:
        abstract = True
    def __str__(self):
        return self.title
    def save(self, *args, **kwargs):
        """
        If no slug is provided, generates one before saving.
        """
        if not self.slug:
            self.slug = self.generate_unique_slug()
        super(Slugged, self).save(*args, **kwargs)
    def generate_unique_slug(self):
        """
        Create a unique slug by passing the result of get_slug() to
        utils.urls.unique_slug, which appends an index if necessary.
        """
        # For custom content types, use the ``Page`` instance for
        # slug lookup.
        concrete_model = base_concrete_model(Slugged, self)
        # Exclude self so re-saving doesn't collide with our own slug.
        slug_qs = concrete_model.objects.exclude(id=self.id)
        return unique_slug(slug_qs, "slug", self.get_slug())
    def get_slug(self):
        """
        Allows subclasses to implement their own slug creation logic.
        """
        attr = "title"
        if settings.USE_MODELTRANSLATION:
            from modeltranslation.utils import build_localized_fieldname
            attr = build_localized_fieldname(attr, settings.LANGUAGE_CODE)
        # Get self.title_xx where xx is the default language, if any.
        # Get self.title otherwise.
        return slugify(getattr(self, attr, None) or self.title)
    def admin_link(self):
        # HTML link to the object's live page, for admin list displays
        # (``allow_tags`` lets the admin render it unescaped).
        return "<a href='%s'>%s</a>" % (self.get_absolute_url(),
                                        ugettext("View on site"))
    admin_link.allow_tags = True
    admin_link.short_description = ""
class MetaData(models.Model):
    """
    Abstract model that provides meta data for content.
    """
    _meta_title = models.CharField(_("Title"), null=True, blank=True,
        max_length=500,
        help_text=_("Optional title to be used in the HTML title tag. "
                    "If left blank, the main title field will be used."))
    description = models.TextField(_("Description"), blank=True)
    gen_description = models.BooleanField(_("Generate description"),
        help_text=_("If checked, the description will be automatically "
                    "generated from content. Uncheck if you want to manually "
                    "set a custom description."), default=True)
    keywords = KeywordsField(verbose_name=_("Keywords"))
    class Meta:
        abstract = True
    def save(self, *args, **kwargs):
        """
        Set the description field on save.
        """
        if self.gen_description:
            # Regenerate on every save while auto-generation is enabled,
            # clobbering any manual edits to ``description``.
            self.description = strip_tags(self.description_from_content())
        super(MetaData, self).save(*args, **kwargs)
    def meta_title(self):
        """
        Accessor for the optional ``_meta_title`` field, which returns
        the string version of the instance if not provided.
        """
        return self._meta_title or str(self)
    def description_from_content(self):
        """
        Returns the first block or sentence of the first content-like
        field.
        """
        description = ""
        # Use the first RichTextField, or TextField if none found.
        for field_type in (RichTextField, models.TextField):
            if not description:
                for field in self._meta.fields:
                    if isinstance(field, field_type) and \
                            field.name != "description":
                        description = getattr(self, field.name)
                        if description:
                            # Apply the configured rich-text filters so
                            # the excerpt matches rendered output.
                            from mezzanine.core.templatetags.mezzanine_tags \
                                import richtext_filters
                            description = richtext_filters(description)
                            break
        # Fall back to the title if description couldn't be determined.
        if not description:
            description = str(self)
        # Strip everything after the first block or sentence.
        ends = ("</p>", "<br />", "<br/>", "<br>", "</ul>",
                "\n", ". ", "! ", "? ")
        for end in ends:
            pos = description.lower().find(end)
            if pos > -1:
                # TagCloser closes any tags left open by the cut.
                description = TagCloser(description[:pos]).html
                break
        else:
            # No terminator found at all: fall back to word truncation.
            description = truncatewords_html(description, 100)
        return description
class TimeStamped(models.Model):
    """
    Abstract mixin that stamps ``created`` on first save and refreshes
    ``updated`` on every save.
    """
    class Meta:
        abstract = True
    created = models.DateTimeField(null=True, editable=False)
    updated = models.DateTimeField(null=True, editable=False)
    def save(self, *args, **kwargs):
        """
        Record the save time, also using it as the creation time for
        records that have not been persisted yet.
        """
        timestamp = now()
        if not self.id:
            self.created = timestamp
        self.updated = timestamp
        super(TimeStamped, self).save(*args, **kwargs)
# Workflow states for ``Displayable.status``: per the field's help text,
# drafts are only shown to admin users; published items are public
# (subject to publish/expiry dates).
CONTENT_STATUS_DRAFT = 1
CONTENT_STATUS_PUBLISHED = 2
CONTENT_STATUS_CHOICES = (
    (CONTENT_STATUS_DRAFT, _("Draft")),
    (CONTENT_STATUS_PUBLISHED, _("Published")),
)
class Displayable(Slugged, MetaData, TimeStamped):
    """
    Abstract model that provides features of a visible page on the
    website such as publishing fields. Basis of Mezzanine pages,
    blog posts, and Cartridge products.
    """
    status = models.IntegerField(_("Status"),
        choices=CONTENT_STATUS_CHOICES, default=CONTENT_STATUS_PUBLISHED,
        help_text=_("With Draft chosen, will only be shown for admin users "
            "on the site."))
    publish_date = models.DateTimeField(_("Published from"),
        help_text=_("With Published chosen, won't be shown until this time"),
        blank=True, null=True)
    expiry_date = models.DateTimeField(_("Expires on"),
        help_text=_("With Published chosen, won't be shown after this time"),
        blank=True, null=True)
    short_url = models.URLField(blank=True, null=True)
    in_sitemap = models.BooleanField(_("Show in sitemap"), default=True)
    objects = DisplayableManager()
    # Field name -> weight mapping consumed by Mezzanine's search.
    search_fields = {"keywords": 10, "title": 5}
    class Meta:
        abstract = True
    def save(self, *args, **kwargs):
        """
        Set default for ``publish_date``. We can't use ``auto_now_add`` on
        the field as it will be blank when a blog post is created from
        the quick blog form in the admin dashboard.
        """
        if self.publish_date is None:
            self.publish_date = now()
        super(Displayable, self).save(*args, **kwargs)
    def get_admin_url(self):
        # URL of this object's change form in the admin.
        return admin_url(self, "change", self.id)
    def publish_date_since(self):
        """
        Returns the time since ``publish_date``.
        """
        return timesince(self.publish_date)
    publish_date_since.short_description = _("Published from")
    def get_absolute_url(self):
        """
        Raise an error if called on a subclass without
        ``get_absolute_url`` defined, to ensure all search results
        contains a URL.
        """
        name = self.__class__.__name__
        raise NotImplementedError("The model %s does not have "
                                  "get_absolute_url defined" % name)
    def set_short_url(self):
        """
        Sets the ``short_url`` attribute using the bit.ly credentials
        if they have been specified, and saves it. Used by the
        ``set_short_url_for`` template tag, and ``TweetableAdmin``.
        Always returns an empty string, presumably so the calling
        template tag renders nothing -- confirm against the tag.
        """
        if not self.short_url:
            from mezzanine.conf import settings
            settings.use_editable()
            # Default to the full absolute URL; only shortened below
            # when bit.ly credentials are configured.
            parts = (self.site.domain, self.get_absolute_url())
            self.short_url = "http://%s%s" % parts
            if settings.BITLY_ACCESS_TOKEN:
                url = "https://api-ssl.bit.ly/v3/shorten?%s" % urlencode({
                    "access_token": settings.BITLY_ACCESS_TOKEN,
                    "uri": self.short_url,
                })
                # Blocking network call; the API reports success via the
                # ``status_code`` field of the JSON payload.
                response = loads(urlopen(url).read().decode("utf-8"))
                if response["status_code"] == 200:
                    self.short_url = response["data"]["url"]
            self.save()
        return ""
    def _get_next_or_previous_by_publish_date(self, is_next, **kwargs):
        """
        Retrieves next or previous object by publish date. We implement
        our own version instead of Django's so we can hook into the
        published manager and concrete subclasses.
        Returns ``None`` when no adjacent object exists.
        """
        arg = "publish_date__gt" if is_next else "publish_date__lt"
        order = "publish_date" if is_next else "-publish_date"
        lookup = {arg: self.publish_date}
        concrete_model = base_concrete_model(Displayable, self)
        try:
            queryset = concrete_model.objects.published
        except AttributeError:
            # Manager has no ``published`` method: fall back to all().
            queryset = concrete_model.objects.all
        try:
            return queryset(**kwargs).filter(**lookup).order_by(order)[0]
        except IndexError:
            pass
    def get_next_by_publish_date(self, **kwargs):
        """
        Retrieves next object by publish date.
        """
        return self._get_next_or_previous_by_publish_date(True, **kwargs)
    def get_previous_by_publish_date(self, **kwargs):
        """
        Retrieves previous object by publish date.
        """
        return self._get_next_or_previous_by_publish_date(False, **kwargs)
class RichText(models.Model):
    """
    Provides a Rich Text field for managing general content and making
    it searchable.
    """
    content = RichTextField(_("Content"))
    # Consumed by Mezzanine's search machinery.
    search_fields = ("content",)
    class Meta:
        abstract = True
class OrderableBase(ModelBase):
    """
    Metaclass for ``Orderable``. If the model's inner ``Meta`` declares
    ``order_with_respect_to``, relocate it onto the model class itself,
    since leaving it on ``Meta`` causes errors when combined with
    ``ForeignKey("self")``. Also defaults ``Meta.ordering`` to the
    custom ``_order`` field when no ordering is declared.
    """
    def __new__(cls, name, bases, attrs):
        if "Meta" not in attrs:
            # Guarantee an inner Meta class exists to hang options on.
            attrs["Meta"] = type("Meta", (), {})
        meta = attrs["Meta"]
        if hasattr(meta, "order_with_respect_to"):
            # Move the option from Meta onto the model class.
            attrs["order_with_respect_to"] = meta.order_with_respect_to
            del meta.order_with_respect_to
        if not hasattr(meta, "ordering"):
            meta.ordering = ("_order",)
        return super(OrderableBase, cls).__new__(cls, name, bases, attrs)
class Orderable(with_metaclass(OrderableBase, models.Model)):
    """
    Abstract model that provides a custom ordering integer field
    similar to using Meta's ``order_with_respect_to``, since to
    date (Django 1.2) this doesn't work with ``ForeignKey("self")``,
    or with Generic Relations. We may also want this feature for
    models that aren't ordered with respect to a particular field.
    """
    _order = models.IntegerField(_("Order"), null=True)
    class Meta:
        abstract = True
    def with_respect_to(self):
        """
        Returns a dict to use as a filter for ordering operations
        containing the original ``Meta.order_with_respect_to`` value
        if provided. If the field is a Generic Relation, the dict
        returned contains names and values for looking up the
        relation's ``ct_field`` and ``fk_field`` attributes.
        """
        try:
            name = self.order_with_respect_to
            value = getattr(self, name)
        except AttributeError:
            # No ``order_with_respect_to`` specified on the model.
            return {}
        # Support for generic relations.
        field = getattr(self.__class__, name)
        if isinstance(field, GenericForeignKey):
            names = (field.ct_field, field.fk_field)
            return dict([(n, getattr(self, n)) for n in names])
        return {name: value}
    def save(self, *args, **kwargs):
        """
        Set the initial ordering value.
        """
        if self._order is None:
            # New records go to the end: order index equals the number
            # of siblings that already have an order assigned.
            lookup = self.with_respect_to()
            lookup["_order__isnull"] = False
            concrete_model = base_concrete_model(Orderable, self)
            self._order = concrete_model.objects.filter(**lookup).count()
        super(Orderable, self).save(*args, **kwargs)
    def delete(self, *args, **kwargs):
        """
        Update the ordering values for siblings.
        """
        # Shift down every sibling ordered at or after this instance
        # (includes this one harmlessly, since it's deleted right after).
        lookup = self.with_respect_to()
        lookup["_order__gte"] = self._order
        concrete_model = base_concrete_model(Orderable, self)
        after = concrete_model.objects.filter(**lookup)
        after.update(_order=models.F("_order") - 1)
        super(Orderable, self).delete(*args, **kwargs)
    def _get_next_or_previous_by_order(self, is_next, **kwargs):
        """
        Retrieves next or previous object by order. We implement our
        own version instead of Django's so we can hook into the
        published manager, concrete subclasses and our custom
        ``with_respect_to`` method.
        Returns ``None`` when no adjacent object exists.
        """
        lookup = self.with_respect_to()
        lookup["_order"] = self._order + (1 if is_next else -1)
        concrete_model = base_concrete_model(Orderable, self)
        try:
            queryset = concrete_model.objects.published
        except AttributeError:
            # Manager has no ``published`` method: unfiltered lookup.
            queryset = concrete_model.objects.filter
        try:
            return queryset(**kwargs).get(**lookup)
        except concrete_model.DoesNotExist:
            pass
    def get_next_by_order(self, **kwargs):
        """
        Retrieves next object by order.
        """
        return self._get_next_or_previous_by_order(True, **kwargs)
    def get_previous_by_order(self, **kwargs):
        """
        Retrieves previous object by order.
        """
        return self._get_next_or_previous_by_order(False, **kwargs)
class Ownable(models.Model):
    """
    Abstract model that provides ownership of an object for a user.
    """
    user = models.ForeignKey(user_model_name, verbose_name=_("Author"),
        related_name="%(class)ss")
    class Meta:
        abstract = True
    def is_editable(self, request):
        """
        Restrict in-line editing to the objects's owner and superusers.
        """
        # Compare against ``user_id`` directly to avoid fetching the
        # related user object.
        return request.user.is_superuser or request.user.id == self.user_id
class SitePermission(models.Model):
    """
    Permission relationship between a user and a site that's
    used instead of ``User.is_staff``, for admin and inline-editing
    access.
    """
    # Note: a concrete model (not abstract) -- one row per user, with
    # the sites that user may administer.
    user = models.ForeignKey(user_model_name, verbose_name=_("Author"),
        related_name="%(class)ss")
    sites = models.ManyToManyField("sites.Site", blank=True,
        verbose_name=_("Sites"))
    class Meta:
        verbose_name = _("Site permission")
        verbose_name_plural = _("Site permissions")
def create_site_permission(sender, **kw):
    """
    ``post_save`` handler that grants newly saved staff (non-superuser)
    users a ``SitePermission`` on the current site, creating the
    permission record when one doesn't exist yet.
    """
    label = "%s.%s" % (sender._meta.app_label, sender._meta.object_name)
    if label.lower() != user_model_name.lower():
        # post_save fires for every model; only react to the user model.
        return
    user = kw["instance"]
    if not user.is_staff or user.is_superuser:
        return
    perm, created = SitePermission.objects.get_or_create(user=user)
    if created or perm.sites.count() < 1:
        perm.sites.add(current_site_id())
# We don't specify the user model here, because with 1.5's custom
# user models, everything explodes. So we check the name of it in
# the signal.
post_save.connect(create_site_permission)
| {
"content_hash": "8c6469734d3c03fe36401b8552acc782",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 78,
"avg_line_length": 35.885311871227366,
"alnum_prop": 0.6113821138211382,
"repo_name": "cccs-web/mezzanine",
"id": "2d7fe212e394b340bb62efc14e7e85d5f7e45765",
"size": "17835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mezzanine/core/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "108170"
},
{
"name": "JavaScript",
"bytes": "228868"
},
{
"name": "Python",
"bytes": "1084061"
}
],
"symlink_target": ""
} |
"""Default configuration for the Airflow webserver."""
from __future__ import annotations
import os
from airflow.www.fab_security.manager import AUTH_DB
# from airflow.www.fab_security.manager import AUTH_LDAP
# from airflow.www.fab_security.manager import AUTH_OAUTH
# from airflow.www.fab_security.manager import AUTH_OID
# from airflow.www.fab_security.manager import AUTH_REMOTE_USER
# Directory containing this configuration file.
basedir = os.path.abspath(os.path.dirname(__file__))
# Flask-WTF flag for CSRF protection.
WTF_CSRF_ENABLED = True
# ----------------------------------------------------
# AUTHENTICATION CONFIG
# ----------------------------------------------------
# For details on how to set up each of the following authentication
# methods, see
# http://flask-appbuilder.readthedocs.io/en/latest/security.html#authentication-methods
# The authentication type
# AUTH_OID : Is for OpenID
# AUTH_DB : Is for database
# AUTH_LDAP : Is for LDAP
# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server
# AUTH_OAUTH : Is for OAuth
AUTH_TYPE = AUTH_DB
# Uncomment to setup Full admin role name
# AUTH_ROLE_ADMIN = 'Admin'
# Uncomment and set to desired role to enable access without authentication
# AUTH_ROLE_PUBLIC = 'Viewer'
# Will allow user self registration
# AUTH_USER_REGISTRATION = True
# Recaptcha is automatically enabled when user self-registration is active and the keys below are set
# RECAPTCHA_PRIVATE_KEY = PRIVATE_KEY
# RECAPTCHA_PUBLIC_KEY = PUBLIC_KEY
# Config for Flask-Mail necessary for user self registration
# MAIL_SERVER = 'smtp.gmail.com'
# MAIL_USE_TLS = True
# MAIL_USERNAME = 'yourappemail@gmail.com'
# MAIL_PASSWORD = 'passwordformail'
# MAIL_DEFAULT_SENDER = 'sender@gmail.com'
# The default user self registration role
# AUTH_USER_REGISTRATION_ROLE = "Public"
# When using OAuth Auth, uncomment to setup provider(s) info
# Google OAuth example:
# OAUTH_PROVIDERS = [{
# 'name':'google',
# 'token_key':'access_token',
# 'icon':'fa-google',
# 'remote_app': {
# 'api_base_url':'https://www.googleapis.com/oauth2/v2/',
# 'client_kwargs':{
# 'scope': 'email profile'
# },
# 'access_token_url':'https://accounts.google.com/o/oauth2/token',
# 'authorize_url':'https://accounts.google.com/o/oauth2/auth',
# 'request_token_url': None,
# 'client_id': GOOGLE_KEY,
# 'client_secret': GOOGLE_SECRET_KEY,
# }
# }]
# When using LDAP Auth, setup the ldap server
# AUTH_LDAP_SERVER = "ldap://ldapserver.new"
# When using OpenID Auth, uncomment to setup OpenID providers.
# example for OpenID authentication
# OPENID_PROVIDERS = [
# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },
# { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]
# ----------------------------------------------------
# Theme CONFIG
# ----------------------------------------------------
# Flask App Builder comes up with a number of predefined themes
# that you can use for Apache Airflow.
# http://flask-appbuilder.readthedocs.io/en/latest/customizing.html#changing-themes
# Please make sure to remove "navbar_color" configuration from airflow.cfg
# in order to fully utilize the theme. (or use that property in conjunction with theme)
# APP_THEME = "bootstrap-theme.css" # default bootstrap
# APP_THEME = "amelia.css"
# APP_THEME = "cerulean.css"
# APP_THEME = "cosmo.css"
# APP_THEME = "cyborg.css"
# APP_THEME = "darkly.css"
# APP_THEME = "flatly.css"
# APP_THEME = "journal.css"
# APP_THEME = "lumen.css"
# APP_THEME = "paper.css"
# APP_THEME = "readable.css"
# APP_THEME = "sandstone.css"
# APP_THEME = "simplex.css"
# APP_THEME = "slate.css"
# APP_THEME = "solar.css"
# APP_THEME = "spacelab.css"
# APP_THEME = "superhero.css"
# APP_THEME = "united.css"
# APP_THEME = "yeti.css"
| {
"content_hash": "bd75a2af77d3b21f25de13d14d458e89",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 106,
"avg_line_length": 35.017699115044245,
"alnum_prop": 0.6467020470053071,
"repo_name": "apache/airflow",
"id": "ac999a0deafb65b1f5985e1e38a7432ddc0800cc",
"size": "4744",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/config_templates/default_webserver_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
import gym
import numpy as np
from typing import (
cast,
Dict,
List,
Tuple,
Type,
Union,
)
from ray.rllib.algorithms import AlgorithmConfig
from ray.rllib.algorithms.crr.torch import CRRModel
from ray.rllib.algorithms.ddpg.noop_model import TorchNoopModel
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import (
TorchCategorical,
TorchDistributionWrapper,
get_torch_categorical_class_with_temperature,
)
from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2
from ray.rllib.policy.torch_mixins import TargetNetworkMixin
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.torch_utils import (
huber_loss,
l2_loss,
)
from ray.rllib.utils.typing import (
TrainerConfigDict,
TensorType,
)
torch, nn = try_import_torch()
class CRRTorchPolicy(TorchPolicyV2, TargetNetworkMixin):
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        config: TrainerConfigDict,
    ):
        """
        Build the CRR torch policy for the given spaces and config.

        ``self.target_model`` is created later in ``make_model``; it is
        declared here first so the attribute always exists.
        """
        self.target_model = None  # assign it in self.make_model
        self._is_action_discrete = isinstance(action_space, gym.spaces.Discrete)
        TorchPolicyV2.__init__(
            self,
            observation_space,
            action_space,
            config,
            max_seq_len=config["model"]["max_seq_len"],
        )
        # For discrete action spaces, use a custom TorchCategorical
        # distribution that supports temperature scaling.
        if self._is_action_discrete:
            assert self.dist_class == TorchCategorical
            self.dist_class = get_torch_categorical_class_with_temperature(
                config["categorical_distribution_temperature"]
            )
"""
by here your model should include the following
(We assume state s is already encoded and there is no need to use RNNs/other
models to encode observations into states):
1. a nn representing the actor pi(a|s)
1.1* in case of continuous actions it should be normal / squashed normal
dist if the action space is bounded?
        1.2 in case of a discrete set of actions the output of the model should be
a discrete distribution over action classes
2. a nn representing the critic Q(s, a)
2.1* in case of continuous actions it should take in concat([s,a]) and output
a single scalar
2.2 in case of discrete actions it should take in s and output a logit for
each action class as well as a scale for matching the reward scale.
3. for critic it should have n_critic copies of the Q function nn
4. for each critic it should have a target model copy
"""
def action_distribution_fn(
self,
model: ModelV2,
*,
obs_batch: TensorType,
state_batches: TensorType,
**kwargs,
) -> Tuple[TensorType, type, List[TensorType]]:
model_out, _ = model(obs_batch)
dist_input = model.get_policy_output(model_out)
dist_class = self.dist_class
return dist_input, dist_class, []
    def make_model(self) -> ModelV2:
        """
        Build ``self.model`` and ``self.target_model`` (an identically
        configured copy used for bootstrapped critic targets) and
        return the main model.
        """
        # copying ddpg build model to here to be explicit
        model_config = self.config["model"]
        model_config.update(
            dict(
                actor_hidden_activation=self.config["actor_hidden_activation"],
                actor_hiddens=self.config["actor_hiddens"],
                critic_hidden_activation=self.config["critic_hidden_activation"],
                critic_hiddens=self.config["critic_hiddens"],
                twin_q=self.config["twin_q"],
            )
        )
        # num_outputs is the flattened observation size -- the default
        # model is a noop, so outputs presumably mirror the observation.
        num_outputs = int(np.product(self.observation_space.shape))
        # TODO: why do we even have to go through this get_model_v2 function?
        self.model = ModelCatalog.get_model_v2(
            obs_space=self.observation_space,
            action_space=self.action_space,
            num_outputs=num_outputs,
            model_config=model_config,
            framework=self.config["framework"],
            # use this model for interface (get_q, get_q_twin, .etc)
            model_interface=CRRModel,
            default_model=TorchNoopModel,
            name="model",
        )
        # TODO: this is a bad python pattern to assign attributes that do not
        # exist in the constructor
        self.target_model = ModelCatalog.get_model_v2(
            obs_space=self.observation_space,
            action_space=self.action_space,
            num_outputs=num_outputs,
            model_config=model_config,
            framework=self.config["framework"],
            # use this model for interface (get_q, get_q_twin, .etc)
            model_interface=CRRModel,
            default_model=TorchNoopModel,
            name="target_model",
        )
        return self.model
def optimizer(
self,
) -> Union[List["torch.optim.Optimizer"], "torch.optim.Optimizer"]:
# Set epsilons to match tf.keras.optimizers.Adam's epsilon default.
actor_optimizer = torch.optim.Adam(
params=self.model.policy_variables(),
lr=self.config["actor_lr"],
betas=(0.9, 0.999),
eps=1e-8,
)
critic_optimizer = torch.optim.Adam(
params=self.model.q_variables(),
lr=self.config["critic_lr"],
betas=(0.9, 0.999),
eps=1e-8,
)
# Return them in the same order as the respective loss terms are returned.
return actor_optimizer, critic_optimizer
    def loss(
        self,
        model: ModelV2,
        dist_class: Type[TorchDistributionWrapper],
        train_batch: SampleBatch,
    ) -> Union[TensorType, List[TensorType]]:
        """
        Compute CRR's actor and critic losses for one train batch.

        Returns the two loss terms in the same order as the optimizers
        returned by ``self.optimizer`` (actor first, then critic).
        """
        # update the actor
        # compute the weights assigned to every transition
        # (s_t, a_t) and log(pi(a_t|s_t)); both are written into
        # ``train_batch`` as a side effect.
        self._compute_action_weights_and_logps(model, dist_class, train_batch)
        # compute actor loss
        actor_loss = self._compute_actor_loss(model, dist_class, train_batch)
        # update the critic
        # standard critic update with pessimistic Q-learning (e.g. DQN)
        critic_loss = self._compute_critic_loss(model, dist_class, train_batch)
        self.log("loss_actor", actor_loss)
        self.log("loss_critic", critic_loss)
        return actor_loss, critic_loss
    def log(self, key, value):
        # Internal helper: stash a metric on the model's tower_stats so
        # ``stats_fn`` can aggregate it across towers later.
        self.model.tower_stats[key] = value
# def update_target(self):
# tau = self.config['tau']
#
# model_params = self.model.parameters()
# target_params = self.target_models[self.mode].parameters()
# for src_p, trg_p in zip(model_params, target_params):
# trg_p.data = (1 - tau) * trg_p.data + tau * src_p.data
    @override(TorchPolicyV2)
    def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]:
        """
        Aggregate every metric recorded via ``self.log`` by averaging
        the per-tower values into plain Python floats.
        """
        stats_dict = {
            k: torch.stack(self.get_tower_stats(k)).mean().item()
            for k in self.model.tower_stats
        }
        return stats_dict
def _get_q_value(
self, model: ModelV2, model_out: TensorType, actions: TensorType
) -> TensorType:
# helper function to compute the pessimistic q value
q1 = model.get_q_values(model_out, actions)
q2 = model.get_twin_q_values(model_out, actions)
return torch.minimum(q1, q2)
    def _compute_adv_and_logps(
        self,
        model: ModelV2,
        dist_class: Type[TorchDistributionWrapper],
        train_batch: SampleBatch,
    ) -> None:
        """
        Estimate advantages A(s_t, a_t) and dataset-action log-probs,
        writing them into ``train_batch`` under ``"advantages"`` and
        ``SampleBatch.ACTION_LOGP`` (no return value).
        """
        # uses mean|max|expectation to compute estimate of advantages
        # continuous/discrete action spaces:
        # for max:
        #     A(s_t, a_t) = Q(s_t, a_t) - max_{a^j} Q(s_t, a^j)
        #     where a^j is m times sampled from the policy p(a | s_t)
        # for mean:
        #     A(s_t, a_t) = Q(s_t, a_t) - avg( Q(s_t, a^j) )
        #     where a^j is m times sampled from the policy p(a | s_t)
        # discrete action space and adv_type=expectation:
        #     A(s_t, a_t) = Q(s_t, a_t) - sum_j[Q(s_t, a^j) * pi(a^j)]
        advantage_type = self.config["advantage_type"]
        n_action_sample = self.config["n_action_sample"]
        batch_size = len(train_batch)
        out_t, _ = model(train_batch)
        # construct pi(s_t) and Q(s_t, a_t) for computing advantage actions
        pi_s_t = dist_class(model.get_policy_output(out_t), model)
        q_t = self._get_q_value(model, out_t, train_batch[SampleBatch.ACTIONS])
        # compute the logp of the actions in the dataset (for computing actor's loss)
        action_logp = pi_s_t.dist.log_prob(train_batch[SampleBatch.ACTIONS])
        # fix the shape if it's not canonical (i.e. shape[-1] != 1)
        if len(action_logp.shape) <= 1:
            # In-place unsqueeze: turn the log-probs into a column.
            action_logp.unsqueeze_(-1)
        train_batch[SampleBatch.ACTION_LOGP] = action_logp
        if advantage_type == "expectation":
            assert (
                self._is_action_discrete
            ), "Action space should be discrete when advantage_type = expectation."
            assert hasattr(
                self.model, "q_model"
            ), "CRR's ModelV2 should have q_model neural network in discrete \
                action spaces"
            assert isinstance(
                pi_s_t.dist, torch.distributions.Categorical
            ), "The output of the policy should be a torch Categorical \
                distribution."
            # Exact expectation over all discrete actions: V = sum_a Q(s,a)pi(a|s),
            # using the pessimistic (twin-min) Q when a twin head exists.
            q_vals = self.model.q_model(out_t)
            if hasattr(self.model, "twin_q_model"):
                q_twins = self.model.twin_q_model(out_t)
                q_vals = torch.minimum(q_vals, q_twins)
            probs = pi_s_t.dist.probs
            v_t = (q_vals * probs).sum(-1, keepdims=True)
        else:
            # Monte-Carlo estimate: sample n actions per state from the
            # current policy and evaluate Q on the tiled observations.
            policy_actions = pi_s_t.dist.sample((n_action_sample,))  # samples
            if self._is_action_discrete:
                flat_actions = policy_actions.reshape(-1)
            else:
                flat_actions = policy_actions.reshape(-1, *self.action_space.shape)
            reshaped_s_t = train_batch[SampleBatch.OBS].view(
                1, batch_size, *self.observation_space.shape
            )
            reshaped_s_t = reshaped_s_t.expand(
                n_action_sample, batch_size, *self.observation_space.shape
            )
            flat_s_t = reshaped_s_t.reshape(-1, *self.observation_space.shape)
            input_v_t = SampleBatch(
                **{SampleBatch.OBS: flat_s_t, SampleBatch.ACTIONS: flat_actions}
            )
            out_v_t, _ = model(input_v_t)
            flat_q_st_pi = self._get_q_value(model, out_v_t, flat_actions)
            reshaped_q_st_pi = flat_q_st_pi.reshape(-1, batch_size, 1)
            if advantage_type == "mean":
                v_t = reshaped_q_st_pi.mean(dim=0)
            elif advantage_type == "max":
                # ``max`` returns (values, indices); keep values only.
                v_t, _ = reshaped_q_st_pi.max(dim=0)
            else:
                raise ValueError(f"Invalid advantage type: {advantage_type}.")
        adv_t = q_t - v_t
        train_batch["advantages"] = adv_t
        # logging
        self.log("q_batch_avg", q_t.mean())
        self.log("q_batch_max", q_t.max())
        self.log("q_batch_min", q_t.min())
        self.log("v_batch_avg", v_t.mean())
        self.log("v_batch_max", v_t.max())
        self.log("v_batch_min", v_t.min())
        self.log("adv_batch_avg", adv_t.mean())
        self.log("adv_batch_max", adv_t.max())
        self.log("adv_batch_min", adv_t.min())
        self.log("reward_batch_avg", train_batch[SampleBatch.REWARDS].mean())
def _compute_action_weights_and_logps(
self,
model: ModelV2,
dist_class: Type[TorchDistributionWrapper],
train_batch: SampleBatch,
) -> None:
# uses bin|exp to compute action weights
# 1(A>=0) or exp(A/temp)
weight_type = self.config["weight_type"]
self._compute_adv_and_logps(model, dist_class, train_batch)
if weight_type == "bin":
weights = (train_batch["advantages"] > 0.0).float()
elif weight_type == "exp":
temperature = self.config["temperature"]
max_weight = self.config["max_weight"]
weights = (
(train_batch["advantages"] / temperature).exp().clamp(0.0, max_weight)
)
else:
raise ValueError(f"invalid weight type: {weight_type}.")
train_batch["action_weights"] = weights
# logging
self.log("weights_avg", weights.mean())
self.log("weights_max", weights.max())
self.log("weights_min", weights.min())
def _compute_actor_loss(
self,
model: ModelV2,
dist_class: Type[TorchDistributionWrapper],
train_batch: SampleBatch,
) -> Union[TensorType, List[TensorType]]:
loss = -(
train_batch["action_weights"] * train_batch[SampleBatch.ACTION_LOGP]
).mean(0)
return loss
    def _compute_critic_loss(
        self,
        model: ModelV2,
        dist_class: Type[TorchDistributionWrapper],
        train_batch: SampleBatch,
    ):
        """Twin-Q TD loss against the target network.

        Targets are r + gamma * (1 - done) * Q_target(s', a'), where a' is
        sampled from the policy head of the *target* model. Returns the mean
        loss over both Q heads; also logs TD-error and target statistics.
        """
        discount = self.config["gamma"]
        # Compute bellman targets to regress on
        # target, use target model to compute the target
        target_model = cast(CRRModel, self.target_models[model])
        target_out_next, _ = target_model(
            {SampleBatch.OBS: train_batch[SampleBatch.NEXT_OBS]}
        )
        # compute target values with no gradient
        with torch.no_grad():
            # get the action of the current policy evaluated at the next state
            pi_s_next = dist_class(
                target_model.get_policy_output(target_out_next), target_model
            )
            target_a_next = pi_s_next.sample()
            if not self._is_action_discrete:
                # Clip sampled continuous actions back into the action-space box.
                target_a_next = target_a_next.clamp(
                    torch.from_numpy(self.action_space.low).to(target_a_next),
                    torch.from_numpy(self.action_space.high).to(target_a_next),
                )
            # q1_target = target_model.get_q_values(target_out_next, target_a_next)
            # q2_target = target_model.get_twin_q_values(target_out_next, target_a_next)
            # target_q_next = torch.minimum(q1_target, q2_target).squeeze(-1)
            target_q_next = self._get_q_value(
                target_model, target_out_next, target_a_next
            ).squeeze(-1)
            # Bellman backup: terminal states (DONES) contribute reward only.
            target = (
                train_batch[SampleBatch.REWARDS]
                + discount
                * (1.0 - train_batch[SampleBatch.DONES].float())
                * target_q_next
            )
        # compute the predicted output
        model = cast(CRRModel, model)
        model_out_t, _ = model({SampleBatch.OBS: train_batch[SampleBatch.OBS]})
        q1 = model.get_q_values(model_out_t, train_batch[SampleBatch.ACTIONS]).squeeze(
            -1
        )
        q2 = model.get_twin_q_values(
            model_out_t, train_batch[SampleBatch.ACTIONS]
        ).squeeze(-1)
        # compute the MSE loss for all q-functions
        # Both heads are concatenated so one mean covers q1 and q2 equally.
        td_error_q1 = q1 - target
        td_error_q2 = q2 - target
        loss_fn = l2_loss if self.config["td_error_loss_fn"] == "mse" else huber_loss
        loss = torch.mean(loss_fn(torch.cat((td_error_q1, td_error_q2), dim=0)))
        # logging
        self.log("td_error_q1", (td_error_q1**2).mean())
        self.log("td_error_q2", (td_error_q2**2).mean())
        self.log("td_error", loss)
        self.log("targets_avg", target.mean())
        self.log("targets_max", target.max())
        self.log("targets_min", target.min())
        return loss
if __name__ == "__main__":
    # Smoke test: construct a policy on a tiny 2-D continuous obs/action space.
    obs_space = gym.spaces.Box(np.array((-1, -1)), np.array((1, 1)))
    act_space = gym.spaces.Box(np.array((-1, -1)), np.array((1, 1)))
    config = AlgorithmConfig().framework(framework="torch").to_dict()
    print(config["framework"])
    CRRTorchPolicy(obs_space, act_space, config=config)
| {
"content_hash": "8bdfb1303afa97f3a0f07a06470c6580",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 88,
"avg_line_length": 37.479166666666664,
"alnum_prop": 0.5854487060712742,
"repo_name": "ray-project/ray",
"id": "82e9b682fc9a5ec14421d00f6d0de059fc753ddf",
"size": "16191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/algorithms/crr/torch/crr_torch_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
} |
import json
import sys
import rethinkdb as r
from dateutil import parser
from elasticsearch import Elasticsearch
def convert_data(raw_data):
    """Normalize a raw Facebook post payload into the archive schema.

    Required keys: "id", "updated_time", "created_time" (raises KeyError if
    absent); other fields fall back to empty defaults. Vote/moderation
    fields are initialized for a freshly archived post.
    """
    raw_comments = raw_data.get("comments", {"data": []})["data"]
    return {
        "id": raw_data["id"],
        "message": raw_data.get("message", ""),
        "picture": raw_data.get("picture", ""),
        "updated": parser.parse(raw_data["updated_time"]),
        "created": parser.parse(raw_data["created_time"]),
        "link": raw_data.get("link", ""),
        "comments": convert_comments(raw_comments),
        "upvote": 0,
        "downvote": 0,
        "approved": True,
        "deleted": False,
        "limbo": False,
    }
def convert_comments(comments):
    """Convert raw comment payloads into the archive comment schema."""
    converted = []
    for comment in comments:
        converted.append({
            "created": parser.parse(comment["created_time"]),
            "message": comment["message"],
            "by": comment["from"]["name"],
        })
    return converted
def clean_feed_item(item):
    """Make *item* JSON-serializable by ISO-formatting its datetimes (in place).

    Falsy input (None, empty dict) yields an empty dict.
    """
    if not item:
        return {}
    for key in ("created", "updated"):
        item[key] = item[key].isoformat()
    serialized = []
    for comment in item["comments"]:
        comment["created"] = comment["created"].isoformat()
        serialized.append(comment)
    item["comments"] = serialized
    return item
def unclean_feed_item(item):
    """Inverse of clean_feed_item: parse ISO strings back into datetimes (in place)."""
    if not item:
        return {}
    for key in ("created", "updated"):
        item[key] = parser.parse(item[key])
    restored = []
    for comment in item["comments"]:
        comment["created"] = parser.parse(comment["created"])
        restored.append(comment)
    item["comments"] = restored
    return item
def validate_new(new, original):
    """Return True when *new* is a legal client-side mutation of *original*.

    Identity fields must be unchanged, moderation flags must be consistent
    (an unapproved item must be in limbo or deleted, never both), and each
    vote counter may move by at most one.
    """
    identity_unchanged = (
        new["id"] == original["id"]
        and new["created"] == original["created"]
        and new["updated"] == original["updated"]
    )
    if not identity_unchanged:
        return False
    if not new["approved"] and not (new["limbo"] or new["deleted"]):
        return False
    if new["limbo"] and new["deleted"]:
        return False
    for counter in ("upvote", "downvote"):
        if abs(new[counter] - original[counter]) > 1:
            return False
    return True
| {
"content_hash": "dfbc7c8c660e1ba68c169c6371ae176b",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 22.867469879518072,
"alnum_prop": 0.624868282402529,
"repo_name": "streed/facebookGroupArchive",
"id": "75bc427364ef109548836b1334d6488b36a6a2f2",
"size": "1898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "archiver/util/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4391"
},
{
"name": "Python",
"bytes": "7347"
}
],
"symlink_target": ""
} |
import warnings
def get_conf():
    """Parses various PETSc configuration/include files to get data types.

    precision, indices, complexscalars = get_conf()

    Output:
      precision: 'single', 'double', 'longlong' indicates precision of PetscScalar
      indices: '32', '64' indicates bit-size of PetscInt
      complex: True/False indicates whether PetscScalar is complex or not.

    Returns (None, None, None) and warns when no valid PETSc installation
    can be located via the PETSC_DIR / PETSC_ARCH environment variables.
    """
    import os

    precision = None
    indices = None
    complexscalars = None

    # PETSC_DIR is mandatory; PETSC_ARCH only narrows to a build directory.
    petscdir = os.environ.get('PETSC_DIR')
    if petscdir is None:
        warnings.warn('Nonexistent or invalid PETSc installation, using defaults')
        return None, None, None
    if 'PETSC_ARCH' in os.environ:
        petscdir = os.path.join(petscdir, os.environ['PETSC_ARCH'])

    petscvariables = os.path.join(petscdir, 'lib', 'petsc', 'conf', 'petscvariables')
    petscconfinclude = os.path.join(petscdir, 'include', 'petscconf.h')

    try:
        # 'with' guarantees the handle is closed even if parsing fails.
        with open(petscvariables, 'r') as fid:
            for line in fid:
                if line.startswith('PETSC_PRECISION'):
                    precision = line.strip().split('=')[1].strip('\n').strip()
    except IOError:
        warnings.warn('Nonexistent or invalid PETSc installation, using defaults')
        return None, None, None

    try:
        with open(petscconfinclude, 'r') as fid:
            for line in fid:
                if line.startswith('#define PETSC_USE_64BIT_INDICES 1'):
                    indices = '64bit'
                elif line.startswith('#define PETSC_USE_COMPLEX 1'):
                    complexscalars = True
    except IOError:
        warnings.warn('Nonexistent or invalid PETSc installation, using defaults')
        return None, None, None

    # Fall back to PETSc's defaults when the macros are absent.
    if indices is None:
        indices = '32bit'
    if complexscalars is None:
        complexscalars = False

    return precision, indices, complexscalars
| {
"content_hash": "1ce797364f6b7812865c22d11980a0bf",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 83,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.6185300207039337,
"repo_name": "patemotter/trilinos-prediction",
"id": "ba06dc3026f205399fae94736238ef08779e4eaf",
"size": "1932",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tpetra_solvers/helper_scripts/petsc_conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AspectJ",
"bytes": "57476"
},
{
"name": "C++",
"bytes": "407510"
},
{
"name": "CMake",
"bytes": "9371"
},
{
"name": "Jupyter Notebook",
"bytes": "1737789"
},
{
"name": "Makefile",
"bytes": "5842"
},
{
"name": "Perl",
"bytes": "59825"
},
{
"name": "Python",
"bytes": "53666"
},
{
"name": "Shell",
"bytes": "73049"
}
],
"symlink_target": ""
} |
import base64
import xml.etree.ElementTree as ET
from functools import partial
from urllib.parse import urlencode
from geopy.exc import ConfigurationError, GeocoderQueryError
from geopy.geocoders.base import DEFAULT_SENTINEL, Geocoder
from geopy.location import Location
from geopy.util import logger
__all__ = ("IGNFrance", )
class IGNFrance(Geocoder):
    """Geocoder using the IGN France GeoCoder OpenLS API.
    Documentation at:
        https://geoservices.ign.fr/documentation/geoservices/index.html
    """
    # OpenLS XML envelope shared by geocode and reverse; the method-specific
    # payload is spliced in via {sub_request}. Coordinates use EPSG:4326.
    xml_request = """<?xml version="1.0" encoding="UTF-8"?>
    <XLS version="1.2"
        xmlns="http://www.opengis.net/xls"
        xmlns:gml="http://www.opengis.net/gml"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://www.opengis.net/xls
        http://schemas.opengis.net/ols/1.2/olsAll.xsd">
        <RequestHeader srsName="epsg:4326"/>
        <Request methodName="{method_name}"
                 maximumResponses="{maximum_responses}"
                 requestID=""
                 version="1.2">
            {sub_request}
        </Request>
    </XLS>"""
    # The API key is embedded in the request path, not in a query parameter.
    api_path = '/%(api_key)s/geoportail/ols'
    def __init__(
            self,
            api_key,
            *,
            username=None,
            password=None,
            referer=None,
            domain='wxs.ign.fr',
            scheme=None,
            timeout=DEFAULT_SENTINEL,
            proxies=DEFAULT_SENTINEL,
            user_agent=None,
            ssl_context=DEFAULT_SENTINEL,
            adapter_factory=None
    ):
        """
        :param str api_key: The API key required by IGN France API
            to perform geocoding requests. You can get your key here:
            https://geoservices.ign.fr/documentation/services-acces.html.
            Mandatory. For authentication with referer
            and with username/password, the api key always differ.
        :param str username: When making a call need HTTP simple
            authentication username. Mandatory if no referer set
        :param str password: When making a call need HTTP simple
            authentication password. Mandatory if no referer set
        :param str referer: When making a call need HTTP referer.
            Mandatory if no password and username
        :param str domain: Currently it is ``'wxs.ign.fr'``, can
            be changed for testing purposes for developer API
            e.g ``'gpp3-wxs.ign.fr'`` at the moment.
        :param str scheme:
            See :attr:`geopy.geocoders.options.default_scheme`.
        :param int timeout:
            See :attr:`geopy.geocoders.options.default_timeout`.
        :param dict proxies:
            See :attr:`geopy.geocoders.options.default_proxies`.
        :param str user_agent:
            See :attr:`geopy.geocoders.options.default_user_agent`.
        :type ssl_context: :class:`ssl.SSLContext`
        :param ssl_context:
            See :attr:`geopy.geocoders.options.default_ssl_context`.
        :param callable adapter_factory:
            See :attr:`geopy.geocoders.options.default_adapter_factory`.
        .. versionadded:: 2.0
        """
        super().__init__(
            scheme=scheme,
            timeout=timeout,
            proxies=proxies,
            user_agent=user_agent,
            ssl_context=ssl_context,
            adapter_factory=adapter_factory,
        )
        # Catch if no api key with username and password
        # or no api key with referer
        if not ((api_key and username and password) or (api_key and referer)):
            raise ConfigurationError('You should provide an api key and a '
                                     'username with a password or an api '
                                     'key with a referer depending on '
                                     'created api key')
        if (username and password) and referer:
            raise ConfigurationError('You can\'t set username/password and '
                                     'referer together. The API key always '
                                     'differs depending on both scenarios')
        if username and not password:
            raise ConfigurationError(
                'username and password must be set together'
            )
        self.api_key = api_key
        self.username = username
        self.password = password
        self.referer = referer
        self.domain = domain.strip('/')
        # Bake the API key into the endpoint URL once, at construction time.
        api_path = self.api_path % dict(api_key=self.api_key)
        self.api = '%s://%s%s' % (self.scheme, self.domain, api_path)
    def geocode(
            self,
            query,
            *,
            query_type='StreetAddress',
            maximum_responses=25,
            is_freeform=False,
            filtering=None,
            exactly_one=True,
            timeout=DEFAULT_SENTINEL
    ):
        """
        Return a location point by address.
        :param str query: The query string to be geocoded.
        :param str query_type: The type to provide for geocoding. It can be
            `PositionOfInterest`, `StreetAddress` or `CadastralParcel`.
            `StreetAddress` is the default choice if none provided.
        :param int maximum_responses: The maximum number of responses
            to ask to the API in the query body.
        :param str is_freeform: Set if return is structured with
            freeform structure or a more structured returned.
            By default, value is False.
        :param str filtering: Provide string that help setting geocoder
            filter. It contains an XML string. See examples in documentation
            and ignfrance.py file in directory tests.
        :param bool exactly_one: Return one result or a list of results, if
            available.
        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.
        :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
            ``exactly_one=False``.
        """
        # Check if acceptable query type
        if query_type not in ['PositionOfInterest',
                              'StreetAddress',
                              'CadastralParcel']:
            raise GeocoderQueryError("""You did not provided a query_type the
            webservice can consume. It should be PositionOfInterest,
            'StreetAddress or CadastralParcel""")
        # Check query validity for CadastralParcel
        if query_type == 'CadastralParcel' and len(query.strip()) != 14:
            raise GeocoderQueryError("""You must send a string of fourteen
                characters long to match the cadastre required code""")
        sub_request = """
                <GeocodeRequest returnFreeForm="{is_freeform}">
                    <Address countryCode="{query_type}">
                        <freeFormAddress>{query}</freeFormAddress>
                        {filtering}
                    </Address>
                </GeocodeRequest>
        """
        xml_request = self.xml_request.format(
            method_name='LocationUtilityService',
            sub_request=sub_request,
            maximum_responses=maximum_responses
        )
        # Manage type change for xml case sensitive
        if is_freeform:
            is_freeform = 'true'
        else:
            is_freeform = 'false'
        # Manage filtering value
        if filtering is None:
            filtering = ''
        # Create query using parameters
        request_string = xml_request.format(
            is_freeform=is_freeform,
            query=query,
            query_type=query_type,
            filtering=filtering
        )
        # The whole OpenLS document travels as a single 'xls' query parameter.
        params = {
            'xls': request_string
        }
        url = "?".join((self.api, urlencode(params)))
        logger.debug("%s.geocode: %s", self.__class__.__name__, url)
        callback = partial(
            self._parse_xml, is_freeform=is_freeform, exactly_one=exactly_one
        )
        return self._request_raw_content(url, callback, timeout=timeout)
    def reverse(
            self,
            query,
            *,
            reverse_geocode_preference=('StreetAddress', ),
            maximum_responses=25,
            filtering='',
            exactly_one=True,
            timeout=DEFAULT_SENTINEL
    ):
        """
        Return an address by location point.
        :param query: The coordinates for which you wish to obtain the
            closest human-readable addresses.
        :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
            longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
        :param list reverse_geocode_preference: Enable to set expected results
            type. It can be `StreetAddress` or `PositionOfInterest`.
            Default is set to `StreetAddress`.
        :param int maximum_responses: The maximum number of responses
            to ask to the API in the query body.
        :param str filtering: Provide string that help setting geocoder
            filter. It contains an XML string. See examples in documentation
            and ignfrance.py file in directory tests.
        :param bool exactly_one: Return one result or a list of results, if
            available.
        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.
        :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
            ``exactly_one=False``.
        """
        sub_request = """
            <ReverseGeocodeRequest>
                {reverse_geocode_preference}
                <Position>
                  <gml:Point>
                    <gml:pos>{query}</gml:pos>
                  </gml:Point>
                  {filtering}
                </Position>
            </ReverseGeocodeRequest>
        """
        xml_request = self.xml_request.format(
            method_name='ReverseGeocodeRequest',
            sub_request=sub_request,
            maximum_responses=maximum_responses
        )
        # Validate preferences before building the request body.
        for pref in reverse_geocode_preference:
            if pref not in ('StreetAddress', 'PositionOfInterest'):
                raise GeocoderQueryError(
                    '`reverse_geocode_preference` must contain '
                    'one or more of: StreetAddress, PositionOfInterest'
                )
        point = self._coerce_point_to_string(query, "%(lat)s %(lon)s")
        reverse_geocode_preference = '\n'.join((
            '<ReverseGeocodePreference>%s</ReverseGeocodePreference>' % pref
            for pref
            in reverse_geocode_preference
        ))
        request_string = xml_request.format(
            maximum_responses=maximum_responses,
            query=point,
            reverse_geocode_preference=reverse_geocode_preference,
            filtering=filtering
        )
        url = "?".join((self.api, urlencode({'xls': request_string})))
        logger.debug("%s.reverse: %s", self.__class__.__name__, url)
        callback = partial(
            self._parse_xml,
            exactly_one=exactly_one,
            is_reverse=True,
            is_freeform='false'
        )
        return self._request_raw_content(url, callback, timeout=timeout)
    def _parse_xml(self,
                   page,
                   is_reverse=False,
                   is_freeform=False,
                   exactly_one=True):
        """
        Returns location, (latitude, longitude) from XML feed
        and transform to json
        """
        # Parse the page
        tree = ET.fromstring(page.encode('utf-8'))
        # Clean tree from namespace to facilitate XML manipulation
        def remove_namespace(doc, namespace):
            """Remove namespace in the document in place."""
            ns = '{%s}' % namespace
            nsl = len(ns)
            for elem in doc.iter():
                if elem.tag.startswith(ns):
                    elem.tag = elem.tag[nsl:]
        # Strip all three namespaces the service may emit, so later
        # findall() calls can use bare tag names.
        remove_namespace(tree, 'http://www.opengis.net/gml')
        remove_namespace(tree, 'http://www.opengis.net/xls')
        remove_namespace(tree, 'http://www.opengis.net/xlsext')
        # Return places as json instead of XML
        places = self._xml_to_json_places(tree, is_reverse=is_reverse)
        if not places:
            return None
        if exactly_one:
            return self._parse_place(places[0], is_freeform=is_freeform)
        else:
            return [
                self._parse_place(
                    place,
                    is_freeform=is_freeform
                ) for place in places
            ]
    def _xml_to_json_places(self, tree, is_reverse=False):
        """
        Transform the xml ElementTree due to XML webservice return to json
        """
        # Forward and reverse responses wrap results in different elements.
        select_multi = (
            'GeocodedAddress'
            if not is_reverse
            else 'ReverseGeocodedLocation'
        )
        adresses = tree.findall('.//' + select_multi)
        places = []
        sel_pl = './/Address/Place[@type="{}"]'
        for adr in adresses:
            el = {}
            el['pos'] = adr.find('./Point/pos')
            el['street'] = adr.find('.//Address/StreetAddress/Street')
            el['freeformaddress'] = adr.find('.//Address/freeFormAddress')
            el['municipality'] = adr.find(sel_pl.format('Municipality'))
            el['numero'] = adr.find(sel_pl.format('Numero'))
            el['feuille'] = adr.find(sel_pl.format('Feuille'))
            el['section'] = adr.find(sel_pl.format('Section'))
            el['departement'] = adr.find(sel_pl.format('Departement'))
            el['commune_absorbee'] = adr.find(sel_pl.format('CommuneAbsorbee'))
            el['commune'] = adr.find(sel_pl.format('Commune'))
            el['insee'] = adr.find(sel_pl.format('INSEE'))
            el['qualite'] = adr.find(sel_pl.format('Qualite'))
            el['territoire'] = adr.find(sel_pl.format('Territoire'))
            el['id'] = adr.find(sel_pl.format('ID'))
            el['id_tr'] = adr.find(sel_pl.format('ID_TR'))
            el['bbox'] = adr.find(sel_pl.format('Bbox'))
            el['nature'] = adr.find(sel_pl.format('Nature'))
            el['postal_code'] = adr.find('.//Address/PostalCode')
            el['extended_geocode_match_code'] = adr.find(
                './/ExtendedGeocodeMatchCode'
            )
            place = {}
            def testContentAttrib(selector, key):
                """
                Helper to select by attribute and if not attribute,
                value set to empty string
                """
                return selector.attrib.get(
                    key,
                    None
                ) if selector is not None else None
            place['accuracy'] = testContentAttrib(
                adr.find('.//GeocodeMatchCode'), 'accuracy')
            place['match_type'] = testContentAttrib(
                adr.find('.//GeocodeMatchCode'), 'matchType')
            place['building'] = testContentAttrib(
                adr.find('.//Address/StreetAddress/Building'), 'number')
            place['search_centre_distance'] = testContentAttrib(
                adr.find('.//SearchCentreDistance'), 'value')
            for key, value in iter(el.items()):
                if value is not None:
                    place[key] = value.text
                else:
                    place[key] = None
            # We check if lat lng is not empty and unpack accordingly
            # 'pos' text is "lat lng" separated by a single space.
            if place['pos']:
                lat, lng = place['pos'].split(' ')
                place['lat'] = lat.strip()
                place['lng'] = lng.strip()
            else:
                place['lat'] = place['lng'] = None
            # We removed the unused key
            place.pop("pos", None)
            places.append(place)
        return places
    def _request_raw_content(self, url, callback, *, timeout):
        """
        Send the request to get raw content.
        """
        # Auth is either a Referer header or HTTP Basic, never both
        # (enforced in __init__).
        headers = {}
        if self.referer is not None:
            headers['Referer'] = self.referer
        if self.username and self.password and self.referer is None:
            credentials = '{0}:{1}'.format(self.username, self.password).encode()
            auth_str = base64.standard_b64encode(credentials).decode()
            headers['Authorization'] = 'Basic {}'.format(auth_str.strip())
        return self._call_geocoder(
            url,
            callback,
            headers=headers,
            timeout=timeout,
            is_json=False,
        )
    def _parse_place(self, place, is_freeform=None):
        """
        Get the location, lat, lng and place from a single json place.
        """
        # When freeform already so full address
        if is_freeform == 'true':
            location = place.get('freeformaddress')
        else:
            # For parcelle
            if place.get('numero'):
                location = place.get('street')
            else:
                # When classic geocoding
                # or when reverse geocoding
                # Assemble "building street, postal_code commune" from parts.
                location = "%s %s" % (
                    place.get('postal_code', ''),
                    place.get('commune', ''),
                )
                if place.get('street'):
                    location = "%s, %s" % (
                        place.get('street', ''),
                        location,
                    )
                if place.get('building'):
                    location = "%s %s" % (
                        place.get('building', ''),
                        location,
                    )
        return Location(location, (place.get('lat'), place.get('lng')), place)
| {
"content_hash": "432599a9a91c44cc807d99e3cc6c8e08",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 81,
"avg_line_length": 36.41532258064516,
"alnum_prop": 0.5445687077842986,
"repo_name": "jmb/geopy",
"id": "702238ce0158cb1842c946ccdf174b72bbfcd76a",
"size": "18062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geopy/geocoders/ignfrance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1334"
},
{
"name": "Python",
"bytes": "477174"
}
],
"symlink_target": ""
} |
import socket
import sys
import threading
import time
# Rendezvous server used by EchoClient (pseudo registration / user listing).
SERVERADDRESS = (socket.gethostname(), 6000)
# Module-wide UDP socket shared by the Chat instance.
s = socket.socket(type=socket.SOCK_DGRAM)
# Current pseudonym; empty until the user runs /pseudo.
username=''
class Chat():
    """Command-line UDP chat client.

    Binds the module-level UDP socket, reads slash-commands from stdin in
    the main thread, and receives datagrams on a background thread.
    Presence changes are mirrored to the TCP rendezvous server via
    EchoClient.
    """
    def __init__(self, host=socket.gethostname(), port=5000):
        # Bind the shared UDP socket; the short timeout keeps the receive
        # loop responsive so it can notice self.__running going False.
        try:
            s.settimeout(0.5)
            s.bind((host, port))
            self.__s = s
            print('Listening on {}:{}'.format(host, port))
            print('''Use "/pseudo" to choose a pseudonym.
Use "/help" for help.''')
        except OSError:
            print('Please choose another Port or IP address.')
            quit()
        except:
            print('Please choose another Port or IP address.')
            quit()
    def run(self):
        """Main loop: dispatch stdin slash-commands until /exit."""
        # Command name -> bound handler method.
        handlers = {
            '/exit': self._exit,
            '/quit': self._quit,
            '/join': self._join,
            '/send': self._send,
            '/pseudo': self._pseudo,
            '/list':self._list,
            '/help': self._help
        }
        self.__running = True
        self.__address = None
        threading.Thread(target=self._receive).start()
        while self.__running:
            # Trailing space guarantees line.index(' ') succeeds below.
            line = sys.stdin.readline().rstrip() + ' '
            # Extract the command and the param
            command = line[:line.index(' ')]
            param = line[line.index(' ')+1:].rstrip()
            # Call the command handler
            if command in handlers:
                try:
                    handlers[command]() if param == '' else handlers[command](param)
                except:
                    print("Error while command execution.")
            else:
                print('Unknown Command:', command)
    def _exit(self):
        # Announce departure to the peer, deregister from the server,
        # then stop the loops and close the socket.
        self._connection("{} leaved the port.".format(username))
        try:
            EchoClient((": delete :"+str(username)+str(s.getsockname())).encode()).run()
        except:
            pass
        self.__running = False
        self.__address = None
        self.__s.close()
    def _quit(self):
        # Leave the current chatroom but keep the application running.
        self._connection("{} leaved the port.".format(username))
        self.__address = None
        try:
            EchoClient((": delete :"+str(username)+str(s.getsockname())).encode()).run()
        except:
            pass
    def _join(self, param):
        # Join a peer given "host port"; requires a pseudo to be set first.
        if username=='':
            print('Error while joining, please use "/pseudo" to choose a username.')
        else:
            tokens = param.split(' ')
            if len(tokens) == 2:
                try:
                    self.__address = (socket.gethostbyaddr(tokens[0])[0], int(tokens[1]))
                    print('Connected to {}:{}'.format(*self.__address))
                    self._connection("{} joined the port.".format(username))
                except OSError:
                    print("Error during command execution.")
            else:
                print('''Error during command execution.
Please type Port and Ip address. Example: /join localhost 5001''')
    def _connection(self, param):
        # Send a raw (un-prefixed) status datagram to the joined peer.
        if self.__address is not None:
            try:
                message = param.encode()
                totalsent = 0
                # sendto may send fewer bytes than asked; loop until done.
                while totalsent < len(message):
                    sent = self.__s.sendto(message[totalsent:], self.__address)
                    totalsent += sent
            except OSError:
                print('Error during command execution.')
    def _send(self, param):
        # Send a chat message, prefixed "username : ", to the joined peer.
        if self.__address is not None:
            try:
                if username=='':
                    print('Error while sending, please use "/pseudo" to choose a username.')
                else:
                    param = username + " : " + param
                    message = param.encode()
                    totalsent = 0
                    print(self.__address)
                    while totalsent < len(message):
                        sent = self.__s.sendto(message[totalsent:], self.__address)
                        totalsent += sent
                    print("Message sent.")
            except OSError:
                print('Error during command execution.')
        else:
            print("No port joined yet.")
    def _receive(self):
        """Background thread: print incoming datagrams, relay presence."""
        while self.__running:
            try:
                data, address = self.__s.recvfrom(1024)
                localtime = time.asctime(time.localtime(time.time()))
                # Chat messages carry a " : " separator after the pseudo's
                # first character; anything else is a status line.
                if data.decode()[1:4]==' : ':
                    print("{} sent at {}.".format(data.decode(),localtime))
                else:
                    print(data.decode())
                try:
                    n = data.decode().split(' ')
                    # "[:-17]" strips the 17-char " joined the port." /
                    # " leaved the port." suffix, leaving the pseudo.
                    if n[1] =='joined':
                        EchoClient(("{}{}".format(data.decode()[:-17], address)).encode()).run()
                    if n[1] =='leaved':
                        EchoClient((": delete :{}{}".format(data.decode()[:-17], address)).encode()).run()
                except:
                    pass
            except socket.timeout:
                # Expected every 0.5s when idle; just poll __running again.
                pass
            except OSError:
                # Socket was closed by _exit; terminate the thread.
                return
    def _pseudo(self, param):
        # Set the global pseudonym (single word) and register it server-side.
        tokens = param.split(' ')
        if len(tokens) == 1:
            try:
                global username
                username=param
                print('Your pseudo has been saved.')
                EchoClient((str(param)+str(s.getsockname())).encode()).run()
            except:
                pass
        else:
            print('Invalid pseudo.')
    def _list(self):
        # Ask the rendezvous server for the list of online users.
        try:
            command = ": list :" + str(s.getsockname())
            EchoClient(str(command).encode()).run()
        except:
            print("Server not found, impossible to connect.")
    def _help(self):
        print('''Please type:
"/exit" to exit the chat application.
"/quit" to quit a chatroom.
"/join" to join a chat room.
"/send" to send a message to members of the same chatroom.
"/pseudo" to chose a pseudo to chat.
"/list" to show online users.''')
class EchoClient():
    """One-shot TCP client: connect to SERVERADDRESS, send one payload, close."""
    def __init__(self, message):
        # Payload is expected to be bytes; the socket is created eagerly
        # but not connected until run().
        self.__message = message
        self.__s = socket.socket()
    def run(self):
        """Connect, push the whole message, then close the socket."""
        self.__s.connect(SERVERADDRESS)
        self._send()
        self.__s.close()
    def _send(self):
        # send() may transmit fewer bytes than requested; loop until the
        # entire payload has gone out.
        payload = self.__message
        sent_so_far = 0
        try:
            while sent_so_far < len(payload):
                sent_so_far += self.__s.send(payload[sent_so_far:])
        except OSError:
            print("Error while sending message.")
if __name__ == '__main__':
    # Usage: chat.py [host port]; defaults to hostname:5000 otherwise.
    if len(sys.argv) == 3:
        Chat(sys.argv[1], int(sys.argv[2])).run()
    else:
        Chat().run()
| {
"content_hash": "c0321516bbfaf02d2e268fa0f833c221",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 110,
"avg_line_length": 34.645833333333336,
"alnum_prop": 0.4760974143114853,
"repo_name": "Diab0lix/Chat",
"id": "b5997730e822b1808276ee5ff9e49afeb436a34f",
"size": "6787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chat_update/chat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22062"
}
],
"symlink_target": ""
} |
import os
import logging
# Application-wide 'LW' logger: INFO level, written to ~/.lw/lw.log.
# mode='w' truncates the log file on every start.
logger = logging.getLogger('LW')
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.expanduser('~/.lw/lw.log'), mode='w')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
"content_hash": "6d8b1920456cabb460c6cde9a8c5fbbe",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 25.23076923076923,
"alnum_prop": 0.7652439024390244,
"repo_name": "paeronskruven/lw",
"id": "85f25795dc7ee9defa367ce13fc9459a4cd25a69",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lw/log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10718"
}
],
"symlink_target": ""
} |
from flask import redirect, render_template, render_template_string, Blueprint, jsonify
from flask import request, url_for
from flask_user import current_user, login_required, roles_accepted, roles_required
from app import app, db
from app.core.models import UserProfileForm, FeatureRequest, User, Product, UsersRoles, Role
from flask_wtf import csrf
from datetime import datetime
from sqlalchemy import func
core_blueprint = Blueprint('core', __name__, url_prefix='/')
# Landing page — open to anonymous visitors as well as logged-in users.
@core_blueprint.route('')
def home_page():
    """Serve the guest page to anonymous visitors, the home page otherwise."""
    template = (
        'core/home_page.html'
        if current_user.is_authenticated
        else 'core/guest_page.html'
    )
    return render_template(template)
# The User page is accessible to authenticated users (users that have logged in)
@core_blueprint.route('user')
@login_required # Limits access to authenticated users
def user_page():
    """Render the user dashboard; requires an authenticated session."""
    return render_template('core/user_page.html')
# The Admin page is accessible to users with the 'admin' role
@core_blueprint.route('admin')
@roles_accepted('admin') # Limits access to users with the 'admin' role
def admin_page():
    """Render the admin dashboard; requires the 'admin' role."""
    return render_template('core/admin_page.html')
@core_blueprint.route('user/profile', methods=['GET', 'POST'])
@login_required
def user_profile_page():
    """Show the profile form (GET) or apply a valid submission (POST)."""
    # Initialize form
    form = UserProfileForm(request.form, current_user)
    # Process valid POST
    if request.method == 'POST' and form.validate():
        # Copy form fields to user_profile fields
        form.populate_obj(current_user)
        # Save user_profile
        db.session.commit()
        # Redirect to home page
        return redirect(url_for('core.home_page'))
    # Process GET or invalid POST
    return render_template('core/user_profile_page.html',
                           form=form)
# Register blueprint
# Attach the core views to the app; must run after the routes are defined.
app.register_blueprint(core_blueprint)
# Feature Route
@app.route('/features')
@login_required
def feature_request():
    """List feature requests: clients see their own, IWS users see all."""
    if current_user.roles[0].name == 'client':
        features = FeatureRequest.query.filter(FeatureRequest.user_id == current_user.id)
    else:
        features = FeatureRequest.query.all()
    return render_template('core/feature_requests.html',
                           features=features)
@app.route('/new_feature', methods=['POST'])
@login_required
def new_feature():
    """Create a feature request from the JSON payload.

    Clients always create requests for themselves; IWS users create them on
    behalf of the client given in the payload ('client_id'). New requests
    are appended at the end of both the global and the per-client priority
    orders. Returns the created request as JSON.
    """
    # The two original code paths differed only in whose id owns the
    # request — resolve that first, then run a single creation path.
    if current_user.roles[0].name == 'client':
        owner_id = current_user.id
    else:
        owner_id = request.json['client_id']
    # Global priority: append at the end of all requests.
    gp = db.session.query(func.count(FeatureRequest.id)).scalar() + 1
    # Client priority: append at the end of the owner's requests.
    cp = db.session.query(func.count(FeatureRequest.id)).filter(
        FeatureRequest.user_id == owner_id).scalar() + 1
    date_object = datetime.strptime(request.json['target_date'], '%m-%d-%Y')
    feature = FeatureRequest(title=request.json['title'],
                             description=request.json['description'],
                             target_date=date_object,
                             ticket_url=request.json['ticket_url'],
                             user_id=owner_id,
                             product_id=request.json['product_id'],
                             global_priority=gp,
                             client_priority=cp)
    db.session.add(feature)
    db.session.commit()
    # Echo the stored request back (client_id is taken from the payload,
    # matching the original behavior for both roles).
    return jsonify({"title": request.json['title'],
                    "description": request.json['description'],
                    "client_priority": cp,
                    "global_priority": gp,
                    "target_date": request.json['target_date'],
                    "ticket_url": request.json['ticket_url'],
                    "client_id": request.json['client_id'],
                    "id": feature.id,
                    "product_id": request.json['product_id']})
@app.route('/save_priorities', methods=['POST'])
@login_required
def save_priorities():
    """Persist new client/global priorities for one feature request.

    Expects JSON keys 'id', 'priority' (client priority) and
    'global_priority'.  The original code had two byte-identical branches
    for client vs. staff roles; they are collapsed into one.
    """
    fr = FeatureRequest.query.filter_by(id=request.json['id']).first()
    if not fr:
        # Unknown id: report an error like update_feature/delete_feature do
        # instead of crashing with AttributeError.
        return jsonify(reponse=dict(result="error"))
    fr.global_priority = request.json['global_priority']
    fr.client_priority = request.json['priority']
    db.session.commit()
    return jsonify(reponse=dict(result="ok"))
@app.route('/update_feature', methods=['POST'])
@login_required
def update_feature():
    """Update title, ticket URL, target date and product of a feature.

    Responds with {"reponse": {"result": "ok"}} on success or "error"
    when the id is unknown.  NOTE(review): the description field is not
    updated here even though new_feature sets it — confirm that is
    intentional.
    """
    payload = request.json
    # Parse the date first so a malformed date fails the same way it
    # always has, regardless of whether the feature exists.
    parsed_date = datetime.strptime(payload['target_date'], '%m-%d-%Y')
    fr = FeatureRequest.query.filter_by(id=payload['id']).first()
    if not fr:
        return jsonify(reponse=dict(result="error"))
    fr.title = payload['title']
    fr.ticket_url = payload['ticket_url']
    fr.target_date = parsed_date
    fr.product_id = payload['product_id']
    db.session.commit()
    return jsonify(reponse=dict(result="ok"))
@app.route('/delete_feature', methods=['POST'])
@login_required
def delete_feature():
    """Delete a feature request by id.

    Responds with {"reponse": {"result": "ok"}} on success or "error"
    when the id does not exist.
    """
    fr = FeatureRequest.query.filter_by(id=request.json['id']).first()
    if fr:
        # Delete the row we already fetched instead of issuing a second
        # query for the same id as the original code did.
        db.session.delete(fr)
        db.session.commit()
        return jsonify(reponse=dict(result="ok"))
    else:
        return jsonify(reponse=dict(result="error"))
@app.route('/features_list')
@login_required
def features_list():
    """Return feature requests as JSON.

    Clients see only their own requests ordered by client priority;
    everyone else sees all requests ordered by global priority.
    """
    if current_user.roles[0].name == 'client':
        cur = FeatureRequest.query.filter(
            FeatureRequest.user_id == current_user.id).order_by(
            FeatureRequest.client_priority)
    else:
        cur = FeatureRequest.query.order_by(
            FeatureRequest.global_priority).all()
    # Single shared serialization (the original duplicated this list comp
    # in both branches).
    entries = [dict(id=row.id, title=row.title,
                    target_date=row.target_date, description=row.description,
                    ticket_url=row.ticket_url,
                    client_priority=row.client_priority,
                    global_priority=row.global_priority,
                    client_id=row.user_id, product_id=row.product_id)
               for row in cur]
    return jsonify(features=entries)
# Client Route
@app.route('/clients')
@roles_required('admin')
@login_required
def clients():
    """Render the admin page listing every user with the 'client' role."""
    client_users = (User.query.join(User.roles)
                    .filter(Role.name == 'client').group_by(User).all())
    return render_template('core/clients.html', clients=client_users)
@app.route('/clients_list')
@login_required
def clients_list():
    """Return every client-role user as a JSON list ordered by id."""
    rows = (User.query.join(User.roles).filter(Role.name == 'client')
            .group_by(User).order_by(User.id).all())
    entries = []
    for row in rows:
        entries.append(dict(company_name=row.company_name, email=row.email,
                            description="", id=row.id,
                            last_name=row.last_name,
                            first_name=row.first_name,
                            priority=row.priority))
    return jsonify(clients=entries)
@app.route('/new_client', methods=['POST'])
@roles_required('admin')
@login_required
def new_client():
    """Create a client-role user from the posted JSON.

    Fails with {"result": "Error"} when the email is already registered.
    """
    email = request.json['email']
    user = User.query.filter(User.email == email).first()
    if not user:
        user = User(email=email, first_name=request.json['first_name'],
                    last_name=request.json['last_name'],
                    password=app.user_manager.hash_password(
                        request.json['password']),
                    company_name=request.json['company_name'],
                    active=True, confirmed_at=datetime.utcnow())
        role = Role.query.filter(Role.name == 'client').first()
        user.roles.append(role)
        db.session.add(user)
        db.session.commit()
        return jsonify({"email": request.json['email'],
                        "first_name": request.json['first_name'],
                        "result": "OK",
                        "last_name": request.json['last_name'],
                        "company_name": request.json['company_name'],
                        # Added for consistency with new_user, which returns
                        # the generated id (backward-compatible addition).
                        "id": user.id,
                        })
    else:
        return jsonify({"result":"Error","msg":"email exist"})
# Product Route
@app.route('/products')
@roles_required('admin')
@login_required
def products():
    """Render the admin products page with every product."""
    return render_template('core/products.html',
                           products=Product.query.all())
@app.route('/products_list')
@login_required
def products_list():
    """Return all products as a JSON list."""
    entries = [
        dict(id=item.id, product_name=item.product_name,
             description=item.description)
        for item in Product.query.all()]
    return jsonify(products=entries)
@app.route('/new_product', methods=['POST'])
@roles_required('admin')
@login_required
def new_product():
    """Create a product unless its name is already taken."""
    name = request.json['product_name']
    existing = Product.query.filter(Product.product_name == name).first()
    if existing:
        return jsonify({"result":"Error","msg":"product name exist"})
    product = Product(product_name=name,
                      description=request.json['description'])
    db.session.add(product)
    db.session.commit()
    return jsonify({"product_name": name,
                    "description": request.json['description'],
                    "id": product.id,
                    "result": "OK"
                    })
# User Route
@app.route('/users')
@roles_required('admin')
@login_required
def users():
    """Render the admin page listing every user with the 'user' role."""
    staff = (User.query.join(User.roles)
             .filter(Role.name == 'user').group_by(User).all())
    return render_template('core/users.html', users=staff)
@app.route('/users_list')
@roles_required('admin')
@login_required
def users_list():
    """Return every user-role account as a JSON list ordered by id."""
    rows = (User.query.join(User.roles).filter(Role.name == 'user')
            .group_by(User).order_by(User.id).all())
    entries = []
    for account in rows:
        entries.append(dict(email=account.email, id=account.id,
                            last_name=account.last_name,
                            first_name=account.first_name))
    return jsonify(users=entries)
@app.route('/new_user', methods=['POST'])
@roles_required('admin')
@login_required
def new_user():
    """Create an internal user with the 'user' role unless the email exists."""
    email = request.json['email']
    if User.query.filter(User.email == email).first():
        return jsonify({"result":"Error","msg":"email exist"})
    account = User(email=email,
                   first_name=request.json['first_name'],
                   last_name=request.json['last_name'],
                   password=app.user_manager.hash_password(
                       request.json['password']),
                   active=True, confirmed_at=datetime.utcnow())
    account.roles.append(Role.query.filter(Role.name == 'user').first())
    db.session.add(account)
    db.session.commit()
    return jsonify({"email": request.json['email'],
                    "first_name": request.json['first_name'],
                    "result": "OK",
                    "last_name": request.json['last_name'],
                    "id": account.id
                    })
| {
"content_hash": "9ecbe6ff9a4f5cb7580f10f6f8233db0",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 137,
"avg_line_length": 37.34530386740332,
"alnum_prop": 0.5988608624898292,
"repo_name": "walidham/iws2",
"id": "acf22efadf749b0422ed182a04401aae6bf1fd21",
"size": "13620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/core/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "10104"
},
{
"name": "HTML",
"bytes": "52513"
},
{
"name": "JavaScript",
"bytes": "19174"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "37858"
},
{
"name": "Shell",
"bytes": "310"
}
],
"symlink_target": ""
} |
import numpy as np
# Generally, you want at least a few walkers for each dimension you may be
# exploring. To start with we are only using 4 per dimension, but you may
# want to eventually increase this to 8 or 10 if you have more cores
# available for computation. For the **vertical** model, there are 13
# parameters.
nparam = 13
nwalkers = 4 * nparam
print(nwalkers)
# If you are fixing parameters, change `nparam` above to reflect the number
# of parameters you will actually be sampling.
# Each walker starts at a uniform draw from a small interval around the
# initial guess, similar to how `emcee` is initialized. Tweak the
# (low, high) bounds below to correspond to a small guess around your own
# starting position.
_bounds = [
    (1.03, 1.05),  # mass [M_sun]
    (20., 21.0),   # r_c [AU]
    (30., 40.),    # T_10m [K]
    (0.50, 0.55),  # q_m
    (110., 115),   # T_10a [K]
    (0.50, 0.55),  # q_a
    (-3.4, -3.1),  # log10 Sigma_c [log10 g/cm^2]
    (0.17, 0.18),  # xi [km/s]
    (44.0, 46.0),  # inc [degrees]
    (40.0, 41.0),  # PA [degrees]
    (-0.1, 0.1),   # vz [km/s]
    (-0.1, 0.1),   # mu_a [arcsec]
    (-0.1, 0.1),   # mu_d [arcsec]
]
# One row per parameter, one column per walker (same draw order as before).
p0 = np.array([np.random.uniform(lo, hi, nwalkers) for (lo, hi) in _bounds])
# Save the new position file to disk
np.save("pos0.npy", p0)
| {
"content_hash": "c66bd058c31e5683c596d4bd4e55b682",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 285,
"avg_line_length": 52.903225806451616,
"alnum_prop": 0.6359756097560976,
"repo_name": "iancze/JudithExcalibur",
"id": "68138b66818fd7e8810adb2b0663cc107b1f5312",
"size": "1841",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "assets/initialize_walkers.vertical.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Julia",
"bytes": "384436"
},
{
"name": "Jupyter Notebook",
"bytes": "570364"
},
{
"name": "Makefile",
"bytes": "288"
},
{
"name": "Python",
"bytes": "35663"
},
{
"name": "Shell",
"bytes": "1007"
}
],
"symlink_target": ""
} |
import unittest
import json
import os
from subprocess import Popen, PIPE
from genson import SchemaBuilder
BASE_SCHEMA = {"$schema": SchemaBuilder.DEFAULT_URI}
FIXTURE_PATH = os.path.join(os.path.dirname(__file__), 'fixtures')
SHORT_USAGE = """\
usage: genson [-h] [--version] [-d DELIM] [-e ENCODING] [-i SPACES]
[-s SCHEMA] [-$ SCHEMA_URI]
..."""
def fixture(filename):
    """Return the absolute path of a fixture file under FIXTURE_PATH."""
    return os.path.join(FIXTURE_PATH, filename)
def stderr_message(message):
    """Return the full stderr text argparse emits for *message*."""
    return '%s\ngenson: error: %s\n' % (SHORT_USAGE, message)
def run(args=None, stdin_data=None):
    """
    Run the ``genson`` executable as a subprocess and return
    (stdout, stderr).

    Args:
        args: optional list of CLI arguments (default: none).
        stdin_data: optional text piped to the process's stdin.
    """
    # `args or []` replaces the original mutable default argument ([]),
    # which is a shared-state pitfall even when never mutated.
    genson_process = Popen(
        ['python', '-m', 'genson'] + list(args or []),
        stdout=PIPE, stderr=PIPE,
        stdin=PIPE if stdin_data is not None else None)
    if stdin_data is not None:
        stdin_data = stdin_data.encode('utf-8')
    (stdout, stderr) = genson_process.communicate(stdin_data)
    genson_process.wait()
    # communicate() may return bytes; normalize both streams to text.
    if isinstance(stdout, bytes):
        stdout = stdout.decode('utf-8')
    if isinstance(stderr, bytes):
        stderr = stderr.decode('utf-8')
    return (stdout, stderr)
class TestBasic(unittest.TestCase):
    """Smoke tests for trivial inputs (empty stdin, empty objects)."""
    def test_empty_input(self):
        out, err = run(stdin_data='')
        self.assertEqual(err, '')
        self.assertEqual(json.loads(out), BASE_SCHEMA)
    def test_empty_object_stdin(self):
        out, err = run(stdin_data='{}')
        self.assertEqual(err, '')
        expected = dict({"type": "object"}, **BASE_SCHEMA)
        self.assertEqual(json.loads(out), expected)
    def test_empty_object_file(self):
        out, err = run([fixture('empty.json')])
        self.assertEqual(err, '')
        self.assertEqual(json.loads(out), BASE_SCHEMA)
    def test_basic_schema_file(self):
        out, err = run(['-s', fixture('base_schema.json')])
        self.assertEqual(err, '')
        self.assertEqual(json.loads(out), BASE_SCHEMA)
class TestError(unittest.TestCase):
    """Error reporting: missing input and malformed JSON."""
    maxDiff = 1000
    BAD_JSON_FILE = fixture('not_json.txt')
    BAD_JSON_MESSAGE = stderr_message(
        'invalid JSON in %s: Expecting value: line 1 column 1 (char 0)'
        % BAD_JSON_FILE)
    def test_no_input(self):
        out, err = run()
        # 'noting' [sic] — this must match genson's actual message.
        expected = stderr_message(
            'noting to do - no schemas or objects given')
        self.assertEqual(err, expected)
        self.assertEqual(out, '')
    def test_object_not_json(self):
        out, err = run([self.BAD_JSON_FILE])
        self.assertEqual(err, self.BAD_JSON_MESSAGE)
        self.assertEqual(out, '')
    def test_schema_not_json(self):
        out, err = run(['-s', self.BAD_JSON_FILE])
        self.assertEqual(err, self.BAD_JSON_MESSAGE)
        self.assertEqual(out, '')
class TestDelimiter(unittest.TestCase):
    """Tests for the -d/--delimiter option."""
    # Schema genson should infer from {"hi":"there"} followed by {"hi":5}.
    # The original repeated this literal in all three tests.
    EXPECTED = dict({"required": ["hi"], "type": "object", "properties": {
        "hi": {"type": ["integer", "string"]}}}, **BASE_SCHEMA)
    def _assert_schema(self, args, stdin_data):
        # Shared driver: run genson and compare the emitted schema.
        (stdout, stderr) = run(args, stdin_data=stdin_data)
        self.assertEqual(stderr, '')
        self.assertEqual(json.loads(stdout), self.EXPECTED)
    def test_delim_newline(self):
        self._assert_schema(['-d', 'newline'], '{"hi":"there"}\n{"hi":5}')
    def test_delim_auto_empty(self):
        self._assert_schema(['-d', ''], '{"hi":"there"}{"hi":5}')
    def test_delim_auto_whitespace(self):
        self._assert_schema(['-d', ''], '{"hi":"there"} \n\t{"hi":5}')
class TestEncoding(unittest.TestCase):
    """Tests for the -e/--encoding option on non-UTF-8 input files."""
    def test_encoding_unicode(self):
        out, err = run(['-e', 'utf-8', fixture('utf-8.json')])
        self.assertEqual(err, '')
        expected = dict({"type": "string"}, **BASE_SCHEMA)
        self.assertEqual(json.loads(out), expected)
    def test_encoding_cp1252(self):
        out, err = run(['-e', 'cp1252', fixture('cp1252.json')])
        self.assertEqual(err, '')
        expected = dict({"type": "string"}, **BASE_SCHEMA)
        self.assertEqual(json.loads(out), expected)
| {
"content_hash": "31c3b3c2a6121c6ff3703edd3c611a38",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 79,
"avg_line_length": 33.3,
"alnum_prop": 0.5645645645645646,
"repo_name": "wolverdude/GenSON",
"id": "acee10f74ad817c183f684f451bbe45cca173d0b",
"size": "4662",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_bin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56936"
}
],
"symlink_target": ""
} |
"""Unit tests for gclient.py.
See gclient_smoketest.py for integration tests.
"""
import Queue
import copy
import logging
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import gclient
import gclient_utils
from testing_support import trial_dir
def write(filename, content):
  """Writes the content of a file and create the directories as needed."""
  path = os.path.abspath(filename)
  parent = os.path.dirname(path)
  # Build missing parent directories before opening the file.
  if not os.path.isdir(parent):
    os.makedirs(parent)
  with open(path, 'w') as f:
    f.write(content)
class SCMMock(object):
  """Minimal stand-in for a gclient SCM wrapper.

  Records the URL of every RunCommand() invocation on the owning test's
  'processed' queue so the tests can assert processing order.
  """
  def __init__(self, unit_test, url):
    self.unit_test = unit_test
    self.url = url
  def RunCommand(self, command, options, args, file_list):
    # The tests always invoke RunOnDeps('None', ...), hence 'None' here.
    self.unit_test.assertEquals('None', command)
    self.unit_test.processed.put(self.url)
  def FullUrlForRelativeUrl(self, url):
    return self.url + url
  # pylint: disable=R0201
  def DoesRemoteURLMatch(self, _):
    # Always pretend the checkout matches the expected remote.
    return True
  def GetActualRemoteURL(self, _):
    return self.url
class GclientTest(trial_dir.TestCase):
  def setUp(self):
    """Installs the fake SCM factory and wrapped stdout; cd into root_dir."""
    super(GclientTest, self).setUp()
    # URLs are queued here by SCMMock in the order they were processed.
    self.processed = Queue.Queue()
    self.previous_dir = os.getcwd()
    os.chdir(self.root_dir)
    # Manual mocks.
    # Swap the real SCM factory for _createscm; restored in tearDown().
    self._old_createscm = gclient.gclient_scm.CreateSCM
    gclient.gclient_scm.CreateSCM = self._createscm
    # Wrap stdout the same way gclient itself does (auto-flush + annotate).
    self._old_sys_stdout = sys.stdout
    sys.stdout = gclient.gclient_utils.MakeFileAutoFlush(sys.stdout)
    sys.stdout = gclient.gclient_utils.MakeFileAnnotated(sys.stdout)
  def tearDown(self):
    """Restores the mocks and checks every queued URL was consumed."""
    # Each test must have drained self.processed via _get_processed().
    self.assertEquals([], self._get_processed())
    gclient.gclient_scm.CreateSCM = self._old_createscm
    sys.stdout = self._old_sys_stdout
    os.chdir(self.previous_dir)
    super(GclientTest, self).tearDown()
  def _createscm(self, parsed_url, root_dir, name, out_fh=None, out_cb=None):
    """Test double for gclient_scm.CreateSCM: validates args, returns a mock."""
    self.assertTrue(parsed_url.startswith('svn://example.com/'), parsed_url)
    self.assertTrue(root_dir.startswith(self.root_dir), root_dir)
    return SCMMock(self, parsed_url)
  def testDependencies(self):
    # Serial case: a single job gives a fully deterministic order.
    self._dependencies('1')
  def testDependenciesJobs(self):
    # Parallel case: many jobs, so only a partial order can be asserted.
    self._dependencies('1000')
  def _dependencies(self, jobs):
    """Verifies that dependencies are processed in the right order.
    e.g. if there is a dependency 'src' and another 'src/third_party/bar', that
    bar isn't fetched until 'src' is done.
    Also test that a From() dependency should not be processed when it is listed
    as a requirement.
    Args:
      |jobs| is the number of parallel jobs simulated.
    """
    parser = gclient.OptionParser()
    options, args = parser.parse_args(['--jobs', jobs])
    # Three solutions; bar/empty lives under bar's checkout directory, so it
    # must be processed after bar.
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo" },\n'
        '  { "name": "bar", "url": "svn://example.com/bar" },\n'
        '  { "name": "bar/empty", "url": "svn://example.com/bar_empty" },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "foo/dir1": "/dir1",\n'
        # This one will depend on dir1/dir2 in bar.
        '  "foo/dir1/dir2/dir3": "/dir1/dir2/dir3",\n'
        '  "foo/dir1/dir2/dir3/dir4": "/dir1/dir2/dir3/dir4",\n'
        '  "foo/dir1/dir2/dir5/dir6":\n'
        '    From("foo/dir1/dir2/dir3/dir4", "foo/dir1/dir2"),\n'
        '}')
    write(
        os.path.join('bar', 'DEPS'),
        'deps = {\n'
        # There is two foo/dir1/dir2. This one is fetched as bar/dir1/dir2.
        '  "foo/dir1/dir2": "/dir1/dir2",\n'
        '}')
    write(
        os.path.join('bar/empty', 'DEPS'),
        'deps = {\n'
        '}')
    # Test From()
    write(
        os.path.join('foo/dir1/dir2/dir3/dir4', 'DEPS'),
        'deps = {\n'
        # This one should not be fetched or set as a requirement.
        '  "foo/dir1/dir2/dir5": "svn://example.com/x",\n'
        # This foo/dir1/dir2 points to a different url than the one in bar.
        '  "foo/dir1/dir2": "/dir1/another",\n'
        '}')
    obj = gclient.GClient.LoadCurrentConfig(options)
    # Before the run, no requirements have been computed yet.
    self._check_requirements(obj.dependencies[0], {})
    self._check_requirements(obj.dependencies[1], {})
    obj.RunOnDeps('None', args)
    actual = self._get_processed()
    first_3 = [
        'svn://example.com/bar',
        'svn://example.com/bar_empty',
        'svn://example.com/foo',
    ]
    if jobs != 1:
      # We don't care of the ordering of these items except that bar must be
      # before bar/empty.
      self.assertTrue(
          actual.index('svn://example.com/bar') <
          actual.index('svn://example.com/bar_empty'))
      self.assertEquals(first_3, sorted(actual[0:3]))
    else:
      self.assertEquals(first_3, actual[0:3])
    # The remaining deps are fetched strictly in dependency order.
    self.assertEquals(
        [
          'svn://example.com/foo/dir1',
          'svn://example.com/bar/dir1/dir2',
          'svn://example.com/foo/dir1/dir2/dir3',
          'svn://example.com/foo/dir1/dir2/dir3/dir4',
          'svn://example.com/foo/dir1/dir2/dir3/dir4/dir1/another',
        ],
        actual[3:])
    self.assertEquals(3, len(obj.dependencies))
    self.assertEquals('foo', obj.dependencies[0].name)
    self.assertEquals('bar', obj.dependencies[1].name)
    self.assertEquals('bar/empty', obj.dependencies[2].name)
    # After the run, each dependency's computed requirements are exact.
    self._check_requirements(
        obj.dependencies[0],
        {
          'foo/dir1': ['bar', 'bar/empty', 'foo'],
          'foo/dir1/dir2/dir3':
              ['bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2'],
          'foo/dir1/dir2/dir3/dir4':
              [ 'bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
                'foo/dir1/dir2/dir3'],
          'foo/dir1/dir2/dir5/dir6':
              [ 'bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
                'foo/dir1/dir2/dir3/dir4'],
        })
    self._check_requirements(
        obj.dependencies[1],
        {
          'foo/dir1/dir2': ['bar', 'bar/empty', 'foo', 'foo/dir1'],
        })
    self._check_requirements(
        obj.dependencies[2],
        {})
    self._check_requirements(
        obj,
        {
          'foo': [],
          'bar': [],
          'bar/empty': ['bar'],
        })
def _check_requirements(self, solution, expected):
for dependency in solution.dependencies:
e = expected.pop(dependency.name)
a = sorted(dependency.requirements)
self.assertEquals(e, a, (dependency.name, e, a))
self.assertEquals({}, expected)
def _get_processed(self):
"""Retrieves the item in the order they were processed."""
items = []
try:
while True:
items.append(self.processed.get_nowait())
except Queue.Empty:
pass
return items
  def testAutofix(self):
    """A trailing '/@revision' in a dependency URL is normalized."""
    # Invalid urls causes pain when specifying requirements. Make sure it's
    # auto-fixed.
    d = gclient.Dependency(
        None, 'name', 'proto://host/path/@revision', None, None, None, None,
        None, '', True)
    self.assertEquals('proto://host/path@revision', d.url)
  def testStr(self):
    """Smoke test for Dependency/GClient __str__() rendering."""
    parser = gclient.OptionParser()
    options, _ = parser.parse_args([])
    obj = gclient.GClient('foo', options)
    obj.add_dependencies_and_close(
        [
          gclient.Dependency(
              obj, 'foo', 'url', None, None, None, None, None, 'DEPS', True),
          gclient.Dependency(
              obj, 'bar', 'url', None, None, None, None, None, 'DEPS', True),
        ],
        [])
    # Nested deps exercise From() and File() rendering too.
    obj.dependencies[0].add_dependencies_and_close(
        [
          gclient.Dependency(
              obj.dependencies[0], 'foo/dir1', 'url', None, None, None, None,
              None, 'DEPS', True),
          gclient.Dependency(
              obj.dependencies[0], 'foo/dir2',
              gclient.GClientKeywords.FromImpl('bar'), None, None, None, None,
              None, 'DEPS', True),
          gclient.Dependency(
              obj.dependencies[0], 'foo/dir3',
              gclient.GClientKeywords.FileImpl('url'), None, None, None, None,
              None, 'DEPS', True),
        ],
        [])
    # Make sure __str__() works fine.
    # pylint: disable=W0212
    obj.dependencies[0]._file_list.append('foo')
    str_obj = str(obj)
    # 471 is the exact length of the rendered string; any formatting change
    # in __str__() will move this number.
    self.assertEquals(471, len(str_obj), '%d\n%s' % (len(str_obj), str_obj))
  def testHooks(self):
    """A solution's DEPS hooks are collected and returned by GetHooks()."""
    topdir = self.root_dir
    gclient_fn = os.path.join(topdir, '.gclient')
    fh = open(gclient_fn, 'w')
    print >> fh, 'solutions = [{"name":"top","url":"svn://example.com/top"}]'
    fh.close()
    subdir_fn = os.path.join(topdir, 'top')
    os.mkdir(subdir_fn)
    deps_fn = os.path.join(subdir_fn, 'DEPS')
    fh = open(deps_fn, 'w')
    hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
    print >> fh, 'hooks = %s' % repr(hooks)
    fh.close()
    # A file for the hook pattern ('.') to match against.
    fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
    print >> fh, 'bogus content'
    fh.close()
    os.chdir(topdir)
    parser = gclient.OptionParser()
    options, _ = parser.parse_args([])
    options.force = True
    client = gclient.GClient.LoadCurrentConfig(options)
    # Run the (mocked) sync so DEPS files get parsed.
    work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
    for s in client.dependencies:
      work_queue.enqueue(s)
    work_queue.flush({}, None, [], options=options)
    self.assertEqual(client.GetHooks(options), [x['action'] for x in hooks])
  def testCustomHooks(self):
    """custom_hooks can append hooks and, by name, skip DEPS-defined ones."""
    topdir = self.root_dir
    gclient_fn = os.path.join(topdir, '.gclient')
    fh = open(gclient_fn, 'w')
    # 'append' adds a hook; the bare {'name': 'skip'} entry suppresses every
    # DEPS hook carrying that name.
    extra_hooks = [{'name': 'append', 'pattern':'.', 'action':['supercmd']}]
    print >> fh, ('solutions = [{"name":"top","url":"svn://example.com/top",'
        '"custom_hooks": %s},' ) % repr(extra_hooks + [{'name': 'skip'}])
    print >> fh, '{"name":"bottom","url":"svn://example.com/bottom"}]'
    fh.close()
    subdir_fn = os.path.join(topdir, 'top')
    os.mkdir(subdir_fn)
    deps_fn = os.path.join(subdir_fn, 'DEPS')
    fh = open(deps_fn, 'w')
    hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
    hooks.append({'pattern':'.', 'action':['cmd2', 'arg1', 'arg2']})
    skip_hooks = [
        {'name': 'skip', 'pattern':'.', 'action':['cmd3', 'arg1', 'arg2']}]
    skip_hooks.append(
        {'name': 'skip', 'pattern':'.', 'action':['cmd4', 'arg1', 'arg2']})
    print >> fh, 'hooks = %s' % repr(hooks + skip_hooks)
    fh.close()
    # Make sure the custom hooks for that project don't affect the next one.
    subdir_fn = os.path.join(topdir, 'bottom')
    os.mkdir(subdir_fn)
    deps_fn = os.path.join(subdir_fn, 'DEPS')
    fh = open(deps_fn, 'w')
    sub_hooks = [{'pattern':'.', 'action':['response1', 'yes1', 'yes2']}]
    sub_hooks.append(
        {'name': 'skip', 'pattern':'.', 'action':['response2', 'yes', 'sir']})
    print >> fh, 'hooks = %s' % repr(sub_hooks)
    fh.close()
    # A file for the hook patterns ('.') to match against.
    fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
    print >> fh, 'bogus content'
    fh.close()
    os.chdir(topdir)
    parser = gclient.OptionParser()
    options, _ = parser.parse_args([])
    options.force = True
    client = gclient.GClient.LoadCurrentConfig(options)
    # Run the (mocked) sync so DEPS files get parsed.
    work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
    for s in client.dependencies:
      work_queue.enqueue(s)
    work_queue.flush({}, None, [], options=options)
    # 'skip'-named hooks vanish; custom 'append' hook follows top's hooks;
    # bottom's hooks are unaffected by top's custom_hooks.
    self.assertEqual(client.GetHooks(options),
                     [x['action'] for x in hooks + extra_hooks + sub_hooks])
  def testTargetOS(self):
    """Verifies that specifying a target_os pulls in all relevant dependencies.
    The target_os variable allows specifying the name of an additional OS which
    should be considered when selecting dependencies from a DEPS' deps_os. The
    value will be appended to the _enforced_os tuple.
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  }]\n'
        'target_os = ["baz"]')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "foo/dir1": "/dir1",'
        '}\n'
        'deps_os = {\n'
        '  "unix": { "foo/dir2": "/dir2", },\n'
        '  "baz": { "foo/dir3": "/dir3", },\n'
        '}')
    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = "unix"
    obj = gclient.GClient.LoadCurrentConfig(options)
    # Both the CLI OS ("unix") and .gclient's target_os ("baz") are enforced.
    self.assertEqual(['baz', 'unix'], sorted(obj.enforced_os))
  def testTargetOsWithTargetOsOnly(self):
    """Verifies that specifying a target_os and target_os_only pulls in only
    the relevant dependencies.
    The target_os variable allows specifying the name of an additional OS which
    should be considered when selecting dependencies from a DEPS' deps_os. With
    target_os_only also set, the _enforced_os tuple will be set to only the
    target_os value.
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  }]\n'
        'target_os = ["baz"]\n'
        'target_os_only = True')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "foo/dir1": "/dir1",'
        '}\n'
        'deps_os = {\n'
        '  "unix": { "foo/dir2": "/dir2", },\n'
        '  "baz": { "foo/dir3": "/dir3", },\n'
        '}')
    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = "unix"
    obj = gclient.GClient.LoadCurrentConfig(options)
    # target_os_only drops the CLI OS ("unix"); only "baz" remains.
    self.assertEqual(['baz'], sorted(obj.enforced_os))
def testTargetOsOnlyWithoutTargetOs(self):
"""Verifies that specifying a target_os_only without target_os_only raises
an exception.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os_only = True')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
exception_raised = False
try:
gclient.GClient.LoadCurrentConfig(options)
except gclient_utils.Error:
exception_raised = True
self.assertTrue(exception_raised)
  def testTargetOsInDepsFile(self):
    """Verifies that specifying a target_os value in a DEPS file pulls in all
    relevant dependencies.
    The target_os variable in a DEPS file allows specifying the name of an
    additional OS which should be considered when selecting dependencies from a
    DEPS' deps_os. The value will be appended to the _enforced_os tuple.
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  },\n'
        '  { "name": "bar",\n'
        '    "url": "svn://example.com/bar",\n'
        '  }]\n')
    # Only foo's DEPS declares target_os = ["baz"].
    write(
        os.path.join('foo', 'DEPS'),
        'target_os = ["baz"]\n'
        'deps_os = {\n'
        '  "unix": { "foo/unix": "/unix", },\n'
        '  "baz": { "foo/baz": "/baz", },\n'
        '  "jaz": { "foo/jaz": "/jaz", },\n'
        '}')
    write(
        os.path.join('bar', 'DEPS'),
        'deps_os = {\n'
        '  "unix": { "bar/unix": "/unix", },\n'
        '  "baz": { "bar/baz": "/baz", },\n'
        '  "jaz": { "bar/jaz": "/jaz", },\n'
        '}')
    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = 'unix'
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    self.assertEqual(['unix'], sorted(obj.enforced_os))
    # foo pulls baz (its own target_os) and unix; bar pulls only unix.
    self.assertEquals(
        [
          'svn://example.com/bar',
          'svn://example.com/bar/unix',
          'svn://example.com/foo',
          'svn://example.com/foo/baz',
          'svn://example.com/foo/unix',
        ],
        sorted(self._get_processed()))
  def testUpdateWithOsDeps(self):
    """Verifies that complicated deps_os constructs result in the
    correct data also with multple operating systems. Also see
    testDepsOsOverrideDepsInDepsFile."""
    test_data = [
        # Tuples of deps, deps_os, os_list and expected_deps.
        (
            # OS doesn't need module.
            {'foo': 'default_foo'},
            {'os1': { 'foo': None } },
            ['os1'],
            {'foo': None}
            ),
        (
            # OS wants a different version of module.
            {'foo': 'default_foo'},
            {'os1': { 'foo': 'os1_foo'} },
            ['os1'],
            {'foo': 'os1_foo'}
            ),
        (
            # OS with no overrides at all.
            {'foo': 'default_foo'},
            {'os1': { 'foo': None } },
            ['os2'],
            {'foo': 'default_foo'}
            ),
        (
            # One OS doesn't need module, one OS wants the default.
            {'foo': 'default_foo'},
            {'os1': { 'foo': None },
             'os2': {}},
            ['os1', 'os2'],
            {'foo': 'default_foo'}
            ),
        (
            # One OS doesn't need module, another OS wants a special version.
            {'foo': 'default_foo'},
            {'os1': { 'foo': None },
             'os2': { 'foo': 'os2_foo'}},
            ['os1', 'os2'],
            {'foo': 'os2_foo'}
            ),
        (
            # One OS wants to add a module.
            {'foo': 'default_foo'},
            {'os1': { 'bar': 'os1_bar' }},
            ['os1'],
            {'foo': 'default_foo',
             'bar': 'os1_bar'}
            ),
        (
            # One OS wants to add a module. One doesn't care.
            {'foo': 'default_foo'},
            {'os1': { 'bar': 'os1_bar' }},
            ['os1', 'os2'],
            {'foo': 'default_foo',
             'bar': 'os1_bar'}
            ),
        (
            # Two OSes want to add a module with the same definition.
            {'foo': 'default_foo'},
            {'os1': { 'bar': 'os12_bar' },
             'os2': { 'bar': 'os12_bar' }},
            ['os1', 'os2'],
            {'foo': 'default_foo',
             'bar': 'os12_bar'}
            ),
        ]
    for deps, deps_os, target_os_list, expected_deps in test_data:
      orig_deps = copy.deepcopy(deps)
      result = gclient.Dependency.MergeWithOsDeps(deps, deps_os, target_os_list)
      self.assertEqual(result, expected_deps)
      # MergeWithOsDeps must not mutate its input dict.
      self.assertEqual(deps, orig_deps)
  def testLateOverride(self):
    """Verifies expected behavior of LateOverride."""
    # A plain scheme-qualified URL passes through LateOverride unchanged.
    url = "git@github.com:dart-lang/spark.git"
    d = gclient.Dependency(None, 'name', 'url',
                           None, None, None, None, None, '', True)
    late_url = d.LateOverride(url)
    self.assertEquals(url, late_url)
  def testDepsOsOverrideDepsInDepsFile(self):
    """Verifies that a 'deps_os' path can override a 'deps' path. Also
    see testUpdateWithOsDeps above.
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  },]\n')
    write(
        os.path.join('foo', 'DEPS'),
        'target_os = ["baz"]\n'
        'deps = {\n'
        '  "foo/src": "/src",\n' # This path is to be overridden by similar path
                                 # in deps_os['unix'].
        '}\n'
        'deps_os = {\n'
        '  "unix": { "foo/unix": "/unix",'
        '            "foo/src": "/src_unix"},\n'
        '  "baz": { "foo/baz": "/baz",\n'
        '           "foo/src": None},\n'
        '  "jaz": { "foo/jaz": "/jaz", },\n'
        '}')
    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = 'unix'
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    self.assertEqual(['unix'], sorted(obj.enforced_os))
    # foo/src resolves to /src_unix (deps_os wins over deps).
    self.assertEquals(
        [
          'svn://example.com/foo',
          'svn://example.com/foo/baz',
          'svn://example.com/foo/src_unix',
          'svn://example.com/foo/unix',
        ],
        sorted(self._get_processed()))
  def testRecursionOverride(self):
    """Verifies gclient respects the |recursion| var syntax.
    We check several things here:
    - |recursion| = 3 sets recursion on the foo dep to exactly 3
      (we pull /fizz, but not /fuzz)
    - pulling foo/bar at recursion level 1 (in .gclient) is overridden by
      a later pull of foo/bar at recursion level 2 (in the dep tree)
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo" },\n'
        '  { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}\n'
        'recursion = 3')
    write(
        os.path.join('bar', 'DEPS'),
        'deps = {\n'
        '  "baz": "/baz",\n'
        '}')
    write(
        os.path.join('baz', 'DEPS'),
        'deps = {\n'
        '  "fizz": "/fizz",\n'
        '}')
    # fizz's dep on fuzz is beyond recursion depth 3, so never pulled.
    write(
        os.path.join('fizz', 'DEPS'),
        'deps = {\n'
        '  "fuzz": "/fuzz",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    self.assertEquals(
        [
          'svn://example.com/foo',
          'svn://example.com/bar',
          'svn://example.com/foo/bar',
          'svn://example.com/foo/bar/baz',
          'svn://example.com/foo/bar/baz/fizz',
        ],
        self._get_processed())
def testRecursedepsOverride(self):
"""Verifies gclient respects the |recursedeps| var syntax.
This is what we mean to check here:
- |recursedeps| = {...} on 2 levels means we pull exactly 3 deps
(up to /fizz, but not /fuzz)
- pulling foo/bar with no recursion (in .gclient) is overriden by
a later pull of foo/bar with recursion (in the dep tree)
- pulling foo/tar with no recursion (in .gclient) is no recursively
pulled (taz is left out)
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
' { "name": "foo/tar", "url": "svn://example.com/tar" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursedeps = {"bar"}')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}\n'
'recursedeps = {"baz"}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
write(
os.path.join('fizz', 'DEPS'),
'deps = {\n'
' "fuzz": "/fuzz",\n'
'}')
write(
os.path.join('tar', 'DEPS'),
'deps = {\n'
' "taz": "/taz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
'svn://example.com/foo',
'svn://example.com/bar',
'svn://example.com/tar',
'svn://example.com/foo/bar',
'svn://example.com/foo/bar/baz',
'svn://example.com/foo/bar/baz/fizz',
],
self._get_processed())
def testRecursionOverridesRecursedeps(self):
"""Verifies gclient respects |recursion| over |recursedeps|.
|recursion| is set in a top-level DEPS file. That value is meant
to affect how many subdeps are parsed via recursion.
|recursedeps| is set in each DEPS file to control whether or not
to recurse into the immediate next subdep.
This test verifies that if both syntaxes are mixed in a DEPS file,
we disable |recursedeps| support and only obey |recursion|.
Since this setting is evaluated per DEPS file, recursed DEPS
files will each be re-evaluated according to the per DEPS rules.
So a DEPS that only contains |recursedeps| could then override any
previous |recursion| setting. There is extra processing to ensure
this does not happen.
For this test to work correctly, we need to use a DEPS chain that
only contains recursion controls in the top DEPS file.
In foo, |recursion| and |recursedeps| are specified. When we see
|recursion|, we stop trying to use |recursedeps|.
There are 2 constructions of DEPS here that are key to this test:
(1) In foo, if we used |recursedeps| instead of |recursion|, we
would also pull in bar. Since bar's DEPS doesn't contain any
recursion statements, we would stop processing at bar.
(2) In fizz, if we used |recursedeps| at all, we should pull in
fuzz.
We expect to keep going past bar (satisfying 1) and we don't
expect to pull in fuzz (satisfying 2).
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursion = 3\n'
'recursedeps = {"bar"}')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
write(
os.path.join('fizz', 'DEPS'),
'deps = {\n'
' "fuzz": "/fuzz",\n'
'}\n'
'recursedeps = {"fuzz"}')
write(
os.path.join('fuzz', 'DEPS'),
'deps = {\n'
' "tar": "/tar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
'svn://example.com/foo',
'svn://example.com/bar',
'svn://example.com/foo/bar',
# Deps after this would have been skipped if we were obeying
# |recursedeps|.
'svn://example.com/foo/bar/baz',
'svn://example.com/foo/bar/baz/fizz',
# And this dep would have been picked up if we were obeying
# |recursedeps|.
# 'svn://example.com/foo/bar/baz/fuzz',
],
self._get_processed())
def testGitDeps(self):
"""Verifies gclient respects a .DEPS.git deps file.
Along the way, we also test that if both DEPS and .DEPS.git are present,
that gclient does not read the DEPS file. This will reliably catch bugs
where gclient is always hitting the wrong file (DEPS).
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', '.DEPS.git'),
'deps = {\n'
' "bar": "/bar",\n'
'}')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
'svn://example.com/foo',
'svn://example.com/foo/bar',
],
self._get_processed())
def testGitDepsFallback(self):
"""Verifies gclient respects fallback to DEPS upon missing deps file."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
'svn://example.com/foo',
'svn://example.com/foo/bar',
],
self._get_processed())
if __name__ == '__main__':
  # Wrap stdout/stderr so every write is flushed immediately and annotated.
  # NOTE(review): include_zero presumably also annotates output from the main
  # thread (index 0) -- confirm against gclient_utils.
  sys.stdout = gclient_utils.MakeFileAutoFlush(sys.stdout)
  sys.stdout = gclient_utils.MakeFileAnnotated(sys.stdout, include_zero=True)
  sys.stderr = gclient_utils.MakeFileAutoFlush(sys.stderr)
  sys.stderr = gclient_utils.MakeFileAnnotated(sys.stderr, include_zero=True)
  # Log level scales with the number of repeated -v flags, capped at DEBUG.
  logging.basicConfig(
      level=[logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG][
          min(sys.argv.count('-v'), 3)],
      format='%(relativeCreated)4d %(levelname)5s %(module)13s('
          '%(lineno)d) %(message)s')
  unittest.main()
| {
"content_hash": "229e869aeb7a5eafd3c9f679806207df",
"timestamp": "",
"source": "github",
"line_count": 878,
"max_line_length": 80,
"avg_line_length": 32.424829157175395,
"alnum_prop": 0.5437844673153255,
"repo_name": "yetu/repotools",
"id": "aff9174e167757c896be49855bf630e432c6ecfc",
"size": "28658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/gclient_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "17417"
},
{
"name": "C",
"bytes": "5028"
},
{
"name": "Python",
"bytes": "1591520"
},
{
"name": "Shell",
"bytes": "58445"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask.ext.assets import Environment, Bundle
from flask_site.helpers import read_yaml, read_env
from flask_site.errors import HTTPMethodNotImplementedError, ControllerNotFoundError, ConfigNotFoundError
import importlib
import os.path
# All configuration is read once at import time; importing this module
# performs file I/O on the three YAML files below.
main_config = read_yaml('flask_site/config/config.yml')
environment = main_config.get(read_env())  # config section for the current environment name
flask_config = environment.get('flask') if environment else None # Get config for this environment, if it exists
routes_config = read_yaml('flask_site/config/routes.yml')
bundles_config = read_yaml('flask_site/config/bundles.yml')
def create_app(config=main_config, env=environment):
    """Build and configure the Flask application.

    :param config: full parsed config mapping (defaults to module-level config)
    :param env: config section for the active environment
    :raises ConfigNotFoundError: when config, env or the env's flask section
        is missing
    :returns: a configured Flask app with routes and asset bundles registered
    """
    if not config:
        raise ConfigNotFoundError('Config is not available')
    if not env:
        raise ConfigNotFoundError('Environment is not set')
    # Read all flask settings from the env that was actually passed in.
    # Previously DEBUG came from the module-level flask_config global (so a
    # caller-supplied env was partially ignored) and a missing 'flask' section
    # crashed with AttributeError/KeyError instead of a config error.
    flask_env = env.get('flask')
    if not flask_env:
        raise ConfigNotFoundError('Flask config is not available for this environment')
    app = Flask(__name__,
                template_folder=os.path.abspath('templates'),
                static_folder=os.path.abspath('static'))
    app.config['DEBUG'] = flask_env.get('debug')
    app.config['ASSETS_DEBUG'] = app.config['DEBUG']
    app.config['SECRET_KEY'] = flask_env.get('secret_key')
    app.config['ENV'] = env
    create_routes(app)
    compile_assets(app)
    return app
def check_and_compile_bundle(name, settings):
    """Validate one asset-bundle definition and return its flask-assets Bundle.

    :param name: bundle name; must be non-empty
    :param settings: dict with 'type' (str) and 'files' (list); optional
        'filters' (list joined with commas) and 'output' (target path pattern)
    :raises ValueError: when the definition is malformed
    :raises IOError: when a listed file does not exist under static/
    """
    if len(name) == 0:
        raise ValueError('The bundle name must have a length of more than 0')
    if not isinstance(settings['type'], str):
        raise ValueError('The "%s" bundle must have a string type associated with it' % name)
    if len(settings['type']) == 0:
        raise ValueError('The "%s" bundle type must have a type length of more than 0' % name)
    if len(settings['files']) == 0:
        raise ValueError('The "%s" bundle must have files associated with it' % name)
    # Check each file in bundle to make sure it exists.
    static_abs_path = os.path.abspath('static')
    for filename in settings['files']:
        if not os.path.isfile(os.path.join(static_abs_path, filename)):
            raise IOError('File "%s" in bundle "%s" does not exist.' % (filename, name))
    # 'filters' used to be mandatory (a missing key raised KeyError) while
    # 'output' was optional; treat a missing 'filters' like an explicit None.
    filter_spec = settings.get('filters')
    filters = None if filter_spec is None else ','.join(filter_spec)
    if 'output' in settings:
        output = settings['output']
    else:
        # Versioned default output path, e.g. out/<name>.<version>.css
        output = 'out/' + name + '.%(version)s' + '.' + settings['type']
    return Bundle(*settings['files'], filters=filters, output=output)
def compile_assets(app, bundle_config=bundles_config):
    """Register every configured asset bundle on the app's webassets env.

    :param app: the Flask application to attach assets to
    :param bundle_config: mapping of bundle name -> bundle settings
    :raises ConfigNotFoundError: when the bundle config is empty
    """
    if not bundle_config:
        raise ConfigNotFoundError('Bundles config is empty')
    asset_env = Environment(app)
    for bundle_name, bundle_settings in bundle_config.iteritems():
        asset_env.register(bundle_name,
                           check_and_compile_bundle(bundle_name, bundle_settings))
def create_routes(app, app_routes=routes_config):
    """Register a URL rule on the app for every configured route.

    :param app: the Flask application to register routes on
    :param app_routes: mapping of route name -> route settings
        (controller, uri, methods), typically loaded from routes.yml
    :raises ConfigNotFoundError: when the routes config is empty
    :raises ControllerNotFoundError: when a controller class cannot be loaded
    :raises HTTPMethodNotImplementedError: when a controller lacks a handler
        for one of the configured HTTP methods
    """
    if not app_routes:
        raise ConfigNotFoundError('Routes config is empty')
    for route_name, route in app_routes.iteritems():
        # Resolve the controller class; a missing attribute means the
        # configured controller does not exist.
        try:
            view_cls = load_class('flask_site.controllers.%s' % route['controller'])
        except AttributeError:
            raise ControllerNotFoundError('Class %s is not found' % route['controller'])
        # Every configured HTTP verb must have a matching handler method.
        available = dir(view_cls)
        for method in route['methods']:
            method = method.lower()
            if method not in available:
                raise HTTPMethodNotImplementedError(
                    'Class %s is not implementing method %s' % (route['controller'], method.upper())
                )
        app.add_url_rule(route['uri'],
                         view_func=view_cls.as_view(route_name),
                         methods=route['methods'])
def load_class(full_class_string):
    """Import and return the object named by a dotted path string.

    :param full_class_string: e.g. 'package.module.ClassName'
    """
    module_path, _, attr_name = full_class_string.rpartition(".")
    return getattr(importlib.import_module(module_path), attr_name)
| {
"content_hash": "3f993e3df4d9ed497c1ce5f461979ada",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 113,
"avg_line_length": 36.6875,
"alnum_prop": 0.6495497688001947,
"repo_name": "dpraul/flask-continuous-env",
"id": "8313bfa245bff0063a83d6c483bd4f87cb1b4a2f",
"size": "4109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_site/helpers/app_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "541"
},
{
"name": "HTML",
"bytes": "21640"
},
{
"name": "Nginx",
"bytes": "1368"
},
{
"name": "Python",
"bytes": "12196"
}
],
"symlink_target": ""
} |
# Module metadata for this observer-pattern implementation.
__author__ = "Paul Council, Joseph Gonzoph, Anand Patel"
__version__ = "sprint1"
__credits__ = ["Dan, Pat"]
class Observable(object):
    """Subject side of the observer pattern.

    Maintains a registry of observers and broadcasts messages to all of
    them. Each observer is expected to expose a notify(msg) method.

    Attributes:
        observer_list: the list of registered observer instances
    """

    def __init__(self):
        self.observer_list = []

    def notify_all(self, msg):
        """Send msg to every registered observer.

        :param msg: the message to be sent to the observers
        :type msg: Message
        """
        for registered in self.observer_list:
            registered.notify(msg)

    def add_observer(self, observer):
        """Register observer unless it is already registered.

        :param observer: the observer object to register to this observable
        :type observer: Observer
        """
        if observer in self.observer_list:
            return
        self.observer_list.append(observer)

    def delete_all_observers(self):
        """Unregister every observer (clears the list in place)."""
        del self.observer_list[:]
"content_hash": "d788397b81357b7999d725c293db31b9",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 75,
"avg_line_length": 27.473684210526315,
"alnum_prop": 0.5986590038314177,
"repo_name": "PaulieC/sprint1_Council",
"id": "8003ce840c0e9ed4c38f0f2dd26a8a23d4f0f01e",
"size": "1044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Observable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46404"
}
],
"symlink_target": ""
} |
import sys, os
from ConfigParser import SafeConfigParser
import os.path as op
# Get distribution configuration
# Read setup.cfg from the distribution root so the docs can pick up the
# egg_info/tag_build suffix (dev/rc tag) used below for |release|.
dist_root = op.join(op.dirname(__file__), '..')
setup_cfg = op.join(dist_root, 'setup.cfg')
dist_config = SafeConfigParser()
dist_config.read(setup_cfg)
# Make the in-tree package importable so the docs build without installation.
sys.path.append(dist_root)
from pyesgf import __version__ as pyesgf_version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ESGF Pyclient'
copyright = u'2012, Stephen Pascoe'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pyesgf_version
# The full version, including alpha/beta/rc tags.
try:
    release = pyesgf_version + dist_config.get('egg_info', 'tag_build')
except Exception:
    # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt;
    # Exception still covers ConfigParser's NoSectionError/NoOptionError
    # raised when setup.cfg has no egg_info/tag_build entry.
    release = pyesgf_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pyramid'
#html_theme_path = []
#html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ESGFPyclientdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ESGFPyclient.tex', u'ESGF Pyclient Documentation',
u'Stephen Pascoe', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'esgfpyclient', u'ESGF Pyclient Documentation',
[u'Stephen Pascoe'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ESGFPyclient', u'ESGF Pyclient Documentation',
u'Stephen Pascoe', 'ESGFPyclient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "9b4421a7cc532a57d6e60c89bf173cd3",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 80,
"avg_line_length": 32.130801687763714,
"alnum_prop": 0.7033486539724229,
"repo_name": "stephenpascoe/esgf-pyclient",
"id": "0a5be6021f1177db1f3437786cb60f560d67599b",
"size": "8039",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "111874"
}
],
"symlink_target": ""
} |
from syft.core.node.common.node_table import Base # noqa
| {
"content_hash": "53eb1fa6e24a5b1a068a932ab697837a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 57,
"avg_line_length": 58,
"alnum_prop": 0.7758620689655172,
"repo_name": "OpenMined/PySyft",
"id": "ef9f1337cbbdf6bd0d632d6c690f85cb87bbf1a9",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "packages/grid/backend/grid/db/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2084"
},
{
"name": "Cap'n Proto",
"bytes": "1377"
},
{
"name": "Dockerfile",
"bytes": "9740"
},
{
"name": "HCL",
"bytes": "4438"
},
{
"name": "JavaScript",
"bytes": "85898"
},
{
"name": "Jupyter Notebook",
"bytes": "33167760"
},
{
"name": "Makefile",
"bytes": "7605"
},
{
"name": "Mako",
"bytes": "510"
},
{
"name": "PowerShell",
"bytes": "161"
},
{
"name": "Python",
"bytes": "3710174"
},
{
"name": "Shell",
"bytes": "52371"
},
{
"name": "TypeScript",
"bytes": "346493"
}
],
"symlink_target": ""
} |
from pymongo import MongoClient
from dataservice import DataService
import operator
import math
import time
class Helper(object):
    """Similarity helpers for the recommender."""

    @classmethod
    def cosine_similarity(cls, app_list1, app_list2):
        """Cosine similarity between two download histories.

        Treats each list as a bag of apps: |intersection| / sqrt(|a| * |b|).
        Returns 0.0 when either list is empty (previously this raised
        ZeroDivisionError).
        """
        if not app_list1 or not app_list2:
            return 0.0
        return float(cls.__count_match(app_list1, app_list2) / math.sqrt(len(app_list1) * len(app_list2)))

    @classmethod
    def __count_match(cls, list1, list2):
        """Count elements of list1 that also appear in list2 (set lookup
        instead of the previous O(len1*len2) scan)."""
        members = set(list2)
        return sum(1 for element in list1 if element in members)
def calculate_top_k(app, user_download_history, num):
    """Rank every app co-downloaded with `app` and persist the top `num`.

    For each user history, adds the cosine similarity between [app] and
    that history to every other app in it, then stores the `num` highest
    scoring (app_id, score) pairs on the app document.

    :param app: app id being processed
    :param user_download_history: iterable of per-user app-id lists
    :param num: how many neighbours to keep
    :returns: False when `app` never appears in any history, else True
    """
    app_similarity = {}
    for apps in user_download_history:
        similarity = Helper.cosine_similarity([app], apps)
        for other_app in apps:
            # dict.get replaces the deprecated has_key branching.
            app_similarity[other_app] = app_similarity.get(other_app, 0) + similarity
    if app not in app_similarity:
        return False
    app_similarity.pop(app)  # the app itself is not its own neighbour
    sorted_tups = sorted(app_similarity.items(), key=operator.itemgetter(1), reverse=True)
    field_name = "top_" + str(num) + "_app"
    DataService.update_app_info({'app_id': app}, {'$set': {field_name: sorted_tups[:num]}})
    return True
def generate_recommendations(user_download_history, recommendations):
    """Materialise recommendations for every user in the history map."""
    for user_id in user_download_history.keys():
        generate_recommendations_for_one_user(user_download_history,
                                              recommendations, user_id)
    return
def generate_recommendations_for_one_user(user_download_history, recommendations, one_user_id):
    """Build and persist the recommended-app list for one user.

    Collects the precomputed (similar_app, score) neighbours of every app the
    user downloaded, orders them by score (descending, stable on ties) and
    stores the resulting app-id list on the user document.
    """
    recommended_app_list = []
    sim_score = []
    for app in user_download_history[one_user_id]:
        # Membership test on the dict itself is O(1); the previous
        # `app in recommendations.keys()` scanned a list per lookup.
        if app in recommendations:
            for one_sim_app in recommendations[app]:
                recommended_app_list.append(one_sim_app[0])
                sim_score.append(one_sim_app[1])
    sorted_list = [x for (y, x) in sorted(zip(sim_score, recommended_app_list), key=lambda pair: pair[0], reverse=True)]
    DataService.update_user_download_history({'user_id': one_user_id}, {'$set': {"recommended_apps": sorted_list}})
    return
def main():
    """Entry point: connect to local MongoDB, compute top-5 similar apps for
    every app, then materialise per-user recommendation lists."""
    try:
        client = MongoClient('localhost', 27017)
        DataService.init(client)
        user_download_history = DataService.retrieve_user_download_history()
        all_app_id = DataService.get_all_app_id()
        for one_id in all_app_id:
            calculate_top_k(one_id, user_download_history.values(), 5)
        recommendations = DataService.retrieve_recommended_items()
        generate_recommendations(user_download_history, recommendations)
    except Exception as e:
        # NOTE(review): broad catch -- any failure is only printed and the
        # process exits 0; consider logging a traceback or re-raising.
        print(e)
    finally:
        # Close the Mongo connection only if it was actually created.
        if 'client' in locals():
            client.close()
# Crude wall-clock measurement around the whole run.
# NOTE(review): time.clock() is deprecated (removed in Python 3.8) and the
# bare `print` statement below confirms this module targets Python 2.
start = time.clock()
if __name__ == "__main__":
    main()
end = time.clock()
print "time elapsed = " + str(end - start)
| {
"content_hash": "5ec28584fe9c1ab19552059b26a3414f",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 122,
"avg_line_length": 32.45652173913044,
"alnum_prop": 0.630609511051574,
"repo_name": "brucexiejiaming/App_store",
"id": "7baea4b987f75ae7ff33d5d8b43607ae52e542ec",
"size": "2986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "part2_recommender/recommender.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8085"
}
],
"symlink_target": ""
} |
import mock
from rally.plugins.common.runners import serial
from tests.unit import fakes
from tests.unit import test
class SerialScenarioRunnerTestCase(test.TestCase):
    """Unit tests for serial.SerialScenarioRunner."""

    def setUp(self):
        super(SerialScenarioRunnerTestCase, self).setUp()

    @mock.patch("rally.task.runner._run_scenario_once")
    def test__run_scenario(self, mock__run_scenario_once):
        # Every iteration returns the same canned result dict.
        iterations = 5
        fake_result = {"duration": 10, "idle_duration": 0, "error": [],
                       "scenario_output": {}, "atomic_actions": {}}
        mock__run_scenario_once.return_value = fake_result
        runner = serial.SerialScenarioRunner(mock.MagicMock(),
                                             {"times": iterations})
        runner._run_scenario(fakes.FakeScenario, "do_it",
                             fakes.FakeUserContext({}).context, {})
        self.assertEqual(len(runner.result_queue), iterations)
        self.assertEqual(list(runner.result_queue),
                         [fake_result] * iterations)

    def test__run_scenario_aborted(self):
        # An aborted runner must not produce any results.
        runner = serial.SerialScenarioRunner(mock.MagicMock(),
                                             {"times": 5})
        runner.abort()
        runner._run_scenario(fakes.FakeScenario, "do_it",
                             fakes.FakeUserContext({}).context, {})
        self.assertEqual(len(runner.result_queue), 0)

    def test_abort(self):
        runner = serial.SerialScenarioRunner(mock.MagicMock(),
                                             {"times": 5})
        self.assertFalse(runner.aborted.is_set())
        runner.abort()
        self.assertTrue(runner.aborted.is_set())
| {
"content_hash": "daf9c48a8375b073fb865cc188001d8b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 67,
"avg_line_length": 39.833333333333336,
"alnum_prop": 0.5839808726838015,
"repo_name": "shdowofdeath/rally",
"id": "1ae4a8464ce3ee17666fb91ffd3a9724d6dceb1c",
"size": "2325",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/common/runners/test_serial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46737"
},
{
"name": "Python",
"bytes": "2421750"
},
{
"name": "Shell",
"bytes": "36795"
}
],
"symlink_target": ""
} |
import numpy as np
from tilitools.profiler import profile
class MKLWrapper:
    """Lp-norm Multiple Kernel Learning Wrapper for convex semi-supervised anomaly detection

    Wraps an ssad-like solver and learns a p-norm-regularised convex mixture
    of precomputed kernel matrices by alternating between solver training and
    closed-form mixing-coefficient updates.

    Note:
        - p-norm mkl supported
        - dual solution is supported.

    Written by: Nico Goernitz, TU Berlin, 2013/14
    """
    def __init__(self, ssad, kernels, pnorm=2.0):
        """
        :param ssad: solver exposing set_train_kernel/fit/get_alphas and cy
        :param kernels: list of (n x n) precomputed kernel matrices
        :param pnorm: p of the Lp-norm regulariser on the mixing coefficients
        """
        self.kernels = kernels  # (list of 2d arrays) kernel matrices
        self.samples = kernels[0].shape[0]
        self.pnorm = pnorm
        self.num_kernels = len(kernels)
        # np.float was a deprecated alias of the builtin float and was removed
        # in NumPy 1.24; use float directly (identical behavior).
        self.dm = np.ones(self.num_kernels, dtype=float) / float(self.num_kernels)  # uniform mixing coefficients
        self.ssad = ssad
        self.ssad.set_train_kernel(self.combine_kernels(kernels))
        print('MKL with {0} kernels.'.format(self.num_kernels))

    def combine_kernels(self, kernels):
        """Return the mixture sum_l dm[l] * K_l of the given kernel matrices."""
        dim1, dim2 = kernels[0].shape
        mixed = np.zeros((dim1, dim2))
        for i in range(self.num_kernels):
            mixed += self.dm[i] * kernels[i]
        return mixed

    @profile
    def fit(self, precision=1e-3):
        """Alternate ssad training and mixing-coefficient updates until the
        coefficients change by less than `precision` (L1 distance)."""
        pnorm = self.pnorm
        iteration = 0  # renamed from `iter`, which shadowed the builtin
        lastsol = np.zeros((self.num_kernels))
        # NOTE: kept as an element-wise loop on purpose -- self.dm becomes a
        # column vector after the first update, so a vectorised
        # `lastsol - self.dm` would broadcast to a matrix.
        while sum([abs(lastsol[i]-self.dm[i]) for i in range(self.num_kernels)]) > precision:
            # train ssad with current kernel mixing coefficients
            self.ssad.set_train_kernel(self.combine_kernels(self.kernels))
            self.ssad.fit()
            # calculate new kernel mixing coefficients
            lastsol = self.dm.copy()
            alphas = self.ssad.get_alphas()
            cy = self.ssad.cy
            # linear part of the objective: per-kernel squared weight norms
            norm_w_sq_m = np.zeros((self.num_kernels, 1))
            res = cy.dot(cy.T)*alphas.dot(alphas.T)
            for l in range(self.num_kernels):
                norm_w_sq_m[l] = np.sum(self.dm[l]*self.dm[l] * res * self.kernels[l])
            # closed-form Lp-norm update of the mixing coefficients
            sum_norm_w = np.sum(np.power(norm_w_sq_m, pnorm/(pnorm+1.0)))
            sum_norm_w = np.power(sum_norm_w, 1.0/pnorm)
            dm = np.power(norm_w_sq_m, 1.0/(pnorm+1.0))/sum_norm_w
            print('New mixing coefficients:', dm)
            dm_norm = np.sum(np.power(abs(dm), pnorm))
            dm_norm = np.power(dm_norm, 1.0/pnorm)
            print('Norm of mixing coefficients: ', dm_norm)
            self.dm = dm
            iteration += 1
        print('Num iterations = {0}.'.format(iteration))
        return self

    def get_threshold(self):
        """Decision threshold of the wrapped solver."""
        return self.ssad.get_threshold()

    def get_support_dual(self):
        """Indices of the support vectors in the dual solution."""
        return self.ssad.get_support_dual()

    def get_mixing_coefficients(self):
        """Learned kernel mixing coefficients dm."""
        return self.dm

    def apply(self, kernels):
        """Score test data given its per-kernel test matrices."""
        mixed = self.combine_kernels(kernels)
        res = self.ssad.apply(mixed)
        return res
| {
"content_hash": "b87ca17fe7ce28d9e9f3029952c270a5",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 120,
"avg_line_length": 34.51219512195122,
"alnum_prop": 0.5833922261484099,
"repo_name": "nicococo/tilitools",
"id": "12144d9064e292c1bb160fe1d90d824931bf7360",
"size": "2830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tilitools/lp_mkl_wrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5757245"
},
{
"name": "Python",
"bytes": "100692"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2017 Nico Gräf (nicograef.de, github.com/nicograef)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from numpy import *
class NeuralNetwork:
    """Fully-connected feed-forward network trained with backpropagation.

    Each weight matrix in self.weights carries one extra row for the bias
    unit; activations are sigmoid throughout. Weights are initialised lazily
    on the first call to train().
    """

    def __init__(self, hidden_layer, learningRate=0.1):
        #random.seed(1)
        self.hiddenLayer = hidden_layer   # list of hidden-layer sizes, e.g. [4, 3]
        self.learningRate = learningRate
        self.weights = []                 # filled by init_weights() on first train()

    def train(self, input, output, t=1000, printError=False):
        """Run t epochs of forward/backward propagation over (input, output)."""
        if len(self.weights) == 0:
            self.init_weights(input, output)
        # Bug fix: int(t / 20) is 0 for t < 20, making `i % 0` raise
        # ZeroDivisionError whenever printError was enabled; clamp to >= 1.
        report_every = max(1, int(t / 20))
        for i in range(t):
            layers, layers_with_bias = self.forward_propagation(input)  # forward propagation
            deltas = self.backward_propagation(output, layers)          # backward propagation
            self.updateWeights(deltas, layers_with_bias)                # updating the weights
            if printError and (i % report_every == 0):  # print every 5%
                print(str(int(i / (t / 100))) + "%", end=" ")
                print("Error:", mean(abs(output - layers[-1])).round(4))

    def dream(self, input, output, t=10, printPercentage=False):
        """Adjust `input` (in place) so the frozen network maps it toward `output`."""
        report_every = max(1, int(t / 20))  # same ZeroDivisionError guard as train()
        for i in range(t):
            layers, layers_with_bias = self.forward_propagation(input)
            deltas = self.backward_propagation(output, layers)
            input += self.updateInput(deltas, layers_with_bias, input)
            if printPercentage and (i % report_every == 0):
                print(str(int(i / (t / 100))) + "%")
        return input

    def setLearningRate(self, newLearningRate):
        self.learningRate = newLearningRate

    def forward_propagation(self, input):
        """Return (per-layer activations, same activations with a bias column)."""
        layers = [input]
        layers_with_bias = [array([append(row, ([1.0])) for row in layers[0]])]
        for i, weights in enumerate(self.weights):
            weighted_sum = dot(layers_with_bias[i], self.weights[i])
            layers.append(self.activation_function(weighted_sum))
            layers_with_bias.append(array([append(row, ([1.0])) for row in layers[i + 1]]))
        return layers, layers_with_bias

    def backward_propagation(self, output, layers):
        """Compute delta terms from the output layer back toward the input."""
        deltas = []
        for i, weights in enumerate(self.weights):
            if i == 0:
                error = output - layers[-1]
            else:
                # weights without the bias row -- the bias has no upstream error
                error = dot(deltas[i - 1], self.weights[-i][:-1].T)
            deltas.append(error * self.activation_function(layers[-1 - i], True))
        return deltas

    def updateWeights(self, deltas, layers):
        """Gradient step on every weight matrix."""
        for i, delta in enumerate(reversed(deltas)):
            self.weights[i] += dot(layers[i].T, delta) * self.learningRate

    def updateInput(self, deltas, layers, input):
        """Gradient of the loss w.r.t. the input (used by dream())."""
        # `layers` is unused but kept for signature compatibility.
        input_error = dot(deltas[-1], self.weights[0][:-1].T)
        input_delta = input_error * self.activation_function(input, True)
        return input_delta

    def init_weights(self, input, output):
        """Create one weight matrix per layer transition (+1 row for bias)."""
        inputSize = len(input[0])
        outputSize = len(output[0])
        for i, units in enumerate(self.hiddenLayer):
            if i == 0:
                self.weights.append(self.randomWeights(inputSize + 1, units))  # + 1 for bias
            else:
                self.weights.append(self.randomWeights(self.hiddenLayer[i - 1] + 1, units))  # + 1 for bias
        self.weights.append(self.randomWeights(self.hiddenLayer[-1] + 1, outputSize))  # + 1 for bias

    def randomWeights(self, size1, size2):
        """Uniform random matrix in (-1, 1)."""
        return 2 * random.random((size1, size2)) - 1

    def predict(self, input):
        """Forward pass only; returns the output-layer activations."""
        layers, layers_with_bias = self.forward_propagation(input)
        return layers[-1]

    def activation_function(self, x, deriv=False):
        return self.sigmoid(x, deriv)

    def sigmoid(self, x, deriv=False):
        """Sigmoid, or its derivative expressed in terms of the activation."""
        if deriv == True:
            return x * (1 - x)
        return 1 / (1 + exp(-x))

    def score(self, input, output):
        """Fraction of rows whose rounded prediction equals the target row."""
        predicted_output = self.predict(input).round()
        correct = 0
        for i, prediction in enumerate(predicted_output):
            if array_equal(prediction, output[i]):
                correct += 1
        return correct / len(input)
| {
"content_hash": "a16a8a1ed08533e91e50cc24e8a9ce44",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 126,
"avg_line_length": 43.439655172413794,
"alnum_prop": 0.6368327049017662,
"repo_name": "graefnico/ai",
"id": "61c8838ab8ab6d7e6bb874974d74e4a912739c23",
"size": "5040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neural-net/NeuralNetwork.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22560"
}
],
"symlink_target": ""
} |
import os
from building import *
def ExtendPackageVar(package, var):
    """Return the list stored under ``var`` in the package description.

    Always returns a fresh list (never an alias of the package's own
    value); a missing key yields an empty list.
    """
    return list(package.get(var, []))
def BuildPackage(package):
    """Build the SCons object group described by a package.json file.

    ``package`` is the path of the JSON description file.  Returns the
    group created by DefineGroup(), or [] when the description has no
    'name' field.
    """
    import json

    # Read the JSON description; the original leaked the file handle by
    # never closing it — `with` guarantees it is released.
    with open(package) as f:
        package_json = f.read()

    # get package.json path
    cwd = os.path.dirname(package)
    package = json.loads(package_json)

    # a package without a name cannot be built
    if 'name' not in package:
        return []

    # get depends
    depend = ExtendPackageVar(package, 'depends')

    src = []
    if 'source_files' in package:
        for src_file in package['source_files']:
            src_file = os.path.join(cwd, src_file)
            src += Glob(src_file)

    CPPPATH = []
    if 'CPPPATH' in package:
        for path in package['CPPPATH']:
            # absolute paths that exist are kept as-is; everything else
            # is resolved relative to the package directory
            if path.startswith('/') and os.path.isdir(path):
                CPPPATH = CPPPATH + [path]
            else:
                CPPPATH = CPPPATH + [os.path.join(cwd, path)]

    CPPDEFINES = ExtendPackageVar(package, 'CPPDEFINES')
    objs = DefineGroup(package['name'], src, depend = depend, CPPPATH = CPPPATH, CPPDEFINES = CPPDEFINES)
    return objs
| {
"content_hash": "24b3ddbb53f488270c55040008b6edd8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 105,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.5859709153122327,
"repo_name": "RT-Thread/rt-thread",
"id": "e9c43b66b7c4361eeb7d82bdf5cfbd3a505eef9e",
"size": "2184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/package.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "20211623"
},
{
"name": "Batchfile",
"bytes": "77561"
},
{
"name": "C",
"bytes": "1056417995"
},
{
"name": "C++",
"bytes": "945403"
},
{
"name": "CMake",
"bytes": "250858"
},
{
"name": "CSS",
"bytes": "138218"
},
{
"name": "GDB",
"bytes": "11796"
},
{
"name": "HTML",
"bytes": "4763477"
},
{
"name": "JavaScript",
"bytes": "637"
},
{
"name": "LLVM",
"bytes": "10344"
},
{
"name": "Lex",
"bytes": "7026"
},
{
"name": "Logos",
"bytes": "7238"
},
{
"name": "Lua",
"bytes": "922"
},
{
"name": "M4",
"bytes": "17515"
},
{
"name": "Makefile",
"bytes": "485713"
},
{
"name": "Pawn",
"bytes": "1250"
},
{
"name": "Perl",
"bytes": "16728"
},
{
"name": "Python",
"bytes": "3175087"
},
{
"name": "RPC",
"bytes": "14162"
},
{
"name": "Shell",
"bytes": "422027"
},
{
"name": "Tcl",
"bytes": "179"
},
{
"name": "Yacc",
"bytes": "30555"
}
],
"symlink_target": ""
} |
"""This module contains all custom errors."""
import MySQLdb as mdb
class BadRequestError(Exception):
    """Error raised for a bad (malformed or invalid) request."""
    pass
class MySQLPoolSizeError(mdb.DatabaseError):
    """MySQL connection-pool size error.

    Subclasses MySQLdb's DatabaseError so handlers that catch generic
    database errors also catch this one.
    """
    pass
| {
"content_hash": "f79938044d423d4279f8e4057fa45d78",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 45,
"avg_line_length": 16.9,
"alnum_prop": 0.7514792899408284,
"repo_name": "andjey/status-tracker",
"id": "c2da2423c184cd953f29b2e31aae71a17e86a113",
"size": "169",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/status_tracker/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "29605"
},
{
"name": "Shell",
"bytes": "98"
}
],
"symlink_target": ""
} |
import re
from functools import wraps
import json
import inspect
import threading
import six
from funcy import memoize
from .cross import md5hex
import django
from django.db import models
from django.http import HttpRequest
from .conf import redis_client
# NOTE: we don't serialize this fields since their values could be very long
# and one should not filter by their equality anyway.
NOT_SERIALIZED_FIELDS = (
    models.FileField,
    models.TextField, # One should not filter by long text equality
)
# BinaryField is not available in every Django version, hence the guard.
if hasattr(models, 'BinaryField'):
    NOT_SERIALIZED_FIELDS += (models.BinaryField,)
def non_proxy(model):
    """Follow proxy models up to the concrete model they proxy."""
    while model._meta.proxy:
        # Every proxy model has exactly one non-abstract Model parent;
        # take the first matching direct base.
        concrete_parents = (b for b in model.__bases__
                            if issubclass(b, models.Model) and not b._meta.abstract)
        model = next(concrete_parents)
    return model
# Django 1.6 renamed Options.module_name to model_name; define the
# accessor that matches the running Django version.
if django.VERSION < (1, 6):
    def get_model_name(model):
        # pre-1.6 spelling
        return model._meta.module_name
else:
    def get_model_name(model):
        return model._meta.model_name
class MonkeyProxy(object):
    """Namespace object holding pre-monkey-patch attributes.

    On construction it merges the ``_no_monkey`` namespaces of all
    direct bases of *cls* (in base order), so mixed-in methods can still
    reach original implementations inherited from those bases.
    """
    def __init__(self, cls):
        for base in cls.__bases__:
            base_proxy = getattr(base, '_no_monkey', None)
            if base_proxy is not None:
                self.__dict__.update(base_proxy.__dict__)
def monkey_mix(cls, mixin, methods=None):
    """
    Mixes a mixin into existing class.
    Does not use actual multi-inheritance mixins, just monkey patches methods.
    Mixin methods can call copies of original ones stored in `_no_monkey` proxy:

    class SomeMixin(object):
        def do_smth(self, arg):
            ... do smth else before
            self._no_monkey.do_smth(self, arg)
            ... do smth else after

    ``methods`` may list explicit method names; by default all methods of
    the mixin are patched in.  Mixing into the same class twice is an error.
    """
    assert '_no_monkey' not in cls.__dict__, 'Multiple monkey mix not supported'
    cls._no_monkey = MonkeyProxy(cls)
    if methods is None:
        # NOTE: there no such thing as unbound method in Python 3, it uses naked functions,
        # so we use some six based altering here
        isboundmethod = inspect.isfunction if six.PY3 else inspect.ismethod
        methods = inspect.getmembers(mixin, isboundmethod)
    else:
        methods = [(m, getattr(mixin, m)) for m in methods]
    for name, method in methods:
        # stash the original implementation (if any) on the proxy so the
        # mixin can delegate to it via cls._no_monkey.<name>
        if hasattr(cls, name):
            setattr(cls._no_monkey, name, getattr(cls, name))
        # NOTE: remember, there is no bound methods in Python 3
        setattr(cls, name, six.get_unbound_function(method))
@memoize
def stamp_fields(model):
    """Return an md5 fingerprint of the model's field definitions."""
    descriptors = [(f.name, f.attname, f.db_column, f.__class__)
                   for f in model._meta.fields]
    return md5hex(str(descriptors))
### Cache keys calculation
def func_cache_key(func, args, kwargs, extra=None):
    """Calculate a cache key from a function and its call arguments."""
    key_parts = [
        func.__module__,
        func.__name__,
        func.__code__.co_firstlineno,
        args,
        kwargs,
        extra,
    ]
    # default=str lets non-JSON-serializable arguments take part in the key
    return md5hex(json.dumps(key_parts, sort_keys=True, default=str))
def view_cache_key(func, args, kwargs, extra=None):
    """Calculate a cache key for a view function call.

    The request (first positional argument) is not properly
    serializable, so its absolute URI stands in for it and the request
    itself is dropped from the key arguments.
    """
    request = args[0]
    uri = request.build_absolute_uri()
    return 'v:' + func_cache_key(func, args[1:], kwargs, extra=(uri, extra))
def cached_view_fab(_cached):
    """Build a view-caching decorator factory on top of a generic
    ``_cached`` decorator factory.

    The produced decorator caches only GET/HEAD responses and keys the
    cache on the request URI (see view_cache_key); other methods fall
    through to the undecorated view.
    """
    def cached_view(*dargs, **dkwargs):
        def decorator(func):
            # force the view-specific key function onto the wrapped cache
            dkwargs['_get_key'] = view_cache_key
            cached_func = _cached(*dargs, **dkwargs)(func)
            @wraps(func)
            def wrapper(request, *args, **kwargs):
                assert isinstance(request, HttpRequest), \
                    "A view should be passed with HttpRequest as first argument"
                # never serve mutating requests from cache
                if request.method not in ('GET', 'HEAD'):
                    return func(request, *args, **kwargs)
                return cached_func(request, *args, **kwargs)
            return wrapper
        return decorator
    return cached_view
### Lua script loader
import os.path
STRIP_RE = re.compile(r'TOSTRIP.*/TOSTRIP', re.S)

@memoize
def load_script(name, strip=False):
    """Load lua/<name>.lua (relative to this module) and register it
    with the redis client.

    When ``strip`` is true, TOSTRIP.../TOSTRIP sections are removed
    from the script before registration.
    """
    # TODO: strip comments
    script_path = os.path.join(os.path.dirname(__file__), 'lua/%s.lua' % name)
    with open(script_path) as script_file:
        source = script_file.read()
    if strip:
        source = STRIP_RE.sub('', source)
    return redis_client.register_script(source)
### Whitespace handling for template tags
from django.utils.safestring import mark_safe
NEWLINE_BETWEEN_TAGS = mark_safe('>\n<')
SPACE_BETWEEN_TAGS = mark_safe('> <')

def carefully_strip_whitespace(text):
    """Collapse whitespace between adjacent HTML tags.

    Whitespace spans containing a newline collapse to a single newline;
    any remaining run of two or more whitespace characters collapses to
    one space.
    """
    # The newline rule must run first: the space rule's \s{2,} would
    # otherwise consume newline-containing spans as well.
    collapsed = re.sub(r'>\s*\n\s*<', NEWLINE_BETWEEN_TAGS, text)
    return re.sub(r'>\s{2,}<', SPACE_BETWEEN_TAGS, collapsed)
# This will help mimic thread globals via dicts
def get_thread_id():
    """Return the current thread's ident, usable as a per-thread dict key."""
    current = threading.current_thread()
    return current.ident
| {
"content_hash": "8509420161333f3c734b4c625358d877",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 97,
"avg_line_length": 30.772151898734176,
"alnum_prop": 0.6394487865076101,
"repo_name": "th13f/django-cacheops",
"id": "1807a0833e3998707dbd1a9adb3fa750b12c6000",
"size": "4886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cacheops/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Lua",
"bytes": "2959"
},
{
"name": "Python",
"bytes": "105318"
}
],
"symlink_target": ""
} |
import json
import mock
import os
from oslo.config import cfg
from heat.common import exception as heat_exception
from heat.common import identifier
from heat.common import policy
from heat.openstack.common import rpc
from heat.common.wsgi import Request
from heat.rpc import api as rpc_api
from heat.api.aws import exception
import heat.api.cfn.v1.stacks as stacks
from heat.tests.common import HeatTestCase
from heat.tests import utils
# Absolute path of the JSON policy fixtures shipped alongside this test
policy_path = os.path.dirname(os.path.realpath(__file__)) + "/policy/"
class CfnStackControllerTest(HeatTestCase):
'''
Tests the API class which acts as the WSGI controller,
the endpoint processing API requests after they are routed
'''
    def setUp(self):
        """Register test config options and build the WSGI controller."""
        super(CfnStackControllerTest, self).setUp()
        opts = [
            cfg.StrOpt('config_dir', default=policy_path),
            cfg.StrOpt('config_file', default='foo'),
            cfg.StrOpt('project', default='heat'),
        ]
        cfg.CONF.register_opts(opts)
        cfg.CONF.set_default('host', 'host')
        self.topic = rpc_api.ENGINE_TOPIC
        self.api_version = '1.0'
        self.template = {u'AWSTemplateFormatVersion': u'2010-09-09',
                         u'Foo': u'bar'}
        # Create WSGI controller instance
        class DummyConfig():
            bind_port = 8000
        cfgopts = DummyConfig()
        self.controller = stacks.StackController(options=cfgopts)
        # deny_stack_user.json is one of the fixtures under policy_path
        self.controller.policy.enforcer.policy_path = (policy_path +
                                                       'deny_stack_user.json')
        self.addCleanup(self.m.VerifyAll)
def _dummy_GET_request(self, params={}):
# Mangle the params dict into a query string
qs = "&".join(["=".join([k, str(params[k])]) for k in params])
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': qs}
req = Request(environ)
req.context = utils.dummy_context()
return req
    def _stub_enforce(self, req, action, allowed=True):
        """Stub policy enforcement to allow or deny *action*, then replay mox."""
        self.m.StubOutWithMock(policy.Enforcer, 'enforce')
        if allowed:
            policy.Enforcer.enforce(req.context, action
                                    ).AndReturn(True)
        else:
            policy.Enforcer.enforce(req.context, action
                                    ).AndRaise(heat_exception.Forbidden)
        self.m.ReplayAll()
# The tests
    def test_stackid_addprefix(self):
        """_id_format turns the identity dict into an ARN string."""
        self.m.ReplayAll()
        response = self.controller._id_format({
            'StackName': 'Foo',
            'StackId': {
                u'tenant': u't',
                u'stack_name': u'Foo',
                u'stack_id': u'123',
                u'path': u''
            }
        })
        expected = {'StackName': 'Foo',
                    'StackId': 'arn:openstack:heat::t:stacks/Foo/123'}
        self.assertEqual(expected, response)
    def test_enforce_ok(self):
        """_enforce returns None when policy allows the action."""
        params = {'Action': 'ListStacks'}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'ListStacks')
        response = self.controller._enforce(dummy_req, 'ListStacks')
        self.assertIsNone(response)
    def test_enforce_denied(self):
        """_enforce raises HeatAccessDeniedError when policy forbids."""
        self.m.ReplayAll()
        params = {'Action': 'ListStacks'}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'ListStacks', False)
        self.assertRaises(exception.HeatAccessDeniedError,
                          self.controller._enforce, dummy_req, 'ListStacks')
    def test_enforce_ise(self):
        """Unexpected enforcement errors map to HeatInternalFailureError."""
        params = {'Action': 'ListStacks'}
        dummy_req = self._dummy_GET_request(params)
        dummy_req.context.roles = ['heat_stack_user']
        self.m.StubOutWithMock(policy.Enforcer, 'enforce')
        policy.Enforcer.enforce(dummy_req.context, 'ListStacks'
                                ).AndRaise(AttributeError)
        self.m.ReplayAll()
        self.assertRaises(exception.HeatInternalFailureError,
                          self.controller._enforce, dummy_req, 'ListStacks')
    @mock.patch.object(rpc, 'call')
    def test_list(self, mock_call):
        """ListStacks maps engine summaries to AWS StackSummaries."""
        # Format a dummy GET request to pass into the WSGI handler
        params = {'Action': 'ListStacks'}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'ListStacks')
        # Stub out the RPC call to the engine with a pre-canned response
        engine_resp = [{u'stack_identity': {u'tenant': u't',
                                            u'stack_name': u'wordpress',
                                            u'stack_id': u'1',
                                            u'path': u''},
                        u'updated_time': u'2012-07-09T09:13:11Z',
                        u'template_description': u'blah',
                        u'stack_status_reason': u'Stack successfully created',
                        u'creation_time': u'2012-07-09T09:12:45Z',
                        u'stack_name': u'wordpress',
                        u'stack_action': u'CREATE',
                        u'stack_status': u'COMPLETE'}]
        mock_call.return_value = engine_resp
        # Call the list controller function and compare the response
        result = self.controller.list(dummy_req)
        expected = {'ListStacksResponse': {'ListStacksResult':
                    {'StackSummaries':
                     [{u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1',
                       u'LastUpdatedTime': u'2012-07-09T09:13:11Z',
                       u'TemplateDescription': u'blah',
                       u'StackStatusReason': u'Stack successfully created',
                       u'CreationTime': u'2012-07-09T09:12:45Z',
                       u'StackName': u'wordpress',
                       u'StackStatus': u'CREATE_COMPLETE'}]}}}
        self.assertEqual(expected, result)
        default_args = {'limit': None, 'sort_keys': None, 'marker': None,
                        'sort_dir': None, 'filters': None}
        mock_call.assert_called_once_with(dummy_req.context, self.topic,
                                          {'namespace': None,
                                           'method': 'list_stacks',
                                           'args': default_args,
                                           'version': self.api_version},
                                          None)
    @mock.patch.object(rpc, 'call')
    def test_list_rmt_aterr(self, mock_call):
        """AttributeError from the engine maps to HeatInvalidParameterValueError."""
        params = {'Action': 'ListStacks'}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'ListStacks')
        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
        mock_call.side_effect = AttributeError
        # Call the list controller function and compare the response
        result = self.controller.list(dummy_req)
        self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
        mock_call.assert_called_once_with(dummy_req.context, self.topic,
                                          {'namespace': None,
                                           'method': 'list_stacks',
                                           'args': mock.ANY,
                                           'version': self.api_version},
                                          None)
    @mock.patch.object(rpc, 'call')
    def test_list_rmt_interr(self, mock_call):
        """Generic engine exceptions map to HeatInternalFailureError."""
        params = {'Action': 'ListStacks'}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'ListStacks')
        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
        mock_call.side_effect = Exception()
        # Call the list controller function and compare the response
        result = self.controller.list(dummy_req)
        self.assertIsInstance(result, exception.HeatInternalFailureError)
        mock_call.assert_called_once_with(dummy_req.context, self.topic,
                                          {'namespace': None,
                                           'method': 'list_stacks',
                                           'args': mock.ANY,
                                           'version': self.api_version},
                                          None)
    def test_describe(self):
        """DescribeStacks by name: identify_stack then show_stack, AWS-mapped."""
        # Format a dummy GET request to pass into the WSGI handler
        stack_name = u"wordpress"
        identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
        params = {'Action': 'DescribeStacks', 'StackName': stack_name}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'DescribeStacks')
        # Stub out the RPC call to the engine with a pre-canned response
        # Note the engine returns a load of keys we don't actually use
        # so this is a subset of the real response format
        engine_resp = [{u'stack_identity':
                        {u'tenant': u't',
                         u'stack_name': u'wordpress',
                         u'stack_id': u'6',
                         u'path': u''},
                        u'updated_time': u'2012-07-09T09:13:11Z',
                        u'parameters': {u'DBUsername': u'admin',
                                        u'LinuxDistribution': u'F17',
                                        u'InstanceType': u'm1.large',
                                        u'DBRootPassword': u'admin',
                                        u'DBPassword': u'admin',
                                        u'DBName': u'wordpress'},
                        u'outputs':
                        [{u'output_key': u'WebsiteURL',
                          u'description': u'URL for Wordpress wiki',
                          u'output_value': u'http://10.0.0.8/wordpress'}],
                        u'stack_status_reason': u'Stack successfully created',
                        u'creation_time': u'2012-07-09T09:12:45Z',
                        u'stack_name': u'wordpress',
                        u'notification_topics': [],
                        u'stack_action': u'CREATE',
                        u'stack_status': u'COMPLETE',
                        u'description': u'blah',
                        u'disable_rollback': 'true',
                        u'timeout_mins':60,
                        u'capabilities':[]}]
        self.m.StubOutWithMock(rpc, 'call')
        rpc.call(dummy_req.context, self.topic,
                 {'namespace': None,
                  'method': 'identify_stack',
                  'args': {'stack_name': stack_name},
                  'version': self.api_version}, None).AndReturn(identity)
        rpc.call(dummy_req.context, self.topic,
                 {'namespace': None,
                  'method': 'show_stack',
                  'args': {'stack_identity': identity},
                  'version': self.api_version}, None).AndReturn(engine_resp)
        self.m.ReplayAll()
        # Call the list controller function and compare the response
        response = self.controller.describe(dummy_req)
        expected = {'DescribeStacksResponse':
                    {'DescribeStacksResult':
                     {'Stacks':
                      [{'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
                        'StackStatusReason': u'Stack successfully created',
                        'Description': u'blah',
                        'Parameters':
                        [{'ParameterValue': u'admin',
                          'ParameterKey': u'DBUsername'},
                         {'ParameterValue': u'F17',
                          'ParameterKey': u'LinuxDistribution'},
                         {'ParameterValue': u'm1.large',
                          'ParameterKey': u'InstanceType'},
                         {'ParameterValue': u'admin',
                          'ParameterKey': u'DBRootPassword'},
                         {'ParameterValue': u'admin',
                          'ParameterKey': u'DBPassword'},
                         {'ParameterValue': u'wordpress',
                          'ParameterKey': u'DBName'}],
                        'Outputs':
                        [{'OutputKey': u'WebsiteURL',
                          'OutputValue': u'http://10.0.0.8/wordpress',
                          'Description': u'URL for Wordpress wiki'}],
                        'TimeoutInMinutes': 60,
                        'CreationTime': u'2012-07-09T09:12:45Z',
                        'Capabilities': [],
                        'StackName': u'wordpress',
                        'NotificationARNs': [],
                        'StackStatus': u'CREATE_COMPLETE',
                        'DisableRollback': 'true',
                        'LastUpdatedTime': u'2012-07-09T09:13:11Z'}]}}}
        self.assertEqual(expected, response)
    def test_describe_arn(self):
        """DescribeStacks accepts a full ARN and skips identify_stack."""
        # Format a dummy GET request to pass into the WSGI handler
        stack_name = u"wordpress"
        stack_identifier = identifier.HeatIdentifier('t', stack_name, '6')
        identity = dict(stack_identifier)
        params = {'Action': 'DescribeStacks',
                  'StackName': stack_identifier.arn()}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'DescribeStacks')
        # Stub out the RPC call to the engine with a pre-canned response
        # Note the engine returns a load of keys we don't actually use
        # so this is a subset of the real response format
        engine_resp = [{u'stack_identity': {u'tenant': u't',
                                            u'stack_name': u'wordpress',
                                            u'stack_id': u'6',
                                            u'path': u''},
                        u'updated_time': u'2012-07-09T09:13:11Z',
                        u'parameters': {u'DBUsername': u'admin',
                                        u'LinuxDistribution': u'F17',
                                        u'InstanceType': u'm1.large',
                                        u'DBRootPassword': u'admin',
                                        u'DBPassword': u'admin',
                                        u'DBName': u'wordpress'},
                        u'outputs':
                        [{u'output_key': u'WebsiteURL',
                          u'description': u'URL for Wordpress wiki',
                          u'output_value': u'http://10.0.0.8/wordpress'}],
                        u'stack_status_reason': u'Stack successfully created',
                        u'creation_time': u'2012-07-09T09:12:45Z',
                        u'stack_name': u'wordpress',
                        u'notification_topics': [],
                        u'stack_action': u'CREATE',
                        u'stack_status': u'COMPLETE',
                        u'description': u'blah',
                        u'disable_rollback': 'true',
                        u'timeout_mins':60,
                        u'capabilities':[]}]
        self.m.StubOutWithMock(rpc, 'call')
        rpc.call(dummy_req.context, self.topic,
                 {'namespace': None,
                  'method': 'show_stack',
                  'args': {'stack_identity': identity},
                  'version': self.api_version}, None).AndReturn(engine_resp)
        self.m.ReplayAll()
        # Call the list controller function and compare the response
        response = self.controller.describe(dummy_req)
        expected = {'DescribeStacksResponse':
                    {'DescribeStacksResult':
                     {'Stacks':
                      [{'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
                        'StackStatusReason': u'Stack successfully created',
                        'Description': u'blah',
                        'Parameters':
                        [{'ParameterValue': u'admin',
                          'ParameterKey': u'DBUsername'},
                         {'ParameterValue': u'F17',
                          'ParameterKey': u'LinuxDistribution'},
                         {'ParameterValue': u'm1.large',
                          'ParameterKey': u'InstanceType'},
                         {'ParameterValue': u'admin',
                          'ParameterKey': u'DBRootPassword'},
                         {'ParameterValue': u'admin',
                          'ParameterKey': u'DBPassword'},
                         {'ParameterValue': u'wordpress',
                          'ParameterKey': u'DBName'}],
                        'Outputs':
                        [{'OutputKey': u'WebsiteURL',
                          'OutputValue': u'http://10.0.0.8/wordpress',
                          'Description': u'URL for Wordpress wiki'}],
                        'TimeoutInMinutes': 60,
                        'CreationTime': u'2012-07-09T09:12:45Z',
                        'Capabilities': [],
                        'StackName': u'wordpress',
                        'NotificationARNs': [],
                        'StackStatus': u'CREATE_COMPLETE',
                        'DisableRollback': 'true',
                        'LastUpdatedTime': u'2012-07-09T09:13:11Z'}]}}}
        self.assertEqual(expected, response)
    def test_describe_arn_invalidtenant(self):
        """An ARN for another tenant maps to HeatInvalidParameterValueError."""
        # Format a dummy GET request to pass into the WSGI handler
        stack_name = u"wordpress"
        stack_identifier = identifier.HeatIdentifier('wibble', stack_name, '6')
        identity = dict(stack_identifier)
        params = {'Action': 'DescribeStacks',
                  'StackName': stack_identifier.arn()}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'DescribeStacks')
        self.m.StubOutWithMock(rpc, 'call')
        rpc.call(dummy_req.context, self.topic,
                 {'namespace': None,
                  'method': 'show_stack',
                  'args': {'stack_identity': identity},
                  'version': self.api_version},
                 None).AndRaise(heat_exception.InvalidTenant(target='test',
                                                             actual='test'))
        self.m.ReplayAll()
        result = self.controller.describe(dummy_req)
        self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
    def test_describe_aterr(self):
        """AttributeError from show_stack maps to HeatInvalidParameterValueError."""
        stack_name = "wordpress"
        identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
        params = {'Action': 'DescribeStacks', 'StackName': stack_name}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'DescribeStacks')
        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
        self.m.StubOutWithMock(rpc, 'call')
        rpc.call(dummy_req.context, self.topic,
                 {'namespace': None,
                  'method': 'identify_stack',
                  'args': {'stack_name': stack_name},
                  'version': self.api_version}, None).AndReturn(identity)
        rpc.call(dummy_req.context, self.topic,
                 {'namespace': None,
                  'method': 'show_stack',
                  'args': {'stack_identity': identity},
                  'version': self.api_version}, None
                 ).AndRaise(AttributeError())
        self.m.ReplayAll()
        result = self.controller.describe(dummy_req)
        self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
    def test_describe_bad_name(self):
        """StackNotFound maps to HeatInvalidParameterValueError."""
        stack_name = "wibble"
        params = {'Action': 'DescribeStacks', 'StackName': stack_name}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'DescribeStacks')
        # Insert an engine RPC error and ensure we map correctly to the
        # heat exception type
        self.m.StubOutWithMock(rpc, 'call')
        rpc.call(dummy_req.context, self.topic,
                 {'namespace': None,
                  'method': 'identify_stack',
                  'args': {'stack_name': stack_name},
                  'version': self.api_version}, None
                 ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
        self.m.ReplayAll()
        result = self.controller.describe(dummy_req)
        self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
    def test_get_template_int_body(self):
        '''Test the internal _get_template function.'''
        params = {'TemplateBody': "abcdef"}
        dummy_req = self._dummy_GET_request(params)
        # an inline TemplateBody is returned verbatim
        result = self.controller._get_template(dummy_req)
        expected = "abcdef"
        self.assertEqual(expected, result)
    # TODO(shardy) : test the _get_template TemplateUrl case
    def test_create(self):
        """CreateStack with DisableRollback=true passes args to the engine."""
        # Format a dummy request
        stack_name = "wordpress"
        json_template = json.dumps(self.template)
        params = {'Action': 'CreateStack', 'StackName': stack_name,
                  'TemplateBody': '%s' % json_template,
                  'TimeoutInMinutes': 30,
                  'DisableRollback': 'true',
                  'Parameters.member.1.ParameterKey': 'InstanceType',
                  'Parameters.member.1.ParameterValue': 'm1.xlarge'}
        engine_parms = {u'InstanceType': u'm1.xlarge'}
        engine_args = {'timeout_mins': u'30', 'disable_rollback': 'true'}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'CreateStack')
        # Stub out the RPC call to the engine with a pre-canned response
        engine_resp = {u'tenant': u't',
                       u'stack_name': u'wordpress',
                       u'stack_id': u'1',
                       u'path': u''}
        self.m.StubOutWithMock(rpc, 'call')
        rpc.call(dummy_req.context, self.topic,
                 {'namespace': None,
                  'method': 'create_stack',
                  'args': {'stack_name': stack_name,
                           'template': self.template,
                           'params': engine_parms,
                           'files': {},
                           'args': engine_args},
                  'version': self.api_version}, None).AndReturn(engine_resp)
        self.m.ReplayAll()
        response = self.controller.create(dummy_req)
        expected = {
            'CreateStackResponse': {
                'CreateStackResult': {
                    u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1'
                }
            }
        }
        self.assertEqual(expected, response)
    def test_create_rollback(self):
        """CreateStack with DisableRollback=false passes rollback through."""
        # Format a dummy request
        stack_name = "wordpress"
        json_template = json.dumps(self.template)
        params = {'Action': 'CreateStack', 'StackName': stack_name,
                  'TemplateBody': '%s' % json_template,
                  'TimeoutInMinutes': 30,
                  'DisableRollback': 'false',
                  'Parameters.member.1.ParameterKey': 'InstanceType',
                  'Parameters.member.1.ParameterValue': 'm1.xlarge'}
        engine_parms = {u'InstanceType': u'm1.xlarge'}
        engine_args = {'timeout_mins': u'30', 'disable_rollback': 'false'}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'CreateStack')
        # Stub out the RPC call to the engine with a pre-canned response
        engine_resp = {u'tenant': u't',
                       u'stack_name': u'wordpress',
                       u'stack_id': u'1',
                       u'path': u''}
        self.m.StubOutWithMock(rpc, 'call')
        rpc.call(dummy_req.context, self.topic,
                 {'namespace': None,
                  'method': 'create_stack',
                  'args': {'stack_name': stack_name,
                           'template': self.template,
                           'params': engine_parms,
                           'files': {},
                           'args': engine_args},
                  'version': self.api_version}, None).AndReturn(engine_resp)
        self.m.ReplayAll()
        response = self.controller.create(dummy_req)
        expected = {
            'CreateStackResponse': {
                'CreateStackResult': {
                    u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1'
                }
            }
        }
        self.assertEqual(expected, response)
    def test_create_onfailure_true(self):
        """OnFailure=DO_NOTHING translates to disable_rollback=true."""
        # Format a dummy request
        stack_name = "wordpress"
        json_template = json.dumps(self.template)
        params = {'Action': 'CreateStack', 'StackName': stack_name,
                  'TemplateBody': '%s' % json_template,
                  'TimeoutInMinutes': 30,
                  'OnFailure': 'DO_NOTHING',
                  'Parameters.member.1.ParameterKey': 'InstanceType',
                  'Parameters.member.1.ParameterValue': 'm1.xlarge'}
        engine_parms = {u'InstanceType': u'm1.xlarge'}
        engine_args = {'timeout_mins': u'30', 'disable_rollback': 'true'}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'CreateStack')
        # Stub out the RPC call to the engine with a pre-canned response
        engine_resp = {u'tenant': u't',
                       u'stack_name': u'wordpress',
                       u'stack_id': u'1',
                       u'path': u''}
        self.m.StubOutWithMock(rpc, 'call')
        rpc.call(dummy_req.context, self.topic,
                 {'namespace': None,
                  'method': 'create_stack',
                  'args': {'stack_name': stack_name,
                           'template': self.template,
                           'params': engine_parms,
                           'files': {},
                           'args': engine_args},
                  'version': self.api_version}, None).AndReturn(engine_resp)
        self.m.ReplayAll()
        response = self.controller.create(dummy_req)
        expected = {
            'CreateStackResponse': {
                'CreateStackResult': {
                    u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1'
                }
            }
        }
        self.assertEqual(expected, response)
    def test_create_onfailure_false_delete(self):
        """OnFailure=DELETE translates to disable_rollback=false."""
        # Format a dummy request
        stack_name = "wordpress"
        json_template = json.dumps(self.template)
        params = {'Action': 'CreateStack', 'StackName': stack_name,
                  'TemplateBody': '%s' % json_template,
                  'TimeoutInMinutes': 30,
                  'OnFailure': 'DELETE',
                  'Parameters.member.1.ParameterKey': 'InstanceType',
                  'Parameters.member.1.ParameterValue': 'm1.xlarge'}
        engine_parms = {u'InstanceType': u'm1.xlarge'}
        engine_args = {'timeout_mins': u'30', 'disable_rollback': 'false'}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'CreateStack')
        # Stub out the RPC call to the engine with a pre-canned response
        engine_resp = {u'tenant': u't',
                       u'stack_name': u'wordpress',
                       u'stack_id': u'1',
                       u'path': u''}
        self.m.StubOutWithMock(rpc, 'call')
        rpc.call(dummy_req.context, self.topic,
                 {'namespace': None,
                  'method': 'create_stack',
                  'args': {'stack_name': stack_name,
                           'template': self.template,
                           'params': engine_parms,
                           'files': {},
                           'args': engine_args},
                  'version': self.api_version}, None).AndReturn(engine_resp)
        self.m.ReplayAll()
        response = self.controller.create(dummy_req)
        expected = {
            'CreateStackResponse': {
                'CreateStackResult': {
                    u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1'
                }
            }
        }
        self.assertEqual(expected, response)
    def test_create_onfailure_false_rollback(self):
        """OnFailure=ROLLBACK translates to disable_rollback=false."""
        # Format a dummy request
        stack_name = "wordpress"
        json_template = json.dumps(self.template)
        params = {'Action': 'CreateStack', 'StackName': stack_name,
                  'TemplateBody': '%s' % json_template,
                  'TimeoutInMinutes': 30,
                  'OnFailure': 'ROLLBACK',
                  'Parameters.member.1.ParameterKey': 'InstanceType',
                  'Parameters.member.1.ParameterValue': 'm1.xlarge'}
        engine_parms = {u'InstanceType': u'm1.xlarge'}
        engine_args = {'timeout_mins': u'30', 'disable_rollback': 'false'}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'CreateStack')
        # Stub out the RPC call to the engine with a pre-canned response
        engine_resp = {u'tenant': u't',
                       u'stack_name': u'wordpress',
                       u'stack_id': u'1',
                       u'path': u''}
        self.m.StubOutWithMock(rpc, 'call')
        rpc.call(dummy_req.context, self.topic,
                 {'namespace': None,
                  'method': 'create_stack',
                  'args': {'stack_name': stack_name,
                           'template': self.template,
                           'params': engine_parms,
                           'files': {},
                           'args': engine_args},
                  'version': self.api_version}, None).AndReturn(engine_resp)
        self.m.ReplayAll()
        response = self.controller.create(dummy_req)
        expected = {
            'CreateStackResponse': {
                'CreateStackResult': {
                    u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1'
                }
            }
        }
        self.assertEqual(expected, response)
    def test_create_onfailure_err(self):
        """Supplying both DisableRollback and OnFailure is rejected."""
        # Format a dummy request
        stack_name = "wordpress"
        json_template = json.dumps(self.template)
        params = {'Action': 'CreateStack', 'StackName': stack_name,
                  'TemplateBody': '%s' % json_template,
                  'TimeoutInMinutes': 30,
                  'DisableRollback': 'true',
                  'OnFailure': 'DO_NOTHING',
                  'Parameters.member.1.ParameterKey': 'InstanceType',
                  'Parameters.member.1.ParameterValue': 'm1.xlarge'}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'CreateStack')
        self.assertRaises(exception.HeatInvalidParameterCombinationError,
                          self.controller.create, dummy_req)
    def test_create_err_no_template(self):
        """Missing TemplateBody yields HeatMissingParameterError."""
        # Format a dummy request with a missing template field
        stack_name = "wordpress"
        params = {'Action': 'CreateStack', 'StackName': stack_name}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'CreateStack')
        result = self.controller.create(dummy_req)
        self.assertIsInstance(result, exception.HeatMissingParameterError)
    def test_create_err_inval_template(self):
        """Unparsable TemplateBody yields HeatInvalidParameterValueError."""
        # Format a dummy request with an invalid TemplateBody
        stack_name = "wordpress"
        json_template = "!$%**_+}@~?"
        params = {'Action': 'CreateStack', 'StackName': stack_name,
                  'TemplateBody': '%s' % json_template}
        dummy_req = self._dummy_GET_request(params)
        self._stub_enforce(dummy_req, 'CreateStack')
        result = self.controller.create(dummy_req)
        self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_create_err_rpcerr(self):
    """Engine RPC failures during create map to API parameter errors."""
    # Format a dummy request
    stack_name = "wordpress"
    json_template = json.dumps(self.template)
    params = {'Action': 'CreateStack', 'StackName': stack_name,
              'TemplateBody': '%s' % json_template,
              'TimeoutInMinutes': 30,
              'Parameters.member.1.ParameterKey': 'InstanceType',
              'Parameters.member.1.ParameterValue': 'm1.xlarge'}
    engine_parms = {u'InstanceType': u'm1.xlarge'}
    engine_args = {'timeout_mins': u'30'}
    dummy_req = self._dummy_GET_request(params)
    # Stub enforce directly (not via _stub_enforce) because three
    # successive policy checks are expected below, one per create call.
    self.m.StubOutWithMock(policy.Enforcer, 'enforce')
    # Insert an engine RPC error and ensure we map correctly to the
    # heat exception type
    self.m.StubOutWithMock(rpc, 'call')
    policy.Enforcer.enforce(dummy_req.context, 'CreateStack'
                            ).AndReturn(True)
    # 1st create: generic AttributeError from the engine
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'create_stack',
              'args': {'stack_name': stack_name,
                       'template': self.template,
                       'params': engine_parms,
                       'files': {},
                       'args': engine_args},
              'version': self.api_version}, None
             ).AndRaise(AttributeError())
    policy.Enforcer.enforce(dummy_req.context, 'CreateStack'
                            ).AndReturn(True)
    # 2nd create: engine reports an unknown user parameter
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'create_stack',
              'args': {'stack_name': stack_name,
                       'template': self.template,
                       'params': engine_parms,
                       'files': {},
                       'args': engine_args},
              'version': self.api_version}, None
             ).AndRaise(heat_exception.UnknownUserParameter(key='test'))
    policy.Enforcer.enforce(dummy_req.context, 'CreateStack'
                            ).AndReturn(True)
    # 3rd create: engine reports a missing user parameter
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'create_stack',
              'args': {'stack_name': stack_name,
                       'template': self.template,
                       'params': engine_parms,
                       'files': {},
                       'args': engine_args},
              'version': self.api_version}, None
             ).AndRaise(heat_exception.UserParameterMissing(key='test'))
    self.m.ReplayAll()
    # All three engine-side failures surface as the same API error type
    result = self.controller.create(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
    result = self.controller.create(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
    result = self.controller.create(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_create_err_exists(self):
    """StackExists from the engine maps to AlreadyExistsError."""
    # Format a dummy request
    stack_name = "wordpress"
    json_template = json.dumps(self.template)
    params = {'Action': 'CreateStack', 'StackName': stack_name,
              'TemplateBody': '%s' % json_template,
              'TimeoutInMinutes': 30,
              'Parameters.member.1.ParameterKey': 'InstanceType',
              'Parameters.member.1.ParameterValue': 'm1.xlarge'}
    engine_parms = {u'InstanceType': u'm1.xlarge'}
    engine_args = {'timeout_mins': u'30'}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'CreateStack')
    # Insert an engine RPC error and ensure we map correctly to the
    # heat exception type
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'create_stack',
              'args': {'stack_name': stack_name,
                       'template': self.template,
                       'params': engine_parms,
                       'files': {},
                       'args': engine_args},
              'version': self.api_version}, None
             ).AndRaise(heat_exception.StackExists(stack_name='test'))
    self.m.ReplayAll()
    result = self.controller.create(dummy_req)
    self.assertIsInstance(result, exception.AlreadyExistsError)
def test_create_err_engine(self):
    """StackValidationFailed from the engine maps to a parameter error."""
    # Format a dummy request
    stack_name = "wordpress"
    json_template = json.dumps(self.template)
    params = {'Action': 'CreateStack', 'StackName': stack_name,
              'TemplateBody': '%s' % json_template,
              'TimeoutInMinutes': 30,
              'Parameters.member.1.ParameterKey': 'InstanceType',
              'Parameters.member.1.ParameterValue': 'm1.xlarge'}
    engine_parms = {u'InstanceType': u'm1.xlarge'}
    engine_args = {'timeout_mins': u'30'}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'CreateStack')
    # Stub out the RPC call to the engine with a pre-canned response
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'create_stack',
              'args': {'stack_name': stack_name,
                       'template': self.template,
                       'params': engine_parms,
                       'files': {},
                       'args': engine_args},
              'version': self.api_version}, None).AndRaise(
                  heat_exception.StackValidationFailed(
                      message='Something went wrong'))
    self.m.ReplayAll()
    result = self.controller.create(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_update(self):
    """UpdateStack identifies the stack then issues an update_stack RPC."""
    # Format a dummy request
    stack_name = "wordpress"
    json_template = json.dumps(self.template)
    params = {'Action': 'UpdateStack', 'StackName': stack_name,
              'TemplateBody': '%s' % json_template,
              'Parameters.member.1.ParameterKey': 'InstanceType',
              'Parameters.member.1.ParameterValue': 'm1.xlarge'}
    engine_parms = {u'InstanceType': u'm1.xlarge'}
    engine_args = {}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'UpdateStack')
    # Stub out the RPC call to the engine with a pre-canned response
    identity = dict(identifier.HeatIdentifier('t', stack_name, '1'))
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None).AndReturn(identity)
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'update_stack',
              'args': {'stack_identity': identity,
                       'template': self.template,
                       'params': engine_parms,
                       'files': {},
                       'args': engine_args},
              'version': self.api_version},
             None).AndReturn(identity)
    self.m.ReplayAll()
    response = self.controller.update(dummy_req)
    expected = {
        'UpdateStackResponse': {
            'UpdateStackResult': {
                u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1'
            }
        }
    }
    self.assertEqual(expected, response)
def test_update_bad_name(self):
    """UpdateStack on a nonexistent stack maps to a parameter error."""
    stack_name = "wibble"
    json_template = json.dumps(self.template)
    params = {'Action': 'UpdateStack', 'StackName': stack_name,
              'TemplateBody': '%s' % json_template,
              'Parameters.member.1.ParameterKey': 'InstanceType',
              'Parameters.member.1.ParameterValue': 'm1.xlarge'}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'UpdateStack')
    # Insert an engine RPC error and ensure we map correctly to the
    # heat exception type
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None
             ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
    self.m.ReplayAll()
    result = self.controller.update(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_create_or_update_err(self):
    """An unsupported action name is mapped to HeatInternalFailureError."""
    ret = self.controller.create_or_update(req={}, action="dsdgfdf")
    self.assertIsInstance(ret, exception.HeatInternalFailureError)
def test_get_template(self):
    """GetTemplate returns the engine's template as TemplateBody."""
    # Format a dummy request
    stack_name = "wordpress"
    identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
    params = {'Action': 'GetTemplate', 'StackName': stack_name}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'GetTemplate')
    # Stub out the RPC call to the engine with a pre-canned response
    engine_resp = self.template
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None).AndReturn(identity)
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'get_template',
              'args': {'stack_identity': identity},
              'version': self.api_version}, None).AndReturn(engine_resp)
    self.m.ReplayAll()
    response = self.controller.get_template(dummy_req)
    expected = {'GetTemplateResponse':
                {'GetTemplateResult':
                 {'TemplateBody': self.template}}}
    self.assertEqual(expected, response)
def test_get_template_err_rpcerr(self):
    """An RPC failure fetching the template maps to a parameter error."""
    stack_name = "wordpress"
    identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
    params = {'Action': 'GetTemplate', 'StackName': stack_name}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'GetTemplate')
    # Insert an engine RPC error and ensure we map correctly to the
    # heat exception type
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None).AndReturn(identity)
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'get_template',
              'args': {'stack_identity': identity},
              'version': self.api_version}, None
             ).AndRaise(AttributeError())
    self.m.ReplayAll()
    result = self.controller.get_template(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_get_template_bad_name(self):
    """GetTemplate on a nonexistent stack maps to a parameter error."""
    stack_name = "wibble"
    params = {'Action': 'GetTemplate', 'StackName': stack_name}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'GetTemplate')
    # Insert an engine RPC error and ensure we map correctly to the
    # heat exception type
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None
             ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
    self.m.ReplayAll()
    result = self.controller.get_template(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_get_template_err_none(self):
    """A None template from the engine is treated as 'no such stack'."""
    stack_name = "wordpress"
    identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
    params = {'Action': 'GetTemplate', 'StackName': stack_name}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'GetTemplate')
    # Stub out the RPC call to the engine to return None
    # this test the "no such stack" error path
    engine_resp = None
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None).AndReturn(identity)
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'get_template',
              'args': {'stack_identity': identity},
              'version': self.api_version}, None).AndReturn(engine_resp)
    self.m.ReplayAll()
    result = self.controller.get_template(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_validate_err_no_template(self):
    """ValidateTemplate with no template body is a missing-parameter error."""
    req = self._dummy_GET_request({'Action': 'ValidateTemplate'})
    self._stub_enforce(req, 'ValidateTemplate')
    res = self.controller.validate_template(req)
    self.assertIsInstance(res, exception.HeatMissingParameterError)
def test_validate_err_inval_template(self):
    """ValidateTemplate with an unparseable TemplateBody is rejected."""
    bad_body = "!$%**_+}@~?"
    req = self._dummy_GET_request({'Action': 'ValidateTemplate',
                                   'TemplateBody': '%s' % bad_body})
    self._stub_enforce(req, 'ValidateTemplate')
    res = self.controller.validate_template(req)
    self.assertIsInstance(res, exception.HeatInvalidParameterValueError)
def test_bad_resources_in_template(self):
    """validate_template passes the engine's 'Error' text straight through."""
    # Format a dummy request
    json_template = {
        'AWSTemplateFormatVersion': '2010-09-09',
        'Resources': {
            'Type': 'AWS: : EC2: : Instance',
        },
    }
    params = {'Action': 'ValidateTemplate',
              'TemplateBody': '%s' % json.dumps(json_template)}
    response = {'Error': 'Resources must contain Resource. '
                'Found a [string] instead'}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'ValidateTemplate')
    # Stub out the RPC call to the engine with a pre-canned response
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'validate_template',
              'args': {'template': json_template},
              'version': self.api_version}, None).AndReturn(response)
    self.m.ReplayAll()
    response = self.controller.validate_template(dummy_req)
    expected = {'ValidateTemplateResponse':
                {'ValidateTemplateResult':
                 'Resources must contain Resource. '
                 'Found a [string] instead'}}
    self.assertEqual(expected, response)
def test_delete(self):
    """DeleteStack identifies the stack then issues a delete_stack RPC."""
    # Format a dummy request
    stack_name = "wordpress"
    identity = dict(identifier.HeatIdentifier('t', stack_name, '1'))
    params = {'Action': 'DeleteStack', 'StackName': stack_name}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DeleteStack')
    # Stub out the RPC call to the engine with a pre-canned response
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None).AndReturn(identity)
    # Engine returns None when delete successful
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'delete_stack',
              'args': {'stack_identity': identity},
              'version': self.api_version}, None).AndReturn(None)
    self.m.ReplayAll()
    response = self.controller.delete(dummy_req)
    expected = {'DeleteStackResponse': {'DeleteStackResult': ''}}
    self.assertEqual(expected, response)
def test_delete_err_rpcerr(self):
    """An RPC failure during delete maps to a parameter error."""
    stack_name = "wordpress"
    identity = dict(identifier.HeatIdentifier('t', stack_name, '1'))
    params = {'Action': 'DeleteStack', 'StackName': stack_name}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DeleteStack')
    # Stub out the RPC call to the engine with a pre-canned response
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None).AndReturn(identity)
    # Insert an engine RPC error and ensure we map correctly to the
    # heat exception type
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'delete_stack',
              'args': {'stack_identity': identity},
              'version': self.api_version}, None
             ).AndRaise(AttributeError())
    self.m.ReplayAll()
    result = self.controller.delete(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_delete_bad_name(self):
    """DeleteStack on a nonexistent stack maps to a parameter error."""
    stack_name = "wibble"
    params = {'Action': 'DeleteStack', 'StackName': stack_name}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DeleteStack')
    # Insert an engine RPC error and ensure we map correctly to the
    # heat exception type
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None
             ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
    self.m.ReplayAll()
    result = self.controller.delete(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_events_list_event_id_integer(self):
    """Events list handles a numeric event id."""
    self._test_events_list('42')
def test_events_list_event_id_uuid(self):
    """Events list handles a UUID event id."""
    self._test_events_list('a3455d8c-9f88-404d-a85b-5315293e67de')
def _test_events_list(self, event_id):
    """Shared DescribeStackEvents assertion for a given event id."""
    # Format a dummy request
    stack_name = "wordpress"
    identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
    params = {'Action': 'DescribeStackEvents', 'StackName': stack_name}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DescribeStackEvents')
    # Stub out the RPC call to the engine with a pre-canned response
    engine_resp = [{u'stack_name': u'wordpress',
                    u'event_time': u'2012-07-23T13:05:39Z',
                    u'stack_identity': {u'tenant': u't',
                                        u'stack_name': u'wordpress',
                                        u'stack_id': u'6',
                                        u'path': u''},
                    u'resource_name': u'WikiDatabase',
                    u'resource_status_reason': u'state changed',
                    u'event_identity':
                    {u'tenant': u't',
                     u'stack_name': u'wordpress',
                     u'stack_id': u'6',
                     u'path': u'/resources/WikiDatabase/events/{0}'.format(
                         event_id)},
                    u'resource_action': u'TEST',
                    u'resource_status': u'IN_PROGRESS',
                    u'physical_resource_id': None,
                    u'resource_properties': {u'UserData': u'blah'},
                    u'resource_type': u'AWS::EC2::Instance'}]
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None).AndReturn(identity)
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'list_events',
              'args': {'stack_identity': identity},
              'version': self.api_version}, None).AndReturn(engine_resp)
    self.m.ReplayAll()
    response = self.controller.events_list(dummy_req)
    # Note the action/status pair is joined into CFN-style
    # ResourceStatus (TEST_IN_PROGRESS) in the response.
    expected = {'DescribeStackEventsResponse':
                {'DescribeStackEventsResult':
                 {'StackEvents':
                  [{'EventId': unicode(event_id),
                    'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
                    'ResourceStatus': u'TEST_IN_PROGRESS',
                    'ResourceType': u'AWS::EC2::Instance',
                    'Timestamp': u'2012-07-23T13:05:39Z',
                    'StackName': u'wordpress',
                    'ResourceProperties':
                    json.dumps({u'UserData': u'blah'}),
                    'PhysicalResourceId': None,
                    'ResourceStatusReason': u'state changed',
                    'LogicalResourceId': u'WikiDatabase'}]}}}
    self.assertEqual(expected, response)
def test_events_list_err_rpcerr(self):
    """A generic engine exception maps to HeatInternalFailureError."""
    stack_name = "wordpress"
    identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
    params = {'Action': 'DescribeStackEvents', 'StackName': stack_name}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DescribeStackEvents')
    # Insert an engine RPC error and ensure we map correctly to the
    # heat exception type
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None).AndReturn(identity)
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'list_events',
              'args': {'stack_identity': identity},
              'version': self.api_version}, None
             ).AndRaise(Exception())
    self.m.ReplayAll()
    result = self.controller.events_list(dummy_req)
    self.assertIsInstance(result, exception.HeatInternalFailureError)
def test_events_list_bad_name(self):
    """DescribeStackEvents on a nonexistent stack maps to a parameter error."""
    stack_name = "wibble"
    params = {'Action': 'DescribeStackEvents', 'StackName': stack_name}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DescribeStackEvents')
    # Insert an engine RPC error and ensure we map correctly to the
    # heat exception type
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None
             ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
    self.m.ReplayAll()
    result = self.controller.events_list(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_describe_stack_resource(self):
    """DescribeStackResource returns a single resource's detail."""
    # Format a dummy request
    stack_name = "wordpress"
    identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
    params = {'Action': 'DescribeStackResource',
              'StackName': stack_name,
              'LogicalResourceId': "WikiDatabase"}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DescribeStackResource')
    # Stub out the RPC call to the engine with a pre-canned response
    engine_resp = {u'description': u'',
                   u'resource_identity': {
                       u'tenant': u't',
                       u'stack_name': u'wordpress',
                       u'stack_id': u'6',
                       u'path': u'resources/WikiDatabase'
                   },
                   u'stack_name': u'wordpress',
                   u'resource_name': u'WikiDatabase',
                   u'resource_status_reason': None,
                   u'updated_time': u'2012-07-23T13:06:00Z',
                   u'stack_identity': {u'tenant': u't',
                                       u'stack_name': u'wordpress',
                                       u'stack_id': u'6',
                                       u'path': u''},
                   u'resource_action': u'CREATE',
                   u'resource_status': u'COMPLETE',
                   u'physical_resource_id':
                   u'a3455d8c-9f88-404d-a85b-5315293e67de',
                   u'resource_type': u'AWS::EC2::Instance',
                   u'metadata': {u'wordpress': []}}
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None).AndReturn(identity)
    args = {
        'stack_identity': identity,
        'resource_name': dummy_req.params.get('LogicalResourceId'),
    }
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'describe_stack_resource',
              'args': args,
              'version': self.api_version}, None).AndReturn(engine_resp)
    self.m.ReplayAll()
    response = self.controller.describe_stack_resource(dummy_req)
    expected = {'DescribeStackResourceResponse':
                {'DescribeStackResourceResult':
                 {'StackResourceDetail':
                  {'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
                   'ResourceStatus': u'CREATE_COMPLETE',
                   'Description': u'',
                   'ResourceType': u'AWS::EC2::Instance',
                   'ResourceStatusReason': None,
                   'LastUpdatedTimestamp': u'2012-07-23T13:06:00Z',
                   'StackName': u'wordpress',
                   'PhysicalResourceId':
                   u'a3455d8c-9f88-404d-a85b-5315293e67de',
                   'Metadata': {u'wordpress': []},
                   'LogicalResourceId': u'WikiDatabase'}}}}
    self.assertEqual(expected, response)
def test_describe_stack_resource_nonexistent_stack(self):
    """DescribeStackResource on a missing stack maps to a parameter error."""
    # Format a dummy request
    stack_name = "wibble"
    params = {'Action': 'DescribeStackResource',
              'StackName': stack_name,
              'LogicalResourceId': "WikiDatabase"}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DescribeStackResource')
    # Stub out the RPC call to the engine with a pre-canned response
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None
             ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
    self.m.ReplayAll()
    result = self.controller.describe_stack_resource(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_describe_stack_resource_nonexistent(self):
    """DescribeStackResource on a missing resource maps to a parameter error."""
    # Format a dummy request
    stack_name = "wordpress"
    identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
    params = {'Action': 'DescribeStackResource',
              'StackName': stack_name,
              'LogicalResourceId': "wibble"}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DescribeStackResource')
    # Stub out the RPC call to the engine with a pre-canned response
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None).AndReturn(identity)
    args = {
        'stack_identity': identity,
        'resource_name': dummy_req.params.get('LogicalResourceId'),
    }
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'describe_stack_resource',
              'args': args,
              'version': self.api_version},
             None).AndRaise(heat_exception.ResourceNotFound(
                 resource_name='test', stack_name='test'))
    self.m.ReplayAll()
    result = self.controller.describe_stack_resource(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_describe_stack_resources(self):
    """DescribeStackResources by StackName returns the resource list."""
    # Format a dummy request
    stack_name = "wordpress"
    identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
    params = {'Action': 'DescribeStackResources',
              'StackName': stack_name,
              'LogicalResourceId': "WikiDatabase"}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DescribeStackResources')
    # Stub out the RPC call to the engine with a pre-canned response
    engine_resp = [{u'description': u'',
                    u'resource_identity': {
                        u'tenant': u't',
                        u'stack_name': u'wordpress',
                        u'stack_id': u'6',
                        u'path': u'resources/WikiDatabase'
                    },
                    u'stack_name': u'wordpress',
                    u'resource_name': u'WikiDatabase',
                    u'resource_status_reason': None,
                    u'updated_time': u'2012-07-23T13:06:00Z',
                    u'stack_identity': {u'tenant': u't',
                                        u'stack_name': u'wordpress',
                                        u'stack_id': u'6',
                                        u'path': u''},
                    u'resource_action': u'CREATE',
                    u'resource_status': u'COMPLETE',
                    u'physical_resource_id':
                    u'a3455d8c-9f88-404d-a85b-5315293e67de',
                    u'resource_type': u'AWS::EC2::Instance',
                    # Was u'true''true' — accidental implicit string-literal
                    # concatenation producing u'truetrue'. The metadata value
                    # is not asserted below, so fixing the fixture does not
                    # change the test's outcome.
                    u'metadata': {u'ensureRunning': u'true'}}]
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None).AndReturn(identity)
    args = {
        'stack_identity': identity,
        'resource_name': dummy_req.params.get('LogicalResourceId'),
    }
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'describe_stack_resources',
              'args': args,
              'version': self.api_version}, None).AndReturn(engine_resp)
    self.m.ReplayAll()
    response = self.controller.describe_stack_resources(dummy_req)
    expected = {'DescribeStackResourcesResponse':
                {'DescribeStackResourcesResult':
                 {'StackResources':
                  [{'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
                    'ResourceStatus': u'CREATE_COMPLETE',
                    'Description': u'',
                    'ResourceType': u'AWS::EC2::Instance',
                    'Timestamp': u'2012-07-23T13:06:00Z',
                    'ResourceStatusReason': None,
                    'StackName': u'wordpress',
                    'PhysicalResourceId':
                    u'a3455d8c-9f88-404d-a85b-5315293e67de',
                    'LogicalResourceId': u'WikiDatabase'}]}}}
    self.assertEqual(expected, response)
def test_describe_stack_resources_bad_name(self):
    """DescribeStackResources on a missing stack maps to a parameter error."""
    stack_name = "wibble"
    params = {'Action': 'DescribeStackResources',
              'StackName': stack_name,
              'LogicalResourceId': "WikiDatabase"}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DescribeStackResources')
    # Insert an engine RPC error and ensure we map correctly to the
    # heat exception type
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None
             ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
    self.m.ReplayAll()
    result = self.controller.describe_stack_resources(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_describe_stack_resources_physical(self):
    """DescribeStackResources can locate a stack by PhysicalResourceId."""
    # Format a dummy request
    stack_name = "wordpress"
    identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
    params = {'Action': 'DescribeStackResources',
              'LogicalResourceId': "WikiDatabase",
              'PhysicalResourceId': 'a3455d8c-9f88-404d-a85b-5315293e67de'}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DescribeStackResources')
    # Stub out the RPC call to the engine with a pre-canned response
    engine_resp = [{u'description': u'',
                    u'resource_identity': {
                        u'tenant': u't',
                        u'stack_name': u'wordpress',
                        u'stack_id': u'6',
                        u'path': u'resources/WikiDatabase'
                    },
                    u'stack_name': u'wordpress',
                    u'resource_name': u'WikiDatabase',
                    u'resource_status_reason': None,
                    u'updated_time': u'2012-07-23T13:06:00Z',
                    u'stack_identity': {u'tenant': u't',
                                        u'stack_name': u'wordpress',
                                        u'stack_id': u'6',
                                        u'path': u''},
                    u'resource_action': u'CREATE',
                    u'resource_status': u'COMPLETE',
                    u'physical_resource_id':
                    u'a3455d8c-9f88-404d-a85b-5315293e67de',
                    u'resource_type': u'AWS::EC2::Instance',
                    # Was u'true''true' — accidental implicit string-literal
                    # concatenation producing u'truetrue'. The metadata value
                    # is not asserted below, so fixing the fixture does not
                    # change the test's outcome.
                    u'metadata': {u'ensureRunning': u'true'}}]
    self.m.StubOutWithMock(rpc, 'call')
    # Resolution goes through find_physical_resource rather than
    # identify_stack when PhysicalResourceId is supplied.
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'find_physical_resource',
              'args': {'physical_resource_id':
                       'a3455d8c-9f88-404d-a85b-5315293e67de'},
              'version': self.api_version}, None).AndReturn(identity)
    args = {
        'stack_identity': identity,
        'resource_name': dummy_req.params.get('LogicalResourceId'),
    }
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'describe_stack_resources',
              'args': args,
              'version': self.api_version}, None).AndReturn(engine_resp)
    self.m.ReplayAll()
    response = self.controller.describe_stack_resources(dummy_req)
    expected = {'DescribeStackResourcesResponse':
                {'DescribeStackResourcesResult':
                 {'StackResources':
                  [{'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
                    'ResourceStatus': u'CREATE_COMPLETE',
                    'Description': u'',
                    'ResourceType': u'AWS::EC2::Instance',
                    'Timestamp': u'2012-07-23T13:06:00Z',
                    'ResourceStatusReason': None,
                    'StackName': u'wordpress',
                    'PhysicalResourceId':
                    u'a3455d8c-9f88-404d-a85b-5315293e67de',
                    'LogicalResourceId': u'WikiDatabase'}]}}}
    self.assertEqual(expected, response)
def test_describe_stack_resources_physical_not_found(self):
    """An unknown PhysicalResourceId maps to a parameter error."""
    # Format a dummy request
    params = {'Action': 'DescribeStackResources',
              'LogicalResourceId': "WikiDatabase",
              'PhysicalResourceId': 'aaaaaaaa-9f88-404d-cccc-ffffffffffff'}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'DescribeStackResources')
    # Stub out the RPC call to the engine with a pre-canned response
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'find_physical_resource',
              'args': {'physical_resource_id':
                       'aaaaaaaa-9f88-404d-cccc-ffffffffffff'},
              'version': self.api_version},
             None).AndRaise(
                 heat_exception.PhysicalResourceNotFound(resource_id='1'))
    self.m.ReplayAll()
    response = self.controller.describe_stack_resources(dummy_req)
    self.assertIsInstance(response,
                          exception.HeatInvalidParameterValueError)
def test_describe_stack_resources_err_inval(self):
    """Supplying both StackName and PhysicalResourceId is rejected.

    The two lookup keys are mutually exclusive, so the controller must
    return a HeatInvalidParameterCombinationError.
    """
    req = self._dummy_GET_request({'Action': 'DescribeStackResources',
                                   'StackName': "wordpress",
                                   'PhysicalResourceId': "123456"})
    self._stub_enforce(req, 'DescribeStackResources')
    res = self.controller.describe_stack_resources(req)
    self.assertIsInstance(res,
                          exception.HeatInvalidParameterCombinationError)
def test_list_stack_resources(self):
    """ListStackResources returns summaries for each stack resource."""
    # Format a dummy request
    stack_name = "wordpress"
    identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
    params = {'Action': 'ListStackResources',
              'StackName': stack_name}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'ListStackResources')
    # Stub out the RPC call to the engine with a pre-canned response
    engine_resp = [{u'resource_identity':
                    {u'tenant': u't',
                     u'stack_name': u'wordpress',
                     u'stack_id': u'6',
                     u'path': u'/resources/WikiDatabase'},
                    u'stack_name': u'wordpress',
                    u'resource_name': u'WikiDatabase',
                    u'resource_status_reason': None,
                    u'updated_time': u'2012-07-23T13:06:00Z',
                    u'stack_identity': {u'tenant': u't',
                                        u'stack_name': u'wordpress',
                                        u'stack_id': u'6',
                                        u'path': u''},
                    u'resource_action': u'CREATE',
                    u'resource_status': u'COMPLETE',
                    u'physical_resource_id':
                    u'a3455d8c-9f88-404d-a85b-5315293e67de',
                    u'resource_type': u'AWS::EC2::Instance'}]
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None).AndReturn(identity)
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'list_stack_resources',
              'args': {'stack_identity': identity},
              'version': self.api_version}, None).AndReturn(engine_resp)
    self.m.ReplayAll()
    response = self.controller.list_stack_resources(dummy_req)
    expected = {'ListStackResourcesResponse': {'ListStackResourcesResult':
                {'StackResourceSummaries':
                 [{'ResourceStatus': u'CREATE_COMPLETE',
                   'ResourceType': u'AWS::EC2::Instance',
                   'ResourceStatusReason': None,
                   'LastUpdatedTimestamp': u'2012-07-23T13:06:00Z',
                   'PhysicalResourceId':
                   u'a3455d8c-9f88-404d-a85b-5315293e67de',
                   'LogicalResourceId': u'WikiDatabase'}]}}}
    self.assertEqual(expected, response)
def test_list_stack_resources_bad_name(self):
    """ListStackResources on a missing stack maps to a parameter error."""
    stack_name = "wibble"
    params = {'Action': 'ListStackResources',
              'StackName': stack_name}
    dummy_req = self._dummy_GET_request(params)
    self._stub_enforce(dummy_req, 'ListStackResources')
    # Insert an engine RPC error and ensure we map correctly to the
    # heat exception type
    self.m.StubOutWithMock(rpc, 'call')
    rpc.call(dummy_req.context, self.topic,
             {'namespace': None,
              'method': 'identify_stack',
              'args': {'stack_name': stack_name},
              'version': self.api_version}, None
             ).AndRaise(heat_exception.StackNotFound(stack_name='test'))
    self.m.ReplayAll()
    result = self.controller.list_stack_resources(dummy_req)
    self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
| {
"content_hash": "d01b94d50df2ceb2c3b1eeb3ff78f2e3",
"timestamp": "",
"source": "github",
"line_count": 1704,
"max_line_length": 79,
"avg_line_length": 44.21948356807512,
"alnum_prop": 0.5228268082282681,
"repo_name": "ntt-sic/heat",
"id": "8b95df1eaa5b2fb9b14141e6ae0c959068028bbb",
"size": "75968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/test_api_cfn_v1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3336181"
},
{
"name": "Shell",
"bytes": "22168"
}
],
"symlink_target": ""
} |
"""Extensions to the Python standard library."""
import sys
__all__ = [
'safe_hasattr',
'try_import',
'try_imports',
]
# same format as sys.version_info: "A tuple containing the five components of
# the version number: major, minor, micro, releaselevel, and serial. All
# values except releaselevel are integers; the release level is 'alpha',
# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
# releaselevel of 'dev' for unreleased under-development code.
#
# If the releaselevel is 'alpha' then the major/minor/micro components are not
# established at this point, and setup.py will use a version of next-$(revno).
# If the releaselevel is 'final', then the tarball will be major.minor.micro.
# Otherwise it is major.minor.micro~$(revno).
__version__ = (1, 0, 0, 'final', 0)
def try_import(name, alternative=None, error_callback=None):
    """Attempt to import ``name``. If it fails, return ``alternative``.

    When supporting multiple versions of Python or optional dependencies,
    it is useful to be able to try to import a module.

    :param name: The name of the object to import, e.g. ``os.path`` or
        ``os.path.join``.
    :param alternative: The value to return if no module can be imported.
        Defaults to None.
    :param error_callback: If non-None, a callable that is passed the
        ImportError when the module cannot be loaded.
    """
    segments = name.split('.')
    last_error = None

    def _give_up():
        # Report the most recent ImportError (if any occurred) before
        # falling back to the alternative value.
        if last_error is not None and error_callback is not None:
            error_callback(last_error)
        return alternative

    # Try progressively shorter dotted prefixes until one imports.  We
    # cannot rely on __import__'s return value because in import loops
    # (A imports A.B, which imports C, which calls try_import("A.B"))
    # A.B will not yet be set as an attribute of A.
    for depth in range(len(segments), 0, -1):
        prefix = '.'.join(segments[:depth])
        try:
            __import__(prefix)
        except ImportError:
            last_error = sys.exc_info()[1]
        else:
            break
    else:
        # No prefix imported at all.
        return _give_up()
    target = sys.modules[prefix]
    missing = object()
    # Resolve the remaining dotted path as attributes of the module.
    for segment in segments[depth:]:
        target = getattr(target, segment, missing)
        if target is missing:
            return _give_up()
    return target
# Private sentinel: lets try_imports() distinguish "caller supplied no
# alternative" (raise ImportError) from an explicit alternative of None.
_RAISE_EXCEPTION = object()
def try_imports(module_names, alternative=_RAISE_EXCEPTION, error_callback=None):
    """Attempt to import modules.

    Tries to import the first module in ``module_names``. If it can be
    imported, we return it. If not, we go on to the second module and try
    that. The process continues until we run out of modules to try. If none
    of the modules can be imported, either raise an exception or return the
    provided ``alternative`` value.

    :param module_names: A sequence of module names to try to import.
    :param alternative: The value to return if no module can be imported.
        If unspecified, we raise an ImportError.
    :param error_callback: If non-None, called with the ImportError for
        *each* module that fails to load.
    :raises ImportError: If none of the modules can be imported and no
        alternative value was specified.
    """
    module_names = list(module_names)
    for module_name in module_names:
        module = try_import(module_name, error_callback=error_callback)
        # try_import returns None (its default alternative) on failure.
        if module is not None:
            return module
    if alternative is _RAISE_EXCEPTION:
        raise ImportError(
            "Could not import any of: %s" % ', '.join(module_names))
    return alternative
def safe_hasattr(obj, attr, _marker=object()):
    """Return True if 'obj' has an attribute named 'attr'.

    Use this rather than built-in hasattr: that built-in swallows
    exceptions in some versions of Python and behaves unpredictably
    with respect to properties.  Here only a genuinely absent attribute
    (getattr falling back to the sentinel) counts as "not present".
    """
    found = getattr(obj, attr, _marker)
    return found is not _marker
| {
"content_hash": "83bb8e6a17394f89ea9f3f754fa72a23",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 81,
"avg_line_length": 38.648148148148145,
"alnum_prop": 0.6669861044561571,
"repo_name": "testing-cabal/extras",
"id": "e453bc9b3310ec813f6128c6bd14e62b14473059",
"size": "4245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extras/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "659"
},
{
"name": "Python",
"bytes": "13423"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.