Dataset schema (one column per field; ranges and class counts as reported by the viewer):

  repo_name        string   lengths 5-92
  path             string   lengths 4-221
  copies           string   19 values
  size             string   lengths 4-6
  content          string   lengths 766-896k
  license          string   15 values
  hash             int64    -9,223,277,421,539,062,000 to 9,223,102,107B
  line_mean        float64  6.51 to 99.9
  line_max         int64    32 to 997
  alpha_frac       float64  0.25 to 0.96
  autogenerated    bool     1 class
  ratio            float64  1.5 to 13.6
  config_test      bool     2 classes
  has_no_keywords  bool     2 classes
  few_assignments  bool     1 class
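A hedged sketch of iterating rows with this schema. The file name "rows.jsonl" and the JSON-lines layout are placeholder assumptions; only the column names come from the schema above.

```python
import json

# Assumed export format: one JSON object per line, keyed by the schema columns.
with open("rows.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        print(row["repo_name"], row["path"], row["license"], len(row["content"]))
```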
repo_name: txomon/SpockBot · path: spock/plugins/core/net.py · copies: 1 · size: 9269
""" Provides an asynchronous, crypto and compression aware socket for connecting to servers and processing incoming packet data. Coordinates with the Timers plugin to honor clock-time timers """ import logging import select import socket import time from Crypto.Cipher import AES from spock import utils from spock.mcp import mcdata, mcpacket from spock.plugins.base import PluginBase from spock.utils import pl_announce logger = logging.getLogger('spock') class AESCipher(object): def __init__(self, shared_secret): # Name courtesy of dx self.encryptifier = AES.new(shared_secret, AES.MODE_CFB, IV=shared_secret) self.decryptifier = AES.new(shared_secret, AES.MODE_CFB, IV=shared_secret) def encrypt(self, data): return self.encryptifier.encrypt(data) def decrypt(self, data): return self.decryptifier.decrypt(data) class SelectSocket(object): def __init__(self, timer): self.sending = False self.timer = timer self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.setblocking(False) self.close = self.sock.close self.shutdown = self.sock.shutdown self.recv = self.sock.recv self.send = self.sock.send def poll(self): flags = [] if self.sending: self.sending = False slist = [(self.sock,), (self.sock,), (self.sock,)] else: slist = [(self.sock,), (), (self.sock,)] timeout = self.timer.get_timeout() if timeout >= 0: slist.append(timeout) try: rlist, wlist, xlist = select.select(*slist) except select.error as e: logger.error("SELECTSOCKET: Socket Error: %s", str(e)) rlist = [] wlist = [] xlist = [] if rlist: flags.append('SOCKET_RECV') if wlist: flags.append('SOCKET_SEND') if xlist: flags.append('SOCKET_ERR') return flags def reset(self): self.sock.close() self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.setblocking(False) class NetCore(object): def __init__(self, sock, event): self.sock = sock self.event = event self.host = None self.port = None self.connected = False self.encrypted = False self.proto_state = mcdata.HANDSHAKE_STATE self.comp_state = mcdata.PROTO_COMP_OFF self.comp_threshold = -1 self.sbuff = b'' self.rbuff = utils.BoundBuffer() def connect(self, host='localhost', port=25565): self.host = host self.port = port try: logger.info("NETCORE: Attempting to connect to host: %s port: %s", host, port) # Set the connect to be a blocking operation self.sock.sock.setblocking(True) self.sock.sock.connect((self.host, self.port)) self.sock.sock.setblocking(False) self.connected = True self.event.emit('connect', (self.host, self.port)) logger.info("NETCORE: Connected to host: %s port: %s", host, port) except socket.error as error: logger.error("NETCORE: Error on Connect: %s", str(error)) def set_proto_state(self, state): self.proto_state = state self.event.emit(mcdata.state_lookup[state] + '_STATE') def set_comp_state(self, threshold): self.comp_threshold = threshold if threshold >= 0: self.comp_state = mcdata.PROTO_COMP_ON def push(self, packet): data = packet.encode(self.comp_state, self.comp_threshold) self.sbuff += (self.cipher.encrypt(data) if self.encrypted else data) self.event.emit(packet.ident, packet) self.event.emit(packet.str_ident, packet) self.sock.sending = True def push_packet(self, ident, data): self.push(mcpacket.Packet(ident, data)) def read_packet(self, data=b''): self.rbuff.append( self.cipher.decrypt(data) if self.encrypted else data) while True: self.rbuff.save() try: packet = mcpacket.Packet(ident=( self.proto_state, mcdata.SERVER_TO_CLIENT )).decode(self.rbuff, self.comp_state) except utils.BufferUnderflowException: self.rbuff.revert() break 
except mcpacket.PacketDecodeFailure as err: logger.warning('NETCORE: Packet decode failed') logger.warning( 'NETCORE: Failed packet ident is probably: %s', err.packet.str_ident ) self.event.emit('PACKET_ERR', err) break self.event.emit(packet.ident, packet) self.event.emit(packet.str_ident, packet) def enable_crypto(self, secret_key): self.cipher = AESCipher(secret_key) self.encrypted = True def disable_crypto(self): self.cipher = None self.encrypted = False def reset(self): self.connected = False self.sock.reset() self.__init__(self.sock, self.event) disconnect = reset @pl_announce('Net') class NetPlugin(PluginBase): requires = ('Event', 'Timers') defaults = { 'bufsize': 4096, 'sock_quit': True, } events = { 'event_tick': 'tick', 'SOCKET_RECV': 'handle_recv', 'SOCKET_SEND': 'handle_send', 'SOCKET_ERR': 'handle_err', 'SOCKET_HUP': 'handle_hup', 'PLAY<Disconnect': 'handle_disconnect', 'HANDSHAKE>Handshake': 'handle_handshake', 'LOGIN<Login Success': 'handle_login_success', 'LOGIN<Set Compression': 'handle_comp', 'PLAY<Set Compression': 'handle_comp', 'kill': 'handle_kill', } def __init__(self, ploader, settings): super(NetPlugin, self).__init__(ploader, settings) self.bufsize = self.settings['bufsize'] self.sock_quit = self.settings['sock_quit'] self.sock = SelectSocket(self.timers) self.net = NetCore(self.sock, self.event) self.sock_dead = False ploader.provides('Net', self.net) def tick(self, name, data): if self.net.connected: for flag in self.sock.poll(): self.event.emit(flag) else: timeout = self.timers.get_timeout() if timeout == -1: time.sleep(1) else: time.sleep(timeout) # SOCKET_RECV - Socket is ready to recieve data def handle_recv(self, name, data): if self.net.connected: try: data = self.sock.recv(self.bufsize) # print('read:', len(data)) if not data: self.event.emit('SOCKET_HUP') return self.net.read_packet(data) except socket.error as error: self.event.emit('SOCKET_ERR', error) # SOCKET_SEND - Socket is ready to send data and Send buffer contains # data to send def handle_send(self, name, data): if self.net.connected: try: sent = self.sock.send(self.net.sbuff) self.net.sbuff = self.net.sbuff[sent:] if self.net.sbuff: self.sending = True except socket.error as error: logger.error(str(error)) self.event.emit('SOCKET_ERR', error) # SOCKET_ERR - Socket Error has occured def handle_err(self, name, data): self.net.reset() logger.error("NETPLUGIN: Socket Error: %s", data) self.event.emit('disconnect', data) if self.sock_quit and not self.event.kill_event: self.sock_dead = True self.event.kill() # SOCKET_HUP - Socket has hung up def handle_hup(self, name, data): self.net.reset() logger.error("NETPLUGIN: Socket has hung up") self.event.emit('disconnect', "Socket Hung Up") if self.sock_quit and not self.event.kill_event: self.sock_dead = True self.event.kill() # Handshake - Change to whatever the next state is going to be def handle_handshake(self, name, packet): self.net.set_proto_state(packet.data['next_state']) # Login Success - Change to Play state def handle_login_success(self, name, packet): self.net.set_proto_state(mcdata.PLAY_STATE) # Handle Set Compression packets def handle_comp(self, name, packet): self.net.set_comp_state(packet.data['threshold']) def handle_disconnect(self, name, packet): logger.info("NETPLUGIN: Disconnected: %s", packet.data['reason']) self.event.emit('disconnect', packet.data['reason']) # Kill event - Try to shutdown the socket politely def handle_kill(self, name, data): logger.info("NETPLUGIN: Kill event recieved, shutting down socket") if not 
self.sock_dead: self.sock.shutdown(socket.SHUT_WR) self.sock.close()
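As a quick illustration of the AESCipher scheme above — AES in CFB mode with the shared secret reused as the IV, and separate encrypt/decrypt cipher objects because CFB is stateful — here is a minimal round trip. A sketch, assuming a PyCrypto-compatible library (e.g. pycryptodome) provides Crypto.Cipher.AES; the 16-byte secret is a stand-in value.

```python
from Crypto.Cipher import AES

secret = bytes(range(16))  # stand-in 16-byte shared secret (assumption)
encryptifier = AES.new(secret, AES.MODE_CFB, IV=secret)
decryptifier = AES.new(secret, AES.MODE_CFB, IV=secret)

# CFB chains state across calls, so two encrypts decrypt as one stream.
ct = encryptifier.encrypt(b"packet one") + encryptifier.encrypt(b"packet two")
assert decryptifier.decrypt(ct) == b"packet onepacket two"
```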
license: mit · hash: -8,122,943,707,253,785,000 · line_mean: 32.828467 · line_max: 79 · alpha_frac: 0.575467 · autogenerated: false · ratio: 3.884744 · config_test: false · has_no_keywords: false · few_assignments: false
repo_name: jameshensman/VFF · path: experiments/setting_a_b_M/gpr_special.py · copies: 1 · size: 4803
content:

# Copyright 2016 James Hensman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function, absolute_import
import numpy as np
import gpflow
import tensorflow as tf
from vff.spectral_covariance import make_Kuu, make_Kuf
from gpflow import settings
float_type = settings.dtypes.float_type


class GPR_1d(gpflow.models.GPModel):
    def __init__(self, X, Y, ms, a, b, kern,
                 mean_function=gpflow.mean_functions.Zero()):
        """
        In this special edition of VFF-GPR, we allow the boundary to be
        inside the data.

        This version is not very efficient. It recomputes the Kuf matrix at
        each iteration, does not precompute any quantities, and does not
        exploit Kuu's structure.

        Designed only for a demonstration with a, b inside the data limits;
        for a practical version, use the VFF package.
        """
        assert X.shape[1] == 1
        assert isinstance(kern, (gpflow.kernels.Matern12,
                                 gpflow.kernels.Matern32,
                                 gpflow.kernels.Matern52))
        likelihood = gpflow.likelihoods.Gaussian()
        gpflow.models.GPModel.__init__(self, X, Y, kern, likelihood,
                                       mean_function)
        self.num_data = X.shape[0]
        self.num_latent = Y.shape[1]
        self.a = a
        self.b = b
        self.ms = ms

    @gpflow.params_as_tensors
    def _build_likelihood(self):
        num_inducing = tf.size(self.ms)
        num_data = tf.shape(self.Y)[0]
        output_dim = tf.shape(self.Y)[1]

        err = self.Y - self.mean_function(self.X)
        Kdiag = self.kern.Kdiag(self.X)
        Kuf = make_Kuf(self.kern, self.X, self.a, self.b, self.ms)
        Kuu = make_Kuu(self.kern, self.a, self.b, self.ms)
        Kuu = Kuu.get()
        sigma = tf.sqrt(self.likelihood.variance)

        # Compute intermediate matrices
        L = tf.cholesky(Kuu)
        A = tf.matrix_triangular_solve(L, Kuf) / sigma
        AAT = tf.matmul(A, tf.transpose(A))
        B = AAT + tf.eye(num_inducing * 2 - 1, dtype=float_type)
        LB = tf.cholesky(B)
        log_det_B = 2. * tf.reduce_sum(tf.log(tf.diag_part(LB)))
        c = tf.matrix_triangular_solve(LB, tf.matmul(A, err)) / sigma

        # compute log marginal bound
        ND = tf.cast(num_data * output_dim, float_type)
        D = tf.cast(output_dim, float_type)
        bound = -0.5 * ND * tf.log(2 * np.pi * self.likelihood.variance)
        bound += -0.5 * D * log_det_B
        bound += -0.5 * tf.reduce_sum(tf.square(err)) / self.likelihood.variance
        bound += 0.5 * tf.reduce_sum(tf.square(c))
        bound += -0.5 * tf.reduce_sum(Kdiag) / self.likelihood.variance
        bound += 0.5 * tf.reduce_sum(tf.diag_part(AAT))
        return bound

    @gpflow.params_as_tensors
    def _build_predict(self, Xnew, full_cov=False):
        num_inducing = tf.size(self.ms)
        err = self.Y - self.mean_function(self.X)
        Kuf = make_Kuf(self.kern, self.X, self.a, self.b, self.ms)
        Kuu = make_Kuu(self.kern, self.a, self.b, self.ms)
        Kuu = Kuu.get()
        sigma = tf.sqrt(self.likelihood.variance)

        # Compute intermediate matrices
        L = tf.cholesky(Kuu)
        A = tf.matrix_triangular_solve(L, Kuf) / sigma
        AAT = tf.matmul(A, tf.transpose(A))
        B = AAT + tf.eye(num_inducing * 2 - 1, dtype=float_type)
        LB = tf.cholesky(B)
        c = tf.matrix_triangular_solve(LB, tf.matmul(A, err)) / sigma

        Kus = make_Kuf(self.kern, Xnew, self.a, self.b, self.ms)
        tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
        tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
        mean = tf.matmul(tf.transpose(tmp2), c)
        if full_cov:
            var = self.kern.K(Xnew) + \
                tf.matmul(tf.transpose(tmp2), tmp2) - \
                tf.matmul(tf.transpose(tmp1), tmp1)
            var = var[:, :, None] * tf.ones(self.Y.shape[1], dtype=float_type)
        else:
            var = self.kern.Kdiag(Xnew) + \
                tf.reduce_sum(tf.square(tmp2), 0) - \
                tf.reduce_sum(tf.square(tmp1), 0)
            var = var[:, None]  # * tf.ones(self.Y.shape[1], dtype=float_type)
        return mean + self.mean_function(Xnew), var
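The `log_det_B` line relies on the standard identity log|B| = 2 Σᵢ log Lᵢᵢ for the Cholesky factor L of a positive-definite B. A quick standalone NumPy check of that identity, independent of GPflow:

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
B = A @ A.T + 5.0 * np.eye(5)      # symmetric positive definite by construction
L = np.linalg.cholesky(B)

# log|B| computed two ways agrees
assert np.isclose(np.linalg.slogdet(B)[1], 2.0 * np.sum(np.log(np.diag(L))))
```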
license: apache-2.0 · hash: -2,842,001,661,177,074,000 · line_mean: 39.025 · line_max: 88 · alpha_frac: 0.600042 · autogenerated: false · ratio: 3.260692 · config_test: false · has_no_keywords: false · few_assignments: false
repo_name: angr/angr · path: angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py · copies: 1 · size: 7361
content:

from typing import Dict, TYPE_CHECKING
import logging

import pyvex
import archinfo

from .... import options, BP_BEFORE
from ....blade import Blade
from ....annocfg import AnnotatedCFG
from ....exploration_techniques import Slicecutor

from .resolver import IndirectJumpResolver

if TYPE_CHECKING:
    from angr.block import Block

l = logging.getLogger(name=__name__)


class OverwriteTmpValueCallback:
    def __init__(self, gp_value):
        self.gp_value = gp_value

    def overwrite_tmp_value(self, state):
        state.inspect.tmp_write_expr = state.solver.BVV(self.gp_value,
                                                        state.arch.bits)


class MipsElfFastResolver(IndirectJumpResolver):
    def __init__(self, project):
        super(MipsElfFastResolver, self).__init__(project, timeless=True)

    def filter(self, cfg, addr, func_addr, block, jumpkind):
        if not isinstance(self.project.arch, (archinfo.ArchMIPS32,
                                              archinfo.ArchMIPS64, )):
            return False
        return True

    def resolve(self, cfg, addr, func_addr, block, jumpkind):
        """
        Resolves the indirect jump in MIPS ELF binaries where all external
        function calls are indexed using gp.

        :param cfg:              A CFG instance.
        :param int addr:         IRSB address.
        :param int func_addr:    The function address.
        :param pyvex.IRSB block: The IRSB.
        :param str jumpkind:     The jumpkind.
        :return:                 If it was resolved and targets alongside it
        :rtype:                  tuple
        """
        project = self.project

        b = Blade(cfg.graph, addr, -1, cfg=cfg, project=project,
                  ignore_sp=True, ignore_bp=True, ignored_regs=('gp',),
                  cross_insn_opt=False, stop_at_calls=True)
        sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]
        if not sources:
            return False, []

        source = sources[0]
        source_addr = source[0]
        annotated_cfg = AnnotatedCFG(project, None, detect_loops=False)
        annotated_cfg.from_digraph(b.slice)

        state = project.factory.blank_state(
            addr=source_addr,
            mode="fastpath",
            remove_options=options.refs,
            # suppress unconstrained stack reads for `gp`
            add_options={
                options.SYMBOL_FILL_UNCONSTRAINED_REGISTERS,
                options.SYMBOL_FILL_UNCONSTRAINED_MEMORY,
                options.NO_CROSS_INSN_OPT,
            },
        )
        state.regs._t9 = func_addr
        func = cfg.kb.functions.function(addr=func_addr)

        # see if gp is used on this slice at all
        gp_used = self._is_gp_used_on_slice(project, b)

        gp_value = None
        if gp_used:
            if 'gp' not in func.info:
                # this might be a special case: gp is only used once in this
                # function, and it can be initialized right before its use site.
                # however, it should have been determined in CFGFast
                # cannot determine the value of gp. quit
                pass
            else:
                gp_value = func.info['gp']

            if gp_value is None:
                l.warning('Failed to determine value of register gp for '
                          'function %#x.', func.addr)
                return False, []

            # Special handling for cases where `gp` is stored on the stack
            gp_offset = project.arch.registers['gp'][0]
            self._set_gp_load_callback(state, b, project, gp_offset, gp_value)
            state.regs._gp = gp_value

        simgr = self.project.factory.simulation_manager(state)
        simgr.use_technique(Slicecutor(annotated_cfg, force_sat=True))
        simgr.run()

        if simgr.cut:
            # pick the successor that is cut right after executing `addr`
            try:
                target_state = next(iter(cut for cut in simgr.cut
                                         if cut.history.addr == addr))
            except StopIteration:
                l.info("Indirect jump at %#x cannot be resolved by %s.",
                       addr, repr(self))
                return False, [ ]
            target = target_state.addr
            if self._is_target_valid(cfg, target):
                l.debug("Indirect jump at %#x is resolved to target %#x.",
                        addr, target)
                return True, [ target ]
            l.info("Indirect jump at %#x is resolved to target %#x, which "
                   "seems to be invalid.", addr, target)
            return False, [ ]
        l.info("Indirect jump at %#x cannot be resolved by %s.", addr, repr(self))
        return False, [ ]

    @staticmethod
    def _set_gp_load_callback(state, blade, project, gp_offset, gp_value):
        got_gp_stack_store = False
        tmps = {}
        for block_addr_in_slice in set(slice_node[0] for slice_node
                                       in blade.slice.nodes()):
            for stmt in project.factory.block(block_addr_in_slice,
                                              cross_insn_opt=False).vex.statements:
                if isinstance(stmt, pyvex.IRStmt.WrTmp) \
                        and isinstance(stmt.data, pyvex.IRExpr.Load):
                    # Load from memory to a tmp - assuming it's loading from
                    # the stack
                    tmps[stmt.tmp] = 'stack'
                elif isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == gp_offset:
                    if isinstance(stmt.data, pyvex.IRExpr.RdTmp):
                        tmp_offset = stmt.data.tmp  # pylint:disable=cell-var-from-loop
                        if tmps.get(tmp_offset, None) == 'stack':
                            # found the load from stack
                            # we must make sure value of that temporary variable
                            # equals to the correct gp value
                            state.inspect.make_breakpoint(
                                'tmp_write',
                                when=BP_BEFORE,
                                condition=lambda s,
                                       bbl_addr_=block_addr_in_slice,
                                       tmp_offset_=tmp_offset:
                                    s.scratch.bbl_addr == bbl_addr_
                                    and s.inspect.tmp_write_num == tmp_offset_,
                                action=OverwriteTmpValueCallback(
                                    gp_value).overwrite_tmp_value
                            )
                            got_gp_stack_store = True
                            break
            if got_gp_stack_store:
                break

    @staticmethod
    def _is_gp_used_on_slice(project, b: Blade) -> bool:
        gp_offset = project.arch.registers['gp'][0]
        blocks_on_slice: Dict[int, 'Block'] = { }
        for block_addr, block_stmt_idx in b.slice.nodes():
            if block_addr not in blocks_on_slice:
                blocks_on_slice[block_addr] = project.factory.block(
                    block_addr, cross_insn_opt=False)
            block = blocks_on_slice[block_addr]
            stmt = block.vex.statements[block_stmt_idx]
            if isinstance(stmt, pyvex.IRStmt.WrTmp) \
                    and isinstance(stmt.data, pyvex.IRExpr.Get) \
                    and stmt.data.offset == gp_offset:
                gp_used = True
                break
        else:
            gp_used = False

        return gp_used
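The `pylint: disable=cell-var-from-loop` comment and the `bbl_addr_=...`/`tmp_offset_=...` default arguments in the breakpoint condition guard against Python's late-binding closures. A minimal standalone illustration of the pitfall and the default-argument fix:

```python
# Closures capture variables, not values: without default-argument binding,
# every callback sees the loop variable's final value.
late = [lambda: i for i in range(3)]
bound = [lambda i_=i: i_ for i in range(3)]

assert [f() for f in late] == [2, 2, 2]
assert [f() for f in bound] == [0, 1, 2]
```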
license: bsd-2-clause · hash: 5,518,427,772,508,819,000 · line_mean: 42.556213 · line_max: 133 · alpha_frac: 0.537835 · autogenerated: false · ratio: 4.177639 · config_test: false · has_no_keywords: false · few_assignments: false
repo_name: csomerlot/WIPTools · path: addin/Install/SingleBMP.py · copies: 1 · size: 12719
content:

# Import system modules
import sys, os

import Helper
import regression
import arcpy
from arcpy import env
from arcpy.sa import *

hp = Helper.Helper(sys.argv)

try:
    # Local variables
    Rural_1yrQ = Raster(os.path.join(hp.Workspace + "\\WIPoutput.mdb", "UndevQ"))
    BMPpts = os.path.join(hp.SWorkspace, "BMPptsSin.shp")
    Units = hp.units['size']
    wtredBMPs = os.path.join(hp.SWorkspace, "wtredBMPs")

    bmp_noclip = sys.argv[1]
    existing_efficiencies = sys.argv[5].split(';')
    proposed_efficiencies = sys.argv[6].split(';')
    landuse = sys.argv[7]
    if sys.argv[8] != "#":
        stream_reductions = sys.argv[8].split(';')
    strlngth = sys.argv[9]
    #~ parameters = hp.GetAlias(existing_efficiencies)

    Streams_nd = Raster(os.path.join(hp.Workspace + "\\WIPoutput.mdb", "streams"))
    Stream_Raster = hp.RemoveNulls(Streams_nd)
    arcpy.CopyRaster_management(hp.Workspace + "\\WIPoutput.mdb\\cumda",
                                os.path.join(hp.SWorkspace, "cumda"))
    Cum_da = Raster(os.path.join(hp.SWorkspace, "cumda"))
    flowdir = ExtractByMask(os.path.join(hp.Workspace + "\\WIPoutput.mdb", "flowdir"),
                            hp.Mask)
    flowdir.save(os.path.join(hp.SWorkspace, "flowdir"))

    if landuse == "Existing":
        LU = "E"
    else:
        LU = "F"

    vecMask = os.path.join(hp.SWorkspace, "vectMask.shp")
    arcpy.RasterToPolygon_conversion(hp.Mask, vecMask, "SIMPLIFY", "Value")

    existing_params = hp.GetAlias(existing_efficiencies)
    proposed_params = hp.GetAlias(proposed_efficiencies)
    streamreduc_params = hp.GetAlias(stream_reductions)
    # list.sort() returns None, so the original `.sort()` comparison was
    # always true; sorted() compares the key lists as intended
    if not (sorted(existing_params.keys()) == sorted(proposed_params.keys())
            == sorted(streamreduc_params.keys())):
        raise Exception, "Parameters found for Existing efficiencies, Proposed efficiencies, and Stream Reductions do not match"
    params = {}
    exec(hp.models['ProdTrans']['input'][-1])

    hp.log("Preparing input BMPs...")
    hp.SetPIDs(bmp_noclip)
    arcpy.Clip_analysis(bmp_noclip, vecMask, BMPpts)

    for p in existing_params:
        # If we switch the loops below to be param first, point second, then
        # we could include this stuff in the param loop. Right now we don't
        # want to run this calc for every point, hence this bit of code
        # duplication outside the main loops
        pn = p[:10].strip()
        TSSprod = os.path.join(hp.Workspace + "\\WIPoutput.mdb", "p" + LU + pn)
        pointsrc = ""
        if os.path.exists(os.path.join(hp.SWorkspace, "pt" + pn)):
            pointsrc = "pt" + pn
        defEro = 0
        if p in params:
            defEro = params[p]['DefEro']

        hp.log("Calculate Urban/Rural ratio...")
        Cumulative_Impervious = Raster(os.path.join(hp.Workspace + "\\WIPoutput.mdb",
                                                    "cumimpcovlake"))
        ## usgs_calcs = Helper.USGSVars(hp.Basin)
        urbanQcpbas = regression.urbanQcp(hp.Basin, Cum_da, Cumulative_Impervious)
        URratio = urbanQcpbas / Rural_1yrQ

        hp.log("Add erosivity to existing %s production..." % p)
        # need this to be here so that is not repeated many times inside CP,
        # even if there are no CP points
        TSSP_ero_ext = Helper.CalcErosivity(hp, defEro, TSSprod, pointsrc,
                                            URratio, Stream_Raster)
        arcpy.CopyRaster_management(TSSP_ero_ext,
                                    os.path.join(hp.SWorkspace, "ero") + p[:10].strip())

    hp.log("Checking for input BMPs in your area...")
    all = arcpy.GetCount_management(BMPpts)
    if all <= 1:
        raise Exception("You must have more than one point to run this tool!")

    hp.log("Looping through input BMPs...")
    BMProws = arcpy.SearchCursor(BMPpts)
    counter = 0
    count = 1
    #~ while BMProw:
    for BMProw in BMProws:
        print "%s\n" % (75 * '-')
        print BMPpts
        BMP_FID = BMProw.getValue("PID")
        hp.log("  Processing point %s of %s..." % (count, all))
        print "  %s BMPID: %s\n" % (BMPpts, BMP_FID)

        bmp_type = BMProw.getValue(sys.argv[2])
        bmp_Ex1yr = float(BMProw.getValue(sys.argv[3]))
        bmp_Prop1yr = float(BMProw.getValue(sys.argv[4]))
        hp.log("  Found bmp type of %s, existing Q1 of %s, and proposed Q1 of %s for PID %s"
               % (bmp_type, bmp_Ex1yr, bmp_Prop1yr, BMP_FID))

        SinBMPpts = os.path.join(hp.SWorkspace, "SinBMPpts.shp")
        hp.GetSubset(BMPpts, SinBMPpts, " \"PID\" = %s " % BMP_FID)

        SingleBMP = os.path.join(hp.SWorkspace, "SingleBMP")
        hp.log("Convert this project to a raster mask...")
        arcpy.FeatureToRaster_conversion(os.path.join(hp.SWorkspace, SinBMPpts),
                                         "PID", SingleBMP, flowdir)
        SinBMPmask = Reclassify(SingleBMP, "VALUE", "NoData 0; 0.001 100000 1", "DATA")
        SinBMPmask.save(os.path.join(hp.SWorkspace, "SinBMPmask"))

        for p in existing_params:
            pn = p[:10].strip()
            K = os.path.join(hp.SWorkspace, "K" + pn)
            TSSP_ero_ext = Raster(os.path.join(hp.SWorkspace, "ero" + pn))
            sum, chanp_red, washoff_red = 0, 0, 0

            bmp_eeff = float(BMProw.getValue(existing_params[p]))
            bmp_peff = float(BMProw.getValue(proposed_params[p]))
            stream_red_per_ft = float(BMProw.getValue(streamreduc_params[p]))
            hp.log("  Found existing bmp efficiency of %s, proposed bmp efficiency of %s, and stream reduction of %s for PID %s"
                   % (bmp_eeff, bmp_peff, stream_red_per_ft, BMP_FID))

            pointsrc = ""
            if os.path.exists(os.path.join(hp.SWorkspace, "pt" + pn)):
                pointsrc = "pt" + pn
            defEro = 0
            if p in params:
                defEro = params[p]['DefEro']

            if bmp_type.lower() in ['bmp', 'new bmp']:
                if bmp_Prop1yr < bmp_Ex1yr:
                    Channel_Prot = 1
                else:
                    Channel_Prot = 0
                if not defEro:
                    hp.log("  No Default erosivity for this BMP")
                    Channel_Prot = 0
                if not Channel_Prot:
                    hp.log("  No Channel Protection from this BMP")
                else:
                    hp.log("  Calculating Channel Protection from this BMP")
                    #~ arcpy.Merge_management("ChanBMPpts.shp; SinBMPpts.shp", "merge.shp")
                    ModCumDa, thisBMPras, this_ds = regression.ChannelProtection(
                        hp, SinBMPpts, sys.argv[4])
                    ModCumDa.save(os.path.join(hp.SWorkspace, "modcumda"))
                    this_ds.save(os.path.join(hp.SWorkspace, "this_ds"))

                    hp.log("Calculate Future Urban/Rural ratio...")
                    URratio = this_ds / Rural_1yrQ
                    URratio.save(os.path.join(hp.SWorkspace, "urratio"))

                    TSSP_ero = Helper.CalcErosivity(hp, defEro, TSSprod, pointsrc,
                                                    URratio, Stream_Raster)
                    TSSP_ero.save(os.path.join(hp.SWorkspace, "tssp_ero"))

                    hp.log("%s reduction..." % p)
                    TSSred = TSSP_ero_ext - TSSP_ero
                    TSSred.save(os.path.join(hp.SWorkspace, "tssred"))

                    hp.log("Tabulating %s reduction..." % p)
                    chanp_red = hp.Zonal(TSSred)
                    print "  %s Reduction component from Channel protection = %s\n" % (p, chanp_red)

                if bmp_peff > bmp_eeff:
                    WQ_benefit = 1
                else:
                    WQ_benefit = 0
                if not WQ_benefit:
                    hp.log("  No Water Quality Benefit from this BMP")
                else:
                    hp.log("  Calculating Water Quality Benefit from this BMP")
                    REMBMPpts = os.path.join(hp.SWorkspace, "RemBMPpts.shp")
                    hp.GetSubset(BMPpts, REMBMPpts,
                                 " \"PID\" <> %s AND %s > 0" % (BMP_FID, existing_params[p]))
                    #~ arcpy.CopyFeatures_management(BMPpts, )
                    #~ rows = arcpy.UpdateCursor(os.path.join(hp.SWorkspace, "RemBMPpts.shp"))
                    #~ row = rows.next()
                    #~ while row:
                    #~     if row.getValue("PID") == BMP_FID or float(row.getValue(existing_params[p])) <= 0:
                    #~         rows.deleteRow(row)
                    #~     row = rows.next()
                    #~ del row, rows

                    #~ hp.log("Adding erosivity to %s production..." % p)
                    data_ero = Helper.CalcErosivity(hp, defEro, TSSprod, pointsrc,
                                                    URratio, Stream_Raster)
                    REMBMPs = (os.path.join(hp.SWorkspace, "REMBMPs"))

                    hp.log("Convert all other BMPs to Raster...")
                    arcpy.FeatureToRaster_conversion(REMBMPpts, existing_params[p],
                                                     REMBMPs, flowdir)
                    BMPs = hp.RemoveNulls(REMBMPs)
                    wtredBMPs = ExtractByMask(BMPs / 100.0, hp.Mask)

                    arcpy.CopyRaster_management(data_ero,
                                                os.path.join(hp.SWorkspace, "data_ero"))
                    data_ero1 = Raster(os.path.join(hp.SWorkspace, "data_ero"))
                    counter += 1
                    TSSLoad = hp.BMP2DA(flowdir, pn + str(counter), data_ero1, wtredBMPs)

                    hp.log("%s reduction..." % p)
                    TSSLoadpt = TSSLoad * (bmp_peff - bmp_eeff) * SinBMPmask / 100

                    hp.log("Tabulating %s reduction..." % p)
                    washoff_red = hp.Zonal(TSSLoadpt)
                    print "  %s Reduction component from Washoff benefit = %s\n" % (p, washoff_red)
                    WQ = washoff_red

                sum = chanp_red + washoff_red
                print TSSprod, sum
                hp.log("Writing attributes")
                hp.SetAtt(BMP_FID, hp.ShortName(p) + "red" + LU[0], sum, bmp_noclip)

            if bmp_type.lower() in ['stream restoration']:
                # Calculate in-stream reduction ################################
                hp.log("Convert Stream Lengths to Raster...")
                arcpy.env.extent = os.path.join(hp.SWorkspace, "flowdir")
                arcpy.FeatureToRaster_conversion(os.path.join(hp.SWorkspace, "SinBMPpts.shp"),
                                                 strlngth,
                                                 os.path.join(hp.SWorkspace, "len"),
                                                 flowdir)
                slengths = Float(Raster(os.path.join(hp.SWorkspace, "len")))
                thisstream = hp.AttExtract(slengths, flowdir, "thisstream",
                                           Stream_Raster, Units)

                hp.log("Make mask...")
                ThisBMPmask = Reclassify(thisstream, "Value",
                                         ".00001 100000 1;-100000 0 0; NoData 0", "DATA")
                ThisBMPmask.save(os.path.join(hp.SWorkspace, "ThisBMPmask"))

                hp.log("Calculate reduction...")
                streamprod = (bmp_peff / 100) * Raster(TSSprod) * ThisBMPmask * \
                    Power(URratio, 1.5)
                streamprod.save(os.path.join(hp.SWorkspace, "streamprod"))

                hp.log("Reclassify flowdirection to find straight paths...")
                Flowdirs = Reclassify(flowdir, "VALUE",
                                      "1 1;2 0;4 1;8 0;16 1;32 0;64 1;128 0", "DATA")
                hp.log("Reclassify flowdirection to find diagonal paths...")
                Flowdird = Reclassify(flowdir, "VALUE",
                                      "1 0;2 1;4 0;8 1;16 0;32 1;64 0;128 1", "DATA")
                hp.log("Calculate distance grid...")
                Dist = (Flowdirs + Flowdird * 1.4142) * hp.units['size']
                hp.log("Calculate length")
                thislen = Dist * ThisBMPmask
                dist_red = hp.Zonal(thislen) * stream_red_per_ft
                print "stream_red_per_ft: %s, dist_red: %s" % (stream_red_per_ft, dist_red)

                hp.log("Summarize Stream reduction from point...")
                stream_red = hp.Zonal(streamprod) + dist_red
                print "Stream reduction", stream_red
                hp.log("Writing attributes")
                hp.SetAtt(BMP_FID, hp.ShortName(p) + "red" + LU[0], stream_red, bmp_noclip)

        count += 1

    hp.Close()
except:
    i, j, k = sys.exc_info()
    hp.EH(i, j, k)
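The parameter-name check near the top originally compared the return values of `list.sort()`; since `sort()` sorts in place and returns None, that comparison was always true and the check never fired. A two-line demonstration of the pitfall and the `sorted()` fix (valid in Python 2 and 3):

```python
a, b = [2, 1], [3, 1]
assert (a.sort() == b.sort()) is True      # None == None: the check never fires
assert sorted([2, 1]) != sorted([3, 1])    # sorted() returns comparable lists
```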
license: gpl-3.0 · hash: -7,616,929,208,638,953,000 · line_mean: 46.636704 · line_max: 259 · alpha_frac: 0.526378 · autogenerated: false · ratio: 3.510627 · config_test: false · has_no_keywords: false · few_assignments: false
repo_name: baliga-lab/cmonkey2 · path: cmonkey/network.py · copies: 1 · size: 13490
content:

# vi: sw=4 ts=4 et:
"""network.py - cMonkey network module

This file is part of cMonkey Python.  Please see README and LICENSE for
more information and licensing details.
"""
import numpy as np
import logging
import os.path

import cmonkey.util as util
import cmonkey.datamatrix as dm
import cmonkey.scoring as scoring

# Python2/Python3 compatibility
try:
    xrange
except NameError:
    xrange = range


class Network:
    """class to represent a network graph.
    The graph is considered undirected
    For efficiency reasons, edges is a list of [source, target, weight]
    """

    def __init__(self, name, edges, weight, dummy):
        """creates a network from a list of edges"""
        self.name = name
        self.edges = edges
        self.weight = weight
        self.__compute_edges_with_source()

    def __compute_edges_with_source(self):
        self.edges_with_source = {}
        for edge in self.edges:
            if edge[0] not in self.edges_with_source:
                self.edges_with_source[edge[0]] = []
            if edge[1] not in self.edges_with_source:
                self.edges_with_source[edge[1]] = []
            self.edges_with_source[edge[0]].append(edge)
            self.edges_with_source[edge[1]].append(edge)

    def validate(self, synonyms, genes):
        """Change the names in the network to have the standard names in the
        synonyms (elsewhere called the thesaurus). Problem: it does not also
        rename the ratios matrix to the standard names

        Keyword arguments:
        synonyms -- The thesaurus.
        genes    -- The gene names from the ratios.

        Usage: self.validate(synonyms, genes)
        """
        # remap first
        new_edges = []
        for n0, n1, score in self.edges:
            n0 = synonyms[n0] if n0 in synonyms else n0
            n1 = synonyms[n1] if n1 in synonyms else n1
            new_edges.append((n0, n1, score))
        self.edges = new_edges
        self.__compute_edges_with_source()

        # then validate
        found = []
        for g in genes:
            primary = synonyms.get(g, g)
            for n0, n1, score in self.edges:
                if primary == n0 or primary == n1:
                    found.append(primary)
        if len(found) < len(genes) / 2:
            print(self.edges)  # was the undefined name `edges`
            raise(Exception("only %d genes found in edges" % len(found)))

    def num_edges(self):
        """returns the number of edges in this graph"""
        return len(self.edges)

    def total_score(self):
        """returns the sum of edge scores"""
        return sum(edge[2] for edge in self.edges) * 2

    def normalize_scores_to(self, score):
        """normalizes all edge scores so that they sum up to
        the specified score"""
        total = self.total_score()
        if score != total:
            # score_e / score_total * score == score_e * (score_total / score)
            # we use this to save a division per loop iteration
            scale = float(score) / float(total)
            self.edges = [(edge[0], edge[1], edge[2] * scale)
                          for edge in self.edges]
            self.__compute_edges_with_source()

    def edges_with_node(self, node):
        """returns the edges where node is a node of"""
        if node in self.edges_with_source:
            return self.edges_with_source[node]
        else:
            return []

    def __repr__(self):
        return "Network: %s\n# edges: %d\n" % (self.name, len(self.edges))

    @classmethod
    def create(cls, name, edges, weight, organism=None, ratios=None,
               check_size=True):
        """standard Factory method"""
        logging.debug("Network.create() called with %d edges", len(edges))
        if edges is None:
            raise Exception("no edges specified in network '%s'" % name)
        added = set([])
        network_edges = []
        nodes = set()
        for edge in edges:
            nodes.add(edge[0])
            nodes.add(edge[1])

        # Shrink the number of edges to the ones that are actually usable.
        # These are selected by the following considerations:
        # 1. check nodes that are in the thesaurus
        # 2. check gene names that are in the ratios matrix, but not in the
        #    network
        # 3. keep the nodes that are in the ratios and are in the thesaurus
        num_nodes_orig = len(nodes)
        if organism:
            thesaurus = organism.thesaurus()
            nodes = {n for n in nodes if n in thesaurus}
            if ratios:
                cano_nodes = {thesaurus[n] for n in nodes}
                cano_genes = {thesaurus[row] for row in ratios.row_names
                              if row in thesaurus}
                probes_in = [gene for gene in cano_genes
                             if gene in cano_nodes]
                nodes = {n for n in nodes if thesaurus[n] in probes_in}

        logging.debug("# nodes in network '%s': %d (of %d)",
                      name, len(nodes), num_nodes_orig)

        for edge in edges:
            # we ignore self-edges, and edges with nodes not in the final nodes
            if edge[0] != edge[1] and edge[0] in nodes and edge[1] in nodes:
                key = "%s:%s" % (edge[0], edge[1])
                key_rev = "%s:%s" % (edge[1], edge[0])
                if key not in added and key_rev not in added:
                    network_edges.append((edge[0], edge[1], edge[2]))
                added.add(key)
                added.add(key_rev)

        if check_size and len(network_edges) < 10:
            raise Exception("Error: only %d edges in network '%s'"
                            % (len(network_edges), name))
        logging.debug("Created network '%s' with %d edges",
                      name, len(network_edges))
        return Network(name, network_edges, weight, 0)


COMPUTE_NETWORK = None
ALL_GENES = None
NETWORK_SCORE_MEMBERSHIP = None


def compute_network_scores(cluster):
    """Generic method to compute network scores"""
    global COMPUTE_NETWORK, ALL_GENES, NETWORK_SCORE_MEMBERSHIP
    network = COMPUTE_NETWORK
    genes = sorted(NETWORK_SCORE_MEMBERSHIP.rows_for_cluster(cluster))
    gene_scores = {}
    for gene in genes:
        # TODO: optimization: we can use numpy arrays for the scores array
        # and then sum
        edges = network.edges_with_node(gene)
        for edge in edges:
            other_gene = edge[0]
            if other_gene == gene:
                other_gene = edge[1]
            if other_gene in ALL_GENES:
                if other_gene not in gene_scores:
                    gene_scores[other_gene] = []
                gene_scores[other_gene].append(edge[2])

    final_gene_scores = {}
    for gene, scores in gene_scores.items():
        final_gene_scores[gene] = sum(scores) / len(genes)
        final_gene_scores[gene] = -np.log(final_gene_scores[gene] + 1)
    return final_gene_scores


class ScoringFunction(scoring.ScoringFunctionBase):
    """Network scoring function. Note that even though there are several
    networks, scoring can't be generalized with the default ScoringCombiner,
    since the scores are computed through weighted addition rather than
    quantile normalization"""

    def __init__(self, function_id, cmrun):
        """Create scoring function instance"""
        scoring.ScoringFunctionBase.__init__(self, function_id, cmrun)
        self.__networks = None
        self.run_log = scoring.RunLog(function_id, cmrun.dbsession(),
                                      self.config_params)

    def initialize(self, args):
        """process additional parameters"""
        self.weights = {nw['type']: nw['weight'] for nw in args['networks']}

    def run_logs(self):
        return [self.run_log]

    def compute(self, iteration_result, ref_matrix=None):
        """overridden compute for storing additional information"""
        result = scoring.ScoringFunctionBase.compute(self, iteration_result,
                                                     ref_matrix)
        iteration_result['networks'] = self.score_means
        return result

    def compute_force(self, iteration_result, ref_matrix=None):
        """overridden compute for storing additional information"""
        result = scoring.ScoringFunctionBase.compute_force(self, iteration_result,
                                                           ref_matrix)
        iteration_result['networks'] = self.score_means
        return result

    def networks(self):
        """networks are cached"""
        if self.__networks is None:
            self.__networks = retrieve_networks(self.organism)
            if self.config_params['remap_network_nodes']:
                # network names are non-primary, this can happen
                # when the user makes up their own data
                for network in self.__networks:
                    network.validate(self.organism.thesaurus(),
                                     self.gene_names())
        return self.__networks

    def __update_score_means(self, network_scores):
        """returns the score means, adjusted to the current cluster setup"""
        # a dictionary that holds the network score means for
        # each cluster, separated for each network
        if network_scores:
            score_means = {network.name:
                           self.__compute_cluster_score_means(network_scores[network.name])
                           for network in self.networks()}
            return {network: np.average(np.array(list(cluster_score_means.values())))
                    for network, cluster_score_means in score_means.items()}
        return {}

    def do_compute(self, iteration_result, ref_matrix=None):
        """compute method, iteration is the 0-based iteration number"""
        matrix = dm.DataMatrix(len(self.gene_names()), self.num_clusters(),
                               self.gene_names())
        network_scores = {}
        for network in self.networks():
            logging.debug("Compute scores for network '%s', WEIGHT: %f",
                          network.name, network.weight)
            start_time = util.current_millis()
            network_score = self.__compute_network_cluster_scores(network)
            network_scores[network.name] = network_score
            self.__update_score_matrix(matrix, network_score, network.weight)
            elapsed = util.current_millis() - start_time
            logging.debug("NETWORK '%s' SCORING TIME: %f s.",
                          network.name, (elapsed / 1000.0))

        # compute and store score means
        self.score_means = self.__update_score_means(network_scores)
        return matrix

    def __compute_network_cluster_scores(self, network):
        """computes the cluster scores for the given network"""
        global COMPUTE_NETWORK, ALL_GENES, NETWORK_SCORE_MEMBERSHIP
        result = {}
        use_multiprocessing = self.config_params[scoring.KEY_MULTIPROCESSING]
        # Set the huge memory objects into globals
        # These are readonly anyways, but using Manager.list() or something
        # similar brings this down to a crawl
        COMPUTE_NETWORK = network
        ALL_GENES = set(self.gene_names())  # optimization: O(1) lookup
        NETWORK_SCORE_MEMBERSHIP = self.membership
        if use_multiprocessing:
            with util.get_mp_pool(self.config_params) as pool:
                map_results = pool.map(compute_network_scores,
                                       xrange(1, self.num_clusters() + 1))
                for cluster in xrange(1, self.num_clusters() + 1):
                    result[cluster] = map_results[cluster - 1]
        else:
            for cluster in xrange(1, self.num_clusters() + 1):
                result[cluster] = compute_network_scores(cluster)
        # cleanup
        COMPUTE_NETWORK = None
        ALL_GENES = None
        NETWORK_SCORE_MEMBERSHIP = None
        return result

    def __update_score_matrix(self, matrix, network_score, weight):
        """add values into the result score matrix"""
        mvalues = matrix.values
        gene_names = self.gene_names()
        for cluster in xrange(1, self.num_clusters() + 1):
            cluster_genes = set(network_score[cluster].keys())
            for row_index in xrange(self.ratios.num_rows):
                gene = gene_names[row_index]
                if gene in cluster_genes:
                    weighted_score = network_score[cluster][gene] * weight
                    mvalues[row_index][cluster - 1] += weighted_score

    def __compute_cluster_score_means(self, network_score):
        """compute the score means on the given network score"""
        result = {}
        for cluster in xrange(1, self.num_clusters() + 1):
            cluster_scores = [network_score[cluster][gene]
                              if gene in network_score[cluster] else 0.0
                              for gene in self.rows_for_cluster(cluster)]
            result[cluster] = util.trim_mean(cluster_scores, 0.05)
        return result


def retrieve_networks(organism):
    """retrieves the networks provided by the organism object and
    possibly other sources, doing some normalization if necessary
    Note: wanted to make it private, but the scoring function
    can not see it after doing so"""
    networks = organism.networks()
    max_score = 0
    for network in networks:
        #logging.debug("Network '%s' with %d edges", network.name(),
        #              network.num_edges())
        nw_total = network.total_score()
        if nw_total > max_score:
            max_score = nw_total

    for network in networks:
        network.normalize_scores_to(max_score)
    return networks
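A minimal sketch of the undirected edge index that `__compute_edges_with_source()` builds: each edge is filed under both endpoints, so `edges_with_node()` reduces to a dictionary lookup.

```python
edges = [("a", "b", 1.0), ("b", "c", 0.5)]
index = {}
for edge in edges:
    index.setdefault(edge[0], []).append(edge)
    index.setdefault(edge[1], []).append(edge)

assert [e[2] for e in index["b"]] == [1.0, 0.5]   # "b" touches both edges
```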
license: lgpl-3.0 · hash: 2,727,564,660,023,877,000 · line_mean: 39.510511 · line_max: 105 · alpha_frac: 0.592809 · autogenerated: false · ratio: 4.048619 · config_test: false · has_no_keywords: false · few_assignments: false
repo_name: fvilca/cnn_tensorflow_cifar · path: cifar10_multi_gpu_train.py · copies: 1 · size: 10371
content:

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""A binary to train CIFAR-10 using multiple GPUs with synchronous updates.

Accuracy:
cifar10_multi_gpu_train.py achieves ~86% accuracy after 100K steps (256
epochs of data) as judged by cifar10_eval.py.

Speed: With batch_size 128.

System        | Step Time (sec/batch)  |     Accuracy
--------------------------------------------------------------------
1 Tesla K20m  | 0.35-0.60              | ~86% at 60K steps  (5 hours)
1 Tesla K40m  | 0.25-0.35              | ~86% at 100K steps (4 hours)
2 Tesla K20m  | 0.13-0.20              | ~84% at 30K steps  (2.5 hours)
3 Tesla K20m  | 0.13-0.18              | ~84% at 30K steps
4 Tesla K20m  | ~0.10                  | ~84% at 30K steps

Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.

http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from datetime import datetime
import os.path
import re
import time

import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
import cifar10

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 1,
                            """How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")


def tower_loss(scope):
  """Calculate the total loss on a single tower running the CIFAR model.

  Args:
    scope: unique prefix string identifying the CIFAR tower, e.g. 'tower_0'

  Returns:
    Tensor of shape [] containing the total loss for a batch of data
  """
  # Get images and labels for CIFAR-10.
  images, labels = cifar10.distorted_inputs()

  # Build inference Graph.
  logits = cifar10.inference(images)

  # Build the portion of the Graph calculating the losses. Note that we will
  # assemble the total_loss using a custom function below.
  _ = cifar10.loss(logits, labels)

  # Assemble all of the losses for the current tower only.
  losses = tf.get_collection('losses', scope)

  # Calculate the total loss for the current tower.
  total_loss = tf.add_n(losses, name='total_loss')

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
    #tf.contrib.deprecated.scalar_summary(loss_name, l)
    tf.scalar_summary(loss_name, l)  # summarize the loss itself (was the constant 1)

  return total_loss


def average_gradients(tower_grads):
  """Calculate the average gradient for each shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
      is over individual gradients. The inner list is over the gradient
      calculation for each tower.
  Returns:
     List of pairs of (gradient, variable) where the gradient has been averaged
     across all towers.
  """
  average_grads = []
  for grad_and_vars in zip(*tower_grads):
    # Note that each grad_and_vars looks like the following:
    #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
    grads = []
    for g, _ in grad_and_vars:
      # Add 0 dimension to the gradients to represent the tower.
      expanded_g = tf.expand_dims(g, 0)

      # Append on a 'tower' dimension which we will average over below.
      grads.append(expanded_g)

    # Average over the 'tower' dimension.
    grad = tf.concat(grads, 0)
    grad = tf.reduce_mean(grad, 0)

    # Keep in mind that the Variables are redundant because they are shared
    # across towers. So .. we will just return the first tower's pointer to
    # the Variable.
    v = grad_and_vars[0][1]
    grad_and_var = (grad, v)
    average_grads.append(grad_and_var)
  return average_grads


def train():
  """Train CIFAR-10 for a number of steps."""
  with tf.Graph().as_default(), tf.device('/cpu:0'):
    # Create a variable to count the number of train() calls. This equals the
    # number of batches processed * FLAGS.num_gpus.
    global_step = tf.get_variable(
        'global_step', [],
        initializer=tf.constant_initializer(0), trainable=False)

    # Calculate the learning rate schedule.
    num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
                             FLAGS.batch_size)
    decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)

    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    cifar10.LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)

    # Create an optimizer that performs gradient descent.
    opt = tf.train.GradientDescentOptimizer(lr)

    # Calculate the gradients for each model tower.
    tower_grads = []
    for i in xrange(FLAGS.num_gpus):
      with tf.device('/gpu:%d' % i):
        with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
          # Calculate the loss for one tower of the CIFAR model. This function
          # constructs the entire CIFAR model but shares the variables across
          # all towers.
          loss = tower_loss(scope)

          # Reuse variables for the next tower.
          tf.get_variable_scope().reuse_variables()

          # Retain the summaries from the final tower.
          summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)

          # Calculate the gradients for the batch of data on this CIFAR tower.
          grads = opt.compute_gradients(loss)

          # Keep track of the gradients across all towers.
          tower_grads.append(grads)

    # We must calculate the mean of each gradient. Note that this is the
    # synchronization point across all towers.
    grads = average_gradients(tower_grads)

    # Add a summary to track the learning rate.
    summaries.append(tf.contrib.deprecated.scalar_summary('learning_rate', lr))

    # Add histograms for gradients.
    for grad, var in grads:
      if grad is not None:
        summaries.append(
            tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients',
                                                    grad))

    # Apply the gradients to adjust the shared variables.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
      summaries.append(
          tf.contrib.deprecated.histogram_summary(var.op.name, var))

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # Group all updates to into a single train op.
    train_op = tf.group(apply_gradient_op, variables_averages_op)

    # Create a saver.
    saver = tf.train.Saver(tf.global_variables())

    # Build the summary operation from the last tower summaries.
    summary_op = tf.contrib.deprecated.merge_summary(summaries)

    # Build an initialization operation to run below.
    init = tf.global_variables_initializer()

    # Start running operations on the Graph. allow_soft_placement must be set to
    # True to build towers on GPU, as some of the ops do not have GPU
    # implementations.
    sess = tf.Session(config=tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=FLAGS.log_device_placement))
    sess.run(init)

    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)

    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      _, loss_value = sess.run([train_op, loss])
      duration = time.time() - start_time

      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

      if step % 10 == 0:
        num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = duration / FLAGS.num_gpus

        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print(format_str % (datetime.now(), step, loss_value,
                            examples_per_sec, sec_per_batch))

      if step % 100 == 0:
        summary_str = sess.run(summary_op)
        summary_writer.add_summary(summary_str, step)

      # Save the model checkpoint periodically.
      if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)


def main(argv=None):  # pylint: disable=unused-argument
  cifar10.maybe_download_and_extract()
  if tf.gfile.Exists(FLAGS.train_dir):
    tf.gfile.DeleteRecursively(FLAGS.train_dir)
  tf.gfile.MakeDirs(FLAGS.train_dir)
  train()


if __name__ == '__main__':
  tf.app.run()
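The core of `average_gradients()` is a stack-and-mean over the tower axis; a NumPy sketch of the same arithmetic, independent of TensorFlow:

```python
import numpy as np

tower_grads = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]  # one gradient per GPU
# mirrors expand_dims -> concat -> reduce_mean over the 'tower' dimension
avg = np.mean(np.stack(tower_grads, axis=0), axis=0)
assert np.allclose(avg, [2.0, 3.0])
```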
license: mit · hash: -3,657,095,802,407,810,000 · line_mean: 36.576087 · line_max: 80 · alpha_frac: 0.646514 · autogenerated: false · ratio: 3.775391 · config_test: false · has_no_keywords: false · few_assignments: false
repo_name: food52/thumbor · path: thumbor/console.py · copies: 1 · size: 2470
content:

#!/usr/bin/python
# -*- coding: utf-8 -*-

# thumbor imaging service
# https://github.com/globocom/thumbor/wiki

# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com

import optparse

from thumbor.context import ServerParameters
from thumbor import __version__


def get_server_parameters(arguments=None):
    parser = optparse.OptionParser(
        usage="thumbor or type thumbor -h (--help) for help",
        description=__doc__,
        version=__version__)
    parser.add_option(
        "-p", "--port", type="int", dest="port", default=8888,
        help="The port to run this thumbor instance at [default: %default].")
    parser.add_option(
        "-i", "--ip", dest="ip", default="0.0.0.0",
        help="The host address to run this thumbor instance at [default: %default].")
    parser.add_option(
        "-f", "--fd", dest="file_descriptor",
        help="The file descriptor number or path to listen for connections on "
             "(--port and --ip will be ignored if this is set) [default: %default].")
    parser.add_option(
        "-c", "--conf", dest="conf", default="",
        help="The path of the configuration file to use for this thumbor instance [default: %default].")
    parser.add_option(
        "-k", "--keyfile", dest="keyfile", default="",
        help="The path of the security key file to use for this thumbor instance [default: %default].")
    parser.add_option(
        "-l", "--log-level", dest="log_level", default="warning",
        help="The log level to be used. Possible values are: debug, info, warning, error, critical or notset. [default: %default].")
    parser.add_option(
        "-o", "--log_file", dest="log_file", default="",
        help="Path of the file to log to.")
    parser.add_option(
        "-a", "--app", dest="app", default='thumbor.app.ThumborServiceApp',
        help="A custom app to use for this thumbor server in case you subclassed ThumborServiceApp [default: %default].")

    (options, args) = parser.parse_args(arguments)

    port = options.port
    ip = options.ip
    fd = options.file_descriptor
    conf = options.conf or None
    keyfile = options.keyfile or None
    log_level = options.log_level
    log_file = options.log_file

    return ServerParameters(port=port,
                            ip=ip,
                            config_path=conf,
                            keyfile=keyfile,
                            log_level=log_level,
                            app_class=options.app,
                            fd=fd,
                            log_file=log_file)
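A hedged usage sketch: passing an explicit argument list avoids consuming sys.argv. It assumes thumbor is installed, and the attribute names on the returned object are assumed to mirror the constructor keywords above.

```python
from thumbor.console import get_server_parameters

params = get_server_parameters(["--port", "9000", "--log-level", "debug"])
print(params.port, params.log_level)  # assumed attributes -> 9000 debug
```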
license: mit · hash: 8,738,400,710,362,373,000 · line_mean: 53.888889 · line_max: 204 · alpha_frac: 0.640891 · autogenerated: false · ratio: 3.805855 · config_test: false · has_no_keywords: false · few_assignments: false
repo_name: gautam1858/tensorflow · path: tensorflow/tools/compatibility/tf_upgrade_v2.py · copies: 1 · size: 54358
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import ast import pasta import six from tensorflow.tools.compatibility import ast_edits from tensorflow.tools.compatibility import renames_v2 from tensorflow.tools.compatibility import reorders_v2 class TFAPIChangeSpec(ast_edits.APIChangeSpec): """List of maps that describe what changed in the API.""" def __init__(self): # Maps from a function name to a dictionary that describes how to # map from an old argument keyword to the new argument keyword. # If the new argument is None, it will be removed. # Only keyword args are handled, so make sure to also put any function in # function_reorders to ensure that all args are made into keywords first. self.function_keyword_renames = { "tf.gradients": { "colocate_gradients_with_ops": None, }, "tf.hessians": { "colocate_gradients_with_ops": None, }, "*.minimize": { "colocate_gradients_with_ops": None, }, "*.compute_gradients": { "colocate_gradients_with_ops": None, }, "tf.cond": { "strict": None, "fn1": "true_fn", "fn2": "false_fn" }, "tf.argmin": { "dimension": "axis", }, "tf.argmax": { "dimension": "axis", }, "tf.arg_min": { "dimension": "axis", }, "tf.arg_max": { "dimension": "axis", }, "tf.math.argmin": { "dimension": "axis", }, "tf.math.argmax": { "dimension": "axis", }, "tf.image.crop_and_resize": { "box_ind": "box_indices", }, "tf.image.extract_image_patches": { "ksizes": "sizes", }, "tf.extract_image_patches": { "ksizes": "sizes", }, "tf.expand_dims": { "dim": "axis", }, "tf.batch_to_space": { "block_size": "block_shape", }, "tf.space_to_batch": { "block_size": "block_shape", }, "tf.nn.space_to_batch": { "block_size": "block_shape", }, "tf.constant": { "verify_shape": "verify_shape_is_now_always_true", }, "tf.convert_to_tensor": { "preferred_dtype": "dtype_hint" }, "tf.nn.softmax_cross_entropy_with_logits": { "dim": "axis", "_sentinel": None, }, "tf.nn.softmax_cross_entropy_with_logits_v2": { "dim": "axis" }, "tf.linalg.l2_normalize": { "dim": "axis", }, "tf.linalg.norm": { "keep_dims": "keepdims", }, "tf.norm": { "keep_dims": "keepdims", }, "tf.load_file_system_library": { "library_filename": "library_location", }, "tf.count_nonzero": { "input_tensor": "input", "keep_dims": "keepdims", "reduction_indices": "axis", }, "tf.math.count_nonzero": { "input_tensor": "input", "keep_dims": "keepdims", "reduction_indices": "axis", }, "tf.nn.erosion2d": { "kernel": "filters", "rates": "dilations", }, "tf.math.l2_normalize": { "dim": "axis", }, "tf.math.log_softmax": { "dim": "axis", }, "tf.math.softmax": { "dim": "axis" }, "tf.nn.l2_normalize": { "dim": "axis", }, "tf.nn.log_softmax": { "dim": "axis", }, "tf.nn.moments": { "keep_dims": "keepdims", }, "tf.nn.pool": { "dilation_rate": "dilations" }, 
"tf.nn.separable_conv2d": { "rate": "dilations" }, "tf.nn.depthwise_conv2d": { "rate": "dilations" }, "tf.nn.softmax": { "dim": "axis" }, "tf.nn.sufficient_statistics": { "keep_dims": "keepdims" }, "tf.debugging.assert_all_finite": { "t": "x", "msg": "message", }, "tf.sparse.add": { "thresh": "threshold", }, "tf.sparse_add": { "thresh": "threshold", }, "tf.sparse.concat": { "concat_dim": "axis", "expand_nonconcat_dim": "expand_nonconcat_dims", }, "tf.sparse_concat": { "concat_dim": "axis", "expand_nonconcat_dim": "expand_nonconcat_dims", }, "tf.sparse.split": { "split_dim": "axis", }, "tf.sparse_split": { "split_dim": "axis", }, "tf.sparse.reduce_max": { "reduction_axes": "axis", "keep_dims": "keepdims", }, "tf.sparse_reduce_max": { "reduction_axes": "axis", "keep_dims": "keepdims", }, "tf.sparse.reduce_sum": { "reduction_axes": "axis", "keep_dims": "keepdims", }, "tf.sparse_reduce_sum": { "reduction_axes": "axis", "keep_dims": "keepdims", }, "tf.nn.max_pool_with_argmax": { "Targmax": "output_dtype", }, "tf.multinomial": { "output_dtype": "dtype", }, "tf.random.multinomial": { "output_dtype": "dtype", }, "tf.reverse_sequence": { "seq_dim": "seq_axis", "batch_dim": "batch_axis", }, "tf.nn.batch_norm_with_global_normalization": { "t": "input", "m": "mean", "v": "variance", }, "tf.nn.dilation2d": { "filter": "filters", "rates": "dilations", }, "tf.nn.conv3d": { "filter": "filters" }, "tf.zeros_like": { "tensor": "input", }, "tf.ones_like": { "tensor": "input", }, "tf.nn.conv2d_transpose": { "value": "input", "filter": "filters", }, "tf.nn.conv3d_transpose": { "value": "input", "filter": "filters", }, "tf.nn.convolution": { "filter": "filters", "dilation_rate": "dilations", }, "tf.gfile.Exists": { "filename": "path", }, "tf.gfile.Remove": { "filename": "path", }, "tf.gfile.Stat": { "filename": "path", }, "tf.gfile.Glob": { "filename": "pattern", }, "tf.gfile.MkDir": { "dirname": "path", }, "tf.gfile.MakeDirs": { "dirname": "path", }, "tf.gfile.DeleteRecursively": { "dirname": "path", }, "tf.gfile.IsDirectory": { "dirname": "path", }, "tf.gfile.ListDirectory": { "dirname": "path", }, "tf.gfile.Copy": { "oldpath": "src", "newpath": "dst", }, "tf.gfile.Rename": { "oldname": "src", "newname": "dst", }, "tf.gfile.Walk": { "in_order": "topdown", }, "tf.random.stateless_multinomial": { "output_dtype": "dtype", }, "tf.string_to_number": { "string_tensor": "input", }, "tf.strings.to_number": { "string_tensor": "input", }, "tf.string_to_hash_bucket": { "string_tensor": "input", }, "tf.strings.to_hash_bucket": { "string_tensor": "input", }, "tf.reduce_all": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.math.reduce_all": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.reduce_any": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.math.reduce_any": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.reduce_min": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.math.reduce_min": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.reduce_max": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.math.reduce_max": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.reduce_sum": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.math.reduce_sum": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.reduce_mean": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.math.reduce_mean": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.reduce_prod": { 
"reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.math.reduce_prod": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.reduce_logsumexp": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.math.reduce_logsumexp": { "reduction_indices": "axis", "keep_dims": "keepdims", }, "tf.reduce_join": { "keep_dims": "keepdims", "reduction_indices": "axis" }, "tf.strings.reduce_join": { "keep_dims": "keepdims", "reduction_indices": "axis" }, "tf.squeeze": { "squeeze_dims": "axis", }, "tf.nn.weighted_moments": { "keep_dims": "keepdims" }, } # pylint: disable=line-too-long # Add additional renames not in renames_v2.py here. # IMPORTANT: For the renames in here, if you also need to add to # function_reorders or function_keyword_renames, use the OLD function name. # These renames happen after the arguments have been processed. self.manual_symbol_renames = { "tf.batch_to_space_nd": "tf.batch_to_space", "tf.batch_gather": "tf.gather", "tf.space_to_batch_nd": "tf.space_to_batch", "tf.nn.space_to_batch": "tf.space_to_batch", "tf.estimator.inputs": "tf.compat.v1.estimator.inputs", "tf.extract_image_patches": "tf.image.extract_image_patches", "tf.gfile.Copy": "tf.io.gfile.copy", "tf.gfile.DeleteRecursively": "tf.io.gfile.rmtree", "tf.gfile.Exists": "tf.io.gfile.exists", "tf.gfile.Glob": "tf.io.gfile.glob", "tf.gfile.IsDirectory": "tf.io.gfile.isdir", "tf.gfile.ListDirectory": "tf.io.gfile.listdir", "tf.gfile.MakeDirs": "tf.io.gfile.makedirs", "tf.gfile.MkDir": "tf.io.gfile.mkdir", "tf.gfile.Remove": "tf.io.gfile.remove", "tf.gfile.Rename": "tf.io.gfile.rename", "tf.gfile.Stat": "tf.io.gfile.stat", "tf.gfile.Walk": "tf.io.gfile.walk", "tf.contrib.data.AUTOTUNE": "tf.data.experimental.AUTOTUNE", "tf.contrib.data.Counter": "tf.data.experimental.Counter", "tf.contrib.data.CheckpointInputPipelineHook": "tf.data.experimental.CheckpointInputPipelineHook", "tf.contrib.data.CsvDataset": "tf.data.experimental.CsvDataset", "tf.contrib.data.Optional": "tf.data.experimental.Optional", "tf.contrib.data.RandomDataset": "tf.data.experimental.RandomDataset", "tf.contrib.data.Reducer": "tf.data.experimental.Reducer", "tf.contrib.data.SqlDataset": "tf.data.experimental.SqlDataset", "tf.contrib.data.StatsAggregator": "tf.data.experimental.StatsAggregator", "tf.contrib.data.TFRecordWriter": "tf.data.experimental.TFRecordWriter", "tf.contrib.data.assert_element_shape": "tf.data.experimental.assert_element_shape", "tf.contrib.data.batch_and_drop_remainder": "tf.compat.v1.contrib.data.batch_and_drop_remainder", "tf.contrib.data.bucket_by_sequence_length": "tf.data.experimental.bucket_by_sequence_length", "tf.contrib.data.choose_from_datasets": "tf.data.experimental.choose_from_datasets", "tf.contrib.data.copy_to_device": "tf.data.experimental.copy_to_device", "tf.contrib.data.dense_to_sparse_batch": "tf.data.experimental.dense_to_sparse_batch", "tf.contrib.data.enumerate_dataset": "tf.data.experimental.enumerate_dataset", "tf.contrib.data.get_next_as_optional": "tf.data.experimental.get_next_as_optional", "tf.contrib.data.get_single_element": "tf.data.experimental.get_single_element", "tf.contrib.data.group_by_reducer": "tf.data.experimental.group_by_reducer", "tf.contrib.data.group_by_window": "tf.data.experimental.group_by_window", "tf.contrib.data.ignore_errors": "tf.data.experimental.ignore_errors", "tf.contrib.data.latency_stats": "tf.data.experimental.latency_stats", "tf.contrib.data.make_batched_features_dataset": "tf.data.experimental.make_batched_features_dataset", 
"tf.contrib.data.make_csv_dataset": "tf.data.experimental.make_csv_dataset", "tf.contrib.data.make_saveable_from_iterator": "tf.data.experimental.make_saveable_from_iterator", "tf.contrib.data.map_and_batch": "tf.data.experimental.map_and_batch", "tf.contrib.data.padded_batch_and_drop_remainder": "tf.compat.v1.contrib.data.padded_batch_and_drop_remainder", "tf.contrib.data.parallel_interleave": "tf.data.experimental.parallel_interleave", "tf.contrib.data.parse_example_dataset": "tf.data.experimental.parse_example_dataset", "tf.contrib.data.prefetch_to_device": "tf.data.experimental.prefetch_to_device", "tf.contrib.data.read_batch_features": "tf.compat.v1.contrib.data.read_batch_features", "tf.contrib.data.reduce_dataset": "tf.compat.v1.contrib.data.reduce_dataset", "tf.contrib.data.rejection_resample": "tf.data.experimental.rejection_resample", "tf.contrib.data.sample_from_datasets": "tf.data.experimental.sample_from_datasets", "tf.contrib.data.scan": "tf.data.experimental.scan", "tf.contrib.data.set_stats_aggregator": "tf.data.experimental.set_stats_aggregator", "tf.contrib.data.shuffle_and_repeat": "tf.data.experimental.shuffle_and_repeat", "tf.contrib.data.sliding_window_batch": "tf.compat.v1.contrib.data.sliding_window_batch", "tf.contrib.data.sloppy_interleave": "tf.compat.v1.contrib.data.sloppy_interleave", "tf.contrib.data.unbatch": "tf.data.experimental.unbatch", "tf.contrib.data.unique": "tf.data.experimental.unique", "tf.contrib.rnn.RNNCell": "tf.nn.rnn_cell.RNNCell", "tf.contrib.rnn.LSTMStateTuple": "tf.nn.rnn_cell.LSTMStateTuple", "tf.contrib.framework.sort": "tf.sort", "tf.contrib.framework.argsort": "tf.argsort", "tf.count_nonzero": "tf.math.count_nonzero", "tf.manip.batch_to_space_nd": "tf.batch_to_space", "tf.quantize_v2": "tf.quantization.quantize", "tf.sparse_add": "tf.sparse.add", "tf.sparse_concat": "tf.sparse.concat", "tf.sparse_split": "tf.sparse.split", "tf.sparse_matmul": "tf.linalg.matmul", "tf.sparse_reduce_sum": "tf.sparse.reduce_sum", "tf.sparse_reduce_max": "tf.sparse.reduce_max", "tf.random.stateless_multinomial": "tf.random.stateless_categorical", "tf.substr": "tf.strings.substr", "tf.string_to_hash_bucket": "tf.strings.to_hash_bucket", "tf.string_to_number": "tf.strings.to_number", "tf.multinomial": "tf.random.categorical", "tf.random.multinomial": "tf.random.categorical", "tf.reduce_join": "tf.strings.reduce_join", "tf.load_file_system_library": "tf.load_library", "tf.pywrap_tensorflow": "tf.compat.v1.pywrap_tensorflow", "tf.bincount": "tf.math.bincount", "tf.confusion_matrix": "tf.math.confusion_matrix", "tf.train.confusion_matrix": "tf.math.confusion_matrix", "tf.decode_csv": "tf.io.decode_csv", "tf.data.Iterator": "tf.compat.v1.data.Iterator", "tf.parse_example": "tf.io.parse_example", "tf.parse_single_example": "tf.io.parse_single_example", "tf.nn.fused_batch_norm": "tf.compat.v1.nn.fused_batch_norm", "tf.nn.softmax_cross_entropy_with_logits_v2": "tf.nn.softmax_cross_entropy_with_logits", "tf.losses.Reduction.MEAN": "tf.compat.v1.losses.Reduction.MEAN", "tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS": "tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS", "tf.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS": "tf.compat.v1.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS", "tf.lite.constants.FLOAT": "tf.float32", "tf.lite.constants.INT32": "tf.int32", "tf.lite.constants.INT64": "tf.int64", "tf.lite.constants.STRING": "tf.string", "tf.lite.constants.QUANTIZED_UINT8": "tf.uint8", "tf.arg_max": "tf.argmax", "tf.arg_min": "tf.argmin", # tf.nn.ctc_loss is still available in 
        # 2.0 but behavior changed significantly.
        "tf.nn.ctc_loss": "tf.compat.v1.nn.ctc_loss",
        "tf.zeros_initializer": "tf.compat.v1.initializers.zeros",
        "tf.ones_initializer": "tf.compat.v1.initializers.ones",
        "tf.constant_initializer": "tf.compat.v1.initializers.constant",
        "tf.random_uniform_initializer": "tf.compat.v1.initializers.random_uniform",
        "tf.random_normal_initializer": "tf.compat.v1.initializers.random_normal",
        "tf.truncated_normal_initializer": "tf.compat.v1.initializers.truncated_normal",
        "tf.image.resize_images": "tf.image.resize",
        "tf.random_poisson": "tf.random.poisson",
        "tf.debugging.assert_greater": "tf.compat.v1.debugging.assert_greater",
        "tf.debugging.assert_greater_equal": "tf.compat.v1.debugging.assert_greater_equal",
        "tf.debugging.assert_integer": "tf.compat.v1.debugging.assert_integer",
        "tf.debugging.assert_less": "tf.compat.v1.debugging.assert_less",
        "tf.debugging.assert_less_equal": "tf.compat.v1.debugging.assert_less_equal",
        "tf.debugging.assert_near": "tf.compat.v1.debugging.assert_near",
        "tf.debugging.assert_negative": "tf.compat.v1.debugging.assert_negative",
        "tf.debugging.assert_non_negative": "tf.compat.v1.debugging.assert_non_negative",
        "tf.debugging.assert_non_positive": "tf.compat.v1.debugging.assert_non_positive",
        "tf.debugging.assert_none_equal": "tf.compat.v1.debugging.assert_none_equal",
        "tf.debugging.assert_type": "tf.compat.v1.debugging.assert_type",
        "tf.debugging.assert_positive": "tf.compat.v1.debugging.assert_positive",
        "tf.debugging.assert_equal": "tf.compat.v1.debugging.assert_equal",
        "tf.debugging.assert_scalar": "tf.compat.v1.debugging.assert_scalar",
        "tf.assert_equal": "tf.compat.v1.assert_equal",
        "tf.assert_less": "tf.compat.v1.assert_less",
        "tf.assert_greater": "tf.compat.v1.assert_greater",
        "tf.debugging.assert_rank": "tf.compat.v1.debugging.assert_rank",
        "tf.debugging.assert_rank_at_least": "tf.compat.v1.debugging.assert_rank_at_least",
        "tf.debugging.assert_rank_in": "tf.compat.v1.debugging.assert_rank_in",
        "tf.assert_rank": "tf.compat.v1.assert_rank",
    }
    # pylint: enable=line-too-long

    # Mapping from function to the new name of the function
    self.symbol_renames = renames_v2.renames
    self.symbol_renames.update(self.manual_symbol_renames)

    # Variables that should be changed to functions.
    self.change_to_function = {}

    # pylint: disable=line-too-long
    # This list should just contain names of functions that had
    # their arguments reordered. After adding a function name to the list
    # run the following to update reorders_v2.py:
    # bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
    # bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
    # pylint: enable=line-too-long
    self.reordered_function_names = {
        "tf.io.serialize_sparse",
        "tf.io.serialize_many_sparse",
        "tf.argmax",
        "tf.argmin",
        "tf.batch_gather",
        "tf.batch_to_space",
        "tf.cond",
        "tf.nn.space_to_batch",
        "tf.boolean_mask",
        "tf.convert_to_tensor",
        "tf.nn.moments",
        "tf.nn.convolution",
        "tf.nn.crelu",
        "tf.nn.weighted_moments",
        "tf.nn.pool",
        "tf.nn.separable_conv2d",
        "tf.nn.depthwise_conv2d",
        "tf.multinomial",
        "tf.random.multinomial",
        "tf.pad",
        "tf.quantize_v2",
        "tf.feature_column.categorical_column_with_vocabulary_file",
        "tf.shape",
        "tf.size",
        "tf.random.poisson",
        "tf.sparse.add",
        "tf.sparse_add",
        "tf.sparse.concat",
        "tf.sparse_concat",
        "tf.sparse.segment_mean",
        "tf.sparse.segment_sqrt_n",
        "tf.sparse.segment_sum",
        "tf.sparse_matmul",
        "tf.sparse.reduce_max",
        "tf.sparse_reduce_max",
        "tf.io.decode_csv",
        "tf.strings.length",
        "tf.strings.reduce_join",
        "tf.strings.substr",
        "tf.substr",
        "tf.transpose",
        "tf.tuple",
        "tf.parse_example",
        "tf.parse_single_example",
        "tf.io.parse_example",
        "tf.io.parse_single_example",
        "tf.while_loop",
        "tf.reduce_all",
        "tf.math.reduce_all",
        "tf.reduce_any",
        "tf.math.reduce_any",
        "tf.reduce_min",
        "tf.math.reduce_min",
        "tf.reduce_max",
        "tf.math.reduce_max",
        "tf.reduce_sum",
        "tf.math.reduce_sum",
        "tf.reduce_mean",
        "tf.math.reduce_mean",
        "tf.reduce_prod",
        "tf.math.reduce_prod",
        "tf.reduce_logsumexp",
        "tf.math.reduce_logsumexp",
        "tf.reduce_join",
        "tf.confusion_matrix",
        "tf.math.confusion_matrix",
        "tf.math.in_top_k",
        "tf.nn.depth_to_space",
        "tf.nn.embedding_lookup",
        "tf.nn.embedding_lookup_sparse",
        "tf.nn.in_top_k",
        "tf.nn.space_to_depth",
        "tf.linalg.norm",
        "tf.norm",
        "tf.reverse_sequence",
        "tf.sparse_split",
        # tf.nn.softmax_cross_entropy_with_logits *must* be called with
        # keyword arguments. Add keyword arguments in rare case when they
        # are not specified.
        "tf.nn.softmax_cross_entropy_with_logits",
    }

    # Functions that were reordered should be changed to the new keyword args
    # for safety, if positional arguments are used. If you have reversed the
    # positional arguments yourself, this could do the wrong thing.
    self.function_reorders = reorders_v2.reorders

    # Specially handled functions (pasta version)
    # Each transformer is a callable which will be called with the arguments
    # transformer(parent, node, full_name, name, logs, errors)
    # Where logs and errors are lists to which (line, col, msg) tuples can be
    # appended, full_name is the FQN of the function called (or None if that is
    # unknown), name is the name of the function called (or None is that is
    # unknown). node is an ast.Call node representing this function call, and
    # parent is its parent in the AST.
    # The function may modify node (but not parent), and must return
    # - none, if nothing was modified
    # - node, if node was modified in place (make sure to use
    #   pasta.ast_utils.replace_child to swap out children, otherwise formatting
    #   may get messy)
    # - a replacement for node, if the whole call node was replaced. The caller
    #   will take care of changing parent.
    self.function_transformers = {
        "tf.nn.dropout": self._dropout_transformer,
        "tf.batch_gather": self._batch_gather_transformer,
        "tf.to_bfloat16": self._cast_transformer,
        "tf.to_complex128": self._cast_transformer,
        "tf.to_complex64": self._cast_transformer,
        "tf.to_double": self._cast_transformer,
        "tf.to_float": self._cast_transformer,
        "tf.to_int32": self._cast_transformer,
        "tf.to_int64": self._cast_transformer,
        "tf.nn.softmax_cross_entropy_with_logits":
            self._softmax_cross_entropy_with_logits_transformer,
        "tf.image.resize_area": self._image_resize_transformer,
        "tf.image.resize_bicubic": self._image_resize_transformer,
        "tf.image.resize_bilinear": self._image_resize_transformer,
        "tf.image.resize_nearest_neighbor": self._image_resize_transformer,
    }

    decay_function_comment = (
        "WARNING: <function name> has been changed to return a callable instead"
        " of a tensor when graph building, but its functionality remains "
        "unchanged during eager execution (returns a callable like "
        "before). The converter cannot detect and fix this reliably, so "
        "this usage has been converted to compat.v1 (even though it may already"
        " be correct).\n"
    )

    # TODO(b/118888586): add default value change to update script.
    default_loss_reduction_changed = (
        "WARNING: default value of loss_reduction has been changed to "
        "SUM_OVER_BATCH_SIZE.\n"
    )

    assert_return_type_comment = (
        "WARNING: assert_* functions have been changed to return None, the "
        "data argument has been removed, and arguments have been reordered."
        "\nThe calls have been converted to compat.v1 for safety (even though "
        "they may already have been correct)."
    )

    assert_rank_comment = (
        "WARNING: assert_rank_* functions have been changed to return None, and"
        " the data and summarize arguments have been removed."
        "\nThe calls have been converted to compat.v1 for safety (even though "
        "they may already have been correct)."
    )

    tf_01s_like_no_optimize_comment = (
        "WARNING: tf.zeros_like and tf.ones_like no longer have the optimize "
        "argument in TF 2.0 or after (also, `tensor' argument is renamed to "
        "`input')."
        "\nThe calls have been converted to compat.v1 for safety (even though "
        "they may already have been correct)."
    )

    deprecate_partition_strategy_comment = (
        "WARNING: `partition_strategy` has been removed from `%s`. "
        "The 'div' strategy is used by default.")

    initializers_no_dtype_comment = (
        "WARNING: tf.initializers and tf.keras.initializers no longer have the "
        "dtype argument in the constructor or partition_info argument in the "
        "call method in TF 2.0 and after. The only API symbols are now "
        "tf.keras.initializers.* or tf.initializers.*."
        "\nThe calls have been converted to compat.v1 for safety (even though "
        "they may already have been correct).")

    uniform_unit_scaling_initializer_comment = (
        "WARNING: uniform_unit_scaling_initializer has been removed. Please use"
        " tf.initializers.variance_scaling instead with distribution=uniform "
        "to get equivalent behaviour.")

    metrics_comment = (
        "WARNING: tf.metrics have been converted to object oriented versions in"
        " TF 2.0 and after. The metric function calls have been converted to "
        "compat.v1 for backward compatibility. Please update these calls to "
        "the TF 2.0 versions.")

    losses_comment = (
        "WARNING: tf.losses have been converted to object oriented versions in"
        " TF 2.0 and after. The loss function calls have been converted to "
        "compat.v1 for backward compatibility. Please update these calls to "
        "the TF 2.0 versions.")

    export_saved_model_renamed = (
        "(Manual edit required) Please rename the method export_savedmodel() "
        "to export_saved_model(). Two things to note:\n\t(1) The argument "
        "strip_default_attributes has been removed. The function will always "
        "strip the default attributes from ops. If this breaks your code, "
        "please switch to tf.compat.v1.estimator.Estimator.\n\t(2) This change "
        "only affects core estimator. If you are using "
        "tf.contrib.learn.Estimator, please switch to using core estimator.")

    make_initializable_iterator_deprecation = (
        "(Manual edit required) The "
        "`tf.data.Dataset.make_initializable_iterator()` method has been "
        "removed. If you are using the Estimator API, you can return a dataset "
        "directly from your input functions without creating an iterator. "
        "As a last resort, please replace calls to that method on `dataset` "
        "with a call to "
        "`tf.compat.v1.data.make_initializable_iterator(dataset)`.")

    make_one_shot_iterator_deprecation = (
        "(Manual edit required) The "
        "`tf.data.Dataset.make_one_shot_iterator()` method has been "
        "removed. If you are using eager execution, you can iterate over "
        "`dataset` using a Python `for` loop. If you are using the Estimator "
        "API, you can return a dataset directly from your input functions "
        "without creating an iterator. As a last resort, please replace calls "
        "to that method on `dataset` with a call to "
        "`tf.compat.v1.data.make_one_shot_iterator(dataset)`.")

    # Function warnings. <function name> placeholder inside warnings will be
    # replaced by function name.
    # You can use *. to add items which do not check the FQN, and apply to e.g.,
    # methods.
    self.function_warnings = {
        "*.export_savedmodel": export_saved_model_renamed,
        "*.make_initializable_iterator": make_initializable_iterator_deprecation,
        "*.make_one_shot_iterator": make_one_shot_iterator_deprecation,
        "tf.assert_equal": assert_return_type_comment,
        "tf.assert_none_equal": assert_return_type_comment,
        "tf.assert_negative": assert_return_type_comment,
        "tf.assert_positive": assert_return_type_comment,
        "tf.assert_non_negative": assert_return_type_comment,
        "tf.assert_non_positive": assert_return_type_comment,
        "tf.assert_near": assert_return_type_comment,
        "tf.assert_less": assert_return_type_comment,
        "tf.assert_less_equal": assert_return_type_comment,
        "tf.assert_greater": assert_return_type_comment,
        "tf.assert_greater_equal": assert_return_type_comment,
        "tf.assert_integer": assert_return_type_comment,
        "tf.assert_type": assert_return_type_comment,
        "tf.assert_scalar": assert_return_type_comment,
        "tf.assert_rank": assert_rank_comment,
        "tf.assert_rank_at_least": assert_rank_comment,
        "tf.assert_rank_in": assert_rank_comment,
        "tf.debugging.assert_equal": assert_return_type_comment,
        "tf.debugging.assert_greater": assert_return_type_comment,
        "tf.debugging.assert_greater_equal": assert_return_type_comment,
        "tf.debugging.assert_integer": assert_return_type_comment,
        "tf.debugging.assert_less": assert_return_type_comment,
        "tf.debugging.assert_less_equal": assert_return_type_comment,
        "tf.debugging.assert_near": assert_return_type_comment,
        "tf.debugging.assert_negative": assert_return_type_comment,
        "tf.debugging.assert_non_negative": assert_return_type_comment,
        "tf.debugging.assert_non_positive": assert_return_type_comment,
        "tf.debugging.assert_none_equal": assert_return_type_comment,
        "tf.debugging.assert_positive": assert_return_type_comment,
        "tf.debugging.assert_type": assert_return_type_comment,
        "tf.debugging.assert_scalar": assert_return_type_comment,
        "tf.debugging.assert_rank": assert_rank_comment,
        "tf.debugging.assert_rank_at_least": assert_rank_comment,
        "tf.debugging.assert_rank_in": assert_rank_comment,
        "tf.device":
            "tf.device no longer takes function as an argument. "
            "'device_name_or_function' argument has been renamed to "
            "'device_name'.",
        "tf.flags":
            "tf.flags has been removed, please use the argparse or absl"
            " module if you need command line parsing.",
        "tf.train.exponential_decay": decay_function_comment,
        "tf.train.piecewise_constant_decay": decay_function_comment,
        "tf.train.polynomial_decay": decay_function_comment,
        "tf.train.natural_exp_decay": decay_function_comment,
        "tf.train.inverse_time_decay": decay_function_comment,
        "tf.train.cosine_decay": decay_function_comment,
        "tf.train.cosine_decay_restarts": decay_function_comment,
        "tf.train.linear_cosine_decay": decay_function_comment,
        "tf.train.noisy_linear_cosine_decay": decay_function_comment,
        "tf.estimator.LinearClassifier": default_loss_reduction_changed,
        "tf.estimator.LinearRegressor": default_loss_reduction_changed,
        "tf.estimator.DNNLinearCombinedClassifier": default_loss_reduction_changed,
        "tf.estimator.DNNLinearCombinedRegressor": default_loss_reduction_changed,
        "tf.estimator.DNNRegressor": default_loss_reduction_changed,
        "tf.estimator.DNNClassifier": default_loss_reduction_changed,
        "tf.estimator.BaselineClassifier": default_loss_reduction_changed,
        "tf.estimator.BaselineRegressor": default_loss_reduction_changed,
        "tf.nn.conv1d":
            "WARNING: use_cudnn_on_gpu argument has been removed and \"value\""
            " was renamed to \"input\"",
        "tf.nn.conv2d":
            "WARNING: use_cudnn_on_gpu argument has been removed and "
            "\"filter\" was renamed to \"filters\"",
        "tf.nn.conv2d_backprop_filter":
            "WARNING: use_cudnn_on_gpu argument has been removed",
        "tf.nn.conv2d_backprop_input":
            "WARNING: use_cudnn_on_gpu argument has been removed and "
            "\"filter\" was renamed to \"filters\"",
        "tf.nn.erosion2d":
            "WARNING: <function name> now requires a data_format argument",
        "tf.nn.nce_loss":
            deprecate_partition_strategy_comment % "tf.nn.nce_loss",
        "tf.nn.safe_embedding_lookup_sparse":
            deprecate_partition_strategy_comment %
            "tf.nn.safe_embedding_lookup_sparse",
        "tf.nn.sampled_softmax_loss":
            deprecate_partition_strategy_comment % "tf.nn.sampled_softmax_loss",
        "tf.zeros_like": tf_01s_like_no_optimize_comment,
        "tf.ones_like": tf_01s_like_no_optimize_comment,
        "tf.nn.embedding_lookup":
            "WARNING: validate_indices argument has been removed.",
        "tf.while_loop":
            "tf.while_loop no longer takes 'return_same_structure' argument. "
            "'return_same_structure' now defaults to True. Also, 'name' "
            "argument is now the last argument.",
        "tf.image.sample_distorted_bounding_box":
            "tf.image.sample_distorted_bounding_box no longer takes 'seed2' "
            "argument.",
        "tf.nn.ctc_beam_search_decoder":
            "tf.nn.ctc_beam_search_decoder no longer takes 'merge_repeated' "
            "argument. 'merge_repeated' now defaults to False.",
        "tf.nn.fractional_avg_pool":
            "tf.nn.fractional_avg_pool no longer takes 'seed2' and "
            "'deterministic' arguments. Now it takes a single 'seed' arg. If "
            "'seed' is zero, the execution is random and deterministic "
            "otherwise",
        "tf.nn.fractional_max_pool":
            "tf.nn.fractional_max_pool no longer takes 'seed2' and "
            "'deterministic' arguments. Now it takes a single 'seed' arg. If "
            "'seed' is zero, the execution is random and deterministic "
            "otherwise",
        "tf.test.assert_equal_graph_def":
            "tf.assert_equal_graph_def no longer takes 'checkpoint_v2' "
            "argument. 'checkpoint_v2' now defaults to True.",
        "tf.keras.initializers.Zeros": initializers_no_dtype_comment,
        "tf.keras.initializers.zeros": initializers_no_dtype_comment,
        "tf.keras.initializers.Ones": initializers_no_dtype_comment,
        "tf.keras.initializers.ones": initializers_no_dtype_comment,
        "tf.keras.initializers.Constant": initializers_no_dtype_comment,
        "tf.keras.initializers.constant": initializers_no_dtype_comment,
        "tf.keras.initializers.VarianceScaling": initializers_no_dtype_comment,
        "tf.keras.initializers.Orthogonal": initializers_no_dtype_comment,
        "tf.keras.initializers.orthogonal": initializers_no_dtype_comment,
        "tf.keras.initializers.Identity": initializers_no_dtype_comment,
        "tf.keras.initializers.identity": initializers_no_dtype_comment,
        "tf.keras.initializers.glorot_uniform": initializers_no_dtype_comment,
        "tf.keras.initializers.glorot_normal": initializers_no_dtype_comment,
        "tf.initializers.zeros": initializers_no_dtype_comment,
        "tf.zeros_initializer": initializers_no_dtype_comment,
        "tf.initializers.ones": initializers_no_dtype_comment,
        "tf.ones_initializer": initializers_no_dtype_comment,
        "tf.initializers.constant": initializers_no_dtype_comment,
        "tf.constant_initializer": initializers_no_dtype_comment,
        "tf.initializers.random_uniform": initializers_no_dtype_comment,
        "tf.random_uniform_initializer": initializers_no_dtype_comment,
        "tf.initializers.random_normal": initializers_no_dtype_comment,
        "tf.random_normal_initializer": initializers_no_dtype_comment,
        "tf.initializers.truncated_normal": initializers_no_dtype_comment,
        "tf.truncated_normal_initializer": initializers_no_dtype_comment,
        "tf.initializers.variance_scaling": initializers_no_dtype_comment,
        "tf.variance_scaling_initializer": initializers_no_dtype_comment,
        "tf.initializers.orthogonal": initializers_no_dtype_comment,
        "tf.orthogonal_initializer": initializers_no_dtype_comment,
        "tf.initializers.identity": initializers_no_dtype_comment,
        "tf.glorot_uniform_initializer": initializers_no_dtype_comment,
        "tf.initializers.glorot_uniform": initializers_no_dtype_comment,
        "tf.glorot_normal_initializer": initializers_no_dtype_comment,
        "tf.initializers.glorot_normal": initializers_no_dtype_comment,
        "tf.initializers.uniform_unit_scaling": uniform_unit_scaling_initializer_comment,
        "tf.uniform_unit_scaling_initializer": uniform_unit_scaling_initializer_comment,
        "tf.losses.absolute_difference": losses_comment,
        "tf.losses.add_loss": losses_comment,
        "tf.losses.compute_weighted_loss": losses_comment,
        "tf.losses.cosine_distance": losses_comment,
        "tf.losses.get_losses": losses_comment,
        "tf.losses.get_regularization_loss": losses_comment,
        "tf.losses.get_regularization_losses": losses_comment,
        "tf.losses.get_total_loss": losses_comment,
        "tf.losses.hinge_loss": losses_comment,
        "tf.losses.huber_loss": losses_comment,
        "tf.losses.log_loss": losses_comment,
        "tf.losses.mean_pairwise_squared_error": losses_comment,
        "tf.losses.mean_squared_error": losses_comment,
        "tf.losses.sigmoid_cross_entropy": losses_comment,
        "tf.losses.softmax_cross_entropy": losses_comment,
        "tf.losses.sparse_softmax_cross_entropy": losses_comment,
        "tf.metrics.accuracy": metrics_comment,
        "tf.metrics.auc": metrics_comment,
        "tf.metrics.average_precision_at_k": metrics_comment,
        "tf.metrics.false_negatives": metrics_comment,
        "tf.metrics.false_negatives_at_thresholds": metrics_comment,
        "tf.metrics.false_positives": metrics_comment,
        "tf.metrics.false_positives_at_thresholds": metrics_comment,
        "tf.metrics.mean": metrics_comment,
        "tf.metrics.mean_absolute_error": metrics_comment,
        "tf.metrics.mean_cosine_distance": metrics_comment,
        "tf.metrics.mean_iou": metrics_comment,
        "tf.metrics.mean_per_class_accuracy": metrics_comment,
        "tf.metrics.mean_relative_error": metrics_comment,
        "tf.metrics.mean_squared_error": metrics_comment,
        "tf.metrics.mean_tensor": metrics_comment,
        "tf.metrics.percentage_below": metrics_comment,
        "tf.metrics.precision": metrics_comment,
        "tf.metrics.precision_at_k": metrics_comment,
        "tf.metrics.precision_at_thresholds": metrics_comment,
        "tf.metrics.precision_at_top_k": metrics_comment,
        "tf.metrics.recall": metrics_comment,
        "tf.metrics.recall_at_k": metrics_comment,
        "tf.metrics.recall_at_thresholds": metrics_comment,
        "tf.metrics.recall_at_top_k": metrics_comment,
        "tf.metrics.root_mean_squared_error": metrics_comment,
        "tf.metrics.sensitivity_at_specificity": metrics_comment,
        "tf.metrics.sparse_average_precision_at_k": metrics_comment,
        "tf.metrics.sparse_precision_at_k": metrics_comment,
        "tf.metrics.specificity_at_sensitivity": metrics_comment,
        "tf.metrics.true_negatives": metrics_comment,
        "tf.metrics.true_negatives_at_thresholds": metrics_comment,
        "tf.metrics.true_positives": metrics_comment,
        "tf.metrics.true_positives_at_thresholds": metrics_comment,
    }

    # Warnings that are emitted only if a specific arg is found.
    self.function_arg_warnings = {
        "tf.gradients": {
            ("colocate_gradients_with_ops", 4):
                "tf.gradients no longer takes "
                "'colocate_gradients_with_ops' argument, it behaves as if it "
                "was set to True.",
        },
        "*.minimize": {
            ("colocate_gradients_with_ops", 5):
                "Optimizer.minimize no longer takes "
                "'colocate_gradients_with_ops' argument, it behaves as if it "
                "was set to True.",
        },
        "*.compute_gradients": {
            ("colocate_gradients_with_ops", 4):
                "Optimizer.compute_gradients no "
                "longer takes 'colocate_gradients_with_ops' argument, it "
                "behaves as if it was set to True.",
        },
        "tf.cond": {
            ("strict", 3):
                "tf.cond no longer takes 'strict' argument, it behaves as "
                "if it was set to True."
        },
    }

    self.symbol_renames = {
        name: new_name
        for name, new_name in self.symbol_renames.items()
    }

  @staticmethod
  def _dropout_transformer(parent, node, full_name, name, logs, errors):
    def _replace_keep_prob_node(parent, old_value):
      """Replaces old_value with 1-(old_value)."""
      one = ast.Num(n=1)
      one.lineno = 0
      one.col_offset = 0
      new_value = ast.BinOp(left=one, op=ast.Sub(), right=old_value)
      # This copies the prefix and suffix on old_value to new_value.
      pasta.ast_utils.replace_child(parent, old_value, new_value)
      ast.copy_location(new_value, old_value)
      # Put parentheses around keep_prob.value (and remove the old prefix/
      # suffix, they should only be around new_value).
      pasta.base.formatting.set(old_value, "prefix", "(")
      pasta.base.formatting.set(old_value, "suffix", ")")

    # Check if we have a keep_prob keyword arg
    for keep_prob in node.keywords:
      if keep_prob.arg == "keep_prob":
        logs.append((node.lineno, node.col_offset,
                     "Changing keep_prob arg of tf.nn.dropout to rate, and "
                     "recomputing value. Please check this transformation.\n"))
        keep_prob.arg = "rate"
        _replace_keep_prob_node(keep_prob, keep_prob.value)
        return node

    # Maybe it was a positional arg
    if len(node.args) < 2:
      errors.append((node.lineno, node.col_offset,
                     "ERROR: tf.nn.dropout called without arguments, so "
                     "automatic fix was disabled. tf.nn.dropout has changed "
                     "the semantics of the second argument."))
    else:
      _replace_keep_prob_node(node, node.args[1])
      logs.append((node.lineno, node.col_offset,
                   "Changing keep_prob arg of tf.nn.dropout to rate, and "
                   "recomputing value.\n"))
      errors.append((node.lineno, node.col_offset,
                     "WARNING: tf.nn.dropout has changed the semantics of the "
                     "second argument. Please check the applied transformation."))
    return node

  @staticmethod
  def _cast_transformer(parent, node, full_name, name, logs, errors):
    """Transforms to_int and to_float to cast(..., dtype=...)."""

    # Find out the dtype to cast to from the function name
    dtype_str = name[3:]
    # Special cases where the full dtype is not given
    if dtype_str == "float":
      dtype_str = "float32"
    elif dtype_str == "double":
      dtype_str = "float64"
    new_arg = ast.keyword(arg="dtype",
                          value=ast.Attribute(value=ast.Name(id="tf",
                                                             ctx=ast.Load()),
                                              attr=dtype_str, ctx=ast.Load()))
    # Ensures a valid transformation when a positional name arg is given
    if len(node.args) == 2:
      name_arg = ast.keyword(arg="name", value=node.args[-1])
      node.args = node.args[:-1]
      node.keywords.append(name_arg)

    # Python3 ast requires the args for the Attribute, but codegen will mess up
    # the arg order if we just set them to 0.
    new_arg.value.lineno = node.lineno
    new_arg.value.col_offset = node.col_offset+100

    node.keywords.append(new_arg)
    if isinstance(node.func, ast.Attribute):
      node.func.attr = "cast"
    else:
      assert isinstance(node.func, ast.Name)
      node.func.id = "cast"

    logs.append((node.lineno, node.col_offset,
                 "Changed %s call to tf.cast(..., dtype=tf.%s)." % (full_name,
                                                                    dtype_str)))
    return node

  @staticmethod
  def _softmax_cross_entropy_with_logits_transformer(
      parent, node, full_name, name, logs, errors):
    def _wrap_label(parent, old_value):
      """Wrap labels with tf.stop_gradient."""
      if six.PY3:
        new_value = ast.Call(
            ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
            [old_value], [])
      else:
        new_value = ast.Call(
            ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
            [old_value], [], None, None)

      # This copies the prefix and suffix on old_value to new_value.
      pasta.ast_utils.replace_child(parent, old_value, new_value)
      ast.copy_location(new_value, old_value)

    # Check if we have a labels keyword arg
    for karg in node.keywords:
      if karg.arg == "labels":
        logs.append((node.lineno, node.col_offset,
                     "Changing labels arg of "
                     "tf.nn.softmax_cross_entropy_with_logits to "
                     "tf.stop_gradient(labels). Please check this "
                     "transformation.\n"))
        _wrap_label(karg, karg.value)
        return node
    return node

  @staticmethod
  def _batch_gather_transformer(parent, node, full_name, name, logs, errors):
    # Check if the call already has a batch_dims argument
    if any([kw.arg == "batch_dims" for kw in node.keywords]):
      logs.append((node.lineno, node.col_offset,
                   "tf.batch_gather already has batch_dims argument. Neat."))
      return None

    minus_one = ast.Num(n=-1)
    minus_one.lineno = 0
    minus_one.col_offset = 0
    new_arg = ast.keyword("batch_dims", minus_one)
    node.keywords.append(new_arg)
    logs.append((node.lineno, node.col_offset,
                 "Added keyword argument batch_dims=-1 to tf.batch_gather."))
    return node

  @staticmethod
  def _image_resize_transformer(parent, node, full_name, name, logs, errors):
    """Transforms image.resize_* to image.resize(..., method=*, ...)."""
    resize_method = name[7:].upper()
    new_arg = ast.keyword(arg="method",
                          value=ast.Attribute(
                              value=ast.Attribute(
                                  value=ast.Attribute(
                                      value=ast.Name(id="tf", ctx=ast.Load()),
                                      attr="image", ctx=ast.Load()),
                                  attr="ResizeMethod", ctx=ast.Load()),
                              attr=resize_method, ctx=ast.Load()))

    # Ensures a valid transformation when a positional name arg is given
    if len(node.args) == 4:
      pos_arg = ast.keyword(arg="preserve_aspect_ratio",
                            value=node.args[-1])
      node.args = node.args[:-1]
      node.keywords.append(pos_arg)
    if len(node.args) == 3:
      pos_arg = ast.keyword(arg="align_corners", value=node.args[-1])
      node.args = node.args[:-1]
      node.keywords.append(pos_arg)

    # Python3 ast requires the args for the Attribute, but codegen will mess up
    # the arg order if we just set them to 0.
    new_arg.value.lineno = node.lineno
    new_arg.value.col_offset = node.col_offset+100

    node.keywords.append(new_arg)
    if isinstance(node.func, ast.Attribute):
      node.func.attr = "resize"
    else:
      assert isinstance(node.func, ast.Name)
      node.func.id = "resize"

    logs.append((node.lineno, node.col_offset,
                 "Changed %s call to tf.image.resize(..., "
                 "method=tf.image.ResizeMethod.%s)." % (full_name,
                                                        resize_method)))
    return node
apache-2.0
-5,698,780,922,763,350,000
36.933008
80
0.559642
false
3.814329
false
false
false
deklungel/iRulez
old/modules/telegram/telegram.py
1
12009
#!/usr/bin/env python
# Version 1.9
import sys
sys.path.append('/var/www/html/modules/libraries')
import time
import pprint
import telepot
import mysql.connector
import datetime
import iRulez_logging as logger
import paho.mqtt.client as mqtt
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton
from inspect import currentframe

file = open('/var/www/html/config.php', 'r')
debug = "DEBUG"
info = "INFO"
alert = "ALERT"

logger.printLog(info, '**** Telegram Started ****', str(logger.get_linenumber()))

for line in file:
    if "db_name" in line:
        MySQL_database = line.split('"')[3]
    elif "db_user" in line:
        MySQL_username = line.split('"')[3]
    elif "db_password" in line:
        MySQL_password = line.split('"')[3]

try:
    cnx = mysql.connector.connect(user=MySQL_username, password=MySQL_password, database=MySQL_database)
    cursor = cnx.cursor()
    query = ("SELECT Setting,value FROM Settings")
    logger.printLog(debug, query, str(logger.get_linenumber()))
    cursor.execute(query)
    for (Setting, value) in cursor:
        if Setting == "MQTT_ip_address":
            MQTT_ip_address = value
        elif Setting == "MQTT_port_python":
            MQTT_port = int(value)
        elif Setting == "BotID":
            BotIDTmp = value
        elif Setting == "TokenBOT":
            BotToken = value
        elif Setting == "NotificationSnooze":
            NotificationSnooze = value
        elif Setting == "TimeBetweenNotification":
            TimeBetweenNotification = value
        elif Setting == "Notification Method":
            NotificationMethod = value
    if BotToken == "":
        raise Exception('NO BotToken provided')
except Exception as e:
    logger.printLog(alert, e, str(logger.get_linenumber()))
    raise
finally:
    cursor.close()
    cnx.close()

BotIDS = BotIDTmp.split('|')
AllLow = []
AllLowN = []


def handle(msg):
    chat_id = msg['chat']['id']
    command = msg['text']
    logger.printLog(debug, 'Got command: %s' % command, str(logger.get_linenumber()))
    logger.printLog(debug, 'Got chatID from : %s' % chat_id, str(logger.get_linenumber()))
    if str(chat_id) in BotIDS:
        if command == '/status':
            try:
                cnx = mysql.connector.connect(user=MySQL_username, password=MySQL_password, database=MySQL_database)
                cursor = cnx.cursor()
                query = ("SELECT naam, arduino, pin FROM Core_Arduino_Outputs WHERE Status = 'ON' AND telegram = '1'")
                logger.printLog(debug, str(query), str(logger.get_linenumber()))
                cursor.execute(query)
                NotificationList = []
                for (naam, arduino, pin) in cursor:
                    NotificationList.append([naam, arduino, pin])
            except Exception as e:
                logger.printLog(alert, e, str(logger.get_linenumber()))
                raise
            finally:
                cursor.close()
                cnx.close()
            KeyBoardArray = []
            if len(NotificationList) > 0:
                Message = "Following lights are on"
            else:
                Message = "No lights are on!"
            global AllLow
            AllLow = []
            for Notication in NotificationList:
                text = str(Notication[0])
                callback = NotificationMethod + '|Low|;' + str(Notication[0]) + ';' + str(Notication[1]) + ';' + str(Notication[2])
                KeyBoardArray.append([InlineKeyboardButton(text=str(text), callback_data=str(callback))])
                AllLow.append([Notication[1], Notication[2]])
            if len(NotificationList) > 1:
                text = "* Alles uit *"
                callback = NotificationMethod + '|Low|AllLow'
                KeyBoardArray.append([InlineKeyboardButton(text=str(text), callback_data=str(callback))])
            markup = InlineKeyboardMarkup(inline_keyboard=KeyBoardArray)
            logger.printLog(debug, "status has been sent to " + str(chat_id), str(logger.get_linenumber()))
            bot.sendMessage(chat_id, Message, reply_markup=markup)
        elif command == '/enroll':
            hide_keyboard = {'hide_keyboard': True}
            text = 'Give this ID to your iRulez Administrator: ' + str(chat_id)
            logger.printLog(debug, "Enrollment has been sent to " + str(chat_id), str(logger.get_linenumber()))
            bot.sendMessage(chat_id, text, reply_markup=hide_keyboard)
        # elif command == '/time':
        #     show_keyboard = {'keyboard': [['Yes','No']]}
        #     hide_keyboard = {'hide_keyboard': True}
        #     bot.sendMessage(chat_id, 'This is a custom keyboard', reply_markup=hide_keyboard)


def on_callback_query(msg):
    query_id, from_id, data = telepot.glance(msg, flavor='callback_query')
    logger.printLog(debug, 'Callback query:' + str(query_id) + ' ' + str(from_id) + ' ' + str(data), str(logger.get_linenumber()))
    actionsArr = data.split('|')
    global AllLowN
    global AllLow
    relais = actionsArr[2].split(';')
    if actionsArr[1] == "Low":
        if relais[0] == "AllLow":
            tmpText = "All Lights are out"
            for relais in AllLow:
                topic = "arduino" + str(relais[0]) + "/relais" + str(relais[1]) + "/action"
                payload = "L"
                logger.printLog(debug, "Publish: " + topic + ":" + payload, str(logger.get_linenumber()))
                mqttc.publish(topic, payload, 0, False)
        elif relais[0] == "AllLowN":
            tmpText = "All Lights are out"
            for relais in AllLowN:
                topic = "arduino" + str(relais[1]) + "/relais" + str(relais[2]) + "/action"
                payload = "L"
                logger.printLog(debug, "Publish: " + topic + ":" + payload, str(logger.get_linenumber()))
                mqttc.publish(topic, payload, 0, False)
        else:
            tmpText = relais[1] + " out"
            topic = "arduino" + str(relais[2]) + "/relais" + str(relais[3]) + "/action"
            payload = "L"
            logger.printLog(debug, "Publish: " + topic + ":" + payload, str(logger.get_linenumber()))
            mqttc.publish(topic, payload, 0, False)
    elif actionsArr[1] == "Ignore":
        try:
            cnx = mysql.connector.connect(user=MySQL_username, password=MySQL_password, database=MySQL_database)
            cursor = cnx.cursor()
            for relais in AllLowN:
                query = ("UPDATE Core_Arduino_Outputs SET notification_dismiss = 1 WHERE id=" + str(relais[0]))
                logger.printLog(debug, query, str(logger.get_linenumber()))
                cursor.execute(query)
                cnx.commit()
        except Exception as e:
            logger.printLog(alert, e, str(logger.get_linenumber()))
            raise
        finally:
            cursor.close()
            cnx.close()
        tmpText = "Notification ignored"
    elif actionsArr[1] == "Snooze":
        Time = datetime.datetime.now() + datetime.timedelta(seconds=int(NotificationSnooze) * 60)
        try:
            cnx = mysql.connector.connect(user=MySQL_username, password=MySQL_password, database=MySQL_database)
            cursor = cnx.cursor()
            for relais in AllLowN:
                query = ("UPDATE Core_Arduino_Outputs SET notification_snooze = '" + str(Time) + "' WHERE id=" + str(relais[0]))
                logger.printLog(debug, query, str(logger.get_linenumber()))
                cursor.execute(query)
                cnx.commit()
        except Exception as e:
            logger.printLog(alert, e, str(logger.get_linenumber()))
            raise
        finally:
            cursor.close()
            cnx.close()
        tmpText = "Notifications Snoozed for " + str(NotificationSnooze) + "min"
    if actionsArr[0] == 'notification':
        logger.printLog(debug, "Notification has been sent to " + str(query_id), str(logger.get_linenumber()))
        bot.answerCallbackQuery(query_id, text=tmpText)
    elif actionsArr[0] == 'alert':
        logger.printLog(debug, "Alert has been sent to " + str(query_id), str(logger.get_linenumber()))
        bot.answerCallbackQuery(query_id, text=tmpText, show_alert=True)


def on_connect(mqttc, obj, rc):
    logger.printLog(debug, "rc: " + str(rc), str(logger.get_linenumber()))


def on_message(mqttc, obj, msg):
    hide_keyboard = {'hide_keyboard': True}
    for BotID in BotIDS:
        logger.printLog(debug, "Notification message has been sent to " + BotID, str(logger.get_linenumber()))
        bot.sendMessage(int(BotID), str(msg.payload), reply_markup=hide_keyboard)


def on_publish(mqttc, obj, mid):
    logger.printLog(debug, "Publish: " + str(mid), str(logger.get_linenumber()))


def on_subscribe(mqttc, obj, mid, granted_qos):
    logger.printLog(debug, "Subscribed: " + str(mid) + " " + str(granted_qos), str(logger.get_linenumber()))


def on_log(mqttc, obj, level, string):
    logger.printLog(debug, string, str(logger.get_linenumber()))


def on_disconnect(client, userdata, rc):
    logger.printLog(info, "on_disconnect!", str(logger.get_linenumber()))
    exit()


def checkRelay():
    try:
        cnx = mysql.connector.connect(user=MySQL_username, password=MySQL_password, database=MySQL_database)
        cursor = cnx.cursor()
        query = ("SELECT id, naam, status_time, notification, arduino, pin, notification_snooze FROM Core_Arduino_Outputs WHERE notification IS NOT NULL AND notification <> '' AND notification_dismiss = 0 AND status = 'ON' ")
        logger.printLog(debug, str(query), str(logger.get_linenumber()))
        cursor.execute(query)
        NotificationList = []
        for (id, naam, Time_on, notification, arduino, pin, snooze) in cursor:
            logger.printLog(debug, 'Found Record: %s' % naam, str(logger.get_linenumber()))
            Time = datetime.datetime.now()
            time_delta = (Time - Time_on).total_seconds()
            if (int(time_delta) > int(notification)):
                if snooze is None:
                    logger.printLog(debug, 'Add : %s to notification list NOT SNOOZED' % naam, str(logger.get_linenumber()))
                    NotificationList.append([id, naam, arduino, pin])
                else:
                    if snooze < Time:
                        logger.printLog(debug, 'Add : %s to notification list SNOOZED' % naam, str(logger.get_linenumber()))
                        NotificationList.append([id, naam, arduino, pin])
    except Exception as e:
        logger.printLog(alert, e, str(logger.get_linenumber()))
        raise
    finally:
        cursor.close()
        cnx.close()
    if len(NotificationList) > 0:
        KeyBoardArray = []
        Message = "Following lights are on"
        global AllLowN
        AllLowN = []
        for Notication in NotificationList:
            text = str(Notication[1])
            callback = NotificationMethod + '|Low|' + str(Notication[0]) + ';' + str(Notication[1]) + ';' + str(Notication[2]) + ';' + str(Notication[3])
            KeyBoardArray.append([InlineKeyboardButton(text=str(text), callback_data=str(callback))])
            AllLowN.append([Notication[0], Notication[2], Notication[3]])
        if len(NotificationList) > 1:
            text = "* Alles uit *"
            callback = NotificationMethod + '|Low|AllLowN'
            KeyBoardArray.append([InlineKeyboardButton(text=str(text), callback_data=str(callback))])
        text = "* Ignore *"
        callback = NotificationMethod + '|Ignore|'
        KeyBoardArray.append([InlineKeyboardButton(text=str(text), callback_data=str(callback))])
        text = "* Snooze " + str(NotificationSnooze) + "min *"
        callback = NotificationMethod + '|Snooze|'
        KeyBoardArray.append([InlineKeyboardButton(text=str(text), callback_data=str(callback))])
        markup = InlineKeyboardMarkup(inline_keyboard=KeyBoardArray)
        for BotID in BotIDS:
            logger.printLog(debug, "Notification message has been sent to " + BotID, str(logger.get_linenumber()))
            bot.sendMessage(int(BotID), Message, reply_markup=markup)
        Time = datetime.datetime.now() + datetime.timedelta(seconds=int(TimeBetweenNotification) * 60)
        try:
            cnx = mysql.connector.connect(user=MySQL_username, password=MySQL_password, database=MySQL_database)
            cursor = cnx.cursor()
            for relais in AllLowN:
                query = ("UPDATE Core_Arduino_Outputs SET notification_snooze = '" + str(Time) + "' WHERE id=" + str(relais[0]))
                logger.printLog(debug, query, str(logger.get_linenumber()))
                cursor.execute(query)
                cnx.commit()
        except Exception as e:
            logger.printLog(alert, e, str(logger.get_linenumber()))
            raise
        finally:
            cursor.close()
            cnx.close()
        # tmpText = "Notifications Snoozed for " + str(NotificationSnooze) + "min"


bot = telepot.Bot(BotToken)
bot.message_loop({'chat': handle, 'callback_query': on_callback_query})
logger.printLog(info, "Listening ...", str(logger.get_linenumber()))

# Keep the program running.
mqttc = mqtt.Client()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
mqttc.on_disconnect = on_disconnect
# Uncomment to enable debug messages
# mqttc.on_log = on_log

running = True
while running:
    try:
        mqttc.connect(MQTT_ip_address, int(MQTT_port), 60)
        running = False
    except:
        logger.printLog(alert, "Sleep", str(logger.get_linenumber()))
        time.sleep(5)

logger.printLog(info, "Connected", str(logger.get_linenumber()))
mqttc.subscribe("Telegram/Message", 0)

counter = int(time.time())
while True:
    mqttc.loop()
    if (counter + 10 <= int(time.time())):
        checkRelay()
        counter = int(time.time())
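A small decoding sketch for the pipe/semicolon callback_data format built in handle() and checkRelay() above; the sample string is hypothetical but follows the NotificationMethod + '|Low|id;naam;arduino;pin' layout used in checkRelay():

data = "notification|Low|12;Keuken;3;7"  # hypothetical callback_data
method, action, payload = data.split('|')
relais = payload.split(';')
assert method == "notification" and action == "Low"
assert relais == ['12', 'Keuken', '3', '7']  # id, naam, arduino, pin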
mit
-7,143,340,784,685,586,000
35.840491
219
0.696394
false
3.0015
false
false
false
efiop/dvc
dvc/repo/get.py
1
1920
import logging
import os

from dvc.exceptions import DvcException
from dvc.path_info import PathInfo
from dvc.utils import resolve_output
from dvc.utils.fs import remove

logger = logging.getLogger(__name__)


class GetDVCFileError(DvcException):
    def __init__(self):
        super().__init__(
            "the given path is a DVC file, you must specify a data file "
            "or a directory"
        )


def get(url, path, out=None, rev=None, jobs=None):
    import shortuuid

    from dvc.dvcfile import is_valid_filename
    from dvc.external_repo import external_repo

    out = resolve_output(path, out)

    if is_valid_filename(out):
        raise GetDVCFileError()

    # Creating a directory right beside the output to make sure that they
    # are on the same filesystem, so we could take the advantage of
    # reflink and/or hardlink. Not using tempfile.TemporaryDirectory
    # because it will create a symlink to tmpfs, which defeats the purpose
    # and won't work with reflink/hardlink.
    dpath = os.path.dirname(os.path.abspath(out))
    tmp_dir = os.path.join(dpath, "." + str(shortuuid.uuid()))

    # Try any links possible to avoid data duplication.
    #
    # Not using symlink, because we need to remove cache after we
    # are done, and to make that work we would have to copy data
    # over anyway before removing the cache, so we might just copy
    # it right away.
    #
    # Also, we can't use theoretical "move" link type here, because
    # the same cache file might be used a few times in a directory.
    cache_types = ["reflink", "hardlink", "copy"]
    try:
        with external_repo(
            url=url, rev=rev, cache_dir=tmp_dir, cache_types=cache_types
        ) as repo:
            from_info = PathInfo(repo.root_dir) / path
            to_info = PathInfo(out)
            repo.repo_fs.download(from_info, to_info, jobs=jobs)
    finally:
        remove(tmp_dir)
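A hedged usage sketch for the get() helper above; the repository URL and file paths are illustrative only:

from dvc.repo.get import get

# Download data/data.xml from a DVC repository into ./data.xml,
# letting get() pick reflink/hardlink/copy as available.
get(
    "https://github.com/iterative/example-get-started",  # illustrative URL
    "data/data.xml",
    out="data.xml",
    rev=None,   # default branch
    jobs=None,  # default parallelism
)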
apache-2.0
8,423,483,636,947,428,000
32.684211
74
0.663542
false
3.713733
false
false
false
GoogleCloudPlatform/iot-core-micropython
third_party/rsa/cli.py
1
9382
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Commandline scripts.

These scripts are called by the executables defined in setup.py.
"""

from __future__ import with_statement, print_function

import abc
import sys
from optparse import OptionParser

import third_party.rsa
import third_party.rsa.pkcs1

HASH_METHODS = sorted(third_party.rsa.pkcs1.HASH_METHODS.keys())


def keygen():
    """Key generator."""

    # Parse the CLI options
    parser = OptionParser(usage='usage: %prog [options] keysize',
                          description='Generates a new RSA keypair of "keysize" bits.')

    parser.add_option('--pubout', type='string',
                      help='Output filename for the public key. The public key is '
                           'not saved if this option is not present. You can use '
                           'pyrsa-priv2pub to create the public key file later.')

    parser.add_option('-o', '--out', type='string',
                      help='Output filename for the private key. The key is '
                           'written to stdout if this option is not present.')

    parser.add_option('--form',
                      help='key format of the private and public keys - default PEM',
                      choices=('PEM', 'DER'), default='PEM')

    (cli, cli_args) = parser.parse_args(sys.argv[1:])

    if len(cli_args) != 1:
        parser.print_help()
        raise SystemExit(1)

    try:
        keysize = int(cli_args[0])
    except ValueError:
        parser.print_help()
        print('Not a valid number: %s' % cli_args[0], file=sys.stderr)
        raise SystemExit(1)

    print('Generating %i-bit key' % keysize, file=sys.stderr)
    (pub_key, priv_key) = third_party.rsa.newkeys(keysize)

    # Save public key
    if cli.pubout:
        print('Writing public key to %s' % cli.pubout, file=sys.stderr)
        data = pub_key.save_pkcs1(format=cli.form)
        with open(cli.pubout, 'wb') as outfile:
            outfile.write(data)

    # Save private key
    data = priv_key.save_pkcs1(format=cli.form)

    if cli.out:
        print('Writing private key to %s' % cli.out, file=sys.stderr)
        with open(cli.out, 'wb') as outfile:
            outfile.write(data)
    else:
        print('Writing private key to stdout', file=sys.stderr)
        third_party.rsa._compat.write_to_stdout(data)


class CryptoOperation(object):
    """CLI callable that operates with input, output, and a key."""

    __metaclass__ = abc.ABCMeta

    keyname = 'public'  # or 'private'
    usage = 'usage: %%prog [options] %(keyname)s_key'
    description = None
    operation = 'decrypt'
    operation_past = 'decrypted'
    operation_progressive = 'decrypting'
    input_help = 'Name of the file to %(operation)s. Reads from stdin if ' \
                 'not specified.'
    output_help = 'Name of the file to write the %(operation_past)s file ' \
                  'to. Written to stdout if this option is not present.'
    expected_cli_args = 1
    has_output = True

    key_class = third_party.rsa.PublicKey

    def __init__(self):
        self.usage = self.usage % self.__class__.__dict__
        self.input_help = self.input_help % self.__class__.__dict__
        self.output_help = self.output_help % self.__class__.__dict__

    @abc.abstractmethod
    def perform_operation(self, indata, key, cli_args):
        """Performs the program's operation.

        Implement in a subclass.

        :returns: the data to write to the output.
        """

    def __call__(self):
        """Runs the program."""

        (cli, cli_args) = self.parse_cli()

        key = self.read_key(cli_args[0], cli.keyform)

        indata = self.read_infile(cli.input)

        print(self.operation_progressive.title(), file=sys.stderr)
        outdata = self.perform_operation(indata, key, cli_args)

        if self.has_output:
            self.write_outfile(outdata, cli.output)

    def parse_cli(self):
        """Parse the CLI options

        :returns: (cli_opts, cli_args)
        """

        parser = OptionParser(usage=self.usage, description=self.description)

        parser.add_option('-i', '--input', type='string', help=self.input_help)

        if self.has_output:
            parser.add_option('-o', '--output', type='string', help=self.output_help)

        parser.add_option('--keyform',
                          help='Key format of the %s key - default PEM' % self.keyname,
                          choices=('PEM', 'DER'), default='PEM')

        (cli, cli_args) = parser.parse_args(sys.argv[1:])

        if len(cli_args) != self.expected_cli_args:
            parser.print_help()
            raise SystemExit(1)

        return cli, cli_args

    def read_key(self, filename, keyform):
        """Reads a public or private key."""

        print('Reading %s key from %s' % (self.keyname, filename), file=sys.stderr)

        with open(filename, 'rb') as keyfile:
            keydata = keyfile.read()

        return self.key_class.load_pkcs1(keydata, keyform)

    def read_infile(self, inname):
        """Read the input file"""

        if inname:
            print('Reading input from %s' % inname, file=sys.stderr)
            with open(inname, 'rb') as infile:
                return infile.read()

        print('Reading input from stdin', file=sys.stderr)
        return sys.stdin.read()

    def write_outfile(self, outdata, outname):
        """Write the output file"""

        if outname:
            print('Writing output to %s' % outname, file=sys.stderr)
            with open(outname, 'wb') as outfile:
                outfile.write(outdata)
        else:
            print('Writing output to stdout', file=sys.stderr)
            third_party.rsa._compat.write_to_stdout(outdata)


class EncryptOperation(CryptoOperation):
    """Encrypts a file."""

    keyname = 'public'
    description = ('Encrypts a file. The file must be shorter than the key '
                   'length in order to be encrypted.')
    operation = 'encrypt'
    operation_past = 'encrypted'
    operation_progressive = 'encrypting'

    def perform_operation(self, indata, pub_key, cli_args=None):
        """Encrypts files."""
        return third_party.rsa.encrypt(indata, pub_key)


class DecryptOperation(CryptoOperation):
    """Decrypts a file."""

    keyname = 'private'
    description = ('Decrypts a file. The original file must be shorter than '
                   'the key length in order to have been encrypted.')
    operation = 'decrypt'
    operation_past = 'decrypted'
    operation_progressive = 'decrypting'
    key_class = third_party.rsa.PrivateKey

    def perform_operation(self, indata, priv_key, cli_args=None):
        """Decrypts files."""
        return third_party.rsa.decrypt(indata, priv_key)


class SignOperation(CryptoOperation):
    """Signs a file."""

    keyname = 'private'
    usage = 'usage: %%prog [options] private_key hash_method'
    description = ('Signs a file, outputs the signature. Choose the hash '
                   'method from %s' % ', '.join(HASH_METHODS))
    operation = 'sign'
    operation_past = 'signature'
    operation_progressive = 'Signing'
    key_class = third_party.rsa.PrivateKey
    expected_cli_args = 2

    output_help = ('Name of the file to write the signature to. Written '
                   'to stdout if this option is not present.')

    def perform_operation(self, indata, priv_key, cli_args):
        """Signs files."""

        hash_method = cli_args[1]
        if hash_method not in HASH_METHODS:
            raise SystemExit('Invalid hash method, choose one of %s' %
                             ', '.join(HASH_METHODS))

        return third_party.rsa.sign(indata, priv_key, hash_method)


class VerifyOperation(CryptoOperation):
    """Verify a signature."""

    keyname = 'public'
    usage = 'usage: %%prog [options] public_key signature_file'
    description = ('Verifies a signature, exits with status 0 upon success, '
                   'prints an error message and exits with status 1 upon error.')
    operation = 'verify'
    operation_past = 'verified'
    operation_progressive = 'Verifying'
    key_class = third_party.rsa.PublicKey
    expected_cli_args = 2
    has_output = False

    def perform_operation(self, indata, pub_key, cli_args):
        """Verifies files."""

        signature_file = cli_args[1]

        with open(signature_file, 'rb') as sigfile:
            signature = sigfile.read()

        try:
            third_party.rsa.verify(indata, signature, pub_key)
        except third_party.rsa.VerificationError:
            raise SystemExit('Verification failed.')

        print('Verification OK', file=sys.stderr)


encrypt = EncryptOperation()
decrypt = DecryptOperation()
sign = SignOperation()
verify = VerifyOperation()
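A short round-trip sketch of the library calls these CLI classes wrap (newkeys/encrypt/decrypt, as used in the perform_operation methods above); the 512-bit key size and the import alias are illustrative and assume this vendored third_party package layout:

import third_party.rsa as rsa

pub_key, priv_key = rsa.newkeys(512)        # toy key size, not for production
ciphertext = rsa.encrypt(b'hello', pub_key)
assert rsa.decrypt(ciphertext, priv_key) == b'hello'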
apache-2.0
-8,427,974,887,891,780,000
31.572917
87
0.612941
false
3.874845
false
false
false
rachekalmir/pyJolt
pyjolt/util/tree_manager.py
1
6011
import itertools
from collections import defaultdict
from typing import Union, List, Dict

from pyjolt.exceptions import JoltException


def pairwise(iterable):
    """s -> (s0,s1), (s1,s2), (s2, s3), ..."""
    a, b = itertools.tee(iterable)
    next(b, None)
    return itertools.zip_longest(a, b)


def type_generator(item):
    if isinstance(item, str):
        return {}
    elif isinstance(item, int):
        return []
    raise JoltException()


def id_generator():
    """Generator function to generate numbers from 0 onwards"""
    start_value = 0
    while True:
        yield start_value
        start_value += 1


class AutoDefaultDict(defaultdict):
    """Default dictionary that calls the specified function to get the new value."""

    def __init__(self, f_of_x):
        super().__init__(None)  # Create the base defaultdict class with no default
        self.f_of_x = f_of_x  # Save the function

    def __missing__(self, key):  # __missing__ is called when a default value is needed
        ret = next(self.f_of_x)  # Calculate default value
        self[key] = ret  # Save the default value in the local dictionary
        return ret


class ResultManager(object):
    def __init__(self):
        self._data = {}

    def assign(self, path_list: list, value):
        dv = self._data
        for item, next_item in pairwise(path_list):
            if next_item is None:
                # If next_item is None then this is where the assignment to the value will take place
                if isinstance(dv, list):
                    if len(dv) <= item:
                        # If the current array is too short for the requested assignment,
                        # pad the array with Nones
                        dv += [None] * (item + 1 - len(dv))
                    dv[item] = value
                elif isinstance(dv, dict) and dv.get(item) is not None:
                    if isinstance(dv[item], list):
                        dv[item] += [value]
                    else:
                        dv[item] = [dv[item], value]
                else:
                    dv[item] = value
                break
            elif isinstance(dv, list) and len(dv) <= item:
                # Special case for array indexing to extend the array thereby ensuring no
                # IndexOutOfBounds exception is encountered
                dv += [None] * (item + 1 - len(dv))

            if isinstance(dv, dict) and dv.get(item) is not None:
                dv = dv[item]
            elif isinstance(dv, list) and len(dv) > item and dv[item] is not None:
                dv = dv[item]
            else:
                dv[item] = dv = type_generator(next_item)


class PropertyHolder(object):
    def __init__(self, matches: list = None):
        self.matches = [] if matches is None else matches
        self.array_bind = AutoDefaultDict(id_generator())

    def __repr__(self):
        return 'PropertyHolder({matches})'.format(matches=self.matches)


class PropertyManager(object):
    def __init__(self):
        self._properties = {}

    def __getitem__(self, key: Union[tuple, list]) -> PropertyHolder:
        key = tuple(key) if isinstance(key, list) else key
        v = self._properties.get(key)
        if not isinstance(v, PropertyHolder):
            v = self._properties[key] = PropertyHolder()
        return v

    # def __setitem__(self, key, value):
    #     self._properties[tuple(key) if isinstance(key, list) else key] = value


class Tree(object):
    """
    A recursive dictionary type object with tree context.
    """

    def __init__(self, dictionary: dict):
        self.dictionary = dictionary

    def __getitem__(self, item):
        return self.dictionary[item]

    def __repr__(self):
        return "Tree(" + repr(self.dictionary) + ")"


class TreeManager(object):
    """
    Manager object to keep track of where you are in a dictionary tree object.

    self._tree is the full tree
    self.path is the current path in the object
    self._dict is the local cached object computed using self.path on self._tree
    """

    def __init__(self, tree: Union[Tree, Dict], path: List[str]):
        self._tree = tree if isinstance(tree, Tree) else Tree(tree)
        self.path = path

        self._dict = self._tree.dictionary
        for i in path:
            if isinstance(self._dict, dict):
                self._dict = self._dict[i]
            elif isinstance(self._dict, list):
                self._dict = self._dict[int(i)]
            elif self._dict == i:
                self._dict = None
            else:
                raise KeyError()

    def __getitem__(self, item: str):
        # type: (...) -> TreeManager
        return TreeManager(self._tree, self.path + [item])

    def __iter__(self):
        if isinstance(self._dict, dict):
            for key in self._dict.keys():
                yield key, TreeManager(self._tree, self.path + [key])
        elif isinstance(self._dict, list):
            for index, _ in enumerate(self._dict):
                yield TreeManager(self._tree, self.path + [index])
        else:
            raise JoltException()

    def __repr__(self):
        return 'TreeManager(' + repr(self.current_key) + ', ' + repr(self._dict) + ')'

    def keys(self):
        if isinstance(self._dict, dict):
            return self._dict.keys()
        elif isinstance(self._dict, list):
            return range(len(self._dict))
        return [self._dict]

    @property
    def current_key(self):
        return self.path[-1] if self.path else None

    @property
    def value(self):
        return self._dict

    def ascend(self, levels: int):
        # type(...) -> DictWalker:
        if levels == 0:
            return self
        if levels < 0:
            # TODO raise exception here
            pass
        return TreeManager(self._tree, self.path[:-levels])

    def descend(self, key: Union[str, list]):
        # type(...) -> DictWalker:
        return TreeManager(self._tree, self.path + (key if isinstance(key, list) else [key]))
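A worked example of ResultManager.assign from the module above: string path segments create dicts, integer segments create (and pad) lists:

rm = ResultManager()
rm.assign(["a", 0, "b"], 42)
rm.assign(["a", 2, "b"], 7)   # index 2 forces padding with None at index 1
# rm._data == {"a": [{"b": 42}, None, {"b": 7}]}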
apache-2.0
-7,570,709,542,070,465,000
31.491892
131
0.562635
false
4.105874
false
false
false
BirkbeckCTP/janeway
src/review/views.py
1
87770
__copyright__ = "Copyright 2017 Birkbeck, University of London" __author__ = "Martin Paul Eve & Andy Byers" __license__ = "AGPL v3" __maintainer__ = "Birkbeck Centre for Technology and Publishing" from uuid import uuid4 from collections import Counter from datetime import timedelta from django.contrib import messages from django.urls import reverse from django.shortcuts import render, get_object_or_404, redirect from django.db.models import Q from django.utils import timezone from django.http import Http404 from django.core.exceptions import PermissionDenied from django.conf import settings from urllib import parse from django.views.decorators.http import require_POST from django.http import HttpResponse, JsonResponse from core import models as core_models, files, forms as core_forms from events import logic as event_logic from review import models, logic, forms, hypothesis from security.decorators import ( editor_user_required, reviewer_user_required, reviewer_user_for_assignment_required, file_user_required, article_decision_not_made, article_author_required, editor_is_not_author, senior_editor_user_required, section_editor_draft_decisions, article_stage_review_required ) from submission import models as submission_models, forms as submission_forms from utils import models as util_models, ithenticate, shared, setting_handler from utils.logger import get_logger logger = get_logger(__name__) @senior_editor_user_required def home(request): """ Displays a list of review articles. :param request: HttpRequest object :return: HttpResponse """ articles = submission_models.Article.objects.filter( Q(stage=submission_models.STAGE_ASSIGNED) | Q(stage=submission_models.STAGE_UNDER_REVIEW) | Q(stage=submission_models.STAGE_UNDER_REVISION), journal=request.journal ) filter = request.GET.get('filter', None) if filter == 'me': assignments = models.EditorAssignment.objects.filter(article__journal=request.journal, editor=request.user) assignment_article_pks = [assignment.article.pk for assignment in assignments] articles = articles.filter(pk__in=assignment_article_pks) template = 'review/home.html' context = { 'articles': articles, 'filter': filter, } return render(request, template, context) @senior_editor_user_required def unassigned(request): """ Displays a list of unassigned articles. :param request: HttpRequest object :return: HttpResponse """ articles = submission_models.Article.objects.filter(stage=submission_models.STAGE_UNASSIGNED, journal=request.journal) template = 'review/unassigned.html' context = { 'articles': articles, } return render(request, template, context) @editor_user_required def unassigned_article(request, article_id): """ Displays metadata of an individual article, can send details to Crosscheck for reporting. :param request: HttpRequest object :param article_id: Article PK :return: HttpResponse or Redirect if POST """ article = get_object_or_404(submission_models.Article, pk=article_id) if article.ithenticate_id and not article.ithenticate_score: ithenticate.fetch_percentage(request.journal, [article]) if 'crosscheck' in request.POST: file_id = request.POST.get('crosscheck') file = get_object_or_404(core_models.File, pk=file_id) try: id = ithenticate.send_to_ithenticate(article, file) article.ithenticate_id = id article.save() except AssertionError: messages.add_message( request, messages.ERROR, 'Error returned by iThenticate. 
' 'Check login details and API status.', ) return redirect( reverse( 'review_unassigned_article', kwargs={'article_id': article.pk}, ) ) current_editors = [assignment.editor.pk for assignment in models.EditorAssignment.objects.filter(article=article)] editors = core_models.AccountRole.objects.filter( role__slug='editor', journal=request.journal).exclude(user__id__in=current_editors) section_editors = core_models.AccountRole.objects.filter( role__slug='section-editor', journal=request.journal ).exclude(user__id__in=current_editors) template = 'review/unassigned_article.html' context = { 'article': article, 'editors': editors, 'section_editors': section_editors, } return render(request, template, context) @editor_user_required def add_projected_issue(request, article_id): """ Allows an editor to add a projected issue to an article. """ article = get_object_or_404( submission_models.Article, pk=article_id, ) form = submission_forms.ProjectedIssueForm(instance=article) if request.POST: form = submission_forms.ProjectedIssueForm( request.POST, instance=article, ) if form.is_valid(): form.save() messages.add_message( request, messages.SUCCESS, 'Projected Issue set.', ) if request.GET.get('return'): return redirect( request.GET.get('return'), ) else: return redirect( reverse( 'review_projected_issue', kwargs={'article_id': article.pk}, ) ) template = 'review/projected_issue.html' context = { 'article': article, 'form': form, } return render(request, template, context) @editor_user_required def view_ithenticate_report(request, article_id): """Allows editor to view similarity report.""" article = get_object_or_404( submission_models.Article, pk=article_id, ithenticate_id__isnull=False, ) ithenticate_url = ithenticate.fetch_url(article) if ithenticate_url: return redirect(ithenticate_url) template = 'review/ithenticate_failure.html' context = { 'article': article, } return render(request, template, context) @senior_editor_user_required def assign_editor_move_to_review(request, article_id, editor_id, assignment_type): """Allows an editor to assign another editor to an article and moves to review.""" assign_editor(request, article_id, editor_id, assignment_type, should_redirect=False) return move_to_review(request, article_id) @senior_editor_user_required def assign_editor(request, article_id, editor_id, assignment_type, should_redirect=True): """ Allows a Senior Editor to assign another editor to an article. 
:param request: HttpRequest object :param article_id: Article PK :param editor_id: Account PK :param assignment_type: string, 'section-editor' or 'editor' :param should_redirect: if true, we redirect the user to the notification page :return: HttpResponse or HttpRedirect """ article = get_object_or_404(submission_models.Article, pk=article_id) editor = get_object_or_404(core_models.Account, pk=editor_id) if not editor.has_an_editor_role(request): messages.add_message(request, messages.WARNING, 'User is not an Editor or Section Editor') return redirect(reverse('review_unassigned_article', kwargs={'article_id': article.pk})) _, created = logic.assign_editor(article, editor, assignment_type, request) messages.add_message(request, messages.SUCCESS, '{0} added as an Editor'.format(editor.full_name())) if created and should_redirect: return redirect('{0}?return={1}'.format( reverse('review_assignment_notification', kwargs={'article_id': article_id, 'editor_id': editor.pk}), request.GET.get('return'))) elif not created: messages.add_message(request, messages.WARNING, '{0} is already an Editor on this article.'.format(editor.full_name())) if should_redirect: return redirect(reverse('review_unassigned_article', kwargs={'article_id': article_id})) @senior_editor_user_required def unassign_editor(request, article_id, editor_id): """Unassigns an editor from an article""" article = get_object_or_404(submission_models.Article, pk=article_id) editor = get_object_or_404(core_models.Account, pk=editor_id) assignment = get_object_or_404( models.EditorAssignment, article=article, editor=editor ) email_content = logic.get_unassignment_notification(request, assignment) if request.method == "POST": email_content = request.POST.get('content_email') kwargs = {'message': email_content, 'assignment': assignment, 'request': request, 'skip': request.POST.get('skip', False) } event_logic.Events.raise_event( event_logic.Events.ON_ARTICLE_UNASSIGNED, **kwargs) assignment.delete() util_models.LogEntry.add_entry( types='EditorialAction', description='Editor {0} unassigned from article {1}' ''.format(editor.full_name(), article.id), level='Info', request=request, target=article, ) return redirect(reverse( 'review_unassigned_article', kwargs={'article_id': article_id} )) template = 'review/unassign_editor.html' context = { 'article': article, 'assignment': assignment, 'email_content': email_content, } return render(request, template, context) @senior_editor_user_required def assignment_notification(request, article_id, editor_id): """ A senior editor can send a notification to an assigned editor. 
:param request: HttpRequest object :param article_id: Article PK :param editor_id: Account PK :return: HttpResponse or HttpRedirect """ article = get_object_or_404(submission_models.Article, pk=article_id) editor = get_object_or_404(core_models.Account, pk=editor_id) assignment = get_object_or_404(models.EditorAssignment, article=article, editor=editor, notified=False) email_content = logic.get_assignment_content(request, article, editor, assignment) if request.POST: email_content = request.POST.get('content_email') kwargs = {'user_message_content': email_content, 'editor_assignment': assignment, 'request': request, 'skip': False, 'acknowledgement': True} if 'skip' in request.POST: kwargs['skip'] = True event_logic.Events.raise_event(event_logic.Events.ON_ARTICLE_ASSIGNED_ACKNOWLEDGE, **kwargs) if request.GET.get('return', None): return redirect(request.GET.get('return')) else: return redirect(reverse('review_unassigned_article', kwargs={'article_id': article_id})) template = 'review/assignment_notification.html' context = { 'article': article_id, 'editor': editor, 'assignment': assignment, 'email_content': email_content, } return render(request, template, context) @editor_user_required def move_to_review(request, article_id, should_redirect=True): """Moves an article into the review stage""" article = get_object_or_404(submission_models.Article, pk=article_id) if article.editorassignment_set.all().count() > 0: article.stage = submission_models.STAGE_ASSIGNED article.save() review_round, created = models.ReviewRound.objects.get_or_create(article=article, round_number=1) if not created: messages.add_message(request, messages.WARNING, 'A default review round already exists for this article.') else: messages.add_message(request, messages.INFO, 'You must assign an editor before moving into review.') if should_redirect: if request.GET.get('return', None): return redirect(request.GET.get('return')) else: return redirect("{0}?modal_id={1}".format(reverse('kanban_home'), article_id)) @editor_is_not_author @editor_user_required def in_review(request, article_id): """ Displays an article's review management page :param request: HttpRequest object :param article_id: Article PK :return: HttpResponse """ article = get_object_or_404(submission_models.Article, pk=article_id) review_rounds = models.ReviewRound.objects.filter(article=article) revisions_requests = models.RevisionRequest.objects.filter(article=article) if not review_rounds: models.ReviewRound.objects.create(article=article, round_number=1) return redirect(reverse('review_in_review', kwargs={'article_id': article.id})) if request.POST: if 'new_review_round' in request.POST: # Complete all existing review assignments. for assignment in article.current_review_round_object().reviewassignment_set.all(): if not assignment.date_complete: assignment.date_complete = timezone.now() assignment.decision = 'withdrawn' assignment.is_complete = True assignment.save() messages.add_message(request, messages.INFO, 'Assignment {0} closed.'.format(assignment.id)) kwargs = {'review_assignment': assignment, 'request': request} event_logic.Events.raise_event(event_logic.Events.ON_REVIEW_CLOSED, task_object=assignment.article, **kwargs) # Add a new review round. 
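# Every open assignment in the current round has just been withdrawn and closed,
# so the new round starts clean; the round number simply increments and the
# article returns to the Under Review stage.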
new_round_number = article.current_review_round() + 1 models.ReviewRound.objects.create(article=article, round_number=new_round_number) article.stage = submission_models.STAGE_UNDER_REVIEW article.save() return redirect(reverse('review_in_review', kwargs={'article_id': article_id})) template = 'review/in_review.html' context = { 'article': article, 'review_rounds': review_rounds, 'revisions_requests': revisions_requests, } return render(request, template, context) @editor_user_required @article_stage_review_required def send_review_reminder(request, article_id, review_id, reminder_type): """ Allows an editor to resend a review invite or manually send a reminder. :param request: HttpRequest object :param article_id: PK of an Article object :param review_id: PK of a ReviewAssignment object :param reminder_type: string, either 'request' or 'accepted' :return: HttpResponse or HttpRedirect """ article = get_object_or_404( submission_models.Article, pk=article_id, journal=request.journal, ) review_assignment = get_object_or_404( models.ReviewAssignment, pk=review_id, article=article, is_complete=False, ) # If this review has not been accepted, you cannot send an accepted # reminder, add a message and redirect. if not review_assignment.date_accepted and reminder_type == 'accepted': messages.add_message( request, messages.INFO, 'You cannot send this reminder type. Review not accepted.' ) return redirect( reverse( 'review_in_review', kwargs={'article_id': article.pk} ) ) email_content = logic.get_reminder_content( reminder_type, article, review_assignment, request ) form_initials = { 'body': email_content, 'subject': 'Review Request Reminder' } form = forms.ReviewReminderForm( initial=form_initials ) if request.POST: form = forms.ReviewReminderForm( request.POST ) if form.is_valid(): logic.send_review_reminder( request, form, review_assignment, reminder_type ) messages.add_message( request, messages.SUCCESS, 'Email sent' ) return redirect( reverse( 'review_in_review', kwargs={'article_id': article.pk} ) ) template = 'review/send_review_reminder.html' context = { 'article': article, 'assignment': review_assignment, 'form': form, } return render(request, template, context) @editor_is_not_author @editor_user_required def delete_review_round(request, article_id, round_id): """ Deletes a review round if it is not already closed. :param request: HttpRequest object :param article_id: Article PK :param round_id: Round PK :return: HttpResponse or HttpRedirect """ article = get_object_or_404(submission_models.Article, pk=article_id) review_round = get_object_or_404(models.ReviewRound, pk=round_id) if request.POST: if 'delete' in request.POST: review_round.delete() if article.is_under_revision(): article.stage = submission_models.STAGE_UNDER_REVISION article.save() messages.add_message(request, messages.INFO, 'Round {0} deleted.'.format(review_round.round_number)) return redirect(reverse('review_in_review', kwargs={'article_id': article_id})) elif not review_round.round_number == article.current_review_round(): messages.add_message(request, messages.INFO, 'Cannot delete a closed round.') return redirect(reverse('review_in_review', kwargs={'article_id': article_id})) template = 'review/delete_review_round.html' context = { 'article': article, 'round': review_round, } return render(request, template, context) @editor_is_not_author @article_decision_not_made @editor_user_required def add_files(request, article_id, round_id): """ Interface for adding files to a review round. 
:param request: HttpRequest object :param article_id: Article PK :param round_id: Round PK :return: HttpResponse or HttpRedirect """ article = get_object_or_404(submission_models.Article.objects.prefetch_related('manuscript_files'), pk=article_id) review_round = get_object_or_404(models.ReviewRound.objects.prefetch_related('review_files'), pk=round_id) if request.POST: if 'upload' in request.POST: review_files = request.FILES.getlist('review_file') if review_files: for review_file in review_files: new_file_obj = files.save_file_to_article(review_file, article, request.user, 'Review File') article.manuscript_files.add(new_file_obj) messages.add_message(request, messages.SUCCESS, 'File uploaded') else: messages.add_message(request, messages.WARNING, 'No file uploaded.') return redirect(reverse('review_add_files', kwargs={'article_id': article.pk, 'round_id': review_round.pk})) for file in request.POST.getlist('file'): file = core_models.File.objects.get(id=file) review_round.review_files.add(file) messages.add_message(request, messages.INFO, 'File {0} added.'.format(file.label)) if not request.POST.getlist('file'): messages.add_message(request, messages.WARNING, 'Please select at least one file, or press the Cancel button.') return redirect(reverse('review_in_review', kwargs={'article_id': article_id})) template = 'review/add_files.html' context = { 'article': article, 'round': review_round, } return render(request, template, context) @editor_is_not_author @article_decision_not_made @editor_user_required def remove_file(request, article_id, round_id, file_id): """Removes a file from a review round.""" article = get_object_or_404(submission_models.Article, pk=article_id) review_round = get_object_or_404(models.ReviewRound, pk=round_id) file = get_object_or_404(core_models.File, pk=file_id) if review_round.round_number == article.current_review_round(): review_round.review_files.remove(file) messages.add_message(request, messages.INFO, 'File {0} removed.'.format(file.label)) else: messages.add_message(request, messages.INFO, 'Cannot remove a file from a closed review round.') return redirect(reverse('review_in_review', kwargs={'article_id': article_id})) @reviewer_user_for_assignment_required def accept_review_request(request, assignment_id): """ Accept a review request :param request: the request object :param assignment_id: the assignment ID to handle :return: a context for a Django template """ access_code = logic.get_access_code(request) # update the ReviewAssignment object if access_code: assignment = models.ReviewAssignment.objects.get(Q(pk=assignment_id) & Q(is_complete=False) & Q(access_code=access_code) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(date_accepted__isnull=True)) else: assignment = models.ReviewAssignment.objects.get(Q(pk=assignment_id) & Q(is_complete=False) & Q(reviewer=request.user) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(date_accepted__isnull=True)) assignment.date_accepted = timezone.now() assignment.save() kwargs = {'review_assignment': assignment, 'request': request, 'accepted': True} event_logic.Events.raise_event(event_logic.Events.ON_REVIEWER_ACCEPTED, task_object=assignment.article, **kwargs) return redirect(logic.generate_access_code_url('do_review', assignment, access_code)) @reviewer_user_for_assignment_required def decline_review_request(request, assignment_id): """ Decline a review request :param request: the request object :param assignment_id: the assignment ID to handle :return: a context for a 
Django template """ access_code = logic.get_access_code(request) if access_code: assignment = models.ReviewAssignment.objects.get( Q(pk=assignment_id) & Q(is_complete=False) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(access_code=access_code) ) else: assignment = models.ReviewAssignment.objects.get( Q(pk=assignment_id) & Q(is_complete=False) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(reviewer=request.user) ) assignment.date_declined = timezone.now() assignment.date_accepted = None assignment.is_complete = True assignment.save() template = 'review/review_decline.html' context = { 'assigned_articles_for_user_review': assignment, 'access_code': access_code if access_code else '' } kwargs = {'review_assignment': assignment, 'request': request, 'accepted': False} event_logic.Events.raise_event(event_logic.Events.ON_REVIEWER_DECLINED, task_object=assignment.article, **kwargs) return render(request, template, context) @reviewer_user_for_assignment_required def suggest_reviewers(request, assignment_id): """ Allows a user to suggest reviewers :param request: :param assignment_id: :return: """ try: access_code = logic.get_access_code(request) if access_code: assignment = models.ReviewAssignment.objects.get( Q(pk=assignment_id) & Q(is_complete=True) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(access_code=access_code) ) else: assignment = models.ReviewAssignment.objects.get( Q(pk=assignment_id) & Q(is_complete=True) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(reviewer=request.user) ) except models.ReviewAssignment.DoesNotExist: raise PermissionError('Suggested reviewers already supplied.') form = forms.SuggestReviewers(instance=assignment) if request.POST: form = forms.SuggestReviewers(request.POST, instance=assignment) if form.is_valid(): form.save() messages.add_message(request, messages.INFO, 'Thanks for suggesting reviewers for this article.') return redirect(reverse('website_index')) template = 'review/suggest_reviewers.html' context = { 'assignment': assignment, 'form': form, } return render(request, template, context) @reviewer_user_required def review_requests(request): """ A list of requests for the current user :param request: the request object :return: a context for a Django template """ new_requests = models.ReviewAssignment.objects.filter( Q(is_complete=False) & Q(reviewer=request.user) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(date_accepted__isnull=True), article__journal=request.journal ).select_related('article') active_requests = models.ReviewAssignment.objects.filter( Q(is_complete=False) & Q(reviewer=request.user) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW), Q(date_accepted__isnull=False), article__journal=request.journal ).select_related('article') completed_requests = models.ReviewAssignment.objects.filter( Q(is_complete=True) & Q(reviewer=request.user), article__journal=request.journal ).select_related('article') template = 'review/review_requests.html' context = { 'new_requests': new_requests, 'active_requests': active_requests, 'completed_requests': completed_requests, } return render(request, template, context) @reviewer_user_for_assignment_required def do_review(request, assignment_id): """ Rendering of the review form for user to complete. 
:param request: the request object :param assignment_id: ReviewAssignment PK :return: a context for a Django template """ access_code = logic.get_access_code(request) if access_code: assignment = models.ReviewAssignment.objects.get( Q(pk=assignment_id) & Q(is_complete=False) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(access_code=access_code) ) else: assignment = models.ReviewAssignment.objects.get( Q(pk=assignment_id) & Q(is_complete=False) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(reviewer=request.user) ) allow_save_review = setting_handler.get_setting( 'general', 'enable_save_review_progress', request.journal, ).processed_value fields_required = decision_required = True if allow_save_review: fields_required = decision_required = False elif assignment.review_file: fields_required = False review_round = assignment.article.current_review_round_object() form = forms.GeneratedForm( review_assignment=assignment, fields_required=fields_required, ) decision_form = forms.ReviewerDecisionForm( instance=assignment, decision_required=decision_required, ) if 'review_file' in request.GET: return logic.serve_review_file(assignment) if request.POST: if request.FILES: assignment = upload_review_file( request, assignment_id=assignment_id) if 'decline' in request.POST: return redirect( logic.generate_access_code_url( 'decline_review', assignment, access_code, ) ) if 'accept' in request.POST: return redirect( logic.generate_access_code_url( 'accept_review', assignment, access_code, ) ) # If the submission has a review_file, reviewer does not need # to complete the generated part of the form. Same if this is # a POST for saving progress but not completing the review if "complete" in request.POST: if assignment.review_file: fields_required = False else: fields_required = True decision_required = True form = forms.GeneratedForm( request.POST, review_assignment=assignment, fields_required=fields_required, ) decision_form = forms.ReviewerDecisionForm( request.POST, instance=assignment, decision_required=decision_required, ) if form.is_valid() and decision_form.is_valid(): decision_form.save() assignment.save_review_form(form, assignment) if 'save_progress' in request.POST: messages.add_message( request, messages.SUCCESS, 'Progress saved', ) else: assignment.date_complete = timezone.now() assignment.is_complete = True if not assignment.date_accepted: assignment.date_accepted = timezone.now() assignment.save() kwargs = {'review_assignment': assignment, 'request': request} event_logic.Events.raise_event( event_logic.Events.ON_REVIEW_COMPLETE, task_object=assignment.article, **kwargs ) return redirect( logic.generate_access_code_url( 'thanks_review', assignment, access_code, ) ) else: messages.add_message( request, messages.ERROR, 'Found errors on the form. 
Please, resolve them and try again', ) template = 'review/review_form.html' context = { 'assignment': assignment, 'form': form, 'decision_form': decision_form, 'review_round': review_round, 'access_code': access_code, 'allow_save_review': allow_save_review, } return render(request, template, context) @require_POST @reviewer_user_for_assignment_required def upload_review_file(request, assignment_id): access_code = logic.get_access_code(request) if access_code: assignment = models.ReviewAssignment.objects.get( Q(pk=assignment_id) & Q(is_complete=False) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(access_code=access_code) ) else: assignment = models.ReviewAssignment.objects.get( Q(pk=assignment_id) & Q(is_complete=False) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(reviewer=request.user) ) if 'review_file' in request.FILES: uploaded_file = request.FILES.get('review_file', None) old_file = assignment.review_file if uploaded_file: new_file = files.save_file_to_article( uploaded_file, assignment.article, assignment.reviewer, ) assignment.review_file = new_file assignment.save() messages.add_message( request, messages.SUCCESS, 'File uploaded successfully.', ) if old_file: old_file.unlink_file(request.journal) old_file.delete() else: messages.add_message( request, messages.ERROR, 'Please select a file to upload.', ) return assignment @reviewer_user_for_assignment_required def thanks_review(request, assignment_id): """ Displays thank you message for the assignment form. :param request: HttpRequest object :param assignment_id: ReviewAssignment PK :return: HttpResponse """ access_code = logic.get_access_code(request) if access_code: assignment = models.ReviewAssignment.objects.get( Q(pk=assignment_id) & Q(is_complete=True) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(access_code=access_code) ) else: assignment = models.ReviewAssignment.objects.get( Q(pk=assignment_id) & Q(is_complete=True) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(reviewer=request.user) ) template = 'review/thanks.html' context = { 'assignment': assignment, 'access_code': access_code, } return render(request, template, context) @editor_is_not_author @article_decision_not_made @editor_user_required def add_review_assignment(request, article_id): """ Allow an editor to add a new review assignment :param request: HttpRequest object :param article_id: Article PK :return: HttpResponse """ article = get_object_or_404(submission_models.Article, pk=article_id) form = forms.ReviewAssignmentForm(journal=request.journal) new_reviewer_form = core_forms.QuickUserForm() reviewers = logic.get_reviewer_candidates(article, request.user) suggested_reviewers = logic.get_suggested_reviewers(article, reviewers) user_list = logic.get_enrollable_users(request) modal = None # Check if this review round has files if not article.current_review_round_object().review_files.all(): messages.add_message(request, messages.WARNING, 'You should select files for review before adding reviewers.') return redirect(reverse('review_in_review', kwargs={'article_id': article.pk})) if request.POST: if 'quick_assign' in request.POST: logic.quick_assign(request, article) return redirect(reverse('review_in_review', kwargs={'article_id': article_id})) elif 'add_and_assign' in request.POST: # first check whether the user exists new_reviewer_form = core_forms.QuickUserForm(request.POST) try: user = core_models.Account.objects.get(email=new_reviewer_form.data['email']) user.add_account_role('reviewer', 
request.journal) except core_models.Account.DoesNotExist: user = None if user: logic.quick_assign(request, article, reviewer_user=user) return redirect(reverse('review_in_review', kwargs={'article_id': article_id})) valid = new_reviewer_form.is_valid() if valid: acc = logic.handle_reviewer_form(request, new_reviewer_form) logic.quick_assign(request, article, reviewer_user=acc) return redirect(reverse('review_in_review', kwargs={'article_id': article_id})) else: modal = 'reviewer' elif 'assign' in request.POST: # first check whether the user exists new_reviewer_form = core_forms.QuickUserForm(request.POST) try: user = core_models.Account.objects.get(email=new_reviewer_form.data['email']) user.add_account_role('reviewer', request.journal) except core_models.Account.DoesNotExist: user = None if user: return redirect(reverse('review_add_review_assignment', kwargs={'article_id': article.pk}) + '?' + parse.urlencode({'user': new_reviewer_form.data['email'], 'id': str(user.pk)})) valid = new_reviewer_form.is_valid() if valid: acc = logic.handle_reviewer_form(request, new_reviewer_form) return redirect(reverse('review_add_review_assignment', kwargs={'article_id': article.pk}) + '?' + parse.urlencode({'user': new_reviewer_form.data['email'], 'id': str(acc.pk)})) else: modal = 'reviewer' elif 'enrollusers' in request.POST: user_ids = request.POST.getlist('user_id') users = core_models.Account.objects.filter(pk__in=user_ids) for user in users: user.add_account_role('reviewer', request.journal) messages.add_message(request, messages.SUCCESS, '{0} enrolled as a reviewer.'.format(user.full_name())) return redirect(reverse('review_add_review_assignment', kwargs={'article_id': article.pk})) else: form = forms.ReviewAssignmentForm(request.POST, journal=request.journal) if form.is_valid(): reviewer = logic.get_reviewer_from_post(request) if not reviewer: form.add_error(None, 'You must select a reviewer.') else: review_assignment = form.save(commit=False) review_assignment.reviewer = reviewer review_assignment.article = article review_assignment.editor = request.user review_assignment.review_round = article.current_review_round_object() review_assignment.access_code = uuid4() review_assignment.save() article.stage = submission_models.STAGE_UNDER_REVIEW article.save() kwargs = {'user_message_content': '', 'review_assignment': review_assignment, 'request': request, 'skip': False, 'acknowledgement': False} event_logic.Events.raise_event(event_logic.Events.ON_REVIEWER_REQUESTED, **kwargs) return redirect(reverse('review_notify_reviewer', kwargs={'article_id': article_id, 'review_id': review_assignment.id})) template = 'review/add_review_assignment.html' context = { 'article': article, 'form': form, 'reviewers': reviewers, 'new_reviewer_form': new_reviewer_form, 'modal': modal, 'user_list': user_list, 'suggested_reviewers': suggested_reviewers, } return render(request, template, context) @editor_is_not_author @article_decision_not_made @editor_user_required def notify_reviewer(request, article_id, review_id): """ Allows the editor to send a notification to the assigned peer reviewer :param request: HttpRequest object :param article_id: Article PK :param review_id: ReviewAssignment PK :return: HttpResponse or HttpRedirect """ article = get_object_or_404(submission_models.Article, pk=article_id) review = get_object_or_404(models.ReviewAssignment, pk=review_id) email_content = logic.get_reviewer_notification(request, article, request.user, review) if request.POST: email_content = request.POST.get('content_email') 
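# Assemble the payload for the reviewer-notification event; 'skip' is flipped to
# True below when the editor opts out, signalling the event handler to skip
# sending the notification email.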
kwargs = {'user_message_content': email_content, 'review_assignment': review, 'request': request, 'skip': False, 'acknowledgement': True} if 'skip' in request.POST: kwargs['skip'] = True event_logic.Events.raise_event(event_logic.Events.ON_REVIEWER_REQUESTED_ACKNOWLEDGE, **kwargs) return redirect(reverse('review_in_review', kwargs={'article_id': article_id})) event_logic.Events.raise_event(event_logic.Events.ON_REVIEWER_REQUESTED_ACKNOWLEDGE, **kwargs) review.date_requested = timezone.now() review.save() return redirect(reverse('review_in_review', kwargs={'article_id': article_id})) template = 'review/notify_reviewer.html' context = { 'article': article, 'review': review, 'email_content': email_content, 'assignment': review, } return render(request, template, context) @editor_is_not_author @editor_user_required def view_review(request, article_id, review_id): """ A view that allows the editor to view a review. :param request: Django's request object :param article_id: Article PK :param review_id: ReviewAssignment PK :return: a rendered django template """ article = get_object_or_404(submission_models.Article, pk=article_id) review = get_object_or_404(models.ReviewAssignment, pk=review_id) if request.POST: if 'author_consumption' in request.POST: if review.for_author_consumption: review.for_author_consumption = False else: review.for_author_consumption = True review.save() if 'individual_author_consumption' in request.POST: checkboxes = request.POST.getlist('answer_viewable') for answer in review.review_form_answers(): if str(answer.pk) in checkboxes: answer.author_can_see = True else: answer.author_can_see = False answer.save() if 'reset' in request.POST: answer_pk = request.POST.get('pk') answer = models.ReviewAssignmentAnswer.objects.get(pk=answer_pk) answer.edited_answer = None answer.save() if 'review_file_visible' in request.POST: logic.handle_review_file_switch(review, request.POST.get('review_file_visible')) messages.add_message(request, messages.SUCCESS, 'Review File visibility updated.') return redirect(reverse('review_view_review', kwargs={'article_id': article.pk, 'review_id': review.pk})) template = 'review/view_review.html' context = { 'article': article, 'review': review } return render(request, template, context) @editor_is_not_author @editor_user_required def edit_review_answer(request, article_id, review_id, answer_id): """ Allows an Editor to tweak an answer given for a peer review question. 
:param request: HttpRequest object :param article_id: Article PK :param review_id: ReviewAssignment PK :param answer_id: ReviewAssignmentAnswer PK :return: HttpResponse or HttpRedirect """ article = get_object_or_404(submission_models.Article, pk=article_id) review = get_object_or_404(models.ReviewAssignment, pk=review_id) answer = get_object_or_404(models.ReviewAssignmentAnswer, pk=answer_id) form = forms.GeneratedForm(answer=answer) if request.POST: form = forms.GeneratedForm(request.POST, answer=answer) if form.is_valid(): # Form element keys are posted as str element_key = str(answer.element.pk) answer.edited_answer = form.cleaned_data[element_key] answer.save() return redirect( reverse( 'review_view_review', kwargs={'article_id': article.pk, 'review_id': review.pk}, ) ) template = 'review/edit_review_answer.html' context = { 'article': article, 'review': review, 'answer': answer, 'form': form, } return render(request, template, context) @editor_is_not_author @article_decision_not_made @editor_user_required def edit_review(request, article_id, review_id): """ A view that allows a user to edit a review. :param request: Django's request object :param article_id: Article PK :param review_id: ReviewAssignment PK :return: a rendered django template """ article = get_object_or_404(submission_models.Article, pk=article_id) review = get_object_or_404(models.ReviewAssignment, pk=review_id) if review.date_complete: messages.add_message(request, messages.WARNING, 'You cannot edit a review that is already complete.') return redirect(reverse('review_in_review', kwargs={'article_id': article.pk})) form = forms.ReviewAssignmentForm(instance=review, journal=request.journal) if request.POST: form = forms.ReviewAssignmentForm(request.POST, instance=review, journal=request.journal) if form.is_valid(): form.save() messages.add_message(request, messages.INFO, 'Review updated.') util_models.LogEntry.add_entry('Review Updated', 'Review updated.', level='Info', actor=request.user, request=request, target=review) return redirect(reverse('review_in_review', kwargs={'article_id': article.pk})) template = 'review/edit_review.html' context = { 'article': article, 'review': review, 'form': form, } return render(request, template, context) @editor_is_not_author @article_decision_not_made @editor_user_required def delete_review(request, article_id, review_id): """ A view that allows a user to delete a review. :param request: Django's request object :param article_id: Article PK :param review_id: ReviewAssignment PK :return: a rendered django template """ article = get_object_or_404(submission_models.Article, pk=article_id) review = get_object_or_404(models.ReviewAssignment, pk=review_id) if review.date_complete: messages.add_message(request, messages.WARNING, 'You cannot delete a review that is already complete.') return redirect(reverse('review_in_review', kwargs={'article_id': article.pk})) if request.POST and 'delete' in request.POST: user_message = request.POST.get('delete_rationale', 'No message supplied by user.') description = 'Review {0} for article {1} has been deleted by {2}. 
\n\n{3}'.format( review.pk, article.title, request.user.username, user_message, ) util_models.LogEntry.add_entry('Review Deleted', description, level='Info', actor=request.user, request=request, target=article) review.delete() messages.add_message(request, messages.SUCCESS, 'Review deleted.') return redirect(reverse('review_in_review', kwargs={'article_id': article.pk})) template = 'review/delete_review.html' context = { 'article': article, 'review': review, } return render(request, template, context) @editor_is_not_author @article_decision_not_made @editor_user_required def withdraw_review(request, article_id, review_id): """ A view that allows a user to withdraw a review. :param request: Django's request object :param article_id: Article PK :param review_id: ReviewAssignment PK :return: a rendered django template """ article = get_object_or_404(submission_models.Article, pk=article_id) review = get_object_or_404(models.ReviewAssignment, pk=review_id) if review.date_complete: messages.add_message(request, messages.WARNING, 'You cannot withdraw a review that is already complete.') return redirect(reverse('review_in_review', kwargs={'article_id': article.pk})) email_content = logic.get_withdrawl_notification(request, review) if request.POST: email_content = request.POST.get('content_email') kwargs = {'user_message_content': email_content, 'review_assignment': review, 'request': request, 'skip': False} if 'skip' in request.POST: kwargs['skip'] = True event_logic.Events.raise_event(event_logic.Events.ON_REVIEW_WITHDRAWL, **kwargs) review.date_complete = timezone.now() review.decision = 'withdrawn' review.is_complete = True review.save() messages.add_message(request, messages.SUCCESS, 'Review withdrawn') return redirect(reverse('review_in_review', kwargs={'article_id': article.pk})) template = 'review/withdraw_review.html' context = { 'article': article, 'review': review, 'email_content': email_content, } return render(request, template, context) @editor_is_not_author @article_decision_not_made @editor_user_required def reset_review(request, article_id, review_id): """ Allows an editor to reset a review that has previously been declined or withdrawn. :param request: Django's request object :param article_id: pk of an Article :param review_id: pk of a ReviewAssignment :return: a contextualised django template """ article = get_object_or_404(submission_models.Article, pk=article_id) review = get_object_or_404(models.ReviewAssignment, pk=review_id) if request.POST: review.is_complete = False review.date_complete = None review.date_declined = None review.decision = None review.suggested_reviewers = "" review.save() messages.add_message(request, messages.INFO, 'Review reset.') return redirect(reverse('review_in_review', kwargs={'article_id': article.pk})) template = 'review/reset.html' context = { 'article': article, 'review': review, } return render(request, template, context) @section_editor_draft_decisions @editor_is_not_author @editor_user_required def review_decision(request, article_id, decision): """ Allows the editor to make a review decision; revisions are not a decision, only accept or decline. 
:param request: the django request object :param article_id: Article PK :param decision: string, either 'accept' or 'decline' :return: a contextualised django template """ article = get_object_or_404(submission_models.Article, pk=article_id) author_review_url = request.journal.site_url( reverse('review_author_view', kwargs={'article_id': article.id}) ) email_content = logic.get_decision_content(request, article, decision, author_review_url) if article.date_accepted or article.date_declined: messages.add_message(request, messages.WARNING, 'This article has already been accepted or declined.') return redirect(reverse('review_in_review', kwargs={'article_id': article.pk})) if request.POST: email_content = request.POST.get('decision_rationale') kwargs = { 'article': article, 'request': request, 'decision': decision, 'user_message_content': email_content, 'skip': False, } if 'skip' in request.POST: kwargs['skip'] = True if decision == 'accept': article.accept_article() article.snapshot_authors(article, force_update=False) event_logic.Events.raise_event(event_logic.Events.ON_ARTICLE_ACCEPTED, task_object=article, **kwargs) workflow_kwargs = {'handshake_url': 'review_home', 'request': request, 'article': article, 'switch_stage': True} return event_logic.Events.raise_event(event_logic.Events.ON_WORKFLOW_ELEMENT_COMPLETE, task_object=article, **workflow_kwargs) elif decision == 'decline': article.decline_article() event_logic.Events.raise_event(event_logic.Events.ON_ARTICLE_DECLINED, task_object=article, **kwargs) return redirect(reverse('core_dashboard')) messages.add_message(request, messages.INFO, 'Article {0} has been {1}ed'.format(article.title, decision)) return redirect(reverse('article_copyediting', kwargs={'article_id': article.pk})) template = 'review/decision.html' context = { 'article': article, 'decision': decision, 'email_content': email_content, } return render(request, template, context) @editor_is_not_author @editor_user_required def rate_reviewer(request, article_id, review_id): """ Allows an Editor to rate a Reviewer :param request: django's request object :param article_id: pk of an Article :param review_id: pk of a ReviewAssignment :return: a contextualised django template """ review = get_object_or_404(models.ReviewAssignment, pk=review_id, article__pk=article_id) if not review.is_complete: messages.add_message(request, messages.INFO, 'You cannot rate a reviewer until their review is complete. ' 'You should withdraw this review if you want to rate the reviewer ' 'before they are finished.') return redirect(reverse('review_in_review', kwargs={'article_id': review.article.id})) if request.POST: rating_int = int(request.POST.get('rating_number')) if review.review_rating: rating = review.review_rating rating.rating = rating_int rating.save() messages.add_message(request, messages.INFO, '{0}\'s rating updated to {1}'.format(review.reviewer.full_name(), rating_int)) else: messages.add_message(request, messages.INFO, '{0} assigned a rating of {1}'.format(review.reviewer.full_name(), rating_int)) models.ReviewerRating.objects.create(assignment=review, rating=rating_int, rater=request.user) return redirect(reverse('review_in_review', kwargs={'article_id': review.article.id})) template = 'review/rate_reviewer.html' context = { 'review': review, } return render(request, template, context) @article_author_required def author_view_reviews(request, article_id): """ View that allows an author to view the reviews for an article. 
:param request: django request object :param article_id: Article pk :return: a contextualised django template """ article = get_object_or_404(submission_models.Article, pk=article_id) reviews = models.ReviewAssignment.objects.filter( article=article, is_complete=True, for_author_consumption=True, ).exclude(decision='withdrawn') if not reviews.exists(): raise PermissionDenied( 'No reviews have been made available by the Editor.', ) if request.GET.get('file_id', None): viewable_files = logic.group_files(article, reviews) file_id = request.GET.get('file_id') file = get_object_or_404(core_models.File, pk=file_id) if file in viewable_files: return files.serve_file(request, file, article) template = 'review/author_view_reviews.html' context = { 'article': article, 'reviews': reviews, } return render(request, template, context) @editor_is_not_author @editor_user_required def request_revisions(request, article_id): """ View allows an Editor to request revisions to an article. :param request: django request object :param article_id: Article PK :return: a contextualised django template """ article = get_object_or_404(submission_models.Article, pk=article_id) form = forms.RevisionRequest() review_round = models.ReviewRound.latest_article_round( article=article, ) pending_approval = review_round.reviewassignment_set.filter( is_complete=True, for_author_consumption=False, ) incomplete = review_round.reviewassignment_set.filter( is_complete=False, ) if request.POST: form = forms.RevisionRequest(request.POST) if form.is_valid(): revision_request = form.save(commit=False) revision_request.editor = request.user revision_request.article = article revision_request.save() article.stage = submission_models.STAGE_UNDER_REVISION article.save() return redirect(reverse( 'request_revisions_notification', kwargs={ 'article_id': article.pk, 'revision_id': revision_request.pk, } )) template = 'review/revision/request_revisions.html' context = { 'article': article, 'form': form, 'pending_approval': pending_approval, 'incomplete': incomplete, } return render(request, template, context) @editor_is_not_author @editor_user_required def request_revisions_notification(request, article_id, revision_id): """ View allows an Editor to notify an Author of a Revision request :param request: django request object :param article_id: PK of an Article :param revision_id: PK of a RevisionRequest :return: a contextualised django template """ article = get_object_or_404(submission_models.Article, pk=article_id) revision = get_object_or_404(models.RevisionRequest, pk=revision_id) email_content = logic.get_revision_request_content(request, article, revision) if request.POST: user_message_content = request.POST.get('email_content') kwargs = { 'user_message_content': user_message_content, 'revision': revision, 'request': request, 'skip': False, } if 'skip' in request.POST: kwargs['skip'] = True event_logic.Events.raise_event(event_logic.Events.ON_REVISIONS_REQUESTED_NOTIFY, **kwargs) return redirect(reverse('review_in_review', kwargs={'article_id': article.pk})) template = 'review/revision/request_revisions_notification.html' context = { 'article': article, 'email_content': email_content, 'revision': revision, } return render(request, template, context) @editor_is_not_author @editor_user_required def edit_revision_request(request, article_id, revision_id): """ View allows an Editor to edit an existing Revision :param request: HttpRequest object :param article_id: Article PK :param revision_id: Revision PK :return: HttpResponse """ 
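# A completed revision request is read-only (checked just below); otherwise the
# POST branches update the due date, delete the request outright, or mark it
# complete, each reporting back to the editor via a message.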
revision_request = get_object_or_404(models.RevisionRequest, article__pk=article_id, pk=revision_id) form = forms.EditRevisionDue(instance=revision_request) if revision_request.date_completed: messages.add_message(request, messages.WARNING, 'You cannot edit a revision request that is complete.') return redirect(reverse('review_in_review', kwargs={'article_id': article_id})) if request.POST: if 'update_due' in request.POST: form = forms.EditRevisionDue(request.POST, instance=revision_request) if form.is_valid(): form.save() messages.add_message(request, messages.INFO, 'Due date updated.') if 'delete_revision' in request.POST: rationale = request.POST.get('delete_rationale') util_models.LogEntry.add_entry('deletion', '{0} deleted a revision request with reason:\n\n{1}'.format( request.user.full_name(), rationale), level='Info', actor=request.user, target=revision_request.article ) revision_request.delete() messages.add_message(request, messages.INFO, 'Revision request deleted.') if 'mark_as_complete' in request.POST: util_models.LogEntry.add_entry('update', '{0} marked revision {1} as complete'.format( request.user.full_name(), revision_request.id), level='Info', actor=request.user, target=revision_request.article ) revision_request.date_completed = timezone.now() revision_request.save() messages.add_message(request, messages.INFO, 'Revision request marked as complete.') return redirect(reverse('review_in_review', kwargs={'article_id': article_id})) template = 'review/revision/edit_revision_request.html' context = { 'revision_request': revision_request, 'form': form, } return render(request, template, context) @article_author_required def do_revisions(request, article_id, revision_id): """ Allows an Author to complete a revision request of an article. :param request: django request object :param article_id: PK of an Article :param revision_id: PK of a RevisionRequest :return: """ revision_request = get_object_or_404( models.RevisionRequest, article__pk=article_id, pk=revision_id, date_completed__isnull=True, ) reviews = models.ReviewAssignment.objects.filter( article=revision_request.article, is_complete=True, for_author_consumption=True, ).exclude(decision='withdrawn') form = forms.DoRevisions(instance=revision_request) revision_files = logic.group_files(revision_request.article, reviews) if request.POST: if 'delete' in request.POST: file_id = request.POST.get('delete') file = get_object_or_404(core_models.File, pk=file_id) files.delete_file(revision_request.article, file) logic.log_revision_event( 'File {0} ({1}) deleted.'.format( file.id, file.original_filename ), request.user, revision_request, ) return redirect( reverse( 'do_revisions', kwargs={ 'article_id': article_id, 'revision_id': revision_id } ) ) elif 'save' in request.POST: covering_letter = request.POST.get('author_note') revision_request.author_note = covering_letter revision_request.save() messages.add_message( request, messages.SUCCESS, 'Thanks. 
Your covering letter has been saved.', ) return redirect( reverse( 'do_revisions', kwargs={ 'article_id': article_id, 'revision_id': revision_id } ) ) else: form = forms.DoRevisions(request.POST, instance=revision_request) if not revision_request.article.has_manuscript_file(): form.add_error( None, 'Your article must have at least one manuscript file.', ) if form.is_valid(): form.save() kwargs = { 'revision': revision_request, 'request': request, } event_logic.Events.raise_event( event_logic.Events.ON_REVISIONS_COMPLETE, **kwargs ) messages.add_message( request, messages.SUCCESS, 'Thank you for submitting your revisions. The Editor has been notified.', ) revision_request.date_completed = timezone.now() revision_request.save() return redirect(reverse('core_dashboard')) if request.GET.get('file_id', None): file_id = request.GET.get('file_id') file = get_object_or_404(core_models.File, pk=file_id) if file in revision_files: logic.log_revision_event( 'Downloaded file {0} ({1}).'.format( file.label, file.original_filename), request.user, revision_request, ) return files.serve_file(request, file, revision_request.article) template = 'admin/review/revision/do_revision.html' context = { 'revision_request': revision_request, 'form': form, 'article': revision_request.article, 'reviews': reviews, } return render(request, template, context) @article_author_required def replace_file(request, article_id, revision_id, file_id): revision_request = get_object_or_404(models.RevisionRequest, article__pk=article_id, pk=revision_id, date_completed__isnull=True) file = get_object_or_404(core_models.File, pk=file_id) if request.GET.get('download', None): logic.log_revision_event('Downloaded file {0} ({1})'.format(file.label, file.original_filename), request.user, revision_request) return files.serve_file(request, file, revision_request.article) if request.POST and request.FILES: if 'replacement' in request.POST: uploaded_file = request.FILES.get('replacement-file') label = request.POST.get('label') new_file = files.save_file_to_article(uploaded_file, revision_request.article, request.user, replace=file, is_galley=False, label=label) files.replace_file( revision_request.article, file, new_file, retain_old_label=False, ) logic.log_revision_event( 'File {0} ({1}) replaced with {2} ({3})'.format(file.label, file.original_filename, new_file.label, new_file.original_filename), request.user, revision_request) return redirect(reverse('do_revisions', kwargs={'article_id': article_id, 'revision_id': revision_id})) template = 'review/revision/replace_file.html' context = { 'revision_request': revision_request, 'article': revision_request.article, 'file': file, } return render(request, template, context) @article_author_required def upload_new_file(request, article_id, revision_id): """ View allows an author to upload new file to their article. 
:param request: HttpRequest object :param article_id: Article PK :param revision_id: RevisionRequest PK :return: HttpResponse or HttpRedirect """ revision_request = get_object_or_404(models.RevisionRequest, article__pk=article_id, pk=revision_id, date_completed__isnull=True) article = revision_request.article if request.POST and request.FILES: file_type = request.POST.get('file_type') uploaded_file = request.FILES.get('file') label = request.POST.get('label') new_file = files.save_file_to_article( uploaded_file, article, request.user, label=label, ) if file_type == 'manuscript': article.manuscript_files.add(new_file) if file_type == 'data': article.data_figure_files.add(new_file) logic.log_revision_event( 'New file {0} ({1}) uploaded'.format( new_file.label, new_file.original_filename), request.user, revision_request) return redirect(reverse( 'do_revisions', kwargs={'article_id': article_id, 'revision_id': revision_id}) ) template = 'review/revision/upload_file.html' context = { 'revision_request': revision_request, 'article': revision_request.article, } return render(request, template, context) @editor_is_not_author @editor_user_required def view_revision(request, article_id, revision_id): """ Allows an Editor to view a revision request :param request: HttpRequest object :param article_id: Article PK :param revision_id: RevisionRequest PK :return: HttpResponse """ revision_request = get_object_or_404(models.RevisionRequest.objects.select_related('article'), pk=revision_id, article__pk=article_id) template = 'review/revision/view_revision.html' context = { 'revision_request': revision_request, 'article': revision_request.article } return render(request, template, context) @editor_user_required def review_warning(request, article_id): """ Checks if an editor user is the author of an article and blocks their access temporarily. If overridden, all Editors are notified. :param request: HttpRequest object :param article_id: Article PK :return: HttpResponse or HttpRedirect """ article = get_object_or_404(submission_models.Article, pk=article_id) if request.POST and request.user.is_editor(request): override = models.EditorOverride.objects.create( article=article, editor=request.user) kwargs = {'request': request, 'override': override} event_logic.Events.raise_event( event_logic.Events.ON_REVIEW_SECURITY_OVERRIDE, task_object=article, **kwargs ) return redirect(reverse('review_in_review', kwargs={'article_id': article.pk})) else: messages.add_message( request, messages.WARNING, 'This action is not allowed.') template = 'review/review_warning.html' context = { 'article': article } return render(request, template, context) @editor_user_required @file_user_required def editor_article_file(request, article_id, file_id): """ Serves an article file. :param request: the request associated with this call :param article_id: the id of an article :param file_id: the file ID to serve :return: a streaming response of the requested file or 404 """ article_object = submission_models.Article.objects.get(pk=article_id) file_object = get_object_or_404(core_models.File, pk=file_id) return files.serve_file(request, file_object, article_object) @reviewer_user_for_assignment_required def reviewer_article_file(request, assignment_id, file_id): """ Serves an article file. :param request: the request associated with this call :param assignment_id: the ReviewAssignment id. 
:param file_id: the file ID to serve :return: a streaming response of the requested file or 404 """ review_assignment = models.ReviewAssignment.objects.get(pk=assignment_id) article_object = review_assignment.article file_object = review_assignment.review_round.review_files.get(pk=file_id) if not file_object: raise Http404() return files.serve_file( request, file_object, article_object, hide_name=True ) @reviewer_user_for_assignment_required def review_download_all_files(request, assignment_id): review_assignment = models.ReviewAssignment.objects.get(pk=assignment_id) zip_file, file_name = files.zip_article_files( review_assignment.review_round.review_files.all(), ) return files.serve_temp_file(zip_file, file_name) @editor_is_not_author @editor_user_required def draft_decision(request, article_id): """ Allows a section editor to draft a decision for an editor. :param request: request object :param article_id: an Article primary key :return: a django template with context """ article = get_object_or_404(submission_models.Article, pk=article_id) drafts = models.DecisionDraft.objects.filter(article=article) message_to_editor = logic.get_draft_email_message(request, article) editors = request.journal.editors() form = forms.DraftDecisionForm( message_to_editor=message_to_editor, editors=editors, initial={ 'revision_request_due_date': timezone.now() + timedelta(days=14), } ) if request.POST: if 'delete' in request.POST: delete_id = request.POST.get('delete') draft = get_object_or_404(models.DecisionDraft, pk=delete_id, article=article) draft.delete() return redirect( reverse( 'review_draft_decision', kwargs={'article_id': article.pk}, ), ) else: form = forms.DraftDecisionForm( request.POST, editors=editors, message_to_editor=message_to_editor, ) if form.is_valid(): new_draft = form.save(commit=False) new_draft.section_editor = request.user new_draft.article = article new_draft.save() messages.add_message( request, messages.SUCCESS, 'A draft has been saved, the editor has been notified.', ) kwargs = {'request': request, 'article': article, 'draft': new_draft} event_logic.Events.raise_event( event_logic.Events.ON_DRAFT_DECISION, **kwargs, ) return redirect( reverse( 'review_draft_decision', kwargs={'article_id': article.pk}, ), ) template = 'review/draft_decision.html' context = { 'article': article, 'drafts': drafts, 'form': form, } return render(request, template, context) @require_POST @editor_user_required def draft_decision_text(request, article_id): """ Takes a POST and returns decision text. 
""" article = get_object_or_404( submission_models.Article, pk=article_id, journal=request.journal, ) decision = request.POST.get('decision') date = request.POST.get('date', None) if isinstance(date, str) and date != '': date = shared.make_timezone_aware(date, '%Y-%m-%d') else: date = timezone.now() + timedelta(days=14) author_review_url = request.journal.site_url( reverse( 'review_author_view', kwargs={'article_id': article.id}, ) ) if not decision: raise Http404 if decision in ['accept', 'reject']: decision_text = logic.get_decision_content( request=request, article=article, decision=decision, author_review_url=author_review_url, ) elif decision in ['minor_revisions', 'major_revisions']: revision = models.RevisionRequest( article=article, editor=request.user, type=decision, date_requested=timezone.now, date_due=date.strftime("%Y-%m-%d"), editor_note="[[Add Editor Note Here]]", ) decision_text = logic.get_revision_request_content( request=request, article=article, revision=revision, draft=True, ) return JsonResponse({'decision_text': decision_text}) @editor_is_not_author @editor_user_required def manage_draft(request, article_id, draft_id): article = get_object_or_404(submission_models.Article, pk=article_id) draft = get_object_or_404(models.DecisionDraft, pk=draft_id) if 'decline_draft' in request.POST: draft.editor_decision = 'declined' draft.save() logic.handle_draft_declined(article, draft, request) if 'accept_draft' in request.POST: draft.editor_decision = 'accept' draft.save() decision_action = logic.handle_decision_action(article, draft, request) if decision_action: return decision_action messages.add_message( request, messages.INFO, 'Draft {}'.format(draft.editor_decision) ) return redirect( reverse( 'decision_helper', kwargs={'article_id': article.pk}, ), ) @editor_is_not_author @editor_user_required def edit_draft_decision(request, article_id, draft_id): article = get_object_or_404(submission_models.Article, pk=article_id) draft = get_object_or_404(models.DecisionDraft, pk=draft_id) drafts = models.DecisionDraft.objects.filter(article=article) editors = request.journal.editors() form = forms.DraftDecisionForm( instance=draft, editors=editors, ) if request.POST: form = forms.DraftDecisionForm( request.POST, instance=draft, editors=editors, ) if form.is_valid(): form.save() messages.add_message(request, messages.SUCCESS, 'Draft has been updated') return redirect( reverse( 'review_edit_draft_decision', kwargs={'article_id': article.pk, 'draft_id': draft.pk}, ), ) template = 'review/draft_decision.html' context = { 'article': article, 'drafts': drafts, 'draft': draft, 'form': form, } return render(request, template, context) @senior_editor_user_required def review_forms(request): """ Displays a list of review forms and allows new ones to be created. 
:param request: HttpRequest object :return: HttpResponse or HttpRedirect """ form_list = models.ReviewForm.objects.filter( journal=request.journal, deleted=False, ) form = forms.NewForm() default_form = setting_handler.get_setting( 'general', 'default_review_form', request.journal, ).processed_value if default_form.isdigit(): default_form = int(default_form) if request.POST: if 'delete' in request.POST: form_id = request.POST["delete"] if form_id.isdigit(): form_id = int(form_id) if default_form == form_id: messages.add_message( request, messages.ERROR, "This form is selected as the defaul form and thus" " can't be deleted", ) return redirect(reverse('review_review_forms')) form_obj = get_object_or_404( models.ReviewForm, id=form_id, journal=request.journal, ) form_obj.deleted = True form_obj.save() messages.add_message(request, messages.SUCCESS, 'Form Deleted') return redirect(reverse('review_review_forms')) else: form = forms.NewForm(request.POST) if form.is_valid(): new_form = form.save(commit=False) new_form.journal = request.journal new_form.save() return redirect(reverse('review_review_forms')) template = 'review/manager/review_forms.html' context = { 'form_list': form_list, 'form': form, 'default_form': default_form, } return render(request, template, context) @senior_editor_user_required def edit_review_form(request, form_id, element_id=None): """ Allows the editing of an existing review form :param request: HttpRequest object :param form_id: ReviewForm PK :param element_id: Element PK, optional :return: HttpResponse or HttpRedirect """ edit_form = get_object_or_404(models.ReviewForm, pk=form_id) form = forms.NewForm(instance=edit_form) element_form = forms.ElementForm() element, modal = None, None if element_id: element = get_object_or_404(models.ReviewFormElement, pk=element_id) modal = 'element' element_form = forms.ElementForm(instance=element) if request.POST: if 'delete' in request.POST: delete_id = request.POST.get('delete') element_to_delete = get_object_or_404(models.ReviewFormElement, pk=delete_id) element_to_delete.delete() return redirect(reverse('edit_review_form', kwargs={'form_id': edit_form.pk})) if 'element' in request.POST: if element_id: element_form = forms.ElementForm(request.POST, instance=element) else: element_form = forms.ElementForm(request.POST) if element_form.is_valid(): element = element_form.save() edit_form.elements.add(element) messages.add_message(request, messages.SUCCESS, 'New element added.') return redirect(reverse('edit_review_form', kwargs={'form_id': edit_form.pk})) if 'review_form' in request.POST: form = forms.NewForm(request.POST, instance=edit_form) if form.is_valid(): form.save() messages.add_message(request, messages.SUCCESS, 'Form updated') return redirect(reverse('edit_review_form', kwargs={'form_id': edit_form.pk})) template = 'review/manager/edit_review_form.html' context = { 'form': form, 'edit_form': edit_form, 'element_form': element_form, 'modal': modal, } return render(request, template, context) @senior_editor_user_required def preview_form(request, form_id): """Displays a preview of a review form.""" form = get_object_or_404(models.ReviewForm, pk=form_id) generated_form = forms.GeneratedForm(preview=form) decision_form = forms.FakeReviewerDecisionForm() template = 'review/manager/preview_form.html' context = { 'form': form, 'generated_form': generated_form, 'decision_form': decision_form, } return render(request, template, context) @require_POST @senior_editor_user_required def order_review_elements(request, form_id): """ 
Reorders Review Form elements. :param request: HttpRequest object :param form_id: ReviewForm PK """ form = get_object_or_404( models.ReviewForm, pk=form_id, journal=request.journal, ) shared.set_order( form.elements.all(), 'order', request.POST.getlist('element[]'), ) return HttpResponse('Ok') @reviewer_user_for_assignment_required def hypothesis_review(request, assignment_id): """ Rendering of the review form for user to complete. :param request: the request object :param assignment_id: ReviewAssignment PK :return: a context for a Django template """ access_code = logic.get_access_code(request) if access_code: assignment = models.ReviewAssignment.objects.get( Q(pk=assignment_id) & Q(is_complete=False) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(access_code=access_code) ) else: assignment = models.ReviewAssignment.objects.get( Q(pk=assignment_id) & Q(is_complete=False) & Q(article__stage=submission_models.STAGE_UNDER_REVIEW) & Q(reviewer=request.user) ) pdf = assignment.review_round.review_files.get(mime_type='application/pdf') hypothesis.create_hypothesis_account(assignment.reviewer) grant_token = hypothesis.generate_grant_token(assignment.reviewer) template = 'review/annotation_pdf_review.html' context = { 'assignment': assignment, 'pdf': pdf, 'grant_token': grant_token, 'authority': settings.HYPOTHESIS_CLIENT_AUTHORITY, } return render(request, template, context) @editor_user_required def decision_helper(request, article_id): """ Displays all of the completed reviews to help the Editor make a decision. :param request: HttpRequest object :param article_id: Article object pk, integer :return: a django response """ article = get_object_or_404( submission_models.Article, pk=article_id, ) reviews = models.ReviewAssignment.objects.filter( article=article, ) uncomplete_reviews = reviews.filter( article=article, is_complete=False, date_complete__isnull=True, ) complete_reviews = reviews.filter( article=article, is_complete=True, date_complete__isnull=False, ).exclude( decision='withdrawn', ) withdraw_reviews = reviews.filter( decision='withdrawn', ) uncomplete_reviews = uncomplete_reviews.union(withdraw_reviews) decisions = Counter( [review.get_decision_display() for review in reviews if review.decision] ) if 'reveal_review' in request.POST: review = get_object_or_404( models.ReviewAssignment, article=article, id=request.POST.get('review'), ) review.for_author_consumption=True review.save() messages.add_message( request, messages.SUCCESS, "The author can now see review #%s" % review.pk, ) if 'hide_review' in request.POST: review = get_object_or_404( models.ReviewAssignment, article=article, id=request.POST.get('review'), ) review.for_author_consumption=False review.save() messages.add_message( request, messages.WARNING, "The author won't see the review #%s" % review.pk, ) if 'review_file_visible' in request.POST: review = get_object_or_404( models.ReviewAssignment, article=article, id=request.POST.get('review'), ) logic.handle_review_file_switch(review, request.POST.get('review_file_visible')) messages.add_message(request, messages.SUCCESS, 'Review File visibility updated.') template = 'admin/review/decision_helper.html' context = { 'article': article, 'complete_reviews': complete_reviews, 'uncomplete_reviews': uncomplete_reviews, 'decisions': dict(decisions) } return render(request, template, context)
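

# --- Illustrative sketch (not part of the Janeway views above) ---
# decision_helper tallies reviewer decisions with collections.Counter over the
# human-readable decision labels. A minimal, standalone rendition of that
# aggregation step, using made-up labels in place of real ReviewAssignment
# objects; guarded so it never runs on import:
if __name__ == '__main__':
    from collections import Counter

    review_decisions = ['Accept', 'Minor Revisions', 'Accept', 'Reject']
    decisions = Counter(review_decisions)
    print(dict(decisions))  # {'Accept': 2, 'Minor Revisions': 1, 'Reject': 1}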
agpl-3.0
7,063,019,516,852,951,000
33.460149
194
0.606278
false
4.176342
false
false
false
mickstar/2048-ai-python
game/gameboard.py
1
3987
import random

from game.cell import Cell
from game.move import Move


class GameBoard:
    '''GameBoard defines the 2048 grid, which should be a 4x4 square.
    This class contains 16 cells, and provides methods for permuting the
    board state in accordance with 2048 rules.
    The board size is not hard-coded, though it should be changed with caution.'''

    size = 4

    def __init__(self):
        self.grid = [[Cell(Cell.EMPTY) for x in range(GameBoard.size)]
                     for x in range(GameBoard.size)]

    def printBoard(self):
        for y in range(GameBoard.size):
            for x in range(GameBoard.size):
                cell = self.getCell(x, y)
                print(cell, end="\t")
            print("")  # new line

    def hasEmptyTiles(self):
        '''Returns whether there exists any empty tiles (=0) in the grid.'''
        for row in self.grid:
            for cell in row:
                if cell.isEmpty():
                    return True
        return False

    def getCell(self, x, y):
        '''Returns the cell at the (x,y) coordinates.
        The grid is structured such that the top left corner is (0,0);
        likewise, the bottom right is (3,3).'''
        return self.grid[x][y]

    def getRandomlyAvailableCell(self):
        '''Returns a randomly selected empty cell from the grid.'''
        emptyCells = []
        for row in self.grid:
            for cell in row:
                if cell.isEmpty():
                    emptyCells.append(cell)
        return random.choice(emptyCells)

    def makeMove(self, move):
        '''Modifies the grid in the direction of the move,
        such that Right[0,2,2,0] -> [0,0,0,4] for each horizontal row in the grid.'''
        x_delta = 0
        y_delta = 0

        n = GameBoard.size
        x_range = list(range(n))
        y_range = list(range(n))

        if move == Move.LEFT_MOVE:
            x_delta = -1
            x_range = list(reversed(range(n)))
        if move == Move.RIGHT_MOVE:
            x_delta = +1
        if move == Move.UP_MOVE:
            y_delta = -1
            # start from the far side of the movement, so reverse the y range
            y_range = list(reversed(range(n)))
        if move == Move.DOWN_MOVE:
            y_delta = +1

        successfullyMoved = False
        score_delta = 0

        for x in x_range:
            joined = []
            for y in y_range:
                # first we check to see we are not on an edge cell.
                if (x + x_delta) in x_range and (y + y_delta) in y_range:
                    curCell = self.getCell(x, y)
                    adjCell = self.getCell(x + x_delta, y + y_delta)

                    # Check to see if we can merge two cells, e.g RIGHT[0,0,2,2] -> [0,0,0,4]
                    if (curCell not in joined and not curCell.isEmpty()
                            and curCell.getValue() == adjCell.getValue()):
                        successfullyMoved = True
                        score_delta += 2 * curCell.value
                        adjCell.doubleValue()
                        curCell.removeValue()
                        joined = [curCell, adjCell]
                    # Check to see if we can move a cell e.g RIGHT[2,0,0,0] -> [0,2,0,0]
                    elif not curCell.isEmpty() and adjCell.isEmpty():
                        successfullyMoved = True
                        adjCell.setValue(curCell.getValue())
                        curCell.removeValue()

        for y in y_range:
            for x in x_range:
                if (x + x_delta) in x_range and (y + y_delta) in y_range:
                    curCell = self.getCell(x, y)
                    adjCell = self.getCell(x + x_delta, y + y_delta)
                    if not curCell.isEmpty() and adjCell.isEmpty():
                        adjCell.setValue(curCell.getValue())
                        curCell.removeValue()

        return successfullyMoved, score_delta

    def hasMovesAvailable(self):
        '''Checks to see if any moves are available.'''
        if self.hasEmptyTiles():
            return True

        n = len(self.grid)
        # we iterate over all possible directions, where (0,1) corresponds to DOWN and (-1,0) to LEFT.
        for (x_delta, y_delta) in [(0, 1), (0, -1), (1, 0), (-1, 0)]:
            x_range = list(range(n))
            y_range = list(range(n))

            # we always want to start from the cell furthest away, s.t. LEFT[2,2,0,0] starts at index 3;
            # as such we reverse the range to [3,2,1,0]
            if x_delta == -1:
                x_range = list(reversed(x_range))
            if y_delta == -1:
                y_range = list(reversed(y_range))

            for x in x_range:
                for y in y_range:
                    if (x + x_delta) in x_range and (y + y_delta) in y_range:
                        curCell = self.getCell(x, y)
                        adjCell = self.getCell(x + x_delta, y + y_delta)
                        if curCell.value == adjCell.value:
                            return True  # a move is available.
        return False
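

# --- Illustrative sketch (not part of the original module) ---
# Minimal usage of GameBoard, assuming Cell.setValue(2) seeds a "2" tile as
# the merge/move code above suggests: seed two tiles, print the grid, then
# apply one move.
if __name__ == '__main__':
    board = GameBoard()
    for _ in range(2):
        board.getRandomlyAvailableCell().setValue(2)
    board.printBoard()
    moved, gained = board.makeMove(Move.RIGHT_MOVE)
    print("moved:", moved, "score gained:", gained)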
gpl-3.0
5,492,649,167,837,388,000
29.204545
105
0.641084
false
2.88913
false
false
false
rlindner81/pyload
module/plugins/hoster/LinksnappyCom.py
1
1943
# -*- coding: utf-8 -*-

import re
import urlparse

from module.plugins.internal.misc import json
from module.plugins.internal.MultiHoster import MultiHoster


class LinksnappyCom(MultiHoster):
    __name__ = "LinksnappyCom"
    __type__ = "hoster"
    __version__ = "0.16"
    __status__ = "testing"

    __pattern__ = r'https?://(?:[^/]+\.)?linksnappy\.com'
    __config__ = [("activated", "bool", "Activated", True),
                  ("use_premium", "bool", "Use premium account if available", True),
                  ("fallback", "bool", "Fallback to free download if premium fails", False),
                  ("chk_filesize", "bool", "Check file size", True),
                  ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
                  ("revertfailed", "bool", "Revert to standard download if fails", True)]

    __description__ = """Linksnappy.com multi-hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("stickell", "l.stickell@yahoo.it"),
                   ("Bilal Ghouri", None)]

    def handle_premium(self, pyfile):
        host = self._get_host(pyfile.url)
        json_params = json.dumps({'link': pyfile.url,
                                  'type': host,
                                  'username': self.account.user,
                                  'password': self.account.get_login('password')})

        r = self.load("https://linksnappy.com/api/linkgen",
                      post={'genLinks': json_params})

        self.log_debug("JSON data: " + r)

        j = json.loads(r)['links'][0]

        if j['error']:
            self.error(_("Error converting the link"))

        pyfile.name = j['filename']
        self.link = j['generated']

    @staticmethod
    def _get_host(url):
        host = urlparse.urlsplit(url).netloc
        return re.search(r'[\w\-]+\.\w+$', host).group(0)
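

# --- Illustrative sketch (not part of the plugin above) ---
# _get_host() reduces a full URL to the bare "domain.tld" token that the
# Linksnappy API expects in its 'type' field. The same extraction, on a
# made-up URL:
# >>> LinksnappyCom._get_host('http://cdn.some-host.com/dl/abc123')
# 'some-host.com'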
gpl-3.0
4,289,568,237,312,601,000
33.696429
89
0.513639
false
3.973415
false
false
false
majkelx/astwro
astwro/coord/CoordMatch.py
1
2213
# coding=utf-8
from __future__ import absolute_import, division, print_function

from astropy.coordinates import SkyCoord
import astropy.units as u
import numpy as np


class CoordMatch(object):
    """Two-catalogue cross-match.

    Object interface to `astropy.coordinates.match_to_catalog_sky`."""

    def __init__(self, cooA, cooB, radius_match=0.5, radius_separated=None, unit=None):
        super(CoordMatch, self).__init__()
        if radius_separated is None:
            radius_separated = radius_match
        if not isinstance(radius_match, u.Quantity):
            radius_match = radius_match * u.arcsec
        if not isinstance(radius_separated, u.Quantity):
            radius_separated = radius_separated * u.arcsec
        self.r_match = radius_match
        self.r_spe = radius_separated

        kwargs = {} if unit is None else {'unit': unit}
        self.A = SkyCoord(cooA, **kwargs)
        self.B = SkyCoord(cooB, **kwargs)

        self._ABidx = None
        self._ABdist = None

    def _calc_diff(self):
        self._ABidx, self._ABdist, _ = self.A.match_to_catalog_sky(self.B)

    @property
    def sepAB(self):
        if self._ABdist is None:
            self._calc_diff()
        return self._ABdist

    @property
    def mapAB(self):
        if self._ABidx is None:
            self._calc_diff()
        return self._ABidx

    @property
    def lenB(self):
        return len(self.B)

    @property
    def lenA(self):
        return len(self.A)

    @property
    def mAB(self):
        return self.sepAB < self.r_match

    @property
    def mBA(self):
        r = np.zeros_like(self.B, dtype=bool)
        r[self.iBA] = True
        return r

    @property
    def mAonly(self):
        return ~self.mAB

    @property
    def mBonly(self):
        return ~self.mBA

    @property
    def iAonly(self):
        return np.arange(self.lenA)[self.mAonly]

    @property
    def iBonly(self):
        return np.arange(self.lenB)[self.mBonly]

    @property
    def iBA(self):
        return np.unique(self.mappediBA)

    @property
    def iAB(self):
        return np.arange(self.lenA)[self.mAB]

    @property
    def mappediBA(self):
        return self.mapAB[self.mAB]
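

# --- Illustrative sketch (not part of the module above) ---
# Minimal usage of CoordMatch: cross-match two tiny catalogues given as
# "ra dec" strings (the `unit` kwarg is forwarded to SkyCoord). Guarded so it
# only runs when the file is executed directly:
if __name__ == '__main__':
    cat_a = ['10.1 20.2', '10.5 20.6']
    cat_b = ['10.1 20.2', '30.0 -5.0']
    m = CoordMatch(cat_a, cat_b, radius_match=1.0, unit=(u.deg, u.deg))
    print(m.iAB)     # A entries with a B counterpart within 1 arcsec -> [0]
    print(m.iAonly)  # A entries without a counterpart -> [1]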
mit
-650,743,030,654,543,700
21.814433
87
0.597379
false
3.501582
false
false
false
zhangzr1026/monitor2
src/collector/tasks/APP_RECORD/projectXfile.py
1
1566
'''
Created on 2014-12-29

@author: Administrator
'''
from lib import db_mysql
from lib import common


def conf_file_daily_num(resource=None):
    '''
    Get daily file num.
    Just yesterday, because today is not finished yet.
    '''
    yesterday = common.lastday()
    TARGET_TABLE = 'apprec_file_daily_num'

    DBCoon = db_mysql.connect(user=resource['db']['user'],
                              passwd=resource['db']['passwd'],
                              host=resource['db']['host'],
                              port=resource['db']['port'],
                              db=resource['db']['db'])

    ''' Get Data '''
    # daily file #
    mFile = db_mysql.Model('file_conf_info', DBCoon)
    strWhere = "create_time>'%s 00:00:00' and create_time<='%s 23:59:59'" % (yesterday, yesterday)
    dataResult = mFile.field("count(*) AS num").where(strWhere).find()
    if dataResult == False:
        return False
    fileNum = dataResult['num']

    # daily effective conf #
    strWhere = "type=2 and create_time>'%s 00:00:00' and create_time<='%s 23:59:59'" % (yesterday, yesterday)
    dataResult = mFile.field("count(*) AS num").where(strWhere).find()
    if dataResult == False:
        return False
    fileVideoNum = dataResult['num']

    ''' Set Value '''
    values = dict()
    values['type'] = 0
    values['real_time'] = "%s 23:59:59" % yesterday
    values['file_num'] = fileNum
    values['file_video_num'] = fileVideoNum

    ''' fill message body '''
    msgBody = common.fillMsgData(TARGET_TABLE, values)
    return msgBody
lgpl-3.0
-2,242,246,343,074,945,800
28
108
0.572158
false
3.616628
false
false
false
UCSC-MedBook/MedBook_
tools/old-external-tools/shazam/htmlFG.py
1
17966
#!/usr/bin/python2.6

import sys, string, os, time, fnmatch, imgFG, markup, re
from markup import oneliner as o
from numpy import *

rootDir = ""
pngDir = ""
pngBase = 'png/'
pathwayNameDict = {}
entityDict = {}
entityFile = {}
imgFG.printPDF = True
class1 = []
class2 = []
class3 = []


def parseContrast(file_name, red_label, grey_label):
    global class1
    global class2
    inFile = open(file_name)
    #class1 = ["DTB-004", "DTB-009", "DTB-024Pro", "DTB-030", "DTB-034", "DTB-036", "DTB-046", "DTB-049", "DTB-053", "DTB-064", "DTB-073"]
    #class2 = ["DTB-003", "DTB-005", "DTB-011", "DTB-018", "DTB-022", "DTB-023", "DTB-038", "DTB-040", "DTB-060", "DTB-063", "DTB-071", "DTB-080"]
    lineCount = 0
    for line in inFile:
        lineCount += 1
        data = line[:-1].split('\t')
        if len(data) == 2:
            sample = data[0]
            if sample == 'Sample':
                continue
            s_class = data[1]
            if s_class == red_label:
                class1.append(sample)
            elif grey_label == 'Null':
                class2.append(grey_label)
            elif s_class == grey_label:
                class2.append(sample)
            else:
                print "invalid sample label", line
    inFile.close()


def getPathwayName(pid):
    pid = pid.split('_')
    if len(pid) != 2:
        return "N/A"
    pid = pid[1]
    pid = re.sub("\.", "", pid)
    try:
        name = pathwayNameDict[pid]
    except:
        name = "N/A"
    return name


def initEntityDict(file_name):
    inFile = open(file_name)
    lineCount = 0
    for line in inFile:
        lineCount += 1
        data = line[:-1].split('\t')
        if len(data) == 2:
            type = data[0]
            name = data[1]
            if name in entityDict:
                if entityDict[name] != type and file_name == entityFile[name]:
                    print "on line ", lineCount, name, "cannot be assigned ", type, "when it is", entityDict[name], "in", file_name, entityFile[name]
                    assert(entityDict[name] == type)
                elif entityDict[name] != type:
                    if type != 'protein' and entityFile[name] == 'protein':
                        print "WARNING", lineCount, name, "has multiple types ", type, "and", entityDict[name], "in", file_name, entityFile[name]
                        type = 'protein'
            entityDict[name] = type
            entityFile[name] = file_name
    inFile.close()


def initPathwayNameDict(path_file="pathway_pids.tab"):
    inFile = open(path_file)
    for line in inFile:
        data = line[:-1].split('\t')
        pid = data[0]
        name = data[1]
        pathwayNameDict[pid] = name
    inFile.close()


def getFilesMatching(baseDir, patterns):
    list = []
    for root, dirs, files in os.walk(baseDir):
        for file in files:
            ptr = os.path.join(root, file)
            for pattern in patterns:
                if fnmatch.fnmatch(ptr, pattern):
                    list.append(ptr)
    return list


def writePageToFile(page, fname):
    outFile = open(fname, 'w')
    outFile.write(str(page))
    outFile.close()


def initializePage(t, h, sort_list="[[9,1]]"):
    currentTime = time.localtime()
    dateTime = str(currentTime[1]) + '/' + str(currentTime[2]) + '/' + str(currentTime[0]) + " "
    dateTime += str(currentTime[3]) + ":" + str(currentTime[4]) + ":" + str(currentTime[5])
    csses = "style.css"
    tsStr = '\n$(document).ready(function()\n'
    tsStr += ' {\n'
    tsStr += ' $("table").tablesorter({\n'
    tsStr += ' // sort on the tenth column , order desc \n'
    tsStr += ' sortList: ' + sort_list + ' \n'
    tsStr += ' }); \n'
    tsStr += ' }\n'
    tsStr += ');\n'
    scripts = [('js/jquery-latest.js', ['javascript', '']),
               ('js/jquery.tablesorter.min.js', ['javascript', '']),
               ('js/jquery.metadata.js', ['javascript', '']),
               ('', ['javascript', tsStr])]
    page = markup.page()
    pathway_name = re.sub(" ", "_", re.sub("/", "_", t))
    summary_tsv = open(rootDir + pathway_name + '.tsv', 'wb')
    summary_tsv.write("Gene\tAvg num Alterations\tTotal alterations\tnum genes\tmin mean truth\tmax mean truth\tmin mean any\tmax mean any\tnormalized activity\n")
    page.init(title=t, header=h, script=scripts,
              css=(csses, 'print, projection, screen'),
              footer="Last modified on " + dateTime)
    return page, summary_tsv


def putSummaryTable(p, b, data, id, tsv):
    labels = data["sample"]["labels"]
    p.table(border=b, id=id, class_='tablesorter')
    p.thead()
    p.tr()
    p.th("Entity - Gene or Complex or Molecule")
    p.th(labels, class_="{sorter:'digit'}")
    p.tr.close()
    p.thead.close()
    p.tbody()
    for d in data["sample"]:
        if d == "labels":
            continue
        vals = data["sample"][d]
        p.tr()
        # name of gene
        geneUrl = 'http://www.genecards.org/cgi-bin/carddisp.pl?gene=' + d
        tsv.write('<a href=%s target="_blank">%s</a>\t' % (geneUrl, d))
        p.td(o.a(d, href=geneUrl, target="_blank"))
        tmp = [round(v, 3) for v in vals]
        for v in vals:
            tsv.write('%s\t' % str(round(v, 3)))
        p.td(tmp)
        p.tr.close()
        tsv.write('\n')
    p.tbody.close()
    tsv.close()
    p.table.close()


def getPathwayByFilename(f):
    i = f.find("pid")
    if i == -1:
        print "string 'pid' not found in file name", f
        sys.exit(0)
    tmp = f[i:-3].split('_')
    pid = tmp[0] + '_' + tmp[1]
    pid = re.sub("\.", "", pid)
    print "pid:", pid
    return pid, getPathwayName(pid)


def summarizePathway(samples, data, entitySummary):
    sampleIndex = []
    nwIndex = []
    naIndex = []
    for i in range(len(samples)):
        s = samples[i]
        if s.startswith("nw_"):
            nwIndex.append(i)
        elif s.startswith("na_"):
            naIndex.append(i)
        else:
            sampleIndex.append(i)

    totalOutliers = 0
    totalActivity = 0
    count = 0
    geneCount = 0
    for d in entitySummary["sample"]:
        if d == "labels":
            continue
        vals = entitySummary["sample"][d]
        totalOutliers += vals[6]
        try:
            totalActivity += vals[7]
        except:
            print "error: no activity for ", d
            sys.exit(2)
            totalActivity += 0
        try:
            if entityDict[d] == 'protein':
                geneCount += 1
        except:
            pass
        count += 1
    if geneCount > 0:
        avgOutliers = 1.0 * totalOutliers / geneCount
    else:
        avgOutliers = 0.0
    print "entities", count, "genes", geneCount

    minMean = 1000
    maxMean = -1000
    #minMeanNw = 1000
    #maxMeanNw = -1000
    minMeanNa = 1000
    maxMeanNa = -1000
    for d in data:
        vals = data[d]
        tmp = [vals[i] for i in sampleIndex]
        m = mean(tmp)
        if m < minMean:
            minMean = m
        elif m > maxMean:
            maxMean = m
        #tmp = [vals[i] for i in nwIndex]
        #m = mean(tmp)
        #if m < minMeanNw:
        #    minMeanNw = m
        #elif m > maxMeanNw:
        #    maxMeanNw = m
        tmp = [vals[i] for i in naIndex]
        m = mean(tmp)
        if m < minMeanNa:
            minMeanNa = m
        elif m > maxMeanNa:
            maxMeanNa = m

    if geneCount < 10:
        return None

    summary = {}
    summary["Avg Num Alterations"] = avgOutliers
    summary["Total Alterations"] = totalOutliers
    summary["Num Genes"] = geneCount
    summary["Min Mean Truth"] = minMean
    summary["Max Mean Truth"] = maxMean
    summary["Min Mean Any"] = minMeanNa
    summary["Max Mean Any"] = maxMeanNa
    if geneCount > 0:
        summary["Normalized Activity"] = 100 * totalActivity / geneCount
        print "summary Normalized Activity", 100 * totalActivity / geneCount
    else:
        print "#warning geneCount = 0"
    summary["order"] = ("Avg Num Alterations", "Total Alterations", "Num Genes",
                        "Min Mean Truth", "Max Mean Truth",
                        "Min Mean Any", "Max Mean Any", "Normalized Activity")
    return summary


def fileData(fname):
    inFile = open(fname)
    line = inFile.readline()
    header = line[:-1].split('\t')
    sample_names = header[1:]
    fData = {}
    for line in inFile:
        data = line[:-1].split('\t')
        name = data[0]
        data = data[1:]
        if len(name.split("__")) > 1:
            continue
        try:
            vals = [float(d) for d in data]
            fData[name] = vals
        except:
            continue
    return sample_names, fData


def createSampleListPage(path_f, parametric, uniqueName, red_label, grey_label):
    samples, data = fileData(path_f)
    pid, pathwayName = getPathwayByFilename(path_f)
    print "pathway:", pathwayName
    if parametric:
        imgFilename = pngDir + uniqueName + '_' + pid + "_p_summary.png"
    else:
        imgFilename = pngDir + uniqueName + '_' + pid + "_np_summary.png"
    #print "#image file ", imgFilename, "root", rootDir, "png", pngDir
    imgSize = (12, 5)
    pathwayName, entitySummary, pngFile = imgFG.createPlotFromData(
        pathwayName, imgSize, imgFilename, parametric, samples, data,
        red_label, grey_label, class1, class2)
    basePNG = os.path.basename(pngFile)
    page, summary_tsv = initializePage(t=pathwayName + " -- " + uniqueName,
                                       h="", sort_list="[[8,1]]")
    # ipl plot at top of page
    #summary_tsv.write('<img src="%s" alt="Summary Plot"\n' % pngDir+basePNG)
    #summary_tsv.write('Result table\n')
    page.img(src=pngBase + basePNG, alt="Summary Plot")
    page.p("Result table")
    putSummaryTable(p=page, b="1", data=entitySummary, id="result_table", tsv=summary_tsv)
    fname = basePNG[:-4] + ".html"
    print "createSampleListPage"
    writePageToFile(page, rootDir + fname)
    summary = summarizePathway(samples, data, entitySummary)
    return fname, pathwayName, summary


def createGeneListPage(path_f, parametric, uniqueName, red_label, grey_label):
    samples, data = fileData(path_f)
    pid, pathwayName = getPathwayByFilename(path_f)
    print "pathway:", pathwayName
    if parametric:
        imgFilename = pngDir + uniqueName + '_' + pid + "_p_summary.png"
    else:
        imgFilename = pngDir + uniqueName + '_' + pid + "_np_summary.png"
    print "#image file ", imgFilename, "root", rootDir, "png", pngDir
    imgSize = (12, 5)
    pathwayName, entitySummary, pngFile = imgFG.createPlotFromData(
        pathwayName, imgSize, imgFilename, parametric, samples, data,
        red_label, grey_label, class1, class2)
    basePNG = os.path.basename(pngFile)
    page, summary_tsv = initializePage(t=pathwayName + " -- " + uniqueName,
                                       h="", sort_list="[[8,1]]")
    # ipl plot at top of page
    #summary_tsv.write('<img src="%s" alt="Summary Plot"\n' % pngDir+basePNG)
    #summary_tsv.write('Result table\n')
    page.img(src=pngBase + basePNG, alt="Summary Plot")
    page.p("Result table")
    putSummaryTable(p=page, b="1", data=entitySummary, id="result_table", tsv=summary_tsv)
    fname = basePNG[:-4] + ".html"
    print "createGeneListPage"
    writePageToFile(page, rootDir + fname)
    summary = summarizePathway(samples, data, entitySummary)
    return fname, pathwayName, summary


def putResultsTable(p, b, data, id):
    # p -> page
    # data -> html_filename, pathwayName, summary dictionary (one row per pathway)
    r = data[0]
    summaryVals = r[2]
    header = summaryVals["order"]
    p.table(border=b, id=id, class_='tablesorter')
    p.thead()
    p.tr()
    p.th("Image")
    p.th("Name")
    p.th(header, class_="{sorter:'digit'}")
    p.tr.close()
    p.thead.close()
    summary_tsv = open(rootDir + '/summary.tsv', 'wb')
    summary_tsv.write("Pathway\tAvg num Alterations\tTotal alterations\tnum genes\tmin mean truth\tmax mean truth\tmin mean any\tmax mean any\tnormalized activity\n")
    p.tbody()
    rowCount = 0
    rowSum = [0 for h in header]
    for r in data:
        htmlFile = r[0]
        pathwayName = r[1]
        summaryVals = r[2]
        p.tr()
        base = os.path.basename(htmlFile)
        # plot of ipls
        p.td(o.a(o.img(src=pngBase + base[:-5] + ".png", width=100), href=base))
        # link to pathway details page
        p.td(o.a(pathwayName, href=base))
        summary_tsv.write(pathwayName + '\t')
        vals = [round(summaryVals[h], 3) for h in header]
        for v in vals:
            summary_tsv.write(str(v) + '\t')
        # additional columns of data
        p.td(vals)
        i = 0
        # add data to totals for bottom of page
        for h in header:
            rowSum[i] += summaryVals[h]
            i += 1
        # end of row
        summary_tsv.write('\n')
        p.tr.close()
    summary_tsv.close()
    p.tbody.close()
    p.tbody()
    p.tr()
    p.td('')
    p.td('Total')
    # last row in table is sums
    p.td(rowSum)
    p.tr.close()
    p.tbody.close()
    p.table.close()


def createIndexPage(pResults, npResults, index_html):
    page, summary_tsv = initializePage(t="Factor Graph Results", h="")
    page.p("Per-pathway summary of activity")
    putResultsTable(p=page, b="1", data=pResults, id="result_table1")
    #page.p("Non-Parametric Results")
    #putResultsTable(p=page, b="1", data=npResults, id="result_table2")
    print "createIndexPage", index_html
    writePageToFile(page, index_html)


def createTopPathwaysPage(pResults):
    page, summary_tsv = initializePage(t="Per-pathway summary of activity", h="")
    page.p("Per-pathway summary of activity")
    page.p('<a href="index.html">Click here for all pathways</a>')
    putResultsTable(p=page, b="1", data=pResults[0:10], id="results")
    page.p('<a href="index.html">Click here for all pathways</a>')
    print "createTopPathwaysPage"
    writePageToFile(page, rootDir + "summary.html")


def main(directory, pathway_directory, contrast_file, red_label, grey_label, index_html_path):
    # create all html pages for each individual run, including images
    # collect objects containing html page, whatever pathway-level summary info (in 2d dict)
    # use objects to generate root level index.html
    global rootDir
    global pngDir
    global class1
    global class2

    parseContrast(contrast_file, red_label, grey_label)
    print len(class1), "samples for class1 ", red_label, class1
    print len(class2), "samples for class2 ", grey_label, class2
    fdir = os.path.dirname(index_html_path)
    print "dir", fdir
    if fdir == "":
        fdir = os.getcwd()
    rootDir = fdir + '/html/'
    print "rootDir", rootDir

    pathways = getFilesMatching(pathway_directory, ["*pid*tab", "*pid*spf"])
    for fname in pathways:
        initEntityDict(fname)

    print "reading ipls", directory
    files = getFilesMatching(directory, ["*transpose*.out"])
    pngDir = rootDir + pngBase
    os.system('mkdir -p ' + pngDir)
    os.system('cp -p ./style.css ' + rootDir)
    os.system('cp -pr ./js ' + rootDir)
    print "mkdir -p " + pngDir
    pResults = []
    parametric = True
    datasetName = os.path.basename(directory.strip('/'))
    for f in files:
        if f == "merged_transpose_pid_example.out":
            continue
        print "File: " + f, "dataset:", datasetName
        # list of genes and complexes for a pathway
        r = createGeneListPage(f, parametric, datasetName, red_label, grey_label)
        # fname, pathwayName, summary
        print " #createGeneListPage pathway ", r[1], r[2], r[0]
        if r[2] != None:
            pResults.append(r)

    npResults = []
    #parametric = False
    #for f in files:
    #    if f == "merged_transpose_pid_example.out":
    #        continue
    #    r = createGeneListPage(f, parametric, directory.strip('/'))
    #    npResults.append(r)

    #pResults.sort(key=lambda x: -x[2]["Avg Num Alterations"])
    pResults.sort(key=lambda x: -x[2]["Normalized Activity"])
    # main pathway summary page (all pathways)
    createIndexPage(pResults, npResults, index_html_path)
    # main pathway summary page (top 10)
    createTopPathwaysPage(pResults)


def usage():
    print "usage: python htmlFG.py ipl_directory pathway_directory pathway_pids.tab contrast_file class1_label class2_label index.html"
    print "   ipl_directory contains one IPL matrix per pathway"
    print "   pathway_directory contains one spf file per pathway"
    print "   pathway_pids.tab is a 3 col file with list of pathways in pathway_directory: pid, description, source"
    print "   contrast_file contains tab delimited file, first col is sample id and second is class of sample"
    print "   Note: pathway names must start with pid_ and end with _pathway.tab"
    print
    sys.exit(0)


if __name__ == "__main__":
    if len(sys.argv) != 8:
        usage()
    directory = sys.argv[1]
    pathways = sys.argv[2]
    path_list = sys.argv[3]
    contrast_file = sys.argv[4]
    red_label = sys.argv[5]
    grey_label = sys.argv[6]
    index_html_path = sys.argv[7]
    initPathwayNameDict(path_file=path_list)
    main(directory, pathways, contrast_file, red_label, grey_label, index_html_path)
bsd-3-clause
7,746,036,898,464,188,000
31.844607
166
0.560169
false
3.495331
false
false
false
mcxiaoke/python-labs
archives/learning/security/otp.py
1
4777
# -*- coding: utf-8 -*-
"""
    otpauth
    ~~~~~~~

    Implements two-step verification of HOTP/TOTP.

    :copyright: (c) 2013 - 2014 by Hsiaoming Yang.
    :license: BSD, see LICENSE for more details.
"""

import base64
import hashlib
import hmac
import struct
import sys
import time
import warnings

if sys.version_info[0] == 3:
    python_version = 3
    string_type = str
else:
    python_version = 2
    string_type = unicode
    range = xrange


class OTPAuth(object):
    """One Time Password Authentication.

    :param secret: A secret token for the authentication.
    """

    def __init__(self, secret):
        self.secret = secret

    def hotp(self, counter=4):
        """Generate a HOTP code.

        :param counter: HOTP is a counter based algorithm.
        """
        return generate_hotp(self.secret, counter)

    def totp(self, period=30):
        """Generate a TOTP code.

        A TOTP code is an extension of the HOTP algorithm.

        :param period: A period that a TOTP code is valid in seconds
        """
        return generate_totp(self.secret, period)

    def valid_hotp(self, code, last=0, trials=100):
        """Validate a HOTP code.

        :param code: A number that is less than 6 characters.
        :param last: Guess HOTP codes starting from last + 1.
        :param trials: Guessing ends at last + trials + 1.
        """
        if not valid_code(code):
            return False

        code = int(code)
        for i in range(last + 1, last + trials + 1):
            if self.hotp(counter=i) == code:
                return i
        return False

    def valid_totp(self, code, period=30):
        """Validate a TOTP code.

        :param code: A number that is less than 6 characters.
        :param period: A period that a TOTP code is valid in seconds
        """
        return valid_code(code) and self.totp(period) == int(code)

    def to_uri(self, type, label, issuer, counter=None):
        """Generate the otpauth protocol string.

        :param type: Algorithm type, hotp or totp.
        :param label: Label of the identifier.
        :param issuer: The company, the organization or something else.
        :param counter: Counter of the HOTP algorithm.
        """
        type = type.lower()

        if type not in ('hotp', 'totp'):
            raise ValueError('type must be hotp or totp')

        if type == 'hotp' and not counter:
            raise ValueError('HOTP type authentication needs a counter')

        secret = base64.b32encode(to_bytes(self.secret))
        # bytes to string
        secret = secret.decode('utf-8')
        # remove pad string
        secret = secret.strip('=')

        # https://code.google.com/p/google-authenticator/wiki/KeyUriFormat
        url = ('otpauth://%(type)s/%(label)s?secret=%(secret)s'
               '&issuer=%(issuer)s')
        dct = dict(
            type=type, label=label, issuer=issuer,
            secret=secret, counter=counter
        )
        ret = url % dct
        if type == 'hotp':
            ret = '%s&counter=%s' % (ret, counter)
        return ret

    def to_google(self, type, label, issuer, counter=None):
        """Generate the otpauth protocol string for Google Authenticator.

        .. deprecated:: 0.2.0
           Use :func:`to_uri` instead.
        """
        warnings.warn('deprecated, use to_uri instead', DeprecationWarning)
        return self.to_uri(type, label, issuer, counter)


def generate_hotp(secret, counter=4):
    """Generate a HOTP code.

    :param secret: A secret token for the authentication.
    :param counter: HOTP is a counter based algorithm.
    """
    # https://tools.ietf.org/html/rfc4226
    msg = struct.pack('>Q', counter)
    digest = hmac.new(to_bytes(secret), msg, hashlib.sha1).digest()

    ob = digest[19]
    if python_version == 2:
        ob = ord(ob)

    pos = ob & 15
    base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff
    token = base % 1000000
    return token


def generate_totp(secret, period=30):
    """Generate a TOTP code.

    A TOTP code is an extension of the HOTP algorithm.

    :param secret: A secret token for the authentication.
    :param period: A period that a TOTP code is valid in seconds
    """
    counter = int(time.time()) // period
    return generate_hotp(secret, counter)


def to_bytes(text):
    if isinstance(text, string_type):
        # Python3 str -> bytes
        # Python2 unicode -> str
        text = text.encode('utf-8')
    return text


def valid_code(code):
    code = string_type(code)
    return code.isdigit() and len(code) <= 6


if __name__ == '__main__':
    gotp = OTPAuth('xjom6zpducm4mltk5stxcogv3wcvq7do')
    print gotp.totp()
    dotp = OTPAuth('PBFCKI5CSTEGFKDV4RHCLFZSCU')
    print dotp.totp()
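

# --- Illustrative check (not part of the original module) ---
# generate_hotp() implements RFC 4226, so the RFC's reference secret
# "12345678901234567890" should reproduce the published test values
# (Appendix D: counter 0 -> 755224, counter 1 -> 287082).
if __name__ == '__main__':
    assert generate_hotp('12345678901234567890', counter=0) == 755224
    assert generate_hotp('12345678901234567890', counter=1) == 287082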
apache-2.0
2,432,917,757,027,583,500
26.454023
75
0.601005
false
3.608006
false
false
false
bmd/twittrscrapr
twittrscrapr/scrapers/profilescrapr.py
1
2613
from datetime import datetime as dt
import logging

from base_scraper import TwittrScrapr

logger = logging.getLogger("TwittrScrapr.ProfileScrapr")


class ProfileScrapr(TwittrScrapr):

    def __init__(self, api_keys, writer):
        """
        Construct the ProfileScrapr object.

        :param api_keys: A dict containing Twitter API parameters
        :param writer: A writer that implements the CSV module's DictReader
            and DictWriter interfaces
        :return: None
        """
        super(ProfileScrapr, self).__init__(api_keys, writer)

    def _call_lookup_method(self, method, profile_type, profiles):
        """
        Call the appropriate lookup method for the profile type provided and
        return the result from the Twitter API.

        :param method:
        :param profile_type: "screenname" or "user_ids"
        :param profiles: an array of profiles to iterate over
        :return: Dict
        """
        if profile_type == 'screenname':
            profs = method(screen_name=','.join(profiles))
        else:  # type is user_ids
            profs = method(user_id=','.join(profiles))

        results = []
        for prof in profs:
            results.append({
                'screen_name': prof['screen_name'],
                'display_name': prof['name'],
                'twitter_join_dt': prof['created_at'],
                'user_id': prof['id_str'],
                'followers': prof['followers_count'],
                'scrape_date': dt.strftime(dt.now(), '%Y-%m-%d'),
                'location': prof['location'],
                'website': prof['url'],
                'tweets': prof['statuses_count'],
                'friends': prof['friends_count'],
                'listed_count': prof['listed_count']
            })

        self.reset_time = self.api.get_lastfunction_header('x-rate-limit-reset')
        self.calls_remaining = self.api.get_lastfunction_header('x-rate-limit-remaining')

        return results

    @TwittrScrapr.error_handler
    def fetch_user_profiles(self):
        """
        Scrape a list of user profiles using the Twitter API's batch endpoints.

        :return: None
        """
        self.check_rate_limit()

        for x in range(0, (len(self.scrape_queue) // 100 + 1)):
            start = x * 100
            end = start + 100
            results = self._call_lookup_method(
                self.api.lookup_user,
                self.search_type,
                self.scrape_queue[start:min(end, len(self.scrape_queue))]
            )
            self.writer.write(results)

        logger.info("Completed data pull")
        self.writer.cleanup()
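

# --- Illustrative sketch (not part of the scraper above) ---
# fetch_user_profiles() walks the queue in batches of 100, the maximum the
# Twitter users/lookup endpoint accepts per call. The slicing logic in
# isolation, with integers standing in for screen names:
if __name__ == '__main__':
    queue = list(range(250))
    for x in range(0, len(queue) // 100 + 1):
        start = x * 100
        end = start + 100
        batch = queue[start:min(end, len(queue))]
        print(len(batch))  # 100, 100, 50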
mit
-934,643,122,492,740,400
33.381579
113
0.570608
false
4.076443
false
false
false
ParuninPavel/lenta4_hack
vkapp/bot/models/news.py
1
1052
from django.db import models

from .users import Blogger, Admin


class News(models.Model):
    id = models.AutoField(primary_key=True)
    link = models.CharField(max_length=300, blank=True, null=True)
    pic = models.CharField(max_length=300, blank=True, null=True)
    blogger = models.ForeignKey(Blogger, on_delete=models.CASCADE, null=True)
    media = models.CharField(max_length=3000, blank=True, null=True)
    date_time = models.DateTimeField(auto_now_add=True)


class AdminReview(models.Model):
    id = models.AutoField(primary_key=True)
    admin = models.ForeignKey(Admin, on_delete=models.CASCADE)
    news = models.ForeignKey(News, on_delete=models.CASCADE, unique=True)
    rating = models.IntegerField()
    date_time = models.DateTimeField(auto_now_add=True)


class Publication(models.Model):
    id = models.AutoField(primary_key=True)
    admin = models.ForeignKey(Admin, on_delete=models.CASCADE)
    news = models.ForeignKey(News, on_delete=models.CASCADE, unique=True)
    date_time = models.DateTimeField(auto_now_add=True)
mit
6,350,823,177,180,132,000
36.571429
77
0.734791
false
3.460526
false
false
false
caperren/Archives
OSU Robotics Club/Mars Rover 2017-2018/software/testing/ubiradio_testing.py
1
2372
import paramiko
import json
import time

# ath0 21 channels in total; available frequencies :
# Channel 01 : 2.412 GHz
# Channel 31 : 2.414 GHz
# Channel 02 : 2.417 GHz
# Channel 32 : 2.419 GHz
# Channel 03 : 2.422 GHz
# Channel 33 : 2.424 GHz
# Channel 04 : 2.427 GHz
# Channel 34 : 2.429 GHz
# Channel 05 : 2.432 GHz
# Channel 35 : 2.434 GHz
# Channel 06 : 2.437 GHz
# Channel 36 : 2.439 GHz
# Channel 07 : 2.442 GHz
# Channel 37 : 2.444 GHz
# Channel 08 : 2.447 GHz
# Channel 38 : 2.449 GHz
# Channel 09 : 2.452 GHz
# Channel 39 : 2.454 GHz
# Channel 10 : 2.457 GHz
# Channel 40 : 2.459 GHz
# Channel 11 : 2.462 GHz
# Current Frequency:2.417 GHz (Channel 2)

# Sets: iwconfig ath0 channel 01
# Gets: iwlist ath0 channel

# NOTE
# Only the access point has to get changed; the station (client) will
# automatically choose the new freq

channel = 3

get_general_info = "wstalist"
get_wireless_info = "iwlist ath0 channel"
set_wireless_frequency = "iwconfig ath0 channel " + "%02d" % channel

# iwconfig ath0 freq 2.456G

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())  # pass an instance, not the class

# Before anyone complains, I'm not worried about this password being online.
# We only set one because the web interfaces HAVE to have one
ssh.connect("192.168.1.20", username="ubnt", password="rover4lyfe^", compress=True)

while True:
    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(get_general_info)

    output_json = json.loads(ssh_stdout.read())[0]

    successful_transmit_percent = output_json["ccq"]
    quality = output_json["airmax"]["quality"]
    capacity = output_json["airmax"]["capacity"]
    rx_rate = output_json["rx"]
    tx_rate = output_json["tx"]
    ground_tx_latency = output_json["tx_latency"]
    rover_tx_latency = output_json["remote"]["tx_latency"]

    print successful_transmit_percent, " | ", quality, " | ", capacity, " | ", rx_rate, " | ", tx_rate, " | ", ground_tx_latency, " | ", rover_tx_latency

    time.sleep(0.25)

# ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(set_wireless_frequency)
# ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(get_wireless_info)
#
# print ssh_stdout.read()
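
# --- Illustrative sketch (kept commented, below the polling loop, for reference) ---
# wstalist returns JSON; a sample payload with the fields read above (all
# values made up), parsed the same way but without needing SSH access:
# sample = ('[{"ccq": 96, "airmax": {"quality": 88, "capacity": 73},'
#           ' "rx": 130, "tx": 117, "tx_latency": 2,'
#           ' "remote": {"tx_latency": 3}}]')
# station = json.loads(sample)[0]
# print station["ccq"], station["airmax"]["quality"], station["airmax"]["capacity"]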
gpl-3.0
-8,870,408,355,310,936,000
34.402985
153
0.623524
false
3.171123
false
false
false
leschzinerlab/FreeHand
lib/fspace_param_consolidate.py
1
1070
#!/usr/bin/env python

import linecache
import sys

# Convert parameter file format with CTF info

untilt = sys.argv[1]
ctf2 = sys.argv[2]

fout = '%s_format' % (ctf2[:-4])
o1 = open(fout, 'a')

o1.write("C Frealign format parameter file created from Search_fspace parameter file\n")
o1.write("C\n")
o1.write("C PSI THETA PHI SHX SHY MAG FILM DF1 DF2 ANGAST CCMax\n")

i = 1

tmp = open(ctf2, 'r')
tot = len(tmp.readlines())

while i <= tot:
    t = i + 3
    param = linecache.getline(untilt, t)
    ctf = linecache.getline(ctf2, i)

    l1 = param.split()
    l2 = ctf.split()

    psi = float(l1[1])
    theta = float(l1[2])
    phi = float(l1[3])
    shiftx = float(l1[4])
    shifty = float(l1[5])
    mag = float(l1[6])
    film = float(l1[7])
    df1 = float(l2[0])
    df2 = float(l2[1])
    astig = float(l2[2])
    a = (l1[10])
    test = '%s' % (a[-1:])
    if test == '*':
        CC = 50
    else:
        CC = float(l1[11])

    o1.write("%7d%8.3f%8.3f%8.3f%8.3f%8.3f%8.0f%6d%9.1f%9.1f%8.2f%7.2f\n" % (i, psi, theta, phi, shiftx, shifty, mag, film, df1, df2, astig, CC))

    i = i + 1

o1.write("C\n")
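
# --- Illustrative sketch (not part of the script above) ---
# The Frealign row written above is fixed-width; one demo row with made-up
# values shows the column layout the format string produces:
# >>> "%7d%8.3f%8.3f%8.3f%8.3f%8.3f%8.0f%6d%9.1f%9.1f%8.2f%7.2f" % (
# ...     1, 12.345, -6.789, 90.0, 1.5, -2.5, 10000.0, 1, 15000.0, 14800.0, 45.0, 12.34)
# '      1  12.345  -6.789  90.000   1.500  -2.500   10000     1  15000.0  14800.0   45.00  12.34'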
mit
2,811,378,361,588,945,000
17.77193
130
0.592523
false
2.049808
false
false
false
jordillinares/addons
stock_lot_enh_base/models/stock.py
1
10625
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2012 Tiny SPRL (http://tiny.be). All Rights Reserved
#
#    This module,
#    Copyright (C) 2015 Jordi Llinares López - bigandopen@bigandopen.com
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################

from openerp import models, fields, api
from openerp.exceptions import except_orm
from openerp.tools.translate import _

import time
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.float_utils import float_compare, float_round


class stock_picking(models.Model):
    _inherit = 'stock.picking'

    @api.multi
    def do_transfer(self):
        res = super(stock_picking, self).do_transfer()
        self.refresh()
        for picking in self:
            for move in picking.move_lines:
                if move.linked_move_operation_ids:
                    for operation_link in move.linked_move_operation_ids:
                        if operation_link.operation_id.lot_id:
                            reference_list = []
                            # Why this data format? see multi_m2o_text_widget
                            # module description.
                            add_reference = 'stock.picking,%s' % picking.id
                            # Write destination (reference to picking) on
                            # internal/outgoing pickings
                            if picking.picking_type_code != 'incoming':
                                if operation_link.operation_id.lot_id.destination:
                                    reference_list += operation_link.operation_id.lot_id.destination.split(";")
                                if add_reference not in reference_list:
                                    reference_list.append(add_reference)
                                destination = ";".join(reference_list) or False
                                if destination:
                                    operation_link.operation_id.lot_id.destination = destination
                            # Write origin (reference to picking) on incoming
                            # pickings
                            else:
                                if operation_link.operation_id.lot_id.origin:
                                    reference_list += operation_link.operation_id.lot_id.origin.split(";")
                                if add_reference not in reference_list:
                                    reference_list.append(add_reference)
                                origin = ";".join(reference_list) or False
                                if origin:
                                    operation_link.operation_id.lot_id.origin = origin
        return res


class stock_quant(models.Model):
    _inherit = 'stock.quant'

    @api.model
    def _quants_get_order(self, location, product, quantity, domain=[],
                          orderby='in_date'):
        ''' Implementation of removal strategies.
            If it cannot reserve, it will return a tuple (None, qty).
        '''
        context = self._context
        domain += location and [('location_id', 'child_of', location.id)] or []
        domain += [('product_id', '=', product.id)]
        if context.get('force_company'):
            domain += [('company_id', '=', context.get('force_company'))]
        else:
            domain += [('company_id', '=', self.env.user.company_id.id)]
        res = []
        offset = 0
        while float_compare(quantity, 0, precision_rounding=product.uom_id.rounding) > 0:
            quants = self.search(domain, order=orderby, limit=10, offset=offset)
            if not quants:
                res.append((None, quantity))
                break
            for quant in quants:
                # Here we implement a change that affects FEFO removal strategy
                # (orderby = 'removal_date, in_date, id'):
                # If a quant is already expired (removal_date < current date),
                # skip it and send a warning message.
                if orderby == 'removal_date, in_date, id':
                    if (quant.removal_date and
                            quant.removal_date < time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)):
                        if ('chatter_model' in context and
                                context.get('chatter_model', False) and
                                'chatter_id' in context and
                                context.get('chatter_id', False)):
                            model = self.env[context['chatter_model']]
                            # maybe our active model class does not inherit
                            # from 'mail.thread'
                            try:
                                record = model.browse(context['chatter_id'])
                                message = _('A quant of lot %s has been '
                                            'ignored because it seems to '
                                            'have expired.\nPlease check it'
                                            ' and, if needed, remove the '
                                            'whole lot from stock.'
                                            ) % (quant.lot_id.name,)
                                record.message_post(
                                    message,
                                    _('An expired lot must be retired!'),
                                    context=context)
                            finally:
                                # these pops throw an error:
                                # raise NotImplementedError("'pop' not supported
                                # on frozendict")
                                # self._context.pop('chatter_model')
                                # self._context.pop('chatter_id')
                                pass
                        continue
                rounding = product.uom_id.rounding
                if float_compare(quantity, abs(quant.qty), precision_rounding=rounding) >= 0:
                    res += [(quant, abs(quant.qty))]
                    quantity -= abs(quant.qty)
                elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0:
                    res += [(quant, quantity)]
                    quantity = 0
                    break
            offset += 10
        return res


class stock_production_lot(models.Model):
    _inherit = 'stock.production.lot'

    @api.model
    def _get_lotname(self):
        context = self._context
        if context.get('product_id', False):
            product = self.env['product.product'].browse(context['product_id'])
            if product.lot_creation_mode == 'manual':
                return False
            elif product.lot_creation_mode == 'auto' and product.lot_sequence:
                return self.env['ir.sequence'].get_id(product.lot_sequence.id)
        return self.env['ir.sequence'].get_id('stock.lot.serial',
                                              code_or_id='code')

    name = fields.Char('Lot number', required=True,
                       help="Unique lot/serial alphanumeric code.",
                       index=True, copy=False, default=_get_lotname)
    origin = fields.Char(
        'Origin', size=200, help="Reference of the document in which "
        "this lot was created (received or manufactured).", index=True)
    destination = fields.Char(
        'Destination', size=200, help="Reference of the documents "
        "in which this lot was used (consumed or served).", index=True)


class stock_transfer_details_items(models.TransientModel):
    _inherit = 'stock.transfer_details_items'

    def _get_quick_lot_creation_allowed(self):
        for detail in self:
            if detail.product_id.track_incoming:
                # if (detail.packop_id
                #         and detail.packop_id.picking_id
                #         and detail.packop_id.picking_id.picking_type_code == 'incoming'):
                #     detail.allows_quick_lot_creating = True
                detail.allows_quick_lot_creating = True
            else:
                detail.allows_quick_lot_creating = False

    allows_quick_lot_creating = fields.Boolean(
        'Quick lot creation allowed',
        compute=_get_quick_lot_creation_allowed,
        help="Technical field that determines if the quick lot creation "
             "button is shown for each detail row in the transfer wizard.")

    @api.multi
    def quick_lot_create(self):
        for detail in self:
            if (detail.product_id
                    and detail.product_id.lot_creation_mode == 'auto'
                    and (detail.product_id.track_incoming
                         or detail.product_id.track_outgoing
                         or detail.product_id.track_all)):
                self.lot_id = self.env['stock.production.lot'].with_context(
                    product_id=detail.product_id.id).create({})
            else:
                raise except_orm(
                    _('Warning!'),
                    _('Product does not have lot tracking enabled, or '
                      'has lot creation mode set to \'manual\'. '
                      'A new lot number won\'t be automatically '
                      'created.'))
        if self and self[0]:
            return self[0].transfer_id.wizard_view()
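

# --- Illustrative sketch (not part of the addon above) ---
# At its core, the reservation loop in _quants_get_order() is a greedy
# allocation: consume quants in removal order until the requested quantity is
# covered, reporting any shortfall as (None, remaining). Stripped of the ORM:
def _greedy_allocate_demo(quant_qtys, quantity):
    res = []
    for qty in quant_qtys:
        if quantity <= 0:
            break
        take = min(qty, quantity)
        res.append((qty, take))
        quantity -= take
    if quantity > 0:
        res.append((None, quantity))  # could not fully reserve
    return res

# _greedy_allocate_demo([5, 3, 4], 10) -> [(5, 5), (3, 3), (4, 2)]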
agpl-3.0
1,831,910,691,091,152,100
47.290909
107
0.483528
false
5.080823
false
false
false
donovanhide/BitMagic
bm/__init__.py
1
4167
## Copyright(c) 2009 William Waites <wwaites_at_gmail.com>
##
## Permission is hereby granted, free of charge, to any person
## obtaining a copy of this software and associated documentation
## files (the "Software"), to deal in the Software without restriction,
## including without limitation the rights to use, copy, modify, merge,
## publish, distribute, sublicense, and/or sell copies of the Software,
## and to permit persons to whom the Software is furnished to do so,
## subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included
## in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
## OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
## DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
## ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
## OTHER DEALINGS IN THE SOFTWARE.

from bm_ext import *

__all__ = ["BitVector", "strat"]

class BitVector(object):
    """
    >>> v = BitVector()
    >>> v.resize(64)
    >>> for i in range(32): v[i*2] = True
    ...
    >>> print v
    <BitVector 10101010101010101010101010101010...>
    >>> print v[0], v[1]
    True False
    >>> ### the length and number of bits that are set
    >>> print len(v), v.count()
    64 32
    >>> ### bitwise NOT
    >>> u = ~v
    >>> print u
    <BitVector 01010101010101010101010101010101...>
    >>> ### bitwise AND
    >>> print v&u
    <BitVector 00000000000000000000000000000000...>
    >>> ### bitwise OR
    >>> print v|u
    <BitVector 11111111111111111111111111111111...>
    >>> ### iteration
    >>> v = BitVector()
    >>> v.resize(10)
    >>> for i in range(5): v[i*2] = True
    ...
    >>> for k in v: print k
    ...
    0
    2
    4
    6
    8
    >>>
    """
    def __init__(self, v=None):
        if v is None:
            v = bvector()
            v.set_new_blocks_strat(strat.GAP)
        self.__vector__ = v

        self.count = v.count
        self.size = v.size
        self.resize = v.resize
        self.capacity = v.capacity
        self.set = v.set
        self.any = v.any
        self.none = v.none
        self.calc_stat = v.calc_stat
        self.optimize = v.optimize
        self.serialize = v.serialize
        self.deserialize = v.deserialize
        self.set_new_blocks_strat = v.set_new_blocks_strat

    def __str__(self):
        def _s():
            i = 0
            g = iter(self)
            size = len(self)
            max_size = 32
            while i < min(max_size, size):
                try:
                    one = g.next()
                    zeros = min(max_size, one) - i
                    if zeros > 0:
                        yield "0"*zeros
                        i = i + zeros
                    if one < max_size:
                        yield "1"
                        i = i + 1
                except StopIteration:
                    zeros = min(max_size, size) - i
                    if zeros > 0:
                        yield "0"*zeros
                        i = i + zeros
            if i < size:
                yield "..."
        return "<BitVector %s>" % ("".join(_s()),)

    def __len__(self):
        return len(self.__vector__)

    def __setitem__(self, k, v):
        self.__vector__[k] = v

    def __getitem__(self, k):
        return self.__vector__[k]

    def __and__(self, other):
        if isinstance(other, BitVector):
            other = other.__vector__
        return BitVector(self.__vector__ & other)

    def __or__(self, other):
        if isinstance(other, BitVector):
            other = other.__vector__
        return BitVector(self.__vector__ | other)

    def __invert__(self):
        return BitVector(~self.__vector__)

    def __eq__(self, other):
        if isinstance(other, BitVector):
            other = other.__vector__
        return self.__vector__ == other

    def __iter__(self):
        e = enumerator(self.__vector__, 0)
        end = enumerator(self.__vector__, 1)
        while True:
            if e < end:
                yield e.value()
            else:
                break
            e.next()

    def clear(self, free=False):
        self.__vector__.clear(free)

    def print_stats(self):
        st = statistics()
        self.calc_stat(st)
        print "Size:".rjust(25), len(self)
        print "Bits count:".rjust(25), self.count()
        print "Bit blocks:".rjust(25), st.bit_blocks
        print "GAP blocks:".rjust(25), st.gap_blocks
        print "Memory used:".rjust(25), "%.02fMB" % (float(st.memory_used) / 1024 / 1024)

if __name__ == '__main__':
    import doctest
    doctest.testmod()
mit
6,690,548,970,383,246,000
25.373418
84
0.62947
false
3.041606
false
false
false
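The record above wraps the bm C++ bitset library through the bm_ext extension, so it is not runnable without that module. As a rough illustration of the API its doctest exercises (resize/set/get, count, bitwise NOT/AND/OR, iteration over set bits), here is a minimal pure-Python stand-in; TinyBitVector is a hypothetical name and the set-based storage is only a teaching device, not how bm stores blocks.

class TinyBitVector(object):
    """Set-backed stand-in for the bm_ext-based BitVector above."""
    def __init__(self, size=0):
        self.size = size
        self.bits = set()
    def resize(self, n):
        self.size = n
        self.bits = set(b for b in self.bits if b < n)
    def __len__(self):
        return self.size
    def count(self):
        return len(self.bits)
    def __setitem__(self, k, v):
        if v:
            self.bits.add(k)
        else:
            self.bits.discard(k)
    def __getitem__(self, k):
        return k in self.bits
    def __invert__(self):
        out = TinyBitVector(self.size)
        out.bits = set(range(self.size)) - self.bits
        return out
    def __and__(self, other):
        out = TinyBitVector(min(self.size, other.size))
        out.bits = self.bits & other.bits
        return out
    def __or__(self, other):
        out = TinyBitVector(min(self.size, other.size))
        out.bits = self.bits | other.bits
        return out
    def __iter__(self):
        return iter(sorted(self.bits))

v = TinyBitVector()
v.resize(64)
for i in range(32):
    v[i * 2] = True
assert len(v) == 64 and v.count() == 32        # length vs. number of set bits
assert (~v).count() == 32                      # bitwise NOT flips every bit
assert (v & ~v).count() == 0 and (v | ~v).count() == 64
assert list(v)[:3] == [0, 2, 4]                # iteration yields set-bit indices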
sjdv1982/seamless
docs/archive/0.2-cleanup/fireworks/tutorial/cell-display-numpy.py
1
2519
from PyQt5.QtWidgets import QMainWindow, QLabel, QWidget, QFrame, QSizePolicy from PyQt5.QtGui import QImage, QPixmap from PyQt5.QtCore import Qt, QSize import numpy as np w = QMainWindow(size=QSize(640, 640)) ww = QWidget() w.setCentralWidget(ww) asp = AspectLayout(1.0) ww.setLayout(asp) w.setWindowFlags(Qt.WindowStaysOnTopHint) l = QLabel() l.setScaledContents(True) l.setSizePolicy(QSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)) asp.addWidget(l) l.setParent(ww) l.setFrameStyle(QFrame.NoFrame) w.show() def update(): if PINS.title.updated: w.setWindowTitle(PINS.title.get()) global arr arr = PINS.array.get() assert arr.dtype in (float, np.float32, np.uint8), arr.dtype arr = np.ascontiguousarray(arr) if arr.ndim == 1: arr = arr.reshape((len(arr), 1)) if arr.ndim == 3: if arr.shape[-1] == 4: arr = np.ascontiguousarray(arr[:,:,:3]) assert arr.shape[-1] == 3 if arr.dtype == np.uint8: arr_norm_255 = arr else: amin = arr.min(axis=0).min(axis=0) amax = arr.max(axis=0).max(axis=0) arange = np.maximum(amax-amin, 1e-12) arr_norm = (arr - amin) / arange arr_norm_255 = ((arr_norm- 1e-6)*256).astype(np.uint8) width, height = arr.shape[1], arr.shape[0] im = QImage(arr_norm_255, width, height, width*3, QImage.Format_RGB888) elif arr.ndim == 2: if arr.dtype == np.uint8: arr_norm_255 = arr else: amin = arr.min() amax = arr.max() arange = np.maximum(amax-amin, 1e-12) arr_norm = (arr - amin) / arange arr_norm_255 = ((arr_norm- 1e-6)*256).astype(np.uint8) arr_color = np.zeros((arr.shape) + (3,), dtype=np.uint8) arr_color[:,:,0] = arr_norm_255 arr_color[:,:,1] = 128 - np.abs(arr_norm_255.astype(int)-128) arr_color[:,:,2] = 255 - arr_norm_255 width, height = arr_color.shape[1], arr_color.shape[0] im = QImage(arr_color, width, height, width*3, QImage.Format_RGB888) pm = QPixmap.fromImage(im) aspect = width / height asp.aspect = aspect cwidth, cheight = w.size().width(), w.size().height() l.setPixmap(pm) l.setMinimumSize(1,1) scalex = width/cwidth scaley = height/cheight scale = max(scalex, scaley) if scale > 1: w.resize(int(width/scale), int(height/scale)) w.updateGeometry() def destroy(): global w, l del l del w #update()
mit
7,318,279,161,187,710,000
31.714286
79
0.598253
false
3.005967
false
false
false
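The display cell above repeats one small piece of arithmetic twice: min-max scaling an arbitrary float array into the 0..255 range that QImage's RGB888 format expects, with a 1e-12 floor so a constant array does not divide by zero, and a 1e-6 nudge so the maximum lands on 255 instead of wrapping past uint8. A standalone sketch of just that step (the function name to_uint8 is mine):

import numpy as np

def to_uint8(arr):
    # Min-max normalise into [0, 1], guarding a zero range as the cell does.
    amin = arr.min()
    amax = arr.max()
    arange = np.maximum(amax - amin, 1e-12)
    arr_norm = (arr - amin) / arange
    # The 1e-6 offset keeps the top value at 255 rather than overflowing to 256.
    return ((arr_norm - 1e-6) * 256).astype(np.uint8)

a = np.linspace(-1.0, 1.0, 16).reshape(4, 4)
b = to_uint8(a)
assert b.dtype == np.uint8 and b.min() == 0 and b.max() == 255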
emilbjorklund/django-simplewebmentions
simplewebmentions/views.py
1
4196
""" TODO: send relevant signals when creating, deleting, unpublishing etc... TODO: How to best connect various bit that we can read from the URLs? """ from __future__ import unicode_literals from urlparse import urlparse from webmentiontools.urlinfo import UrlInfo from django.shortcuts import render, get_object_or_404 from django.http import HttpResponse, Http404, HttpResponseNotAllowed from django.core.urlresolvers import resolve, reverse from django.shortcuts import render_to_response from django.views.generic import View, DetailView from django.views.defaults import bad_request from django.views.decorators.csrf import csrf_exempt from django.utils.decorators import method_decorator from simplewebmentions.helpers import ( verify_params, is_valid_target, get_source_data, mention_status_check, delete_if_existing, get_article_text) from simplewebmentions.models import ( WebMention, MENTION_STATUS_UNMODERATED, MENTION_STATUS_DELETED) class WebMentionDetail(View): def dispatch(self, request, *args, **kwargs): allowed_methods = ['GET', 'HEAD'] if request.method not in allowed_methods: return HttpResponseNotAllowed(allowed_methods) mention = get_object_or_404(WebMention, **kwargs) message, status = mention_status_check(mention) return HttpResponse(message, status=status) class WebMentionEndpoint(View): @method_decorator(csrf_exempt) def dispatch(self, request, *args, **kwargs): return super(WebMentionEndpoint, self).dispatch(request, *args, **kwargs) def get(self, request, *args, **kwargs): """ Doing a get request should return a nice overview HTML page. """ response = render_to_response('webmentions/webmention_endpoint.html') response.Link = reverse('webmention_endpoint') return response def post(self, request, *args, **kwargs): """ Handles post requests to our endpoint. Should check parameters and trigger WebMention creation if present and correct. """ if not verify_params(request.POST): return bad_request(request) target = request.POST['target'] source = request.POST['source'] match = is_valid_target(target, request) # Does the target exist on the site, and is there a source to parse? if not match: """ If there doesn't seem to be content representing the target, the webmention is rejected. """ delete_if_existing(source, target) return bad_request(request) # Use webmention-tools to try and fetch/parse the source source_data = get_source_data(source) # Is there some source data to parse? if source_data.error: """ The source data could not be parsed by webmention-tools, webmention is rejected. """ delete_if_existing(source, target) return bad_request(request) if not source_data.linksTo(target): """ If the source page does not contain a link back to the target, the mention is rejected. """ delete_if_existing(source, target) return bad_request(request) target_app = match.app_name mention = WebMention( source=source, target=target, source_title=source_data.title(), target_app=target_app or "", source_link_excerpt=source_data.snippetWithLink(source_data.url) or "", source_pub_date=source_data.pubDate(), author_img_url=source_data.image() or "", source_text=get_article_text(source_data.soup) ) mention.save() return HttpResponse(mention.get_absolute_url(), status=202) def head(self, request, *args, **kwargs): """ Basically, disallow HEAD requests to the endpoint. """ return HttpResponseNotAllowed(['POST', 'GET'])
mit
-3,596,693,950,043,964,400
32.677686
83
0.624881
false
4.361746
false
false
false
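The endpoint above leans on helpers imported from simplewebmentions.helpers that are not part of this record. As a hedged sketch only: verify_params plausibly checks that both the 'source' and 'target' POST fields are present and parse as absolute http(s) URLs, which would be enough to explain the bad_request branch; the real helper may well do more.

try:
    from urlparse import urlparse          # Python 2, as used in the module above
except ImportError:
    from urllib.parse import urlparse      # Python 3 fallback

def verify_params(post_data):
    # Reject the request unless both fields look like absolute http(s) URLs.
    for key in ('source', 'target'):
        parsed = urlparse(post_data.get(key, ''))
        if parsed.scheme not in ('http', 'https') or not parsed.netloc:
            return False
    return True

assert verify_params({'source': 'http://a.example/post',
                      'target': 'https://b.example/page'})
assert not verify_params({'source': 'not-a-url',
                          'target': 'https://b.example/page'})
assert not verify_params({'target': 'https://b.example/page'})  # missing source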
wikimedia/ve-needcheck-reporter-bot
ircecho.py
1
1213
# Quick and ugly script to echo something to a given IRC channel # Alex Monk, 2014-07-22 from socket import socket, AF_INET, SOCK_STREAM def ircecho(nick, channel, message, host = "chat.freenode.net", port = 6667): s = socket(AF_INET, SOCK_STREAM) s.connect((host, port)) f = s.makefile() def readLineWithoutServername(f): l = f.readline().strip() print(l) return l[l.find(" ") + 1:] def send(s, text): s.send(text + "\r\n") print("> " + text) while True: line = readLineWithoutServername(f) if line == "NOTICE * :*** No Ident response" or line == "NOTICE * :*** Got Ident response": send(s, "user " + nick + " 0 0 :" + nick) send(s, "nick " + nick) break while True: line = readLineWithoutServername(f) if line == "376 " + nick + " :End of /MOTD command.": send(s, "join " + channel) break elif line == "433 * " + nick + " :Nickname is already in use.": nick += "_" send(s, "nick " + nick) while True: line = readLineWithoutServername(f) if line == "366 " + nick + " " + channel + " :End of /NAMES list.": for messageLine in message.splitlines(): send(s, "privmsg " + channel + " :" + messageLine) send(s, "quit :Done") s.close() break
mit
-5,310,313,400,393,842,000
27.209302
93
0.608409
false
2.788506
false
false
false
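The inner readLineWithoutServername above relies on a simple property of IRC server lines: they begin with ':server.name ', so cutting at the first space leaves the numeric reply that the script matches against. A standalone check of that trick (the hostnames are made up):

def strip_servername(line):
    # Drop the ':server.name ' prefix by cutting at the first space.
    line = line.strip()
    return line[line.find(" ") + 1:]

assert strip_servername(":irc.example.net 376 mybot :End of /MOTD command.") == \
    "376 mybot :End of /MOTD command."
assert strip_servername(":irc.example.net 433 * mybot :Nickname is already in use.") == \
    "433 * mybot :Nickname is already in use."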
djurodrljaca/salamander-alm
server/trackermanagement/tracker_management.py
1
27334
""" Salamander ALM Copyright (c) 2016 Djuro Drljaca This Python module is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This Python module is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library. If not, see <http://www.gnu.org/licenses/>. """ from database.connection import Connection from database.database import DatabaseInterface from database.tables.tracker_information import TrackerSelection import datetime from typing import List, Optional class TrackerManagementInterface(object): """ Tracker management Dependencies: - DatabaseInterface """ def __init__(self): """ Constructor is disabled! """ raise RuntimeError() @staticmethod def read_all_tracker_ids(project_id: int, tracker_selection=TrackerSelection.Active, max_revision_id=None) -> List[int]: """ Reads all tracker IDs from the database :param project_id: ID of the project :param tracker_selection: Search for active, inactive or all tracker :param max_revision_id: Maximum revision ID for the search ("None" for latest revision) :return: List of tracker IDs """ connection = DatabaseInterface.create_connection() if max_revision_id is None: max_revision_id = DatabaseInterface.tables().revision.read_current_revision_id( connection) # Reads all tracker IDs from the database trackers = None if max_revision_id is not None: trackers = DatabaseInterface.tables().tracker_information.read_all_tracker_ids( connection, project_id, tracker_selection, max_revision_id) return trackers @staticmethod def read_tracker_by_id(tracker_id: int, max_revision_id=None) -> Optional[dict]: """ Reads a tracker (active or inactive) that matches the specified tracker ID :param tracker_id: ID of the tracker :param max_revision_id: Maximum revision ID for the search ("None" for latest revision) :return: Tracker information object Returned dictionary contains items: - id - project_id - short_name - full_name - description - active - revision_id """ connection = DatabaseInterface.create_connection() if max_revision_id is None: max_revision_id = DatabaseInterface.tables().revision.read_current_revision_id( connection) # Read a tracker that matches the specified tracker ID tracker = None if max_revision_id is not None: tracker = TrackerManagementInterface.__read_tracker_by_id(connection, tracker_id, max_revision_id) return tracker @staticmethod def read_tracker_by_short_name(short_name: str, max_revision_id=None) -> Optional[dict]: """ Reads an active tracker that matches the specified short name :param short_name: Tracker's short name :param max_revision_id: Maximum revision ID for the search ("None" for latest revision) :return: Tracker information object Returned dictionary contains items: - id - project_id - short_name - full_name - description - active - revision_id """ connection = DatabaseInterface.create_connection() if max_revision_id is None: max_revision_id = DatabaseInterface.tables().revision.read_current_revision_id( connection) # Read a tracker that matches the specified short name tracker = None if max_revision_id is not None: tracker = TrackerManagementInterface.__read_tracker_by_short_name(connection, short_name, 
max_revision_id) return tracker @staticmethod def read_trackers_by_short_name(short_name: str, max_revision_id=None) -> List[dict]: """ Reads all active and inactive trackers that match the specified short name :param short_name: Tracker's short name :param max_revision_id: Maximum revision ID for the search ("None" for latest revision) :return: Tracker information of all trackers that match the search attribute Each dictionary in the returned list contains items: - id - project_id - short_name - full_name - description - active - revision_id """ connection = DatabaseInterface.create_connection() if max_revision_id is None: max_revision_id = DatabaseInterface.tables().revision.read_current_revision_id( connection) # Read trackers that match the specified short name trackers = list() if max_revision_id is not None: tracker_information_list = \ DatabaseInterface.tables().tracker_information.read_information( connection, "short_name", short_name, TrackerSelection.All, max_revision_id) for tracker_information in tracker_information_list: trackers.append(TrackerManagementInterface.__parse_tracker_information( tracker_information)) return trackers @staticmethod def read_tracker_by_full_name(full_name: str, max_revision_id=None) -> Optional[dict]: """ Reads an active tracker that matches the specified full name :param full_name: Tracker's full name :param max_revision_id: Maximum revision ID for the search ("None" for latest revision) :return: Tracker information object Returned dictionary contains items: - id - project_id - short_name - full_name - description - active - revision_id """ connection = DatabaseInterface.create_connection() if max_revision_id is None: max_revision_id = DatabaseInterface.tables().revision.read_current_revision_id( connection) # Read a tracker that matches the specified full name tracker = None if max_revision_id is not None: tracker = TrackerManagementInterface.__read_tracker_by_full_name(connection, full_name, max_revision_id) return tracker @staticmethod def read_trackers_by_full_name(full_name: str, max_revision_id=None) -> List[dict]: """ Reads all active and inactive trackers that match the specified full name :param full_name: Tracker's full name :param max_revision_id: Maximum revision ID for the search ("None" for latest revision) :return: Tracker information of all trackers that match the search attribute Each dictionary in the returned list contains items: - id - project_id - short_name - full_name - description - active - revision_id """ connection = DatabaseInterface.create_connection() if max_revision_id is None: max_revision_id = DatabaseInterface.tables().revision.read_current_revision_id( connection) # Read trackers that match the specified full name trackers = list() if max_revision_id is not None: tracker_information_list = \ DatabaseInterface.tables().tracker_information.read_information( connection, "full_name", full_name, TrackerSelection.All, max_revision_id) for tracker_information in tracker_information_list: trackers.append(TrackerManagementInterface.__parse_tracker_information( tracker_information)) return trackers @staticmethod def create_tracker(requested_by_user: int, project_id: int, short_name: str, full_name: str, description: str) -> Optional[int]: """ Creates a new tracker :param requested_by_user: ID of the user that requested creation of the new tracker :param project_id: ID of the project :param short_name: Tracker's short name :param full_name: Tracker's full name :param description: Tracker's description :return: Tracker ID of 
the new tracker """ tracker_id = None connection = DatabaseInterface.create_connection() try: success = connection.begin_transaction() # Start a new revision revision_id = None if success: revision_id = DatabaseInterface.tables().revision.insert_row( connection, datetime.datetime.utcnow(), requested_by_user) if revision_id is None: success = False # Create the tracker if success: tracker_id = TrackerManagementInterface.__create_tracker(connection, project_id, short_name, full_name, description, revision_id) if tracker_id is None: success = False if success: connection.commit_transaction() else: connection.rollback_transaction() except: connection.rollback_transaction() raise return tracker_id @staticmethod def update_tracker_information(requested_by_user: int, tracker_to_modify: int, short_name: str, full_name: str, description: str, active: bool) -> bool: """ Updates tracker's information :param requested_by_user: ID of the user that requested modification of the user :param tracker_to_modify: ID of the tracker that should be modified :param short_name: Tracker's new short name :param full_name: Tracker's new full name :param description: Tracker's new description :param active: Tracker's new state (active or inactive) :return: Success or failure """ connection = DatabaseInterface.create_connection() try: success = connection.begin_transaction() # Start a new revision revision_id = None if success: revision_id = DatabaseInterface.tables().revision.insert_row( connection, datetime.datetime.utcnow(), requested_by_user) if revision_id is None: success = False # Check if there is already an existing tracker with the same short name if success: tracker = TrackerManagementInterface.__read_tracker_by_short_name(connection, short_name, revision_id) if tracker is not None: if tracker["id"] != tracker_to_modify: success = False # Check if there is already an existing tracker with the same full name if success: tracker = TrackerManagementInterface.__read_tracker_by_full_name(connection, full_name, revision_id) if tracker is not None: if tracker["id"] != tracker_to_modify: success = False # Update tracker's information in the new revision if success: row_id = DatabaseInterface.tables().tracker_information.insert_row( connection, tracker_to_modify, short_name, full_name, description, active, revision_id) if row_id is None: success = False if success: connection.commit_transaction() else: connection.rollback_transaction() except: connection.rollback_transaction() raise return success @staticmethod def activate_tracker(requested_by_user: int, tracker_id: int) -> bool: """ Activates an inactive tracker :param requested_by_user: ID of the user that requested modification of the user :param tracker_id: ID of the tracker that should be activated :return: Success or failure """ connection = DatabaseInterface.create_connection() try: success = connection.begin_transaction() # Start a new revision revision_id = None if success: revision_id = DatabaseInterface.tables().revision.insert_row( connection, datetime.datetime.utcnow(), requested_by_user) if revision_id is None: success = False # Read tracker tracker = None if success: tracker = TrackerManagementInterface.__read_tracker_by_id(connection, tracker_id, revision_id) if tracker is None: success = False elif tracker["active"]: # Error, tracker is already active success = False # Activate tracker if success: success = DatabaseInterface.tables().tracker_information.insert_row( connection, tracker_id, tracker["short_name"], tracker["full_name"], 
tracker["description"], True, revision_id) if success: connection.commit_transaction() else: connection.rollback_transaction() except: connection.rollback_transaction() raise return success @staticmethod def deactivate_tracker(requested_by_user: int, tracker_id: int) -> bool: """ Deactivates an active tracker :param requested_by_user: ID of the user that requested modification of the user :param tracker_id: ID of the tracker that should be deactivated :return: Success or failure """ connection = DatabaseInterface.create_connection() try: success = connection.begin_transaction() # Start a new revision revision_id = None if success: revision_id = DatabaseInterface.tables().revision.insert_row( connection, datetime.datetime.utcnow(), requested_by_user) if revision_id is None: success = False # Read tracker tracker = None if success: tracker = TrackerManagementInterface.__read_tracker_by_id(connection, tracker_id, revision_id) if tracker is None: success = False elif not tracker["active"]: # Error, tracker is already inactive success = False # Deactivate tracker if success: success = DatabaseInterface.tables().tracker_information.insert_row( connection, tracker_id, tracker["short_name"], tracker["full_name"], tracker["description"], False, revision_id) if success: connection.commit_transaction() else: connection.rollback_transaction() except: connection.rollback_transaction() raise return success @staticmethod def __read_tracker_by_id(connection: Connection, tracker_id: int, max_revision_id: int) -> Optional[dict]: """ Reads a tracker (active or inactive) that matches the search parameters :param connection: Database connection :param tracker_id: ID of the tracker :param max_revision_id: Maximum revision ID for the search :return: Tracker information object Returned dictionary contains items: - id - project_id - short_name - full_name - description - active - revision_id """ # Read the trackers that match the search attribute trackers = DatabaseInterface.tables().tracker_information.read_information( connection, "tracker_id", tracker_id, TrackerSelection.All, max_revision_id) # Return a tracker only if exactly one was found tracker = None if trackers is not None: if len(trackers) == 1: tracker = {"id": trackers[0]["tracker_id"], "project_id": trackers[0]["project_id"], "short_name": trackers[0]["short_name"], "full_name": trackers[0]["full_name"], "description": trackers[0]["description"], "active": trackers[0]["active"], "revision_id": trackers[0]["revision_id"]} return tracker @staticmethod def __read_tracker_by_short_name(connection: Connection, short_name: str, max_revision_id: int) -> Optional[dict]: """ Reads an active tracker that matches the specified short name :param connection: Database connection :param short_name: Tracker's short name :param max_revision_id: Maximum revision ID for the search :return: Tracker information object Returned dictionary contains items: - id - project_id - short_name - full_name - description - active - revision_id """ # Read the trackers that match the search attribute trackers = DatabaseInterface.tables().tracker_information.read_information( connection, "short_name", short_name, TrackerSelection.Active, max_revision_id) # Return a tracker only if exactly one was found tracker = None if trackers is not None: if len(trackers) == 1: tracker = {"id": trackers[0]["tracker_id"], "project_id": trackers[0]["project_id"], "short_name": trackers[0]["short_name"], "full_name": trackers[0]["full_name"], "description": trackers[0]["description"], "active": 
trackers[0]["active"], "revision_id": trackers[0]["revision_id"]} return tracker @staticmethod def __read_tracker_by_full_name(connection: Connection, full_name: str, max_revision_id: int) -> Optional[dict]: """ Reads an active tracker that matches the specified full name :param connection: Database connection :param full_name: Tracker's full name :param max_revision_id: Maximum revision ID for the search :return: Tracker information object Returned dictionary contains items: - id - project_id - short_name - full_name - description - active - revision_id """ # Read the trackers that match the search attribute trackers = DatabaseInterface.tables().tracker_information.read_information( connection, "full_name", full_name, TrackerSelection.Active, max_revision_id) # Return a tracker only if exactly one was found tracker = None if trackers is not None: if len(trackers) == 1: tracker = {"id": trackers[0]["tracker_id"], "project_id": trackers[0]["project_id"], "short_name": trackers[0]["short_name"], "full_name": trackers[0]["full_name"], "description": trackers[0]["description"], "active": trackers[0]["active"], "revision_id": trackers[0]["revision_id"]} return tracker @staticmethod def __create_tracker(connection: Connection, project_id: int, short_name: str, full_name: str, description: str, revision_id: int) -> Optional[int]: """ Creates a new tracker :param connection: Database connection :param project_id: ID of the project :param short_name: Tracker's short name :param full_name: Tracker's full name :param description: Tracker's description :param revision_id: Revision ID :return: Tracker ID of the newly created tracker """ # Check if a tracker with the same short name already exists tracker = TrackerManagementInterface.__read_tracker_by_short_name(connection, short_name, revision_id) if tracker is not None: return None # Check if a tracker with the same full name already exists tracker = TrackerManagementInterface.__read_tracker_by_full_name(connection, full_name, revision_id) if tracker is not None: return None # Create the tracker in the new revision tracker_id = DatabaseInterface.tables().tracker.insert_row(connection, project_id) if tracker_id is None: return None # Add tracker information to the tracker tracker_information_id = DatabaseInterface.tables().tracker_information.insert_row( connection, tracker_id, short_name, full_name, description, True, revision_id) if tracker_information_id is None: return None return tracker_id @staticmethod def __parse_tracker_information(raw_tracker_information: dict) -> dict: """ Parse raw tracker information object and convert it to a tracker information object :param raw_tracker_information: Tracker information :return: Tracker information object Input (raw) dictionary contains items: - project_id - tracker_id - short_name - full_name - description - active - revision_id Returned dictionary contains items: - id - project_id - short_name - full_name - description - active - revision_id """ return {"id": raw_tracker_information["tracker_id"], "project_id": raw_tracker_information["project_id"], "short_name": raw_tracker_information["short_name"], "full_name": raw_tracker_information["full_name"], "description": raw_tracker_information["description"], "active": raw_tracker_information["active"], "revision_id": raw_tracker_information["revision_id"]}
gpl-2.0
-3,854,890,622,643,095,000
35.013175
100
0.498207
false
5.608125
false
false
false
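Every mutating method in the record above repeats the same control flow: begin a transaction, insert a revision row, do the work, then commit on success or roll back on failure or exception. A condensed sketch of that skeleton with stand-in objects (run_in_revision, FakeConnection and insert_revision are my names; the real code goes through DatabaseInterface.tables().revision.insert_row):

def run_in_revision(connection, requested_by_user, work):
    """Run work(connection, revision_id); return its result, or None on failure."""
    result = None
    try:
        success = connection.begin_transaction()
        revision_id = connection.insert_revision(requested_by_user) if success else None
        if revision_id is not None:
            result = work(connection, revision_id)
        if result is not None:
            connection.commit_transaction()
        else:
            connection.rollback_transaction()
    except Exception:
        connection.rollback_transaction()
        raise
    return result

class FakeConnection(object):
    def begin_transaction(self): return True
    def insert_revision(self, user): return 42
    def commit_transaction(self): pass
    def rollback_transaction(self): pass

assert run_in_revision(FakeConnection(), 1, lambda c, rev: ('tracker', rev)) == ('tracker', 42)
assert run_in_revision(FakeConnection(), 1, lambda c, rev: None) is None  # rolled back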
TheWardoctor/Wardoctors-repo
script.stargate.guide/strings.py
1
2287
# -*- coding: utf-8 -*- # # Copyright (C) 2012 Tommy Winther # http://tommy.winther.nu # # Modified for FTV Guide (09/2014 onwards) # by Thomas Geppert [bluezed] - bluezed.apps@gmail.com # # Modified for Stargate Guide (2016) # by wardoctor - wardoctor@tardisbuilds.com # # This Program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This Program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this Program; see the file LICENSE.txt. If not, write to # the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. # http://www.gnu.org/copyleft/gpl.html # import xbmcaddon ADDON = xbmcaddon.Addon(id = 'script.stargate.guide') NO_DESCRIPTION = 30000 CALCULATING_REMAINING_TIME = 30002 TIME_LEFT = 30003 BACKGROUND_UPDATE_IN_PROGRESS = 30004 NO_PROGRAM_AVAILABLE = 30009 NO_STREAM_AVAILABLE_TITLE = 30100 NO_STREAM_AVAILABLE_LINE1 = 30101 NO_STREAM_AVAILABLE_LINE2 = 30102 CLEAR_CACHE = 30104 CLEAR_NOTIFICATIONS = 30108 DONE = 30105 LOAD_ERROR_TITLE = 30150 LOAD_ERROR_LINE1 = 30151 LOAD_ERROR_LINE2 = 30152 CONFIGURATION_ERROR_LINE2 = 30153 SKIN_ERROR_LINE1 = 30154 SKIN_ERROR_LINE2 = 30155 SKIN_ERROR_LINE3 = 30156 NOTIFICATION_5_MINS = 30200 NOTIFICATION_NOW = 30201 WATCH_CHANNEL = 30300 REMIND_PROGRAM = 30301 DONT_REMIND_PROGRAM = 30302 CHOOSE_STRM_FILE = 30304 REMOVE_STRM_FILE = 30306 PREVIEW_STREAM = 30604 STOP_PREVIEW = 30607 WEEBTV_WEBTV_MISSING_1 = 30802 WEEBTV_WEBTV_MISSING_2 = 30803 WEEBTV_WEBTV_MISSING_3 = 30804 DATABASE_SCHEMA_ERROR_1 = 30157 DATABASE_SCHEMA_ERROR_2 = 30158 DATABASE_SCHEMA_ERROR_3 = 30159 FETCH_ERROR_TITLE = 31000 FETCH_ERROR_LINE1 = 31001 FETCH_ERROR_LINE2 = 31002 def strings(id, replacements = None): string = ADDON.getLocalizedString(id) if replacements is not None: return string % replacements else: return string
apache-2.0
-2,401,772,511,313,800,700
26.238095
72
0.738085
false
3.069799
false
false
false
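strings() above is a thin wrapper over Kodi's ADDON.getLocalizedString plus optional %-substitution, so it cannot run outside Kodi. The same lookup pattern can be exercised with a plain dict standing in for the addon's language file; the sample text below is made up, only the 30003 ID (TIME_LEFT) comes from the record.

STRINGS = {30003: u'Time left: %s'}  # stand-in for the addon's localized strings

def strings(id, replacements=None):
    string = STRINGS[id]
    if replacements is not None:
        return string % replacements
    return string

assert strings(30003) == u'Time left: %s'
assert strings(30003, u'12 min') == u'Time left: 12 min'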
darvelo/chime
chime/error_functions.py
1
4103
from __future__ import absolute_import from logging import getLogger Logger = getLogger('chime.error_functions') from flask import current_app, request from urllib import quote from urlparse import urlparse from os.path import join, exists from .view_functions import get_repo, strip_index_file, path_display_type, get_value_from_front_matter, FOLDER_FILE_TYPE from .repo_functions import TASK_METADATA_FILENAME EMAIL_SUBJECT_TEXT = u'Chime Error Report' EMAIL_BODY_PREFIX = u'\n\n----- Please add any relevant details above this line -----\n\n' def common_error_template_args(app_config): ''' Return dictionary of template arguments common to error pages. ''' return { "activities_path": u'/', "support_email": app_config.get('SUPPORT_EMAIL_ADDRESS'), "support_phone_number": app_config.get('SUPPORT_PHONE_NUMBER') } def make_email_params(message, path=None, uuid=None): ''' Construct email params to send to the template. ''' email_subject = EMAIL_SUBJECT_TEXT email_message = EMAIL_BODY_PREFIX + message if path: email_message = u'\n'.join([email_message, u'path: {}'.format(path)]) if uuid: email_subject = u'{} ({})'.format(email_subject, uuid) return u'?subject={}&body={}'.format(quote(email_subject), quote(email_message)) def extract_branch_name_from_path(path): ''' If the name of a branch that exists in the passed repo is in the passed URL, return it ''' repo = get_repo(flask_app=current_app) for branch_name_candidate in path.split('/'): if branch_name_candidate in repo.branches: return branch_name_candidate return None def summarize_conflict_details(error): ''' Make an object that summarizes the files affected by a merge conflict. The object looks like this: [ {'edit_path': u'', 'display_type': u'Article', 'actions': u'Deleted', 'title': u'How to Find Us'}, {'edit_path': u'/tree/34246e3/edit/contact/hours-of-operation/', 'display_type': u'Article', 'actions': u'Edited', 'title': u'Hours of Operation'}, {'edit_path': u'/tree/34246e3/edit/contact/driving-directions/', 'display_type': u'Article', 'actions': u'Edited', 'title': u'Driving Directions'}, {'edit_path': u'/tree/34246e3/edit/contact/', 'display_type': u'Category', 'actions': u'Created', 'title': u'Contact'} ] ''' repo = get_repo(flask_app=current_app) path = urlparse(request.url).path # get the branch name (unless it's the default branch) branch_name = repo.active_branch.name if branch_name == current_app.config['default_branch']: branch_name = extract_branch_name_from_path(path) conflict_files = error.files() summary = [] for id_file in conflict_files: # skip the task metadata file if TASK_METADATA_FILENAME in id_file['path']: continue file_description = {'actions': id_file['actions'].title()} edit_path = u'' display_type = u'' title = id_file['path'].split('/')[-1] # construct location info if the file's there file_loc = join(repo.working_dir, id_file['path']) if exists(file_loc): dir_path = strip_index_file(id_file['path']) dir_loc = join(repo.working_dir, dir_path) display_type = path_display_type(dir_loc) # if it's not a category or article, it's just a file if display_type == FOLDER_FILE_TYPE: display_type = path_display_type(file_loc) title = get_value_from_front_matter('title', file_loc) or title edit_path = join(u'/tree/{}/edit/'.format(branch_name), dir_path) else: # the file's not there, so just dump the whole path into the title title = id_file['path'] display_type = u'Unknown' file_description['edit_path'] = edit_path file_description['display_type'] = display_type.title() file_description['title'] = title 
summary.append(file_description) return summary
bsd-3-clause
-526,122,277,231,651,140
40.867347
159
0.63612
false
3.706414
false
false
false
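make_email_params above simply URL-encodes a subject and a prefilled body into a mailto-style query string. Reproduced here so the shape of its output can be checked stand-alone; the path and uuid values are made up.

try:
    from urllib import quote           # Python 2, as in the module above
except ImportError:
    from urllib.parse import quote     # Python 3 fallback

EMAIL_SUBJECT_TEXT = u'Chime Error Report'
EMAIL_BODY_PREFIX = u'\n\n----- Please add any relevant details above this line -----\n\n'

def make_email_params(message, path=None, uuid=None):
    # Same logic as the module above, repeated so it runs without the package.
    email_subject = EMAIL_SUBJECT_TEXT
    email_message = EMAIL_BODY_PREFIX + message
    if path:
        email_message = u'\n'.join([email_message, u'path: {}'.format(path)])
    if uuid:
        email_subject = u'{} ({})'.format(email_subject, uuid)
    return u'?subject={}&body={}'.format(quote(email_subject), quote(email_message))

params = make_email_params(u'merge conflict', path=u'/tree/abc123/edit/', uuid=u'deadbeef')
assert params.startswith(u'?subject=Chime%20Error%20Report%20%28deadbeef%29')
assert u'path%3A%20/tree/abc123/edit/' in params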
ThierryM/bCNC
bCNC/lib/bmath.py
2
53783
# # Copyright European Organization for Nuclear Research (CERN) # All rights reserved # # Author: Vasilis.Vlachoudis@cern.ch # Date: 15-May-2004 from __future__ import generators from __future__ import absolute_import __author__ = "Vasilis Vlachoudis" __email__ = "Vasilis.Vlachoudis@cern.ch" import random from math import acos, asin, atan2, copysign, cos, degrees, fmod, hypot, pi, pow, radians, sin, sqrt import rexx # Accuracy for comparison operators _accuracy = 1E-15 # Formatting _format = "%15g" #------------------------------------------------------------------------------- def sign(x): """Return sign of number""" return int(copysign(1,x)) #------------------------------------------------------------------------------- def Cmp0(x): """Compare against zero within _accuracy""" return abs(x)<_accuracy #------------------------------------------------------------------------------- def frange(start,stop,step): """range(start,stop,step) for floating point numbers""" x = start if step<0.0: while x>stop: yield x x += step else: while x<stop: yield x x += step #------------------------------------------------------------------------------- def limit(min_, num, max_): """limit a number within a specific range""" return max(min(num,max_),min_) #------------------------------------------------------------------------------- def dms(d,m,s): """dms - degrees from degrees, minutes, seconds""" return d + m/60.0 + s/3600.0 #------------------------------------------------------------------------------- def cbrt(x): """cubic root, this cubic root routine handles negative arguments""" if x == 0.0: return 0 elif x > 0.0: return pow(x, 1./3.) else: return -pow(-x, 1./3.) #------------------------------------------------------------------------------- def d2s(ang, fmt=""): """degrees to string D2S(angle[,"H"|"M"|"D"|"N"]) """ fmt.capitalize() if ang<0.0: neg = "-" ang = -ang else: neg = "" ang = round(ang*360000)/100 SS = "%05.2f" % (fmod(ang,60)) ang = int(ang / 60.0) MM = "%02d" % (ang % 60) HH = neg + str(ang / 60) if fmt=="H": return HH+"h"+MM+"m"+SS+"s" if fmt=="M": return HH+"h"+MM+"m" if fmt=="D": return HH+" "+MM+"'"+SS+'"' if fmt=="N": return HH+":"+MM return HH+":"+MM+":"+SS #------------------------------------------------------------------------------- def format(number, length=10, useExp=False, useD=False): """ Format a number to fit in the minimum space given by length""" _MAXLEN=22 # Convert number to string # XXX str cuts at 12 digits, repr shows everything but # numbers like e.g 9.2 will be converted to 9.1999999999999 # What should I do # Fields also in the CardWidget are converted with str and # are cut at 12 digits!!!! if isinstance(number, (float, int)): number = repr(number).upper() else: number = str(number).strip().upper() if not rexx.datatype(number, "N"): return number if useD: number = number.replace("E", "D") expE = "D" else: number = number.replace("D", "E") expE = "E" if len(number) < length: hasExp = (number.find(expE)>=0) if useExp: if hasExp: return number elif number.find(".")>=0 or hasExp: return number if number=="0": if useExp: return "0.%s0" % (expE) else: return "0.0" if length<5 or length>_MAXLEN: raise Exception("Format invalid length") # Dissect the number. It is in the normal Rexx format. 
try: (mantissa, exponent) = number.split(expE) if exponent == '': exponent = 0 else: exponent = int(exponent) except: mantissa = number exponent = 0 if mantissa[0] == '-': sgn = True mantissa = mantissa[1:] elif mantissa[0] == '+': sgn = False mantissa = mantissa[1:] else: sgn = False try: (befo, afte) = mantissa.split(".") except: befo = mantissa afte = "" # Count from the left for the decimal point. point = len(befo) # Make this a number without a point. integer = befo + afte # Remove leading zeros for p in range(len(integer)): if integer[p] != '0': if p>0: integer = integer[p:] point -= p break else: if useExp: return "0.%s0" % (expE) else: return "0.0" # ... and trailing for p in range(len(integer)-1,0,-1): if integer[p] != '0': integer = integer[0:p+1] break exponent += point # Cannot handle more than _MAXLEN digits lint = len(integer) if lint > _MAXLEN: r = integer[_MAXLEN] integer = integer[0:_MAXLEN] if r>='5': integer = str(int(integer)+1) if len(integer) > lint: exponent += 1 if len(integer) > _MAXLEN: integer = integer[0:_MAXLEN] # Now the number is described by: # sgn 0.integer "E" exponent # Make space for sign if sgn: length -= 1 while True: # Minimum length representation of a number # Length = Length of integer # + 1 for Dot if needed (no exponent) # + (2-4) for exponent # exponent can be in the following forms # nothing if dot can placed inside integer # E# 2 # E## 3 # E-# 3 # E-## 4 # integer is given as 0.integer lint = len(integer) if useExp: mNum = "%s%s%d"%(rexx.insert(".", integer, 1),expE,exponent-1) elif exponent==-2: mNum = ".00%s"%(integer) elif exponent==-1: mNum = ".0%s"%(integer) elif exponent==0: mNum = ".%s"%(integer) elif exponent==1: mNum = rexx.insert(".", integer, 1) elif exponent==length: mNum = "%s%s"%(integer,"0"*(length-lint)) elif exponent>1 and exponent<=lint: mNum = rexx.insert(".", integer, exponent) elif exponent>1 and exponent<=lint+2: if exponent>lint: mNum = "%s%s."%(integer, "0"*(exponent-lint)) else: mNum = "%s."%(integer.ljust(exponent)) elif exponent>lint and exponent+1<length: mNum = "%s%s."%(integer, "0"*(exponent-lint)) else: mNum = "%s%s%d"%(rexx.insert(".", integer, 1),expE,exponent-1) diff = len(mNum)-length if diff<=0: break elif diff<=2: r = integer[-1] integer = integer[0:-1] else: r = integer[-diff] integer = integer[0:-diff] if r>='5': lint = len(integer) if lint==0: integer = 0 integer = str(int(integer)+1) if len(integer) > lint: exponent += 1 # Remove trailing zeros for p in range(len(integer)-1,-1,-1): if integer[p] != '0': integer = integer[0:p+1] break else: if useExp: return "0.%s0"%(expE) else: return "0.0" if sgn: mNum = "-%s"%(mNum) return mNum #============================================================================== # Dangerous dictionary that unknown keys return a user default value # Use it with care #============================================================================== class DefaultDict(dict): """Dictionary where unknown keys will return a default value""" def __init__(self, default=None): dict.__init__(self) self._default = default # ---------------------------------------------------------------------- def __getitem__(self, key): return self.get(key,self._default) #============================================================================== class ZeroDict(DefaultDict): """Dictionary where unknown keys will return 0.0""" def __init__(self): DefaultDict.__init__(self, 0.0) #============================================================================== class ZeroIntDict(DefaultDict): """Dictionary 
where unknown keys will return 0""" def __init__(self): DefaultDict.__init__(self, 0) #=============================================================================== # Vector class # Inherits from List #=============================================================================== class Vector(list): """Vector class""" # ---------------------------------------------------------------------- def __init__(self, x=3, *args): """Create a new vector, Vector(size), Vector(list), Vector(x,y,z,...)""" list.__init__(self) if isinstance(x,int) and not args: for i in range(x): self.append(0.0) elif isinstance(x,(list,tuple)): for i in x: self.append(float(i)) else: self.append(float(x)) for i in args: self.append(float(i)) # ---------------------------------------------------------------------- def set(self, x, y, z=None): """Set vector""" self[0] = x self[1] = y if z: self[2] = z # ---------------------------------------------------------------------- def __repr__(self): return "[%s]"%(", ".join([repr(x) for x in self])) # ---------------------------------------------------------------------- def __str__(self): return "[%s]"%(", ".join([(_format%(x)).strip() for x in self])) # ---------------------------------------------------------------------- def eq(self, v, acc=_accuracy): """Test for equality with vector v within accuracy""" if len(self) != len(v): return False s2 = 0.0 for a,b in zip(self, v): s2 += (a-b)**2 return s2 <= acc**2 # ---------------------------------------------------------------------- def __eq__(self, v): return self.eq(v) # ---------------------------------------------------------------------- def __neg__(self): """Negate vector""" new = Vector(len(self)) for i,s in enumerate(self): new[i] = -s return new # ---------------------------------------------------------------------- def __add__(self, v): """Add 2 vectors""" size = min(len(self),len(v)) new = Vector(size) for i in range(size): new[i] = self[i] + v[i] return new # ---------------------------------------------------------------------- def __iadd__(self, v): """Add vector v to self""" for i in range(min(len(self),len(v))): self[i] += v[i] return self # ---------------------------------------------------------------------- def __sub__(self, v): """Subtract 2 vectors""" size = min(len(self),len(v)) new = Vector(size) for i in range(size): new[i] = self[i] - v[i] return new # ---------------------------------------------------------------------- def __isub__(self, v): """Subtract vector v from self""" for i in range(min(len(self),len(v))): self[i] -= v[i] return self # ---------------------------------------------------------------------- # Scale or Dot product # ---------------------------------------------------------------------- def __mul__(self, v): """scale*Vector() or Vector()*Vector() - Scale vector or dot product""" if isinstance(v,list): return self.dot(v) else: return Vector([x*v for x in self]) # ---------------------------------------------------------------------- # Scale or Dot product # ---------------------------------------------------------------------- def __rmul__(self, v): """scale*Vector() or Vector()*Vector() - Scale vector or dot product""" if isinstance(v,Vector): return self.dot(v) else: return Vector([x*v for x in self]) # ---------------------------------------------------------------------- # Divide by floating point # ---------------------------------------------------------------------- def __div__(self, b): return Vector([x/b for x in self]) # 
---------------------------------------------------------------------- def __xor__(self, v): """Cross product""" return self.cross(v) # ---------------------------------------------------------------------- def dot(self, v): """Dot product of 2 vectors""" s = 0.0 for a,b in zip(self, v): s += a*b return s # ---------------------------------------------------------------------- def cross(self, v): """Cross product of 2 vectors""" if len(self)==3: return Vector( self[1]*v[2]-self[2]*v[1], self[2]*v[0]-self[0]*v[2], self[0]*v[1]-self[1]*v[0]) elif len(self)==2: return self[0]*v[1]-self[1]*v[0] else: raise Exception("Cross product needs 2d or 3d vectors") # ---------------------------------------------------------------------- def length2(self): """Return length squared of vector""" s2 = 0.0 for s in self: s2 += s**2 return s2 # ---------------------------------------------------------------------- def length(self): """Return length of vector""" s2 = 0.0 for s in self: s2 += s**2 return sqrt(s2) __abs__ = length # ---------------------------------------------------------------------- def arg(self): """return vector angle""" return atan2(self[1], self[0]) # ---------------------------------------------------------------------- def norm(self): """Normalize vector and return length""" l = self.length() if l>0.0: invlen = 1.0/l for i in range(len(self)): self[i] *= invlen return l normalize = norm # ---------------------------------------------------------------------- def unit(self): """return a unit vector""" v = self.clone() v.norm() return v # ---------------------------------------------------------------------- def clone(self): """Clone vector""" return Vector(self) # ---------------------------------------------------------------------- def x(self): return self[0] def y(self): return self[1] def z(self): return self[2] # ---------------------------------------------------------------------- def orthogonal(self): """return a vector orthogonal to self""" xx = abs(self.x()) yy = abs(self.y()) if len(self)>=3: zz = abs(self.z()) if xx < yy: if xx < zz: return Vector(0.0, self.z(), -self.y()) else: return Vector(self.y(), -self.x(), 0.0) else: if yy < zz: return Vector(-self.z(), 0.0, self.x()) else: return Vector(self.y(), -self.x(), 0.0) else: return Vector(-self.y(), self.x()) # ---------------------------------------------------------------------- def direction(self, zero=_accuracy): """return containing the direction if normalized with any of the axis""" v = self.clone() l = v.norm() if abs(l) <= zero: return "O" if abs(v[0]-1.0)<zero: return "X" elif abs(v[0]+1.0)<zero: return "-X" elif abs(v[1]-1.0)<zero: return "Y" elif abs(v[1]+1.0)<zero: return "-Y" elif abs(v[2]-1.0)<zero: return "Z" elif abs(v[2]+1.0)<zero: return "-Z" else: #nothing special about the direction, return N return "N" # ---------------------------------------------------------------------- # Set the vector directly in polar coordinates # @param ma magnitude of vector # @param ph azimuthal angle in radians # @param th polar angle in radians # ---------------------------------------------------------------------- def setPolar(self, ma, ph, th): """Set the vector directly in polar coordinates""" sf = sin(ph) cf = cos(ph) st = sin(th) ct = cos(th) self[0] = ma*st*cf self[1] = ma*st*sf self[2] = ma*ct # ---------------------------------------------------------------------- def phi(self): """return the azimuth angle.""" if Cmp0(self.x()) and Cmp0(self.y()): return 0.0 return atan2(self.y(), self.x()) # 
---------------------------------------------------------------------- def theta(self): """return the polar angle.""" if Cmp0(self.x()) and Cmp0(self.y()) and Cmp0(self.z()): return 0.0 return atan2(self.perp(),self.z()) # ---------------------------------------------------------------------- def cosTheta(self): """return cosine of the polar angle.""" ptot = self.length() if Cmp0(ptot): return 1.0 else: return self.z()/ptot # ---------------------------------------------------------------------- def perp2(self): """return the transverse component squared (R^2 in cylindrical coordinate system).""" return self.x() * self.x() + self.y() * self.y() # ---------------------------------------------------------------------- def perp(self): """@return the transverse component (R in cylindrical coordinate system).""" return sqrt(self.perp2()) # ---------------------------------------------------------------------- # Return a random 3D vector # ---------------------------------------------------------------------- @staticmethod def random(): cosTheta = 2.0*random.random()-1.0 sinTheta = sqrt(1.0 - cosTheta**2) phi = 2.0*pi*random.random() return Vector(cos(phi)*sinTheta, sin(phi)*sinTheta, cosTheta) #------------------------------------------------------------------------------- # Basic 3D Vectors #------------------------------------------------------------------------------- Vector.O = Vector(0.0, 0.0, 0.0) Vector.X = Vector(1.0, 0.0, 0.0) Vector.Y = Vector(0.0, 1.0, 0.0) Vector.Z = Vector(0.0, 0.0, 1.0) # ------------------------------------------------------------------------------ # Return a random nolor # ------------------------------------------------------------------------------ def rndColor(x): def rnd(zw): w = zw & 0xffff z = (zw >> 16) & 0xffff z = 36969 * (z & 0xffff) + (z >> 16) w = 18000 * (w & 0xffff) + (w >> 16) return (z << 16) + (w & 0xffff) x = rnd(x) R = (x % 224) + 16 x = rnd(x) G = (x % 224) + 16 x = rnd(x) B = (x % 224) + 16 return R<<16 | G<<8 | B #=============================================================================== # Matrix class # Use 4x4 matrix for vector transformations #=============================================================================== class Matrix(list): """Matrix 4x4 used for vector transformations""" # ---------------------------------------------------------------------- def __init__(self, rows=4, cols=-1, type=0): """ Matrix(rows=4, cols=-1, type=0|1) if rows is integer then Create a matrix rows x cols either zero(type=0) or unary(type=1) elif rows is a list of lists create a matrix from a double-list """ if isinstance(rows, list): lst = rows self.rows = len(lst) self.extend([[]]*self.rows) if isinstance(lst[0], list): self.cols = len(lst[0]) for i in range(self.rows): self[i] = lst[i][:] if len(self[i]) != self.cols: raise Exception("Not a valid double-list for a matrix") else: self.cols = 1 for i in range(self.rows): self[i] = [lst[i]] else: if rows<2: raise Exception("Array size too small") if cols<0: cols=rows self.rows = rows self.cols = cols self += [[]]*rows if type==1: self.unary() else: self.zero() # ---------------------------------------------------------------------- # Create a diagonal square matrix from a list # ---------------------------------------------------------------------- @staticmethod def diagonal(lst): m = Matrix(len(lst), type=0) i = 0 for item in lst: m[i][i] = item i += 1 return m # ---------------------------------------------------------------------- # append row # 
---------------------------------------------------------------------- def append(self, col): list.append(self, col) self.rows += 1 # ---------------------------------------------------------------------- @staticmethod def translate(x, y=0.0, z=0.0): """m = Matrix.translate(x,y,z|vector) @return a translation matrix""" m = Matrix(4, type=1) if isinstance(x,(list,tuple)): m[0][3] = x[0] m[1][3] = x[1] m[2][3] = x[2] else: m[0][3] = x m[1][3] = y m[2][3] = z return m # ---------------------------------------------------------------------- @staticmethod def scale(sx, sy=None, sz=None): """m = Matrix.scale(scale|vector) @return a scaling matrix""" m = Matrix(4, type=1) if sy is None: sy = sx if sz is None: sz = sx if isinstance(sx,(list,tuple)): m[0][0] = sx[0] m[1][1] = sx[1] m[2][2] = sx[2] else: m[0][0] = sx m[1][1] = sy m[2][2] = sz return m # ---------------------------------------------------------------------- def zero(self): """Zero matrix""" for i in range(self.rows): self[i] = [0.0]*self.cols # ---------------------------------------------------------------------- def unary(self): """Unary matrix""" self.zero() for i in range(min(self.rows, self.cols)): self[i][i] = 1.0 # ---------------------------------------------------------------------- # Create a transformation matrix from 3 normalized vectors # and optionally a translation # ---------------------------------------------------------------------- def make(self,X,Y,Z=None,T=None): """Create a transformation matrix from 3 normalized vectors""" self.unary() if (self.rows==3 or self.rows==4) and self.cols==self.rows: if Z is None: Z = X ^ Y Z.normalize() for i in range(3): self[0][i] = X[i] self[1][i] = Y[i] self[2][i] = Z[i] if T is not None and self.rows==4: self[i][3] = T[i] else: raise Exception("Matrix.make() works only on Matrix(3x3) or Matrix(4x4)") # ---------------------------------------------------------------------- def __repr__(self): """Multi line string representation of matrix""" s = "" for i in range(self.rows): if i==0: first="/" last="\\" elif i==self.rows-1: first="\\" last="/" else: first=last="|" s += first for j in range(self.cols): s += " " + repr(self[i][j]) s += " " + last + "\n" return s # ---------------------------------------------------------------------- def __str__(self): """Multi line string representation of matrix""" s = "" for i in range(self.rows): if i==0: first="/" last="\\" elif i==self.rows-1: first="\\" last="/" else: first=last="|" s += first for j in range(self.cols): s += " " + _format % self[i][j] s += " " + last + "\n" return s # ---------------------------------------------------------------------- def writeOctave(self, filename, name): """Write an octave matrix file""" f = open(filename,"w") f.write("# bmath.Matrix\n") f.write("# name: %s\n"%(name)) f.write("# type: matrix\n") f.write("# rows: %d\n"%(self.rows)) f.write("# columns: %d\n"%(self.cols)) for i in range(self.rows): for j in range(self.cols): f.write("%s "%(repr(self[i][j]))) f.write("\n") f.close() # ---------------------------------------------------------------------- def T(self): """@return transpose matrix""" m = Matrix(self.cols, self.rows) for i in range(self.rows): for j in range(self.cols): m[j][i] = self[i][j] return m transpose=T # ---------------------------------------------------------------------- def trace(self): """Return trace of matrix (sum of diagonal elements)""" t = 0.0 for i in range(min(self.rows,self.cols)): t += self[i][i] return t # 
---------------------------------------------------------------------- def __eq__(self, m): """Test for equality of 2 matrices""" if self.rows!=m.rows or self.cols!=m.cols: return False for i in range(self.rows): for j in range(self.cols): if abs(self[i][j] - m[i][j]): return False return True # ---------------------------------------------------------------------- # Create a rotation matrix around one axis # X = 0 # Y = 1 # Z = 2 # or an arbitrary vector # ---------------------------------------------------------------------- def rotate(self, angle, axis): """Add rotation elements to the matrix around one axis Axis X=0, Y=1, Z=2, or an arbitrary one given by vector axis""" self.unary() c = cos(angle) s = sin(angle) if isinstance(axis,int): m1 = ((axis+1)%3)+1 m2 = m1%3 m1 = m1 - 1 self[m1][m1] = c self[m2][m2] = c self[m1][m2] = -s self[m2][m1] = s elif isinstance(axis,Vector): l = axis.length() x = axis[0] / l y = axis[1] / l z = axis[2] / l c1 = 1 - c self[0][0] = x*x + (1-x*x)*c self[0][1] = x*y*c1 - z*s self[0][2] = x*z*c1 + y*s self[1][0] = x*y*c1 + z*s self[1][1] = y*y + (1-y*y)*c self[1][2] = y*z*c1 - x*s self[2][0] = x*z*c1 - y*s self[2][1] = y*z*c1 + x*s self[2][2] = z*z + (1-z*z)*c # ---------------------------------------------------------------------- @staticmethod def rotX(angle): """m = Matrix.rotX(angle) - Return a rotation matrix around X""" m = Matrix(4, type=1) m.rotate(angle, 0) return m # ---------------------------------------------------------------------- @staticmethod def rotY(angle): """m = Matrix.rotY(angle) - Return a rotation matrix arround Y""" m = Matrix(4, type=1) m.rotate(angle, 1) return m # ---------------------------------------------------------------------- @staticmethod def rotZ(angle): """m = Matrix.rotZ(angle) - Return a rotation matrix arround Z""" m = Matrix(4, type=1) m.rotate(angle, 2) return m # ---------------------------------------------------------------------- def getEulerRotation(self): """return the Euler rotation angles ROTX(x) * ROTY(y) * ROTZ(z)""" # cos(z)*cos(y) # sin(z)*cos(y) # -sin(y) # -sin(z)*cos(x)+cos(z)*sin(y)*sin(x) # cos(z)*cos(x)+sin(z)*sin(y)*sin(x) # cos(y)*sin(x) # sin(z)*sin(x)+cos(z)*sin(y)*cos(x) # -cos(z)*sin(x)+sin(z)*sin(y)*cos(x) # cos(y)*cos(x) rx = atan2(self[1][2], self[2][2]) ry = -asin( self[0][2]) rz = atan2(self[0][1], self[0][0]) return rx,ry,rz # ---------------------------------------------------------------------- @staticmethod def eulerRotation(rx, ry, rz): """return a rotation matrix based on the Euler rotation ROTX(x) * ROTY(y) * ROTZ(z)""" m = Matrix(4, type=1) cx = cos(rx) cy = cos(ry) cz = cos(rz) sx = sin(rx) sy = sin(ry) sz = sin(rz) row = m[0] row[0] = cz*cy row[1] = sz*cy row[2] = -sy row = m[1] row[0] = -sz*cx+cz*sy*sx row[1] = cz*cx+sz*sy*sx row[2] = cy*sx row = m[2] row[0] = sz*sx+cz*sy*cx row[1] = -cz*sx+sz*sy*cx row[2] = cy*cx return m # ---------------------------------------------------------------------- def __add__(self, B): """Add 2 matrices""" if self.rows != B.rows or self.cols != B.cols: raise Exception("Matrix.add: matrices same size") m = Matrix(self.rows, self.cols) for i in range(self.rows): mrow = m[i] arow = self[i] brow = B[i] for j in range(self.cols): mrow[j] = arow[j] + brow[j] return m # ---------------------------------------------------------------------- def __sub__(self, B): """Subtract 2 matrices""" if self.rows != B.rows or self.cols != B.cols: raise Exception("Matrix.add: matrices same size") m = Matrix(self.rows, self.cols) for i in range(self.rows): mrow = 
m[i] arow = self[i] brow = B[i] for j in range(self.cols): mrow[j] = arow[j] - brow[j] return m # ---------------------------------------------------------------------- def __neg__(self): """Negate matrix""" m = Matrix(self.rows, self.cols) for i in range(self.rows): mrow = m[i] mold = self[i] for j in range(self.cols): mrow[j] = -mold[j] return m # ---------------------------------------------------------------------- def __mul__(self, B): """Multiply two matrices or vector A.__mul__(B|vec) <==> A*B or A*vec""" if isinstance(B, Matrix): # has to be a matrix of same cxN * Nxr if self.cols != B.rows: raise Exception("arrays don't have the correct dimensions") r = Matrix(self.rows, B.cols) for i in range(self.rows): for j in range(B.cols): s = 0.0 for k in range(self.cols): s += self[i][k]*B[k][j] r[i][j] = s return r elif isinstance(B, list): # Vector or list vecsize = len(B) v = Vector(vecsize) for i in range(vecsize): for j in range(min(self.cols, vecsize)): v[i] += self[i][j] * B[j] for j in range(vecsize, self.cols): v[i] += self[i][j] return v else: for row in self: for i in range(self.cols): row[i] *= B return self # ----------------------------------------------------------------------- # Special function to multiply a transformation matrix with a vector # ignoring the translation # ----------------------------------------------------------------------- def multNoTranslation(self, B): """Multiply matrix with a vector ignoring the translation part""" if not isinstance(B, list): raise Exception("Invalid operation") vecsize = len(B) v = Vector(vecsize) for i in range(vecsize): for j in range(min(self.cols, vecsize)): v[i] += self[i][j] * B[j] return v # ---------------------------------------------------------------------- def inv(self): """Inverse matrix in place""" if self.rows != self.cols: raise Exception("inverting a non square matrix") index = [ 0 ] * self.rows self.__ludcmp(index) y = Matrix(self.rows) for j in range(self.rows): col = [ 0.0 ] * self.rows col[j] = 1.0 self.__lubksb(index,col) for i in range(self.rows): y[i][j] = col[i] for j in range(self.rows): self[j] = y[j] inverse = inv # ---------------------------------------------------------------------- def clone(self): """Clone matrix""" m = Matrix(self.rows, self.cols) for i in range(self.rows): m[i] = self[i][:] return m # ---------------------------------------------------------------------- # determinant with Gauss method # ---------------------------------------------------------------------- def det(self, eps=_accuracy): """determinant of square matrix using Gauss method""" if self.rows == 2: return self[0][0]*self[1][1] - self[1][0]*self[0][1] elif self.rows == 3: return self[0][0]*(self[1][1]*self[2][2] - self[2][1]*self[1][2]) \ - self[0][1]*(self[1][0]*self[2][2] - self[2][0]*self[1][2]) \ + self[0][2]*(self[1][0]*self[2][1] - self[2][0]*self[1][1]) M = self.clone() s = 1.0 n = M.rows for i in range(n-1): # find the absolute maximum value ma = abs(M[i][i]) k = i for j in range(i+1, n): if abs(M[j][i]) > ma: ma = abs(M[j][i]) k = j if ma < eps: return 0.0 # swap rows i,k if i != k: s = -s # Change sign of determinate for j in range(n): d = M[i][j] M[i][j] = M[k][j] M[k][j] = d # make all the following rows with zero at the i column for j in range(i+1, n): if abs(M[j][i]) < _accuracy: continue d = - M[i][i] / M[j][i] s *= d for k in range(i,n): M[j][k] = M[i][k] + d * M[j][k] d = M[0][0] / s for i in range(1,n): d *= M[i][i] return d determinant = det # 
---------------------------------------------------------------------- # LU decomposition. # Parameters # index[0:size] row permutation record # ---------------------------------------------------------------------- def __ludcmp(self, index): #procedure expose indx. size = self.rows vv = [ 0.0 ] * size for i in range(size): big = 0.0 for j in range(size): big = max(abs(self[i][j]), big) if big==0: raise Exception("Singular matrix found") vv[i] = 1.0/big for j in range(size): for i in range(j): s = self[i][j] for k in range(i): s -= self[i][k] * self[k][j] self[i][j] = s big = 0.0 for i in range(j,size): s = self[i][j] for k in range(j): s -= self[i][k] * self[k][j] self[i][j] = s dum = vv[i]*abs(s) if dum >= big: big = dum imax = i if j != imax: for k in range(size): dum = self[imax][k] self[imax][k] = self[j][k] self[j][k] = dum vv[imax] = vv[j] index[j] = imax if self[j][j] == 0.0: self[j][j] = 1E-20 if j != size-1: dum = 1.0/self[j][j] for i in range(j+1,size): self[i][j] *= dum # ---------------------------------------------------------------------- # backward substitution # index[0:size] row permutation record # col[0:size] right hand vector (?) # ---------------------------------------------------------------------- def __lubksb(self, index, col): ii = -1 size = self.rows for i in range(size): ip = index[i] s = col[ip] col[ip] = col[i] if ii >= 0: for j in range(ii,i): s -= self[i][j] * col[j] elif s != 0.0: ii = i col[i] = s for i in range(size-1,-1,-1): s = col[i] for j in range(i+1,size): s -= self[i][j] * col[j] col[i] = s/self[i][i] #------------------------------------------------------------------------------- # Basic Matrices #------------------------------------------------------------------------------- Matrix.O = Matrix(4, type=0) Matrix.U = Matrix(4, type=1) #------------------------------------------------------------------------------- # Quaternion # # Note: See the following for more information on quaternions: # # - Shoemake, K., Animating rotation with quaternion curves, Computer # Graphics 19, No 3 (Proc. SIGGRAPH'85), 245-254, 1985. # - Pletinckx, D., Quaternion calculus as a basic tool in computer # graphics, The Visual Computer 5, 2-13, 1989. 
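# For reference: the quaternion below is stored as the list [x, y, z, w],
# i.e. vector part first and scalar part last.  When constructed from a
# rotation axis v and an angle b, the constructor implements
#     q = ( sin(b/2) * v/|v| ,  cos(b/2) )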
#-------------------------------------------------------------------------------
class Quaternion(list):
    def __init__(self, a, b=None, c=None, d=None):
        list.__init__(self)
        if isinstance(a, Quaternion):
            self.extend(a)
        elif isinstance(a, Matrix):
            tr = a[0][0] + a[1][1] + a[2][2] + 1.0  # trace of matrix
            if tr > 0:
                S = sqrt(tr) * 2.0      # S=4*qw
                qw = 0.25 * S
                qx = (a[2][1] - a[1][2]) / S
                qy = (a[0][2] - a[2][0]) / S
                qz = (a[1][0] - a[0][1]) / S
            elif a[0][0] > a[1][1] and a[0][0] > a[2][2]:
                S = sqrt(1.0 + a[0][0] - a[1][1] - a[2][2]) * 2.0       # S=4*qx
                qx = 0.25 * S
                qy = (a[0][1] + a[1][0]) / S
                qz = (a[0][2] + a[2][0]) / S
                qw = (a[2][1] - a[1][2]) / S
            elif a[1][1] > a[2][2]:
                S = sqrt(1.0 + a[1][1] - a[0][0] - a[2][2]) * 2.0       # S=4*qy
                qx = (a[0][1] + a[1][0]) / S
                qy = 0.25 * S
                qz = (a[1][2] + a[2][1]) / S
                qw = (a[0][2] - a[2][0]) / S
            else:
                S = sqrt(1.0 + a[2][2] - a[0][0] - a[1][1]) * 2.0       # S=4*qz
                qx = (a[0][2] + a[2][0]) / S
                qy = (a[1][2] + a[2][1]) / S
                qz = 0.25 * S
                qw = (a[1][0] - a[0][1]) / S
            self.extend([qx, qy, qz, qw])
        elif isinstance(a,Vector) and isinstance(b,float):
            s = sin(b/2.0) / a.length()
            self.append(a[0]*s)
            self.append(a[1]*s)
            self.append(a[2]*s)
            self.append(cos(b/2.0))
        else:
            self.extend([a,b,c,d])

    # ----------------------------------------------------------------------
    # Quaternions always obey:  a^2 + b^2 + c^2 + d^2 = 1.0
    # If they don't add up to 1.0, dividing each component by their
    # magnitude will renormalize them.
    # ----------------------------------------------------------------------
    def norm(self):
        """normalize quaternion"""
        mag = sqrt(self[0]**2 + self[1]**2 + self[2]**2 + self[3]**2)
        self[0] /= mag
        self[1] /= mag
        self[2] /= mag
        self[3] /= mag
        return mag
    normalize = norm

    # ----------------------------------------------------------------------
    def vector(self):
        """return vector of quaternion"""
        return Vector(self[0], self[1], self[2])

    # ----------------------------------------------------------------------
    # return rotation matrix
    # ----------------------------------------------------------------------
    def matrix(self):
        """return rotation matrix"""
        m = Matrix(4, type=1)
        m[0][0] = 1.0 - 2.0 * (self[1] * self[1] + self[2] * self[2])
        m[0][1] = 2.0 * (self[0] * self[1] - self[2] * self[3])
        m[0][2] = 2.0 * (self[2] * self[0] + self[1] * self[3])
        m[1][0] = 2.0 * (self[0] * self[1] + self[2] * self[3])
        m[1][1] = 1.0 - 2.0 * (self[2] * self[2] + self[0] * self[0])
        m[1][2] = 2.0 * (self[1] * self[2] - self[0] * self[3])
        m[2][0] = 2.0 * (self[2] * self[0] - self[1] * self[3])
        m[2][1] = 2.0 * (self[1] * self[2] + self[0] * self[3])
        m[2][2] = 1.0 - 2.0 * (self[1] * self[1] + self[0] * self[0])
        return m

    # ----------------------------------------------------------------------
    # Given two rotations, e1 and e2, expressed as quaternion rotations,
    # figure out the equivalent single rotation.  The result is
    # renormalized after every composition, to keep error from creeping in.
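    # Writing q1 = (v1, w1) and q2 = (v2, w2), with v the vector part and
    # w the scalar part, the composition computed below is
    #     q1 + q2 = ( w2*v1 + w1*v2 + v2 x v1 ,  w1*w2 - v1.v2 )
    # followed by a renormalization.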
    # ----------------------------------------------------------------------
    def __add__(self, b):
        v1 = self.vector()
        v2 = b.vector()
        t1 = v1 * b[3]
        t2 = v2 * self[3]
        t3 = v2.cross(v1)
        tf = t1 + t2 + t3
        q = Quaternion(tf, self[3]*b[3] - v1.dot(v2))
        q.norm()
        return q

    # ----------------------------------------------------------------------
    def __iadd__(self, b):
        v1 = self.vector()
        v2 = b.vector()
        t1 = v1 * b[3]
        t2 = v2 * self[3]
        t3 = v2.cross(v1)
        tf = t1 + t2 + t3
        self[0] = tf[0]
        self[1] = tf[1]
        self[2] = tf[2]
        self[3] = self[3]*b[3] - v1.dot(v2)
        self.norm()
        return self

#-------------------------------------------------------------------------------
def gauss(A, B):
    """Solve A*X = B using the Gauss elimination method"""
    n = len(A)
    s = [0.0]*n
    X = [0.0]*n
    p = [i for i in range(n)]
    for i in range(n):
        s[i] = max([abs(x) for x in A[i]])
    for k in range(n-1):
        # select j>=k so that
        # |A[p[j]][k]| / s[p[i]] >= |A[p[i]][k]| / s[p[i]] for i = k,k+1,...,n
        j = k
        ap = abs(A[p[j]][k]) / s[p[j]]
        for i in range(k+1, n):
            api = abs(A[p[i]][k]) / s[p[i]]
            if api>ap:
                j = i
                ap = api
        if j!=k: p[k],p[j] = p[j],p[k]  # Swap values
        for i in range(k+1, n):
            z = A[p[i]][k] / A[p[k]][k]
            A[p[i]][k] = z
            for j in range(k+1, n):
                A[p[i]][j] -= z * A[p[k]][j]
    for k in range(n-1):
        for i in range(k+1,n):
            B[p[i]] -= A[p[i]][k] * B[p[k]]
    for i in range(n-1, -1, -1):
        X[i] = B[p[i]]
        for j in range(i+1, n):
            X[i] -= A[p[i]][j] * X[j]
        X[i] /= A[p[i]][i]
    return X

#-------------------------------------------------------------------------------
def solveOverDetermined(A, B, W=None):
    """Solve the overdetermined linear system defined by the matrices A,B
    such as A*X = B
    Optionally a weight can be specified"""
    if A.rows < A.cols:
        raise Exception("solveOverDetermined: A matrix has more columns than rows")
    AT = A.transpose()
    if W:
        Wd = Matrix.diagonal(W)
        ATA = AT * Wd * A
        ATB = AT * Wd * B
    else:
        ATA = AT * A
        ATB = AT * B
    ATA.inv()
    RT = ATA * ATB
    return [RT[i][0] for i in range(len(RT))]

#-------------------------------------------------------------------------------
def linear(X, Y):
    """
    Solve linear regression  y = a + b*x
    @return a,b,r
    """
    Sx = Sy = Sx2 = Sy2 = Sxy = 0.0
    for x,y in zip(X,Y):
        Sx  += x
        Sy  += y
        Sx2 += x*x
        Sy2 += y*y
        Sxy += x*y
    n = float(len(X))
    try:
        b = (Sxy - Sx*Sy/n) / (Sx2 - Sx*Sx/n)
        a = Sy/n - b * Sx/n
        r = (Sxy - Sx*Sy/n) / (sqrt(Sx2-Sx*Sx/n) * sqrt(Sy2-Sy*Sy/n))
        return a,b,r
    except ZeroDivisionError:
        return None

#-------------------------------------------------------------------------------
# Eigenvalues of real symmetric matrices
#
# The algorithm used in this routine is known as the Jacobi
# approximation method.
# The algorithm exploits the property of diagonal matrices, i.e.
# matrices whose elements are all zero except those on the main
# diagonal, that their eigenvalues are exactly the diagonal elements.
# Using the transformation
#           T                          T
#    A1 = R1 (f) A R1(f),      A2 = R2 (f) A1 R2(f)
# we transform the matrix A repeatedly, until the sum of all the
# off-diagonal elements reaches a value chosen by the user, or becomes
# zero.
# The steps of the procedure are:
# 1. Find the off-diagonal element with the largest absolute value.
#    This gives p and q.
# 2. Determine the rotation angle f.  This gives sinf and cosf.
# 3. Apply the transformation Ai -> Ai+1.
# 4. Check whether the sum of the off-diagonal elements has reached the
#    desired value.  If yes, the diagonal elements are the
#    approximations of the eigenvalues; if not, go back to step 1.
# e.g.
#         |  1 -2 -1  |
#    A =  | -2  1 -1  |
#         | -1 -1 2.5 |
# the absolute maximum is A(1,2) = -2
# We compute the angle f, co=cos(f), si=sin(f), and perform the
# transformation
#         | co -si  0 |   |  1 -2 -1  |   |  co si  0 |
#    A =  | si  co  0 | x | -2  1 -1  | x | -si co  0 |
#         |  0   0  1 |   | -1 -1 2.5 |   |   0  0  1 |
#
# The parameters of the routine are the following:
#    A     - square matrix
#    eps   - accuracy (sum of squares)
#    check - whether or not the symmetry of the initial matrix
#            should be verified
#-------------------------------------------------------------------------------
def eigenvalues(M, eps=_accuracy, check=False):
    """Return eigen values and eigen vectors of a symmetric matrix"""
    n = M.rows

    # check whether the matrix is symmetric
    if check:
        if n != M.cols: return None
        for i in range(n):
            for j in range(i,n):
                if M[i][j] != M[j][i]:
                    return None

    # Allocate arrays
    A  = M.clone()
    R  = Matrix(n, type=0)
    RT = Matrix(n, type=0)
    ZW = Matrix(n, type=0)
    V  = None

    # normally, after a few failed attempts, it should reject the matrix
    while True:
        # Step 1. Find the off-diagonal element with the largest
        #         absolute value
        p=0; q=1; el=abs(A[p][q])
        for i in range(1, n):
            for j in range(i):
                if abs(A[i][j]) > el:
                    el = abs(A[i][j])
                    p = i; q = j
        if el==0: break

        # Build R and RT
        for i in range(n):
            for j in range(n):
                R[i][j] = RT[i][j] = (i==j)

        # Step 2. Determine the angle f, cosf and sinf
        fi = (A[q][q] - A[p][p]) / (2*A[p][q])
        t = 1 / (fi + sqrt(fi*fi+1))
        if fi<0:
            t = -t
        co = 1 / sqrt(1+t*t)
        si = t / sqrt(1+t*t)
        R[p][p] = R[q][q] = co
        RT[p][p] = RT[q][q] = co
        R[p][q]  = si; R[q][p]  = -si
        RT[p][q] = -si; RT[q][p] = si

        # Step 3. Transformation Ai+1 = Rt * Ai * R, keeping also the
        #         product Rn*...*R2*R1 that gives us the eigenvectors
        if V is None:
            V = R.clone()
        else:
            V = V * R
        for i in range(n):
            for j in range(n):
                if j!=p and j!=q:
                    ZW[i][j] = A[i][j]
                else:
                    zw1 = 0
                    for k in range(n):
                        zw1 += A[i][k] * R[k][j]
                    ZW[i][j] = zw1
        for i in range(n):
            for j in range(n):
                if i!=p and i!=q:
                    A[i][j] = ZW[i][j]
                else:
                    zw1 = 0
                    for k in range(n):
                        zw1 += RT[i][k] * ZW[k][j]
                    A[i][j] = zw1

        # Step 4. Compute the sum and check whether we are done
        zw1 = 0
        k = 0
        for i in range(1,n):
            for j in range(i):
                zw1 += A[i][j] * A[i][j]
                k += 1
        zw1 /= n

        # Exit condition
        if zw1 <= eps: break

    return ([A[i][i] for i in range(n)],V.T())

#-------------------------------------------------------------------------------
# Given a function, and given a bracketing triplet of abscissas ax,bx,cx (such
# that bx is between ax and cx, and f(bx) is less than both f(ax) and f(cx)),
# this routine performs a golden section search for the minimum, isolating it
# to a fractional precision of about eps. The abscissa of the minimum is
# returned as xmin, and the minimum function value is returned as golden, the
# returned function value.
#
# @param func   function to be evaluated
# @param ax     triplet of abscissas ax,bx,cx
# @param bx     where func(x+bx*d) < min[ func(x+ax*d), func(x+cx*d) ]
# @param cx     ...
# @param x starting vector/value # @param d direction vector/value # @param eps accuracy of search #------------------------------------------------------------------------------- def goldenSectionSearch(func, ax, bx, cx, x, d=1, eps=_accuracy): R = 0.61803399 # The golden ratio C = (1.0-R) x0 = ax # At any given time we will keep track of four points x3 = cx # x0, x1, x2, x3 if abs(cx-bx) > abs(bx-ax): x1 = bx x2 = bx + C*(cx-bx) else: x2 = bx x1 = bx - C*(bx-ax) f1 = func(x+x1*d) # The initial function evaluation f2 = func(x+x2*d) while abs(x3-x0) > eps*(abs(x1)+abs(x2)): if f2 < f1: x0 = x1 x1 = x2 x2 = R*x1 + C*x3 f1 = f2 f2 = func(x+x2*d) else: x3 = x2 x2 = x1 x1 = R*x2 + C*x0 f2 = f1 f1 = func(x+x1*d) if f1 < f2: return x1 else: return x2 #------------------------------------------------------------------------------- # Generators for calculating a) the permutations of a sequence and # b) the combinations and selections of a number of elements from a # sequence. Uses Python 2.2 generators. # Similar solutions found also in comp.lang.python # Keywords: generator, combination, permutation, selection # # See also: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/105962 # See also: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66463 # See also: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66465 #------------------------------------------------------------------------------- def xcombinations(items, n): if n<=0: yield [] else: for i in range(len(items)): for cc in xcombinations(items[:i]+items[i+1:],n-1): yield [items[i]]+cc #------------------------------------------------------------------------------- def xuniqueCombinations(items, n): if n<=0: yield [] else: for i in range(len(items)): for cc in xuniqueCombinations(items[i+1:],n-1): yield [items[i]]+cc #------------------------------------------------------------------------------- def xselections(items, n): if n<=0: yield [] else: for i in range(len(items)): for ss in xselections(items, n-1): yield [items[i]]+ss #------------------------------------------------------------------------------- def xpermutations(items): return xcombinations(items, len(items)) #------------------------------------------------------------------------------- # Conversion between rectangular and polar coordinates # Usage: # real, real = rect(real, real [, deg=False]) # real, real = polar(real, real [, deg=False]) # Normally, rect() and polar() uses radian for angle; but, # if deg=True specified, degree is used instead. 
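# For example (a worked round trip):
#   polar(1.0, 1.0, deg=True)                -> (1.4142135..., 45.0)
#   rect(1.4142135623730951, 45.0, deg=True) -> (~1.0, 1.0)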
#------------------------------------------------------------------------------- # radian if deg=False; degree if deg=True def rect(r, w, deg=False): """ Convert from polar (r,w) to rectangular (x,y) x = r cos(w) y = r sin(w) """ if deg: w = radians(w) return r * cos(w), r * sin(w) #------------------------------------------------------------------------------- # radian if deg=False; degree if deg=True #------------------------------------------------------------------------------- def polar(x, y, deg=False): """ Convert from rectangular (x,y) to polar (r,w) r = sqrt(x^2 + y^2) w = arctan(y/x) = [-pi,pi] = [-180,180] """ if deg: return hypot(x, y), degrees(atan2(y, x)) else: return hypot(x, y), atan2(y, x) #------------------------------------------------------------------------------- # Quadratic equation: x^2 + ax + b = 0 (or ax^2 + bx + c = 0) # Solve quadratic equation with real coefficients # # Usage # number, number = quadratic(real, real [, real]) # # Normally, x^2 + ax + b = 0 is assumed with the 2 coefficients # as # arguments; but, if 3 arguments are present, then ax^2 + bx + c = 0 is assumed. #------------------------------------------------------------------------------- #def quadratic(a, b, c=None): # """ # x^2 + ax + b = 0 (or ax^2 + bx + c = 0) # By substituting x = y-t and t = a/2, # the equation reduces to y^2 + (b-t^2) = 0 # which has easy solution # y = +/- sqrt(t^2-b) # """ # if c: # (ax^2 + bx + c = 0) # a, b = b / float(a), c / float(a) # t = a / 2.0 # r = t**2 - b # if r >= 0: # real roots # y1 = sqrt(r) # else: # complex roots # y1 = cmath.sqrt(r) # y2 = -y1 # return y1 - t, y2 - t def quadratic(b, c, eps=_accuracy): D = b*b - 4.0*c if D <= 0.0: x1 = -0.5*b # Always return this as a solution!!! if D >= -eps*(b*b+abs(c)): return x1,x1 else: return None,None else: if b>0.0: bD = -b - sqrt(D) else: bD = -b + sqrt(D) return 0.5 * bD, 2.0 * c / bD #------------------------------------------------------------------------------- # Cubic equation: y^3 + a*y^2 + b*y + c = 0 (or ax^3 + bx^2 + cx + d = 0) # # Normally, x^3 + ax^2 + bx + c = 0 is assumed with the 3 coefficients as # arguments; but, if 4 arguments are present, then ax^3 + bx^2 + cx + d = 0 is # assumed. # # Even though both quadratic() and cubic() functions take real arguments, they # can be modified to accept any real or complex coefficients because the method # of solution does not make any assumptions. #------------------------------------------------------------------------------- def cubic(a, b, c, d=None, eps=_accuracy): if d is not None: # (ax^3 + bx^2 + cx + d = 0) a, b, c = b/float(a), c/float(a), d/float(a) Q = (a*a - 3.0*b) / 9.0 R = (2.*a**3 - 9.*a*b + 27.*c)/54. R2 = R**2 Q3 = Q**3 if R2 < Q3: # the cubic has 3 real solutions theta = acos(R/sqrt(Q3)) sqrt_Q = sqrt(Q) x1 = -2. * sqrt_Q * cos(theta/3.) - a/3. x2 = -2. * sqrt_Q * cos((theta+2.*pi)/3.) - a/3. x3 = -2. * sqrt_Q * cos((theta-2.*pi)/3.) - a/3. return x1,x2,x3 A = -copysign(1.0,R) * (abs(R) + sqrt(R2 - Q3))**(1./3.) 
if abs(A)>eps: B = Q / A else: B = 0.0 return (A+B) - a/3., None, None # imaginary roots # x2 = -(A+B)/2 - a/3 + i*sqrt(3)*(A-B) # x3 = -(A+B)/2 - a/3 - i*sqrt(3)*(A-B) #------------------------------------------------------------------------------- # Fit a plane to a set of points using least square fitting #------------------------------------------------------------------------------- def fitPlane(xyz): # First do statistics with points Sx = Sy = Sz = 0.0 Sx2 = Sy2 = Sz2 = 0.0 Sxy = Syz = Sxz = 0.0 for x,y,z in xyz: Sx += x Sy += y Sz += z Sx2 += x**2 Sy2 += y**2 Sz2 += z**2 Sxy += x*y Syz += y*z Sxz += x*z n = float(len(xyz)) Sx /= n Sy /= n Sz /= n Vx = Sx2/n - Sx**2 Vy = Sy2/n - Sy**2 Vz = Sz2/n - Sz**2 # Count zero variances nv = int(abs(Vx)<=_accuracy) + int(abs(Vy)<=_accuracy) + int(abs(Vz)<=_accuracy) if nv>1: return None elif nv==1: # Planes parallel to axes # Try the solution of x=Xo or y=Yo or z=Zo if abs(Vx)<=_accuracy: return 1.0, 0.0, 0.0, -Sx elif abs(Vy)<=_accuracy: return 0.0, 1.0, 0.0, -Sy else: return 0.0, 0.0, 1.0, -Sz # Try a generic solution # z = ax + by + d <=> ax + by -z + d = 0 # assuming c=-1 # it can only fail on ax + by + d = 0 # # / Sx2 Sxy Sx \ / Sxz \ # | Sxy Sy2 Sy | * X = | Syz | # \ Sx Sy n / \ Sz / A = Matrix([[Sx2, Sxy, Sx], [Sxy, Sy2, Sy], [Sx, Sy, n]]) B = Matrix([[Sxz], [Syz], [Sz]]) try: A.inverse() X = A*B return X[0][0], X[1][0], -1.0, X[2][0] except: pass # Try a solution where c=0 # y = ax + d <=> ax -y +d = 0 #. # / Sx2 Sx \ / Sxy \ # | | * X = | | # \ Sx n / \ Sy / A = Matrix([[Sx2, Sx], [Sx, n]]) B = Matrix([[Sxy], [Sy]]) try: A.inverse() X = A*B return X[0][0], -1.0, 0.0, X[1][0] except: return None #------------------------------------------------------------------------------- # Evaluating n'th degree polynomial is simple loop, starting with highest # coefficient a[n]. #------------------------------------------------------------------------------- def polyeval(a, x): """ p(x) = polyeval(a, x) = a[0] + a[1]x + a[2]x^2 +...+ a[n-1]x^{n-1} + a[n]x^n = a[0] + x(a[1] + x(a[2] +...+ x(a[n-1] + a[n]x)...) """ p = 0 a.reverse() for coef in a: p = p*x + coef a.reverse() return p #------------------------------------------------------------------------------- # Find the first derivative of a polynomial #------------------------------------------------------------------------------- def polyderiv(a): """ p'(x) = polyderiv(a) = b[0] + b[1]x + b[2]x^2 +...+ b[n-2]x^{n-2} + b[n-1]x^{n-1} where b[i] = (i+1)a[i+1] """ b = [] for i in range(1, len(a)): b.append(i * a[i]) return b #------------------------------------------------------------------------------- # Factor out a root from n'th degree polynomial, and return the remaining # (n-1)'th degree polynomial. # list = polyreduce(list, number) #------------------------------------------------------------------------------- def polyreduce(a, root): """ Given x = r is a root of n'th degree polynomial p(x) = (x-r)q(x), divide p(x) by linear factor (x-r) using the same algorithm as polynomial evaluation. 
Then, return the (n-1)'th degree quotient q(x) = polyreduce(a, r) = c[0] + c[1]x + c[2]x^2 +...+ c[n-2]x^{n-2} + c[n-1]x^{n-1} """ c, p = [], 0 a.reverse() for coef in a: p = p * root + coef c.append(p) a.reverse() c.reverse() return c[1:] #------------------------------------------------------------------------------- # Conversion from integer to Roman #------------------------------------------------------------------------------- def int2roman(num): """ Convert an integer to Roman numeral """ if not isinstance(num,int): raise TypeError("expected integer, got %s" % type(input)) if not 0 < num < 4000: raise ValueError("Argument must be between 1 and 3999") ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1) nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I') result = "" for i,n in zip(ints, nums): count = int(num / i) result += n * count num -= i * count return result #------------------------------------------------------------------------------- # Conversion from Roman to integer #------------------------------------------------------------------------------- def roman2int(roman): """ convert a roman string to integer """ if not isinstance(roman,str): raise TypeError("expected string, got %s"%type(roman)) roman = roman.upper() nums = ('M', 'D', 'C', 'L', 'X', 'V', 'I') ints = (1000, 500, 100, 50, 10, 5, 1) places = [] for c in roman: if not c in nums: raise ValueError("input is not a valid roman numeral: %s"%roman) for i in range(len(roman)): c = roman[i] value = ints[nums.index(c)] # If the next place holds a larger number, this value is negative. try: nextvalue = ints[nums.index(roman[i +1])] if nextvalue > value: value *= -1 except IndexError: # there is no next place. pass places.append(value) s = 0 for n in places: s += n # Easiest test for validity... if int2roman(s) == roman: return s else: raise ValueError('input is not a valid roman numeral: %s' % roman)
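
#-------------------------------------------------------------------------------
# Minimal usage sketch of a few routines defined above, assuming the
# definitions in this module are in scope.  Expected values in the
# comments assume exact arithmetic; floating point results will only
# match approximately.
#-------------------------------------------------------------------------------
if __name__ == "__main__":
    # Solve 2x + y = 5, x + 3y = 10 with Gauss elimination -> x=1, y=3
    print gauss([[2.0, 1.0], [1.0, 3.0]], [5.0, 10.0])

    # Fit y = a + b*x through (0,1), (1,3), (2,5) -> a=1, b=2, r=1
    print linear([0.0, 1.0, 2.0], [1.0, 3.0, 5.0])

    # Roots of x^2 - 5x + 6 = 0 -> 3 and 2
    print quadratic(-5.0, 6.0)

    # Roman numeral round trip -> MCMLXXXVII 1987
    print int2roman(1987), roman2int(int2roman(1987))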
gpl-2.0
1,152,561,699,915,834,500
26.538658
100
0.457226
false
2.856392
false
false
false
kdmurray91/khmer
sandbox/find-high-abund-kmers.py
1
2443
#! /usr/bin/env python2
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: khmer-project@idyll.org
#
"""
@@
"""
import sys
import screed
import khmer
from khmer.khmer_args import build_counting_args, DEFAULT_MIN_TABLESIZE

DEFAULT_LOWER_CUTOFF = 2000
DEFAULT_UPPER_CUTOFF = 65535

###


def main():
    # option/attribute names below follow khmer.khmer_args
    # (build_counting_args provides -k/--ksize, -N/--n_tables and
    # -x/--min-tablesize)
    parser = build_counting_args()
    parser.add_argument('-l', '--lower-cutoff', type=int, dest='lower_cutoff',
                        default=DEFAULT_LOWER_CUTOFF)
    parser.add_argument('-u', '--upper-cutoff', type=int, dest='upper_cutoff',
                        default=DEFAULT_UPPER_CUTOFF)

    parser.add_argument('output_filename')
    parser.add_argument('input_filename')

    args = parser.parse_args()

    if not args.quiet:
        if args.min_tablesize == DEFAULT_MIN_TABLESIZE:
            print >>sys.stderr, "** WARNING: tablesize is default! " \
                "You absodefly want to increase this!\n** " \
                "Please read the docs!"

        print >>sys.stderr, '\nPARAMETERS:'
        print >>sys.stderr, ' - kmer size =     %d \t\t(-k)' % args.ksize
        print >>sys.stderr, ' - n tables =      %d \t\t(-N)' % args.n_tables
        print >>sys.stderr, ' - min tablesize = %-5.2g \t(-x)' % \
            args.min_tablesize
        print >>sys.stderr, ''
        print >>sys.stderr, 'Estimated memory usage is %.2g bytes ' \
            '(n_tables x min_tablesize)' % (
            args.n_tables * args.min_tablesize)
        print >>sys.stderr, '-' * 8

    K = args.ksize
    HT_SIZE = args.min_tablesize
    N_HT = args.n_tables

    output = args.output_filename
    input = args.input_filename

    print 'lower cutoff:', args.lower_cutoff
    print 'upper cutoff:', args.upper_cutoff
    print 'Saving stoptags to %s' % output
    print 'Loading sequences in %s' % input

    ###

    print 'making hashtable'
    ht = khmer.new_counting_hash(K, HT_SIZE, N_HT)
    ht.set_use_bigcount(True)

    print 'consuming input', input
    hb = ht.collect_high_abundance_kmers(input,
                                         args.lower_cutoff,
                                         args.upper_cutoff)

    print 'saving stoptags', output
    hb.save_stop_tags(output)

if __name__ == '__main__':
    main()

# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
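# Example invocation (file names are placeholders; -k/-N/-x are the k-mer
# size, table count and table size options provided by khmer's khmer_args,
# while -l/-u and the positional arguments are defined above):
#
#   python find-high-abund-kmers.py -k 20 -N 4 -x 1e8 \
#       -l 2000 -u 65535 reads.stoptags reads.fa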
bsd-3-clause
1,175,521,753,674,455,300
29.160494
78
0.588621
false
3.32381
false
false
false
ncbray/pystream
lib/PADS/Sudoku.py
1
67213
"""Sudoku.py PADS-based command-line application for generating and solving Sudoku puzzles. These puzzles are given as a 9x9 grid of cells, some of which are filled with digits in the range 1-9. The task is to fill the remaining cells in such a way that each row of the grid, each column of the grid, and each of nine 3x3 squares into which the grid is partitioned, all have one copy of each of the nine digits. A proper Sudoku puzzle must have a unique solution, and it should be possible to reach that solution by a sequence of logical deductions without trial and error. To the extent possible, we strive to keep the same ethic in our automated solver, by mimicking human rule-based reasoning, rather than resorting to brute force backtracking search. D. Eppstein, July 2005. """ import random import sys from optparse import OptionParser from BipartiteMatching import imperfections from StrongConnectivity import StronglyConnectedComponents from Repetitivity import NonrepetitiveGraph from Wrap import wrap try: set except NameError: from sets import Set as set class BadSudoku(Exception): pass # raised when we discover that a puzzle has no solutions # ====================================================================== # Bitmaps and patterns # ====================================================================== digits = range(1,10) class group: def __init__(self, i, j, x, y, name): mask = 0 h,k = [q for q in range(4) if q != i and q != j] for w in range(3): for z in range(3): mask |= 1L << (x*3**i + y*3**j + w*3**h + z*3**k) self.mask = mask self.pos = [None]*9 self.name = "%s %d" % (name,x+3*y+1) cols = [group(0,1,x,y,"column") for x in range(3) for y in range(3)] rows = [group(2,3,x,y,"row") for x in range(3) for y in range(3)] sqrs = [group(1,3,x,y,"square") for x in range(3) for y in range(3)] groups = sqrs+rows+cols neighbors = [0]*81 for i in range(81): b = 1L<<i for g in groups: if g.mask & b: neighbors[i] |= (g.mask &~ b) unmask = {} for i in range(81): unmask[1L<<i] = i alignments = {} for s in sqrs: for g in rows+cols: m = s.mask&g.mask if m: alignments[m] = (s,g) b1 = m &~ (m-1) m &=~ b1 b2 = m &~ (m-1) b3 = m &~ b2 alignments[b1|b2]=alignments[b1|b3]=alignments[b2|b3]=(s,g) triads = [] for square in sqrs: for group in rows+cols: triads.append((square.mask & group.mask,square,group)) # pairs of rows and columns that cross the same squares nearby = {} for g in rows+cols: nearby[g] = [] for r1 in rows: for s in sqrs: if r1.mask & s.mask != 0: for r2 in rows: if r1 != r2 and r2.mask & s.mask != 0: nearby[r1].append(r2) break for c1 in cols: for s in sqrs: if c1.mask & s.mask != 0: for c2 in cols: if c1.mask < c2.mask and c2.mask & s.mask != 0: nearby[c1].append(c2) break # ====================================================================== # Human-readable names for puzzle cells # ====================================================================== cellnames = [None]*81 for row in range(9): for col in range(9): cellnames[row*9+col] = ''.join(['R',str(row+1),'C',str(col+1)]) def andlist(list,conjunction="and"): """Turn list of strings into English text.""" if len(list) == 0: return "(empty list!)" if len(list) == 1: return list[0] elif len(list) == 2: return (' '+conjunction+' ').join(list) else: return ', '.join(list[:-1]+[conjunction+' '+list[-1]]) def namecells(mask,conjunction="and"): """English string describing a sequence of cells.""" names = [] while mask: bit = mask &~ (mask - 1) names.append(cellnames[unmask[bit]]) mask &=~ bit return andlist(names,conjunction) def 
pathname(cells): return '-'.join([cellnames[c] for c in cells]) def plural(howmany,objectname): if howmany == 1: return objectname else: return "%d %ss" % (howmany,objectname) # ====================================================================== # State for puzzle solver # ====================================================================== class Sudoku: """ Data structure for storing and manipulating Sudoku puzzles. The actual rules for solving the puzzles are implemented separately from this class. """ def __init__(self,initial_placements = None): """ Initialize a new Sudoku grid. If an argument is given, it should either be a sequence of 81 digits 0-9 (0 meaning a not-yet-filled cell), or a sequence of (digit,cell) pairs. The main state we use for the solver is an array contents[] of 81 cells containing digits 0-9 (0 for an unfilled cell) and an array locations[] indexed by the digits 1-9, containing bitmasks of the cells still available to each digit. We also store additional fields: - progress is a boolean, set whenever one of our methods changes the state of the puzzle, and used by step() to tell whether one of its rules fired. - rules_used is a set of the rule names that have made progress. - pairs is a dictionary mapping bitmasks of pairs of cells to lists of digits that must be located in that pair, as set up by the pair rule and used by other later rules. - bilocation is a NonrepetitiveGraph representing paths and cycles among bilocated digits, as constructed by the bilocal rule and used by the repeat and conflict rules. - bivalues is a NonrepetitiveGraph representing paths and cycles among bivalued cells, as constructed by the bivalue rule and used by the repeat and conflict rules. - otherbv maps pairs (cell,digit) in the bivalue graph to the other digit available at the same cell - logstream is a stream on which to log verbose descriptions of the steps made by the solver (typically sys.stderr), or None if verbose descriptions are not to be logged. - steps is used to count how many solver passes we've made so far. - original_cells is a bitmask of cells that were originally nonempty. - assume_unique should be set true to enable solution rules based on the assumption that there exists a unique solution """ self.contents = [0]*81 self.locations = [None]+[(1L<<81)-1]*9 self.rules_used = set() self.progress = False self.pairs = None self.bilocation = None self.logstream = False self.steps = 0 self.original_cells = 0 self.assume_unique = False if initial_placements: cell = 0 for item in initial_placements: try: digit = int(item) except TypeError: digit,cell = item if digit: self.place(digit,cell) self.original_cells |= 1L << cell cell += 1 def __iter__(self): """ If we are asked to loop over the items in a grid (for instance, if we pass one Sudoku instance as the argument to the initialization of another one) we simply list the known cell contents of the grid. """ return iter(self.contents) def mark_progress(self): """Set progress True and clear fields that depended on old state.""" self.progress = True self.pairs = None def log(self,items,explanation=None): """ Send a message for verbose output. Items should be a string or list of strings in the message. If explanation is not None, it is called as a function and the results appended to items. 
""" if not self.logstream: return if isinstance(items,str): items = [items] if explanation: if isinstance(explanation,str) or isinstance(explanation,list): x = explanation else: x = explanation() if isinstance(x,str): x = [x] else: x = [] text = ' '.join([str(i) for i in items+x]) for line in wrap(text): print >>self.logstream, line print >>self.logstream def place(self,digit,cell,explanation=None): """Change the puzzle by filling the given cell with the given digit.""" if digit != int(digit) or not 1 <= digit <= 9: raise ValueError("place(%d,%d): digit out of range" % (digit,cell)) if self.contents[cell] == digit: return if self.contents[cell]: self.log(["Unable to place",digit,"in",cellnames[cell], "as it already contains",str(self.contents[cell])+"."]) raise BadSudoku("place(%d,%d): cell already contains %d" % (digit,cell,self.contents[cell])) if (1L<<cell) & self.locations[digit] == 0: self.log(["Unable to place",digit,"in",cellnames[cell], "as that digit is not available to be placed there."]) raise BadSudoku("place(%d,%d): location not available" % (digit,cell)) self.contents[cell] = digit bit = 1L << cell for d in digits: if d != digit: self.unplace(d,bit,explanation,False) else: self.unplace(d,neighbors[cell],explanation,False) self.mark_progress() self.log(["Placing",digit,"in",cellnames[cell]+'.'],explanation) def unplace(self,digit,mask,explanation=None,log=True): """ Eliminate the masked positions as possible locations for digit. The log argument should be true for external callers, but false when called by Sudoku.place; it is used to disable verbose output that would be redundant to the output from place. """ if digit != int(digit) or not 1 <= digit <= 9: raise ValueError("unplace(%d): digit out of range" % digit) if self.locations[digit] & mask: if log and self.logstream: items = ["Preventing",digit,"from being placed in", namecells(self.locations[digit] & mask,"or")+'.'] self.log(items,explanation) self.locations[digit] &=~ mask self.mark_progress() def choices(self,cell): """Which digits are still available to be placed in the cell?""" bit = 1L<<cell return [d for d in digits if self.locations[d] & bit] def complete(self): """True if all cells have been filled in.""" return 0 not in self.contents # ====================================================================== # Rules for puzzle solver # ====================================================================== def locate(grid): """ Place digits that can only go in one cell of their group. If a digit x has only one remaining cell that it can be placed in, within some row, column, or square, then we place it in that cell. Any potential positions of x incompatible with that cell (because they lie in the same row, column, or square) are removed from future consideration. """ for d in digits: for g in groups: dglocs = grid.locations[d] & g.mask if dglocs & (dglocs-1) == 0: if dglocs == 0: grid.log(["Unable to place",d,"anywhere in",g.name+"."]) raise BadSudoku("No place for %d in %s" %(d,g.name)) grid.place(d,unmask[dglocs], ["It is the only cell in",g.name, "in which",d,"can be placed."]) def eliminate(grid): """ Fill cells that can only contain one possible digit. If a cell has only one digit x that can be placed in it, we place x in that cell. Incompatible positions for x are removed from future consideration. 
""" for cell in range(81): if not grid.contents[cell]: allowed = grid.choices(cell) if len(allowed) == 0: grid.log(["Unable to place any digit in",cellnames[cell]+"."]) raise BadSudoku("No digit for cell %d" % cell) if len(allowed) == 1: grid.place(allowed[0],cell, "No other digit may be placed in that cell.") def align(grid): """ Eliminate positions that leave no choices for another group. If the cells of a square that can contain a digit x all lie in a single row or column, we eliminate positions for x that are outside the square but inside that row or column. Similarly, if the cells that can contain x within a row or column all lie in a single square, we eliminate positions that are inside that square but outside the row or column. """ for d in digits: for g in groups: a = grid.locations[d] & g.mask if a in alignments: s = [x for x in alignments[a] if x != g][0] def explain(): un = grid.locations[d] & s.mask &~ a if un & (un - 1): this = "These placements" else: this = "This placement" return [this, "would conflict with", namecells(a)+",", "which are the only cells in", g.name, "that can contain that digit."] grid.unplace(d, s.mask &~ a, explain) enough_room = "To leave enough room for those digits, no other " \ "digits may be placed in those cells." def explain_pair(grid,digs,locs): """Concoct explanation for application of pair rule.""" d1,d2 = digs g1 = [g for g in groups if grid.locations[d1] & g.mask == grid.locations[d1] & locs] g2 = [g for g in groups if grid.locations[d2] & g.mask == grid.locations[d2] & locs] for g in g1: if g in g2: ing = ["In", g.name+",", "digits", d1, "and", d2] break else: # unlikely to get here due to align rule applying before pair ing = ["In",(g1 and g1[0].name or "no group")+",", "digit", str(d1)+",", "and in",(g2 and g2[0].name or "no group")+",", "digit", str(d2)] return ing+["may only be placed in",namecells(locs)+".", enough_room] def pair(grid): """ Eliminate positions that leave no choices for two other digits. If two digits x and y each share the same two cells as the only locations they may be placed within some row, column, or square, then all other digits must avoid those two cells. """ grid.pairs = pairs = {} for d in digits: for g in groups: dglocs = grid.locations[d] & g.mask fewerbits = dglocs & (dglocs - 1) if fewerbits & (fewerbits - 1) == 0: if d not in pairs.setdefault(dglocs,[d]): pairs[dglocs].append(d) for e in digits: if e not in pairs[dglocs]: def explain(): return explain_pair(grid,pairs[dglocs],dglocs) grid.unplace(e, dglocs, explain) def triad(grid): """ Find forced triples of digits within triples of cells. If some three cells, formed by intersecting a row or column with a square, have three digits whose only remaining positions within that row, column, or square are among those three cells, we prevent all other digits from being placed there. We also remove positions for those three forced digits outside the triple but within the row, column, or square containing it. 
""" for mask,sqr,grp in triads: forces = [d for d in digits if (grid.locations[d]&sqr.mask == grid.locations[d]&mask) or (grid.locations[d]&grp.mask == grid.locations[d]&mask)] if len(forces) == 3: outside = (sqr.mask | grp.mask) &~ mask for d in digits: def explain(): ing = ["In", grp.name, "and", sqr.name+",", "digits %d, %d, and %d" % tuple(forces), "may only be placed in", namecells(mask)+"."] if d not in forces: return ing+[enough_room] elif grid.locations[d]&sqr.mask == grid.locations[d]&mask: og = grp.name else: og = sqr.name return ing+["Therefore,", d, "may not be placed", "in any other cell of", og] grid.unplace(d, d in forces and outside or mask, explain) def digit(grid): """ Remove incompatible positions of a single digit. If the placement of digit x in cell y can not be extended to a placement of nine copies of x covering each row and column of the grid exactly once, we eliminate cell y from consideration as a placement for x. """ for d in digits: graph = {} locs = grid.locations[d] for r in range(9): graph[r] = [c for c in range(9) if rows[r].mask & cols[c].mask & locs] imp = imperfections(graph) mask = 0 forced = [] for r in imp: for c in imp[r]: mask |= rows[r].mask & cols[c].mask if imp[r][c] not in forced: forced.append(imp[r][c]) mask &= grid.locations[d] if not mask: continue def explain(): expl = [] for f in forced: fr = [rows[r].name for r in f] fr.sort() fc = list(set([cols[c].name for r in f for c in f[r]])) fc.sort() expl += ["In", andlist(fr)+", digit", d, "can only be placed in", andlist(fc,"or")+"."] return expl + ["Placing",d,"in",namecells(mask,"or"), "would leave too few columns for", d, "to be placed in all of these rows."] grid.unplace(d,mask,explain) def rectangles(): """Generate pairs of rows and columns that form two-square rectangles.""" for r1 in rows: for r2 in rows: if r2 in nearby[r1]: for c1 in range(9): for c2 in range(c1): if cols[c1] not in nearby[cols[c2]]: yield r1,r2,cols[c2],cols[c1] elif r1.mask < r2.mask: for c1 in cols: for c2 in nearby[c1]: yield r1,r2,c1,c2 def rectangle(grid): """ Avoid the formation of an ambiguous rectangle. That is, four corners of a rectangle within two squares, all four corners initially blank, and containing only two digits. If this situation occurred, the puzzle would necessarily have evenly many solutions, because we could swap the two digits in the rectangle corners in any solution to form a different solution, contradicting the assumption that there is only one. Therefore, we make sure that any such rectangle keeps at least three available values. """ if not grid.assume_unique: return for r1,r2,c1,c2 in rectangles(): mask = (r1.mask | r2.mask) & (c1.mask | c2.mask) if not (mask & grid.original_cells): # First rectangle test # If three cells are bivalued with the same two digits x,y # then we can eliminate x and y on the fourth safe_corners = 0 multiply_placable = [] for d in digits: dmask = grid.locations[d] & mask if dmask & (dmask - 1): multiply_placable.append(d) else: safe_corners |= dmask if len(multiply_placable) == 2 and \ safe_corners & (safe_corners-1) == 0: for d in multiply_placable: def explain(): return ["This placement would create an ambiguous", "rectangle for digits", str(multiply_placable[0]),"and", str(multiply_placable[1]),"in", r1.name+",",r2.name+",", c1.name+",","and",c2.name+"."] grid.unplace(d,safe_corners,explain) # Second rectangle test # If only three digits can be placed in the rectangle, # we eliminate placements that conflict with # all positions of one of the digits. 
placable = [d for d in digits if grid.locations[d] & mask] if len(placable) == 3: for d in placable: a = grid.locations[d] & mask conflicts = 0 for g in groups: if grid.locations[d] & g.mask & a == a: conflicts |= g.mask def explain(): un = conflicts &~ a if un & (un - 1): this = "These placements" else: this = "This placement" return ["The rectangle in", r1.name+",", r2.name+",", c1.name+", and", c2.name, "can only contain digits", andlist([str(dd) for dd in placable])+".", this, "would conflict with the placements", "of", str(d)+",", "creating an ambiguous", "rectangle on the remaining two digits."] grid.unplace(d, conflicts &~ a, explain) # Third rectangle test # If two cells are bivalued with digits x and y, # and the other two cells are bilocal with x, # then we can eliminate y from the two bilocal cells. for x1,x2 in ((r1,r2), (r2,r1), (c1,c2), (c2,c1)): xd = [d for d in digits if grid.locations[d] & mask & x1.mask] if len(xd) == 2: # found locked pair on x1's corners for d in xd: x2d = grid.locations[d] & x2.mask if x2d & mask == x2d: # and bilocal on x2 dd = xd[0]+xd[1]-d # other digit def explain(): return ["The rectangle in", r1.name+",", r2.name+",", c1.name+", and", c2.name, "can only contain digits", str(xd[0]),"and",str(xd[1]),"in", x1.name+".","In addition," "the only cells in",x2.name, "that can contain",str(d), "are in the rectangle.", "Therefore, to avoid creating an", "ambiguous rectangle, the",str(dd), "in",x2.name,"must be placed", "outside the rectangle."] grid.unplace(dd,x2d,explain) # Fourth rectangle test # If two cells are bivalued with digits x and y, # and a perpendicular side is bilocal with x, # then we can eliminate y from the remaining cell for x1,perp in ((r1,(c1,c2)),(r2,(c1,c2)), (c1,(r1,r2)),(c2,(r1,r2))): xd = [d for d in digits if grid.locations[d] & mask & x1.mask] if len(xd) == 2: # found locked pair on x1's corners for x2 in perp: for d in xd: x2d = grid.locations[d] & x2.mask if x2d & mask == x2d: # and bilocal on x2 dd = xd[0]+xd[1]-d # other digit def explain(): return ["For the rectangle in", r1.name+",", r2.name+",", c1.name+", and", c2.name, "the two corners in", x1.name,"must contain both digits", str(xd[0]),"and",str(xd[1]), "and the two corners in", x2.name,"must contain one",str(d)+".", "Therefore, to avoid creating an", "ambiguous rectangle, the", "remaining corner must not contain", str(dd)+"."] grid.unplace(dd,mask&~(x1.mask|x2.mask),explain) def trapezoid(grid): """ Force pairs of digits to form trapezoids instead of rectangles. If two digits can only be placed in five cells of two squares, four of which form a rectangle, then they must be placed in four cells that form a trapezoid out of those five. We prevent those digits from being placed in cells not part of a trapezoid, and prevent other digits from being placed in cells that are part of all such trapezoids. 
""" if not grid.assume_unique: return for r1,r2,c1,c2 in rectangles(): corners = (r1.mask | r2.mask) & (c1.mask | c2.mask) if not (corners & grid.original_cells): s1,s2 = [s for s in sqrs if s.mask & corners] uncorner = (s1.mask | s2.mask) &~ corners candidates = {} universal = None for d in digits: if not grid.locations[d] & uncorner: universal = d # can form five cells w/any other digit for d in digits: locs_for_d = grid.locations[d] & uncorner if locs_for_d and not (locs_for_d & (locs_for_d - 1)): if universal != None or locs_for_d in candidates: # found another digit sharing same five cells w/d if universal != None: d1,d2 = universal,d else: d1,d2 = candidates[locs_for_d],d explanation = ["Digits",str(d1),"and",str(d2), "must be placed in a trapezoid in", s1.name,"and",s2.name+",", "for if they were placed in a", "rectangle, their locations", "could be swapped, resulting", "in multiple solutions", "to the puzzle."] must = locs_for_d mustnt = 0 if s2.mask & locs_for_d: s1,s2 = s2,s1 # swap so s1 contains extra cell must |= corners & s2.mask for line in r1.mask,r2.mask,c1.mask,c2.mask: if line & locs_for_d and line & s2.mask: # most informative case: the extra cell # lies on a line through both squares. must |= corners & (s1.mask &~ line) mustnt |= corners & (s1.mask & line) for d3 in digits: if d3 == d1 or d3 == d2: grid.unplace(d3,mustnt,explanation) else: grid.unplace(d3,must,explanation) else: candidates[locs_for_d] = d def subproblem(grid): """ Remove incompatible positions within a single row, column, or square. If the placement of a digit x in cell y within a single row, column, or square can not be extended to a complete solution of that row, column, or square, then we eliminate that placement from consideration. """ for g in groups: graph = {} for d in digits: graph[d] = [] locs = grid.locations[d] & g.mask while locs: bit = locs &~ (locs-1) graph[d].append(unmask[bit]) locs &=~ bit imp = imperfections(graph) for d in imp.keys(): if not imp[d]: del imp[d] while imp: # Here with imp mapping digits to unplaceable cells. # We choose carefully the order of digits to handle, # so that our explanations make logical sense: if an # explanation includes the fact that a digit can only # go in certain cells, we need to have already handled # the unplaceable cells for that other digit. for d in imp: entailed = False for cell in imp[d]: for forced in imp[d][cell]: if forced in imp and imp[forced]: entailed = True break if not entailed: break # Here with imp[d] mapping d to some unplaceable cells. # We build up a bitmap of those cells, as we do collecting # the sets of digits and cells that must be matched to each # other and that prevent us from placing d in those cells. mask = 0 forces = [] for cell in imp[d]: bit = 1L<<cell if bit & grid.locations[d]: mask |= bit force = imp[d][cell] if force not in forces: forces.append(force) # Now that we have both the bitmap and the subgraphs describing # why each bit is in that bitmap, we are ready to make and # explain our unplacement decision. def explain(): that = "would make it impossible to place that digit." expls = [] for force in forces: if expls or len(force) > 1: that = "would leave too few remaining cells" \ " to place those digits." 
if expls: expls[-1] += ',' if force == forces[-1]: expls[-1] += ' and' forcedigs = [str(x) for x in force] forcedigs.sort() forcemask = 0 for dig in force: for cell in force[dig]: forcemask |= 1L<<cell expls += [len(forcedigs) == 1 and "digit" or "digits", andlist(forcedigs), "can only be placed in", namecells(forcemask)] expls[-1] += '.' return ["In", g.name+","] + expls + ["Placing", d, "in", namecells(mask,"or"), that] grid.unplace(d,mask,explain) del imp[d] if grid.progress: return # let changes propagate before trying more groups bilocal_explanation = \ "each two successive cells belong to a common row, column, or square," \ " and are the only two cells in that row, column, or square where one" \ " of the digits may be placed" incyclic = "In the cyclic sequence of cells" inpath = "In the sequence of cells" def bilocal(grid): """ Look for nonrepetitive cycles among bilocated digits. Despite the sesquipedalian summary line above, this is a form of analysis that is easy to perform by hand: draw a graph connecting two cells whenever some digit's location within a row, column, or square is forced to lie only in those two cells. We then search for cycles in the graph in which each two adjacent edges in the cycle have different labels. In any such cycle, each cell can only contain the digits labeling the two edges incident to it. """ if not grid.pairs: return # can only run after pair rule finds edges # Make labeled graph of pairs graph = dict([(i,{}) for i in range(81)]) for pair in grid.pairs: digs = grid.pairs[pair] bit = pair &~ (pair-1) pair &=~ bit if pair: v = unmask[bit] w = unmask[pair] graph[v][w] = graph[w][v] = digs # Apply repetitivity analysis to collect cyclic labels at each cell grid.bilocation = nrg = NonrepetitiveGraph(graph) forced = [set() for i in range(81)] for v,w,L in nrg.cyclic(): forced[v].add(L) forced[w].add(L) # Carry out forces indicated by our analysis for cell in range(81): if len(forced[cell]) == 2: # It's also possible for len(forced[cell]) to be > 2; # in this case multiple cycles go through the same edge # and cell must be filled with the digit labeling that edge. # But for simplicity's sake we ignore that possibility; # it doesn't happen very often and when it does the repetitive # cycle rule will find it instead. mask = 1L<<cell for d in digits: if d not in forced[cell]: def explain(): forced1,forced2 = tuple(forced[cell]) cycle = nrg.shortest(cell,forced1,cell,forced2) return [incyclic, pathname(cycle)+",", bilocal_explanation + ".", "This placement would prevent", forced1, "or", forced2, "from being placed in", cellnames[cell]+",", "making it impossible to place the cycle's", len(cycle)-1, "digits into the remaining", len(cycle)-2, "cells."] grid.unplace(d,mask,explain) bivalue_explanation = \ "each cell has two possible digits, each of which may also" \ " be placed at one of the cell's two neighbors in the sequence" def bivalue(grid): """ Look for nonrepetitive cycles among bivalued cells. We draw a graph connecting two cells whenever both can only contain two digits, one of those digits is the same for both cells, and both cells belong to the same row, column, or square. Edges are labeled by the digit(s) the two cells share. If any edge of this graph is contained in a cycle with no two consecutive edges having equal labels, then the digit labeling that edge must be placed on one of its two endpoints, and can not be placed in any other cell of the row, column, or square containing the edge. 
""" # Find and make bitmask per digit of bivalued cells graph = {} grid.otherbv = otherbv = {} tvmask = [0]*10 for c in range(81): ch = grid.choices(c) if len(ch) == 2: graph[c] = {} tvmask[ch[0]] |= 1L<<c tvmask[ch[1]] |= 1L<<c otherbv[c,ch[0]] = ch[1] otherbv[c,ch[1]] = ch[0] edgegroup = {} # Form edges and map back to their groups for g in groups: for d in digits: mask = tvmask[d] & g.mask dgcells = [] while mask: bit = mask &~ (mask - 1) dgcells.append(unmask[bit]) mask &=~ bit for v in dgcells: for w in dgcells: if v != w: edgegroup.setdefault((v,w),[]).append(g) graph[v].setdefault(w,set()).add(d) # Apply repetitivity analysis to collect cyclic labels at each cell # and eliminate that label from other cells of the same group grid.bivalues = nrg = NonrepetitiveGraph(graph) for v,w,digit in nrg.cyclic(): mask = 0 for g in edgegroup[v,w]: mask |= g.mask mask &=~ (1L << v) mask &=~ (1L << w) def explain(): cycle = [v] + nrg.shortest(w,grid.otherbv[w,digit], v,grid.otherbv[v,digit]) return ["In the cyclic sequence of cells", pathname(cycle)+",", bivalue_explanation + ".", "This placement would conflict with placing", digit, "in", namecells((1L<<v)|(1L<<w))+",", "making it impossible to fill the cycle's", len(cycle)-1, "cells with the remaining", len(cycle)-2, "digits."] grid.unplace(digit,mask,explain) def repeat(grid): """ Look for cycles of bilocated or bivalued vertices with one repetition. We use the same graphs described for the bilocal and bivalue rules; if there exists a cycle in which some two adjacent edges are labeled by the same digit, and all other adjacent pairs of cycle edges have differing digits, then the repeated digit must be placed at the cell where the two same-labeled edges meet (in the case of the bilocal graph) or can be eliminated from that cell (in the case of the bivalue graph). """ if not grid.bilocation or not grid.bivalues: return for cell in range(81): if not grid.contents[cell]: for d in grid.choices(cell): if (cell,d) in grid.bilocation.reachable(cell,d): cycle = grid.bilocation.shortest(cell,d,cell,d) if cycle[1] == cycle[-2]: # Degenerate repetitive cycle, look for a better one. # It would be a correct decision to place d in cell: # due to prior application of the bilocal rule, the # part of the cycle from cycle[1] to cycle[-2] must # itself be a repetitive cycle. But the explanation # will be clearer if we avoid using this cycle. 
break def explain(): expl = [incyclic, pathname(cycle)+",", bilocal_explanation + ".", "If",d,"were not placed in",cellnames[cell]+",", "it would have to be placed in", cellnames[cycle[1]],"and", cellnames[cycle[-2]],"instead,", "making it impossible to place the"] if len(cycle) == 4: expl.append("remaining digit.") else: expl += ["cycle's remaining",len(cycle)-3,"digits", "in the remaining"] if len(cycle) == 5: expl.append("cell.") else: expl += [len(cycle)-4,"cells."] return expl grid.place(d,cell,explain) return # allow changes to propagate w/simpler rules elif (cell,d) in grid.bivalues.reachable(cell,d): cycle = grid.bivalues.shortest(cell,d,cell,d) if cycle[1] == cycle[-2]: break def explain(): return [incyclic, pathname(cycle)+",", bivalue_explanation + ",", "except that", cellnames[cell], "shares", d, "as a possible value", "with both of its neighbors.", "Placing", d, "in", cellnames[cell], "would make it impossible", "to fill the cycle's remaining", len(cycle)-2, "cells with the remaining", len(cycle)-3, "digits, so only", grid.otherbv[cell,d], "can be placed in", cellnames[cell]+"."] grid.place(grid.otherbv[cell,d],cell,explain) return # allow changes to propagate w/simpler rules def path(grid): """ Look for paths of bilocated or bivalued cells with conflicting endpoints. In the same graphs used by the bilocal and repeat rules, if there exists a path that starts and ends with the same digit, with no two consecutive edges labeled by the same digit, then the digit ending the path can be placed in no cell that conflicts with both endpoints of the path. If the path endpoints belong to the same row, column, or square as each other, this eliminates other placements within that row, column, or square; otherwise, it eliminates placements at the other two corners of a rectangle having the two path endpoints as opposite corners. 
""" if not grid.bilocation or not grid.bivalues: return for cell in range(81): if not grid.contents[cell]: for d in grid.choices(cell): for neighbor,nd in grid.bilocation.reachable(cell,d): if nd == d: def explain(): path = grid.bilocation.shortest(cell,d,neighbor,d) return [inpath, pathname(path)+",", bilocal_explanation+".", "This placement conflicts with placing", d, "in", cellnames[cell], "or", cellnames[neighbor]+",", "making it", "impossible to place the sequence's", len(path)-1, "digits in the remaining", len(path)-2, "cells."] grid.unplace(d,neighbors[cell]&neighbors[neighbor], explain) if cell in grid.bivalues: for neighbor,nd in grid.bivalues.reachable(cell, grid.otherbv[cell,d]): if d == grid.otherbv[neighbor,nd]: def explain(): path = grid.bivalues.shortest(cell, grid.otherbv[cell,d],neighbor,nd) return [inpath, pathname(path)+",", bivalue_explanation+".", "This placement conflicts with placing", d, "in", cellnames[cell], "or", cellnames[neighbor]+",", "making it", "impossible to fill the sequence's", len(path), "cells using only the", len(path)-1, "shared digits of the sequence."] grid.unplace(d,neighbors[cell]&neighbors[neighbor], explain) def explain_conflict_path(grid,cell,d,why,reached,dd): """Explain why either cell,d or reached,dd must be placed.""" if why[reached,dd]: path = grid.bilocation.shortest(cell,d,reached,dd) if len(path) == 2: mask = (1L<<cell)|(1L<<reached) for g in groups: if g.mask & mask == mask: break return [cellnames[cell],"and",cellnames[reached], "are the only cells in",g.name, "in which",d,"may be placed, so if",d, "were not placed in",cellnames[cell]+",", "it would have to be placed in",cellnames[reached]+"."] return [inpath, pathname(path)+",", bilocal_explanation+".", "If",d,"were not placed in",cellnames[cell]+",", "then",dd,"would have to be placed in",cellnames[reached]+",", "in order to make room for the remaining", plural(len(path)-2,"digit"),"in the remaining", plural(len(path)-2,"cell"),"of the sequence."] path = grid.bivalues.shortest(cell,grid.otherbv[cell,d], reached,grid.otherbv[reached,dd]) if len(path) == 2: mask = (1L<<cell)|(1L<<reached) return [cellnames[cell],"and",cellnames[reached], "each have two possible values.", "If",d,"were not placed in",cellnames[cell], "it would have to contain",grid.otherbv[cell,d], "instead, forcing",cellnames[reached],"to contain",str(dd)+"."] return [inpath, pathname(path)+",", bivalue_explanation+".", "If",d,"were not placed in",cellnames[cell]+",", "then",dd,"would have to be placed in",cellnames[reached]+",", "in order to make allow the remaining",plural(len(path)-1,"cell"), "of the sequence to be filled by the remaining", plural(len(path)-1,"digit")+"."] def explain_conflict(grid,cell,d,why,reached,dd): """Concoct explanation for pair of conflicting paths, one to reached.""" for neighbor,ddd in why: if ddd == dd: if (1L<<neighbor) & neighbors[reached]: return explain_conflict_path(grid,cell,d,why,reached,dd) + \ explain_conflict_path(grid,cell,d,why,neighbor,dd) + \ [cellnames[reached],"and",cellnames[neighbor], "cannot both contain",str(dd)+",","so",cellnames[cell], "must contain",str(d)+"."] return explain_conflict_path(grid,cell,d,why,reached,dd) + \ ["This conflicts with another path that has become lost."] def explain_conflict_group(grid,cell,d,why,g,dd): """Conflict explanation for set of conflicting paths that cover a group.""" mask = g.mask & grid.locations[dd] conflicts = [] confmask = 0 for reached,ddd in why: if dd == ddd and neighbors[reached] & mask: conflicts.append(reached) 
confmask |= 1L<<reached mask &=~ neighbors[reached] conflicts.sort() expl = [] for c in conflicts: expl += explain_conflict_path(grid,cell,d,why,c,dd) expl += ["In",g.name+",",namecells(g.mask&grid.locations[dd]), "are the only cells in which",dd,"may be placed."] return expl + ["Placing",dd,"in",namecells(confmask), "would prevent it from being placed anywhere in",g.name+",", "so",d,"must be placed in",cellnames[cell]+"."] def conflict(grid): """ Look for conflicting paths of bilocated or bivalued cells. In the same graph used by the bilocal and repeat rules, if there exist two paths that start with the same cell and digit, and that end with equal digits in different cells of the same row, column, or square, then the start cell must contain the starting digit for otherwise it would cause the end cells to conflict with each other. One or both paths can instead be in the bivalue graph, starting and ending with the other digit than the one for the bilocal path. We also find similar pairs of paths that end in sets of cells that together eliminate all positions for the end digit in another row, column, or square of the grid. """ if not grid.bilocation or not grid.bivalues: return for cell in range(81): if not grid.contents[cell]: for d in grid.choices(cell): conflicts = [0]*10 why = {} for reached,dd in grid.bilocation.reachable(cell,d): why[reached,dd] = True if (1L<<reached) & conflicts[dd]: def explain(): return explain_conflict(grid,cell,d,why,reached,dd) grid.place(d,cell,explain) return # allow changes to propagate else: conflicts[dd] |= neighbors[reached] if cell in grid.bivalues: for reached,dd in grid.bivalues.reachable(cell, grid.otherbv[cell,d]): other = grid.otherbv[reached,dd] why[reached,other] = False if (1L<<reached) & conflicts[other]: def explain(): return explain_conflict(grid,cell,d, why,reached,other) grid.place(d,cell,explain) return # allow changes to propagate else: conflicts[other] |= neighbors[reached] for g in groups: for dd in digits: if grid.locations[dd] & g.mask &~ conflicts[dd] == 0: def explain(): return explain_conflict_group(grid,cell,d, why,g,dd) grid.place(d,cell,explain) return # allow changes to propagate # triples of name, rule, difficulty level rules = [ ("locate",locate,0), ("eliminate",eliminate,1), ("align",align,2), ("pair",pair,2), ("triad",triad,2), ("trapezoid",trapezoid,2), ("rectangle",rectangle,2), ("subproblem",subproblem,3), ("digit",digit,3), ("bilocal",bilocal,3), ("bivalue",bivalue,3), ("repeat",repeat,4), ("path",path,4), ("conflict",conflict,4), ] def step(grid, quick_and_dirty = False): """Try the rules, return True if one succeeds.""" if grid.complete(): return False grid.progress = False grid.steps += 1 grid.log(["Beginning solver iteration",str(grid.steps)+'.']) for name,rule,level in rules: if level <= 1 or not quick_and_dirty: rule(grid) if grid.progress: grid.rules_used.add(name) grid.log(["Ending solver iteration",grid.steps, "after successful application of the", name,"rule."]) return True grid.log(["Ending solver iteration",grid.steps, "with no additional progress."]) return False # ====================================================================== # Random permutation of puzzles # ====================================================================== def block_permutation(preserve_symmetry = True): """Choose order to rearrange rows or columns of blocks.""" if preserve_symmetry: return random.choice([[0,1,2],[2,1,0]]) result = [0,1,2] random.shuffle(result) return result def permute1d(preserve_symmetry = True): """Choose 
order to rearrange rows or columns of puzzle.""" bp = block_permutation(preserve_symmetry) ip = [block_permutation(False),block_permutation(preserve_symmetry)] if preserve_symmetry: ip.append([2-ip[0][2],2-ip[0][1],2-ip[0][0]]) else: ip.append(block_permutation(False)) return [bp[i]*3+ip[i][j] for i in [0,1,2] for j in [0,1,2]] def permute(grid, preserve_symmetry = True): """Generate a randomly permuted version of the input puzzle.""" digit_permutation = list(digits) random.shuffle(digit_permutation) digit_permutation = [0]+digit_permutation row_permutation = permute1d(preserve_symmetry) col_permutation = permute1d(preserve_symmetry) transpose = random.choice([[1,9],[9,1]]) contents = [None]*81 for row in range(9): for col in range(9): contents[row_permutation[row]*transpose[0] + col_permutation[col]*transpose[1]] = \ digit_permutation[grid.contents[9*row+col]] return Sudoku(contents) # ====================================================================== # Output of puzzles # ====================================================================== # Output functions should return True if it's ok to add difficulty/level, # false otherwise def text_format(grid): for row in digits: if row % 3 != 1: print ('|' + ' '*11)*3+'|' elif row == 1: print ' ' + '-'*35 + ' ' else: print '|' + '-'*35 + '|' for col in digits: if col % 3 == 1: print '|', else: print ' ', print grid.contents[(row-1)*9+(col-1)] or '.', print '|' print ' ' + '-'*35 + ' ' return True def numeric_format(grid): row = [] for digit in grid: row.append(str(digit)) if len(row) == 9: print ''.join(row) row = [] return True def html_format(grid): print "<table border=1>" for a in range(3): print "<tr>" for b in range(3): print "<td><table border=0>" for c in range(3): print "<tr>" for d in range(3): row = 3*a+c col = 3*b+d cell = 9*row+col if grid.contents[cell]: print '<td width=30 height=30 align=center valign=middle style="font-family:times,serif; font-size:16pt; text-align:center; color:black">%d</td>' % grid.contents[cell] # sty = '; color:black' # val = ' value="%d" readonly' % grid.contents[cell] else: print '<td width=30 height=30 align=center valign=middle><input style="font-family:times,serif; font-size:16pt; text-align:center; color:#555; margin:0pt; border-width:0" size=1 maxlength=1></td>' # sty = '; color:gray' # val = '' # print '<td width=30 height=30 align=center valign=middle><input style="font-size:16pt; text-align:center%s" size=1 maxlength=1%s></td>' % (sty,val) print "</tr>" print "</table></td>" print "</tr>" print "</table>" return False def svg_format(grid): print '''<?xml version="1.0" encoding="iso-8859-1"?> <!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> <svg xmlns="http://www.w3.org/2000/svg" version="1.1" width="274pt" height="274pt" viewBox="0 0 273 273">''' print ' <g fill="none" stroke="black" stroke-width="1.5">' print ' <rect x="2" y="2" width="270" height="270" />' for i in [3,6]: print ' <line x1="2" y1="%d" x2="272" y2="%d" />' % (30*i+2,30*i+2) print ' <line x1="%d" y1="2" x2="%d" y2="272" />' % (30*i+2,30*i+2) print ' </g>' print ' <g fill="none" stroke="black" stroke-width="0.5">' for i in [1,2,4,5,7,8]: print ' <line x1="2" y1="%d" x2="272" y2="%d" />' % (30*i+2,30*i+2) print ' <line x1="%d" y1="2" x2="%d" y2="272" />' % (30*i+2,30*i+2) print ' </g>' print ' <g font-family="Times" font-size="24" fill="black" text-anchor="middle">' for row in range(9): for col in range(9): cell = row*9+col if grid.contents[cell]: print ' <text x="%d" 
y="%d">%d</text>' % \ (30*col+17, 30*row+25, grid.contents[cell]) print ' </g>' print '</svg>' return False output_formats = { "text": text_format, "txt": text_format, "t": text_format, "numeric": numeric_format, "num": numeric_format, "n": numeric_format, "html": html_format, "h": html_format, "svg": svg_format, "s": svg_format, } # ====================================================================== # Backtracking search for all solutions # ====================================================================== def all_solutions(grid, fastrules = True): """Generate sequence of completed Sudoku grids from initial puzzle.""" while True: # first try the usual non-backtracking rules try: while step(grid,fastrules): pass except BadSudoku: grid.log("A contradiction was found," " so this branch has no solutions.") return # no solutions # if they finished off the puzzle, there's only one solution if grid.complete(): grid.log("A solution to the puzzle has been found.") yield grid return # find a cell with few remaining possibilities def choices(c): ch = grid.choices(c) if len(ch) < 2: return (10,0,0) return (len(ch),c,ch[0]) L,c,d = min([choices(c) for c in range(81)]) # try it both ways branch = Sudoku(grid) grid.log("Failed to progress, " "creating a new backtracking search branch.") branch.logstream = grid.logstream branch.steps = grid.steps branch.original_cells = grid.original_cells branch.place(d,c,"The backtracking search will try this placement" " first. Then, after returning from this branch," " it will try preventing this placement.") for sol in all_solutions(branch,fastrules): yield sol grid.log(["Returned from backtracking branch; undoing placement of", d,"in",cellnames[c],"and all subsequent decisions."]) grid.rules_used.update(branch.rules_used) grid.rules_used.add("backtrack") grid.steps = branch.steps grid.unplace(d,1L<<c,"The backtracking search has already tried this" " placement, and now must try the opposite decision.") def unisolvent(grid): """Does this puzzle have a unique solution?""" stream = all_solutions(grid) try: stream.next() except StopIteration: return False try: stream.next() except StopIteration: return True return False # ====================================================================== # Command-line interface # ====================================================================== parser = OptionParser() parser.add_option("-r","--rules",dest="show_rules", action="store_true", help = "show description of known solver rules and exit") parser.add_option("-l","--levels",dest="show_levels", action="store_true", help = "show description of difficulty levels and exit") parser.add_option("-0", "--blank", dest="empty", action="store_true", help = "output blank sudoku grid and exit") parser.add_option("-t","--translate", dest="translate", action="store_true", help = "translate format of input puzzle without solving") parser.add_option("-p","--permute",dest="permute", action="store_true", help = "randomly rearrange the input puzzle") parser.add_option("-g","--generate", dest="generate", action="store_true", help = "generate new puzzle rather than reading from stdin") parser.add_option("-a", "--asymmetric", dest="asymmetric", action="store_true", help = "allow asymmetry in generated puzzles") parser.add_option("-u", "--unique", dest="assume_unique", action="store_false", help = "disallow rules that assume a unique solution", default = True) parser.add_option("-b", "--backtrack", dest="backtrack", action="store_true", help = "enable trial and error search for all 
solutions") parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help = "output description of each step in puzzle solution") parser.add_option("-x", "--empty", dest="emptychars", action="store", type="string", default=".0", help="characters representing empty cells in input puzzle") parser.add_option("-2", "--output-both", dest="output_both", action="store_true", help = "output both the puzzle and its solution") parser.add_option("-f", "--format", dest="format", action="store", type="string", default="text", help="output format (options: text, numeric, html, svg)") if __name__ == '__main__': options,args = parser.parse_args() if args: print >>sys.stderr, "Unrecognized command line syntax, use --help for input documentation" sys.exit(0) if options.show_rules: print """This solver knows the following rules. Rules occurring later in the list are attempted only when all earlier rules have failed to make progress. """ for name,rule,difficulty in rules: print name + ":" + rule.__doc__ sys.exit(0) if options.show_levels: print """ Puzzles are classified by difficulty, according to a weighted combination of the set of rules needed to solve each puzzle. There are six levels, in order by difficulty: easy, moderate, tricky, difficult, evil, and fiendish. In addition, a puzzle is classified as impossible if this program cannot find a solution for it, or if backtracking is needed to find the solution. """ sys.exit(0) if options.translate: if options.generate: print "Can not simultaneously generate and translate puzzles." sys.exit(0) try: outputter = output_formats[options.format.lower()] except KeyError: print "Unrecognized output format." sys.exit(0) if options.empty: outputter(Sudoku()) sys.exit(0) # ====================================================================== # Initial puzzle setup # ====================================================================== def random_puzzle(generate_symmetric = True): """Generate and return a randomly constructed Sudoku puzzle instance.""" puzzle = [] grid = Sudoku() def choices(cell): c = grid.choices(cell) return len(c) > 1 and c or [] while True: try: while not grid.complete(): d,c = random.choice([(d,c) for c in range(81) for d in choices(c)]) grid.place(d,c) while step(grid,True): pass puzzle.append((d,c)) if generate_symmetric: c = 80-c ch = grid.choices(c) if not ch: # avoid IndexError from random.choice raise BadSudoku("Placement invalidated symmetric cell") d = random.choice(ch) grid.place(d,c) while step(grid,True): pass puzzle.append((d,c)) except BadSudoku: puzzle = [] grid = Sudoku() continue break # find redundant information in initial state q = 0 while q < len(puzzle): grid = Sudoku(puzzle[:q] + puzzle[q+1+generate_symmetric:]) if not unisolvent(grid): q += 1+generate_symmetric else: del puzzle[q] if generate_symmetric: del puzzle[q] return Sudoku(puzzle) def read_puzzle(empty = ".0"): """Read and return a Sudoku instance from standard input.""" def digits(): for digit in sys.stdin.read(): if digit in empty: yield 0 elif '1' <= digit <= '9': yield int(digit) return Sudoku(digits()) if __name__ == '__main__': if options.generate: puzzle = random_puzzle(not options.asymmetric) print_puzzle = True print_solution = options.output_both else: puzzle = read_puzzle(options.emptychars) print_puzzle = options.output_both or options.translate print_solution = options.output_both or not options.translate if options.permute: puzzle = permute(puzzle, not options.asymmetric) if options.verbose: puzzle.logstream = sys.stderr if 
options.assume_unique: puzzle.assume_unique = True # ====================================================================== # Main program: print and solve puzzle # ====================================================================== if __name__ == '__main__': print_level = True if print_puzzle: print_level = outputter(puzzle) if options.output_both and print_level: print if options.backtrack: solns = all_solutions(puzzle,False) else: while step(puzzle): pass solns = [puzzle] nsolns = 0 for soln in solns: if print_solution: print_level = outputter(soln) nsolns += 1 difficulty = 0 used_names = [] for name,rule,level in rules: if name in puzzle.rules_used: used_names.append(name) difficulty += 1<<level if "backtrack" in puzzle.rules_used: used_names.append("backtrack") if print_level: print "\nRules used:", ", ".join(used_names) if nsolns != 1: print "Number of solutions:",nsolns if not puzzle.complete() or "backtrack" in puzzle.rules_used: print "Level: impossible" elif difficulty <= 1: print "Level: easy" elif difficulty <= 5: print "Level: moderate" elif difficulty <= 9: print "Level: tricky" elif difficulty <= 17: print "Level: difficult" elif difficulty <= 33: print "Level: evil" else: print "Level: fiendish"
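The difficulty report at the end of the main program is easy to reuse on its own. The sketch below (my addition, not part of the file above) reproduces that bucketing: each rule used contributes 2**level to a weighted score, using the levels from the rules table above, and the score is mapped to the printed label.

RULE_LEVELS = {"locate": 0, "eliminate": 1, "align": 2, "pair": 2,
               "triad": 2, "trapezoid": 2, "rectangle": 2,
               "subproblem": 3, "digit": 3, "bilocal": 3, "bivalue": 3,
               "repeat": 4, "path": 4, "conflict": 4}

def classify_difficulty(rules_used, complete=True):
    """Return the level label the solver prints for a set of rule names."""
    if not complete or "backtrack" in rules_used:
        return "impossible"        # unsolved, or trial and error was needed
    score = sum(1 << RULE_LEVELS[name] for name in rules_used)
    if score <= 1:
        return "easy"
    elif score <= 5:
        return "moderate"
    elif score <= 9:
        return "tricky"
    elif score <= 17:
        return "difficult"
    elif score <= 33:
        return "evil"
    return "fiendish"

# Example: a puzzle solved with only "locate" and "eliminate" scores
# (1 << 0) + (1 << 1) = 3, so it is reported as "moderate".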
apache-2.0
2,277,679,837,284,832,000
41.245757
220
0.512326
false
4.230958
false
false
false
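Each record in this dump trails its content with numeric statistics (size, line_mean, line_max, alpha_frac, ratio, ...). Their exact definitions are not documented here, so the sketch below is an assumption for illustration only: one plausible way such per-file metrics could be derived, limited to the fields whose meaning is guessable from their names.

def file_stats(content):
    """Plausible (assumed) derivations for a few of the record fields."""
    lines = content.splitlines() or ['']
    lengths = [len(line) for line in lines]
    return {
        "size": len(content),                          # bytes of source text
        "line_mean": sum(lengths) / float(len(lengths)),
        "line_max": max(lengths),
        "alpha_frac": sum(c.isalpha() for c in content)
                      / float(len(content) or 1),
    }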
CityGrid/twonicorn
twonicornweb/views/cp_user.py
1
8124
# Copyright 2015 CityGrid Media, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from pyramid.view import view_config from pyramid.httpexceptions import HTTPFound from pyramid.httpexceptions import HTTPConflict from pyramid.response import Response from datetime import datetime import logging from passlib.hash import sha512_crypt from twonicornweb.views import ( site_layout, get_user, ) from twonicornweb.models import ( DBSession, User, UserGroupAssignment, Group, ) log = logging.getLogger(__name__) @view_config(route_name='cp_user', permission='cp', renderer='twonicornweb:templates/cp_user.pt') def view_cp_user(request): page_title = 'Control Panel - Users' user = get_user(request) users = DBSession.query(User).all() groups = DBSession.query(Group).all() params = {'mode': None, 'commit': None, 'user_id': None, } for p in params: try: params[p] = request.params[p] except: pass mode = params['mode'] commit = params['commit'] user_id = params['user_id'] error_msg = None this_user = None this_groups = None subtitle = 'Users' if mode == 'add': subtitle = 'Add a new user' if commit: user_names = request.POST.getall('user_name') first_names = request.POST.getall('first_name') last_names= request.POST.getall('last_name') email_addresses = request.POST.getall('email_address') passwords = request.POST.getall('password') try: utcnow = datetime.utcnow() for u in range(len(user_names)): salt = sha512_crypt.genconfig()[17:33] encrypted_password = sha512_crypt.encrypt(passwords[u], salt=salt) create = User(user_name=user_names[u], first_name=first_names[u], last_name=last_names[u], email_address=email_addresses[u], salt=salt, password=encrypted_password, updated_by=user['login'], created=utcnow, updated=utcnow) DBSession.add(create) DBSession.flush() user_id = create.user_id group_assignments = request.POST.getall('group_assignments') for a in group_assignments: g = DBSession.query(Group).filter(Group.group_name==a).one() create = UserGroupAssignment(group_id=g.group_id, user_id=user_id, updated_by=user['login'], created=utcnow, updated=utcnow) DBSession.add(create) DBSession.flush() return_url = '/cp/user' return HTTPFound(return_url) except Exception as ex: if type(ex).__name__ == 'IntegrityError': log.error('User already exists in the db, please edit instead.') # Rollback DBSession.rollback() # FIXME: Return a nice page return HTTPConflict('User already exists in the db, please edit instead.') else: raise # FIXME not trapping correctly DBSession.rollback() error_msg = ("Failed to create user (%s)" % (ex)) log.error(error_msg) if mode == 'edit': subtitle = 'Edit user' if not commit: try: q = DBSession.query(User) q = q.filter(User.user_id == user_id) this_user = q.one() q = DBSession.query(Group) q = q.join(UserGroupAssignment, Group.group_id== UserGroupAssignment.group_id) q = q.filter(UserGroupAssignment.user_id==this_user.user_id) results = q.all() this_groups = [] for r in results: this_groups.append(r.group_name) except Exception, e: conn_err_msg = e return Response(str(conn_err_msg), 
content_type='text/plain', status_int=500) if commit: if 'form.submitted' in request.POST: user_id = request.POST.get('user_id') user_name = request.POST.get('user_name') first_name = request.POST.get('first_name') last_name = request.POST.get('last_name') email_address = request.POST.get('email_address') password = request.POST.get('password') group_assignments = request.POST.getall('group_assignments') # Update the user utcnow = datetime.utcnow() this_user = DBSession.query(User).filter(User.user_id==user_id).one() this_user.user_name = user_name this_user.first_name = first_name this_user.last_name = last_name this_user.email_address = email_address if password: salt = sha512_crypt.genconfig()[17:33] encrypted_password = sha512_crypt.encrypt(password, salt=salt) this_user.salt = salt this_user.password = encrypted_password this_user.updated_by=user['login'] DBSession.flush() for g in groups: if str(g.group_id) in group_assignments: # assign log.debug("Group: %s is in group assignments" % g.group_name) q = DBSession.query(UserGroupAssignment).filter(UserGroupAssignment.group_id==g.group_id, UserGroupAssignment.user_id==this_user.user_id) check = DBSession.query(q.exists()).scalar() if not check: log.info("Assigning local user %s to group %s" % (this_user.user_name, g.group_name)) update = UserGroupAssignment(group_id=g.group_id, user_id=user_id, updated_by=user['login'], created=utcnow, updated=utcnow) DBSession.add(update) DBSession.flush() else: # delete log.debug("Checking to see if we need to remove assignment for user: %s in group %s" % (this_user.user_name,g.group_name)) q = DBSession.query(UserGroupAssignment).filter(UserGroupAssignment.group_id==g.group_id, UserGroupAssignment.user_id==this_user.user_id) check = DBSession.query(q.exists()).scalar() if check: log.info("Removing local user %s from group %s" % (this_user.user_name, g.group_name)) assignment = DBSession.query(UserGroupAssignment).filter(UserGroupAssignment.group_id==g.group_id, UserGroupAssignment.user_id==this_user.user_id).one() DBSession.delete(assignment) DBSession.flush() return_url = '/cp/user' return HTTPFound(return_url) return {'layout': site_layout(), 'page_title': page_title, 'user': user, 'this_user': this_user, 'this_groups': this_groups, 'user_id': user_id, 'users': users, 'groups': groups, 'subtitle': subtitle, 'mode': mode, 'commit': commit, 'error_msg': error_msg, }
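Both the add and edit branches above derive a 16-character salt by slicing sha512_crypt.genconfig() and then hash with that explicit salt. Here is a minimal sketch of the same pattern, pulled out of the view; the function names are mine, and the [17:33] slice is copied from the view code, so it assumes the layout of genconfig()'s output.

from passlib.hash import sha512_crypt

def make_credentials(password):
    """Mirror the salt/hash pattern used by view_cp_user."""
    salt = sha512_crypt.genconfig()[17:33]        # same slice as above
    return salt, sha512_crypt.encrypt(password, salt=salt)

def check_password(password, stored_hash):
    # passlib embeds the salt in the modular-crypt hash string, so
    # verification does not need the separately stored salt column.
    return sha512_crypt.verify(password, stored_hash)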
apache-2.0
223,268,084,307,298,000
40.661538
242
0.546406
false
4.233455
false
false
false
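The edit branch of the view above keeps UserGroupAssignment rows in sync with the submitted checkboxes by querying each group's assignment one at a time. The same reconciliation reduces to set arithmetic; a hedged sketch (names are mine, not from cp_user.py):

def diff_group_assignments(current_ids, desired_ids):
    """Return (group ids to assign, group ids to unassign) for a user."""
    current, desired = set(current_ids), set(desired_ids)
    return desired - current, current - desired

# Example: diff_group_assignments({1, 2}, {2, 3}) -> ({3}, {1}), i.e.
# create an assignment for group 3 and delete the one for group 1.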
mseclab/PyJFuzz
pyjfuzz/core/pjf_server.py
1
8559
""" The MIT License (MIT) Copyright (c) 2016 Daniele Linguaglossa <d.linguaglossa@mseclab.com> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from wsgiref.simple_server import make_server, WSGIRequestHandler from bottle import route, run, ServerAdapter, response, request, static_file from .pjf_testcase_server import PJFTestcaseServer from .errors import PJFBaseException from .errors import PJFMissingArgument from threading import Thread from .pjf_logger import PJFLogger from .pjf_factory import PJFFactory from .certs import CERT_PATH import multiprocessing import signal import time import ssl import sys import os import socket class WSGIRefServer(ServerAdapter): """ WSGI based server class using SSL """ def run(self, handler): class QuietHandler(WSGIRequestHandler): def log_request(*args, **kw): pass def log_error(self, format, *args): pass self.options['handler_class'] = QuietHandler srv = make_server(self.host, self.port, handler, **self.options) srv.serve_forever() class SSLWSGIRefServer(ServerAdapter): """ WSGI based server class using SSL """ def run(self, handler): class QuietHandler(WSGIRequestHandler): def log_request(*args, **kw): pass def log_error(self, format, *args): pass self.options['handler_class'] = QuietHandler srv = make_server(self.host, self.port, handler, **self.options) srv.socket = ssl.wrap_socket(srv.socket, certfile=CERT_PATH, server_side=True) srv.serve_forever() class PJFServer: """ Class used to run both HTTP and HTTPS server using bottle web server """ def __init__(self, configuration): self.client_queue = multiprocessing.Queue(0) self.apply_patch() self.logger = self.init_logger() if ["debug", "html", "content_type", "notify", "ports"] not in configuration: raise PJFMissingArgument() if configuration.debug: print("[\033[92mINFO\033[0m] Starting HTTP ({0}) and HTTPS ({1}) built-in server...".format( configuration.ports["servers"]["HTTP_PORT"], configuration.ports["servers"]["HTTPS_PORT"] )) if not configuration.content_type: configuration.content_type = False if not configuration.content_type: configuration.content_type = "application/json" self.config = configuration self.json = PJFFactory(configuration) self.https = SSLWSGIRefServer(host="0.0.0.0", port=self.config.ports["servers"]["HTTPS_PORT"]) self.http = WSGIRefServer(host="0.0.0.0", port=self.config.ports["servers"]["HTTP_PORT"]) self.httpsd = multiprocessing.Process(target=run, kwargs={"server": self.https, "quiet": True}) self.httpd = multiprocessing.Process(target=run, kwargs={"server": self.http, "quiet": True}) if self.config.fuzz_web: 
self.request_checker = Thread(target=self.request_pool, args=()) self.logger.debug("[{0}] - PJFServer successfully initialized".format(time.strftime("%H:%M:%S"))) def run(self): """ Start the servers """ route("/")(self.serve) if self.config.html: route("/<filepath:path>")(self.custom_html) if self.config.fuzz_web: self.request_checker.start() self.httpd.start() self.httpsd.start() def save_testcase(self, ip, testcases): try: count = 0 dir_name = "testcase_{0}".format(ip) print("[\033[92mINFO\033[0m] Client {0} seems to not respond anymore, saving testcases".format(ip)) try: os.mkdir(dir_name) except OSError: pass for test in testcases: with open("{0}/testcase_{1}.json".format(dir_name, count), "wb") as t: t.write(test) t.close() count += 1 except Exception as e: raise PJFBaseException(e.message if hasattr(e, "message") else str(e)) def request_pool(self): try: clients = {} end = False while not end: try: client = self.client_queue.get(timeout=5) if client == (0,0): end = True else: if client[0] not in clients: clients.update({client[0]: {"timestamp": time.time(), "testcases": []}}) else: clients[client[0]]["timestamp"] = time.time() if len(clients[client[0]]["testcases"]) <= 10: clients[client[0]]["testcases"].append(client[1]) else: clients[client[0]]["testcases"].pop(0) clients[client[0]]["testcases"].append(client[1]) except: pass for c in list(clients.keys()): if time.time() - clients[c]["timestamp"] >= 30: self.save_testcase(c, clients[c]["testcases"]) del clients[c] except Exception as e: raise PJFBaseException(e.message if hasattr(e, "message") else str(e)) def stop(self): """ Kill the servers """ os.kill(self.httpd.pid, signal.SIGKILL) os.kill(self.httpsd.pid, signal.SIGKILL) self.client_queue.put((0,0)) if self.config.fuzz_web: self.request_checker.join() self.logger.debug("[{0}] - PJFServer successfully completed".format(time.strftime("%H:%M:%S"))) def custom_html(self, filepath): """ Serve custom HTML page """ try: response.headers.append("Access-Control-Allow-Origin", "*") response.headers.append("Accept-Encoding", "identity") response.headers.append("Content-Type", "text/html") return static_file(filepath, root=self.config.html) except Exception as e: raise PJFBaseException(e.message if hasattr(e, "message") else str(e)) def serve(self): """ Serve fuzzed JSON object """ try: fuzzed = self.json.fuzzed if self.config.fuzz_web: self.client_queue.put((request.environ.get('REMOTE_ADDR'), fuzzed)) response.headers.append("Access-Control-Allow-Origin", "*") response.headers.append("Accept-Encoding", "identity") response.headers.append("Content-Type", self.config.content_type) if self.config.notify: PJFTestcaseServer.send_testcase(fuzzed, '127.0.0.1', self.config.ports["servers"]["TCASE_PORT"]) yield fuzzed except Exception as e: raise PJFBaseException(e.message if hasattr(e, "message") else str(e)) def init_logger(self): """ Init the default logger """ return PJFLogger.init_logger() def apply_patch(self): """ Fix default socket lib to handle client disconnection while receiving data (Broken pipe) """ if sys.version_info >= (3, 0): # No patch for python >= 3.0 pass else: from .patch.socket import socket as patch socket.socket = patch
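request_pool() above tracks each client IP with a last-seen timestamp and a small sliding window of recent testcases, dumping the window to disk once a client has been silent for about 30 seconds. A minimal re-sketch of that bookkeeping (names are mine; collections.deque replaces the manual pop(0) capping):

import time
from collections import deque

TIMEOUT = 30   # seconds of silence before a client is considered dead
WINDOW = 10    # roughly matches the cap used by request_pool()

class ClientTracker(object):
    def __init__(self):
        self.clients = {}  # ip -> {"timestamp": float, "testcases": deque}

    def record(self, ip, testcase):
        entry = self.clients.setdefault(
            ip, {"timestamp": 0.0, "testcases": deque(maxlen=WINDOW)})
        entry["timestamp"] = time.time()
        entry["testcases"].append(testcase)  # deque drops the oldest itself

    def reap(self, save):
        """Call save(ip, testcases) for each silent client, then drop it."""
        for ip in list(self.clients):
            if time.time() - self.clients[ip]["timestamp"] >= TIMEOUT:
                save(ip, list(self.clients[ip]["testcases"]))
                del self.clients[ip]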
mit
-7,245,142,948,344,012,000
38.442396
112
0.592242
false
4.164964
true
false
false
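SSLWSGIRefServer above enables HTTPS by wrapping the listening socket with ssl.wrap_socket() and the bundled CERT_PATH certificate. That function was removed in Python 3.12; for reference, the equivalent with an SSLContext looks like the hedged sketch below (the cert path is a placeholder and must contain both key and certificate, as the single-file usage above implies):

import ssl
from wsgiref.simple_server import make_server

def serve_https(app, host="0.0.0.0", port=8443, certfile="cert.pem"):
    srv = make_server(host, port, app)
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ctx.load_cert_chain(certfile)          # PEM with key + certificate
    srv.socket = ctx.wrap_socket(srv.socket, server_side=True)
    srv.serve_forever()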
elebihan/yaprogen
data/templates/setuptools-python-app/skeleton/disthelpers.py
1
7324
# -*- coding: utf-8 -*- # # disthelpers.py - useful distutils helper commands # # Copyright (c) 2014 Eric Le Bihan <eric.le.bihan.dev@free.fr> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # from distutils import cmd from distutils.command.build import build as _build from distutils.errors import DistutilsOptionError from docutils.core import publish_file import os import subprocess class extract_messages(cmd.Command): description = 'extract localizable strings from source code' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): domain = self.distribution.get_name() potin_file = os.path.join(os.curdir, 'po', 'POTFILES.in') pot_file = os.path.join(os.curdir, 'po', domain + '.pot') args = [ 'xgettext', '-Lpython', '-k_', '-f', potin_file, '-o', pot_file, '--package-name', self.distribution.get_name(), ] subprocess.check_call(args) class init_catalog(cmd.Command): description = 'create a new catalog based on a POT file' user_options = [ ('locale=', 'l', 'locale for the new localized catalog'), ] def initialize_options(self): self.locale = None def finalize_options(self): if not self.locale: raise DistutilsOptionError('please provide a locale') def run(self): domain = self.distribution.get_name() pot_file = os.path.join(os.curdir, 'po', domain + '.pot') po_file = os.path.join(os.curdir, 'po', self.locale + '.po') args = [ 'msginit', '--input', pot_file, '--output', po_file, '--locale', self.locale, ] subprocess.check_call(args) class update_catalog(cmd.Command): description = 'update an existing catalog from a POT file' user_options = [ ('locale=', 'l', 'locale of the localized catalog'), ] def initialize_options(self): self.locale = None def finalize_options(self): if not self.locale: raise DistutilsOptionError('please provide a locale') def run(self): domain = self.distribution.get_name() pot_file = os.path.join(os.curdir, 'po', domain + '.pot') po_file = os.path.join(os.curdir, 'po', self.locale + '.po') args = ['msgmerge', '--update', po_file, pot_file] subprocess.check_call(args) class build_catalog(cmd.Command): description = 'compile *.po file into *.mo file' user_options = [ ('locale=', 'l', 'locale of the localized catalog'), ] def initialize_options(self): self.locale = None def finalize_options(self): pass def run(self): locales = [] domain = self.distribution.get_name() po_dir = os.path.join(os.path.dirname(os.curdir), 'po') if self.locale: locales.append(self.locale) else: for path, names, filenames in os.walk(po_dir): for f in filenames: 
if f.endswith('.po'): locale = f[:-3] locales.append(locale) for locale in locales: mo_dir = os.path.join('build', 'locale', locale, 'LC_MESSAGES') src = os.path.join(po_dir, locale + '.po') dst = os.path.join(mo_dir, domain + '.mo') if not os.path.exists(mo_dir): os.makedirs(mo_dir) print("compiling {0}".format(src)) args = ['msgfmt', src, '--output-file', dst] subprocess.check_call(args) locale_dir = os.path.join('share', 'locale', locale, 'LC_MESSAGES') self.distribution.data_files.append((locale_dir, [dst])) class build_man(cmd.Command): description = 'build MAN page from restructuredtext' def initialize_options(self): pass def finalize_options(self): pass def run(self): src_dir = os.path.join(os.path.dirname(os.curdir), 'man') dst_dir = os.path.join('build', 'man') for path, names, filenames in os.walk(src_dir): for f in filenames: if f.endswith('.rst'): filename, section, ext = f.rsplit('.', 2) if not os.path.exists(dst_dir): os.makedirs(dst_dir) src = os.path.join(path, f) dst = os.path.join(dst_dir, filename + '.' + section) print("converting {0}".format(src)) publish_file(source_path=src, destination_path=dst, writer_name='manpage') man_dir = os.path.join('share', 'man', 'man' + section) self.distribution.data_files.append((man_dir, [dst])) class build_html(cmd.Command): description = 'build HTML version of MAN pages from restructuredtext' def initialize_options(self): pass def finalize_options(self): pass def run(self): name = self.distribution.get_name() src_dir = os.path.join(os.path.dirname(os.curdir), 'man') dst_dir = os.path.join('build', 'html') for path, names, filenames in os.walk(src_dir): for f in filenames: if f.endswith('.rst'): filename, section, ext = f.rsplit('.', 2) if not os.path.exists(dst_dir): os.makedirs(dst_dir) src = os.path.join(path, f) dst = os.path.join(dst_dir, filename + '.' + section + '.html') print("converting {0}".format(src)) publish_file(source_path=src, destination_path=dst, writer_name='html') html_dir = os.path.join('share', 'doc', name, 'html') self.distribution.data_files.append((html_dir, [dst])) class build(_build): sub_commands = _build.sub_commands sub_commands += [('build_catalog', None)] sub_commands += [('build_man', None)] sub_commands += [('build_html', None)] def run(self): _build.run(self) # vim: ts=4 sts=4 sw=4 sta et ai
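These helpers only take effect once a project's setup script registers them. A hedged usage sketch (the package metadata is placeholder; data_files must be a list so build_catalog, build_man, and build_html can append to it):

from distutils.core import setup
import disthelpers

setup(
    name='myapp',            # placeholder; doubles as the gettext domain
    version='0.1.0',
    packages=['myapp'],
    data_files=[],           # populated by the build_* commands
    cmdclass={
        'build': disthelpers.build,
        'build_catalog': disthelpers.build_catalog,
        'build_man': disthelpers.build_man,
        'build_html': disthelpers.build_html,
        'extract_messages': disthelpers.extract_messages,
        'init_catalog': disthelpers.init_catalog,
        'update_catalog': disthelpers.update_catalog,
    },
)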
gpl-3.0
159,195,571,268,875,170
34.553398
79
0.578372
false
3.995636
false
false
false
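build_catalog above installs each compiled catalog as share/locale/<locale>/LC_MESSAGES/<name>.mo, keyed by the distribution name. At runtime the application would load it through the standard gettext module; a hedged sketch ('myapp' and the locale are placeholders):

import gettext

t = gettext.translation('myapp',             # the distribution name / domain
                        localedir='share/locale',
                        languages=['fr'],
                        fallback=True)       # fall back to the source strings
_ = t.gettext
print(_('Hello, world!'))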
cctags/gumpad2
gumpad2.py
1
50733
#!/usr/bin/env python # coding: utf-8 import wx import wx.richtext import wx.lib import wx.lib.wordwrap import os import sys import uuid import tempfile import optparse import StringIO import time import locale import hashlib import zshelve import PyRTFParser import xtea from wx.lib.embeddedimage import PyEmbeddedImage try: dirName = os.path.dirname(os.path.abspath(__file__)) except: dirName = os.path.dirname(os.path.abspath(sys.argv[0])) sys.path.append(os.path.split(dirName)[0]) try: from agw import aui from agw.aui import aui_switcherdialog as ASD except ImportError: # if it's not there locally, try the wxPython lib. import wx.lib.agw.aui as aui from wx.lib.agw.aui import aui_switcherdialog as ASD import images program_name = "Gumpad2" program_version = "v0.1.3" program_title = "%s %s" % (program_name, program_version) program_dbpath = "%s.db" % (program_name.lower()) program_main_icon = os.path.join(dirName, "main.ico") ############################################################################ # # debug tools # import inspect def debug_line(): try: raise Exception except: return sys.exc_info()[2].tb_frame.f_back.f_lineno def debug_file(): return inspect.currentframe().f_code.co_filename def fall_into(x, a, b): assert a < b return a <= x and x < b ############################################################################ # # VsTempFile # class VsTempFile: def __init__(self): self.fd, self.filename = tempfile.mkstemp() def __del__(self): self.Close() def AppendString(self, str): os.write(self.fd, str) def Close(self): os.close(self.fd) os.unlink(self.filename) ############################################################################ # # data format: # version: xx # magic: xx # [uuid]: {type: xx, title: xx, body: xx, xtea: sha1sum}, type = (root, dir, html) # tree: item = {id: xx, subs: [item *]} # VsData_Format_Version = 1 VsData_Format_Magic = "gumpad_magic_jshcm" VsData_Type_Root = 1 VsData_Type_Dir = 2 VsData_Type_Html = 3 class VsData: def __init__(self, filename): self.m_filename = filename bFileExist = os.access(filename, os.R_OK | os.W_OK) self.db = zshelve.btopen(filename) if not bFileExist: self.__CreateData__() def __CreateData__(self): self.SetMagic(VsData_Format_Magic) self.SetVersion(VsData_Format_Version) id = self.GenerateId() self.db[id] = {"type": VsData_Type_Root, "title": "root", "body": ""} self.db["tree"] = {"id": id, "subs": []} self.db.sync() def __GetTree__(self, tree, id): if id == tree["id"]: return None, tree for i in tree["subs"]: parent, t = self.__GetTree__(i, id) if t is not None: if parent is None: parent = tree return parent, t return None, None def GetFileName(self): return self.m_filename def GetVersion(self): return self.db["version"] def SetVersion(self, version): self.db["version"] = version self.db.sync() def GetMagic(self): return self.db["magic"] def SetMagic(self, magic): self.db["magic"] = magic self.db.sync() def GetTree(self, parent, id=None): """从 parent 往下查找指定 id 的结点,返回 父结点、结点, 不存在时返回 None """ if id is None: return None, parent else: return self.__GetTree__(parent, id) def GetRoot(self): return self.db["tree"] def SetRoot(self, dir_tree): """更新目录树""" self.set_root_tree_root = None self.set_root_last_node = [] for i in dir_tree: id = i[0] path = i[1] new = {"id": id, "subs": []} if path == 0: self.set_root_tree_root = new self.set_root_last_node.append(new) else: while len(self.set_root_last_node) > path: self.set_root_last_node.pop() assert len(self.set_root_last_node) == path parent = self.set_root_last_node[-1] 
parent["subs"].append(new) self.set_root_last_node.append(new) assert self.set_root_tree_root is not None self.db["tree"] = self.set_root_tree_root self.db.sync() def GenerateId(self): return str(uuid.uuid1()) def Add(self, title, body, parent_id=None, type=None): root = self.db["tree"] dummy, t = self.GetTree(root, parent_id) if type is None: type = VsData_Type_Html elif type not in (VsData_Type_Dir, VsData_Type_Html): type = VsData_Type_Dir new_id = self.GenerateId() t["subs"].append({"id": new_id, "subs": []}) self.db["tree"] = root self.db[new_id] = {"type": type, "title": title, "body": body} self.db.sync() return new_id def Delete(self, id): """删除指定Id的叶子结点,根结点除外 成功时返回 True,失败时返回 False """ if id is None: return False root = self.db["tree"] if id == root["id"]: return False parent, t = self.GetTree(root, id) if t is None: return False if len(t["subs"]) != 0: return False # 删除关系记录 for i in range(len(parent["subs"])): if id == parent["subs"][i]["id"]: del parent["subs"][i] break self.db["tree"] = root # 删除结点记录 if id in self.db: del self.db[id] self.db.sync() def GetTitle(self, id=None): if id is None: id = self.db["tree"]["id"] return self.db[id]["title"] def SetTitle(self, id, title): if id is None: id = self.db["tree"]["id"] t = self.db[id] t["title"] = title self.db[id] = t self.db.sync() def GetBody(self, id=None): if id is None: id = self.db["tree"]["id"] return self.db[id]["body"] def SetBody(self, id, body): if id is None: id = self.db["tree"]["id"] t = self.db[id] t["body"] = body self.db[id] = t self.db.sync() def GetType(self, id=None): if id is None: id = self.db["tree"]["id"] return self.db[id]["type"] def SetXtea(self, id, key): assert not self.HasXtea(id) t = self.db[id] t["xtea"] = hashlib.sha1(key).hexdigest() self.db[id] = t self.db.sync() def ClearXtea(self, id): assert self.HasXtea(id) t = self.db[id] del t["xtea"] self.db[id] = t self.db.sync() def HasXtea(self, id): return self.db[id].has_key("xtea") def CheckXtea(self, id, key): assert self.HasXtea(id) return self.db[id]["xtea"] == hashlib.sha1(key).hexdigest() def IsEditable(self, id=None): """判断指定Id对应的内容是否允许编辑""" if id is None: return False t = self.GetType(id) return VsData_Type_Html == t ############################################################################ # # VsConfig # class VsConfig: def __init__(self): pass def GetDefaultFont(): return wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, u"宋体", wx.FONTENCODING_SYSTEM) ############################################################################ # # Control item Id # VsGenerateMenuId_Start = wx.ID_HIGHEST + 1 def VsGenerateMenuId(): global VsGenerateMenuId_Start VsGenerateMenuId_Start += 1 return VsGenerateMenuId_Start ID_Menu_CreateHtml = VsGenerateMenuId() ID_Menu_CreateDir = VsGenerateMenuId() ID_Menu_RenameEntry = VsGenerateMenuId() ID_Menu_DeleteEntry = VsGenerateMenuId() ID_Menu_Save = VsGenerateMenuId() ID_Menu_SaveAs = VsGenerateMenuId() ID_Menu_Exit = VsGenerateMenuId() ID_Menu_Encrypt = VsGenerateMenuId() ID_Menu_ToogleDirectory = VsGenerateMenuId() ID_Menu_ToogleToolBar = VsGenerateMenuId() ID_Menu_FindItem = VsGenerateMenuId() ID_Menu_FindNextItem = VsGenerateMenuId() ID_Menu_About = VsGenerateMenuId() ID_ToolBar_Bold = VsGenerateMenuId() ID_ToolBar_Italic = VsGenerateMenuId() ID_ToolBar_Underline = VsGenerateMenuId() ID_ToolBar_AlignLeft = VsGenerateMenuId() ID_ToolBar_Center = VsGenerateMenuId() ID_ToolBar_AlignRight = VsGenerateMenuId() ID_ToolBar_IndentLess = VsGenerateMenuId() ID_ToolBar_IndentMore = 
VsGenerateMenuId() ID_ToolBar_Font = VsGenerateMenuId() ID_ToolBar_FontColor = VsGenerateMenuId() ID_ToolBar_InsertPic = VsGenerateMenuId() ID_Ctx_InsertAsSibling = VsGenerateMenuId() ID_Ctx_InsertAsChild = VsGenerateMenuId() ############################################################################ # # VsStatusBar # class VsStatusBar(wx.StatusBar): def __init__(self, parent): wx.StatusBar.__init__(self, parent, -1) self.SetFieldsCount(3) self.SetStatusStyles([wx.SB_FLAT, wx.SB_NORMAL, wx.SB_NORMAL]) # 显示当前操作数据 str = "@ %s" % (self.GetParent().db.GetFileName()) self.SetStatusText(str, 1) # 初始时显示时间 self.OnTimer() # 调整控件大小 width, height = self.GetTextExtent(self.GetStatusText(2)) width += 48 self.SetStatusWidths([0, -1, width]) # 控件时间显示 self.timer = wx.PyTimer(self.OnTimer) self.timer.Start(1000 * 20) def OnTimer(self): # 显示当前时间 t = time.localtime() str = time.strftime("[%Y-%m-%d %H:%M %A]", t) self.SetStatusText(str, 2) ############################################################################ # # VsTreeCtrl # class VsTreeCtrl(wx.TreeCtrl): def __init__(self, parent, id, pos, size, style): wx.TreeCtrl.__init__(self, parent, id, pos, size, style) def Traverse(self, func, startNode): """Apply 'func' to each node in a branch, beginning with 'startNode'. """ def TraverseAux(node, depth, func): nc = self.GetChildrenCount(node, 0) child, cookie = self.GetFirstChild(node) # In wxPython 2.5.4, GetFirstChild only takes 1 argument for i in xrange(nc): func(child, depth) TraverseAux(child, depth + 1, func) child, cookie = self.GetNextChild(node, cookie) func(startNode, 0) TraverseAux(startNode, 1, func) def ItemIsChildOf(self, item1, item2): ''' Tests if item1 is a child of item2, using the Traverse function ''' self.result = False def test_func(node, depth): if node == item1: self.result = True self.Traverse(test_func, item2) return self.result ############################################################################ # # VsFrame # class VsFrame(wx.Frame): def __init__(self, parent, id=wx.ID_ANY, title="", pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE | wx.SUNKEN_BORDER): wx.Frame.__init__(self, parent, id, title, pos, size, style) self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) self.db = VsData(program_dbpath) self.tree = None self.editor_list = [] # [id, ctrl, modified] self.passwd_map = {} # id:passwd self._mgr = aui.AuiManager() # tell AuiManager to manage this frame self._mgr.SetManagedWindow(self) # set frame icon icon = wx.EmptyIcon() icon.LoadFile(program_main_icon, wx.BITMAP_TYPE_ICO) self.SetIcon(icon) # set up default notebook style self._notebook_style = aui.AUI_NB_DEFAULT_STYLE | aui.AUI_NB_TAB_EXTERNAL_MOVE | wx.NO_BORDER self._notebook_theme = 0 # 状态栏 self.SetStatusBar(VsStatusBar(self)) self.CreateMenuBar() self.BuildPanes() # 查找功能 self.finddlg = None self.finddata = wx.FindReplaceData() self.finddata.SetFlags(wx.FR_DOWN) self.Bind(wx.EVT_FIND, self.OnFind) self.Bind(wx.EVT_FIND_NEXT, self.OnFind) self.Bind(wx.EVT_FIND_CLOSE, self.OnFindClose) def CreateMenuBar(self): """创建菜单""" mb = wx.MenuBar() def DoBindMenuHandler(item, handler, updateUI=None): self.Bind(wx.EVT_MENU, handler, item) if updateUI is not None: self.Bind(wx.EVT_UPDATE_UI, updateUI, item) file_menu = wx.Menu() DoBindMenuHandler(file_menu.Append(ID_Menu_CreateHtml, u"新建笔记"), self.OnCreateHtml, self.OnMenuUpdateUI) DoBindMenuHandler(file_menu.Append(ID_Menu_CreateDir, u"新建目录"), self.OnCreateDir, self.OnMenuUpdateUI) file_menu.AppendSeparator() DoBindMenuHandler(file_menu.Append(ID_Menu_Save, 
u"保存(&S)\tCtrl-S"), self.OnSave, self.OnMenuUpdateUI) DoBindMenuHandler(file_menu.Append(ID_Menu_SaveAs, u"另存为(&A)"), self.OnSaveAs, self.OnMenuUpdateUI) file_menu.AppendSeparator() self.Bind(wx.EVT_MENU, self.OnExit, file_menu.Append(ID_Menu_Exit, u"退出(&X)")) ope_menu = wx.Menu() DoBindMenuHandler(ope_menu.AppendCheckItem(ID_Menu_ToogleDirectory, u"显示目录树(&D)\tCtrl-D"), self.OnToogleDirTree, self.OnMenuUpdateUI) DoBindMenuHandler(ope_menu.AppendCheckItem(ID_Menu_ToogleToolBar, u"显示工具栏(&T)\tCtrl-T"), self.OnToogleToolBar, self.OnMenuUpdateUI) ope_menu.AppendSeparator() DoBindMenuHandler(ope_menu.Append(ID_Menu_FindItem, u"查找(&F)\tCtrl-F"), self.OnFindItem, self.OnMenuUpdateUI) DoBindMenuHandler(ope_menu.Append(ID_Menu_FindNextItem, u"查找下一个(&N)\tF3"), self.OnFindNextItem, self.OnMenuUpdateUI) help_menu = wx.Menu() self.Bind(wx.EVT_MENU, self.OnAbout, help_menu.Append(ID_Menu_About, u"关于(&A)...")) mb.Append(file_menu, u"文件(&F)") mb.Append(ope_menu, u"操作(&O)") mb.Append(help_menu, u"帮助(&H)") self.SetMenuBar(mb) def CreateToolBar(self): def DoBind(item, handler, updateUI=None): self.Bind(wx.EVT_TOOL, handler, item) if updateUI is not None: self.Bind(wx.EVT_UPDATE_UI, updateUI, item) tb = aui.AuiToolBar(self, -1, wx.DefaultPosition, wx.DefaultSize, aui.AUI_TB_DEFAULT_STYLE | aui.AUI_TB_OVERFLOW) tb.SetToolBitmapSize(wx.Size(16, 16)) DoBind(tb.AddToggleTool(wx.ID_CUT, images._rt_cut.GetBitmap(), wx.NullBitmap, False, None, "Cut"), self.ForwardEvent, self.ForwardEvent) DoBind(tb.AddToggleTool(wx.ID_COPY, images._rt_copy.GetBitmap(), wx.NullBitmap, False, None, "Copy"), self.ForwardEvent, self.ForwardEvent) DoBind(tb.AddToggleTool(wx.ID_PASTE, images._rt_paste.GetBitmap(), wx.NullBitmap, False, None, "Paste"), self.ForwardEvent, self.ForwardEvent) tb.AddSeparator() DoBind(tb.AddToggleTool(wx.ID_UNDO, images._rt_undo.GetBitmap(), wx.NullBitmap, False, None, "Undo"), self.ForwardEvent, self.ForwardEvent) DoBind(tb.AddToggleTool(wx.ID_REDO, images._rt_redo.GetBitmap(), wx.NullBitmap, False, None, "Redo"), self.ForwardEvent, self.ForwardEvent) tb.AddSeparator() DoBind(tb.AddToggleTool(ID_ToolBar_Bold, images._rt_bold.GetBitmap(), wx.NullBitmap, True, None, "Bold"), self.OnBold, self.OnToolBarUpdateUI) DoBind(tb.AddToggleTool(ID_ToolBar_Italic, images._rt_italic.GetBitmap(), wx.NullBitmap, True, None, "Italic"), self.OnItalics, self.OnToolBarUpdateUI) DoBind(tb.AddToggleTool(ID_ToolBar_Underline, images._rt_underline.GetBitmap(), wx.NullBitmap, True, None, "Underline"), self.OnUnderline, self.OnToolBarUpdateUI) tb.AddSeparator() DoBind(tb.AddToggleTool(ID_ToolBar_AlignLeft, images._rt_alignleft.GetBitmap(), wx.NullBitmap, True, None, "Align left"), self.OnAlignLeft, self.OnToolBarUpdateUI) DoBind(tb.AddToggleTool(ID_ToolBar_Center, images._rt_centre.GetBitmap(), wx.NullBitmap, True, None, "Center"), self.OnAlignCenter, self.OnToolBarUpdateUI) DoBind(tb.AddToggleTool(ID_ToolBar_AlignRight, images._rt_alignright.GetBitmap(), wx.NullBitmap, True, None, "Align right"), self.OnAlignRight, self.OnToolBarUpdateUI) tb.AddSeparator() DoBind(tb.AddToggleTool(ID_ToolBar_IndentLess, images._rt_indentless.GetBitmap(), wx.NullBitmap, False, None, "Indent Less"), self.OnIndentLess, self.OnToolBarUpdateUI) DoBind(tb.AddToggleTool(ID_ToolBar_IndentMore, images._rt_indentmore.GetBitmap(), wx.NullBitmap, False, None, "Indent More"), self.OnIndentMore, self.OnToolBarUpdateUI) tb.AddSeparator() DoBind(tb.AddToggleTool(ID_ToolBar_Font, images._rt_font.GetBitmap(), wx.NullBitmap, False, None, "Font"), self.OnFont, 
self.OnToolBarUpdateUI) DoBind(tb.AddToggleTool(ID_ToolBar_FontColor, images._rt_colour.GetBitmap(), wx.NullBitmap, False, None, "Font Color"), self.OnColour, self.OnToolBarUpdateUI) tb.AddSeparator() DoBind(tb.AddToggleTool(ID_ToolBar_InsertPic, images.images.GetBitmap(), wx.NullBitmap, False, None, "Insert Picture"), self.OnInsertPicture, self.OnToolBarUpdateUI) tb.Realize() self.toolbar_updateui_funcs = { ID_ToolBar_Bold: self.OnUpdateBold, ID_ToolBar_Italic: self.OnUpdateItalic, ID_ToolBar_Underline: self.OnUpdateUnderline, ID_ToolBar_AlignLeft: self.OnUpdateAlignLeft, ID_ToolBar_Center: self.OnUpdateAlignCenter, ID_ToolBar_AlignRight: self.OnUpdateAlignRight, ID_ToolBar_IndentLess: None, ID_ToolBar_IndentMore: None, ID_ToolBar_Font: None, ID_ToolBar_FontColor: None, ID_ToolBar_InsertPic: None, } return tb def BuildPanes(self): # min size for the frame itself isn't completely done. # see the end up AuiManager.Update() for the test # code. For now, just hard code a frame minimum size self.SetMinSize(wx.Size(400, 300)) self._mgr.AddPane(self.CreateTreeCtrl(), aui.AuiPaneInfo().Name("VsFrame_Dir_Tree").Caption(u"目录树"). Left().Layer(1).Position(1).CloseButton(True).MaximizeButton(False). MinimizeButton(False)) self._mgr.AddPane(self.CreateNotebook(), aui.AuiPaneInfo().Name("VsFrame_Notebook"). CenterPane().PaneBorder(False)) self._mgr.AddPane(self.CreateToolBar(), aui.AuiPaneInfo().Name("VsFrame_Html_Edit_Toolbar").Caption("Toobar").ToolbarPane().Top()) # make some default perspectives # perspective_all = self._mgr.SavePerspective() all_panes = self._mgr.GetAllPanes() for pane in all_panes: if not pane.IsToolbar(): pane.Hide() self._mgr.GetPane("VsFrame_Dir_Tree").Show().Left().Layer(0).Row(0).Position(0) self._mgr.GetPane("VsFrame_Notebook").Show() perspective_default = self._mgr.SavePerspective() self._nb_perspectives = [] auibook = self._mgr.GetPane("VsFrame_Notebook").window nb_perspective_default = auibook.SavePerspective() self._nb_perspectives.append(nb_perspective_default) self._mgr.LoadPerspective(perspective_default) # "commit" all changes made to AuiManager self._mgr.Update() def IsModified(self, index): """检查指定编辑控件是否已经有修改而未保存""" assert fall_into(index, 0, len(self.editor_list)) return self.editor_list[index][2] def SetModified(self, index, modified=True): """标记为已经修改""" self.editor_list[index][2] = modified def GetToolBarPanelInfo(self): return self._mgr.GetPane("VsFrame_Html_Edit_Toolbar") def GetNotebook(self): notebook = self._mgr.GetPane("VsFrame_Notebook").window assert notebook is not None return notebook def GetDirTreePanelInfo(self): return self._mgr.GetPane("VsFrame_Dir_Tree") def GetDirTree(self): tree = self.GetDirTreePanelInfo().window assert tree is not None return tree def GetDirTreeImageIndexByType(self, t): if t == VsData_Type_Root: return 0 elif t == VsData_Type_Dir: return 0 elif t == VsData_Type_Html: return 1 else: assert False def GetView(self, index=None): parent = self.GetNotebook() if index is None: index = parent.GetSelection() if index < 0: return parent, None, None assert fall_into(index, 0, len(self.editor_list)) return parent, index, self.editor_list[index][1] def GetCurrentView(self): """获取当前窗口视图""" return self.GetView() def UpdateViewTitle(self, index=None): parent, index, ctrl = self.GetView(index) id = self.editor_list[index][0] str = self.db.GetTitle(id) if self.IsModified(index): str = "* " + str parent.SetPageText(index, str) def SaveDirTree(self, tree): self.save_dir_tree = [] tree.Traverse(lambda node, path: \ 
                self.save_dir_tree.append((tree.GetItemPyData(node), path)),
            tree.GetRootItem())
        self.db.SetRoot(self.save_dir_tree)

    def DoSave(self, id, body, encrypt=False):
        # plain content     --(encrypt)--> save
        # encrypted content --(decrypt)--> save
        if encrypt or self.db.HasXtea(id):
            assert self.passwd_map.has_key(id)
            kk = hashlib.md5(self.passwd_map[id]).digest()
            cc = xtea.crypt(kk, body)
            body = cc
        self.db.SetBody(id, body)

    def OnSave(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if index is None:
            return
        # return directly if nothing has been modified
        if not self.IsModified(index):
            return
        # restore the title
        self.SetModified(index, False)
        id = self.editor_list[index][0]
        self.UpdateViewTitle()
        # save the content
        s = StringIO.StringIO()
        handler = wx.richtext.RichTextXMLHandler()
        handler.SaveStream(ctrl.GetBuffer(), s)
        self.DoSave(id, s.getvalue())

    def OnSaveAs(self, event):
        parent, index, ctrl = self.GetCurrentView()
        assert ctrl is not None
        # the default file name
        default_title = parent.GetPageText(index)
        # Display a File Save Dialog for RTF files
        dlg = wx.FileDialog(self, "Choose a filename",
                            wildcard=u'Rich Text Format files (*.rtf)|*.rtf',
                            defaultFile=default_title,
                            style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        if dlg.ShowModal() != wx.ID_OK:
            return
        # assign it to path
        path = dlg.GetPath()
        dlg.Destroy()
        # Use the custom RTF Handler to save the file
        handler = PyRTFParser.PyRichTextRTFHandler()
        handler.SaveFile(ctrl.GetBuffer(), path)

    def OnToogleDirTree(self, event):
        panel = self.GetDirTreePanelInfo()
        panel.Show(not panel.IsShown())
        self._mgr.Update()

    def OnToogleToolBar(self, event):
        panel = self.GetToolBarPanelInfo()
        panel.Show(not panel.IsShown())
        self._mgr.Update()

    def OnFind(self, event):
        parent, index, ctrl = self.GetCurrentView()
        assert ctrl is not None
        end = ctrl.GetLastPosition()
        textstring = ctrl.GetRange(0, end).lower()
        findstring = self.finddata.GetFindString().lower()
        backward = not (self.finddata.GetFlags() & wx.FR_DOWN)
        if backward:
            start = ctrl.GetSelection()[0]
            loc = textstring.rfind(findstring, 0, start)
        else:
            start = ctrl.GetSelection()[1]
            loc = textstring.find(findstring, start)
        if loc == -1 and start != 0:
            # string not found, start at beginning
            if backward:
                start = end
                loc = textstring.rfind(findstring, 0, start)
            else:
                start = 0
                loc = textstring.find(findstring, start)
        if loc == -1:
            wx.MessageBox(u"搜索字符串未找到!", program_name,
                          wx.OK | wx.ICON_EXCLAMATION)
        if self.finddlg:
            if loc == -1:
                self.finddlg.SetFocus()
                return
            else:
                self.finddlg.Destroy()
                self.finddlg = None
        ctrl.ShowPosition(loc)
        ctrl.SetSelection(loc, loc + len(findstring))

    def OnFindClose(self, event):
        event.GetDialog().Destroy()
        self.finddlg = None

    def OnFindItem(self, event):
        if self.finddlg is not None:
            return
        parent, index, ctrl = self.GetCurrentView()
        assert ctrl is not None
        self.finddlg = wx.FindReplaceDialog(self, self.finddata, "Find")
        self.finddlg.Show(True)

    def OnFindNextItem(self, event):
        if self.finddata.GetFindString():
            self.OnFind(event)
        else:
            self.OnFindItem(event)

    def OnMenuUpdateUI(self, event):
        evId = event.GetId()
        if evId == ID_Menu_ToogleDirectory:
            event.Check(self.GetDirTreePanelInfo().IsShown())
        elif evId == ID_Menu_ToogleToolBar:
            event.Check(self.GetToolBarPanelInfo().IsShown())
        elif evId in (ID_Menu_Save, ID_Menu_SaveAs, ID_Menu_FindItem,
                      ID_Menu_FindNextItem):
            parent, index, ctrl = self.GetCurrentView()
            exist = ctrl is not None
            event.Enable(exist)
            if evId == ID_Menu_Save and exist:
                event.Enable(self.IsModified(index))
        elif evId in (ID_Menu_CreateHtml, ID_Menu_CreateDir):
            # disable the "new" menu entries while the directory tree is hidden
            event.Enable(self.GetDirTreePanelInfo().IsShown())

    def OnCopy(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            ctrl.Copy()
            wx.TheClipboard.Flush()

    def OnCut(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            ctrl.Cut()
            wx.TheClipboard.Flush()

    def OnRichtextContentChanged(self, event):
        parent, index, ctrl = self.GetCurrentView()
        assert index is not None
        assert event.GetEventObject() is ctrl
        if not self.IsModified(index):
            self.SetModified(index, True)
            self.UpdateViewTitle()

    def OnTreeItemActivated(self, event):
        id = self.tree.GetItemPyData(event.GetItem())
        parent = self.GetNotebook()
        passwd = ""
        # return directly if the content is not editable
        if not self.db.IsEditable(id):
            return
        # if it is already open, select it and return
        for i in range(len(self.editor_list)):
            if id == self.editor_list[i][0]:
                parent.SetSelection(i)
                return
        # ask for the password
        encrypted = self.db.HasXtea(id)
        if encrypted:
            passwd = wx.GetPasswordFromUser(message=u"请输入密码:",
                                            caption=u"打开加密文档",
                                            default_value="", parent=None)
            if not self.db.CheckXtea(id, passwd):
                if len(passwd) != 0:
                    wx.MessageBox(u"密码不正确!", program_name,
                                  wx.OK | wx.ICON_ERROR)
                return
            self.passwd_map[id] = passwd
        # create a new editor page
        ctrl = wx.richtext.RichTextCtrl(parent,
                                        style=wx.VSCROLL | wx.HSCROLL | wx.NO_BORDER)
        ctrl.Bind(wx.richtext.EVT_RICHTEXT_CONTENT_INSERTED,
                  self.OnRichtextContentChanged)
        ctrl.Bind(wx.richtext.EVT_RICHTEXT_CONTENT_DELETED,
                  self.OnRichtextContentChanged)
        ctrl.Bind(wx.richtext.EVT_RICHTEXT_STYLE_CHANGED,
                  self.OnRichtextContentChanged)
        ctrl.Bind(wx.EVT_MENU, self.OnCopy, id=wx.ID_COPY)
        ctrl.Bind(wx.EVT_MENU, self.OnCut, id=wx.ID_CUT)
        # set the default font
        ctrl.SetFont(GetDefaultFont())
        # parse the body content
        body = self.db.GetBody(id)
        if encrypted:
            kk = hashlib.md5(passwd).digest()
            cc = xtea.crypt(kk, body)
            body = cc
        if len(body) != 0:
            tmpfile = VsTempFile()
            tmpfile.AppendString(body)
            ctrl.Freeze()
            ctrl.BeginSuppressUndo()
            handler = wx.richtext.RichTextXMLHandler()
            # Load the XML file via the XML Handler.
            # Note that for XML, the BUFFER is passed.
            handler.LoadFile(ctrl.GetBuffer(), tmpfile.filename)
            # Signal the end of changing the control
            ctrl.EndSuppressUndo()
            ctrl.Thaw()
        # record it in the in-memory list
        self.editor_list.append([id, ctrl, False])
        parent.AddPage(ctrl, self.db.GetTitle(id), select=True)

    def OnTreeEndLabelEdit_After(self, item, old_text):
        """Update the title; if the document is open, update it there as well"""
        item_text = self.tree.GetItemText(item)
        s = item_text.strip()
        # update the text shown in the directory tree
        if s != item_text:
            self.tree.SetItemText(item, s)
        # return directly if nothing changed
        if old_text == s:
            return
        # update the data
        id = self.tree.GetItemPyData(item)
        self.db.SetTitle(id, s)
        # update the title of the open document
        for i in range(len(self.editor_list)):
            if id == self.editor_list[i][0]:
                self.UpdateViewTitle(i)
                break

    def OnTreeEndLabelEdit(self, event):
        item = event.GetItem()
        wx.CallAfter(self.OnTreeEndLabelEdit_After, item,
                     self.tree.GetItemText(item))

    def OnTreeBeginDrag(self, event):
        tree = event.GetEventObject()
        self.drag_source = event.GetItem()
        if self.drag_source != tree.GetRootItem():
            event.Allow()
        else:
            event.Veto()

    def OnTreeEndDrag(self, event):
        drop_target = event.GetItem()
        if not drop_target.IsOk():
            return
        tree = event.GetEventObject()
        source_id = tree.GetItemPyData(self.drag_source)
        # the drop target must not be a child of the source item
        if tree.ItemIsChildOf(drop_target, self.drag_source):
            tree.Unselect()
            return

        # One of the following methods of inserting will be called...
        def MoveNodes(parent, target):
            # delete the source item and its children
            tree.Delete(self.drag_source)
            # insert the source item at the target position
            imgidx = self.GetDirTreeImageIndexByType(self.db.GetType(source_id))
            title = self.db.GetTitle(source_id)
            if target is not None:
                new_item = tree.InsertItem(parent, target, title, imgidx)
            else:
                new_item = tree.InsertItemBefore(parent, 0, title, imgidx)
            tree.SetItemPyData(new_item, source_id)
            # add the children
            dummy, t = self.db.GetTree(self.db.GetRoot(), source_id)
            self.Tree_AddNode(t, new_item)
            # set the tree node attributes
            tree.ExpandAllChildren(new_item)
            tree.SelectItem(new_item)
            # save the directory tree
            self.SaveDirTree(tree)

        def InsertAsSibling(event):
            MoveNodes(tree.GetItemParent(drop_target), drop_target)

        def InsertAsChild(event):
            MoveNodes(drop_target, None)

        # unless the target is the root item, ask whether to insert as a
        # sibling or as a child of the target
        if drop_target == tree.GetRootItem():
            InsertAsChild(None)
        else:
            menu = wx.Menu()
            menu.Append(ID_Ctx_InsertAsSibling, u"与目标项平级", "")
            menu.Append(ID_Ctx_InsertAsChild, u"作为目标项的子项", "")
            menu.UpdateUI()
            menu.Bind(wx.EVT_MENU, InsertAsSibling, id=ID_Ctx_InsertAsSibling)
            menu.Bind(wx.EVT_MENU, InsertAsChild, id=ID_Ctx_InsertAsChild)
            self.PopupMenu(menu)

    def OnBold(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            ctrl.ApplyBoldToSelection()

    def OnItalics(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            ctrl.ApplyItalicToSelection()

    def OnAlignLeft(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            ctrl.ApplyAlignmentToSelection(wx.richtext.TEXT_ALIGNMENT_LEFT)

    def OnAlignCenter(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            ctrl.ApplyAlignmentToSelection(wx.richtext.TEXT_ALIGNMENT_CENTRE)

    def OnAlignRight(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            ctrl.ApplyAlignmentToSelection(wx.richtext.TEXT_ALIGNMENT_RIGHT)

    def OnIndentLess(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is None:
            return
        attr = wx.richtext.TextAttrEx()
        attr.SetFlags(wx.richtext.TEXT_ATTR_LEFT_INDENT)
        ip = ctrl.GetInsertionPoint()
        if ctrl.GetStyle(ip, attr):
            r = wx.richtext.RichTextRange(ip, ip)
            if ctrl.HasSelection():
                r = ctrl.GetSelectionRange()
            if attr.GetLeftIndent() >= 100:
                attr.SetLeftIndent(attr.GetLeftIndent() - 100)
                attr.SetFlags(wx.richtext.TEXT_ATTR_LEFT_INDENT)
                ctrl.SetStyle(r, attr)

    def OnIndentMore(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is None:
            return
        attr = wx.richtext.TextAttrEx()
        attr.SetFlags(wx.richtext.TEXT_ATTR_LEFT_INDENT)
        ip = ctrl.GetInsertionPoint()
        if ctrl.GetStyle(ip, attr):
            r = wx.richtext.RichTextRange(ip, ip)
            if ctrl.HasSelection():
                r = ctrl.GetSelectionRange()
            attr.SetLeftIndent(attr.GetLeftIndent() + 100)
            attr.SetFlags(wx.richtext.TEXT_ATTR_LEFT_INDENT)
            ctrl.SetStyle(r, attr)

    def OnUnderline(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            ctrl.ApplyUnderlineToSelection()

    def OnFont(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is None:
            return
        if not ctrl.HasSelection():
            return
        r = ctrl.GetSelectionRange()
        fontData = wx.FontData()
        fontData.EnableEffects(False)
        attr = wx.richtext.TextAttrEx()
        attr.SetFlags(wx.richtext.TEXT_ATTR_FONT)
        if ctrl.GetStyle(ctrl.GetInsertionPoint(), attr):
            fontData.SetInitialFont(attr.GetFont())
        dlg = wx.FontDialog(ctrl, fontData)
        if dlg.ShowModal() == wx.ID_OK:
            fontData = dlg.GetFontData()
            font = fontData.GetChosenFont()
            if font:
                attr.SetFlags(wx.richtext.TEXT_ATTR_FONT)
                attr.SetFont(font)
                ctrl.SetStyle(r, attr)
        dlg.Destroy()

    def OnColour(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is None:
            return
        if not ctrl.HasSelection():
            return
        colourData = wx.ColourData()
        attr = wx.richtext.TextAttrEx()
        attr.SetFlags(wx.richtext.TEXT_ATTR_TEXT_COLOUR)
        if ctrl.GetStyle(ctrl.GetInsertionPoint(), attr):
            colourData.SetColour(attr.GetTextColour())
        dlg = wx.ColourDialog(self, colourData)
        if dlg.ShowModal() == wx.ID_OK:
            colourData = dlg.GetColourData()
            colour = colourData.GetColour()
            if colour:
                if not ctrl.HasSelection():
                    ctrl.BeginTextColour(colour)
                else:
                    r = ctrl.GetSelectionRange()
                    attr.SetFlags(wx.richtext.TEXT_ATTR_TEXT_COLOUR)
                    attr.SetTextColour(colour)
                    ctrl.SetStyle(r, attr)
        dlg.Destroy()

    def OnInsertPicture(self, event):
        parent, index, ctrl = self.GetCurrentView()
        assert ctrl is not None
        # choose a picture
        dlg = wx.FileDialog(self, "Choose a file", defaultFile="",
                            wildcard="All files (*.*)|*.*",
                            style=wx.OPEN | wx.CHANGE_DIR)
        if dlg.ShowModal() != wx.ID_OK:
            return
        # load the picture; return if it is invalid
        image = wx.Image(dlg.GetPath())
        if not image.IsOk():
            return
        # insert the picture
        ctrl.WriteImage(image)

    def ForwardEvent(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            ctrl.ProcessEvent(event)

    def OnToolBarUpdateUI(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            event.Enable(True)
            id = event.GetId()
            if id in self.toolbar_updateui_funcs:
                f = self.toolbar_updateui_funcs[id]
                if f is not None:
                    f(event)
        else:
            event.Enable(False)

    def OnUpdateBold(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            event.Check(ctrl.IsSelectionBold())

    def OnUpdateItalic(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            event.Check(ctrl.IsSelectionItalics())

    def OnUpdateUnderline(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            event.Check(ctrl.IsSelectionUnderlined())

    def OnUpdateAlignLeft(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            event.Check(ctrl.IsSelectionAligned(wx.richtext.TEXT_ALIGNMENT_LEFT))

    def OnUpdateAlignCenter(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            event.Check(ctrl.IsSelectionAligned(wx.richtext.TEXT_ALIGNMENT_CENTRE))

    def OnUpdateAlignRight(self, event):
        parent, index, ctrl = self.GetCurrentView()
        if ctrl is not None:
            event.Check(ctrl.IsSelectionAligned(wx.richtext.TEXT_ALIGNMENT_RIGHT))

    def OnRightDown(self, event):
        tree = self.GetDirTree()
        pt = event.GetPosition()
        item, flags = tree.HitTest(pt)
        if item:
            tree.SelectItem(item)

    def OnRightUp(self, event):
        tree = self.GetDirTree()
        menu = wx.Menu()
        self.Bind(wx.EVT_MENU, self.OnCreateHtml,
                  menu.Append(ID_Menu_CreateHtml, u"新建笔记"))
        self.Bind(wx.EVT_MENU, self.OnCreateDir,
                  menu.Append(ID_Menu_CreateDir, u"新建目录"))
        menu.AppendSeparator()
        self.Bind(wx.EVT_MENU, self.OnRenameEntry,
                  menu.Append(ID_Menu_RenameEntry, u"重命名"))
        self.Bind(wx.EVT_MENU, self.OnDeleteEntry,
                  menu.Append(ID_Menu_DeleteEntry, u"删除"))
        # disable ID_Menu_DeleteEntry when the root node is selected,
        # and also when the node has children
        #
        cursel = tree.GetSelection()
        if cursel == tree.GetRootItem():
            menu.Enable(ID_Menu_DeleteEntry, False)
        if tree.ItemHasChildren(cursel):
            menu.Enable(ID_Menu_DeleteEntry, False)
        # encrypt / decrypt
        menu.AppendSeparator()
        self.Bind(wx.EVT_MENU, self.OnEncrypt,
                  menu.Append(ID_Menu_Encrypt, u"加密"))
        id = tree.GetItemPyData(cursel)
        if VsData_Type_Html == self.db.GetType(id):
            # if it is already encrypted
            if self.db.HasXtea(id):
                menu.SetLabel(ID_Menu_Encrypt, u"清除密码")
            # forbid the operation while the document has unsaved changes
            for i in range(len(self.editor_list)):
                if id == self.editor_list[i][0]:
                    if self.IsModified(i):
                        menu.Enable(ID_Menu_Encrypt, False)
                    break
        else:
            menu.Enable(ID_Menu_Encrypt, False)
        self.PopupMenu(menu)
        menu.Destroy()
        event.Skip()

    def OnCreateEntry(self, event, type):
        tree = self.GetDirTree()
        parent_item = tree.GetSelection()
        parent_id = tree.GetItemPyData(parent_item)
        name = "new item"
        if VsData_Type_Dir == type:
            image_index = 0
        else:
            image_index = 1
        child_id = self.db.Add(name, "", parent_id, type)
        child_item = tree.AppendItem(parent_item, name, image_index)
        tree.SetItemPyData(child_item, child_id)
        tree.SelectItem(child_item)
        tree.EditLabel(child_item)

    def OnCreateHtml(self, event):
        self.OnCreateEntry(event, VsData_Type_Html)

    def OnCreateDir(self, event):
        self.OnCreateEntry(event, VsData_Type_Dir)

    def OnRenameEntry(self, event):
        tree = self.GetDirTree()
        item = tree.GetSelection()
        tree.EditLabel(item)

    def OnDeleteEntry(self, event):
        """Delete a node"""
        tree = self.GetDirTree()
        item = tree.GetSelection()
        id = tree.GetItemPyData(item)
        # confirm the deletion
        ret = wx.MessageBox(u'确实要删除吗?', u'确认删除',
                            wx.YES_NO | wx.ICON_QUESTION)
        if wx.YES != ret:
            return
        # delete it from the database
        self.db.Delete(id)
        # clear the cached password
        if self.passwd_map.has_key(id):
            del self.passwd_map[id]
        # if it is open, close it
        for i in range(len(self.editor_list)):
            if id == self.editor_list[i][0]:
                del self.editor_list[i]
                self.GetNotebook().DeletePage(i)
                break
        # remove it from the directory tree
        tree.Delete(item)

    def OnEncrypt(self, event):
        tree = self.GetDirTree()
        cursel = tree.GetSelection()
        id = tree.GetItemPyData(cursel)
        assert VsData_Type_Html == self.db.GetType(id)
        if not self.db.HasXtea(id):
            # encrypt
            # ask the user for a password
            p1 = wx.GetPasswordFromUser(message=u"请输入新密码:", caption=u"加密",
                                        default_value="", parent=None)
            p2 = wx.GetPasswordFromUser(message=u"请再次输入新密码:", caption=u"加密",
                                        default_value="", parent=None)
            if p1 != p2:
                wx.MessageBox(u"输入密码不一致!", program_name,
                              wx.OK | wx.ICON_ERROR)
                return
            elif len(p1) == 0:
                wx.MessageBox(u"密码不允许为空!", program_name,
                              wx.OK | wx.ICON_ERROR)
                return
            # remember the plaintext password
            assert not self.passwd_map.has_key(id)
            self.passwd_map[id] = p1
            # commit the password hash and the data
            self.db.SetXtea(id, p1)
            self.DoSave(id, self.db.GetBody(id), encrypt=True)
        else:
            # decrypt
            # the old password is required
            p1 = wx.GetPasswordFromUser(message=u"请输入密码:", caption=u"解密",
                                        default_value="", parent=None)
            if not self.db.CheckXtea(id, p1):
                wx.MessageBox(u"密码不正确!", program_name,
                              wx.OK | wx.ICON_ERROR)
                return
            self.passwd_map[id] = p1
            self.DoSave(id, self.db.GetBody(id), encrypt=True)
            self.db.ClearXtea(id)
            del self.passwd_map[id]

    def UserQuitConfirm(self):
        ret = wx.MessageBox(u"内容已经修改但没有保存,确认要继续吗?", u'确认关闭',
                            wx.YES_NO | wx.ICON_QUESTION)
        return ret

    def OnNotebookPageClose(self, event):
        index = event.GetSelection()
        assert fall_into(index, 0, len(self.editor_list))
        # warn that the current content is modified but not yet saved
        if self.IsModified(index):
            if wx.YES != self.UserQuitConfirm():
                event.Veto()
                return
        # the close is confirmed; clean up the related data structures
        del self.editor_list[index]

    def OnExit(self, event):
        self.Close(False)

    def OnCloseWindow(self, event):
        # check whether any content is modified but not yet saved
        modified = False
        for i in range(len(self.editor_list)):
            if self.IsModified(i):
                modified = True
                break
        # ask the user to confirm
        if modified:
            if wx.YES != self.UserQuitConfirm():
                event.Veto()
                return
        # quit
        self.Destroy()

    def OnAbout(self, event):
        info = wx.AboutDialogInfo()
        info.Name = program_name
        info.Version = program_version
        info.Copyright = "(C) 2010-2011 sherking@gmail.com"
        info.Description = wx.lib.wordwrap.wordwrap(
            program_name + " is a simple richtext notepad.\n\nTHIS SOFTWARE COMES WITH ABSOLUTELY NO WARRANTY! USE AT YOUR OWN RISK!",
            430, wx.ClientDC(self))
        info.WebSite = ("http://code.google.com/p/gumpad2")
        info.Developers = ["sherking@gmail.com"]
        info.License = wx.lib.wordwrap.wordwrap("The MIT License", 500,
                                                wx.ClientDC(self))
        # Then we call wx.AboutBox giving it that info object
        wx.AboutBox(info)

    def Tree_AddNode(self, db_node, node):
        for i in range(len(db_node["subs"])):
            child_id = db_node["subs"][i]["id"]
            imgidx = self.GetDirTreeImageIndexByType(self.db.GetType(child_id))
            n = self.tree.AppendItem(node, self.db.GetTitle(child_id), imgidx)
            self.tree.SetItemPyData(n, child_id)
            self.Tree_AddNode(db_node["subs"][i], n)

    def CreateTreeCtrl(self):
        self.tree = VsTreeCtrl(self, -1, wx.Point(0, 0), wx.Size(200, 250),
                               wx.TR_DEFAULT_STYLE | wx.NO_BORDER |
                               wx.TR_EDIT_LABELS | wx.TR_NO_BUTTONS)
        imglist = wx.ImageList(16, 16, True, 2)
        imglist.Add(wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_OTHER,
                                             wx.Size(16, 16)))
        imglist.Add(wx.ArtProvider.GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER,
                                             wx.Size(16, 16)))
        self.tree.AssignImageList(imglist)
        db_root = self.db.GetRoot()
        root = self.tree.AddRoot(self.db.GetTitle(), 0)
        self.tree.SetItemPyData(root, db_root["id"])
        self.Tree_AddNode(db_root, root)
        self.tree.ExpandAllChildren(root)
        self.tree.SelectItem(root)
        self.tree.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
        self.tree.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
        self.tree.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnTreeItemActivated)
        self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.OnTreeEndLabelEdit)
        self.tree.Bind(wx.EVT_TREE_BEGIN_DRAG, self.OnTreeBeginDrag)
        self.tree.Bind(wx.EVT_TREE_END_DRAG, self.OnTreeEndDrag)
        return self.tree

    def CreateNotebook(self):
        client_size = self.GetClientSize()
        ctrl = aui.AuiNotebook(self, -1,
                               wx.Point(client_size.x, client_size.y),
                               wx.Size(430, 200), self._notebook_style)
        ctrl.SetArtProvider(aui.AuiDefaultTabArt())
        ctrl.Bind(aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnNotebookPageClose)
        return ctrl


class MyApp(wx.App):

    def __init__(self):
        wx.App.__init__(self, 0)

    def OnInit(self):
        self.frame = VsFrame(None, -1, program_title, size=(800, 600))
        self.frame.CenterOnScreen()
        self.frame.Show()
        self.Bind(wx.EVT_ACTIVATE_APP, self.OnActivate)
        return True

    def OnActivate(self, event):
        if event.GetActive():
            pass


def main():
    global program_dbpath

    # locale settings
    locale.setlocale(locale.LC_ALL, '')

    # command-line argument parsing
    usage = program_name + " [-f <file>] [-h] [-v]"
    program_dbpath = os.path.join(os.path.expanduser("~"), program_dbpath)
    parser = optparse.OptionParser(usage)
    parser.add_option("-v", "--version",
                      action="store_true", dest="version", default=False,
                      help="print the version number of the executable and exit")
    parser.add_option("-f", "--file",
                      action="store", type="string", dest="file",
                      default=program_dbpath,
                      help="specify the data file")
    options, args = parser.parse_args(sys.argv[1:])
    if options.version:
        print program_title
        return
    if len(args) > 0:
        parser.print_help()
        return

    # check whether the user-specified file is valid
    program_dbpath = os.path.expanduser(options.file)
    if not os.path.isabs(program_dbpath):
        program_dbpath = os.path.realpath(os.path.join(os.curdir,
                                                       program_dbpath))
    # create intermediate directories
    dirname = os.path.dirname(program_dbpath)
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except:
            print "Error: " + options.file + " is not a valid filename"
            return
    elif not os.path.isdir(dirname):
        print "Error: " + options.file + " is not a valid filename"
        return

    if os.path.exists(program_dbpath):
        # exit if the path exists but is not a file
        if not os.path.isfile(program_dbpath):
            print "Error: " + options.file + " is not a valid filename"
            return
        # exit if it is not a valid database
        try:
            db = VsData(program_dbpath)
            assert db.GetMagic() == VsData_Format_Magic
            if db.GetVersion() > VsData_Format_Version:
                print "Error: " + options.file + \
                    " has version (%d), higher than the executable (%d)" % \
                    (db.GetVersion(), VsData_Format_Version)
                return
        except:
            print "Error: " + options.file + " exists but corrupted"
            return

    # start the GUI
    app = MyApp()
    app.MainLoop()


if __name__ == '__main__':
    main()
mit
-7,152,632,794,733,037,000
32.421268
176
0.58031
false
3.292747
false
false
false
arunkgupta/gramps
gramps/gui/merge/mergefamily.py
1
9809
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010  Michiel D. Nauta
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

# $Id$

"""
Provide merge capabilities for families.
"""

#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.ggettext import sgettext as _
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.const import URL_MANUAL_PAGE
from ..display import display_help
from gramps.gen.errors import MergeError
from ..dialog import ErrorDialog
from ..managedwindow import ManagedWindow
from gramps.gen.merge import MergePersonQuery, MergeFamilyQuery

#-------------------------------------------------------------------------
#
# Gramps constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Entering_and_Editing_Data:_Detailed_-_part_3' % \
        URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Merge_Families')
_GLADE_FILE = 'mergefamily.glade'

#-------------------------------------------------------------------------
#
# MergeFamily
#
#-------------------------------------------------------------------------
class MergeFamily(ManagedWindow):
    """
    Merges two families into a single family. Displays a dialog box that
    allows the families to be combined into one.
    """
    def __init__(self, dbstate, uistate, handle1, handle2):
        ManagedWindow.__init__(self, uistate, [], self.__class__)
        self.database = dbstate.db
        self.fy1 = self.database.get_family_from_handle(handle1)
        self.fy2 = self.database.get_family_from_handle(handle2)

        self.define_glade('mergefamily', _GLADE_FILE)
        self.set_window(self._gladeobj.toplevel,
                        self.get_widget("family_title"),
                        _("Merge Families"))

        # Detailed selection widgets
        father1 = self.fy1.get_father_handle()
        father2 = self.fy2.get_father_handle()
        father1 = self.database.get_person_from_handle(father1)
        father2 = self.database.get_person_from_handle(father2)
        father_id1 = father1.get_gramps_id() if father1 else ""
        father_id2 = father2.get_gramps_id() if father2 else ""
        father1 = name_displayer.display(father1) if father1 else ""
        father2 = name_displayer.display(father2) if father2 else ""
        entry1 = self.get_widget("father1")
        entry2 = self.get_widget("father2")
        entry1.set_text("%s [%s]" % (father1, father_id1))
        entry2.set_text("%s [%s]" % (father2, father_id2))
        deactivate = False
        if father_id1 == "" and father_id2 == "":
            deactivate = True
        elif father_id2 == "":
            self.get_widget("father_btn1").set_active(True)
            deactivate = True
        elif father_id1 == "":
            self.get_widget("father_btn2").set_active(True)
            deactivate = True
        elif entry1.get_text() == entry2.get_text():
            deactivate = True
        if deactivate:
            for widget_name in ('father1', 'father2', 'father_btn1',
                                'father_btn2'):
                self.get_widget(widget_name).set_sensitive(False)

        mother1 = self.fy1.get_mother_handle()
        mother2 = self.fy2.get_mother_handle()
        mother1 = self.database.get_person_from_handle(mother1)
        mother2 = self.database.get_person_from_handle(mother2)
        mother_id1 = mother1.get_gramps_id() if mother1 else ""
        mother_id2 = mother2.get_gramps_id() if mother2 else ""
        mother1 = name_displayer.display(mother1) if mother1 else ""
        mother2 = name_displayer.display(mother2) if mother2 else ""
        entry1 = self.get_widget("mother1")
        entry2 = self.get_widget("mother2")
        entry1.set_text("%s [%s]" % (mother1, mother_id1))
        entry2.set_text("%s [%s]" % (mother2, mother_id2))
        deactivate = False
        if mother_id1 == "" and mother_id2 == "":
            deactivate = True
        elif mother_id1 == "":
            self.get_widget("mother_btn2").set_active(True)
            deactivate = True
        elif mother_id2 == "":
            self.get_widget("mother_btn1").set_active(True)
            deactivate = True
        elif entry1.get_text() == entry2.get_text():
            deactivate = True
        if deactivate:
            for widget_name in ('mother1', 'mother2', 'mother_btn1',
                                'mother_btn2'):
                self.get_widget(widget_name).set_sensitive(False)

        entry1 = self.get_widget("rel1")
        entry2 = self.get_widget("rel2")
        entry1.set_text(str(self.fy1.get_relationship()))
        entry2.set_text(str(self.fy2.get_relationship()))
        if entry1.get_text() == entry2.get_text():
            for widget_name in ('rel1', 'rel2', 'rel_btn1', 'rel_btn2'):
                self.get_widget(widget_name).set_sensitive(False)

        gramps1 = self.fy1.get_gramps_id()
        gramps2 = self.fy2.get_gramps_id()
        entry1 = self.get_widget("gramps1")
        entry2 = self.get_widget("gramps2")
        entry1.set_text(gramps1)
        entry2.set_text(gramps2)
        if entry1.get_text() == entry2.get_text():
            for widget_name in ('gramps1', 'gramps2', 'gramps_btn1',
                                'gramps_btn2'):
                self.get_widget(widget_name).set_sensitive(False)

        # Main window widgets that determine which handle survives
        rbutton1 = self.get_widget("handle_btn1")
        rbutton_label1 = self.get_widget("label_handle_btn1")
        rbutton_label2 = self.get_widget("label_handle_btn2")
        rbutton_label1.set_label("%s and %s [%s]" % (father1, mother1, gramps1))
        rbutton_label2.set_label("%s and %s [%s]" % (father2, mother2, gramps2))
        rbutton1.connect("toggled", self.on_handle1_toggled)

        self.connect_button("family_help", self.cb_help)
        self.connect_button("family_ok", self.cb_merge)
        self.connect_button("family_cancel", self.close)
        self.show()

    def on_handle1_toggled(self, obj):
        """Preferred family changes"""
        if obj.get_active():
            father1_text = self.get_widget("father1").get_text()
            if (father1_text != " []" or
                self.get_widget("father2").get_text() == " []"):
                self.get_widget("father_btn1").set_active(True)
            mother1_text = self.get_widget("mother1").get_text()
            if (mother1_text != " []" or
                self.get_widget("mother2").get_text() == " []"):
                self.get_widget("mother_btn1").set_active(True)
            self.get_widget("rel_btn1").set_active(True)
            self.get_widget("gramps_btn1").set_active(True)
        else:
            father2_text = self.get_widget("father2").get_text()
            if (father2_text != " []" or
                self.get_widget("father1").get_text() == " []"):
                self.get_widget("father_btn2").set_active(True)
            mother2_text = self.get_widget("mother2").get_text()
            if (mother2_text != " []" or
                self.get_widget("mother1").get_text() == " []"):
                self.get_widget("mother_btn2").set_active(True)
            self.get_widget("rel_btn2").set_active(True)
            self.get_widget("gramps_btn2").set_active(True)

    def cb_help(self, obj):
        """Display the relevant portion of the Gramps manual"""
        display_help(webpage = WIKI_HELP_PAGE, section = WIKI_HELP_SEC)

    def cb_merge(self, obj):
        """
        Perform the merge of the families when the merge button is clicked.
        """
        self.uistate.set_busy_cursor(True)
        use_handle1 = self.get_widget("handle_btn1").get_active()
        if use_handle1:
            phoenix = self.fy1
            titanic = self.fy2
        else:
            phoenix = self.fy2
            titanic = self.fy1
            # Add second handle to history so that when merge is complete,
            # phoenix is the selected row.
            self.uistate.viewmanager.active_page.get_history().push(
                    phoenix.get_handle())

        phoenix_fh = phoenix.get_father_handle()
        phoenix_mh = phoenix.get_mother_handle()
        if self.get_widget("father_btn1").get_active() ^ use_handle1:
            phoenix_fh = titanic.get_father_handle()
        if self.get_widget("mother_btn1").get_active() ^ use_handle1:
            phoenix_mh = titanic.get_mother_handle()
        if self.get_widget("rel_btn1").get_active() ^ use_handle1:
            phoenix.set_relationship(titanic.get_relationship())
        if self.get_widget("gramps_btn1").get_active() ^ use_handle1:
            phoenix.set_gramps_id(titanic.get_gramps_id())

        try:
            query = MergeFamilyQuery(self.database, phoenix, titanic,
                                     phoenix_fh, phoenix_mh)
            query.execute()
        except MergeError, err:
            ErrorDialog(_("Cannot merge people"), str(err))
        self.uistate.set_busy_cursor(False)
        self.close()
gpl-2.0
-4,250,079,644,733,064,000
42.402655
79
0.574269
false
3.489506
false
false
false
tensorflow/datasets
tensorflow_datasets/scripts/documentation/generate_visualization.py
1
2247
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""Script which generates datasets figures.
"""

import functools
import os
import tempfile

from absl import flags
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.scripts.documentation import script_utils

FLAGS = flags.FLAGS

flags.DEFINE_string(
    'datasets', None,
    'Comma separated list of datasets to generate. None for all datasets.')
flags.DEFINE_string('dst_dir', None, 'Destination dir to save the images.')
flags.DEFINE_boolean('overwrite', False,
                     'If True, overwrite the existing visualizations.')


def _save_fig(dst_path: str, figure: matplotlib.figure.Figure) -> None:
  """Save the generated figures for the dataset in dst_dir."""
  # `savefig` do not support GCS, so first save the image locally.
  with tempfile.TemporaryDirectory() as tmp_dir:
    tmp_path = os.path.join(tmp_dir, 'tmp.png')
    figure.savefig(tmp_path)
    tf.io.gfile.copy(tmp_path, dst_path, overwrite=FLAGS.overwrite)
  plt.close(figure)


def main(_):
  """Main script."""
  datasets = FLAGS.datasets.split(',') if FLAGS.datasets else None
  generate_and_save_figure_fn = functools.partial(
      script_utils.generate_and_save_artifact,
      dst_dir=FLAGS.dst_dir or tfds.core.gcs_path('visualization/fig'),
      overwrite=FLAGS.overwrite,
      file_extension='.png',
      get_artifact_fn=tfds.show_examples,
      save_artifact_fn=_save_fig,
  )
  script_utils.multi_process_map(
      worker_fn=generate_and_save_figure_fn,
      datasets=datasets,
  )


if __name__ == '__main__':
  script_utils.multi_process_run(main)
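# Usage sketch: the flag names come from the DEFINE_* calls above; the
# dataset names and output path below are hypothetical examples, not values
# taken from this file.
#
#   python -m tensorflow_datasets.scripts.documentation.generate_visualization \
#       --datasets=mnist,cifar10 --dst_dir=/tmp/figs --overwrite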
apache-2.0
7,384,031,377,480,852,000
31.1
76
0.723632
false
3.738769
false
false
false
endlessm/chromium-browser
third_party/catapult/dashboard/dashboard/graph_csv_test.py
1
6662
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

import csv
import StringIO
import unittest

import webapp2
import webtest

from dashboard import graph_csv
from dashboard.common import datastore_hooks
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import graph_data


class GraphCsvTest(testing_common.TestCase):

  def setUp(self):
    super(GraphCsvTest, self).setUp()
    app = webapp2.WSGIApplication([('/graph_csv', graph_csv.GraphCsvHandler)])
    self.testapp = webtest.TestApp(app)
    self.SetCurrentUser('foo@bar.com', is_admin=True)

  def _AddMockData(self):
    master = graph_data.Master(id='ChromiumPerf').put()
    bots = []
    for name in ['win7', 'mac']:
      bot = graph_data.Bot(id=name, parent=master).put()
      bots.append(bot)
      t = graph_data.TestMetadata(id='ChromiumPerf/%s/dromaeo' % name)
      t.UpdateSheriff()
      t.put()
      dom_test = graph_data.TestMetadata(
          id='ChromiumPerf/%s/dromaeo/dom' % name, has_rows=True)
      dom_test.UpdateSheriff()
      dom_test.put()
      test_container_key = utils.GetTestContainerKey(dom_test)
      for i in range(15000, 16000, 5):
        graph_data.Row(parent=test_container_key, id=i, value=float(i * 2.5),
                       error=(i + 5)).put()

  def _AddMockInternalData(self):
    master = graph_data.Master(id='ChromiumPerf').put()
    bots = []
    for name in ['win7', 'mac']:
      bot = graph_data.Bot(id=name, parent=master, internal_only=True).put()
      bots.append(bot)
      t = graph_data.TestMetadata(
          id='ChromiumPerf/%s/dromaeo' % name, internal_only=True)
      t.UpdateSheriff()
      t.put()
      dom_test = graph_data.TestMetadata(
          id='ChromiumPerf/%s/dromaeo/dom' % name, has_rows=True,
          internal_only=True)
      dom_test.UpdateSheriff()
      dom_test.put()
      test_container_key = utils.GetTestContainerKey(dom_test)
      for i in range(1, 50):
        graph_data.Row(
            parent=test_container_key, id=i, value=float(i * 2),
            error=(i + 10), internal_only=True).put()

  def _CheckGet(
      self, result_query, expected_result, whitelisted_ip='', status=200):
    """Asserts that the given query has the given CSV result.

    Args:
      result_query: The path and query string to request.
      expected_result: The expected table of values (list of lists).
      whitelisted_ip: The IP address to set as request remote address.
    """
    response_rows = []
    response = self.testapp.get(
        result_query,
        extra_environ={'REMOTE_ADDR': whitelisted_ip},
        status=status)
    if status != 200:
      return
    for row in csv.reader(StringIO.StringIO(response.body)):
      response_rows.append(row)
    self.assertEqual(expected_result, response_rows)

  def testGetCsv(self):
    self._AddMockData()
    response = self.testapp.get(
        '/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom')
    for index, row, in enumerate(csv.reader(StringIO.StringIO(response.body))):
      # Skip the headers
      if index > 0:
        expected_rev = str(15000 + ((index - 1) * 5))
        expected_value = str(int(expected_rev) * 2.5)
        self.assertEqual([expected_rev, expected_value], row)

  def testPost(self):
    self._AddMockData()
    response = self.testapp.post(
        '/graph_csv?', {'test_path': 'ChromiumPerf/win7/dromaeo/dom'})
    for index, row, in enumerate(csv.reader(StringIO.StringIO(response.body))):
      # Skip the headers
      if index > 0:
        expected_rev = str(15000 + ((index - 1) * 5))
        expected_value = str(int(expected_rev) * 2.5)
        self.assertEqual([expected_rev, expected_value], row)

  def testRevNumRows(self):
    self._AddMockData()
    query = ('/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&'
             'rev=15270&num_points=5')
    expected = [
        ['revision', 'value'],
        ['15250', '38125.0'],
        ['15255', '38137.5'],
        ['15260', '38150.0'],
        ['15265', '38162.5'],
        ['15270', '38175.0'],
    ]
    self._CheckGet(query, expected)

  def testAttrRows(self):
    self._AddMockData()
    query = ('/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&'
             'rev=15270&num_points=5&attr=revision,error,value')
    expected = [
        ['revision', 'error', 'value'],
        ['15250', '15255.0', '38125.0'],
        ['15255', '15260.0', '38137.5'],
        ['15260', '15265.0', '38150.0'],
        ['15265', '15270.0', '38162.5'],
        ['15270', '15275.0', '38175.0'],
    ]
    self._CheckGet(query, expected)
    query = ('/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&'
             'rev=15270&num_points=5&attr=value')
    expected = [
        ['value'],
        ['38125.0'],
        ['38137.5'],
        ['38150.0'],
        ['38162.5'],
        ['38175.0'],
    ]
    self._CheckGet(query, expected)
    query = ('/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&'
             'num_points=5&attr=revision,random,value')
    expected = [
        ['revision', 'random', 'value'],
        ['15975', '', '39937.5'],
        ['15980', '', '39950.0'],
        ['15985', '', '39962.5'],
        ['15990', '', '39975.0'],
        ['15995', '', '39987.5'],
    ]
    self._CheckGet(query, expected)

  def testGet_WithNonInternalUserAndWhitelistedIP(self):
    self._AddMockInternalData()
    self.UnsetCurrentUser()
    datastore_hooks.InstallHooks()
    testing_common.SetIpWhitelist(['123.45.67.89'])
    query = '/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&num_points=3'
    expected = [['revision', 'value']]
    self._CheckGet(query, expected, status=500)

  def testGet_WhitelistedIPOnly(self):
    self.PatchDatastoreHooksRequest('123.45.67.89')
    self._AddMockInternalData()
    self.UnsetCurrentUser()
    datastore_hooks.InstallHooks()
    testing_common.SetIpWhitelist(['123.45.67.89'])
    query = '/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&num_points=3'
    expected = [
        ['revision', 'value'],
        ['47', '94.0'],
        ['48', '96.0'],
        ['49', '98.0']
    ]
    self._CheckGet(query, expected, whitelisted_ip='123.45.67.89')

  def testGet_NoTestPathGiven_GivesError(self):
    testing_common.SetIpWhitelist(['123.45.67.89'])
    self.testapp.get(
        '/graph_csv',
        extra_environ={'REMOTE_ADDR': '123.45.67.89'},
        status=400)


if __name__ == '__main__':
  unittest.main()
bsd-3-clause
-4,719,458,784,556,416,000
32.31
80
0.613329
false
3.326011
true
false
false
huntxu/neutron
neutron/common/constants.py
1
9868
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from neutron_lib.api.definitions import l3
from neutron_lib import constants as lib_constants
from neutron_lib.plugins import constants as plugin_consts

ROUTER_PORT_OWNERS = lib_constants.ROUTER_INTERFACE_OWNERS_SNAT + \
    (lib_constants.DEVICE_OWNER_ROUTER_GW,)

ROUTER_STATUS_ACTIVE = 'ACTIVE'
ROUTER_STATUS_ALLOCATING = 'ALLOCATING'
ROUTER_STATUS_ERROR = 'ERROR'
VALID_ROUTER_STATUS = (ROUTER_STATUS_ACTIVE,
                       ROUTER_STATUS_ALLOCATING,
                       ROUTER_STATUS_ERROR)

HA_ROUTER_STATE_KEY = '_ha_state'
METERING_LABEL_KEY = '_metering_labels'
FLOATINGIP_AGENT_INTF_KEY = '_floatingip_agent_interfaces'
SNAT_ROUTER_INTF_KEY = '_snat_router_interfaces'

HA_NETWORK_NAME = 'HA network tenant %s'
HA_SUBNET_NAME = 'HA subnet tenant %s'
HA_PORT_NAME = 'HA port tenant %s'
HA_ROUTER_STATE_ACTIVE = 'active'
HA_ROUTER_STATE_STANDBY = 'standby'
HA_ROUTER_STATE_UNKNOWN = 'unknown'
VALID_HA_STATES = (HA_ROUTER_STATE_ACTIVE, HA_ROUTER_STATE_STANDBY,
                   HA_ROUTER_STATE_UNKNOWN)

PAGINATION_INFINITE = 'infinite'

SORT_DIRECTION_ASC = 'asc'
SORT_DIRECTION_DESC = 'desc'

ETHERTYPE_NAME_ARP = 'arp'
ETHERTYPE_ARP = 0x0806
ETHERTYPE_IP = 0x0800
ETHERTYPE_IPV6 = 0x86DD

IP_PROTOCOL_NAME_ALIASES = {lib_constants.PROTO_NAME_IPV6_ICMP_LEGACY:
                            lib_constants.PROTO_NAME_IPV6_ICMP}

IP_PROTOCOL_NUM_TO_NAME_MAP = {
    str(v): k for k, v in lib_constants.IP_PROTOCOL_MAP.items()}

# When using iptables-save we specify '-p {proto}',
# but sometimes those values are not identical. This is a map
# of known protocol numbers that require a name to be used and
# protocol names that require a different name to be used,
# because that is how iptables-save will display them.
#
# This is how the list was created, so there is a possibility
# it will need to be updated in the future:
#
# $ for num in {0..255}; do iptables -A INPUT -p $num; done
# $ iptables-save
#
# These cases are special, and were found by inspection:
# - 'ipv6-encap' uses 'ipv6'
# - 'icmpv6' uses 'ipv6-icmp'
# - 'pgm' uses '113' instead of its name
# - protocol '0' uses no -p argument
IPTABLES_PROTOCOL_NAME_MAP = {
    lib_constants.PROTO_NAME_IPV6_ENCAP: 'ipv6',
    lib_constants.PROTO_NAME_IPV6_ICMP_LEGACY: 'ipv6-icmp',
    lib_constants.PROTO_NAME_PGM: '113',
    '0': None, '1': 'icmp', '2': 'igmp', '3': 'ggp', '4': 'ipencap',
    '5': 'st', '6': 'tcp', '8': 'egp', '9': 'igp', '12': 'pup',
    '17': 'udp', '20': 'hmp', '22': 'xns-idp', '27': 'rdp',
    '29': 'iso-tp4', '33': 'dccp', '36': 'xtp', '37': 'ddp',
    '38': 'idpr-cmtp', '41': 'ipv6', '43': 'ipv6-route', '44': 'ipv6-frag',
    '45': 'idrp', '46': 'rsvp', '47': 'gre', '50': 'esp', '51': 'ah',
    '57': 'skip', '58': 'ipv6-icmp', '59': 'ipv6-nonxt', '60': 'ipv6-opts',
    '73': 'rspf', '81': 'vmtp', '88': 'eigrp', '89': 'ospf', '93': 'ax.25',
    '94': 'ipip', '97': 'etherip', '98': 'encap', '103': 'pim',
    '108': 'ipcomp', '112': 'vrrp', '115': 'l2tp', '124': 'isis',
    '132': 'sctp', '133': 'fc', '135': 'mobility-header', '136': 'udplite',
    '137': 'mpls-in-ip', '138': 'manet', '139': 'hip', '140': 'shim6',
    '141': 'wesp', '142': 'rohc'}

# A length of a iptables chain name must be less than or equal to 11
# characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_IPTABLES_CHAIN_LEN_WRAP = 11
MAX_IPTABLES_CHAIN_LEN_NOWRAP = 28

# Timeout in seconds for getting an IPv6 LLA
LLA_TASK_TIMEOUT = 40

# length of all device prefixes (e.g. qvo, tap, qvb)
LINUX_DEV_PREFIX_LEN = 3
# must be shorter than linux IFNAMSIZ (which is 16)
LINUX_DEV_LEN = 14

# Possible prefixes to partial port IDs in interface names used by the OVS,
# Linux Bridge, and IVS VIF drivers in Nova and the neutron agents. See the
# 'get_ovs_interfaceid' method in Nova (nova/virt/libvirt/vif.py) for details.
INTERFACE_PREFIXES = (lib_constants.TAP_DEVICE_PREFIX,
                      lib_constants.VETH_DEVICE_PREFIX,
                      lib_constants.SNAT_INT_DEV_PREFIX)

ATTRIBUTES_TO_UPDATE = 'attributes_to_update'

# TODO(amuller): Re-define the RPC namespaces once Oslo messaging supports
# Targets with multiple namespaces. Neutron will then implement callbacks
# for its RPC clients in order to support rolling upgrades.

# RPC Interface for agents to call DHCP API implemented on the plugin side
RPC_NAMESPACE_DHCP_PLUGIN = None
# RPC interface for the metadata service to get info from the plugin side
RPC_NAMESPACE_METADATA = None
# RPC interface for agent to plugin security group API
RPC_NAMESPACE_SECGROUP = None
# RPC interface for agent to plugin DVR api
RPC_NAMESPACE_DVR = None
# RPC interface for reporting state back to the plugin
RPC_NAMESPACE_STATE = None
# RPC interface for agent to plugin resources API
RPC_NAMESPACE_RESOURCES = None

# Default network MTU value when not configured
DEFAULT_NETWORK_MTU = 1500
IPV6_MIN_MTU = 1280

ROUTER_MARK_MASK = "0xffff"

VALID_ETHERTYPES = (lib_constants.IPv4, lib_constants.IPv6)

IP_ALLOWED_VERSIONS = [lib_constants.IP_VERSION_4, lib_constants.IP_VERSION_6]

PORT_RANGE_MIN = 1
PORT_RANGE_MAX = 65535

# Configuration values for accept_ra sysctl, copied from linux kernel
# networking (netdev) tree, file Documentation/networking/ip-sysctl.txt
#
# Possible values are:
#         0 Do not accept Router Advertisements.
#         1 Accept Router Advertisements if forwarding is disabled.
#         2 Overrule forwarding behaviour. Accept Router Advertisements
#           even if forwarding is enabled.
ACCEPT_RA_DISABLED = 0
ACCEPT_RA_WITHOUT_FORWARDING = 1
ACCEPT_RA_WITH_FORWARDING = 2

# Some components communicate using private address ranges, define
# them all here. These address ranges should not cause any issues
# even if they overlap since they are used in disjoint namespaces,
# but for now they are unique.
# We define the metadata cidr since it falls in the range.
PRIVATE_CIDR_RANGE = '169.254.0.0/16'
DVR_FIP_LL_CIDR = '169.254.64.0/18'
L3_HA_NET_CIDR = '169.254.192.0/18'
METADATA_CIDR = '169.254.169.254/32'

# The only defined IpamAllocation status at this stage is 'ALLOCATED'.
# More states will be available in the future - e.g.: RECYCLABLE
IPAM_ALLOCATION_STATUS_ALLOCATED = 'ALLOCATED'

VALID_IPAM_ALLOCATION_STATUSES = (IPAM_ALLOCATION_STATUS_ALLOCATED,)

# Port binding states for Live Migration
PORT_BINDING_STATUSES = (lib_constants.ACTIVE,
                         lib_constants.INACTIVE)

VALID_FLOATINGIP_STATUS = (lib_constants.FLOATINGIP_STATUS_ACTIVE,
                           lib_constants.FLOATINGIP_STATUS_DOWN,
                           lib_constants.FLOATINGIP_STATUS_ERROR)

# Floating IP host binding states
FLOATING_IP_HOST_UNBOUND = "FLOATING_IP_HOST_UNBOUND"
FLOATING_IP_HOST_NEEDS_BINDING = "FLOATING_IP_HOST_NEEDS_BINDING"

# Possible types of values (e.g. in QoS rule types)
VALUES_TYPE_CHOICES = "choices"
VALUES_TYPE_RANGE = "range"

# Units base
SI_BASE = 1000
IEC_BASE = 1024

# Port bindings handling
NO_ACTIVE_BINDING = 'no_active_binding'

# Registered extension parent resource check mapping
# If we want to register some service plugin resources into policy and check
# the owner when operating their subresources. We can write here to use
# existing policy engine for parent resource owner check.
# Each entry here should be PARENT_RESOURCE_NAME: SERVICE_PLUGIN_NAME,
# PARENT_RESOURCE_NAME is usually from api definition.
# SERVICE_PLUGIN_NAME is the service plugin which introduced the resource and
# registered the service plugin name in neutron-lib.
EXT_PARENT_RESOURCE_MAPPING = {
    l3.FLOATINGIP: plugin_consts.L3
}
EXT_PARENT_PREFIX = 'ext_parent'
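
# Illustrative lookup (the helper below is hypothetical, not part of this
# module): code that emits iptables-save-compatible rules can normalize a
# protocol value through the map above, falling back to the raw value when
# no alias is registered.
#
#     def _iptables_protocol_arg(proto):
#         return IPTABLES_PROTOCOL_NAME_MAP.get(str(proto), str(proto))
#
#     _iptables_protocol_arg('icmpv6')  # -> 'ipv6-icmp'
#     _iptables_protocol_arg(112)       # -> 'vrrp'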
apache-2.0
3,229,446,407,521,964,500
38.15873
78
0.583502
false
3.750665
false
false
false
davzhang/helix-python-binding
org/apache/helix/messaging/handling/GroupMessageHandler.py
1
3490
# package org.apache.helix.messaging.handling
# from org.apache.helix.messaging.handling import *
# from java.util import HashMap
# from java.util import List
# from java.util import Map
# from java.util.concurrent import ConcurrentHashMap
# from java.util.concurrent import ConcurrentLinkedQueue
# from java.util.concurrent.atomic import AtomicInteger
from org.apache.helix.PropertyKey import PropertyKey
from org.apache.helix.model.CurrentState import CurrentState
from org.apache.helix.model.Message import Message
from org.apache.helix.model.Message import Attributes


class CurrentStateUpdate:
    """
    Parameters:
        PropertyKey key
        CurrentState curStateDelta
    """
    def __init__(self, key, curStateDelta):
        self._key = key
        self._curStateDelta = curStateDelta

    def merge(self, curState):
        """
        Returns void
        Parameters:
            curState: CurrentState
        """
        self._curStateDelta.getRecord().merge(curState.getRecord())


class GroupMessageInfo:
    """
    Parameters:
        Message message
    """
    def __init__(self, message):
        self._message = message
        # List<String>
        partitionNames = message.getPartitionNames()
        # The original Java uses AtomicInteger(partitionNames.size());
        # a plain int is sufficient here.
        self._countDown = len(partitionNames)
        # The original Java uses ConcurrentLinkedQueue<CurrentStateUpdate>()
        self._curStateUpdateList = []

    def merge(self):
        """
        Returns Map<PropertyKey, CurrentState>
        """
        # Map<String, CurrentStateUpdate>
        curStateUpdateMap = {}
        for update in self._curStateUpdateList:
            # String
            path = update._key.getPath()
            if path not in curStateUpdateMap:
                curStateUpdateMap[path] = update
            else:
                curStateUpdateMap[path].merge(update._curStateDelta)

        # Map<PropertyKey, CurrentState>
        ret = {}
        for update in curStateUpdateMap.values():
            ret[update._key] = update._curStateDelta
        return ret


class GroupMessageHandler:
    """
    """
    def __init__(self):
        # The original Java uses ConcurrentHashMap<String, GroupMessageInfo>()
        self._groupMsgMap = {}

    def put(self, message):
        """
        Returns void
        Parameters:
            message: Message
        """
        # dict.setdefault is the closest Python analogue of Java's putIfAbsent
        self._groupMsgMap.setdefault(message.getId(), GroupMessageInfo(message))

    def onCompleteSubMessage(self, subMessage):
        """
        Returns GroupMessageInfo
        Parameters:
            subMessage: Message
        """
        # String
        parentMid = subMessage.getAttribute(Attributes.PARENT_MSG_ID)
        # GroupMessageInfo
        info = self._groupMsgMap.get(parentMid)
        if info is not None:
            # int; decrementAndGet() in the original Java
            info._countDown -= 1
            if info._countDown <= 0:
                # dict.pop returns the removed value, like Java's Map.remove
                return self._groupMsgMap.pop(parentMid)
        return None

    def addCurStateUpdate(self, subMessage, key, delta):
        """
        Returns void
        Parameters:
            subMessage: Message, key: PropertyKey, delta: CurrentState
        """
        # String
        parentMid = subMessage.getAttribute(Attributes.PARENT_MSG_ID)
        # GroupMessageInfo
        info = self._groupMsgMap.get(parentMid)
        if info is not None:
            info._curStateUpdateList.append(CurrentStateUpdate(key, delta))
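
# Usage sketch (the Message objects are hypothetical; real ones come from
# org.apache.helix.model.Message):
#
#     handler = GroupMessageHandler()
#     handler.put(group_msg)                     # register a group message
#     info = handler.onCompleteSubMessage(sub)   # None until the last sub-message
#     if info is not None:
#         merged = info.merge()                  # {PropertyKey: CurrentState}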
apache-2.0
-5,867,024,208,946,304,000
23.928571
83
0.619771
false
4.335404
false
false
false
qedsoftware/commcare-hq
corehq/apps/repeaters/tests/test_dbaccessors.py
1
3943
from datetime import datetime, timedelta

from django.test import TestCase

from corehq.apps.repeaters.dbaccessors import (
    get_pending_repeat_record_count,
    get_success_repeat_record_count,
    get_failure_repeat_record_count,
    get_repeat_record_count,
    get_repeaters_by_domain,
    get_paged_repeat_records,
    iterate_repeat_records,
)
from corehq.apps.repeaters.models import RepeatRecord, CaseRepeater
from corehq.apps.repeaters.const import RECORD_PENDING_STATE


class TestRepeatRecordDBAccessors(TestCase):
    repeater_id = '1234'
    other_id = '5678'
    domain = 'test-domain-2'

    @classmethod
    def setUpClass(cls):
        before = datetime.utcnow() - timedelta(minutes=5)
        failed = RepeatRecord(
            domain=cls.domain,
            failure_reason='Some python error',
            repeater_id=cls.repeater_id,
            next_event=before,
        )
        success = RepeatRecord(
            domain=cls.domain,
            succeeded=True,
            repeater_id=cls.repeater_id,
            next_event=before,
        )
        pending = RepeatRecord(
            domain=cls.domain,
            succeeded=False,
            repeater_id=cls.repeater_id,
            next_event=before,
        )
        other_id = RepeatRecord(
            domain=cls.domain,
            succeeded=False,
            repeater_id=cls.other_id,
            next_event=before,
        )

        cls.records = [
            failed,
            success,
            pending,
            other_id,
        ]

        for record in cls.records:
            record.save()

    @classmethod
    def tearDownClass(cls):
        for record in cls.records:
            record.delete()

    def test_get_pending_repeat_record_count(self):
        count = get_pending_repeat_record_count(self.domain, self.repeater_id)
        self.assertEqual(count, 1)

    def test_get_success_repeat_record_count(self):
        count = get_success_repeat_record_count(self.domain, self.repeater_id)
        self.assertEqual(count, 1)

    def test_get_failure_repeat_record_count(self):
        count = get_failure_repeat_record_count(self.domain, self.repeater_id)
        self.assertEqual(count, 1)

    def test_get_paged_repeat_records_with_state_and_no_records(self):
        count = get_repeat_record_count('wrong-domain', state=RECORD_PENDING_STATE)
        self.assertEqual(count, 0)

    def test_get_paged_repeat_records(self):
        records = get_paged_repeat_records(self.domain, 0, 2)
        self.assertEqual(len(records), 2)

    def test_get_paged_repeat_records_with_repeater_id(self):
        records = get_paged_repeat_records(self.domain, 0, 2,
                                           repeater_id=self.other_id)
        self.assertEqual(len(records), 1)

    def test_get_paged_repeat_records_with_state(self):
        records = get_paged_repeat_records(self.domain, 0, 10,
                                           state=RECORD_PENDING_STATE)
        self.assertEqual(len(records), 2)

    def test_get_paged_repeat_records_wrong_domain(self):
        records = get_paged_repeat_records('wrong-domain', 0, 2)
        self.assertEqual(len(records), 0)

    def test_iterate_repeat_records(self):
        records = list(iterate_repeat_records(datetime.utcnow(), chunk_size=2))
        self.assertEqual(len(records), 3)  # Should grab all but the succeeded one


class TestRepeatersDBAccessors(TestCase):
    domain = 'test-domain'

    @classmethod
    def setUpClass(cls):
        repeater = CaseRepeater(
            domain=cls.domain,
        )
        cls.repeaters = [
            repeater
        ]
        for repeater in cls.repeaters:
            repeater.save()

    @classmethod
    def tearDownClass(cls):
        for repeater in cls.repeaters:
            repeater.delete()

    def test_get_repeaters_by_domain(self):
        repeaters = get_repeaters_by_domain(self.domain)
        self.assertEqual(len(repeaters), 1)
        self.assertEqual(repeaters[0].__class__, CaseRepeater)
bsd-3-clause
-8,391,664,742,251,161,000
30.293651
90
0.628963
false
3.678172
true
false
false
y4smeen/friendly-spork
camera/take-picture.py
1
3087
from __future__ import print_function
import sys
import cv2


def main(argv):
    # capture from camera at location 0
    cap = cv2.VideoCapture(0)

    # Change the camera setting using the set() function
    # cap.set(cv2.cv.CV_CAP_PROP_EXPOSURE, -6.0)
    # cap.set(cv2.cv.CV_CAP_PROP_GAIN, 4.0)
    # cap.set(cv2.cv.CV_CAP_PROP_BRIGHTNESS, 144.0)
    # cap.set(cv2.cv.CV_CAP_PROP_CONTRAST, 27.0)
    # cap.set(cv2.cv.CV_CAP_PROP_HUE, 13.0)  # 13.0
    # cap.set(cv2.cv.CV_CAP_PROP_SATURATION, 28.0)

    # Read the current setting from the camera
    test = cap.get(cv2.cv.CV_CAP_PROP_POS_MSEC)
    ratio = cap.get(cv2.cv.CV_CAP_PROP_POS_AVI_RATIO)
    frame_rate = cap.get(cv2.cv.CV_CAP_PROP_FPS)
    width = cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
    brightness = cap.get(cv2.cv.CV_CAP_PROP_BRIGHTNESS)
    contrast = cap.get(cv2.cv.CV_CAP_PROP_CONTRAST)
    saturation = cap.get(cv2.cv.CV_CAP_PROP_SATURATION)
    hue = cap.get(cv2.cv.CV_CAP_PROP_HUE)
    gain = cap.get(cv2.cv.CV_CAP_PROP_GAIN)
    exposure = cap.get(cv2.cv.CV_CAP_PROP_EXPOSURE)
    print("Test: ", test)
    print("Ratio: ", ratio)
    print("Frame Rate: ", frame_rate)
    print("Height: ", height)
    print("Width: ", width)
    print("Brightness: ", brightness)
    print("Contrast: ", contrast)
    print("Saturation: ", saturation)
    print("Hue: ", hue)
    print("Gain: ", gain)
    print("Exposure: ", exposure)

    while True:
        ret, img = cap.read()
        cv2.imshow("input", img)

        key = cv2.waitKey(10)
        if key == 27:
            break

    cv2.destroyAllWindows()
    cv2.VideoCapture(0).release()

# 0  CV_CAP_PROP_POS_MSEC Current position of the video file in milliseconds.
# 1  CV_CAP_PROP_POS_FRAMES 0-based index of the frame to be decoded/captured next.
# 2  CV_CAP_PROP_POS_AVI_RATIO Relative position of the video file
# 3  CV_CAP_PROP_FRAME_WIDTH Width of the frames in the video stream.
# 4  CV_CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream.
# 5  CV_CAP_PROP_FPS Frame rate.
# 6  CV_CAP_PROP_FOURCC 4-character code of codec.
# 7  CV_CAP_PROP_FRAME_COUNT Number of frames in the video file.
# 8  CV_CAP_PROP_FORMAT Format of the Mat objects returned by retrieve().
# 9  CV_CAP_PROP_MODE Backend-specific value indicating the current capture mode.
# 10 CV_CAP_PROP_BRIGHTNESS Brightness of the image (only for cameras).
# 11 CV_CAP_PROP_CONTRAST Contrast of the image (only for cameras).
# 12 CV_CAP_PROP_SATURATION Saturation of the image (only for cameras).
# 13 CV_CAP_PROP_HUE Hue of the image (only for cameras).
# 14 CV_CAP_PROP_GAIN Gain of the image (only for cameras).
# 15 CV_CAP_PROP_EXPOSURE Exposure (only for cameras).
# 16 CV_CAP_PROP_CONVERT_RGB Boolean flags indicating whether images should be converted to RGB.
# 17 CV_CAP_PROP_WHITE_BALANCE Currently unsupported
# 18 CV_CAP_PROP_RECTIFICATION Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)


if __name__ == '__main__':
    main(sys.argv)
mit
2,285,477,887,663,510,300
41.875
127
0.675089
false
2.814038
false
false
false
SumiTomohiko/Yog
tools/update_prototype.py
1
8073
#! python
# -*- coding: utf-8 -*-

from glob import glob
import re


class DeclarationInserter(object):

    start = "/* PROTOTYPE_START */"
    end = "/* PROTOTYPE_END */"
    files = {
        "include/yog/dir.h": ["src/dir.c"],
        "include/yog/path.h": ["src/path.c"],
        "include/yog/process.h": ["src/process.c"],
        "include/yog/handle.h": ["src/handle.c"],
        "include/yog/env.h": ["src/env.c"],
        "include/yog/ffi.h": ["src/ffi.c"],
        "include/yog/sprintf.h": ["src/sprintf.c"],
        "include/yog/get_args.h": ["src/get_args.c"],
        "include/yog/coroutine.h": ["src/coroutine.c"],
        "include/yog/misc.h": ["src/misc.c"],
        "include/yog/comparable.h": ["src/comparable.c"],
        "include/yog/set.h": ["src/set.c"],
        "include/yog/module.h": ["src/module.c"],
        "include/yog/file.h": ["src/file.c"],
        "include/yog/dict.h": ["src/dict.c"],
        "include/yog/classmethod.h": ["src/classmethod.c"],
        "include/yog/property.h": ["src/property.c"],
        "include/yog/symbol.h": ["src/symbol.c"],
        "include/yog/repl.h": ["src/repl.c"],
        "include/yog/yog.h": ["src/value.c"],
        "include/yog/builtins.h": ["src/builtins.c"],
        "include/yog/error.h": ["src/error.c"],
        "include/yog/bool.h": ["src/bool.c"],
        "include/yog/fixnum.h": ["src/fixnum.c"],
        "include/yog/arg.h": ["src/arg.c"],
        "include/yog/class.h": ["src/class.c"],
        "include/yog/nil.h": ["src/nil.c"],
        "include/yog/callable.h": ["src/callable.c"],
        "include/yog/binary.h": ["src/binary.c"],
        "include/yog/package.h": ["src/package.c"],
        "include/yog/code.h": ["src/code.c", "src/code.inc"],
        "include/yog/compile.h": ["src/compile.c"],
        "include/yog/array.h": ["src/array.c"],
        "include/yog/parser.h": ["src/lexer.c", "src/parser.y"],
        "include/yog/regexp.h": ["src/regexp.c"],
        "include/yog/string.h": ["src/string.c"],
        "include/yog/encoding.h": ["src/encoding.c"],
        "include/yog/exception.h": ["src/exception.c", "src/stacktrace.c"],
        "include/yog/inst.h.tmpl": ["src/inst.c"],
        "include/yog/table.h": ["src/table.c"],
        "include/yog/gc/mark-sweep-compact.h": ["src/gc/mark-sweep-compact.c"],
        "include/yog/gc/copying.h": ["src/gc/copying.c"],
        "include/yog/gc/mark-sweep.h": ["src/gc/mark-sweep.c"],
        "include/yog/gc/generational.h": ["src/gc/generational.c"],
        "include/yog/thread.h": ["src/thread.c"],
        "include/yog/vm.h": ["src/vm.c"],
        "include/yog/object.h": ["src/object.c"],
        "include/yog/frame.h": ["src/frame.c"],
        "include/yog/float.h": ["src/float.c"],
        "include/yog/eval.h": ["src/eval.c"],
        "include/yog/gc.h": ["src/gc.c"],
        "include/yog/bignum.h": ["src/bignum.c"],
    }

    def _find(self, lines, s, start):
        processed = []
        i = start
        while True:
            try:
                line = lines[i]
            except IndexError:
                break
            try:
                line.index(s)
            except ValueError:
                processed.append(line)
            else:
                processed.append(line)
                break
            i += 1
        return processed, i

    def _rewrite_header(self, header_filename, declarations):
        try:
            fp = open(header_filename)
            try:
                lines = fp.readlines()
            finally:
                fp.close()
        except IOError:
            lines = []
        header, i = self._find(lines, self.start, 0)
        header.append("""
/**
 * DON'T EDIT THIS AREA. HERE IS GENERATED BY update_prototype.py.
 */
""")
        for filename in sorted(declarations):
            functions = declarations[filename]
            header.append("/* %(filename)s */\n" % {"filename": filename})
            for function in functions:
                header.append(function + "\n")
            header.append("\n")
        old, i = self._find(lines, self.end, i)
        header.extend(lines[i:])

        fp = open(header_filename, "wb")
        try:
            fp.write("".join(header))
        finally:
            fp.close()

    re_function = re.compile(r"\A(?P<name>\w+)\s*\([\w\s\*\\(\).,]*\)\Z")
    re_function_pointer \
        = re.compile(r"\A(?P<head>[\w\*]+\s+\(\*)\w+(?P<tail>\)\(.*\))\Z")

    def _split_params(self, line):
        params = []
        paren_depth = 0
        param = []
        for c in line:
            if (paren_depth == 0) and (c == ","):
                params.append("".join(param).strip())
                param = []
            else:
                if (c == "("):
                    paren_depth += 1
                elif (c == ")"):
                    paren_depth -= 1
                param.append(c)
        last_param = "".join(param).strip()
        if last_param != "":
            params.append(last_param)
        return params

    def _get_functions(self, filename):
        declarations = {}
        lines = []
        fp = open(filename)
        try:
            lines = fp.readlines()
        finally:
            fp.close()
        comment = 0
        line = ""
        while True:
            prev_line = line
            try:
                line = lines.pop(0).rstrip()
            except IndexError:
                break
            if line == "#if 0":
                comment += 1
            elif (line == "#endif") and (0 < comment):
                comment -= 1
            if 0 < comment:
                continue
            m = self.re_function.search(line)
            if m is None:
                continue
            if prev_line.startswith("INTERNAL ") \
                    or prev_line.startswith("static ") \
                    or prev_line.startswith("inline "):
                continue
            return_type = prev_line
            name = m.group("name")
            args = []
            n = line.index("(")
            m = line.rindex(")")
            params = self._split_params(line[n + 1:m])
            for param in params:
                param = param.strip()
                if param == "...":
                    args.append(param)
                else:
                    m = self.re_function_pointer.search(param)
                    if m:
                        type_ = m.group("head") + m.group("tail")
                    else:
                        param = param.strip()
                        param = param.split(" ")
                        try:
                            n = param[-1].rindex("*")
                        except ValueError:
                            type_ = " ".join(param[:-1])
                        else:
                            type_ = " ".join(param[:-1]) \
                                    + param[-1][:n + 1]
                    args.append(type_)
            declarations[name] = \
                "%(return_type)s %(name)s(%(args)s);" % {
                    "return_type": return_type,
                    "name": name,
                    "args": ", ".join(args)}
        retval = []
        for name in sorted(declarations):
            retval.append(declarations[name])
        return retval

    def do(self):
        for header, sources in self.files.items():
            declarations = {}
            for source in sources:
                declarations[source] = self._get_functions(source)
            self._rewrite_header(header, declarations)


if __name__ == "__main__":
    inserter = DeclarationInserter()
    inserter.do()

# vim: tabstop=4 shiftwidth=4 expandtab softtabstop=4
mit
-4,793,780,574,572,863,000
34.563877
119
0.440852
false
3.618557
false
false
false
jopcode/whoUR
whoUR.py
1
1600
import sys, os
import argparse
import urllib3, urllib
import re

# Modules
from libs.colors import *
from libs.selectChoice import select_choice

Parser = argparse.ArgumentParser(prog='whoUR.py',
                                 description='Tool for information gathering')

'''
this will be used in the future
Parser.add_argument('-d', '--dic-path',
                    help='Dictionaries Path, Example: -d /root/',
                    action='store', default='dicts/', dest='dicPath')
Parser.add_argument('-a', '--dic-adminspage',
                    help='Admin Page dictionary, Example: -a adminspage.txt',
                    action='store', default='adminspage.txt',
                    dest='dicAdminsPage')
Args = Parser.parse_args()

# Dictionaries
dic_adminsPage = Args.dicPath + '/' + Args.dicAdminsPage
'''

def main():
    print('\n')
    print(B+' _ ___ ')
    print(B+' __ __ | |__ ___ _ _ _ __ |__ \ ')
    print(B+' \ \ /\ / / | \_ \ / _ \ | | | | | |__| / / ')
    print(B+' \ V V / | | | | | (_) | | |_| | | | |_| ')
    print(B+' \_/\_/ |_| |_| \___/ \__,_| |_| (_) ')
    print('\n')
    print(lC+'Beta 1.6 JopCode')
    print('\n')
    select_choice()
    print(lG+'\n---------')
    print(lG+'- C Y A -')
    print(lG+'---------\n')
    print(lR+'[+] Script by JopCode\n')

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print(lG+'\n---------')
        print(lG+'- C Y A -')
        print(lG+'---------\n')
        print(lR+'[+] Script by JopCode\n')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
gpl-3.0
5,525,027,244,532,273,000
31
166
0.476875
false
3.11284
false
false
false
fake-name/ReadableWebProxy
WebMirror/management/rss_parser_funcs/feed_parse_extractWanderingmusetranslationWordpressCom.py
1
1211
def extractWanderingmusetranslationWordpressCom(item):
    '''
    Parser for 'wanderingmusetranslation.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    chp_prefixes = [
        ('SH Chapter ', 'Swain Hakushaku', 'translated'),
        ('AS Chapter ',
         'My Status As An Assassin Is Obviously Stronger Than That Of the Hero’s',
         'translated'),
        ('Cat ', 'Me and My Beloved Cat (Girlfriend)', 'translated'),
    ]
    if item['tags'] == ['Uncategorized']:
        for prefix, series, tl_type in chp_prefixes:
            if item['title'].lower().startswith(prefix.lower()):
                return buildReleaseMessageWithType(item, series, vol, chp,
                                                   frag=frag, postfix=postfix,
                                                   tl_type=tl_type)

    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp,
                                               frag=frag, postfix=postfix,
                                               tl_type=tl_type)

    return False
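
# Usage sketch (the feed item is hypothetical; extractVolChapterFragmentPostfix
# and buildReleaseMessageWithType are provided by the surrounding package, so
# the exact return value depends on their behavior):
#
#     item = {'title': 'SH Chapter 12', 'tags': ['Uncategorized']}
#     extractWanderingmusetranslationWordpressCom(item)
#     # would typically yield a release message for 'Swain Hakushaku', ch. 12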
bsd-3-clause
9,097,084,817,052,766,000
35.666667
122
0.595533
false
3.321429
false
false
false
portableant/open-context-py
opencontext_py/apps/edit/inputs/rules/models.py
1
1257
import collections
from jsonfield import JSONField
from datetime import datetime
from django.utils import timezone
from django.db import models


# Stores information about rules for a data entry form
class InputRule(models.Model):

    uuid = models.CharField(max_length=50, primary_key=True)  # uuid for the rule itself
    project_uuid = models.CharField(max_length=50, db_index=True)
    profile_uuid = models.CharField(max_length=50, db_index=True)  # uuid for the input profile
    label = models.CharField(max_length=200)  # label for data entry form
    note = models.TextField()  # note for instructions in data entry
    rules = models.TextField()  # for JSON data for check values
    t_note = models.TextField()  # note for condition being True
    f_note = models.TextField()  # note for condition being False
    created = models.DateTimeField()
    updated = models.DateTimeField(auto_now=True)

    def save(self, *args, **kwargs):
        """
        saves a record with a creation date
        """
        if self.created is None:
            self.created = datetime.now()
        super(InputRule, self).save(*args, **kwargs)

    class Meta:
        db_table = 'crt_rules'
        ordering = ['profile_uuid', 'label']
gpl-3.0
2,131,226,074,972,182,500
38.28125
95
0.674622
false
4.067961
false
false
false
henriquegemignani/randovania
randovania/game_description/node_search.py
1
2783
from typing import Dict, Optional

import networkx

from randovania.game_description.area import Area
from randovania.game_description.game_patches import GamePatches
from randovania.game_description.node import Node, DockNode, TeleporterNode, PickupNode, ResourceNode
from randovania.game_description.resources.pickup_index import PickupIndex
from randovania.game_description.resources.resource_info import ResourceInfo
from randovania.game_description.world_list import WorldList


def distances_to_node(world_list: WorldList, starting_node: Node,
                      *,
                      ignore_elevators: bool = True,
                      cutoff: Optional[int] = None,
                      patches: Optional[GamePatches] = None,
                      ) -> Dict[Area, int]:
    """
    Compute the shortest distance from a node to all reachable areas.
    :param world_list:
    :param starting_node:
    :param ignore_elevators:
    :param cutoff: Exclude areas with a length longer than cutoff.
    :param patches:
    :return: Dict keyed by area to shortest distance to starting_node.
    """
    g = networkx.DiGraph()

    dock_connections = patches.dock_connection if patches is not None else {}
    elevator_connections = patches.elevator_connection if patches is not None else {}

    for area in world_list.all_areas:
        g.add_node(area)

    for world in world_list.worlds:
        for area in world.areas:
            new_areas = set()
            for node in area.nodes:
                if isinstance(node, DockNode):
                    connection = dock_connections.get((area.area_asset_id, node.dock_index),
                                                      node.default_connection)
                    new_areas.add(world.area_by_asset_id(connection.area_asset_id))

                elif isinstance(node, TeleporterNode) and not ignore_elevators:
                    connection = elevator_connections.get(node.teleporter_instance_id,
                                                          node.default_connection)
                    new_areas.add(world_list.area_by_area_location(connection))

            for next_area in new_areas:
                g.add_edge(area, next_area)

    return networkx.single_source_shortest_path_length(g, world_list.nodes_to_area(starting_node), cutoff)


def pickup_index_to_node(world_list: WorldList, index: PickupIndex) -> PickupNode:
    for node in world_list.all_nodes:
        if isinstance(node, PickupNode) and node.pickup_index == index:
            return node
    raise ValueError(f"PickupNode with {index} not found.")


def node_with_resource(world_list: WorldList, resource: ResourceInfo) -> ResourceNode:
    for node in world_list.all_nodes:
        if isinstance(node, ResourceNode) and node.resource() == resource:
            return node
    raise ValueError(f"ResourceNode with {resource} not found.")
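
# Usage sketch (assumes a loaded game description; names follow the signatures
# above, the values are placeholders):
#
#   distances = distances_to_node(world_list, starting_node, cutoff=5)
#   # -> {Area: hop count}, ignoring elevators unless ignore_elevators=False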
gpl-3.0
-1,139,548,063,911,922,300
42.484375
117
0.673015
false
3.903226
false
false
false
m-ober/byceps
scripts/import_permissions_and_roles.py
1
1418
#!/usr/bin/env python
"""Import permissions, roles, and their relations from a JSON file.

:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""

import json

import click

from byceps.services.authorization import service as authz_service
from byceps.util.system import get_config_filename_from_env_or_exit

from _util import app_context


@click.command()
@click.argument('json_file', type=click.File())
def execute(json_file):
    data = json.load(json_file)

    permissions = data['permissions']
    roles = data['roles']

    click.echo(f'Importing {len(permissions)} permissions ... ', nl=False)
    create_permissions(permissions)
    click.secho('done.', fg='green')

    click.echo(f'Importing {len(roles)} roles ... ', nl=False)
    create_roles(roles)
    click.secho('done.', fg='green')


def create_permissions(permissions):
    for permission in permissions:
        authz_service.create_permission(permission['id'], permission['title'])


def create_roles(roles):
    for role in roles:
        role_id = role['id']

        authz_service.create_role(role_id, role['title'])

        for permission_id in role['assigned_permissions']:
            authz_service.assign_permission_to_role(permission_id, role_id)


if __name__ == '__main__':
    config_filename = get_config_filename_from_env_or_exit()
    with app_context(config_filename):
        execute()
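
# Expected input shape (inferred from create_permissions/create_roles above;
# the ids and titles here are placeholders):
#
#   {
#     "permissions": [{"id": "board.view", "title": "View boards"}],
#     "roles": [{"id": "admin", "title": "Admin",
#                "assigned_permissions": ["board.view"]}]
#   }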
bsd-3-clause
-46,085,055,061,123,140
25.259259
78
0.687588
false
3.571788
false
false
false
platformio/platformio
platformio/proc.py
1
5706
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import subprocess
import sys
from threading import Thread

from platformio import exception
from platformio.compat import (
    WINDOWS,
    get_filesystem_encoding,
    get_locale_encoding,
    string_types,
)


class AsyncPipeBase(object):
    def __init__(self):
        self._fd_read, self._fd_write = os.pipe()
        self._pipe_reader = os.fdopen(self._fd_read)
        self._buffer = ""
        self._thread = Thread(target=self.run)
        self._thread.start()

    def get_buffer(self):
        return self._buffer

    def fileno(self):
        return self._fd_write

    def run(self):
        try:
            self.do_reading()
        except (KeyboardInterrupt, SystemExit, IOError):
            self.close()

    def do_reading(self):
        raise NotImplementedError()

    def close(self):
        self._buffer = ""
        os.close(self._fd_write)
        self._thread.join()


class BuildAsyncPipe(AsyncPipeBase):
    def __init__(self, line_callback, data_callback):
        self.line_callback = line_callback
        self.data_callback = data_callback
        super(BuildAsyncPipe, self).__init__()

    def do_reading(self):
        line = ""
        print_immediately = False

        for byte in iter(lambda: self._pipe_reader.read(1), ""):
            self._buffer += byte

            if line and byte.strip() and line[-3:] == (byte * 3):
                print_immediately = True

            if print_immediately:
                # leftover bytes
                if line:
                    self.data_callback(line)
                    line = ""

                self.data_callback(byte)
                if byte == "\n":
                    print_immediately = False
            else:
                line += byte
                if byte != "\n":
                    continue
                self.line_callback(line)
                line = ""

        self._pipe_reader.close()


class LineBufferedAsyncPipe(AsyncPipeBase):
    def __init__(self, line_callback):
        self.line_callback = line_callback
        super(LineBufferedAsyncPipe, self).__init__()

    def do_reading(self):
        for line in iter(self._pipe_reader.readline, ""):
            self._buffer += line
            self.line_callback(line)
        self._pipe_reader.close()


def exec_command(*args, **kwargs):
    result = {"out": None, "err": None, "returncode": None}

    default = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    default.update(kwargs)
    kwargs = default

    p = subprocess.Popen(*args, **kwargs)
    try:
        result["out"], result["err"] = p.communicate()
        result["returncode"] = p.returncode
    except KeyboardInterrupt:
        raise exception.AbortedByUser()
    finally:
        for s in ("stdout", "stderr"):
            if isinstance(kwargs[s], AsyncPipeBase):
                kwargs[s].close()

    for s in ("stdout", "stderr"):
        if isinstance(kwargs[s], AsyncPipeBase):
            result[s[3:]] = kwargs[s].get_buffer()

    for k, v in result.items():
        if isinstance(result[k], bytes):
            try:
                result[k] = result[k].decode(
                    get_locale_encoding() or get_filesystem_encoding()
                )
            except UnicodeDecodeError:
                result[k] = result[k].decode("latin-1")
        if v and isinstance(v, string_types):
            result[k] = result[k].strip()

    return result


def is_ci():
    return os.getenv("CI", "").lower() == "true"


def is_container():
    if os.path.exists("/.dockerenv"):
        return True
    if not os.path.isfile("/proc/1/cgroup"):
        return False
    with open("/proc/1/cgroup") as fp:
        return ":/docker/" in fp.read()


def get_pythonexe_path():
    return os.environ.get("PYTHONEXEPATH", os.path.normpath(sys.executable))


def copy_pythonpath_to_osenv():
    _PYTHONPATH = []
    if "PYTHONPATH" in os.environ:
        _PYTHONPATH = os.environ.get("PYTHONPATH").split(os.pathsep)
    for p in os.sys.path:
        conditions = [p not in _PYTHONPATH]
        if not WINDOWS:
            conditions.append(
                os.path.isdir(os.path.join(p, "click"))
                or os.path.isdir(os.path.join(p, "platformio"))
            )
        if all(conditions):
            _PYTHONPATH.append(p)
    os.environ["PYTHONPATH"] = os.pathsep.join(_PYTHONPATH)


def where_is_program(program, envpath=None):
    env = os.environ
    if envpath:
        env["PATH"] = envpath

    # try OS's built-in commands
    try:
        result = exec_command(["where" if WINDOWS else "which", program], env=env)
        if result["returncode"] == 0 and os.path.isfile(result["out"].strip()):
            return result["out"].strip()
    except OSError:
        pass

    # look up in $PATH
    for bin_dir in env.get("PATH", "").split(os.pathsep):
        if os.path.isfile(os.path.join(bin_dir, program)):
            return os.path.join(bin_dir, program)
        if os.path.isfile(os.path.join(bin_dir, "%s.exe" % program)):
            return os.path.join(bin_dir, "%s.exe" % program)

    return program
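
# Usage sketch for exec_command above (assumes a `python` executable on PATH):
#
#   result = exec_command(["python", "--version"])
#   print(result["returncode"], result["out"] or result["err"])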
apache-2.0
-7,367,964,481,521,665,000
28.564767
82
0.58307
false
3.935172
false
false
false
monaparty/counterparty-lib
counterpartylib/lib/messages/destroy.py
1
4261
#! /usr/bin/python3

"""Destroy a quantity of an asset."""

import struct
import json
import logging
logger = logging.getLogger(__name__)

from counterpartylib.lib import util
from counterpartylib.lib import config
from counterpartylib.lib import script
from counterpartylib.lib import message_type
from counterpartylib.lib.script import AddressError
from counterpartylib.lib.exceptions import *

FORMAT = '>QQ8s'
LENGTH = 8 + 8 + 8
ID = 110


def initialise(db):
    cursor = db.cursor()
    cursor.execute('''CREATE TABLE IF NOT EXISTS destructions(
                      tx_index INTEGER PRIMARY KEY,
                      tx_hash TEXT UNIQUE,
                      block_index INTEGER,
                      source TEXT,
                      asset INTEGER,
                      quantity INTEGER,
                      tag TEXT,
                      status TEXT,
                      FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index))
                   ''')
    cursor.execute('''CREATE INDEX IF NOT EXISTS status_idx ON destructions (status)''')
    cursor.execute('''CREATE INDEX IF NOT EXISTS address_idx ON destructions (source)''')


def pack(db, asset, quantity, tag):
    data = message_type.pack(ID)
    if isinstance(tag, str):
        tag = bytes.fromhex(tag)
    data += struct.pack(FORMAT, util.get_asset_id(db, asset, util.CURRENT_BLOCK_INDEX), quantity, tag)
    return data


def unpack(db, message):
    try:
        asset_id, quantity, tag = struct.unpack(FORMAT, message)
        asset = util.get_asset_name(db, asset_id, util.CURRENT_BLOCK_INDEX)
    except struct.error:
        raise UnpackError('could not unpack')
    except AssetIDError:
        raise UnpackError('asset id invalid')
    return asset, quantity, tag


def validate(db, source, destination, asset, quantity):
    try:
        util.get_asset_id(db, asset, util.CURRENT_BLOCK_INDEX)
    except AssetError:
        raise ValidateError('asset invalid')

    try:
        script.validate(source)
    except AddressError:
        raise ValidateError('source address invalid')

    if destination:
        raise ValidateError('destination exists')

    if asset == config.BTC:
        raise ValidateError('cannot destroy {}'.format(config.BTC))

    if type(quantity) != int:
        raise ValidateError('quantity not integer')

    if quantity > config.MAX_INT:
        raise ValidateError('integer overflow, quantity too large')

    if quantity < 0:
        raise ValidateError('quantity negative')

    if util.get_balance(db, source, asset) < quantity:
        raise BalanceError('balance insufficient')


def compose(db, source, asset, quantity, tag):
    # resolve subassets
    asset = util.resolve_subasset_longname(db, asset)

    validate(db, source, None, asset, quantity)
    data = pack(db, asset, quantity, tag)

    return (source, [], data)


def parse(db, tx, message):
    status = 'valid'
    asset, quantity, tag = None, None, None

    try:
        asset, quantity, tag = unpack(db, message)
        validate(db, tx['source'], tx['destination'], asset, quantity)
        util.debit(db, tx['source'], asset, quantity, 'destroy', tx['tx_hash'])
    except UnpackError as e:
        status = 'invalid: ' + ''.join(e.args)
    except (ValidateError, BalanceError) as e:
        status = 'invalid: ' + ''.join(e.args)

    bindings = {
        'tx_index': tx['tx_index'],
        'tx_hash': tx['tx_hash'],
        'block_index': tx['block_index'],
        'source': tx['source'],
        'asset': asset,
        'quantity': quantity,
        'tag': tag,
        'status': status,
    }
    if "integer overflow" not in status:
        sql = 'insert into destructions values(:tx_index, :tx_hash, :block_index, :source, :asset, :quantity, :tag, :status)'
        cursor = db.cursor()
        cursor.execute(sql, bindings)
    else:
        logger.warn("Not storing [destroy] tx [%s]: %s" % (tx['tx_hash'], status))
        logger.debug("Bindings: %s" % (json.dumps(bindings),))

# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
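
# Wire-format sketch: the payload after the message-type id follows
# FORMAT = '>QQ8s', i.e. big-endian asset id (8 bytes) + quantity (8 bytes) +
# an 8-byte tag — LENGTH = 24 bytes in total. A standalone struct round-trip
# (the values are placeholders, not real asset ids):
#
#   import struct
#   payload = struct.pack('>QQ8s', 1, 1000, b'8bytetag')
#   struct.unpack('>QQ8s', payload)  # -> (1, 1000, b'8bytetag')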
mit
-1,950,463,795,646,828,500
29.007042
125
0.596574
false
4.073614
false
false
false
DeltaEpsilon-HackFMI2/FMICalendar-REST
venv/lib/python2.7/site-packages/rest_framework/views.py
1
15361
""" Provides an APIView class that is the base of all views in REST framework. """ from __future__ import unicode_literals from django.core.exceptions import PermissionDenied from django.http import Http404 from django.utils.datastructures import SortedDict from django.views.decorators.csrf import csrf_exempt from rest_framework import status, exceptions from rest_framework.compat import smart_text, HttpResponseBase, View from rest_framework.request import Request from rest_framework.response import Response from rest_framework.settings import api_settings from rest_framework.utils import formatting def get_view_name(view_cls, suffix=None): """ Given a view class, return a textual name to represent the view. This name is used in the browsable API, and in OPTIONS responses. This function is the default for the `VIEW_NAME_FUNCTION` setting. """ name = view_cls.__name__ name = formatting.remove_trailing_string(name, 'View') name = formatting.remove_trailing_string(name, 'ViewSet') name = formatting.camelcase_to_spaces(name) if suffix: name += ' ' + suffix return name def get_view_description(view_cls, html=False): """ Given a view class, return a textual description to represent the view. This name is used in the browsable API, and in OPTIONS responses. This function is the default for the `VIEW_DESCRIPTION_FUNCTION` setting. """ description = view_cls.__doc__ or '' description = formatting.dedent(smart_text(description)) if html: return formatting.markup_description(description) return description def exception_handler(exc): """ Returns the response that should be used for any given exception. By default we handle the REST framework `APIException`, and also Django's builtin `Http404` and `PermissionDenied` exceptions. Any unhandled exceptions may return `None`, which will cause a 500 error to be raised. """ if isinstance(exc, exceptions.APIException): headers = {} if getattr(exc, 'auth_header', None): headers['WWW-Authenticate'] = exc.auth_header if getattr(exc, 'wait', None): headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait return Response({'detail': exc.detail}, status=exc.status_code, headers=headers) elif isinstance(exc, Http404): return Response({'detail': 'Not found'}, status=status.HTTP_404_NOT_FOUND) elif isinstance(exc, PermissionDenied): return Response({'detail': 'Permission denied'}, status=status.HTTP_403_FORBIDDEN) # Note: Unhandled exceptions will raise a 500 error. return None class APIView(View): # The following policies may be set at either globally, or per-view. renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES parser_classes = api_settings.DEFAULT_PARSER_CLASSES authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES throttle_classes = api_settings.DEFAULT_THROTTLE_CLASSES permission_classes = api_settings.DEFAULT_PERMISSION_CLASSES content_negotiation_class = api_settings.DEFAULT_CONTENT_NEGOTIATION_CLASS # Allow dependancy injection of other settings to make testing easier. settings = api_settings @classmethod def as_view(cls, **initkwargs): """ Store the original class on the view function. This allows us to discover information about the view when we do URL reverse lookups. Used for breadcrumb generation. """ view = super(APIView, cls).as_view(**initkwargs) view.cls = cls return view @property def allowed_methods(self): """ Wrap Django's private `_allowed_methods` interface in a public property. """ return self._allowed_methods() @property def default_response_headers(self): # TODO: deprecate? 
# TODO: Only vary by accept if multiple renderers return { 'Allow': ', '.join(self.allowed_methods), 'Vary': 'Accept' } def http_method_not_allowed(self, request, *args, **kwargs): """ If `request.method` does not correspond to a handler method, determine what kind of exception to raise. """ raise exceptions.MethodNotAllowed(request.method) def permission_denied(self, request): """ If request is not permitted, determine what kind of exception to raise. """ if not self.request.successful_authenticator: raise exceptions.NotAuthenticated() raise exceptions.PermissionDenied() def throttled(self, request, wait): """ If request is throttled, determine what kind of exception to raise. """ raise exceptions.Throttled(wait) def get_authenticate_header(self, request): """ If a request is unauthenticated, determine the WWW-Authenticate header to use for 401 responses, if any. """ authenticators = self.get_authenticators() if authenticators: return authenticators[0].authenticate_header(request) def get_parser_context(self, http_request): """ Returns a dict that is passed through to Parser.parse(), as the `parser_context` keyword argument. """ # Note: Additionally `request` will also be added to the context # by the Request object. return { 'view': self, 'args': getattr(self, 'args', ()), 'kwargs': getattr(self, 'kwargs', {}) } def get_renderer_context(self): """ Returns a dict that is passed through to Renderer.render(), as the `renderer_context` keyword argument. """ # Note: Additionally 'response' will also be added to the context, # by the Response object. return { 'view': self, 'args': getattr(self, 'args', ()), 'kwargs': getattr(self, 'kwargs', {}), 'request': getattr(self, 'request', None) } def get_view_name(self): """ Return the view name, as used in OPTIONS responses and in the browsable API. """ func = self.settings.VIEW_NAME_FUNCTION return func(self.__class__, getattr(self, 'suffix', None)) def get_view_description(self, html=False): """ Return some descriptive text for the view, as used in OPTIONS responses and in the browsable API. """ func = self.settings.VIEW_DESCRIPTION_FUNCTION return func(self.__class__, html) # API policy instantiation methods def get_format_suffix(self, **kwargs): """ Determine if the request includes a '.json' style format suffix """ if self.settings.FORMAT_SUFFIX_KWARG: return kwargs.get(self.settings.FORMAT_SUFFIX_KWARG) def get_renderers(self): """ Instantiates and returns the list of renderers that this view can use. """ return [renderer() for renderer in self.renderer_classes] def get_parsers(self): """ Instantiates and returns the list of parsers that this view can use. """ return [parser() for parser in self.parser_classes] def get_authenticators(self): """ Instantiates and returns the list of authenticators that this view can use. """ return [auth() for auth in self.authentication_classes] def get_permissions(self): """ Instantiates and returns the list of permissions that this view requires. """ return [permission() for permission in self.permission_classes] def get_throttles(self): """ Instantiates and returns the list of throttles that this view uses. """ return [throttle() for throttle in self.throttle_classes] def get_content_negotiator(self): """ Instantiate and return the content negotiation class to use. 
""" if not getattr(self, '_negotiator', None): self._negotiator = self.content_negotiation_class() return self._negotiator # API policy implementation methods def perform_content_negotiation(self, request, force=False): """ Determine which renderer and media type to use render the response. """ renderers = self.get_renderers() conneg = self.get_content_negotiator() try: return conneg.select_renderer(request, renderers, self.format_kwarg) except Exception: if force: return (renderers[0], renderers[0].media_type) raise def perform_authentication(self, request): """ Perform authentication on the incoming request. Note that if you override this and simply 'pass', then authentication will instead be performed lazily, the first time either `request.user` or `request.auth` is accessed. """ request.user def check_permissions(self, request): """ Check if the request should be permitted. Raises an appropriate exception if the request is not permitted. """ for permission in self.get_permissions(): if not permission.has_permission(request, self): self.permission_denied(request) def check_object_permissions(self, request, obj): """ Check if the request should be permitted for a given object. Raises an appropriate exception if the request is not permitted. """ for permission in self.get_permissions(): if not permission.has_object_permission(request, self, obj): self.permission_denied(request) def check_throttles(self, request): """ Check if request should be throttled. Raises an appropriate exception if the request is throttled. """ for throttle in self.get_throttles(): if not throttle.allow_request(request, self): self.throttled(request, throttle.wait()) # Dispatch methods def initialize_request(self, request, *args, **kargs): """ Returns the initial request object. """ parser_context = self.get_parser_context(request) return Request(request, parsers=self.get_parsers(), authenticators=self.get_authenticators(), negotiator=self.get_content_negotiator(), parser_context=parser_context) def initial(self, request, *args, **kwargs): """ Runs anything that needs to occur prior to calling the method handler. """ self.format_kwarg = self.get_format_suffix(**kwargs) # Ensure that the incoming request is permitted self.perform_authentication(request) self.check_permissions(request) self.check_throttles(request) # Perform content negotiation and store the accepted info on the request neg = self.perform_content_negotiation(request) request.accepted_renderer, request.accepted_media_type = neg def finalize_response(self, request, response, *args, **kwargs): """ Returns the final response object. """ # Make the error obvious if a proper response is not returned assert isinstance(response, HttpResponseBase), ( 'Expected a `Response`, `HttpResponse` or `HttpStreamingResponse` ' 'to be returned from the view, but received a `%s`' % type(response) ) if isinstance(response, Response): if not getattr(request, 'accepted_renderer', None): neg = self.perform_content_negotiation(request, force=True) request.accepted_renderer, request.accepted_media_type = neg response.accepted_renderer = request.accepted_renderer response.accepted_media_type = request.accepted_media_type response.renderer_context = self.get_renderer_context() for key, value in self.headers.items(): response[key] = value return response def handle_exception(self, exc): """ Handle any exception that occurs, by returning an appropriate response, or re-raising the error. 
""" if isinstance(exc, (exceptions.NotAuthenticated, exceptions.AuthenticationFailed)): # WWW-Authenticate header for 401 responses, else coerce to 403 auth_header = self.get_authenticate_header(self.request) if auth_header: exc.auth_header = auth_header else: exc.status_code = status.HTTP_403_FORBIDDEN response = self.settings.EXCEPTION_HANDLER(exc) if response is None: raise response.exception = True return response # Note: session based authentication is explicitly CSRF validated, # all other authentication is CSRF exempt. @csrf_exempt def dispatch(self, request, *args, **kwargs): """ `.dispatch()` is pretty much the same as Django's regular dispatch, but with extra hooks for startup, finalize, and exception handling. """ self.args = args self.kwargs = kwargs request = self.initialize_request(request, *args, **kwargs) self.request = request self.headers = self.default_response_headers # deprecate? try: self.initial(request, *args, **kwargs) # Get the appropriate handler method if request.method.lower() in self.http_method_names: handler = getattr(self, request.method.lower(), self.http_method_not_allowed) else: handler = self.http_method_not_allowed response = handler(request, *args, **kwargs) except Exception as exc: response = self.handle_exception(exc) self.response = self.finalize_response(request, response, *args, **kwargs) return self.response def options(self, request, *args, **kwargs): """ Handler method for HTTP 'OPTIONS' request. We may as well implement this as Django will otherwise provide a less useful default implementation. """ return Response(self.metadata(request), status=status.HTTP_200_OK) def metadata(self, request): """ Return a dictionary of metadata about the view. Used to return responses for OPTIONS requests. """ # By default we can't provide any form-like information, however the # generic views override this implementation and add additional # information for POST and PUT methods, based on the serializer. ret = SortedDict() ret['name'] = self.get_view_name() ret['description'] = self.get_view_description() ret['renders'] = [renderer.media_type for renderer in self.renderer_classes] ret['parses'] = [parser.media_type for parser in self.parser_classes] return ret
mit
-931,368,044,246,631,700
35.143529
84
0.627238
false
4.61985
false
false
false
monikagrabowska/osf.io
osf/models/base.py
1
26122
import logging import random from datetime import datetime import bson import modularodm.exceptions import pytz from django.contrib.contenttypes.fields import (GenericForeignKey, GenericRelation) from django.contrib.contenttypes.models import ContentType from django.contrib.postgres.fields import ArrayField from django.core.exceptions import MultipleObjectsReturned from django.core.exceptions import ValidationError as DjangoValidationError from django.db import models from django.db.models import F from django.db.models import ForeignKey from django.db.models import Q from django.db.models.signals import post_save from django.dispatch import receiver from django.utils import timezone from osf.utils.caching import cached_property from osf.exceptions import ValidationError from osf.modm_compat import to_django_query from osf.utils.datetime_aware_jsonfield import (DateTimeAwareJSONField, coerce_nonnaive_datetimes) from osf.utils.fields import LowercaseCharField, NonNaiveDateTimeField ALPHABET = '23456789abcdefghjkmnpqrstuvwxyz' logger = logging.getLogger(__name__) def generate_guid(length=5): while True: guid_id = ''.join(random.sample(ALPHABET, length)) try: # is the guid in the blacklist BlackListGuid.objects.get(guid=guid_id) except BlackListGuid.DoesNotExist: # it's not, check and see if it's already in the database try: Guid.objects.get(_id=guid_id) except Guid.DoesNotExist: # valid and unique guid return guid_id def generate_object_id(): return str(bson.ObjectId()) class MODMCompatibilityQuerySet(models.QuerySet): def __getitem__(self, k): item = super(MODMCompatibilityQuerySet, self).__getitem__(k) if hasattr(item, 'wrapped'): return item.wrapped() else: return item def __iter__(self): items = super(MODMCompatibilityQuerySet, self).__iter__() for item in items: if hasattr(item, 'wrapped'): yield item.wrapped() else: yield item def sort(self, *fields): # Fields are passed in as e.g. [('title', 1), ('date_created', -1)] if isinstance(fields[0], list): fields = fields[0] def sort_key(item): if isinstance(item, basestring): return item elif isinstance(item, tuple): field_name, direction = item prefix = '-' if direction == -1 else '' return ''.join([prefix, field_name]) sort_keys = [sort_key(each) for each in fields] return self.order_by(*sort_keys) def limit(self, n): return self[:n] class BaseModel(models.Model): """Base model that acts makes subclasses mostly compatible with the modular-odm ``StoredObject`` interface. 
""" migration_page_size = 50000 objects = MODMCompatibilityQuerySet.as_manager() class Meta: abstract = True @classmethod def load(cls, data): try: if issubclass(cls, GuidMixin): return cls.objects.get(guids___id=data) elif issubclass(cls, ObjectIDMixin): return cls.objects.get(_id=data) elif isinstance(data, basestring): # Some models (CitationStyle) have an _id that is not a bson # Looking up things by pk will never work with a basestring return cls.objects.get(_id=data) return cls.objects.get(pk=data) except cls.DoesNotExist: return None @classmethod def find_one(cls, query): try: return cls.objects.get(to_django_query(query, model_cls=cls)) except cls.DoesNotExist: raise modularodm.exceptions.NoResultsFound() except cls.MultipleObjectsReturned as e: raise modularodm.exceptions.MultipleResultsFound(*e.args) @classmethod def find(cls, query=None): if not query: return cls.objects.all() else: return cls.objects.filter(to_django_query(query, model_cls=cls)) @classmethod def remove(cls, query=None): return cls.find(query).delete() @classmethod def remove_one(cls, obj): if obj.pk: return obj.delete() @classmethod def migrate_from_modm(cls, modm_obj): """ Given a modm object, make a django object with the same local fields. This is a base method that may work for simple objects. It should be customized in the child class if it doesn't work. :param modm_obj: :return: """ django_obj = cls() local_django_fields = set([x.name for x in django_obj._meta.get_fields() if not x.is_relation]) intersecting_fields = set(modm_obj.to_storage().keys()).intersection( set(local_django_fields)) for field in intersecting_fields: modm_value = getattr(modm_obj, field) if modm_value is None: continue if isinstance(modm_value, datetime): modm_value = pytz.utc.localize(modm_value) # TODO Remove this after migration if isinstance(django_obj._meta.get_field(field), DateTimeAwareJSONField): modm_value = coerce_nonnaive_datetimes(modm_value) setattr(django_obj, field, modm_value) return django_obj @property def _primary_name(self): return '_id' def reload(self): return self.refresh_from_db() def _natural_key(self): return self.pk def clone(self): """Create a new, unsaved copy of this object.""" copy = self.__class__.objects.get(pk=self.pk) copy.id = None # empty all the fks fk_field_names = [f.name for f in self._meta.model._meta.get_fields() if isinstance(f, (ForeignKey, GenericForeignKey))] for field_name in fk_field_names: setattr(copy, field_name, None) try: copy._id = bson.ObjectId() except AttributeError: pass return copy def save(self, *args, **kwargs): # Make Django validate on save (like modm) if not kwargs.get('force_insert') and not kwargs.get('force_update'): try: self.full_clean() except DjangoValidationError as err: raise ValidationError(*err.args) return super(BaseModel, self).save(*args, **kwargs) # TODO: Rename to Identifier? class Guid(BaseModel): """Stores either a short guid or long object_id for any model that inherits from BaseIDMixin. Each ID field (e.g. 'guid', 'object_id') MUST have an accompanying method, named with 'initialize_<ID type>' (e.g. 'initialize_guid') that generates and sets the field. 
""" primary_identifier_name = '_id' # TODO DELETE ME POST MIGRATION modm_query = None migration_page_size = 500000 # /TODO DELETE ME POST MIGRATION id = models.AutoField(primary_key=True) _id = LowercaseCharField(max_length=255, null=False, blank=False, default=generate_guid, db_index=True, unique=True) referent = GenericForeignKey() content_type = models.ForeignKey(ContentType, null=True, blank=True) object_id = models.PositiveIntegerField(null=True, blank=True) created = NonNaiveDateTimeField(db_index=True, default=timezone.now) # auto_now_add=True) # Override load in order to load by GUID @classmethod def load(cls, data): try: return cls.objects.get(_id=data) except cls.DoesNotExist: return None def reload(self): del self._referent_cache return super(Guid, self).reload() @classmethod def migrate_from_modm(cls, modm_obj, object_id=None, content_type=None): """ Given a modm Guid make a django Guid :param object_id: :param content_type: :param modm_obj: :return: """ django_obj = cls() if modm_obj._id != modm_obj.referent._id: # if the object has a BSON id, get the created date from that django_obj.created = bson.ObjectId(modm_obj.referent._id).generation_time else: # just make it now django_obj.created = timezone.now() django_obj._id = modm_obj._id if object_id and content_type: # if the referent was passed set the GFK to point to it django_obj.content_type = content_type django_obj.object_id = object_id return django_obj class Meta: ordering = ['-created'] get_latest_by = 'created' index_together = ( ('content_type', 'object_id', 'created'), ) class BlackListGuid(BaseModel): # TODO DELETE ME POST MIGRATION modm_model_path = 'framework.guid.model.BlacklistGuid' primary_identifier_name = 'guid' modm_query = None migration_page_size = 500000 # /TODO DELETE ME POST MIGRATION id = models.AutoField(primary_key=True) guid = LowercaseCharField(max_length=255, unique=True, db_index=True) @property def _id(self): return self.guid @classmethod def migrate_from_modm(cls, modm_obj): """ Given a modm BlacklistGuid make a django BlackListGuid :param modm_obj: :return: """ django_obj = cls() django_obj.guid = modm_obj._id return django_obj def generate_guid_instance(): return Guid.objects.create().id class PKIDStr(str): def __new__(self, _id, pk): return str.__new__(self, _id) def __init__(self, _id, pk): self.__pk = pk def __int__(self): return self.__pk class BaseIDMixin(models.Model): @classmethod def migrate_from_modm(cls, modm_obj): """ Given a modm object, make a django object with the same local fields. This is a base method that may work for simple objects. It should be customized in the child class if it doesn't work. 
:param modm_obj: :return: """ django_obj = cls() local_django_fields = set([x.name for x in django_obj._meta.get_fields() if not x.is_relation]) intersecting_fields = set(modm_obj.to_storage().keys()).intersection( set(local_django_fields)) for field in intersecting_fields: modm_value = getattr(modm_obj, field) if modm_value is None: continue if isinstance(modm_value, datetime): modm_value = pytz.utc.localize(modm_value) # TODO Remove this after migration if isinstance(django_obj._meta.get_field(field), DateTimeAwareJSONField): modm_value = coerce_nonnaive_datetimes(modm_value) setattr(django_obj, field, modm_value) return django_obj class Meta: abstract = True class ObjectIDMixin(BaseIDMixin): primary_identifier_name = '_id' _id = models.CharField(max_length=24, default=generate_object_id, unique=True, db_index=True) @classmethod def load(cls, q): try: return cls.objects.get(_id=q) except cls.DoesNotExist: # modm doesn't throw exceptions when loading things that don't exist return None @classmethod def migrate_from_modm(cls, modm_obj): django_obj = super(ObjectIDMixin, cls).migrate_from_modm(modm_obj) django_obj._id = str(modm_obj._id) return django_obj class Meta: abstract = True def _natural_key(self): return self._id class InvalidGuid(Exception): pass class OptionalGuidMixin(BaseIDMixin): """ This makes it so that things can **optionally** have guids. Think files. Things that inherit from this must also inherit from ObjectIDMixin ... probably """ __guid_min_length__ = 5 guids = GenericRelation(Guid, related_name='referent', related_query_name='referents') guid_string = ArrayField(models.CharField(max_length=255, null=True, blank=True), null=True, blank=True) content_type_pk = models.PositiveIntegerField(null=True, blank=True) def get_guid(self, create=False): if create: try: guid, created = Guid.objects.get_or_create( object_id=self.pk, content_type_id=ContentType.objects.get_for_model(self).pk ) except MultipleObjectsReturned: # lol, hacks pass else: return guid return self.guids.order_by('-created').first() @classmethod def migrate_from_modm(cls, modm_obj): instance = super(OptionalGuidMixin, cls).migrate_from_modm(modm_obj) from website.models import Guid as MODMGuid from modularodm import Q as MODMQ if modm_obj.get_guid(): guids = MODMGuid.find(MODMQ('referent', 'eq', modm_obj._id)) setattr(instance, 'guid_string', [x.lower() for x in guids.get_keys()]) setattr(instance, 'content_type_pk', ContentType.objects.get_for_model(cls).pk) return instance class Meta: abstract = True class GuidMixinQuerySet(MODMCompatibilityQuerySet): tables = ['osf_guid', 'django_content_type'] GUID_FIELDS = [ 'guids__id', 'guids___id', 'guids__content_type_id', 'guids__object_id', 'guids__created' ] def safe_table_alias(self, table_name, create=False): """ Returns a table alias for the given table_name and whether this is a new alias or not. If 'create' is true, a new alias is always created. Otherwise, the most recently created alias for the table (if one exists) is reused. """ alias_list = self.query.table_map.get(table_name) if not create and alias_list: alias = alias_list[0] if alias in self.query.alias_refcount: self.query.alias_refcount[alias] += 1 else: self.query.alias_refcount[alias] = 1 return alias, False # Create a new alias for this table. if alias_list: alias = '%s%d' % (self.query.alias_prefix, len(self.query.alias_map) + 1) alias_list.append(alias) else: # The first occurrence of a table uses the table name directly. 
alias = table_name self.query.table_map[alias] = [alias] self.query.alias_refcount[alias] = 1 self.tables.append(alias) return alias, True def annotate_query_with_guids(self): self._prefetch_related_lookups = ['guids'] for field in self.GUID_FIELDS: self.query.add_annotation( F(field), '_{}'.format(field), is_summary=False ) for table in self.tables: if table not in self.query.tables: self.safe_table_alias(table) def remove_guid_annotations(self): for k, v in self.query.annotations.iteritems(): if k[1:] in self.GUID_FIELDS: del self.query.annotations[k] for table_name in ['osf_guid', 'django_content_type']: if table_name in self.query.alias_map: del self.query.alias_map[table_name] if table_name in self.query.alias_refcount: del self.query.alias_refcount[table_name] if table_name in self.query.tables: del self.query.tables[self.query.tables.index(table_name)] def _clone(self, annotate=False, **kwargs): query = self.query.clone() if self._sticky_filter: query.filter_is_sticky = True if annotate: self.annotate_query_with_guids() clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints) # this method was copied from the default django queryset except for the below two lines if annotate: clone.annotate_query_with_guids() clone._for_write = self._for_write clone._prefetch_related_lookups = self._prefetch_related_lookups[:] clone._known_related_objects = self._known_related_objects clone._iterable_class = self._iterable_class clone._fields = self._fields clone.__dict__.update(kwargs) return clone def annotate(self, *args, **kwargs): self.annotate_query_with_guids() return super(GuidMixinQuerySet, self).annotate(*args, **kwargs) def _filter_or_exclude(self, negate, *args, **kwargs): if args or kwargs: assert self.query.can_filter(), \ 'Cannot filter a query once a slice has been taken.' clone = self._clone(annotate=True) if negate: clone.query.add_q(~Q(*args, **kwargs)) else: clone.query.add_q(Q(*args, **kwargs)) return clone def all(self): return self._clone(annotate=True) # does implicit filter def get(self, *args, **kwargs): # add this to make sure we don't get dupes self.query.add_distinct_fields('id') return super(GuidMixinQuerySet, self).get(*args, **kwargs) # TODO: Below lines are commented out to ensure that # the annotations are used after running .count() # e.g. # queryset.count() # queryset[0] # This is more efficient when doing chained operations # on a queryset, but less efficient when only getting a count. 
# Figure out a way to get the best of both worlds # def count(self): # self.remove_guid_annotations() # return super(GuidMixinQuerySet, self).count() def update(self, **kwargs): self.remove_guid_annotations() return super(GuidMixinQuerySet, self).update(**kwargs) def update_or_create(self, defaults=None, **kwargs): self.remove_guid_annotations() return super(GuidMixinQuerySet, self).update_or_create(defaults=defaults, **kwargs) def values(self, *fields): self.remove_guid_annotations() return super(GuidMixinQuerySet, self).values(*fields) def create(self, **kwargs): self.remove_guid_annotations() return super(GuidMixinQuerySet, self).create(**kwargs) def bulk_create(self, objs, batch_size=None): self.remove_guid_annotations() return super(GuidMixinQuerySet, self).bulk_create(objs, batch_size) def get_or_create(self, defaults=None, **kwargs): self.remove_guid_annotations() return super(GuidMixinQuerySet, self).get_or_create(defaults, **kwargs) def values_list(self, *fields, **kwargs): self.remove_guid_annotations() return super(GuidMixinQuerySet, self).values_list(*fields, **kwargs) def exists(self): self.remove_guid_annotations() return super(GuidMixinQuerySet, self).exists() def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self._iterable_class(self)) if self._prefetch_related_lookups and not self._prefetch_done: if 'guids' in self._prefetch_related_lookups and self._result_cache and hasattr(self._result_cache[0], '_guids__id'): # if guids is requested for prefetch and there are things in the result cache and the first one has # the annotated guid fields then remove guids from prefetch_related_lookups del self._prefetch_related_lookups[self._prefetch_related_lookups.index('guids')] results = [] for result in self._result_cache: # loop through the result cache guid_dict = {} for field in self.GUID_FIELDS: # pull the fields off of the result object and put them in a dictionary without prefixed names guid_dict[field] = getattr(result, '_{}'.format(field), None) if None in guid_dict.values(): # if we get an invalid result field value, stop logger.warning( 'Annotated guids came back will None values for {}, resorting to extra query'.format(result)) return if not hasattr(result, '_prefetched_objects_cache'): # initialize _prefetched_objects_cache result._prefetched_objects_cache = {} if 'guids' not in result._prefetched_objects_cache: # intialize guids in _prefetched_objects_cache result._prefetched_objects_cache['guids'] = [] # build a result dictionary of even more proper fields result_dict = {key.replace('guids__', ''): value for key, value in guid_dict.iteritems()} # make an unsaved guid instance guid = Guid(**result_dict) result._prefetched_objects_cache['guids'].append(guid) results.append(result) # replace the result cache with the new set of results self._result_cache = results self._prefetch_related_objects() class GuidMixin(BaseIDMixin): __guid_min_length__ = 5 primary_identifier_name = 'guid_string' guids = GenericRelation(Guid, related_name='referent', related_query_name='referents') guid_string = ArrayField(models.CharField(max_length=255, null=True, blank=True), null=True, blank=True) content_type_pk = models.PositiveIntegerField(null=True, blank=True) objects = GuidMixinQuerySet.as_manager() # TODO: use pre-delete signal to disable delete cascade def _natural_key(self): return self.guid_string @cached_property def _id(self): try: guid = self.guids.all()[0] except IndexError: return None if guid: return guid._id return None @_id.setter def _id(self, 
value): # TODO do we really want to allow this? guid, created = Guid.objects.get_or_create(_id=value) if created: guid.object_id = self.pk guid.content_type = ContentType.objects.get_for_model(self) guid.save() elif guid.content_type == ContentType.objects.get_for_model(self) and guid.object_id == self.pk: # TODO should this up the created for the guid until now so that it appears as the first guid # for this object? return else: raise InvalidGuid('Cannot indirectly repoint an existing guid, please use the Guid model') _primary_key = _id @classmethod def load(cls, q): try: content_type = ContentType.objects.get_for_model(cls) # if referent doesn't exist it will return None return Guid.objects.get(_id=q, content_type=content_type).referent except Guid.DoesNotExist: # modm doesn't throw exceptions when loading things that don't exist return None @property def deep_url(self): return None @classmethod def migrate_from_modm(cls, modm_obj): """ Given a modm object, make a django object with the same local fields. This is a base method that may work for simple objects. It should be customized in the child class if it doesn't work. :param modm_obj: :return: """ django_obj = cls() local_django_fields = set( [x.name for x in django_obj._meta.get_fields() if not x.is_relation and x.name != '_id']) intersecting_fields = set(modm_obj.to_storage().keys()).intersection( set(local_django_fields)) for field in intersecting_fields: modm_value = getattr(modm_obj, field) if modm_value is None: continue if isinstance(modm_value, datetime): modm_value = pytz.utc.localize(modm_value) # TODO Remove this after migration if isinstance(django_obj._meta.get_field(field), DateTimeAwareJSONField): modm_value = coerce_nonnaive_datetimes(modm_value) setattr(django_obj, field, modm_value) from website.models import Guid as MODMGuid from modularodm import Q as MODMQ guids = MODMGuid.find(MODMQ('referent', 'eq', modm_obj._id)) setattr(django_obj, 'guid_string', list(set([x.lower() for x in guids.get_keys()]))) setattr(django_obj, 'content_type_pk', ContentType.objects.get_for_model(cls).pk) return django_obj class Meta: abstract = True @receiver(post_save) def ensure_guid(sender, instance, created, **kwargs): if not issubclass(sender, GuidMixin): return False existing_guids = Guid.objects.filter(object_id=instance.pk, content_type=ContentType.objects.get_for_model(instance)) has_cached_guids = hasattr(instance, '_prefetched_objects_cache') and 'guids' in instance._prefetched_objects_cache if not existing_guids.exists(): # Clear query cache of instance.guids if has_cached_guids: del instance._prefetched_objects_cache['guids'] Guid.objects.create(object_id=instance.pk, content_type=ContentType.objects.get_for_model(instance), _id=generate_guid(instance.__guid_min_length__)) elif not existing_guids.exists() and instance.guid_string is not None: # Clear query cache of instance.guids if has_cached_guids: del instance._prefetched_objects_cache['guids'] Guid.objects.create(object_id=instance.pk, content_type_id=instance.content_type_pk, _id=instance.guid_string)
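
# Behaviour sketch (the model name is hypothetical): any concrete GuidMixin
# subclass receives a short guid via the `ensure_guid` post_save receiver above.
#
#   node = SomeGuidMixinModel()
#   node.save()
#   node._id  # five-character guid drawn from ALPHABET (see generate_guid)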
apache-2.0
2,214,120,148,595,470,600
34.588556
129
0.607917
false
4.037403
false
false
false
rohitwaghchaure/frappe
frappe/model/meta.py
1
15604
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt # metadata ''' Load metadata (DocType) class Example: meta = frappe.get_meta('User') if meta.has_field('first_name'): print "DocType" table has field "first_name" ''' from __future__ import unicode_literals import frappe, json, os from frappe.utils import cstr, cint from frappe.model import default_fields, no_value_fields, optional_fields from frappe.model.document import Document from frappe.model.base_document import BaseDocument from frappe.model.db_schema import type_map from frappe.modules import load_doctype_module from frappe import _ def get_meta(doctype, cached=True): if cached: return frappe.cache().hget("meta", doctype, lambda: Meta(doctype)) else: return Meta(doctype) def get_table_columns(doctype): return frappe.cache().hget("table_columns", doctype, lambda: frappe.db.get_table_columns(doctype)) def load_doctype_from_file(doctype): fname = frappe.scrub(doctype) with open(frappe.get_app_path("frappe", "core", "doctype", fname, fname + ".json"), "r") as f: txt = json.loads(f.read()) for d in txt.get("fields", []): d["doctype"] = "DocField" for d in txt.get("permissions", []): d["doctype"] = "DocPerm" txt["fields"] = [BaseDocument(d) for d in txt["fields"]] if "permissions" in txt: txt["permissions"] = [BaseDocument(d) for d in txt["permissions"]] return txt class Meta(Document): _metaclass = True default_fields = list(default_fields)[1:] special_doctypes = ("DocField", "DocPerm", "Role", "DocType", "Module Def") def __init__(self, doctype): self._fields = {} if isinstance(doctype, Document): super(Meta, self).__init__(doctype.as_dict()) else: super(Meta, self).__init__("DocType", doctype) self.process() def load_from_db(self): try: super(Meta, self).load_from_db() except frappe.DoesNotExistError: if self.doctype=="DocType" and self.name in self.special_doctypes: self.__dict__.update(load_doctype_from_file(self.name)) else: raise def get_link_fields(self): return self.get("fields", {"fieldtype": "Link", "options":["!=", "[Select]"]}) def get_dynamic_link_fields(self): if not hasattr(self, '_dynamic_link_fields'): self._dynamic_link_fields = self.get("fields", {"fieldtype": "Dynamic Link"}) return self._dynamic_link_fields def get_select_fields(self): return self.get("fields", {"fieldtype": "Select", "options":["not in", ["[Select]", "Loading..."]]}) def get_table_fields(self): if not hasattr(self, "_table_fields"): if self.name!="DocType": self._table_fields = self.get('fields', {"fieldtype":"Table"}) else: self._table_fields = doctype_table_fields return self._table_fields def get_global_search_fields(self): '''Returns list of fields with `in_global_search` set and `name` if set''' fields = self.get("fields", {"in_global_search": 1 }) if getattr(self, 'show_name_in_global_search', None): fields.append(frappe._dict(fieldtype='Data', fieldname='name', label='Name')) return fields def get_valid_columns(self): if not hasattr(self, "_valid_columns"): if self.name in ("DocType", "DocField", "DocPerm", "Property Setter"): self._valid_columns = get_table_columns(self.name) else: self._valid_columns = self.default_fields + \ [df.fieldname for df in self.get("fields") if df.fieldtype in type_map] return self._valid_columns def get_table_field_doctype(self, fieldname): return { "fields": "DocField", "permissions": "DocPerm"}.get(fieldname) def get_field(self, fieldname): '''Return docfield from meta''' if not self._fields: for f in self.get("fields"): self._fields[f.fieldname] = f return 
self._fields.get(fieldname) def has_field(self, fieldname): '''Returns True if fieldname exists''' return True if self.get_field(fieldname) else False def get_label(self, fieldname): '''Get label of the given fieldname''' df = self.get_field(fieldname) if df: label = df.label else: label = { 'name': _('ID'), 'owner': _('Created By'), 'modified_by': _('Modified By'), 'creation': _('Created On'), 'modified': _('Last Modified On') }.get(fieldname) or _('No Label') return label def get_options(self, fieldname): return self.get_field(fieldname).options def get_link_doctype(self, fieldname): df = self.get_field(fieldname) if df.fieldtype == "Link": return df.options elif df.fieldtype == "Dynamic Link": return self.get_options(df.options) else: return None def get_search_fields(self): search_fields = self.search_fields or "name" search_fields = [d.strip() for d in search_fields.split(",")] if "name" not in search_fields: search_fields.append("name") return search_fields def get_fields_to_fetch(self, link_fieldname=None): '''Returns a list of docfield objects for fields whose values are to be fetched and updated for a particular link field These fields are of type Data, Link, Text, Readonly and their options property is set as `link_fieldname`.`source_fieldname`''' out = [] if not link_fieldname: link_fields = [df.fieldname for df in self.get_link_fields()] for df in self.fields: if df.fieldtype in ('Data', 'Read Only', 'Text', 'Small Text', 'Text Editor', 'Code') and df.options: if link_fieldname: if df.options.startswith(link_fieldname + '.'): out.append(df) else: if '.' in df.options: fieldname = df.options.split('.', 1)[0] if fieldname in link_fields: out.append(df) return out def get_list_fields(self): list_fields = ["name"] + [d.fieldname \ for d in self.fields if (d.in_list_view and d.fieldtype in type_map)] if self.title_field and self.title_field not in list_fields: list_fields.append(self.title_field) return list_fields def get_custom_fields(self): return [d for d in self.fields if d.get('is_custom_field')] def get_title_field(self): '''Return the title field of this doctype, explict via `title_field`, or `title` or `name`''' title_field = getattr(self, 'title_field', None) if not title_field and self.has_field('title'): title_field = 'title' else: title_field = 'name' return title_field def process(self): # don't process for special doctypes # prevent's circular dependency if self.name in self.special_doctypes: return self.add_custom_fields() self.apply_property_setters() self.sort_fields() self.get_valid_columns() self.set_custom_permissions() def add_custom_fields(self): try: self.extend("fields", frappe.db.sql("""SELECT * FROM `tabCustom Field` WHERE dt = %s AND docstatus < 2""", (self.name,), as_dict=1, update={"is_custom_field": 1})) except Exception, e: if e.args[0]==1146: return else: raise def apply_property_setters(self): property_setters = frappe.db.sql("""select * from `tabProperty Setter` where doc_type=%s""", (self.name,), as_dict=1) if not property_setters: return integer_docfield_properties = [d.fieldname for d in frappe.get_meta('DocField').fields if d.fieldtype in ('Int', 'Check')] for ps in property_setters: if ps.doctype_or_field=='DocType': if ps.property_type in ('Int', 'Check'): ps.value = cint(ps.value) self.set(ps.property, ps.value) else: docfield = self.get("fields", {"fieldname":ps.field_name}, limit=1) if docfield: docfield = docfield[0] else: continue if ps.property in integer_docfield_properties: ps.value = cint(ps.value) docfield.set(ps.property, ps.value) 
def sort_fields(self): """sort on basis of insert_after""" custom_fields = sorted(self.get_custom_fields(), key=lambda df: df.idx) if custom_fields: newlist = [] # if custom field is at top # insert_after is false for c in list(custom_fields): if not c.insert_after: newlist.append(c) custom_fields.pop(custom_fields.index(c)) # standard fields newlist += [df for df in self.get('fields') if not df.get('is_custom_field')] newlist_fieldnames = [df.fieldname for df in newlist] for i in xrange(2): for df in list(custom_fields): if df.insert_after in newlist_fieldnames: cf = custom_fields.pop(custom_fields.index(df)) idx = newlist_fieldnames.index(df.insert_after) newlist.insert(idx + 1, cf) newlist_fieldnames.insert(idx + 1, cf.fieldname) if not custom_fields: break # worst case, add remaining custom fields to last if custom_fields: newlist += custom_fields # renum idx for i, f in enumerate(newlist): f.idx = i + 1 self.fields = newlist def set_custom_permissions(self): '''Reset `permissions` with Custom DocPerm if exists''' if frappe.flags.in_patch or frappe.flags.in_import: return if not self.istable and self.name not in ('DocType', 'DocField', 'DocPerm', 'Custom DocPerm'): custom_perms = frappe.get_all('Custom DocPerm', fields='*', filters=dict(parent=self.name), update=dict(doctype='Custom DocPerm')) if custom_perms: self.permissions = [Document(d) for d in custom_perms] def get_fieldnames_with_value(self): return [df.fieldname for df in self.fields if df.fieldtype not in no_value_fields] def get_fields_to_check_permissions(self, user_permission_doctypes): fields = self.get("fields", { "fieldtype":"Link", "parent": self.name, "ignore_user_permissions":("!=", 1), "options":("in", user_permission_doctypes) }) if self.name in user_permission_doctypes: fields.append(frappe._dict({ "label":"Name", "fieldname":"name", "options": self.name })) return fields def get_high_permlevel_fields(self): """Build list of fields with high perm level and all the higher perm levels defined.""" if not hasattr(self, "high_permlevel_fields"): self.high_permlevel_fields = [] for df in self.fields: if df.permlevel > 0: self.high_permlevel_fields.append(df) return self.high_permlevel_fields def get_dashboard_data(self): '''Returns dashboard setup related to this doctype. 
This method will return the `data` property in the `[doctype]_dashboard.py` file in the doctype folder''' data = frappe._dict() try: module = load_doctype_module(self.name, suffix='_dashboard') if hasattr(module, 'get_data'): data = frappe._dict(module.get_data()) except ImportError: pass return data def get_row_template(self): return self.get_web_template(suffix='_row') def get_web_template(self, suffix=''): '''Returns the relative path of the row template for this doctype''' module_name = frappe.scrub(self.module) doctype = frappe.scrub(self.name) template_path = frappe.get_module_path(module_name, 'doctype', doctype, 'templates', doctype + suffix + '.html') if os.path.exists(template_path): return '{module_name}/doctype/{doctype_name}/templates/{doctype_name}{suffix}.html'.format( module_name = module_name, doctype_name = doctype, suffix=suffix) return None doctype_table_fields = [ frappe._dict({"fieldname": "fields", "options": "DocField"}), frappe._dict({"fieldname": "permissions", "options": "DocPerm"}) ] ####### def is_single(doctype): try: return frappe.db.get_value("DocType", doctype, "issingle") except IndexError: raise Exception, 'Cannot determine whether %s is single' % doctype def get_parent_dt(dt): parent_dt = frappe.db.sql("""select parent from tabDocField where fieldtype="Table" and options=%s and (parent not like "old_parent:%%") limit 1""", dt) return parent_dt and parent_dt[0][0] or '' def set_fieldname(field_id, fieldname): frappe.db.set_value('DocField', field_id, 'fieldname', fieldname) def get_field_currency(df, doc=None): """get currency based on DocField options and fieldvalue in doc""" currency = None if not df.get("options"): return None if not doc: return None if not getattr(frappe.local, "field_currency", None): frappe.local.field_currency = frappe._dict() if not (frappe.local.field_currency.get((doc.doctype, doc.name), {}).get(df.fieldname) or (doc.parent and frappe.local.field_currency.get((doc.doctype, doc.parent), {}).get(df.fieldname))): ref_docname = doc.parent or doc.name if ":" in cstr(df.get("options")): split_opts = df.get("options").split(":") if len(split_opts)==3: currency = frappe.db.get_value(split_opts[0], doc.get(split_opts[1]), split_opts[2]) else: currency = doc.get(df.get("options")) if doc.parent: if currency: ref_docname = doc.name else: currency = frappe.db.get_value(doc.parenttype, doc.parent, df.get("options")) if currency: frappe.local.field_currency.setdefault((doc.doctype, ref_docname), frappe._dict())\ .setdefault(df.fieldname, currency) return frappe.local.field_currency.get((doc.doctype, doc.name), {}).get(df.fieldname) or \ (doc.parent and frappe.local.field_currency.get((doc.doctype, doc.parent), {}).get(df.fieldname)) def get_field_precision(df, doc=None, currency=None): """get precision based on DocField options and fieldvalue in doc""" from frappe.utils import get_number_format_info if cint(df.precision): precision = cint(df.precision) elif df.fieldtype == "Currency": number_format = None if not currency and doc: currency = get_field_currency(df, doc) if not currency: # use default currency currency = frappe.db.get_default("currency") if currency: number_format = frappe.db.get_value("Currency", currency, "number_format", cache=True) if not number_format: number_format = frappe.db.get_default("number_format") or "#,###.##" decimal_str, comma_str, precision = get_number_format_info(number_format) else: precision = cint(frappe.db.get_default("float_precision")) or 3 return precision def get_default_df(fieldname): if fieldname in 
default_fields: if fieldname in ("creation", "modified"): return frappe._dict( fieldname = fieldname, fieldtype = "Datetime" ) else: return frappe._dict( fieldname = fieldname, fieldtype = "Data" ) def trim_tables(): """Use this to remove columns that don't exist in meta""" ignore_fields = default_fields + optional_fields for doctype in frappe.db.get_all("DocType", filters={"issingle": 0}): doctype = doctype.name columns = frappe.db.get_table_columns(doctype) fields = frappe.get_meta(doctype).get_fieldnames_with_value() columns_to_remove = [f for f in list(set(columns) - set(fields)) if f not in ignore_fields and not f.startswith("_")] if columns_to_remove: print doctype, "columns removed:", columns_to_remove columns_to_remove = ", ".join(["drop `{0}`".format(c) for c in columns_to_remove]) query = """alter table `tab{doctype}` {columns}""".format( doctype=doctype, columns=columns_to_remove) frappe.db.sql_ddl(query) def clear_cache(doctype=None): cache = frappe.cache() for key in ('is_table', 'doctype_modules'): cache.delete_value(key) groups = ["meta", "form_meta", "table_columns", "last_modified", "linked_doctypes", 'email_alerts'] def clear_single(dt): for name in groups: cache.hdel(name, dt) if doctype: clear_single(doctype) # clear all parent doctypes for dt in frappe.db.sql("""select parent from tabDocField where fieldtype="Table" and options=%s""", (doctype,)): clear_single(dt[0]) # clear all notifications from frappe.desk.notifications import delete_notification_count_for delete_notification_count_for(doctype) else: # clear all for name in groups: cache.delete_value(name)
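# ---------------------------------------------------------------------------
# Illustrative sketch (a hypothetical standalone helper, not part of the
# frappe API): get_field_precision() above asks
# frappe.utils.get_number_format_info for the precision implied by a number
# format string. For the common case where "." is the decimal separator, the
# derivation amounts to counting the digits after the dot, e.g. "#,###.##"
# gives a precision of 2.
# ---------------------------------------------------------------------------
def _sketch_precision_from_number_format(number_format="#,###.##"):
	'''Count the fractional digits implied by a number format string.
	Assumes "." is the decimal separator, which is not true of every
	format frappe supports (e.g. "#.###,##").'''
	if "." in number_format:
		# e.g. "#,###.##" -> "##" -> precision 2
		return len(number_format.split(".")[-1])
	return 0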
mit
-7,037,875,001,456,010,000
28.721905
101
0.680338
false
3.147872
false
false
false
mkhutornenko/incubator-aurora
src/test/python/apache/aurora/client/cli/test_help.py
1
3763
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import contextlib
import unittest

from mock import patch

from apache.aurora.client.cli import EXIT_INVALID_PARAMETER, EXIT_OK
from apache.aurora.client.cli.client import AuroraCommandLine


class TestHelp(unittest.TestCase):
  """Tests of the help command for the Aurora v2 client framework"""

  def setUp(self):
    self.cmd = AuroraCommandLine()
    self.transcript = []
    self.err_transcript = []

  def mock_print(self, text):
    for line in text.split('\n'):
      self.transcript.append(line)

  def mock_print_err(self, text):
    for line in text.split('\n'):
      self.err_transcript.append(line)

  def test_help(self):
    with patch('apache.aurora.client.cli.client.AuroraCommandLine.print_out',
        side_effect=self.mock_print):
      self.cmd.execute(['help'])
      assert len(self.transcript) > 10
      assert self.transcript[1] == 'Usage:'
      assert '==Commands for jobs' in self.transcript
      assert '==Commands for quotas' in self.transcript

  def test_help_noun(self):
    with patch('apache.aurora.client.cli.client.AuroraCommandLine.print_out',
        side_effect=self.mock_print):
      self.cmd.execute(['help', 'job'])
      assert len(self.transcript) > 10
      assert self.transcript[0] == 'Usage for noun "job":'
      assert not any('quota' in t for t in self.transcript)
      assert any('job status' in t for t in self.transcript)
      assert any('job list' in t for t in self.transcript)

  def test_help_verb(self):
    with patch('apache.aurora.client.cli.client.AuroraCommandLine.print_out',
        side_effect=self.mock_print):
      assert self.cmd.execute(['help', 'job', 'status']) == EXIT_OK
      assert len(self.transcript) > 5
      assert self.transcript[0] == 'Usage for verb "job status":'
      assert not any('quota' in t for t in self.transcript)
      assert not any('list' in t for t in self.transcript)
      assert "Options:" in self.transcript
      assert any('status' in t for t in self.transcript)

  def test_help_unknown_noun(self):
    with contextlib.nested(
        patch('apache.aurora.client.cli.client.AuroraCommandLine.print_out',
            side_effect=self.mock_print),
        patch('apache.aurora.client.cli.client.AuroraCommandLine.print_err',
            side_effect=self.mock_print_err)):
      assert self.cmd.execute(['help', 'nothing']) == EXIT_INVALID_PARAMETER
      assert len(self.transcript) == 0
      assert len(self.err_transcript) == 2
      assert 'Unknown noun "nothing"' == self.err_transcript[0]
      assert "Valid nouns" in self.err_transcript[1]

  def test_help_unknown_verb(self):
    with contextlib.nested(
        patch('apache.aurora.client.cli.client.AuroraCommandLine.print_out',
            side_effect=self.mock_print),
        patch('apache.aurora.client.cli.client.AuroraCommandLine.print_err',
            side_effect=self.mock_print_err)):
      assert self.cmd.execute(['help', 'job', 'nothing']) == EXIT_INVALID_PARAMETER
      assert len(self.transcript) == 0
      assert len(self.err_transcript) == 2
      assert 'Noun "job" does not support a verb "nothing"' == self.err_transcript[0]
      assert 'Valid verbs for "job" are' in self.err_transcript[1]
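# ---------------------------------------------------------------------------
# Minimal standalone sketch of the capture pattern the tests above rely on:
# mock.patch with a side_effect reroutes every call into a recording
# function. _Greeter is a hypothetical class written only for this
# illustration; it is not part of the Aurora client.
# ---------------------------------------------------------------------------
class _Greeter(object):
  def print_out(self, msg):
    print(msg)

  def greet(self):
    self.print_out('hello\nworld')


def _sketch_capture_output():
  captured = []
  with patch.object(_Greeter, 'print_out',
      side_effect=lambda msg: captured.extend(msg.split('\n'))):
    _Greeter().greet()
  return captured  # ['hello', 'world']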
apache-2.0
-4,082,335,328,770,860,000
39.902174
85
0.68642
false
3.594078
true
false
false
MindPass/Code
Interface_graphique/mindmap/svgwrite-1.1.6/tests/test_elementfactory.py
1
2340
#!/usr/bin/env python
#coding:utf-8
# Author:  mozman --<mozman@gmx.at>
# Purpose: test elementfactory
# Created: 15.10.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License

import unittest

from svgwrite.elementfactory import ElementFactory
from svgwrite.params import Parameter

class MockFactory(ElementFactory):
    _parameter = Parameter(debug=True, profile='full')
    debug = True
    profile = 'full'

class TestElementFactory(unittest.TestCase):
    def setUp(self):
        self.factory = MockFactory()

    def test_g(self):
        group = self.factory.g(id='test')
        self.assertEqual(group.elementname, 'g')
        self.assertEqual(group['id'], 'test')

    def test_svg(self):
        svg = self.factory.svg()
        self.assertEqual(svg.elementname, 'svg')

    def test_defs(self):
        defs = self.factory.defs()
        self.assertEqual(defs.elementname, 'defs')

    def test_symbol(self):
        element = self.factory.symbol()
        self.assertEqual(element.elementname, 'symbol')

    def test_use(self):
        element = self.factory.use('link')
        self.assertEqual(element.elementname, 'use')

    def test_a(self):
        element = self.factory.a('link')
        self.assertEqual(element.elementname, 'a')

    def test_line(self):
        element = self.factory.line((0,0), (1,1))
        self.assertEqual(element.elementname, 'line')

    def test_rect(self):
        element = self.factory.rect((0,0), (1,1))
        self.assertEqual(element.elementname, 'rect')

    def test_circle(self):
        element = self.factory.circle((0,0), 5)
        self.assertEqual(element.elementname, 'circle')

    def test_ellipse(self):
        element = self.factory.ellipse((0,0), (5, 5))
        self.assertEqual(element.elementname, 'ellipse')

    def test_polygon(self):
        element = self.factory.polygon([(0, 0), (5, 5)])
        self.assertEqual(element.elementname, 'polygon')

    def test_polyline(self):
        element = self.factory.polyline([(0, 0), (5, 5)])
        self.assertEqual(element.elementname, 'polyline')

    def test_AttributeError(self):
        with self.assertRaises(AttributeError):
            self.factory.test()

if __name__=='__main__':
    unittest.main()
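# ---------------------------------------------------------------------------
# Minimal sketch of the dispatch mechanism exercised above (a hypothetical
# re-creation, not svgwrite's actual ElementFactory): unknown attribute
# lookups are routed through __getattr__, which either returns an element
# builder or raises AttributeError -- the behavior test_AttributeError
# checks for.
# ---------------------------------------------------------------------------
class _SketchFactory(object):
    _known = ('g', 'svg', 'defs', 'line', 'rect', 'circle')

    def __getattr__(self, name):
        if name in self._known:
            # return a builder for the requested element name
            return lambda **extra: {'elementname': name, 'attribs': extra}
        raise AttributeError("'%s' is not a valid SVG element" % name)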
gpl-3.0
-9,142,067,781,533,273,000
27.888889
57
0.632479
false
3.639191
true
false
false
CloudNiner/fadds-parser
fadds/twr.py
1
2207
# -*- coding: utf-8 -*-
"""
Author: @sposs
Date: 19.08.16
"""
from fadds.base_file import BaseFile, BaseData
import re

value_re = re.compile(r"(?P<value>[0-9]+\.*[0-9]*)(?P<use>[A-Z ()0-9-/&]*)")


class TWRParser(BaseFile):
    def __init__(self, twr_file):
        super(TWRParser, self).__init__(twr_file)
        self.object = TWR


class TWR(BaseData):
    key_length = 4
    NEW = "TWR1"
    DATA = 'TWR1'
    HOURS = 'TWR2'
    COMFREQ = 'TWR3'
    SERVICES = 'TWR4'
    RADAR = 'TWR5'
    TERMCOM = 'TWR6'
    SATELLITE = 'TWR7'
    AIRSPACECLASS = 'TWR8'
    ATISDATA = 'TWR9'

    def __init__(self):
        super(TWR, self).__init__()
        self.infodate = ""
        self.site_num = ""
        self.term_facility_type = ""
        self.freqs = {"freqs": [], "freqs_untrunc": []}

    def special_data(self, record_type, line):
        """
        We only look at general info and communication frequencies.

        :param str record_type:
        :param str line:
        :return: None
        """
        if record_type == self.DATA:
            self.infodate = self.get_value(line, 9, 10)
            self.site_num = self.get_value(line, 19, 11).strip()
            self.term_facility_type = self.get_value(line, 239, 12).strip()
        elif record_type == self.COMFREQ:
            freqs = []
            freqs_untrunc = []
            period = 94
            for i in range(9):
                val = self.get_value(line, 9+period*i, 44).strip()
                info = ""
                match = value_re.match(val)
                if match:
                    val = match.group("value")
                    if len(match.groups()) > 1:
                        info = match.group("use").strip()
                use = self.get_value(line, 44+period*i, 50).strip()
                if val:
                    freqs.append({"val": float(val), "type": use, "use": info})
            for i in range(9):
                val = self.get_value(line, 855+i*60, 60)
                if val:
                    freqs_untrunc.append(val)
            self.freqs['freqs'].extend(freqs)
            self.freqs['freqs_untrunc'].extend(freqs_untrunc)
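# ---------------------------------------------------------------------------
# Illustrative sketch: how value_re splits a raw frequency field into its
# numeric value and usage annotation. The sample string is hypothetical but
# follows the "<number><USE>" shape the regex expects.
# ---------------------------------------------------------------------------
def _sketch_parse_frequency(raw="118.3 (MAIN)"):
    match = value_re.match(raw)
    if not match:
        return None
    # e.g. "118.3 (MAIN)" -> value "118.3", use "(MAIN)"
    return {"val": float(match.group("value")),
            "use": match.group("use").strip()}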
mit
4,184,547,079,323,127,000
28.426667
79
0.492524
false
3.323795
false
false
false
apbard/scipy
scipy/spatial/tests/test_kdtree.py
1
41366
# Copyright Anne M. Archibald 2008 # Released under the scipy license from __future__ import division, print_function, absolute_import from numpy.testing import (assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_, assert_raises) import numpy as np from scipy.spatial import KDTree, Rectangle, distance_matrix, cKDTree from scipy.spatial.ckdtree import cKDTreeNode from scipy.spatial import minkowski_distance import itertools def distance_box(a, b, p, boxsize): diff = a - b diff[diff > 0.5 * boxsize] -= boxsize diff[diff < -0.5 * boxsize] += boxsize d = minkowski_distance(diff, 0, p) return d class ConsistencyTests: def distance(self, a, b, p): return minkowski_distance(a, b, p) def test_nearest(self): x = self.x d, i = self.kdtree.query(x, 1) assert_almost_equal(d**2,np.sum((x-self.data[i])**2)) eps = 1e-8 assert_(np.all(np.sum((self.data-x[np.newaxis,:])**2,axis=1) > d**2-eps)) def test_m_nearest(self): x = self.x m = self.m dd, ii = self.kdtree.query(x, m) d = np.amax(dd) i = ii[np.argmax(dd)] assert_almost_equal(d**2,np.sum((x-self.data[i])**2)) eps = 1e-8 assert_equal(np.sum(np.sum((self.data-x[np.newaxis,:])**2,axis=1) < d**2+eps),m) def test_points_near(self): x = self.x d = self.d dd, ii = self.kdtree.query(x, k=self.kdtree.n, distance_upper_bound=d) eps = 1e-8 hits = 0 for near_d, near_i in zip(dd,ii): if near_d == np.inf: continue hits += 1 assert_almost_equal(near_d**2,np.sum((x-self.data[near_i])**2)) assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d)) assert_equal(np.sum(self.distance(self.data,x,2) < d**2+eps),hits) def test_points_near_l1(self): x = self.x d = self.d dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=1, distance_upper_bound=d) eps = 1e-8 hits = 0 for near_d, near_i in zip(dd,ii): if near_d == np.inf: continue hits += 1 assert_almost_equal(near_d,self.distance(x,self.data[near_i],1)) assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d)) assert_equal(np.sum(self.distance(self.data,x,1) < d+eps),hits) def test_points_near_linf(self): x = self.x d = self.d dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=np.inf, distance_upper_bound=d) eps = 1e-8 hits = 0 for near_d, near_i in zip(dd,ii): if near_d == np.inf: continue hits += 1 assert_almost_equal(near_d,self.distance(x,self.data[near_i],np.inf)) assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d)) assert_equal(np.sum(self.distance(self.data,x,np.inf) < d+eps),hits) def test_approx(self): x = self.x k = self.k eps = 0.1 d_real, i_real = self.kdtree.query(x, k) d, i = self.kdtree.query(x, k, eps=eps) assert_(np.all(d <= d_real*(1+eps))) class test_random(ConsistencyTests): def setup_method(self): self.n = 100 self.m = 4 np.random.seed(1234) self.data = np.random.randn(self.n, self.m) self.kdtree = KDTree(self.data,leafsize=2) self.x = np.random.randn(self.m) self.d = 0.2 self.k = 10 class test_random_far(test_random): def setup_method(self): test_random.setUp(self) self.x = np.random.randn(self.m)+10 class test_small(ConsistencyTests): def setup_method(self): self.data = np.array([[0,0,0], [0,0,1], [0,1,0], [0,1,1], [1,0,0], [1,0,1], [1,1,0], [1,1,1]]) self.kdtree = KDTree(self.data) self.n = self.kdtree.n self.m = self.kdtree.m np.random.seed(1234) self.x = np.random.randn(3) self.d = 0.5 self.k = 4 def test_nearest(self): assert_array_equal( self.kdtree.query((0,0,0.1), 1), (0.1,0)) def test_nearest_two(self): assert_array_equal( self.kdtree.query((0,0,0.1), 2), ([0.1,0.9],[0,1])) class test_small_nonleaf(test_small): 
def setup_method(self): test_small.setUp(self) self.kdtree = KDTree(self.data,leafsize=1) class test_small_compiled(test_small): def setup_method(self): test_small.setUp(self) self.kdtree = cKDTree(self.data) class test_small_nonleaf_compiled(test_small): def setup_method(self): test_small.setUp(self) self.kdtree = cKDTree(self.data,leafsize=1) class test_random_compiled(test_random): def setup_method(self): test_random.setUp(self) self.kdtree = cKDTree(self.data) class test_random_far_compiled(test_random_far): def setup_method(self): test_random_far.setUp(self) self.kdtree = cKDTree(self.data) class test_vectorization: def setup_method(self): self.data = np.array([[0,0,0], [0,0,1], [0,1,0], [0,1,1], [1,0,0], [1,0,1], [1,1,0], [1,1,1]]) self.kdtree = KDTree(self.data) def test_single_query(self): d, i = self.kdtree.query(np.array([0,0,0])) assert_(isinstance(d,float)) assert_(np.issubdtype(i, int)) def test_vectorized_query(self): d, i = self.kdtree.query(np.zeros((2,4,3))) assert_equal(np.shape(d),(2,4)) assert_equal(np.shape(i),(2,4)) def test_single_query_multiple_neighbors(self): s = 23 kk = self.kdtree.n+s d, i = self.kdtree.query(np.array([0,0,0]),k=kk) assert_equal(np.shape(d),(kk,)) assert_equal(np.shape(i),(kk,)) assert_(np.all(~np.isfinite(d[-s:]))) assert_(np.all(i[-s:] == self.kdtree.n)) def test_vectorized_query_multiple_neighbors(self): s = 23 kk = self.kdtree.n+s d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk) assert_equal(np.shape(d),(2,4,kk)) assert_equal(np.shape(i),(2,4,kk)) assert_(np.all(~np.isfinite(d[:,:,-s:]))) assert_(np.all(i[:,:,-s:] == self.kdtree.n)) def test_single_query_all_neighbors(self): d, i = self.kdtree.query([0,0,0],k=None,distance_upper_bound=1.1) assert_(isinstance(d,list)) assert_(isinstance(i,list)) def test_vectorized_query_all_neighbors(self): d, i = self.kdtree.query(np.zeros((2,4,3)),k=None,distance_upper_bound=1.1) assert_equal(np.shape(d),(2,4)) assert_equal(np.shape(i),(2,4)) assert_(isinstance(d[0,0],list)) assert_(isinstance(i[0,0],list)) class test_vectorization_compiled: def setup_method(self): self.data = np.array([[0,0,0], [0,0,1], [0,1,0], [0,1,1], [1,0,0], [1,0,1], [1,1,0], [1,1,1]]) self.kdtree = cKDTree(self.data) def test_single_query(self): d, i = self.kdtree.query([0,0,0]) assert_(isinstance(d,float)) assert_(isinstance(i,int)) def test_vectorized_query(self): d, i = self.kdtree.query(np.zeros((2,4,3))) assert_equal(np.shape(d),(2,4)) assert_equal(np.shape(i),(2,4)) def test_vectorized_query_noncontiguous_values(self): np.random.seed(1234) qs = np.random.randn(3,1000).T ds, i_s = self.kdtree.query(qs) for q, d, i in zip(qs,ds,i_s): assert_equal(self.kdtree.query(q),(d,i)) def test_single_query_multiple_neighbors(self): s = 23 kk = self.kdtree.n+s d, i = self.kdtree.query([0,0,0],k=kk) assert_equal(np.shape(d),(kk,)) assert_equal(np.shape(i),(kk,)) assert_(np.all(~np.isfinite(d[-s:]))) assert_(np.all(i[-s:] == self.kdtree.n)) def test_vectorized_query_multiple_neighbors(self): s = 23 kk = self.kdtree.n+s d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk) assert_equal(np.shape(d),(2,4,kk)) assert_equal(np.shape(i),(2,4,kk)) assert_(np.all(~np.isfinite(d[:,:,-s:]))) assert_(np.all(i[:,:,-s:] == self.kdtree.n)) class ball_consistency: def distance(self, a, b, p): return minkowski_distance(a, b, p) def test_in_ball(self): l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps) for i in l: assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps)) def test_found_all(self): c = np.ones(self.T.n,dtype=bool) l 
= self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps) c[l] = False assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps))) class test_random_ball(ball_consistency): def setup_method(self): n = 100 m = 4 np.random.seed(1234) self.data = np.random.randn(n,m) self.T = KDTree(self.data,leafsize=2) self.x = np.random.randn(m) self.p = 2. self.eps = 0 self.d = 0.2 class test_random_ball_compiled(ball_consistency): def setup_method(self): n = 100 m = 4 np.random.seed(1234) self.data = np.random.randn(n,m) self.T = cKDTree(self.data,leafsize=2) self.x = np.random.randn(m) self.p = 2. self.eps = 0 self.d = 0.2 class test_random_ball_compiled_periodic(ball_consistency): def distance(self, a, b, p): return distance_box(a, b, p, 1.0) def setup_method(self): n = 10000 m = 4 np.random.seed(1234) self.data = np.random.uniform(size=(n,m)) self.T = cKDTree(self.data,leafsize=2, boxsize=1) self.x = np.ones(m) * 0.1 self.p = 2. self.eps = 0 self.d = 0.2 def test_in_ball_outside(self): l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps) for i in l: assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps)) l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps) for i in l: assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps)) def test_found_all_outside(self): c = np.ones(self.T.n,dtype=bool) l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps) c[l] = False assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps))) l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps) c[l] = False assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps))) class test_random_ball_approx(test_random_ball): def setup_method(self): test_random_ball.setUp(self) self.eps = 0.1 class test_random_ball_approx_compiled(test_random_ball_compiled): def setup_method(self): test_random_ball_compiled.setUp(self) self.eps = 0.1 class test_random_ball_approx_compiled_periodic(test_random_ball_compiled_periodic): def setup_method(self): test_random_ball_compiled_periodic.setUp(self) self.eps = 0.1 class test_random_ball_far(test_random_ball): def setup_method(self): test_random_ball.setUp(self) self.d = 2. class test_random_ball_far_compiled(test_random_ball_compiled): def setup_method(self): test_random_ball_compiled.setUp(self) self.d = 2. class test_random_ball_far_compiled_periodic(test_random_ball_compiled_periodic): def setup_method(self): test_random_ball_compiled_periodic.setUp(self) self.d = 2. 
class test_random_ball_l1(test_random_ball): def setup_method(self): test_random_ball.setUp(self) self.p = 1 class test_random_ball_l1_compiled(test_random_ball_compiled): def setup_method(self): test_random_ball_compiled.setUp(self) self.p = 1 class test_random_ball_l1_compiled_periodic(test_random_ball_compiled_periodic): def setup_method(self): test_random_ball_compiled_periodic.setUp(self) self.p = 1 class test_random_ball_linf(test_random_ball): def setup_method(self): test_random_ball.setUp(self) self.p = np.inf class test_random_ball_linf_compiled_periodic(test_random_ball_compiled_periodic): def setup_method(self): test_random_ball_compiled_periodic.setUp(self) self.p = np.inf def test_random_ball_vectorized(): n = 20 m = 5 T = KDTree(np.random.randn(n,m)) r = T.query_ball_point(np.random.randn(2,3,m),1) assert_equal(r.shape,(2,3)) assert_(isinstance(r[0,0],list)) def test_random_ball_vectorized_compiled(): n = 20 m = 5 np.random.seed(1234) T = cKDTree(np.random.randn(n,m)) r = T.query_ball_point(np.random.randn(2,3,m),1) assert_equal(r.shape,(2,3)) assert_(isinstance(r[0,0],list)) def test_query_ball_point_multithreading(): np.random.seed(0) n = 5000 k = 2 points = np.random.randn(n,k) T = cKDTree(points) l1 = T.query_ball_point(points,0.003,n_jobs=1) l2 = T.query_ball_point(points,0.003,n_jobs=64) l3 = T.query_ball_point(points,0.003,n_jobs=-1) for i in range(n): if l1[i] or l2[i]: assert_array_equal(l1[i],l2[i]) for i in range(n): if l1[i] or l3[i]: assert_array_equal(l1[i],l3[i]) class two_trees_consistency: def distance(self, a, b, p): return minkowski_distance(a, b, p) def test_all_in_ball(self): r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps) for i, l in enumerate(r): for j in l: assert_(self.distance(self.data1[i],self.data2[j],self.p) <= self.d*(1.+self.eps)) def test_found_all(self): r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps) for i, l in enumerate(r): c = np.ones(self.T2.n,dtype=bool) c[l] = False assert_(np.all(self.distance(self.data2[c],self.data1[i],self.p) >= self.d/(1.+self.eps))) class test_two_random_trees(two_trees_consistency): def setup_method(self): n = 50 m = 4 np.random.seed(1234) self.data1 = np.random.randn(n,m) self.T1 = KDTree(self.data1,leafsize=2) self.data2 = np.random.randn(n,m) self.T2 = KDTree(self.data2,leafsize=2) self.p = 2. self.eps = 0 self.d = 0.2 class test_two_random_trees_compiled(two_trees_consistency): def setup_method(self): n = 50 m = 4 np.random.seed(1234) self.data1 = np.random.randn(n,m) self.T1 = cKDTree(self.data1,leafsize=2) self.data2 = np.random.randn(n,m) self.T2 = cKDTree(self.data2,leafsize=2) self.p = 2. self.eps = 0 self.d = 0.2 class test_two_random_trees_compiled_periodic(two_trees_consistency): def distance(self, a, b, p): return distance_box(a, b, p, 1.0) def setup_method(self): n = 50 m = 4 np.random.seed(1234) self.data1 = np.random.uniform(size=(n,m)) self.T1 = cKDTree(self.data1,leafsize=2, boxsize=1.0) self.data2 = np.random.uniform(size=(n,m)) self.T2 = cKDTree(self.data2,leafsize=2, boxsize=1.0) self.p = 2. 
self.eps = 0 self.d = 0.2 class test_two_random_trees_far(test_two_random_trees): def setup_method(self): test_two_random_trees.setUp(self) self.d = 2 class test_two_random_trees_far_compiled(test_two_random_trees_compiled): def setup_method(self): test_two_random_trees_compiled.setUp(self) self.d = 2 class test_two_random_trees_far_compiled_periodic(test_two_random_trees_compiled_periodic): def setup_method(self): test_two_random_trees_compiled_periodic.setUp(self) self.d = 2 class test_two_random_trees_linf(test_two_random_trees): def setup_method(self): test_two_random_trees.setUp(self) self.p = np.inf class test_two_random_trees_linf_compiled(test_two_random_trees_compiled): def setup_method(self): test_two_random_trees_compiled.setUp(self) self.p = np.inf class test_two_random_trees_linf_compiled_periodic(test_two_random_trees_compiled_periodic): def setup_method(self): test_two_random_trees_compiled_periodic.setUp(self) self.p = np.inf class test_rectangle: def setup_method(self): self.rect = Rectangle([0,0],[1,1]) def test_min_inside(self): assert_almost_equal(self.rect.min_distance_point([0.5,0.5]),0) def test_min_one_side(self): assert_almost_equal(self.rect.min_distance_point([0.5,1.5]),0.5) def test_min_two_sides(self): assert_almost_equal(self.rect.min_distance_point([2,2]),np.sqrt(2)) def test_max_inside(self): assert_almost_equal(self.rect.max_distance_point([0.5,0.5]),1/np.sqrt(2)) def test_max_one_side(self): assert_almost_equal(self.rect.max_distance_point([0.5,1.5]),np.hypot(0.5,1.5)) def test_max_two_sides(self): assert_almost_equal(self.rect.max_distance_point([2,2]),2*np.sqrt(2)) def test_split(self): less, greater = self.rect.split(0,0.1) assert_array_equal(less.maxes,[0.1,1]) assert_array_equal(less.mins,[0,0]) assert_array_equal(greater.maxes,[1,1]) assert_array_equal(greater.mins,[0.1,0]) def test_distance_l2(): assert_almost_equal(minkowski_distance([0,0],[1,1],2),np.sqrt(2)) def test_distance_l1(): assert_almost_equal(minkowski_distance([0,0],[1,1],1),2) def test_distance_linf(): assert_almost_equal(minkowski_distance([0,0],[1,1],np.inf),1) def test_distance_vectorization(): np.random.seed(1234) x = np.random.randn(10,1,3) y = np.random.randn(1,7,3) assert_equal(minkowski_distance(x,y).shape,(10,7)) class count_neighbors_consistency: def test_one_radius(self): r = 0.2 assert_equal(self.T1.count_neighbors(self.T2, r), np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)])) def test_large_radius(self): r = 1000 assert_equal(self.T1.count_neighbors(self.T2, r), np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)])) def test_multiple_radius(self): rs = np.exp(np.linspace(np.log(0.01),np.log(10),3)) results = self.T1.count_neighbors(self.T2, rs) assert_(np.all(np.diff(results) >= 0)) for r,result in zip(rs, results): assert_equal(self.T1.count_neighbors(self.T2, r), result) class test_count_neighbors(count_neighbors_consistency): def setup_method(self): n = 50 m = 2 np.random.seed(1234) self.T1 = KDTree(np.random.randn(n,m),leafsize=2) self.T2 = KDTree(np.random.randn(n,m),leafsize=2) class test_count_neighbors_compiled(count_neighbors_consistency): def setup_method(self): n = 50 m = 2 np.random.seed(1234) self.T1 = cKDTree(np.random.randn(n,m),leafsize=2) self.T2 = cKDTree(np.random.randn(n,m),leafsize=2) class sparse_distance_matrix_consistency: def distance(self, a, b, p): return minkowski_distance(a, b, p) def test_consistency_with_neighbors(self): M = self.T1.sparse_distance_matrix(self.T2, self.r) r = self.T1.query_ball_tree(self.T2, self.r) for i,l 
in enumerate(r): for j in l: assert_almost_equal(M[i,j], self.distance(self.T1.data[i], self.T2.data[j], self.p), decimal=14) for ((i,j),d) in M.items(): assert_(j in r[i]) def test_zero_distance(self): # raises an exception for bug 870 (FIXME: Does it?) self.T1.sparse_distance_matrix(self.T1, self.r) class test_sparse_distance_matrix(sparse_distance_matrix_consistency): def setup_method(self): n = 50 m = 4 np.random.seed(1234) data1 = np.random.randn(n,m) data2 = np.random.randn(n,m) self.T1 = cKDTree(data1,leafsize=2) self.T2 = cKDTree(data2,leafsize=2) self.r = 0.5 self.p = 2 self.data1 = data1 self.data2 = data2 self.n = n self.m = m class test_sparse_distance_matrix_compiled(sparse_distance_matrix_consistency): def setup_method(self): n = 50 m = 4 np.random.seed(0) data1 = np.random.randn(n,m) data2 = np.random.randn(n,m) self.T1 = cKDTree(data1,leafsize=2) self.T2 = cKDTree(data2,leafsize=2) self.ref_T1 = KDTree(data1, leafsize=2) self.ref_T2 = KDTree(data2, leafsize=2) self.r = 0.5 self.n = n self.m = m self.data1 = data1 self.data2 = data2 self.p = 2 def test_consistency_with_python(self): M1 = self.T1.sparse_distance_matrix(self.T2, self.r) M2 = self.ref_T1.sparse_distance_matrix(self.ref_T2, self.r) assert_array_almost_equal(M1.todense(), M2.todense(), decimal=14) def test_against_logic_error_regression(self): # regression test for gh-5077 logic error np.random.seed(0) too_many = np.array(np.random.randn(18, 2), dtype=int) tree = cKDTree(too_many, balanced_tree=False, compact_nodes=False) d = tree.sparse_distance_matrix(tree, 3).todense() assert_array_almost_equal(d, d.T, decimal=14) def test_ckdtree_return_types(self): # brute-force reference ref = np.zeros((self.n,self.n)) for i in range(self.n): for j in range(self.n): v = self.data1[i,:] - self.data2[j,:] ref[i,j] = np.dot(v,v) ref = np.sqrt(ref) ref[ref > self.r] = 0. 
# test return type 'dict' dist = np.zeros((self.n,self.n)) r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='dict') for i,j in r.keys(): dist[i,j] = r[(i,j)] assert_array_almost_equal(ref, dist, decimal=14) # test return type 'ndarray' dist = np.zeros((self.n,self.n)) r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='ndarray') for k in range(r.shape[0]): i = r['i'][k] j = r['j'][k] v = r['v'][k] dist[i,j] = v assert_array_almost_equal(ref, dist, decimal=14) # test return type 'dok_matrix' r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='dok_matrix') assert_array_almost_equal(ref, r.todense(), decimal=14) # test return type 'coo_matrix' r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='coo_matrix') assert_array_almost_equal(ref, r.todense(), decimal=14) def test_distance_matrix(): m = 10 n = 11 k = 4 np.random.seed(1234) xs = np.random.randn(m,k) ys = np.random.randn(n,k) ds = distance_matrix(xs,ys) assert_equal(ds.shape, (m,n)) for i in range(m): for j in range(n): assert_almost_equal(minkowski_distance(xs[i],ys[j]),ds[i,j]) def test_distance_matrix_looping(): m = 10 n = 11 k = 4 np.random.seed(1234) xs = np.random.randn(m,k) ys = np.random.randn(n,k) ds = distance_matrix(xs,ys) dsl = distance_matrix(xs,ys,threshold=1) assert_equal(ds,dsl) def check_onetree_query(T,d): r = T.query_ball_tree(T, d) s = set() for i, l in enumerate(r): for j in l: if i < j: s.add((i,j)) assert_(s == T.query_pairs(d)) def test_onetree_query(): np.random.seed(0) n = 50 k = 4 points = np.random.randn(n,k) T = KDTree(points) yield check_onetree_query, T, 0.1 points = np.random.randn(3*n,k) points[:n] *= 0.001 points[n:2*n] += 2 T = KDTree(points) yield check_onetree_query, T, 0.1 yield check_onetree_query, T, 0.001 yield check_onetree_query, T, 0.00001 yield check_onetree_query, T, 1e-6 def test_onetree_query_compiled(): np.random.seed(0) n = 100 k = 4 points = np.random.randn(n,k) T = cKDTree(points) yield check_onetree_query, T, 0.1 points = np.random.randn(3*n,k) points[:n] *= 0.001 points[n:2*n] += 2 T = cKDTree(points) yield check_onetree_query, T, 0.1 yield check_onetree_query, T, 0.001 yield check_onetree_query, T, 0.00001 yield check_onetree_query, T, 1e-6 def test_query_pairs_single_node(): tree = KDTree([[0, 1]]) assert_equal(tree.query_pairs(0.5), set()) def test_query_pairs_single_node_compiled(): tree = cKDTree([[0, 1]]) assert_equal(tree.query_pairs(0.5), set()) def test_ckdtree_query_pairs(): np.random.seed(0) n = 50 k = 2 r = 0.1 r2 = r**2 points = np.random.randn(n,k) T = cKDTree(points) # brute force reference brute = set() for i in range(n): for j in range(i+1,n): v = points[i,:] - points[j,:] if np.dot(v,v) <= r2: brute.add((i,j)) l0 = sorted(brute) # test default return type s = T.query_pairs(r) l1 = sorted(s) assert_array_equal(l0,l1) # test return type 'set' s = T.query_pairs(r, output_type='set') l1 = sorted(s) assert_array_equal(l0,l1) # test return type 'ndarray' s = set() arr = T.query_pairs(r, output_type='ndarray') for i in range(arr.shape[0]): s.add((int(arr[i,0]),int(arr[i,1]))) l2 = sorted(s) assert_array_equal(l0,l2) def test_ball_point_ints(): # Regression test for #1373. 
x, y = np.mgrid[0:4, 0:4] points = list(zip(x.ravel(), y.ravel())) tree = KDTree(points) assert_equal(sorted([4, 8, 9, 12]), sorted(tree.query_ball_point((2, 0), 1))) points = np.asarray(points, dtype=float) tree = KDTree(points) assert_equal(sorted([4, 8, 9, 12]), sorted(tree.query_ball_point((2, 0), 1))) def test_kdtree_comparisons(): # Regression test: node comparisons were done wrong in 0.12 w/Py3. nodes = [KDTree.node() for _ in range(3)] assert_equal(sorted(nodes), sorted(nodes[::-1])) def test_ckdtree_build_modes(): # check if different build modes for cKDTree give # similar query results np.random.seed(0) n = 5000 k = 4 points = np.random.randn(n, k) T1 = cKDTree(points).query(points, k=5)[-1] T2 = cKDTree(points, compact_nodes=False).query(points, k=5)[-1] T3 = cKDTree(points, balanced_tree=False).query(points, k=5)[-1] T4 = cKDTree(points, compact_nodes=False, balanced_tree=False).query(points, k=5)[-1] assert_array_equal(T1, T2) assert_array_equal(T1, T3) assert_array_equal(T1, T4) def test_ckdtree_pickle(): # test if it is possible to pickle # a cKDTree try: import cPickle as pickle except ImportError: import pickle np.random.seed(0) n = 50 k = 4 points = np.random.randn(n, k) T1 = cKDTree(points) tmp = pickle.dumps(T1) T2 = pickle.loads(tmp) T1 = T1.query(points, k=5)[-1] T2 = T2.query(points, k=5)[-1] assert_array_equal(T1, T2) def test_ckdtree_pickle_boxsize(): # test if it is possible to pickle a periodic # cKDTree try: import cPickle as pickle except ImportError: import pickle np.random.seed(0) n = 50 k = 4 points = np.random.uniform(size=(n, k)) T1 = cKDTree(points, boxsize=1.0) tmp = pickle.dumps(T1) T2 = pickle.loads(tmp) T1 = T1.query(points, k=5)[-1] T2 = T2.query(points, k=5)[-1] assert_array_equal(T1, T2) def test_ckdtree_copy_data(): # check if copy_data=True makes the kd-tree # impervious to data corruption by modification of # the data arrray np.random.seed(0) n = 5000 k = 4 points = np.random.randn(n, k) T = cKDTree(points, copy_data=True) q = points.copy() T1 = T.query(q, k=5)[-1] points[...] = np.random.randn(n, k) T2 = T.query(q, k=5)[-1] assert_array_equal(T1, T2) def test_ckdtree_parallel(): # check if parallel=True also generates correct # query results np.random.seed(0) n = 5000 k = 4 points = np.random.randn(n, k) T = cKDTree(points) T1 = T.query(points, k=5, n_jobs=64)[-1] T2 = T.query(points, k=5, n_jobs=-1)[-1] T3 = T.query(points, k=5)[-1] assert_array_equal(T1, T2) assert_array_equal(T1, T3) def test_ckdtree_view(): # Check that the nodes can be correctly viewed from Python. # This test also sanity checks each node in the cKDTree, and # thus verifies the internal structure of the kd-tree. 
np.random.seed(0) n = 100 k = 4 points = np.random.randn(n, k) kdtree = cKDTree(points) # walk the whole kd-tree and sanity check each node def recurse_tree(n): assert_(isinstance(n, cKDTreeNode)) if n.split_dim == -1: assert_(n.lesser is None) assert_(n.greater is None) assert_(n.indices.shape[0] <= kdtree.leafsize) else: recurse_tree(n.lesser) recurse_tree(n.greater) x = n.lesser.data_points[:, n.split_dim] y = n.greater.data_points[:, n.split_dim] assert_(x.max() < y.min()) recurse_tree(kdtree.tree) # check that indices are correctly retreived n = kdtree.tree assert_array_equal(np.sort(n.indices), range(100)) # check that data_points are correctly retreived assert_array_equal(kdtree.data[n.indices, :], n.data_points) # cKDTree is specialized to type double points, so no need to make # a unit test corresponding to test_ball_point_ints() def test_ckdtree_list_k(): # check ckdtree periodic boundary n = 200 m = 2 klist = [1, 2, 3] kint = 3 np.random.seed(1234) data = np.random.uniform(size=(n, m)) kdtree = cKDTree(data, leafsize=1) # check agreement between arange(1,k+1) and k dd, ii = kdtree.query(data, klist) dd1, ii1 = kdtree.query(data, kint) assert_equal(dd, dd1) assert_equal(ii, ii1) # now check skipping one element klist = np.array([1, 3]) kint = 3 dd, ii = kdtree.query(data, kint) dd1, ii1 = kdtree.query(data, klist) assert_equal(dd1, dd[..., klist - 1]) assert_equal(ii1, ii[..., klist - 1]) # check k == 1 special case # and k == [1] non-special case dd, ii = kdtree.query(data, 1) dd1, ii1 = kdtree.query(data, [1]) assert_equal(len(dd.shape), 1) assert_equal(len(dd1.shape), 2) assert_equal(dd, np.ravel(dd1)) assert_equal(ii, np.ravel(ii1)) def test_ckdtree_box(): # check ckdtree periodic boundary n = 2000 m = 3 k = 3 np.random.seed(1234) data = np.random.uniform(size=(n, m)) kdtree = cKDTree(data, leafsize=1, boxsize=1.0) # use the standard python KDTree for the simulated periodic box kdtree2 = cKDTree(data, leafsize=1) for p in [1, 2, 3.0, np.inf]: dd, ii = kdtree.query(data, k, p=p) dd1, ii1 = kdtree.query(data + 1.0, k, p=p) assert_almost_equal(dd, dd1) assert_equal(ii, ii1) dd1, ii1 = kdtree.query(data - 1.0, k, p=p) assert_almost_equal(dd, dd1) assert_equal(ii, ii1) dd2, ii2 = simulate_periodic_box(kdtree2, data, k, boxsize=1.0, p=p) assert_almost_equal(dd, dd2) assert_equal(ii, ii2) def test_ckdtree_box_0boxsize(): # check ckdtree periodic boundary that mimics non-periodic n = 2000 m = 2 k = 3 np.random.seed(1234) data = np.random.uniform(size=(n, m)) kdtree = cKDTree(data, leafsize=1, boxsize=0.0) # use the standard python KDTree for the simulated periodic box kdtree2 = cKDTree(data, leafsize=1) for p in [1, 2, np.inf]: dd, ii = kdtree.query(data, k, p=p) dd1, ii1 = kdtree2.query(data, k, p=p) assert_almost_equal(dd, dd1) assert_equal(ii, ii1) def test_ckdtree_box_upper_bounds(): data = np.linspace(0, 2, 10).reshape(-1, 2) data[:, 1] += 10 assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=1.0) assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=(0.0, 2.0)) # skip a dimension. 
cKDTree(data, leafsize=1, boxsize=(2.0, 0.0)) def test_ckdtree_box_lower_bounds(): data = np.linspace(-1, 1, 10) assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=1.0) def simulate_periodic_box(kdtree, data, k, boxsize, p): dd = [] ii = [] x = np.arange(3 ** data.shape[1]) nn = np.array(np.unravel_index(x, [3] * data.shape[1])).T nn = nn - 1.0 for n in nn: image = data + n * 1.0 * boxsize dd2, ii2 = kdtree.query(image, k, p=p) dd2 = dd2.reshape(-1, k) ii2 = ii2.reshape(-1, k) dd.append(dd2) ii.append(ii2) dd = np.concatenate(dd, axis=-1) ii = np.concatenate(ii, axis=-1) result = np.empty([len(data), len(nn) * k], dtype=[ ('ii', 'i8'), ('dd', 'f8')]) result['ii'][:] = ii result['dd'][:] = dd result.sort(order='dd') return result['dd'][:, :k], result['ii'][:,:k] def test_ckdtree_memuse(): # unit test adaptation of gh-5630 # NOTE: this will fail when run via valgrind, # because rss is no longer a reliable memory usage indicator. try: import resource except ImportError: # resource is not available on Windows with Python 2.6 return # Make some data dx, dy = 0.05, 0.05 y, x = np.mgrid[slice(1, 5 + dy, dy), slice(1, 5 + dx, dx)] z = np.sin(x)**10 + np.cos(10 + y*x) * np.cos(x) z_copy = np.empty_like(z) z_copy[:] = z # Place FILLVAL in z_copy at random number of random locations FILLVAL = 99. mask = np.random.randint(0, z.size, np.random.randint(50) + 5) z_copy.flat[mask] = FILLVAL igood = np.vstack(np.where(x != FILLVAL)).T ibad = np.vstack(np.where(x == FILLVAL)).T mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss # burn-in for i in range(10): tree = cKDTree(igood) # count memleaks while constructing and querying cKDTree num_leaks = 0 for i in range(100): mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss tree = cKDTree(igood) dist, iquery = tree.query(ibad, k=4, p=2) new_mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss if new_mem_use > mem_use: num_leaks += 1 # ideally zero leaks, but errors might accidentally happen # outside cKDTree assert_(num_leaks < 10) def test_ckdtree_weights(): data = np.linspace(0, 1, 4).reshape(-1, 1) tree1 = cKDTree(data, leafsize=1) weights = np.ones(len(data), dtype='f4') nw = tree1._build_weights(weights) assert_array_equal(nw, [4, 2, 1, 1, 2, 1, 1]) assert_raises(ValueError, tree1._build_weights, weights[:-1]) for i in range(10): # since weights are uniform, these shall agree: c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, i)) c2 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), weights=(weights, weights)) c3 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), weights=(weights, None)) c4 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), weights=(None, weights)) c5 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), weights=weights) assert_array_equal(c1, c2) assert_array_equal(c1, c3) assert_array_equal(c1, c4) for i in range(len(data)): # this tests removal of one data point by setting weight to 0 w1 = weights.copy() w1[i] = 0 data2 = data[w1 != 0] w2 = weights[w1 != 0] tree2 = cKDTree(data2) c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, 100), weights=(w1, w1)) # "c2 is correct" c2 = tree2.count_neighbors(tree2, np.linspace(0, 10, 100)) assert_array_equal(c1, c2) #this asserts for two different trees, singular weights # crashes assert_raises(ValueError, tree1.count_neighbors, tree2, np.linspace(0, 10, 100), weights=w1) def test_ckdtree_count_neighbous_multiple_r(): n = 2000 m = 2 np.random.seed(1234) data = np.random.normal(size=(n, m)) kdtree = cKDTree(data, leafsize=1) r0 = [0, 0.01, 0.01, 
0.02, 0.05] i0 = np.arange(len(r0)) n0 = kdtree.count_neighbors(kdtree, r0) nnc = kdtree.count_neighbors(kdtree, r0, cumulative=False) assert_equal(n0, nnc.cumsum()) for i, r in zip(itertools.permutations(i0), itertools.permutations(r0)): # permute n0 by i and it shall agree n = kdtree.count_neighbors(kdtree, r) assert_array_equal(n, n0[list(i)]) def test_len0_arrays(): # make sure len-0 arrays are handled correctly # in range queries (gh-5639) np.random.seed(1234) X = np.random.rand(10,2) Y = np.random.rand(10,2) tree = cKDTree(X) # query_ball_point (single) d,i = tree.query([.5, .5], k=1) z = tree.query_ball_point([.5, .5], 0.1*d) assert_array_equal(z, []) # query_ball_point (multiple) d,i = tree.query(Y, k=1) mind = d.min() z = tree.query_ball_point(Y, 0.1*mind) y = np.empty(shape=(10,), dtype=object) y.fill([]) assert_array_equal(y, z) # query_ball_tree other = cKDTree(Y) y = tree.query_ball_tree(other, 0.1*mind) assert_array_equal(10*[[]], y) # count_neighbors y = tree.count_neighbors(other, 0.1*mind) assert_(y == 0) # sparse_distance_matrix y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dok_matrix') assert_array_equal(y == np.zeros((10,10)), True) y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='coo_matrix') assert_array_equal(y == np.zeros((10,10)), True) y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dict') assert_equal(y, {}) y = tree.sparse_distance_matrix(other,0.1*mind, output_type='ndarray') _dtype = [('i',np.intp), ('j',np.intp), ('v',np.float64)] res_dtype = np.dtype(_dtype, align=True) z = np.empty(shape=(0,), dtype=res_dtype) assert_array_equal(y, z) # query_pairs d,i = tree.query(X, k=2) mind = d[:,-1].min() y = tree.query_pairs(0.1*mind, output_type='set') assert_equal(y, set()) y = tree.query_pairs(0.1*mind, output_type='ndarray') z = np.empty(shape=(0,2), dtype=np.intp) assert_array_equal(y, z) def test_ckdtree_duplicated_inputs(): # check ckdtree with duplicated inputs n = 1024 for m in range(1, 8): data = np.concatenate([ np.ones((n // 2, m)) * 1, np.ones((n // 2, m)) * 2], axis=0) # it shall not divide more than 3 nodes. # root left (1), and right (2) kdtree = cKDTree(data, leafsize=1) assert_equal(kdtree.size, 3) kdtree = cKDTree(data) assert_equal(kdtree.size, 3) # if compact_nodes are disabled, the number # of nodes is n (per leaf) + (m - 1)* 2 (splits per dimension) + 1 # and the root kdtree = cKDTree(data, compact_nodes=False, leafsize=1) assert_equal(kdtree.size, n + m * 2 - 1) def test_ckdtree_noncumulative_nondecreasing(): # check ckdtree with duplicated inputs # it shall not divide more than 3 nodes. # root left (1), and right (2) kdtree = cKDTree([[0]], leafsize=1) assert_raises(ValueError, kdtree.count_neighbors, kdtree, [0.1, 0], cumulative=False) def test_short_knn(): # The test case is based on github: #6425 by @SteveDoyle2 xyz = np.array([ [0., 0., 0.], [1.01, 0., 0.], [0., 1., 0.], [0., 1.01, 0.], [1., 0., 0.], [1., 1., 0.],], dtype='float64') ckdt = cKDTree(xyz) deq, ieq = ckdt.query(xyz, k=4, distance_upper_bound=0.2) assert_array_almost_equal(deq, [[0., np.inf, np.inf, np.inf], [0., 0.01, np.inf, np.inf], [0., 0.01, np.inf, np.inf], [0., 0.01, np.inf, np.inf], [0., 0.01, np.inf, np.inf], [0., np.inf, np.inf, np.inf]])
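# ---------------------------------------------------------------------------
# Illustrative sketch of the periodic-distance invariant the boxsize tests
# above rely on: for points in a unit periodic box, cKDTree(boxsize=1.0)
# should report the same nearest-neighbor distances as the brute-force
# wrapped metric in distance_box. A small standalone check, not an
# additional test in the suite.
# ---------------------------------------------------------------------------
def _sketch_periodic_invariant(n=20, m=2, seed=42):
    rng = np.random.RandomState(seed)
    pts = rng.uniform(size=(n, m))
    tree = cKDTree(pts, boxsize=1.0)
    d_tree, i_tree = tree.query(pts, k=2)
    # compare the distance to the reported neighbor against the wrapped metric
    for k in range(n):
        j = i_tree[k, 1]
        assert abs(distance_box(pts[k], pts[j], 2, 1.0) - d_tree[k, 1]) < 1e-12
    return d_tree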
bsd-3-clause
5,765,639,288,397,933,000
30.055556
102
0.572282
false
2.921328
true
false
false
cfc603/django-twilio-sms-models
django_twilio_sms/models.py
1
12882
# -*- coding: utf-8 -*- from __future__ import unicode_literals import time from django.conf import settings from django.db import models from django.utils.encoding import python_2_unicode_compatible from django_twilio.client import twilio_client from django_twilio.models import Caller from twilio.rest.exceptions import TwilioRestException from .signals import response_message, unsubscribe_signal from .utils import AbsoluteURI # Abstract Models class CreatedUpdated(models.Model): date_created = models.DateTimeField(auto_now_add=True) date_updated = models.DateTimeField(auto_now=True) class Meta: abstract = True @python_2_unicode_compatible class Sid(CreatedUpdated): sid = models.CharField(max_length=34, primary_key=True) def __str__(self): return '{}'.format(self.sid) class Meta: abstract = True # Message Model ForeignKeys class Account(Sid): # account type choices TRIAL = 0 FULL = 1 ACCOUNT_TYPE_CHOICES = ( (TRIAL, 'Trial'), (FULL, 'Full'), ) # status choices ACTIVE = 0 SUSPENDED = 1 CLOSED = 2 STATUS_CHOICES = ( (ACTIVE, 'active'), (SUSPENDED, 'suspended'), (CLOSED, 'closed'), ) friendly_name = models.CharField(max_length=64) account_type = models.PositiveSmallIntegerField( choices=ACCOUNT_TYPE_CHOICES ) status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES) owner_account_sid = models.ForeignKey('self', null=True) @classmethod def get_account_type_choice(cls, account_type_display): for choice in cls.ACCOUNT_TYPE_CHOICES: if account_type_display == choice[1]: return choice[0] @classmethod def get_status_choice(cls, status_display): for choice in cls.STATUS_CHOICES: if status_display == choice[1]: return choice[0] @classmethod def get_or_create(cls, account_sid=None, account=None): if not account_sid: account_sid = account.sid try: return cls.objects.get(sid=account_sid) except cls.DoesNotExist: account_obj = cls(sid=account_sid) account_obj.sync_twilio_account(account) return account_obj @property def twilio_account(self): return twilio_client.accounts.get(self.sid) def sync_twilio_account(self, account=None): if not account: account = self.twilio_account self.friendly_name = account.friendly_name self.account_type = self.get_account_type_choice(account.type) self.status = self.get_status_choice(account.status) if account.sid != account.owner_account_sid: self.owner_account_sid = Account.get_or_create( account.owner_account_sid ) self.save() @python_2_unicode_compatible class ApiVersion(models.Model): date = models.DateField(unique=True) def __str__(self): return '{}'.format(self.date) @classmethod def get_or_create(cls, message_date): api_version, created = cls.objects.get_or_create( date=message_date ) return api_version @python_2_unicode_compatible class Currency(models.Model): code = models.CharField(max_length=3, primary_key=True) def __str__(self): return '{}'.format(self.code) @classmethod def get_or_create(cls, message_price_unit): currency, created = cls.objects.get_or_create(code=message_price_unit) return currency @python_2_unicode_compatible class Error(models.Model): code = models.CharField(max_length=5, primary_key=True) message = models.CharField(max_length=255) def __str__(self): return '{}'.format(self.code) @classmethod def get_or_create(cls, message_error_code, message_error_message): error, created = cls.objects.get_or_create( code=message_error_code, defaults={'message': message_error_message} ) return error class MessagingService(Sid): pass @classmethod def get_or_create(cls, messaging_service_sid): messaging_service, created = cls.objects.get_or_create( 
sid=messaging_service_sid ) return messaging_service @python_2_unicode_compatible class PhoneNumber(CreatedUpdated): caller = models.OneToOneField(Caller) unsubscribed = models.BooleanField(default=False) def __str__(self): return '{}'.format(self.caller.phone_number) @classmethod def get_or_create(cls, phone_number, unsubscribed=False): if isinstance(phone_number, cls): return phone_number caller, created = Caller.objects.get_or_create( phone_number=phone_number ) phone_number_obj, create = cls.objects.get_or_create( caller=caller, defaults={'unsubscribed': unsubscribed} ) return phone_number_obj @property def as_e164(self): return self.caller.phone_number.as_e164 def subscribe(self): self.unsubscribed = False self.save() def unsubscribe(self): self.unsubscribed = True self.save() class Message(Sid): # status choices ACCEPTED = 0 QUEUED = 1 SENDING = 2 SENT = 3 RECEIVING = 4 RECEIVED = 5 DELIVERED = 6 UNDELIVERED = 7 FAILED = 8 UNKNOWN = 9 STATUS_CHOICES = ( (ACCEPTED, 'accepted'), (QUEUED, 'queued'), (SENDING, 'sending'), (SENT, 'sent'), (RECEIVING, 'receiving'), (RECEIVED, 'received'), (DELIVERED, 'delivered'), (UNDELIVERED, 'undelivered'), (FAILED, 'failed'), ) # direction choices INBOUND = 0 OUTBOUND_API = 1 OUTBOUND_CALL = 2 OUTBOUND_REPLY = 3 DIRECTION_CHOICES = ( (INBOUND, 'inbound'), (OUTBOUND_API, 'outbound-api'), (OUTBOUND_CALL, 'outbound-call'), (OUTBOUND_REPLY, 'outbound-reply'), ) UNSUBSCRIBE_MESSAGES = [ 'STOP', 'STOPALL', 'UNSUBSCRIBE', 'CANCEL', 'END', 'QUIT' ] SUBSCRIBE_MESSAGES = ['START', 'YES'] date_sent = models.DateTimeField(null=True) account = models.ForeignKey(Account) messaging_service = models.ForeignKey(MessagingService, null=True) from_phone_number = models.ForeignKey(PhoneNumber, related_name='to_phone') to_phone_number = models.ForeignKey(PhoneNumber, related_name='from_phone') body = models.CharField(max_length=160) num_media = models.PositiveSmallIntegerField() num_segments = models.PositiveSmallIntegerField() status = models.PositiveSmallIntegerField( choices=STATUS_CHOICES, default=QUEUED ) error = models.ForeignKey(Error, null=True, related_name='error') direction = models.PositiveSmallIntegerField(choices=DIRECTION_CHOICES) price = models.DecimalField(max_digits=6, decimal_places=5) currency = models.ForeignKey(Currency) api_version = models.ForeignKey(ApiVersion) @classmethod def get_direction_choice(cls, direction_display): for choice in cls.DIRECTION_CHOICES: if direction_display == choice[1]: return choice[0] @classmethod def get_status_choice(cls, status_display): for choice in cls.STATUS_CHOICES: if status_display == choice[1]: return choice[0] @classmethod def get_or_create(cls, message_sid=None, message=None): if not message_sid: message_sid = message.sid try: return (cls.objects.get(sid=message_sid), False) except cls.DoesNotExist: message_obj = cls(sid=message_sid) message_obj.sync_twilio_message(message) return (message_obj, True) @classmethod def send_message(cls, body, to, from_=settings.TWILIO_DEFAULT_CALLERID): to_phone_number = PhoneNumber.get_or_create(to) from_phone_number = PhoneNumber.get_or_create(from_) twilio_message = twilio_client.messages.create( body=body, to=to_phone_number.as_e164, from_=from_phone_number.as_e164, status_callback=cls.get_status_callback() ) return cls.get_or_create(message=twilio_message) @property def twilio_message(self): max_retries = getattr(settings, 'DJANGO_TWILIO_SMS_MAX_RETRIES', 5) retry_sleep = getattr(settings, 'DJANGO_TWILIO_SMS_RETRY_SLEEP', .5) retries = 0 while True: try: return 
twilio_client.messages.get(self.sid) except TwilioRestException: if retries < max_retries: time.sleep(retry_sleep) retries = retries + 1 else: raise @staticmethod def get_status_callback(): absolute_uri = AbsoluteURI('django_twilio_sms', 'callback_view') return absolute_uri.get_absolute_uri() def check_for_subscription_message(self): if self.direction is self.INBOUND: body = self.body.upper().strip() if body in self.UNSUBSCRIBE_MESSAGES: self.from_phone_number.unsubscribe() unsubscribe_signal.send_robust( sender=self.__class__, message=self, unsubscribed=True ) elif body in self.SUBSCRIBE_MESSAGES: self.from_phone_number.subscribe() unsubscribe_signal.send_robust( sender=self.__class__, message=self, unsubscribed=False ) def send_response_message(self): if self.direction is self.INBOUND: if not self.from_phone_number.unsubscribed: action = Action.get_action(self.body) Message.send_message( body=action.get_active_response().body, to=self.from_phone_number, from_=self.to_phone_number ) response_message.send_robust( sender=self.__class__, action=action, message=self ) def sync_twilio_message(self, message=None): if not message: message = self.twilio_message self.date_sent = message.date_sent self.account = Account.get_or_create(message.account_sid) if message.messaging_service_sid: self.messaging_service = MessagingService.get_or_create( message.messaging_service_sid ) self.num_media = message.num_media self.num_segments = message.num_segments if message.status: self.status = self.get_status_choice(message.status) else: self.status = self.UNKNOWN if message.error_code: self.error = Error.get_or_create( message.error_code, message.error_message ) self.direction = self.get_direction_choice(message.direction) self.price = message.price or '0.0' self.currency = Currency.get_or_create(message.price_unit) self.api_version = ApiVersion.get_or_create(message.api_version) self.from_phone_number = PhoneNumber.get_or_create(message.from_) self.to_phone_number = PhoneNumber.get_or_create(message.to) self.body = message.body self.check_for_subscription_message() self.save() @python_2_unicode_compatible class Action(CreatedUpdated): name = models.CharField(max_length=50, unique=True) active = models.BooleanField(default=True) def __str__(self): return '{}'.format(self.name) @classmethod def get_action(cls, message_body): try: return cls.objects.get( name=message_body.strip().upper(), active=True ) except cls.DoesNotExist: return cls.objects.get(name='UNKNOWN', active=True) def get_active_response(self): return self.response_set.filter(active=True)[0] def save(self, *args, **kwargs): self.name = self.name.upper() super(Action, self).save(*args, **kwargs) @python_2_unicode_compatible class Response(CreatedUpdated): body = models.CharField(max_length=160) action = models.ForeignKey(Action) active = models.BooleanField(default=True) def __str__(self): return 'Response for {}'.format(self.action) def save(self, *args, **kwargs): if self.active: try: current = Response.objects.get(action=self.action, active=True) if self != current: current.active = False current.save() except Response.DoesNotExist: pass super(Response, self).save(*args, **kwargs)
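# ---------------------------------------------------------------------------
# Illustrative sketch of the retry pattern used by the twilio_message
# property above, extracted into a standalone helper (hypothetical, not part
# of this app's API): retry a callable a bounded number of times, sleeping
# between attempts, and re-raise once the budget is exhausted.
# ---------------------------------------------------------------------------
def _sketch_retry(func, exceptions=(TwilioRestException,), max_retries=5,
                  retry_sleep=.5):
    retries = 0
    while True:
        try:
            return func()
        except exceptions:
            if retries < max_retries:
                time.sleep(retry_sleep)
                retries += 1
            else:
                raise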
bsd-3-clause
5,583,914,624,737,663,000
28.682028
79
0.613957
false
3.925046
false
false
false
totalgood/twote
twote/models_calendar.py
1
1157
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError


class Event(models.Model):
    '''
    This model represents a one-time event
    '''
    title = models.CharField(max_length=255)
    description = models.TextField()
    start = models.DateTimeField()
    end = models.DateTimeField(
        blank=True,
        # validators=[validate_after]
    )  # TODO in view, make logic that end time must be later than start time.
    location = models.CharField(max_length=100)
    creator = models.ForeignKey(User, null=True)
    created = models.DateTimeField(auto_now_add=True)
    last_updated = models.DateTimeField(auto_now=True)

    def save(self, *args, **kwargs):
        if not self.end:
            self.end = self.start + timezone.timedelta(hours=1)
        # validate before writing, so an invalid row is never persisted
        if self.end - self.start < timezone.timedelta(0):
            raise ValidationError(
                'end time must occur after start time; end is currently {} '
                'before start'.format(self.start - self.end))
        super(Event, self).save(*args, **kwargs)

    def __str__(self):
        return self.title
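# ---------------------------------------------------------------------------
# Illustrative sketch of the save() logic above, in plain datetimes so it can
# be tried without a database: default a missing end to start + 1 hour, and
# reject an end that precedes the start.
# ---------------------------------------------------------------------------
def _sketch_resolve_end(start, end=None):
    from datetime import timedelta
    if end is None:
        end = start + timedelta(hours=1)
    if end - start < timedelta(0):
        raise ValueError('end time must occur after start time')
    return end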
mit
-7,334,137,260,139,386,000
34.060606
130
0.675886
false
4.045455
false
false
false
compmem/ptsa
ptsa/data/hdf5wrapper.py
1
10253
#emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
#   See the COPYING file distributed along with the PTSA package for the
#   copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##

# global imports
import numpy as np
import h5py

# local imports
from basewrapper import BaseWrapper
from timeseries import TimeSeries


class HDF5Wrapper(BaseWrapper):
    """
    Interface to data stored in an HDF5 file.
    """
    def __init__(self, filepath, dataset_name='data',
                 annotations_name='annotations',
                 channel_info_name='channel_info',
                 data=None, file_dtype=None, apply_gain=True, gain_buffer=.005,
                 samplerate=None, nchannels=None, nsamples=None,
                 annotations=None, channel_info=None, **hdf5opts):
        """
        Initialize the interface to the data.  Much documentation is
        needed here.

        For example, here is one way to create an HDF5 dataset from a
        TimeSeries instance:

        HDF5Wrapper('data.hdf5', data=data, compression='gzip')

        Now let's say the TimeSeries is float64, but you want to save
        space (and lose significant digits), you can specify a
        file_dtype, which will apply a gain factor to ensure you retain
        as much data accuracy as possible.  Here's how you can save the
        data in int16:

        HDF5Wrapper('data.hdf5', data=data, file_dtype=np.int16,
                    compression='gzip')
        """
        # set up the basic params of the data
        self.filepath = filepath
        self.dataset_name = dataset_name
        self.annotations_name = annotations_name
        self.channel_info_name = channel_info_name
        self.apply_gain = apply_gain
        self.gain_buffer = gain_buffer
        self.gain = None
        self.hdf5opts = hdf5opts
        self.file_dtype = file_dtype
        self.data_dtype = None

        # see if create dataset
        if data is not None:
            # must provide samplerate and data
            # connect to the file and get the dataset
            f = h5py.File(self.filepath, 'a')

            # use the data to create a dataset
            self.data_dtype = data.dtype
            d = f.create_dataset(self.dataset_name,
                                 data=self._data_to_file(data),
                                 **hdf5opts)
            d.attrs['data_dtype'] = data.dtype.char
            d.attrs['gain'] = self.gain
            if 'samplerate' not in d.attrs:
                # must have provided samplerate
                if isinstance(data, TimeSeries):
                    # get the samplerate from the TimeSeries
                    samplerate = data.samplerate
                if samplerate is None:
                    raise ValueError("You must specify a samplerate " +
                                     "if the dataset does not already exist.")
                # set the samplerate
                d.attrs['samplerate'] = samplerate

            # create annotations if necessary
            if annotations is not None:
                if self.annotations_name in f:
                    raise ValueError("Told to create dataset annotations, " +
                                     "but %s already exists." %
                                     self.annotations_name)
                a = f.create_dataset(self.annotations_name,
                                     data=annotations, **hdf5opts)

            # create channel_info if necessary
            if channel_info is not None:
                if self.channel_info_name in f:
                    raise ValueError("Told to create dataset channel_info, " +
                                     "but %s already exists." %
                                     self.channel_info_name)
                c = f.create_dataset(self.channel_info_name,
                                     data=channel_info, **hdf5opts)

            # close the hdf5 file
            f.close()
        else:
            # connect to the file and get info
            f = h5py.File(self.filepath, 'r')
            d = f[self.dataset_name]
            self.data_dtype = np.dtype(d.attrs['data_dtype'])
            self.file_dtype = d.dtype
            self.gain = d.attrs['gain']
            # close the file once the attributes have been read
            f.close()

    def _data_to_file(self, data):
        # process the datatypes
        if self.file_dtype is None:
            # load from data
            self.file_dtype = data.dtype
        else:
            # make sure it's a dtype
            if not isinstance(self.file_dtype, np.dtype):
                try:
                    self.file_dtype = np.dtype(self.file_dtype)
                except Exception:
                    raise ValueError("file_dtype should be a numpy dtype.")

        # process the gain
        if self.gain is None:
            # default to 1.0
            self.gain = 1.0
            # calc it if we are going from float to int
            if (self.file_dtype.kind == 'i') and (self.data_dtype.kind == 'f'):
                fr = np.iinfo(self.file_dtype).max * 2
                dr = np.abs(data).max() * 2 * (1. + self.gain_buffer)
                self.gain = dr / fr

        # calc and apply gain if necessary
        if self.apply_gain and self.gain != 1.0:
            return np.asarray(data / self.gain, dtype=self.file_dtype)
        else:
            return np.asarray(data, dtype=self.file_dtype)

    def _data_from_file(self, data):
        # see if apply gain we've already calculated
        if self.apply_gain and self.gain != 1.0:
            return np.asarray(data * self.gain, dtype=self.data_dtype)
        else:
            return np.asarray(data, dtype=self.data_dtype)

    def _get_samplerate(self, channel=None):
        # Same samplerate for all channels.
        # get the samplerate property of the dataset
        f = h5py.File(self.filepath, 'r')
        data = f[self.dataset_name]
        samplerate = data.attrs['samplerate']
        f.close()
        return samplerate

    def _get_nsamples(self, channel=None):
        # get the dimensions of the data
        f = h5py.File(self.filepath, 'r')
        data = f[self.dataset_name]
        nsamples = data.shape[1]
        f.close()
        return nsamples

    def _get_nchannels(self):
        # get the dimensions of the data
        f = h5py.File(self.filepath, 'r')
        data = f[self.dataset_name]
        nchannels = data.shape[0]
        f.close()
        return nchannels

    def _get_annotations(self):
        # get the dimensions of the data
        f = h5py.File(self.filepath, 'r')
        if self.annotations_name in f:
            annot = f[self.annotations_name][:]
        else:
            annot = None
        f.close()
        return annot

    def _set_annotations(self, annotations):
        # get the dimensions of the data
        f = h5py.File(self.filepath, 'a')
        if self.annotations_name in f:
            del f[self.annotations_name]
        a = f.create_dataset(self.annotations_name,
                             data=annotations, **self.hdf5opts)
        f.close()

    def _get_channel_info(self):
        # get the dimensions of the data
        f = h5py.File(self.filepath, 'r')
        if self.channel_info_name in f:
            chan_info = f[self.channel_info_name][:]
        else:
            chan_info = None
        f.close()
        return chan_info

    def _set_channel_info(self, channel_info):
        # get the dimensions of the data
        f = h5py.File(self.filepath, 'a')
        if self.channel_info_name in f:
            del f[self.channel_info_name]
        a = f.create_dataset(self.channel_info_name,
                             data=channel_info, **self.hdf5opts)
        f.close()

    def _load_data(self, channels, event_offsets, dur_samp, offset_samp):
        # connect to the file and get the dataset
        f = h5py.File(self.filepath, 'r')
        data = f[self.dataset_name]

        # allocate for data
        eventdata = np.empty((len(channels), len(event_offsets), dur_samp),
                             dtype=self.data_dtype) * np.nan

        # loop over events
        for e, evOffset in enumerate(event_offsets):
            # set the range
            ssamp = offset_samp + evOffset
            esamp = ssamp + dur_samp

            # check the ranges
            if ssamp < 0 or esamp > data.shape[1]:
                raise IOError('Event with offset ' + str(evOffset) +
                              ' is outside the bounds of the data.')
            eventdata[:, e, :] = self._data_from_file(data[channels,
                                                           ssamp:esamp])

        # close the file
        f.close()

        return eventdata

    def append_data(self, data):
        """
        Must be all channels.
        """
        # connect to the file and get the dataset
        f = h5py.File(self.filepath, 'a')

        # get the dataset (must already exist)
        d = f[self.dataset_name]

        # check data size
        if data.shape[0] != d.shape[0]:
            raise ValueError("New data must have the same number of "
                             "channels: %d." % d.shape[0])

        # reshape to hold new data
        cursamp = d.shape[1]
        newsamp = data.shape[1]
        d.shape = (d.shape[0], cursamp + newsamp)

        # append the data
        d[:, cursamp:cursamp + newsamp] = self._data_to_file(data)

        # close the file
        f.close()

    def set_channel_data(self, channel, data):
        """
        Set the data for an entire channel.  Will reshape the nsamples
        of the entire dataset to match, throwing out data if smaller.
        """
        # connect to the file and get the dataset
        f = h5py.File(self.filepath, 'a')

        # get the dataset (must already exist)
        d = f[self.dataset_name]

        # reshape if necessary
        cursamp = d.shape[1]
        newsamp = len(data)
        if cursamp != newsamp:
            d.shape = (d.shape[0], newsamp)

        # set the data
        d[channel, :] = self._data_to_file(data)

        # close the file
        f.close()
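# Illustrative sketch (not from the original file): the int16 gain logic of
# _data_to_file(), shown standalone.  The values below are made up.
#
#   import numpy as np
#   data = np.random.randn(4, 1000) * 50.0      # float64 "recording"
#   fr = np.iinfo(np.int16).max * 2             # representable file range
#   dr = np.abs(data).max() * 2 * (1. + .005)   # buffered data range
#   gain = dr / fr
#   stored = np.asarray(data / gain, dtype=np.int16)
#   restored = stored * gain                    # approximates the original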
gpl-3.0
8,240,189,674,893,211,000
34.477509
84
0.531649
false
4.111067
false
false
false
gdraynz/nyuki
nyuki/bus/persistence/memory_backend.py
1
1899
import logging

from nyuki.bus.persistence.events import EventStatus
from nyuki.bus.persistence.backend import PersistenceBackend


log = logging.getLogger(__name__)


class FIFOSizedQueue(object):

    def __init__(self, size):
        self._list = list()
        self._size = size

    def __len__(self):
        return len(self._list)

    @property
    def size(self):
        return self._size

    @property
    def list(self):
        return self._list

    @property
    def is_full(self):
        return len(self._list) >= self._size

    def put(self, item):
        while self.is_full:
            log.debug('queue full (%d), popping first item', len(self._list))
            self._list.pop(0)
        self._list.append(item)

    def empty(self):
        while self._list:
            yield self._list.pop(0)


class MemoryBackend(PersistenceBackend):

    def __init__(self, max_size=10000, **kwargs):
        self._last_events = FIFOSizedQueue(max_size)

    def __repr__(self):
        return '<MemoryBackend max_size={}>'.format(self._last_events.size)

    async def store(self, event):
        self._last_events.put(event)

    async def update(self, uid, status):
        for event in self._last_events.list:
            if event['id'] == uid:
                event['status'] = status.value
                return

    async def retrieve(self, since, status):
        def check_params(item):
            since_check = True
            status_check = True

            if since:
                since_check = item['created_at'] >= since

            if status:
                if isinstance(status, list):
                    status_check = EventStatus[item['status']] in status
                else:
                    status_check = item['status'] == status.value

            return since_check and status_check

        return list(filter(check_params, self._last_events.list))
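# Illustrative sketch (not part of the module): FIFO eviction when the
# queue is full.
#
#   q = FIFOSizedQueue(2)
#   q.put('a'); q.put('b'); q.put('c')   # 'a' is popped to make room
#   assert q.list == ['b', 'c']
#   assert list(q.empty()) == ['b', 'c'] and len(q) == 0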
apache-2.0
-7,991,617,701,439,254,000
24.662162
76
0.567667
false
4.049041
false
false
false
aristanetworks/arista-ovs-quantum
quantum/plugins/ryu/db/models_v2.py
1
2004
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import sqlalchemy as sa

from quantum.db import model_base


class OFPServer(model_base.BASEV2):
    """Openflow Server/API address."""
    __tablename__ = 'ofp_server'

    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    address = sa.Column(sa.String(64))     # netloc <host ip address>:<port>
    host_type = sa.Column(sa.String(255))  # server type
                                           # Controller, REST_API

    def __repr__(self):
        return "<OFPServer(%s,%s,%s)>" % (self.id, self.address,
                                          self.host_type)


class TunnelKeyLast(model_base.BASEV2):
    """Lastly allocated Tunnel key.

    The next key allocation will be started from this value + 1
    """
    last_key = sa.Column(sa.Integer, primary_key=True)

    def __repr__(self):
        return "<TunnelKeyLast(%x)>" % self.last_key


class TunnelKey(model_base.BASEV2):
    """Network ID <-> tunnel key mapping."""
    network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
                           nullable=False)
    tunnel_key = sa.Column(sa.Integer, primary_key=True,
                           nullable=False, autoincrement=False)

    def __repr__(self):
        return "<TunnelKey(%s,%x)>" % (self.network_id, self.tunnel_key)
apache-2.0
5,778,167,961,306,299,000
36.111111
79
0.63523
false
3.711111
false
false
false
dNG-git/pas_gapi_core
setup.py
1
2785
# -*- coding: utf-8 -*-

"""
direct PAS
Python Application Services
----------------------------------------------------------------------------
(C) direct Netware Group - All rights reserved
https://www.direct-netware.de/redirect?pas;gapi;core

The following license agreement remains valid unless any additions or
changes are being made by direct Netware Group in a written form.

This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
----------------------------------------------------------------------------
https://www.direct-netware.de/redirect?licenses;gpl
----------------------------------------------------------------------------
setup.py
"""

def get_version():
    """
Returns the version currently in development.

:return: (str) Version string
:since:  v0.1.02
    """

    return "v0.2.00"
#

from dNG.distutils.command.build_py import BuildPy
from dNG.distutils.command.install_data import InstallData
from dNG.distutils.temporary_directory import TemporaryDirectory

from distutils.core import setup
from os import path

with TemporaryDirectory(dir = ".") as build_directory:
    parameters = { "pasGapiCoreVersion": get_version() }

    InstallData.set_build_target_path(build_directory)
    InstallData.set_build_target_parameters(parameters)

    _build_path = path.join(build_directory, "src")

    setup(name = "pas_gapi_core",
          version = get_version(),
          description = "Python Application Services",
          long_description = """"pas_gapi_core" is an adapter and abstraction layer for the C-Level GObject Introspection API.""",
          author = "direct Netware Group et al.",
          author_email = "web@direct-netware.de",
          license = "GPLv2+",
          url = "https://www.direct-netware.de/redirect?pas;gapi;core",

          platforms = [ "any" ],

          package_dir = { "": _build_path },
          packages = [ "dNG" ],

          data_files = [ ( "docs", [ "LICENSE", "README" ]) ],

          # Override build_py to first run builder.py over all PAS modules
          cmdclass = { "build_py": BuildPy,
                       "install_data": InstallData }
         )
#
gpl-2.0
-6,165,827,496,580,687,000
34.253165
130
0.631957
false
4.238965
false
false
false
gford1000/awssl
examples/wait_state_example.py
1
1277
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import awssl


def wait_state_example():
    # Construct states
    final_state = awssl.Task(
        Name="FinalState",
        EndState=True,
        ResourceArn="arn:aws:lambda:REGION:ACCOUNT_ID:function:FUNCTION_NAME")

    wait_using_seconds_path = awssl.Wait(
        Name="wait_using_seconds_path",
        NextState=final_state,
        WaitForSecondsPath="$.expiryseconds")

    wait_using_timestamp_path = awssl.Wait(
        Name="wait_using_timestamp_path",
        NextState=wait_using_seconds_path,
        WaitUntilISO8601TimestampPath="$.expirydate")

    wait_using_timestamp = awssl.Wait(
        Name="wait_using_timestamp",
        NextState=wait_using_timestamp_path,
        WaitUntilISO8601Timestamp="2015-09-04T01:59:00Z")

    wait_using_seconds = awssl.Wait(
        Name="wait_using_second",
        NextState=wait_using_timestamp,
        WaitForSeconds=10)

    first_state = awssl.Task(
        Name="FirstState",
        ResourceArn="arn:aws:lambda:REGION:ACCOUNT_ID:function:FUNCTION_NAME",
        EndState=False,
        NextState=wait_using_seconds)

    # Construct state machine
    return awssl.StateMachine(
        Comment="An example of the Amazon States Language using wait states",
        StartState=first_state)


if __name__ == "__main__":
    sm = wait_state_example()
    print sm
mit
-2,577,020,994,533,087,700
25.061224
82
0.735317
false
3.011792
false
false
false
KarnUllrich/HDToolsPython
classification.py
1
1403
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Provides a kNN classifier.
"""

import numpy as np
# These two imports were missing but are required by the functions below.
from collections import Counter
import random as r

__author__ = ["Karen Ullrich"]
__email__ = "karen.ullrich@ofai.at"
__version__ = "Dec 2014"


def kNN(data, k):
    '''
    Performs kNN. Computes LOOCV accuracy.

    k   ... neighborhood size
    D   ... distance matrix
    n   ... number of data instances
    t   ... ground truth / labels
    acc ... classification accuracy for LOOCV
    '''

    n = len(data.D)
    num_evaluations = len(k)  # how many neighborhood sizes are given
    acc = np.zeros(num_evaluations)
    corr = np.zeros((n, num_evaluations))

    for i in xrange(n):
        ground_truth = data.t[i]
        row = data.D[i, :]
        row[i] = float('inf')
        idx = np.argsort(row)
        for j in xrange(num_evaluations):
            nn_class = findMostCommonElementOfSet(data.t[idx[:k[j]]])
            if ground_truth == nn_class:
                acc[j] += 1. / n
                corr[i, j] = 1

    return acc


def findMostCommonElementOfSet(elements):
    '''
    Returns the most common element in a set. For ties it decides randomly.

    Input: a ... list or np.array
    '''
    elementCounter = Counter(elements).most_common()
    highest_count = max([i[1] for i in elementCounter])
    element = [i[0] for i in elementCounter if i[1] == highest_count]
    r.shuffle(element)
    return element[0]
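# Illustrative sketch (not from the original file): LOOCV-kNN on a toy
# three-point distance matrix.  `data` is assumed to expose `.D` (distances)
# and `.t` (labels), matching what kNN() above expects.
#
#   import numpy as np
#   class Toy(object): pass
#   data = Toy()
#   data.D = np.array([[0., 1., 9.],
#                      [1., 0., 9.],
#                      [9., 9., 0.]])
#   data.t = np.array([0, 0, 1])
#   kNN(data, k=[1])   # points 0 and 1 vote for each other -> acc = 2/3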
gpl-2.0
8,126,564,849,569,907,000
22.79661
75
0.577334
false
3.348449
false
false
false
cullophid/Scienceman
spritesheettest.py
1
1419
import sys, os
import pygame
from pygame.locals import *
from pygame.color import *
from gamelib import data


class Spritesheet:
    def __init__(self, filename):
        self.sheet = pygame.image.load(os.path.join('data', filename)).convert()

    def imgat(self, rect, colorkey=None):
        rect = Rect(rect)
        image = pygame.Surface(rect.size).convert()
        image.blit(self.sheet, (0, 0), rect)
        if colorkey is not None:
            if colorkey == -1:
                # -1 means: use the colour of the top-left pixel as the key
                colorkey = image.get_at((0, 0))
            image.set_colorkey(colorkey, RLEACCEL)
        return image

    def imgsat(self, rects, colorkey=None):
        imgs = []
        for rect in rects:
            imgs.append(self.imgat(rect, colorkey))
        return imgs


os.environ["SDL_VIDEO_CENTERED"] = "1"
#pygame.mixer.pre_init(44100, -16, 2, 4096)
pygame.init()
pygame.mouse.set_visible(1)
pygame.display.set_caption("In the name of Science")
screen = pygame.display.set_mode((1000, 480))

font = pygame.font.Font((os.path.join('data', 'font.ttf')), 16)
ren = font.render("YOU DIED!", 1, (255, 255, 255))
screen.blit(ren, (320 - ren.get_width() / 2, 235))

sheet = Spritesheet("CaptainCommando.gif")
image = sheet.imgat((14, 6, 64, 86), -1)

while True:
    for e in pygame.event.get():
        if e.type == QUIT:
            sys.exit()
        if e.type == KEYDOWN:
            if e.key == K_ESCAPE:
                # quit on Escape (there is no enclosing object here, so
                # exit directly)
                sys.exit()
    ren = font.render("YOU DIED!", 1, (255, 255, 255))
    screen.blit(ren, (320 - ren.get_width() / 2, 235))
    screen.blit(image, (100, 100))
    pygame.display.flip()
lgpl-2.1
-1,517,194,131,688,636,400
27.38
73
0.674419
false
2.667293
false
false
false
davy39/eric
Helpviewer/Bookmarks/BookmarksManager.py
1
21594
# -*- coding: utf-8 -*-

# Copyright (c) 2009 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#

"""
Module implementing the bookmarks manager.
"""

from __future__ import unicode_literals

import os

from PyQt5.QtCore import pyqtSignal, Qt, QT_TRANSLATE_NOOP, QObject, QFile, \
    QIODevice, QXmlStreamReader, QDate, QDateTime, QFileInfo, QUrl, \
    QCoreApplication
from PyQt5.QtWidgets import QUndoStack, QUndoCommand, QDialog

from E5Gui import E5MessageBox, E5FileDialog

from .BookmarkNode import BookmarkNode

from Utilities.AutoSaver import AutoSaver
import Utilities
import Preferences

BOOKMARKBAR = QT_TRANSLATE_NOOP("BookmarksManager", "Bookmarks Bar")
BOOKMARKMENU = QT_TRANSLATE_NOOP("BookmarksManager", "Bookmarks Menu")

StartRoot = 0
StartMenu = 1
StartToolBar = 2


class BookmarksManager(QObject):
    """
    Class implementing the bookmarks manager.

    @signal entryAdded(BookmarkNode) emitted after a bookmark node has been
        added
    @signal entryRemoved(BookmarkNode, int, BookmarkNode) emitted after a
        bookmark node has been removed
    @signal entryChanged(BookmarkNode) emitted after a bookmark node has been
        changed
    @signal bookmarksSaved() emitted after the bookmarks were saved
    @signal bookmarksReloaded() emitted after the bookmarks were reloaded
    """
    entryAdded = pyqtSignal(BookmarkNode)
    entryRemoved = pyqtSignal(BookmarkNode, int, BookmarkNode)
    entryChanged = pyqtSignal(BookmarkNode)
    bookmarksSaved = pyqtSignal()
    bookmarksReloaded = pyqtSignal()

    def __init__(self, parent=None):
        """
        Constructor

        @param parent reference to the parent object (QObject)
        """
        super(BookmarksManager, self).__init__(parent)

        self.__saveTimer = AutoSaver(self, self.save)
        self.entryAdded.connect(self.__saveTimer.changeOccurred)
        self.entryRemoved.connect(self.__saveTimer.changeOccurred)
        self.entryChanged.connect(self.__saveTimer.changeOccurred)

        self.__initialize()

    def __initialize(self):
        """
        Private method to initialize some data.
        """
        self.__loaded = False
        self.__bookmarkRootNode = None
        self.__toolbar = None
        self.__menu = None
        self.__bookmarksModel = None
        self.__commands = QUndoStack()

    @classmethod
    def getFileName(cls):
        """
        Class method to get the file name of the bookmark file.

        @return name of the bookmark file (string)
        """
        return os.path.join(Utilities.getConfigDir(), "browser",
                            "bookmarks.xbel")

    def close(self):
        """
        Public method to close the bookmark manager.
        """
        self.__saveTimer.saveIfNeccessary()

    def undoRedoStack(self):
        """
        Public method to get a reference to the undo stack.

        @return reference to the undo stack (QUndoStack)
        """
        return self.__commands

    def changeExpanded(self):
        """
        Public method to handle a change of the expanded state.
        """
        self.__saveTimer.changeOccurred()

    def reload(self):
        """
        Public method used to initiate a reloading of the bookmarks.
        """
        self.__initialize()
        self.load()
        self.bookmarksReloaded.emit()

    def load(self):
        """
        Public method to load the bookmarks.

        @exception RuntimeError raised to indicate an error loading the
            bookmarks
        """
        if self.__loaded:
            return
        self.__loaded = True

        bookmarkFile = self.getFileName()
        if not QFile.exists(bookmarkFile):
            from . import DefaultBookmarks_rc       # __IGNORE_WARNING__
            bookmarkFile = QFile(":/DefaultBookmarks.xbel")
            bookmarkFile.open(QIODevice.ReadOnly)

        from .XbelReader import XbelReader
        reader = XbelReader()
        self.__bookmarkRootNode = reader.read(bookmarkFile)
        if reader.error() != QXmlStreamReader.NoError:
            E5MessageBox.warning(
                None,
                self.tr("Loading Bookmarks"),
                self.tr(
                    """Error when loading bookmarks on line {0},"""
                    """ column {1}:\n {2}""")
                .format(reader.lineNumber(),
                        reader.columnNumber(),
                        reader.errorString()))

        others = []
        for index in range(
                len(self.__bookmarkRootNode.children()) - 1, -1, -1):
            node = self.__bookmarkRootNode.children()[index]
            if node.type() == BookmarkNode.Folder:
                if (node.title == self.tr("Toolbar Bookmarks") or
                        node.title == BOOKMARKBAR) and \
                        self.__toolbar is None:
                    node.title = self.tr(BOOKMARKBAR)
                    self.__toolbar = node
                if (node.title == self.tr("Menu") or
                        node.title == BOOKMARKMENU) and \
                        self.__menu is None:
                    node.title = self.tr(BOOKMARKMENU)
                    self.__menu = node
            else:
                others.append(node)
            self.__bookmarkRootNode.remove(node)

        if len(self.__bookmarkRootNode.children()) > 0:
            raise RuntimeError("Error loading bookmarks.")

        if self.__toolbar is None:
            self.__toolbar = BookmarkNode(BookmarkNode.Folder,
                                          self.__bookmarkRootNode)
            self.__toolbar.title = self.tr(BOOKMARKBAR)
        else:
            self.__bookmarkRootNode.add(self.__toolbar)

        if self.__menu is None:
            self.__menu = BookmarkNode(BookmarkNode.Folder,
                                       self.__bookmarkRootNode)
            self.__menu.title = self.tr(BOOKMARKMENU)
        else:
            self.__bookmarkRootNode.add(self.__menu)

        for node in others:
            self.__menu.add(node)

        self.__convertFromOldBookmarks()

    def save(self):
        """
        Public method to save the bookmarks.
        """
        if not self.__loaded:
            return

        from .XbelWriter import XbelWriter
        writer = XbelWriter()
        bookmarkFile = self.getFileName()

        # save root folder titles in English (i.e. not localized)
        self.__menu.title = BOOKMARKMENU
        self.__toolbar.title = BOOKMARKBAR
        if not writer.write(bookmarkFile, self.__bookmarkRootNode):
            E5MessageBox.warning(
                None,
                self.tr("Saving Bookmarks"),
                self.tr("""Error saving bookmarks to <b>{0}</b>.""")
                .format(bookmarkFile))

        # restore localized titles
        self.__menu.title = self.tr(BOOKMARKMENU)
        self.__toolbar.title = self.tr(BOOKMARKBAR)

        self.bookmarksSaved.emit()

    def addBookmark(self, parent, node, row=-1):
        """
        Public method to add a bookmark.

        @param parent reference to the node to add to (BookmarkNode)
        @param node reference to the node to add (BookmarkNode)
        @param row row number (integer)
        """
        if not self.__loaded:
            return

        self.setTimestamp(node, BookmarkNode.TsAdded,
                          QDateTime.currentDateTime())

        command = InsertBookmarksCommand(self, parent, node, row)
        self.__commands.push(command)

    def removeBookmark(self, node):
        """
        Public method to remove a bookmark.

        @param node reference to the node to be removed (BookmarkNode)
        """
        if not self.__loaded:
            return

        parent = node.parent()
        row = parent.children().index(node)
        command = RemoveBookmarksCommand(self, parent, row)
        self.__commands.push(command)

    def setTitle(self, node, newTitle):
        """
        Public method to set the title of a bookmark.

        @param node reference to the node to be changed (BookmarkNode)
        @param newTitle title to be set (string)
        """
        if not self.__loaded:
            return

        command = ChangeBookmarkCommand(self, node, newTitle, True)
        self.__commands.push(command)

    def setUrl(self, node, newUrl):
        """
        Public method to set the URL of a bookmark.

        @param node reference to the node to be changed (BookmarkNode)
        @param newUrl URL to be set (string)
        """
        if not self.__loaded:
            return

        command = ChangeBookmarkCommand(self, node, newUrl, False)
        self.__commands.push(command)

    def setNodeChanged(self, node):
        """
        Public method to signal changes of bookmarks other than title, URL
        or timestamp.

        @param node reference to the bookmark (BookmarkNode)
        """
        self.__saveTimer.changeOccurred()

    def setTimestamp(self, node, timestampType, timestamp):
        """
        Public method to set one of the timestamps of a bookmark.

        @param node reference to the node to be changed (BookmarkNode)
        @param timestampType type of the timestamp to set
            (BookmarkNode.TsAdded, BookmarkNode.TsModified,
             BookmarkNode.TsVisited)
        @param timestamp timestamp to set (QDateTime)
        """
        if not self.__loaded:
            return

        assert timestampType in [BookmarkNode.TsAdded,
                                 BookmarkNode.TsModified,
                                 BookmarkNode.TsVisited]

        if timestampType == BookmarkNode.TsAdded:
            node.added = timestamp
        elif timestampType == BookmarkNode.TsModified:
            node.modified = timestamp
        elif timestampType == BookmarkNode.TsVisited:
            node.visited = timestamp
        self.__saveTimer.changeOccurred()

    def bookmarks(self):
        """
        Public method to get a reference to the root bookmark node.

        @return reference to the root bookmark node (BookmarkNode)
        """
        if not self.__loaded:
            self.load()

        return self.__bookmarkRootNode

    def menu(self):
        """
        Public method to get a reference to the bookmarks menu node.

        @return reference to the bookmarks menu node (BookmarkNode)
        """
        if not self.__loaded:
            self.load()

        return self.__menu

    def toolbar(self):
        """
        Public method to get a reference to the bookmarks toolbar node.

        @return reference to the bookmarks toolbar node (BookmarkNode)
        """
        if not self.__loaded:
            self.load()

        return self.__toolbar

    def bookmarksModel(self):
        """
        Public method to get a reference to the bookmarks model.

        @return reference to the bookmarks model (BookmarksModel)
        """
        if self.__bookmarksModel is None:
            from .BookmarksModel import BookmarksModel
            self.__bookmarksModel = BookmarksModel(self, self)
        return self.__bookmarksModel

    def importBookmarks(self):
        """
        Public method to import bookmarks.
        """
        from .BookmarksImportDialog import BookmarksImportDialog
        dlg = BookmarksImportDialog()
        if dlg.exec_() == QDialog.Accepted:
            importRootNode = dlg.getImportedBookmarks()
            if importRootNode is not None:
                self.addBookmark(self.menu(), importRootNode)

    def exportBookmarks(self):
        """
        Public method to export the bookmarks.
        """
        fileName, selectedFilter = E5FileDialog.getSaveFileNameAndFilter(
            None,
            self.tr("Export Bookmarks"),
            "eric6_bookmarks.xbel",
            self.tr("XBEL bookmarks (*.xbel);;"
                    "XBEL bookmarks (*.xml);;"
                    "HTML Bookmarks (*.html)"))
        if not fileName:
            return

        ext = QFileInfo(fileName).suffix()
        if not ext:
            ex = selectedFilter.split("(*")[1].split(")")[0]
            if ex:
                fileName += ex
            ext = QFileInfo(fileName).suffix()

        if ext == "html":
            from .NsHtmlWriter import NsHtmlWriter
            writer = NsHtmlWriter()
        else:
            from .XbelWriter import XbelWriter
            writer = XbelWriter()
        if not writer.write(fileName, self.__bookmarkRootNode):
            E5MessageBox.critical(
                None,
                self.tr("Exporting Bookmarks"),
                self.tr("""Error exporting bookmarks to <b>{0}</b>.""")
                .format(fileName))

    def __convertFromOldBookmarks(self):
        """
        Private method to convert the old bookmarks into the new ones.
        """
        bmNames = Preferences.Prefs.settings.value('Bookmarks/Names')
        bmFiles = Preferences.Prefs.settings.value('Bookmarks/Files')

        if bmNames is not None and bmFiles is not None:
            if len(bmNames) == len(bmFiles):
                convertedRootNode = BookmarkNode(BookmarkNode.Folder)
                convertedRootNode.title = self.tr("Converted {0}")\
                    .format(QDate.currentDate().toString(
                        Qt.SystemLocaleShortDate))
                for i in range(len(bmNames)):
                    node = BookmarkNode(BookmarkNode.Bookmark,
                                        convertedRootNode)
                    node.title = bmNames[i]
                    url = QUrl(bmFiles[i])
                    if not url.scheme():
                        url.setScheme("file")
                    node.url = url.toString()
                self.addBookmark(self.menu(), convertedRootNode)

                Preferences.Prefs.settings.remove('Bookmarks')

    def iconChanged(self, url):
        """
        Public slot to update the icon image for an URL.

        @param url URL of the icon to update (QUrl or string)
        """
        if isinstance(url, QUrl):
            url = url.toString()
        nodes = self.bookmarksForUrl(url)
        for node in nodes:
            self.bookmarksModel().entryChanged(node)

    def bookmarkForUrl(self, url, start=StartRoot):
        """
        Public method to get a bookmark node for a given URL.

        @param url URL of the bookmark to search for (QUrl or string)
        @keyparam start indicator for the start of the search
            (StartRoot, StartMenu, StartToolBar)
        @return bookmark node for the given url (BookmarkNode)
        """
        if start == StartMenu:
            startNode = self.__menu
        elif start == StartToolBar:
            startNode = self.__toolbar
        else:
            startNode = self.__bookmarkRootNode
        if startNode is None:
            return None

        if isinstance(url, QUrl):
            url = url.toString()

        return self.__searchBookmark(url, startNode)

    def __searchBookmark(self, url, startNode):
        """
        Private method get a bookmark node for a given URL.

        @param url URL of the bookmark to search for (string)
        @param startNode reference to the node to start searching
            (BookmarkNode)
        @return bookmark node for the given url (BookmarkNode)
        """
        bm = None
        for node in startNode.children():
            if node.type() == BookmarkNode.Folder:
                bm = self.__searchBookmark(url, node)
            elif node.type() == BookmarkNode.Bookmark:
                if node.url == url:
                    bm = node
            if bm is not None:
                return bm
        return None

    def bookmarksForUrl(self, url, start=StartRoot):
        """
        Public method to get a list of bookmark nodes for a given URL.

        @param url URL of the bookmarks to search for (QUrl or string)
        @keyparam start indicator for the start of the search
            (StartRoot, StartMenu, StartToolBar)
        @return list of bookmark nodes for the given url (list of
            BookmarkNode)
        """
        if start == StartMenu:
            startNode = self.__menu
        elif start == StartToolBar:
            startNode = self.__toolbar
        else:
            startNode = self.__bookmarkRootNode
        if startNode is None:
            return None

        if isinstance(url, QUrl):
            url = url.toString()

        return self.__searchBookmarks(url, startNode)

    def __searchBookmarks(self, url, startNode):
        """
        Private method get a list of bookmark nodes for a given URL.

        @param url URL of the bookmarks to search for (string)
        @param startNode reference to the node to start searching
            (BookmarkNode)
        @return list of bookmark nodes for the given url (list of
            BookmarkNode)
        """
        bm = []
        for node in startNode.children():
            if node.type() == BookmarkNode.Folder:
                bm.extend(self.__searchBookmarks(url, node))
            elif node.type() == BookmarkNode.Bookmark:
                if node.url == url:
                    bm.append(node)
        return bm


class RemoveBookmarksCommand(QUndoCommand):
    """
    Class implementing the Remove undo command.
    """
    def __init__(self, bookmarksManager, parent, row):
        """
        Constructor

        @param bookmarksManager reference to the bookmarks manager
            (BookmarksManager)
        @param parent reference to the parent node (BookmarkNode)
        @param row row number of bookmark (integer)
        """
        super(RemoveBookmarksCommand, self).__init__(
            QCoreApplication.translate("BookmarksManager", "Remove Bookmark"))

        self._row = row
        self._bookmarksManager = bookmarksManager
        try:
            self._node = parent.children()[row]
        except IndexError:
            self._node = BookmarkNode()
        self._parent = parent

    def undo(self):
        """
        Public slot to perform the undo action.
        """
        self._parent.add(self._node, self._row)
        self._bookmarksManager.entryAdded.emit(self._node)

    def redo(self):
        """
        Public slot to perform the redo action.
        """
        self._parent.remove(self._node)
        self._bookmarksManager.entryRemoved.emit(self._parent, self._row,
                                                 self._node)


class InsertBookmarksCommand(RemoveBookmarksCommand):
    """
    Class implementing the Insert undo command.
    """
    def __init__(self, bookmarksManager, parent, node, row):
        """
        Constructor

        @param bookmarksManager reference to the bookmarks manager
            (BookmarksManager)
        @param parent reference to the parent node (BookmarkNode)
        @param node reference to the node to be inserted (BookmarkNode)
        @param row row number of bookmark (integer)
        """
        RemoveBookmarksCommand.__init__(self, bookmarksManager, parent, row)
        self.setText(QCoreApplication.translate(
            "BookmarksManager", "Insert Bookmark"))
        self._node = node

    def undo(self):
        """
        Public slot to perform the undo action.
        """
        RemoveBookmarksCommand.redo(self)

    def redo(self):
        """
        Public slot to perform the redo action.
        """
        RemoveBookmarksCommand.undo(self)


class ChangeBookmarkCommand(QUndoCommand):
    """
    Class implementing the Change undo command.
    """
    def __init__(self, bookmarksManager, node, newValue, title):
        """
        Constructor

        @param bookmarksManager reference to the bookmarks manager
            (BookmarksManager)
        @param node reference to the node to be changed (BookmarkNode)
        @param newValue new value to be set (string)
        @param title flag indicating a change of the title (True) or
            the URL (False) (boolean)
        """
        super(ChangeBookmarkCommand, self).__init__()

        self._bookmarksManager = bookmarksManager
        self._title = title
        self._newValue = newValue
        self._node = node

        if self._title:
            self._oldValue = self._node.title
            self.setText(QCoreApplication.translate(
                "BookmarksManager", "Name Change"))
        else:
            self._oldValue = self._node.url
            self.setText(QCoreApplication.translate(
                "BookmarksManager", "Address Change"))

    def undo(self):
        """
        Public slot to perform the undo action.
        """
        if self._title:
            self._node.title = self._oldValue
        else:
            self._node.url = self._oldValue
        self._bookmarksManager.entryChanged.emit(self._node)

    def redo(self):
        """
        Public slot to perform the redo action.
        """
        if self._title:
            self._node.title = self._newValue
        else:
            self._node.url = self._newValue
        self._bookmarksManager.entryChanged.emit(self._node)
gpl-3.0
-6,105,752,820,382,670,000
32.635514
79
0.563119
false
4.687215
false
false
false
Gavitron/pipulator
tcp_fakeclient.py
1
2704
# Connect to a known game Server and spew out whatever it tells us, before
# the missing heartbeat causes us to disco
#
import socket
import struct
import sys
import json

# internet variables
game_address = ('127.0.0.1', 27001)  # a hack so that I can use the tcpserver when testing.

######
# misc helper function declarations
######

# build a byte string for tx on the wire
def msg_builder(msg_type=0, contents=''):
    return struct.pack('<LB', len(contents), msg_type) + contents

# generator f'n to take an arbitrary string and pump it through, one byte at a time
def byte_pump(byte_string):
    for byte in byte_string:
        yield byte

######
# Main block starts here

# the client connected, so make a connection to the server now
try:
    print >>sys.stderr, 'CLIENT  : connecting to %s port %s...' % game_address
    game_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    game_socket.connect(game_address)
    isRunning = True
    while isRunning:
        message = game_socket.recv(5)
        if message:
            (msg_len, code) = struct.unpack('<LB', message)
            if msg_len > 0:
                payload = game_socket.recv(msg_len)
                print >>sys.stderr, 'CLIENT  : recd %d bytes payload with code %r' % (msg_len, code)
            else:
                payload = False
            if code == 0:  # no-op for heartbeat
                if payload:
                    # %r (rather than %u) so that a string payload can be formatted
                    print >>sys.stderr, 'WARNING, NONZERO PAYLOAD OF %d BYTES IN HEARTBEAT MESSAGE.\n ABORTING RUN AND DUMPING PAYLOAD:\n%r' % \
                        (msg_len, payload)
                    isRunning = False
                    break
            elif code == 1:
                data = json.loads(payload)
                print >>sys.stderr, 'CLIENT  : app version: %s lang: %s ' % (data['version'], data['lang'])
            elif code == 3:
                print >>sys.stderr, 'CLIENT  : gamestate update, %d bytes' % len(payload)
            elif code == 5:
                data = json.loads(payload)
                print >>sys.stderr, 'CLIENT  : unknown JSON state message. Dumping:\n%s\n\n' % \
                    json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
            else:
                print >>sys.stderr, 'CLIENT  : unknown code "%d", payload of %d bytes ' % (code, len(payload))
            # reply with an empty heartbeat
            game_socket.sendall(msg_builder())
        else:
            print >>sys.stderr, 'CLIENT  : error from socket'
            isRunning = False
finally:
    # close out the connections
    print >>sys.stderr, 'CLIENT  : closing socket'
    game_socket.close()
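# Illustrative sketch (not part of the original script): the wire format
# msg_builder() produces -- a little-endian uint32 payload length, one
# type byte, then the payload itself.
#
#   >>> msg_builder(1, '{"v":1}').encode('hex')   # Python 2
#   '07000000017b2276223a317d'
#   # 07 00 00 00 = length 7, 01 = msg_type, remainder = '{"v":1}'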
bsd-3-clause
-4,238,135,791,722,192,400
36.555556
144
0.565089
false
3.896254
false
false
false
ebar0n/SD-Fumadores
agent.py
1
3769
import threading
import time
from random import choice

import socketserver

from storage import codes, packet_size, store, time_sleep, time_smoke
from utils import _print

global smoke
smoke = False
global smoke_code


class MyTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    pass


class MyTCPServerHandler(socketserver.BaseRequestHandler):

    bufer = ''

    def process(self):
        while True:
            message = self.request.recv(packet_size).decode('UTF-8')
            if message == 'need':
                _print('{}: Necesito {}!'.format(
                    store.get(self.code)['name'],
                    store.get(self.code)['required']
                ))
                if self.smoke_released:
                    self.smoke_released = False
                    global smoke
                    smoke = False
            elif message == 'enable':
                _print('{}: Termino de fumar!'.format(store.get(self.code)['name']))
                self.smoke_released = True
            elif message == 'ack':
                time.sleep(time_smoke)
            elif message == 'exit':
                break
            time.sleep(time_sleep)

    def handle(self):
        # Recognition step
        # cur_thread = threading.current_thread()
        self.code = self.request.recv(packet_size).decode('UTF-8')
        self.rejected = False
        self.smoke_released = False
        _print('Conectando fumador...')
        if store.get(self.code)['flag'] is False:
            store.get(self.code)['request'] = self.request
            store.get(self.code)['flag'] = True
            _print('Fumador aceptado *{}*'.format(store.get(self.code)['name']))
            self.request.send('accepte'.encode('UTF-8'))
            self.process()
        else:
            self.rejected = True
            _print('Fumador rechazado *{}*'.format(store.get(self.code)['name']))
            self.request.send('rejected'.encode('UTF-8'))

    def finish(self):
        _print('Fumador desconectado *{}*'.format(store.get(self.code)['name']))
        if self.rejected is False:
            store.get(self.code)['flag'] = False
            global smoke_code
            if smoke_code == self.code:
                global smoke
                smoke = False

    def handle_timeout(self):
        print('tiempo de espera agotado')


def verify_smoking():
    # Check whether all the smokers are connected
    while True:
        active_smokers = True
        for i in codes:
            if store[i].get('flag') is False:
                active_smokers = False
                break
        time.sleep(time_sleep)
        if active_smokers and smoke is False:
            break
        else:
            if active_smokers is False:
                _print('Agente: Esperando todos los fumadores')


def init(port):
    try:
        server = MyTCPServer(('0.0.0.0', port), MyTCPServerHandler)
        server.timeout = 10
        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.timeout = 10

        # starting the agent
        _print("Esperando fumadores...")
        server_thread.daemon = True
        server_thread.start()

        while True:
            verify_smoking()
            global smoke_code
            smoke_code = choice(codes)
            _print('Agente: Tengo disponible {}!'.format(
                store.get(smoke_code)['required']
            ))
            global smoke
            smoke = True
            store.get(smoke_code)['request'].send('enable'.encode('UTF-8'))
            _print('Agente: fumador {} servido!'.format(store.get(smoke_code)['name']))
    except KeyboardInterrupt:
        _print('Cerrando conexiones...')
        server.shutdown()
        server.server_close()
bsd-3-clause
6,229,673,584,993,234,000
30.940678
87
0.557177
false
3.905699
false
false
false
anselmobd/fo2
src/lotes/views/ajax/estoque_depositos_modelo.py
1
1486
from pprint import pprint

from django.db.models import Exists, OuterRef
from django.http import JsonResponse

from fo2.connections import db_cursor_so

from systextil.queries.deposito.total_modelo import totais_modelos_depositos

import comercial.models


def estoque_depositos_modelo(request, modelo, filtra=''):
    cursor = db_cursor_so(request)

    data = {
        'modelo': modelo,
    }

    try:
        if filtra == 'm':
            metas = comercial.models.MetaEstoque.objects
            metas = metas.annotate(antiga=Exists(
                comercial.models.MetaEstoque.objects.filter(
                    modelo=OuterRef('modelo'),
                    data__gt=OuterRef('data')
                )
            ))
            metas = metas.filter(antiga=False)
            metas = metas.exclude(venda_mensal=0)
            metas = metas.values('modelo')
            modelos = [m['modelo'] for m in metas]
        else:
            modelos = None

        totais = totais_modelos_depositos(
            cursor, ('101', '102', '103', '122', '231'), modelos)

        try:
            total_est = totais[modelo]
        except KeyError:
            total_est = 0

        data.update({
            'result': 'OK',
            'total_est': total_est,
        })
    except Exception:
        # report the failure in the JSON response instead of propagating it
        data.update({
            'result': 'ERR',
            'descricao_erro': 'Erro ao buscar estoque nos depósitos',
        })

    return JsonResponse(data, safe=False)
mit
5,535,124,717,280,641,000
27.557692
76
0.556229
false
3.527316
false
false
false
sistason/pa3
src/pa3_frontend/pa3_django/pa3/statistics_handling.py
1
4335
import logging
import time

from django.utils import timezone
from django.db.models import Sum
from django.http import HttpResponse
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned

from pa3.models import WaitingNumberBatch, WaitingNumber, NewestNumberBatch, \
    StatisticalData


def get_src_statistic(_src):
    try:
        return StatisticalData.objects.get(src=_src)
    except MultipleObjectsReturned:
        for stats_ in StatisticalData.objects.filter(src=_src):
            stats_.delete()
    except ObjectDoesNotExist:
        pass
    except Exception as e:
        logging.exception('Exception while updating Stats: {}'.format(e))


def create_statistic(_src, date_):
    real_data_begin = timezone.datetime(2013, 9, 1)

    stat_ = StatisticalData(src=_src, date=date_)

    stat_qs = WaitingNumber.objects.filter(src=_src).filter(
        date__gt=real_data_begin).filter(
        date_delta__lt=60 * 60 * 3).filter(date_delta__gt=1)

    stat_.avg_len = stat_qs.count()
    stat_.avg_sum = stat_qs.aggregate(s=Sum('date_delta'))['s']
    if stat_.avg_sum is None:
        stat_.avg_sum = 0
        stat_.avg = 0
    else:
        stat_.avg = 1.0 * stat_.avg_sum / stat_.avg_len

    stat_.avg_last_two_weeks = stat_.avg
    stat_.avg_last_same_day = stat_.avg
    stat_.avg_whole = (stat_.avg + stat_.avg_last_two_weeks +
                       stat_.avg_last_same_day) / 3

    stat_.avg_proc_delay_sum = stat_qs.aggregate(s=Sum('proc_delay'))['s']
    if stat_.avg_proc_delay_sum is None:
        stat_.avg_proc_delay_sum = 0
        stat_.avg_proc_delay_whole = 0
    else:
        stat_.avg_proc_delay_whole = 1.0 * stat_.avg_proc_delay_sum / stat_.avg_len

    stat_.save()

    return stat_


def update_statistic(_src, dd, new_batch, date_):
    stat_ = StatisticalData.objects.get(src=_src)

    stat_.avg_sum += dd
    stat_.avg_len += 1
    # sum/len = avg | sum = avg*len | new_avg = (sum + dd) / (len + 1)
    stat_.avg = 1.0 * stat_.avg_sum / stat_.avg_len
    stat_.avg_whole = (stat_.avg + stat_.avg_last_two_weeks +
                       stat_.avg_last_same_day) / 3

    if new_batch.proc_delay is not None and new_batch.proc_delay > 0:
        stat_.avg_proc_delay_sum += new_batch.proc_delay
        stat_.avg_proc_delay_len += 1
        stat_.avg_proc_delay_whole = 1.0 * stat_.avg_proc_delay_sum / stat_.avg_proc_delay_len

    stat_.date = date_
    stat_.save()


def recompute_stats(request):
    # Recomputes the last_two_weeks average and the last_day average
    # Requires regular calls, e.g. via CRON
    real_data_begin = timezone.datetime(2013, 9, 1)
    for stat_data in StatisticalData.objects.all():
        # Get average over the last two weeks
        last_two_weeks_qs = WaitingNumber.objects.filter(
            src=stat_data.src).filter(
            date__gt=real_data_begin).filter(
            date_delta__lt=60 * 60 * 3).filter(
            date_delta__gt=1).filter(
            date__gt=int(time.time()) - (60 * 60 * 24 * 14))
        last_two_weeks_len = last_two_weeks_qs.count()
        stat_data.avg_last_two_weeks = last_two_weeks_qs.aggregate(
            s=Sum('date_delta'))['s'] / last_two_weeks_len if last_two_weeks_len else 0

        # Get average from weekday last week (Tuesday last week)
        last_sameday_qs = WaitingNumber.objects.filter(
            src=stat_data.src).filter(
            date__gt=real_data_begin).filter(
            date_delta__lt=60 * 60 * 3).filter(
            date_delta__gt=1).filter(
            date__lt=timezone.now() + (24 * 60 * 60) - (60 * 60 * 24 * 7),
            date__gt=timezone.now() - (60 * 60 * 24 * 7))
        last_sameday_len = last_sameday_qs.count()
        stat_data.avg_last_same_day = last_sameday_qs.aggregate(
            s=Sum('date_delta'))['s'] / last_sameday_len if last_sameday_len else 0

        # Weights of whole, last two weeks and last weekday are equal
        if last_two_weeks_len and last_sameday_len:
            stat_data.avg_whole = (stat_data.avg + stat_data.avg_last_two_weeks +
                                   stat_data.avg_last_same_day) / 3.0
        elif last_two_weeks_len:
            stat_data.avg_whole = (stat_data.avg + stat_data.avg_last_two_weeks) / 2.0
        elif last_sameday_len:
            stat_data.avg_whole = (stat_data.avg + stat_data.avg_last_same_day) / 2.0
        else:
            stat_data.avg_whole = stat_data.avg

        stat_data.save()
    return HttpResponse(status=200)
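# Illustrative sketch (not part of the module): the running-average update
# used in update_statistic(), with made-up numbers.
#
#   avg_sum, avg_len = 300.0, 3      # three waits totalling 300 s (avg 100 s)
#   dd = 140.0                       # a new date_delta
#   avg_sum += dd
#   avg_len += 1
#   avg = avg_sum / avg_len          # (300 + 140) / 4 = 110.0 s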
gpl-3.0
-7,438,429,729,537,712,000
39.148148
116
0.62722
false
3.206361
false
false
false
ElettraSciComp/STP-Core
STP-Core/preview_phaseretrieval.py
1
5490
###########################################################################
# (C) 2016 Elettra - Sincrotrone Trieste S.C.p.A.. All rights reserved.  #
#                                                                         #
#                                                                         #
# This file is part of STP-Core, the Python core of SYRMEP Tomo Project,  #
# a software tool for the reconstruction of experimental CT datasets.     #
#                                                                         #
# STP-Core is free software: you can redistribute it and/or modify it     #
# under the terms of the GNU General Public License as published by the   #
# Free Software Foundation, either version 3 of the License, or (at your  #
# option) any later version.                                              #
#                                                                         #
# STP-Core is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or   #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License    #
# for more details.                                                       #
#                                                                         #
# You should have received a copy of the GNU General Public License       #
# along with STP-Core. If not, see <http://www.gnu.org/licenses/>.        #
#                                                                         #
###########################################################################
#
# Author: Francesco Brun
# Last modified: July, 8th 2016
#

from sys import argv, exit
from os import remove, sep, linesep
from os.path import exists
from numpy import float32, double, nanmin, nanmax, finfo, ndarray
from time import time
from multiprocessing import Process, Lock

from pyfftw.interfaces.cache import enable as pyfftw_cache_enable, disable as pyfftw_cache_disable
from pyfftw.interfaces.cache import set_keepalive_time as pyfftw_set_keepalive_time

from phaseretrieval.tiehom import tiehom, tiehom_plan
from phaseretrieval.tiehom2020 import tiehom2020, tiehom_plan2020
from phaseretrieval.phrt import phrt, phrt_plan

from h5py import File as getHDF5
from utils.caching import cache2plan, plan2cache
from preprocess.extract_flatdark import extract_flatdark

import stpio.tdf as tdf


def main(argv):
    """To do...
    """
    lock = Lock()
    skip_flat = True
    first_done = False

    pyfftw_cache_disable()
    pyfftw_cache_enable()
    pyfftw_set_keepalive_time(1800)

    # Get the from and to number of files to process:
    idx = int(argv[0])

    # Get full paths of input TDF and output TDF:
    infile = argv[1]
    outfile = argv[2]

    # Get the phase retrieval parameters:
    method = int(argv[3])
    param1 = double(argv[4])   # param1 (e.g. regParam, or beta)
    param2 = double(argv[5])   # param2 (e.g. thresh or delta)
    energy = double(argv[6])
    distance = double(argv[7])
    pixsize = double(argv[8]) / 1000.0  # pixsize from micron to mm:
    pad = True if argv[9] == "True" else False

    # Tmp path and log file:
    tmppath = argv[10]
    if not tmppath.endswith(sep):
        tmppath += sep
    logfilename = argv[11]

    # Open the HDF5 file and check it contains flat files:
    skipflat = False
    f_in = getHDF5(infile, 'r')
    if "/tomo" in f_in:
        dset = f_in['tomo']
        if not "/flat" in f_in:
            skipflat = True
    else:
        dset = f_in['exchange/data']
        if not "/exchange/data_white" in f_in:
            skipflat = True
    num_proj = tdf.get_nr_projs(dset)
    num_sinos = tdf.get_nr_sinos(dset)

    # Check if the HDF5 makes sense:
    if (num_proj == 0):
        log = open(logfilename, "a")
        log.write(linesep + "\tNo projections found. Process will end.")
        log.close()
        exit()

    # Get flats and darks from cache or from file:
    if not skipflat:
        try:
            corrplan = cache2plan(infile, tmppath)
        except Exception as e:
            #print "Error(s) when reading from cache"
            corrplan = extract_flatdark(f_in, True, logfilename)
            remove(logfilename)
            plan2cache(corrplan, infile, tmppath)

    # Read projection:
    im = tdf.read_tomo(dset, idx).astype(float32)
    f_in.close()

    # Apply simple flat fielding (if applicable):
    if not skipflat:
        if (isinstance(corrplan['im_flat_after'], ndarray) and
                isinstance(corrplan['im_flat'], ndarray) and
                isinstance(corrplan['im_dark'], ndarray) and
                isinstance(corrplan['im_dark_after'], ndarray)):
            if (idx < num_proj / 2):
                im = (im - corrplan['im_dark']) / (
                    abs(corrplan['im_flat'] - corrplan['im_dark']) +
                    finfo(float32).eps)
            else:
                im = (im - corrplan['im_dark_after']) / (
                    abs(corrplan['im_flat_after'] - corrplan['im_dark_after']) +
                    finfo(float32).eps)

    # Prepare plan:
    im = im.astype(float32)
    if (method == 0):
        # Paganin 2002:
        plan = tiehom_plan(im, param1, param2, energy, distance, pixsize, pad)
        im = tiehom(im, plan).astype(float32)
    elif (method == 1):
        # Paganin 2020:
        plan = tiehom_plan2020(im, param1, param2, energy, distance,
                               pixsize, pad)
        im = tiehom2020(im, plan).astype(float32)
    else:
        plan = phrt_plan(im, energy, distance, pixsize, param2, param1,
                         method, pad)
        im = phrt(im, plan, method).astype(float32)

    # Write down reconstructed preview file (file name modified with metadata):
    im = im.astype(float32)
    outfile = outfile + '_' + str(im.shape[1]) + 'x' + str(im.shape[0]) + \
        '_' + str(nanmin(im)) + '$' + str(nanmax(im))
    im.tofile(outfile)


if __name__ == "__main__":
    main(argv[1:])
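# Illustrative sketch (not part of the original tool): the flat fielding
# applied above, I_norm = (I - dark) / (|flat - dark| + eps), on dummy arrays.
#
#   import numpy as np
#   eps = np.finfo(np.float32).eps
#   im, flat, dark = (np.full((2, 2), v, np.float32) for v in (90., 100., 10.))
#   norm = (im - dark) / (abs(flat - dark) + eps)   # (90-10)/90 ~ 0.89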
gpl-3.0
5,234,524,101,130,132,000
35.838926
120
0.598579
false
3.255042
false
false
false
williamyangcn/iBlah_py
ui/ui_profile_dialog.py
1
4945
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file '/Users/lee/backups/code/iblah_py/ui/ui_profile_dialog.ui'
#
# Created: Fri May  6 21:47:58 2011
#      by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s


class Ui_ProfileDialog(object):
    def setupUi(self, ProfileDialog):
        ProfileDialog.setObjectName(_fromUtf8("ProfileDialog"))
        ProfileDialog.setEnabled(True)
        ProfileDialog.resize(470, 300)
        self.save_btn = QtGui.QPushButton(ProfileDialog)
        self.save_btn.setEnabled(True)
        self.save_btn.setGeometry(QtCore.QRect(330, 240, 114, 32))
        self.save_btn.setObjectName(_fromUtf8("save_btn"))
        self.avatar_label = QtGui.QLabel(ProfileDialog)
        self.avatar_label.setGeometry(QtCore.QRect(310, 20, 130, 130))
        self.avatar_label.setStyleSheet(_fromUtf8("border: 2px solid #ccc;"))
        self.avatar_label.setObjectName(_fromUtf8("avatar_label"))
        self.label_2 = QtGui.QLabel(ProfileDialog)
        self.label_2.setGeometry(QtCore.QRect(21, 117, 26, 16))
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.impresa_text_edit = QtGui.QTextEdit(ProfileDialog)
        self.impresa_text_edit.setGeometry(QtCore.QRect(80, 170, 361, 51))
        self.impresa_text_edit.setObjectName(_fromUtf8("impresa_text_edit"))
        self.fullname_line_edit = QtGui.QLineEdit(ProfileDialog)
        self.fullname_line_edit.setGeometry(QtCore.QRect(81, 117, 201, 22))
        self.fullname_line_edit.setObjectName(_fromUtf8("fullname_line_edit"))
        self.label_3 = QtGui.QLabel(ProfileDialog)
        self.label_3.setGeometry(QtCore.QRect(21, 21, 39, 16))
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.label_4 = QtGui.QLabel(ProfileDialog)
        self.label_4.setGeometry(QtCore.QRect(21, 53, 39, 16))
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.cellphone_no_line_edit = QtGui.QLineEdit(ProfileDialog)
        self.cellphone_no_line_edit.setEnabled(True)
        self.cellphone_no_line_edit.setGeometry(QtCore.QRect(81, 53, 201, 22))
        self.cellphone_no_line_edit.setText(_fromUtf8(""))
        self.cellphone_no_line_edit.setReadOnly(True)
        self.cellphone_no_line_edit.setObjectName(_fromUtf8("cellphone_no_line_edit"))
        self.fetion_no_line_edit = QtGui.QLineEdit(ProfileDialog)
        self.fetion_no_line_edit.setEnabled(True)
        self.fetion_no_line_edit.setGeometry(QtCore.QRect(81, 21, 201, 22))
        self.fetion_no_line_edit.setText(_fromUtf8(""))
        self.fetion_no_line_edit.setReadOnly(True)
        self.fetion_no_line_edit.setObjectName(_fromUtf8("fetion_no_line_edit"))
        self.label_5 = QtGui.QLabel(ProfileDialog)
        self.label_5.setGeometry(QtCore.QRect(21, 85, 33, 16))
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.email_line_edit = QtGui.QLineEdit(ProfileDialog)
        self.email_line_edit.setEnabled(True)
        self.email_line_edit.setGeometry(QtCore.QRect(81, 85, 201, 22))
        self.email_line_edit.setText(_fromUtf8(""))
        self.email_line_edit.setReadOnly(True)
        self.email_line_edit.setObjectName(_fromUtf8("email_line_edit"))
        self.label_6 = QtGui.QLabel(ProfileDialog)
        self.label_6.setGeometry(QtCore.QRect(21, 170, 52, 16))
        self.label_6.setObjectName(_fromUtf8("label_6"))

        self.retranslateUi(ProfileDialog)
        QtCore.QObject.connect(self.save_btn, QtCore.SIGNAL(_fromUtf8("clicked()")), ProfileDialog.accept)
        QtCore.QMetaObject.connectSlotsByName(ProfileDialog)

    def retranslateUi(self, ProfileDialog):
        ProfileDialog.setWindowTitle(QtGui.QApplication.translate("ProfileDialog", "Profile", None, QtGui.QApplication.UnicodeUTF8))
        self.save_btn.setText(QtGui.QApplication.translate("ProfileDialog", "关闭 (&C)", None, QtGui.QApplication.UnicodeUTF8))
        self.save_btn.setShortcut(QtGui.QApplication.translate("ProfileDialog", "Return", None, QtGui.QApplication.UnicodeUTF8))
        self.avatar_label.setText(QtGui.QApplication.translate("ProfileDialog", "avatar", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("ProfileDialog", "姓名", None, QtGui.QApplication.UnicodeUTF8))
        self.label_3.setText(QtGui.QApplication.translate("ProfileDialog", "飞信号", None, QtGui.QApplication.UnicodeUTF8))
        self.label_4.setText(QtGui.QApplication.translate("ProfileDialog", "手机号", None, QtGui.QApplication.UnicodeUTF8))
        self.label_5.setText(QtGui.QApplication.translate("ProfileDialog", "EMail", None, QtGui.QApplication.UnicodeUTF8))
        self.label_6.setText(QtGui.QApplication.translate("ProfileDialog", "心情短语", None, QtGui.QApplication.UnicodeUTF8))
bsd-3-clause
-4,948,456,608,463,711,000
57.535714
132
0.702054
false
3.519685
false
false
false
thoreg/satchmo
store/localsite/templatetags/email_munge.py
1
1857
""" Stolen from: http://tomcoote.co.uk/code-bank/django-email-munger/ """ from django import template from django.template.defaultfilters import stringfilter from django.utils.safestring import mark_safe from django.utils.html import conditional_escape import re register = template.Library() @register.filter @stringfilter def mungify(email, text=None, autoescape=None): ''' Template filter to hide an email address away from any sort of email harvester type web scrapers and so keep away from spam etc. The filter should be applied on a string which represents an email address. You can optionally give the filter a parameter which will represent the name of the resulting email href link. If no extra parameter is given the email address will be used as the href text. {{ email|mungify:"contact me" }} or {{ email|mungify }} The output is javascript which will write out the email href link in a way so as to not actually show the email address in the source code as plain text. ''' text = text or email if autoescape: email = conditional_escape(email) text = conditional_escape(text) emailArrayContent = '' textArrayContent = '' r = lambda c: '"' + str(ord(c)) + '",' for c in email: emailArrayContent += r(c) for c in text: textArrayContent += r(c) result = """<script> var _tyjsdf = [%s], _qplmks = [%s]; document.write('<a href="mailto:'); for(_i=0;_i<_tyjsdf.length;_i++){document.write('&#'+_tyjsdf[_i]+';');} document.write('">'); for(_i=0;_i<_qplmks.length;_i++){document.write('&#'+_qplmks[_i]+';');} document.write('</a>'); </script>""" % (re.sub(r',$', '', emailArrayContent), re.sub(r',$', '', textArrayContent)) return mark_safe(result) mungify.needs_autoescape = True
bsd-3-clause
-8,291,347,823,628,032,000
32.160714
98
0.655358
false
3.655512
false
false
false
vicente-gonzalez-ruiz/QSVC
trunk/src/old_py/info_mc_j2k.py
1
7723
#!/usr/bin/python # -*- coding: iso-8859-15 -*- ## @file info_mc_j2k.py # The size in bytes, and a codestream Kbps, even detailed subband # level and neglecting headers, from a MCJ2K codestream. # # @authors Jose Carmelo Maturana-Espinosa\n Vicente Gonzalez-Ruiz. # @date Last modification: 2015, January 7. # ## @package info_mc_j2k # The size in bytes, and a codestream Kbps, even detailed subband # level and neglecting headers, from a MCJ2K codestream. import sys import os import re import math import os.path from GOP import GOP from subprocess import check_call from subprocess import CalledProcessError from MCTF_parser import MCTF_parser ## Refers to high frequency subbands. HIGH = "high_" ## Refers to low frequency subbands. LOW = "low_" ## Refers to fields of motion. MOTION = "motion_residue_" ## Indicates whether a log is recorded in a file. print_file = False ## Number of Group Of Pictures to process. GOPs = 1 ## Number of Temporal Resolution Levels. TRLs = 5 ## Frames per second. FPS = 30 # 30 # 50 ## The parser module provides an interface to Python's internal parser ## and byte-code compiler. parser = MCTF_parser(description="Information of codestream.") parser.GOPs(GOPs) parser.FPS(FPS) ## A script may only parse a few of the command-line arguments, ## passing the remaining arguments on to another script or program. args = parser.parse_known_args()[0] if args.GOPs : GOPs = int(args.GOPs) if args.TRLs: TRLs = int(args.TRLs) if args.FPS : FPS = int(args.FPS) ## Initializes the class GOP (Group Of Pictures). gop=GOP() ## Extract the value of the size of a GOP, that is, the number of images. GOP_size = gop.get_size(TRLs) ## Calculate the total number of video images. pictures = GOPs * GOP_size + 1 ## Duration of the sequence. duration = pictures / (FPS * 1.0) ## Number of bytes of an entire directory. The size in bytes, and a ## codestream Kbps, even detailed subband level and neglecting headers ## is performed in info.py. # @param the_path Directory path. # @param key If you want to have only a certain type of files in the directory. # @return Files size. def get_size (the_path, key) : path_size = 0 for path, dirs, files in os.walk(the_path) : for fil in files : if re.search(key, fil) : path_size += os.path.getsize(the_path + "/" + fil) return path_size #----------------------------------------------- #----------------------------------------------- #- MAIN ---------------------------------------- #----------------------------------------------- #----------------------------------------------- # info = [[kbps GOP1, kbps GOP2, kbps GOPn], kbps GOPs, rmse1D] ## Current path. p = sub.Popen("echo $PWD", shell=True, stdout=sub.PIPE, stderr=sub.PIPE) out, err = p.communicate() ## Reconstruction path. path_tmp = out[:-1] ######## # RMSE # ######## # Existe la reconstrucción. Entonces se calcula su distorsión. 
if os.path.exists(path_tmp + "/low_0") : ########## # SNR 1D # ########## # BRC y UnaSubParaTodas p = sub.Popen("snr --file_A=low_0 --file_B=../low_0 2> /dev/null | grep RMSE | cut -f 3", shell=True, stdout=sub.PIPE, stderr=sub.PIPE) # subIndependientes #p = sub.Popen("snr --file_A=high_4 --file_B=../high_4 2> /dev/null | grep RMSE | cut -f 3", # shell=True, stdout=sub.PIPE, stderr=sub.PIPE) out, err = p.communicate() #errcode = p.returncode if out == "" : #if err in locals() : check_call("echo SNR sin salida.", shell=True) exit (0) rmse1D = float(out) ########## # SNR 2D # ########## #rmse2D=`snr2D --block_size=$block_size_snr --dim_X=$RES_X --dim_Y=$RES_Y --file_A=$DATA/$VIDEO.yuv --file_B=$data_dir/tmp/low_0_UP --FFT 2> /dev/null | grep RMSE | cut -f 3` # FFT en 3D ########## # SNR 3D # ########## #rmse3D=`snr3D --block_size=$block_size_snr --dim_X=$RES_X --dim_Y=$RES_Y --dim_Z=5 --file_A=$DATA/$VIDEO.yuv --file_B=$data_dir/tmp/low_0_UP --FFT 2> /dev/null | grep RMSE | cut -f 3` # FFT en 3D #################### # export variables # #################### globals()["info_rmse1D"] = rmse1D #p = sub.Popen("export info_mc_j2k_rmse1D=" + rmse1D, shell=True, stdout=sub.PIPE, stderr=sub.PIPE) info[2] = rmse1D # info = [[kbps GOP1, kbps GOP2, kbps GOPn], kbps GOPs, rmse1D] if not 'info' in globals() : globals()["info"] = [[]] ################### # Print a fichero # ################### ######################### # Media Pts de cada GOP # ######################### for par in range (0, len(info[0])) : info[1] = Pts_GOPs[par] check_call("echo \"" + Pts_GOPs + "\" >> ../info_PtsGOPs", shell=True) #BASH: check_call("echo \"" + ${PtsGOPs[@]} + "\" >> ../info_PtsGOPs", shell=True) check_call("echo \"" + average_Pts_GOPs + "\" >> ../info_average_PtsGOPs", shell=True) ######## # KBPS # ######## # No existe la reconstrucción. Entonces se calculan los kbps del codestream aún comprimido. else : TO_KBPS = 8.0 / duration / 1000 ############ # KBPS GOP # ############ nGOP = 1 while nGOP <= GOPs : # H's subband = TRLs - 1 nImage = 0 pictures_sub = GOP_size while subband > 0 : pictures_sub = ( pictures_sub + 1 ) / 2 # SIZES MOTION un GOP # _kbps_M.append( get_size(path_tmp, MOTION + str(subband) + "_*_[" + str('%04d'%(nImage*1)) + "-" + str('%04d'%pictures_sub) + "].j2c") * TO_KBPS ) # SIZES H's un GOP # # SIZES L un GOP # subband -= 1 nImage = pictures_sub # L # SUMATORIA # print ("sumatoria size de este GOP. Y apuntarlo.") nGOP += 1 ########################################## # M kbps_M = get_size(path_tmp, MOTION) * TO_KBPS # T 1ªL (fuera del GOP) kbps_T_first_L = [ get_size(path_tmp, LOW + str(TRLs-1) + "_[YUV]_0000.j2c") * TO_KBPS ] # T L (la del GOP) _kbps_T = [ get_size(path_tmp, LOW + str(TRLs-1) + "_[YUV]_000?.j2c") * TO_KBPS ] # = [ get_size(path_extract, LOW) ] (las imagenes de 2 L) # T (Hs) for i in range (1, TRLs) : _kbps_T.append( get_size(path_tmp, HIGH + str(TRLs - i)) * TO_KBPS ) # T del GOP (2ªL + Hs) _kbps_T.append( _kbps_T[0] + (get_size(path_tmp, HIGH) * TO_KBPS) ) # kbps_GOP (M + T) kbps_GOP = kbps_M + _kbps_T[TRLs] # kbps_ALL (M + T). 
    bytes_mj2k = get_size(path_tmp, "")
    kbps_ALL = bytes_mj2k * TO_KBPS

    ####################
    # export variables #
    ####################
    globals()["info_kbps_M"] = kbps_M
    globals()["info_kbps_T"] = _kbps_T
    globals()["info_kbps_GOP"] = kbps_GOP
    globals()["info_kbps_ALL"] = kbps_ALL

'''
##############
CODE NOTES #
'''
##############
'''
# VIA BASH
#p = sub.Popen("export info_mc_j2k_kbps_M="   + kbps_M   + "; "
#              "export info_mc_j2k_kbps_T="   + _kbps_T  + "; "
#              "export info_mc_j2k_kbps_GOP=" + kbps_GOP + "; "
#              "export info_mc_j2k_kbps_ALL=" + kbps_ALL
#              , shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
#out, err = p.communicate()
##errcode = p.returncode

# VIA PYTHON
'''
'''
import re
import os
path_size = 0
for path, dirs, files in os.walk("/home/cmaturana") :
    for fil in files :
        if re.search("aaa0[2-3]", fil) :
            path_size += os.path.getsize("/home/cmaturana" + "/" + fil)
path_size

http://www.tutorialspoint.com/python/python_reg_expressions.htm
'''
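# Illustrative usage of get_size() (editor's sketch; the directory and
# pattern below are hypothetical):
#
#   bytes_m2 = get_size("/tmp/mcj2k", MOTION + "2")  # motion fields, subband 2
#   kbps_m2 = bytes_m2 * 8.0 / duration / 1000       # bytes -> kbps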
gpl-2.0
6,323,669,541,710,254,000
25.179661
200
0.545125
false
2.880642
false
false
false
numerical-mathematics/extrapolation
ex_parallel_original.py
1
28458
from __future__ import division
import numpy as np
import multiprocessing as mp
import math

NUM_WORKERS = None

def set_NUM_WORKERS(nworkers):
    global NUM_WORKERS
    if nworkers is None:
        try:
            NUM_WORKERS = mp.cpu_count()
        except NotImplementedError:
            NUM_WORKERS = 4
    else:
        NUM_WORKERS = max(nworkers, 1)

def error_norm(y1, y2, atol, rtol):
    tol = atol + np.maximum(np.abs(y1), np.abs(y2))*rtol
    return np.linalg.norm((y1-y2)/tol)/(len(y1)**0.5)

def adapt_step(method, func, tn_1, yn_1, args, y, y_hat, h, p, atol, rtol,
               pool, seq=(lambda t: 2*t), dense=False):
    '''
    Only called when adaptive == 'step'; i.e., for fixed order. Checks if
    the step size is accepted. If not, computes a new step size and checks
    again. Repeats until the step size is accepted.

    **Inputs**:
        - method      -- the method on which the extrapolation is based
        - func        -- the right hand side function of the IVP.
                         Must output a non-scalar numpy.ndarray
        - tn_1, yn_1  -- y(tn_1) = yn_1 is the last accepted value of the
                         computed solution
        - args        -- extra arguments to pass to function.
        - y, y_hat    -- the computed values of y(tn_1 + h) of order p and
                         (p-1), respectively
        - h           -- the step size taken and to be tested
        - p           -- the order of the higher extrapolation method.
                         Assumed to be greater than 1.
        - atol, rtol  -- the absolute and relative tolerance of the local
                         error.
        - seq         -- the step-number sequence. optional; defaults to
                         the harmonic sequence given by (lambda t: 2*t)

    **Outputs**:
        - y, y_hat    -- the computed solution of orders p and (p-1) at
                         the accepted step size
        - h           -- the accepted step taken to compute y and y_hat
        - h_new       -- the proposed next step size
        - (fe_seq, fe_tot) -- the number of sequential f evaluations, and
                         the total number of f evaluations
    '''
    facmax = 5
    facmin = 0.2
    fac = 0.8
    err = error_norm(y, y_hat, atol, rtol)
    h_new = h*min(facmax, max(facmin, fac*((1/err)**(1/p))))

    fe_seq = 0
    fe_tot = 0

    while err > 1:
        h = h_new
        if dense:
            y, y_hat, (fe_seq_, fe_tot_), poly = method(func, tn_1, yn_1,
                args, h, p, pool, seq=seq, dense=dense)
        else:
            y, y_hat, (fe_seq_, fe_tot_) = method(func, tn_1, yn_1, args,
                h, p, pool, seq=seq, dense=dense)
        fe_seq += fe_seq_
        fe_tot += fe_tot_
        err = error_norm(y, y_hat, atol, rtol)
        h_new = h*min(facmax, max(facmin, fac*((1/err)**(1/p))))

    if dense:
        return (y, y_hat, h, h_new, (fe_seq, fe_tot), poly)
    else:
        return (y, y_hat, h, h_new, (fe_seq, fe_tot))

def extrapolation_parallel(method, func, y0, t, args=(), full_output=False,
        rtol=1.0e-8, atol=1.0e-8, h0=0.5, mxstep=10e4, adaptive="order",
        p=4, seq=(lambda t: 2*t), nworkers=None):
    '''
    Solves the system of IVPs dy/dt = func(y, t0, ...) with parallel
    extrapolation.

    **Parameters**
        - method: callable()
            The method on which the extrapolation is based
        - func: callable(y, t0, ...)
            Computes the derivative of y at t0 (i.e. the right hand side
            of the IVP). Must output a non-scalar numpy.ndarray
        - y0 : numpy.ndarray
            Initial condition on y (can be a vector).
            Must be a non-scalar numpy.ndarray
        - t : array
            A sequence of time points for which to solve for y. The initial
            value point should be the first element of this sequence.
        - args : tuple, optional
            Extra arguments to pass to function.
        - full_output : bool, optional
            True if to return a dictionary of optional outputs as the second
            output. Defaults to False

    **Returns**
        - ys : numpy.ndarray, shape (len(t), len(y0))
            Array containing the value of y for each desired time in t, with
            the initial value y0 in the first row.
        - infodict : dict, only returned if full_output == True
            Dictionary containing additional output information
             KEY        MEANING
            'fe_seq'    cumulative number of sequential derivative evaluations
            'fe_tot'    cumulative number of total derivative evaluations
            'nst'       cumulative number of successful time steps
            'h_avg'     average step size if adaptive == "order"
                        (None otherwise)
            'k_avg'     average extrapolation order if adaptive == "order"
                        (None otherwise)

    **Other Parameters**
        - rtol, atol : float, optional
            The input parameters rtol and atol determine the error control
            performed by the solver. The solver will control the vector,
            e = y2 - y1, of estimated local errors in y, according to an
            inequality of the form l2-norm of (e / (ewt * len(e))) <= 1,
            where ewt is a vector of positive error weights computed as
            ewt = atol + max(y1, y2) * rtol. rtol and atol can be either
            vectors the same length as y0 or scalars. Both default to 1.0e-8.
        - h0 : float, optional
            The step size to be attempted on the first step.
            Defaults to 0.5
        - mxstep : int, optional
            Maximum number of (internally defined) steps allowed for each
            integration point in t. Defaults to 10e4
        - adaptive: string, optional
            Specifies the strategy of integration. Can take three values:
            -- "fixed" = use fixed step size and order strategy.
            -- "step"  = use adaptive step size but fixed order strategy.
            -- "order" = use adaptive step size and adaptive order strategy.
            Defaults to "order"
        - p: int, optional
            The order of extrapolation if adaptive is not "order", and the
            starting order otherwise. Defaults to 4
        - seq: callable(k) (k: positive int), optional
            The step-number sequence. Defaults to the harmonic sequence
            given by (lambda t: 2*t)
        - nworkers: int, optional
            The number of workers working in parallel. If nworkers==None,
            then the number of workers is set to the number of CPUs on the
            running machine. Defaults to None.
    '''
    set_NUM_WORKERS(nworkers)
    pool = mp.Pool(NUM_WORKERS)
    assert len(t) > 1, ("the array t must be of length at least 2, " +
        "and the initial value point should be the first element of t")
    dense = True if len(t) > 2 else False
    ys = np.zeros((len(t), len(y0)), dtype=(type(y0[0])))
    ys[0] = y0
    t0 = t[0]
    fe_seq = 0
    fe_tot = 0
    nstp = 0
    cur_stp = 0

    if adaptive == "fixed":
        # Doesn't work correctly with dense output
        ts, h = np.linspace(t0, t[-1], (t[-1]-t0)/h0 + 1, retstep=True)
        y = 1*y0

        for i in range(len(ts) - 1):
            if dense:
                y, _, (fe_seq_, fe_tot_), poly = method(func, ts[i], y,
                    args, h, p, pool, seq=seq, dense=dense)
            else:
                y, _, (fe_seq_, fe_tot_) = method(func, ts[i], y, args, h,
                    p, pool, seq=seq, dense=dense)
            fe_seq += fe_seq_
            fe_tot += fe_tot_
            nstp += 1
            cur_stp += 1
            if cur_stp > mxstep:
                # t_curr does not exist in this branch; report ts[i] instead
                raise Exception('Reached Max Number of Steps. Current t = '
                    + str(ts[i]))
        ys[1] = 1*y

    elif adaptive == "step":
        assert p > 1, "order of method must be greater than 1 if adaptive=step"
        t_max = t[-1]
        t_index = 1
        y, t_curr = 1*y0, t0
        h = min(h0, t_max-t0)

        while t_curr < t_max:
            if dense:
                y_, y_hat, (fe_seq_, fe_tot_), poly = method(func, t_curr,
                    y, args, h, p, pool, seq=seq, dense=dense)
            else:
                y_, y_hat, (fe_seq_, fe_tot_) = method(func, t_curr, y,
                    args, h, p, pool, seq=seq, dense=dense)
            fe_seq += fe_seq_
            fe_tot += fe_tot_

            if dense:
                reject_inter = True
                while reject_inter:
                    y_temp, _, h, h_new, (fe_seq_, fe_tot_), poly = adapt_step(
                        method, func, t_curr, y, args, y_, y_hat, h, p,
                        atol, rtol, pool, seq=seq, dense=dense)
                    reject_inter = False
                    while t_index < len(t) and t[t_index] <= t_curr + h:
                        y_poly, errint, h_int = poly((t[t_index] - t_curr)/h)
                        if errint <= 10:
                            ys[t_index] = 1*y_poly
                            cur_stp = 0
                            t_index += 1
                            reject_inter = False
                        else:
                            h = h_int
                            fe_seq += fe_seq_
                            fe_tot += fe_tot_
                            reject_inter = True
                            break
                    if not reject_inter:
                        y = 1*y_temp
            else:
                y, _, h, h_new, (fe_seq_, fe_tot_) = adapt_step(method,
                    func, t_curr, y, args, y_, y_hat, h, p, atol, rtol,
                    pool, seq=seq, dense=dense)

            t_curr += h
            fe_seq += fe_seq_
            fe_tot += fe_tot_
            nstp += 1
            cur_stp += 1
            if cur_stp > mxstep:
                raise Exception('Reached Max Number of Steps. Current t = '
                    + str(t_curr))
            h = min(h_new, t_max - t_curr)

        if not dense:
            ys[-1] = 1*y

    elif adaptive == "order":
        t_max = t[-1]
        t_index = 1
        y, t_curr, k = 1*y0, t0, p
        h = min(h0, t_max-t0)

        sum_ks, sum_hs = 0, 0

        while t_curr < t_max:
            if dense:
                reject_inter = True
                while reject_inter:
                    y_temp, h, k, h_new, k_new, (fe_seq_, fe_tot_), poly = method(
                        func, t_curr, y, args, h, k, atol, rtol, pool,
                        seq=seq, dense=dense)
                    reject_inter = False
                    old_index = t_index
                    while t_index < len(t) and t[t_index] <= t_curr + h:
                        y_poly, errint, h_int = poly((t[t_index] - t_curr)/h)
                        if errint <= 10:
                            ys[t_index] = 1*y_poly
                            cur_stp = 0
                            t_index += 1
                            reject_inter = False
                        else:
                            h = h_int
                            fe_seq += fe_seq_
                            fe_tot += fe_tot_
                            reject_inter = True
                            t_index = old_index
                            break
                    if not reject_inter:
                        y = 1*y_temp
            else:
                y, h, k, h_new, k_new, (fe_seq_, fe_tot_) = method(func,
                    t_curr, y, args, h, k, atol, rtol, pool, seq=seq,
                    dense=dense)

            t_curr += h
            fe_seq += fe_seq_
            fe_tot += fe_tot_
            sum_ks += k
            sum_hs += h
            nstp += 1
            cur_stp += 1
            if cur_stp > mxstep:
                raise Exception('Reached Max Number of Steps. Current t = '
                    + str(t_curr))
            h = min(h_new, t_max - t_curr)
            k = k_new

        if not dense:
            ys[-1] = 1*y

        pool.close()
        if full_output:
            # keys kept consistent with the docstring and the other branches
            infodict = {'fe_seq': fe_seq, 'fe_tot': fe_tot, 'nst': nstp,
                        'h_avg': sum_hs/nstp, 'k_avg': sum_ks/nstp}
            return (ys, infodict)
        else:
            return ys
    else:
        raise Exception("\'" + str(adaptive) +
            "\' is not a valid value for the argument \'adaptive\'")

    pool.close()
    if full_output:
        infodict = {'fe_seq': fe_seq, 'fe_tot': fe_tot, 'nst': nstp,
                    'h_avg': None, 'k_avg': None}
        return (ys, infodict)
    else:
        return ys

def compute_stages_dense((func, tn, yn, args, h, k_nj_lst)):
    res = []
    for (k, nj) in k_nj_lst:
        f_tot = 0
        nj = int(nj)
        Y = np.zeros((nj+1, len(yn)), dtype=(type(yn[0])))
        f_yj = np.zeros((nj+1, len(yn)), dtype=(type(yn[0])))
        Y[0] = yn
        f_yj[0] = func(*(Y[0], tn) + args)
        f_tot += 1
        Y[1] = Y[0] + h/nj*f_yj[0]
        for j in range(2, nj+1):
            if j == nj/2 + 1:
                y_half = Y[j-1]
            f_yj[j-1] = func(*(Y[j-1], tn + (j-1)*(h/nj)) + args)
            f_tot += 1
            Y[j] = Y[j-2] + (2*h/nj)*f_yj[j-1]

        f_yj[nj] = func(*(Y[nj], tn + h) + args)
        f_tot += 1
        res += [(k, nj, Y[nj], y_half, f_yj, f_tot)]

    return res

def compute_stages((func, tn, yn, args, h, k_nj_lst)):
    res = []
    for (k, nj) in k_nj_lst:
        nj = int(nj)
        Y = np.zeros((nj+1, len(yn)), dtype=(type(yn[0])))
        Y[0] = yn
        Y[1] = Y[0] + h/nj*func(*(Y[0], tn) + args)
        for j in range(2, nj+1):
            Y[j] = Y[j-2] + (2*h/nj)*func(*(Y[j-1], tn + (j-1)*(h/nj)) + args)
        res += [(k, nj, Y[nj])]

    return res

def balance_load(k, seq=(lambda t: 2*t)):
    if k <= NUM_WORKERS:
        k_nj_lst = [[(i, seq(i))] for i in range(k, 0, -1)]
    else:
        k_nj_lst = [[] for i in range(NUM_WORKERS)]
        index = range(NUM_WORKERS)
        i = k
        while 1:
            if i >= NUM_WORKERS:
                for j in index:
                    k_nj_lst[j] += [(i, seq(i))]
                    i -= 1
            else:
                for j in index:
                    if i == 0:
                        break
                    k_nj_lst[j] += [(i, seq(i))]
                    i -= 1
                break
            index = index[::-1]

    fe_tot = 0
    for i in range(len(k_nj_lst)):
        fe_tot += sum([pair[1] for pair in k_nj_lst[i]])

    fe_seq = sum([pair[1] for pair in k_nj_lst[0]])

    return (k_nj_lst, fe_seq, fe_tot)

def compute_ex_table(func, tn, yn, args, h, k, pool, seq=(lambda t: 2*t),
        dense=False):
    """
    **Inputs**:
        - func: RHS of ODE
        - tn, yn: time and solution values from previous step
        - args: any extra args to func
        - h: proposed step size
        - k: proposed # of extrapolation iterations
        - pool: parallel worker pool
        - seq: extrapolation step number sequence
        - dense: whether to provide dense output
    """
    T = np.zeros((k+1, k+1, len(yn)), dtype=(type(yn[0])))
    k_nj_lst, fe_seq, fe_tot = balance_load(k, seq=seq)
    jobs = [(func, tn, yn, args, h, k_nj) for k_nj in k_nj_lst]
    if dense:
        results = pool.map(compute_stages_dense, jobs, chunksize=1)
    else:
        results = pool.map(compute_stages, jobs, chunksize=1)

    # process the returned results from the pool
    if dense:
        fe_tot = 0
        y_half = (k+1)*[None]
        f_yj = (k+1)*[None]
        hs = (k+1)*[None]
        for res in results:
            for (k_, nj_, Tk_, y_half_, f_yj_, fe_tot_) in res:
                T[k_, 1] = Tk_
                y_half[k_] = y_half_
                f_yj[k_] = f_yj_
                hs[k_] = h/nj_
                fe_tot += fe_tot_
    else:
        for res in results:
            for (k_, nj_, Tk_) in res:
                T[k_, 1] = Tk_

    # compute extrapolation table
    # only correct for midpoint method
    for i in range(2, k+1):
        for j in range(i, k+1):
            T[j,i] = T[j,i-1] + (T[j,i-1] - T[j-1,i-1])/((seq(j)/(seq(j-i+1)))**2 - 1)

    if dense:
        Tkk = T[k,k]
        f_Tkk = func(*(Tkk, tn+h) + args)
        fe_seq += 1
        fe_tot += 1
        return (T, fe_seq, fe_tot, yn, Tkk, f_Tkk, y_half, f_yj, hs)
    else:
        return (T, fe_seq, fe_tot)

def finite_diff(j, f_yj, hj):
    # Called by interpolate
    max_order = 2*j
    nj = len(f_yj) - 1
    coeff = [1, 1]
    dj = (max_order+1)*[None]
    dj[1] = 1*f_yj[nj/2]
    dj[2] = (f_yj[nj/2+1] - f_yj[nj/2-1])/(2*hj)
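    # Editor's note: dj[kappa] holds a centered finite-difference estimate
    # of the (kappa-1)-th derivative of f at the midpoint, built from the
    # stage derivatives f_yj with Pascal-triangle coefficients. For
    # example, the first pass of the loop below (order = 2) produces
    # coeff = [1, 2, 1] and index = [nj/2+2, nj/2, nj/2-2], i.e.
    #   dj[3] = (f_yj[nj/2+2] - 2*f_yj[nj/2] + f_yj[nj/2-2]) / (2*hj)**2,
    # the second central difference over a spacing of 2*hj.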
    for order in range(2, max_order):
        coeff = [1] + [coeff[j] + coeff[j+1] for j in range(len(coeff)-1)] + [1]
        index = [nj/2 + order - 2*i for i in range(order+1)]
        sum_ = 0
        for i in range(order+1):
            sum_ += ((-1)**i)*coeff[i]*f_yj[index[i]]
        dj[order+1] = sum_ / (2*hj)**order

    return dj

def compute_ds(y_half, f_yj, hs, k, seq=(lambda t: 4*t-2)):
    # Called by interpolate
    dj_kappa = np.zeros((2*k+1, k+1), dtype=(type(y_half[1])))
    ds = np.zeros((2*k+1), dtype=(type(y_half[1])))

    for j in range(1, k+1):
        dj_kappa[0,j] = 1*y_half[j]
        nj = len(f_yj[j])-1
        dj_ = finite_diff(j, f_yj[j], hs[j])
        for kappa in range(1, 2*j+1):
            dj_kappa[kappa,j] = 1*dj_[kappa]

    skip = 0
    for kappa in range(2*k+1):
        T = np.zeros((k+1-int(skip/2), k+1 - int(skip/2)), dtype=(type(y_half[1])))
        T[:,1] = 1*dj_kappa[kappa, int(skip/2):]
        # print("T1"+str(T[:,1]))
        for i in range(2, k+1-int(skip/2)):
            for j in range(i, k+1-int(skip/2)):
                T[j,i] = T[j,i-1] + (T[j,i-1] - T[j-1,i-1])/((seq(j)/(seq(j-i+1)))**2 - 1)
        ds[kappa] = 1*T[k-int(skip/2), k-int(skip/2)]
        if not(kappa == 0):
            skip += 1

    return ds

def interpolate(y0, Tkk, f_Tkk, y_half, f_yj, hs, H, k, atol, rtol,
        seq=(lambda t: 4*t-2)):
    u = 2*k-3
    u_1 = u - 1
    ds = compute_ds(y_half, f_yj, hs, k, seq=seq)
    print "ds->" + str(ds)
    a_u = (u+5)*[None]
    a_u_1 = (u_1+5)*[None]

    for i in range(u+1):
        a_u[i] = (H**i)*ds[i]/math.factorial(i)

    for i in range(u_1 + 1):
        a_u_1[i] = (H**i)*ds[i]/math.factorial(i)

    A_inv_u = (2**(u-2))*np.matrix(
        [[(-2*(3 + u))*(-1)**u,   -(-1)**u,    2*(3 + u),    -1],
         [(4*(4 + u))*(-1)**u,    2*(-1)**u,   4*(4 + u),    -2],
         [(8*(1 + u))*(-1)**u,    4*(-1)**u,   -8*(1 + u),   4],
         [(-16*(2 + u))*(-1)**u,  -8*(-1)**u,  -16*(2 + u),  8]]
        )

    A_inv_u_1 = (2**(u_1-2))*np.matrix(
        [[(-2*(3 + u_1))*(-1)**u_1,   -(-1)**u_1,    2*(3 + u_1),    -1],
         [(4*(4 + u_1))*(-1)**u_1,    2*(-1)**u_1,   4*(4 + u_1),    -2],
         [(8*(1 + u_1))*(-1)**u_1,    4*(-1)**u_1,   -8*(1 + u_1),   4],
         [(-16*(2 + u_1))*(-1)**u_1,  -8*(-1)**u_1,  -16*(2 + u_1),  8]]
        )

    b1_u = 1*y0
    for i in range(u+1):
        b1_u -= a_u[i]/(-2)**i

    b1_u_1 = 1*y0
    for i in range(u_1+1):
        b1_u_1 -= a_u_1[i]/(-2)**i

    b2_u = H*f_yj[1][0]
    for i in range(1, u+1):
        b2_u -= i*a_u[i]/(-2)**(i-1)

    b2_u_1 = H*f_yj[1][0]
    for i in range(1, u_1+1):
        b2_u_1 -= i*a_u_1[i]/(-2)**(i-1)

    b3_u = 1*Tkk
    for i in range(u+1):
        b3_u -= a_u[i]/(2**i)

    b3_u_1 = 1*Tkk
    for i in range(u_1+1):
        b3_u_1 -= a_u_1[i]/(2**i)

    b4_u = H*f_Tkk
    for i in range(1, u+1):
        b4_u -= i*a_u[i]/(2**(i-1))

    b4_u_1 = H*f_Tkk
    for i in range(1, u_1+1):
        b4_u_1 -= i*a_u_1[i]/(2**(i-1))

    b_u = np.array([b1_u, b2_u, b3_u, b4_u])
    b_u_1 = np.array([b1_u_1, b2_u_1, b3_u_1, b4_u_1])

    x = A_inv_u*b_u
    x = np.array(x)
    x_1 = A_inv_u_1*b_u_1
    x_1 = np.array(x_1)

    a_u[u+1] = x[0]
    a_u[u+2] = x[1]
    a_u[u+3] = x[2]
    a_u[u+4] = x[3]

    a_u_1[u_1+1] = x_1[0]
    a_u_1[u_1+2] = x_1[1]
    a_u_1[u_1+3] = x_1[2]
    a_u_1[u_1+4] = x_1[3]

    # polynomial of degree u+4 defined on [0,1] and centered about 1/2;
    # it also returns the interpolation error (errint). If errint > 10,
    # the step is rejected
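    # Editor's note: the two 4x4 solves above pin down the four highest
    # coefficients of each interpolating polynomial so that it matches y
    # and H*y' at both endpoints of the step (t = 0 and t = 1 in the
    # scaled variable). Since the polynomial is centered about 1/2,
    # poly(0.5) simply returns a_u[0], the extrapolated midpoint value.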
    def poly(t):
        res = 1*a_u[0]
        for i in range(1, len(a_u)):
            res += a_u[i]*((t-0.5)**i)

        res_u_1 = 1*a_u_1[0]
        for i in range(1, len(a_u_1)):
            res_u_1 += a_u_1[i]*((t-0.5)**i)

        errint = error_norm(res, res_u_1, atol, rtol)
        h_int = H*((1/errint)**(1/(u+4)))

        return (res, errint, h_int)

    return poly

def midpoint_fixed_step(func, tn, yn, args, h, p, pool, seq=(lambda t: 2*t),
        dense=False, atol=1.0e-8, rtol=1.0e-8):
    # atol/rtol defaults added here so that the dense branch below does not
    # reference undefined names; callers that do not pass them get 1.0e-8.
    k = int(round(p/2))
    if dense:
        T, fe_seq, fe_tot, y0, Tkk, f_Tkk, y_half, f_yj, hs = compute_ex_table(
            func, tn, yn, args, h, k, pool, seq=seq, dense=dense)
        poly = interpolate(y0, Tkk, f_Tkk, y_half, f_yj, hs, h, k, atol,
            rtol, seq=seq)
        return (T[k,k], T[k-1,k-1], (fe_seq, fe_tot), poly)
    else:
        T, fe_seq, fe_tot = compute_ex_table(func, tn, yn, args, h, k,
            pool, seq=seq, dense=dense)
        return (T[k,k], T[k-1,k-1], (fe_seq, fe_tot))

def midpoint_adapt_order(func, tn, yn, args, h, k, atol, rtol, pool,
        seq=(lambda t: 2*t), dense=False):
    k_max = 10
    k_min = 3
    k = min(k_max, max(k_min, k))

    def A_k(k):
        """
        Expected time to compute k lines of the extrapolation table,
        in units of RHS evaluations.
        """
        sum_ = 0
        for i in range(k):
            sum_ += seq(i+1)
        # The second value is only an estimate
        return max(seq(k), sum_/NUM_WORKERS)

    H_k = lambda h, k, err_k: h*0.94*(0.65/err_k)**(1/(2*k-1))
    W_k = lambda Ak, Hk: Ak/Hk

    if dense:
        T, fe_seq, fe_tot, y0, Tkk, f_Tkk, y_half, f_yj, hs = compute_ex_table(
            func, tn, yn, args, h, k, pool, seq=seq, dense=dense)
    else:
        T, fe_seq, fe_tot = compute_ex_table(func, tn, yn, args, h, k,
            pool, seq=seq, dense=dense)

    # compute the error and work function for the stages k-2 and k
    err_k_2 = error_norm(T[k-2,k-3], T[k-2,k-2], atol, rtol)
    err_k_1 = error_norm(T[k-1,k-2], T[k-1,k-1], atol, rtol)
    err_k = error_norm(T[k,k-1], T[k,k], atol, rtol)
    h_k_2 = H_k(h, k-2, err_k_2)
    h_k_1 = H_k(h, k-1, err_k_1)
    h_k = H_k(h, k, err_k)
    w_k_2 = W_k(A_k(k-2), h_k_2)
    w_k_1 = W_k(A_k(k-1), h_k_1)
    w_k = W_k(A_k(k), h_k)

    if err_k_1 <= 1:
        # convergence in line k-1
        if err_k <= 1:
            y = T[k,k]
        else:
            y = T[k-1,k-1]

        k_new = k if w_k_1 < 0.9*w_k_2 else k-1
        h_new = h_k_1 if k_new <= k-1 else h_k_1*A_k(k)/A_k(k-1)

        if dense:
            poly = interpolate(y0, Tkk, f_Tkk, y_half, f_yj, hs, h, k,
                atol, rtol, seq=seq)
    elif err_k <= 1:
        # convergence in line k
        y = T[k,k]

        k_new = k-1 if w_k_1 < 0.9*w_k else (
            k+1 if w_k < 0.9*w_k_1 else k)
        h_new = h_k_1 if k_new == k-1 else (
            h_k if k_new == k else h_k*A_k(k+1)/A_k(k))

        if dense:
            poly = interpolate(y0, Tkk, f_Tkk, y_half, f_yj, hs, h, k,
                atol, rtol, seq=seq)
    else:
        # no convergence
        # reject (h, k) and restart with new values accordingly
        k_new = k-1 if w_k_1 < 0.9*w_k else k
        h_new = min(h_k_1 if k_new == k-1 else h_k, h)

        if dense:
            y, h, k, h_new, k_new, (fe_seq_, fe_tot_), poly = midpoint_adapt_order(
                func, tn, yn, args, h_new, k_new, atol, rtol, pool,
                seq=seq, dense=dense)
        else:
            y, h, k, h_new, k_new, (fe_seq_, fe_tot_) = midpoint_adapt_order(
                func, tn, yn, args, h_new, k_new, atol, rtol, pool,
                seq=seq, dense=dense)
        fe_seq += fe_seq_
        fe_tot += fe_tot_

    if dense:
        return (y, h, k, h_new, k_new, (fe_seq, fe_tot), poly)
    else:
        return (y, h, k, h_new, k_new, (fe_seq, fe_tot))

def ex_midpoint_parallel(func, y0, t, args=(), full_output=0, rtol=1.0e-8,
        atol=1.0e-8, h0=0.5, mxstep=10e4, adaptive="order", p=4,
        nworkers=None):
    '''
    (An instantiation of extrapolation_parallel() function with the
    midpoint method.)

    Solves the system of IVPs dy/dt = func(y, t0, ...) with parallel
    extrapolation.

    **Parameters**
        - func: callable(y, t0, ...)
            Computes the derivative of y at t0 (i.e. the right hand side
            of the IVP). Must output a non-scalar numpy.ndarray
        - y0 : numpy.ndarray
            Initial condition on y (can be a vector).
            Must be a non-scalar numpy.ndarray
        - t : array
            A sequence of time points for which to solve for y. The initial
            value point should be the first element of this sequence.
        - args : tuple, optional
            Extra arguments to pass to function.
        - full_output : bool, optional
            True if to return a dictionary of optional outputs as the second
            output. Defaults to False

    **Returns**
        - ys : numpy.ndarray, shape (len(t), len(y0))
            Array containing the value of y for each desired time in t, with
            the initial value y0 in the first row.
        - infodict : dict, only returned if full_output == True
            Dictionary containing additional output information
             KEY        MEANING
            'fe_seq'    cumulative number of sequential derivative evaluations
            'fe_tot'    cumulative number of total derivative evaluations
            'nst'       cumulative number of successful time steps
            'h_avg'     average step size if adaptive == "order"
                        (None otherwise)
            'k_avg'     average extrapolation order if adaptive == "order"
                        (None otherwise)

    **Other Parameters**
        - rtol, atol : float, optional
            The input parameters rtol and atol determine the error control
            performed by the solver. The solver will control the vector,
            e = y2 - y1, of estimated local errors in y, according to an
            inequality of the form l2-norm of (e / (ewt * len(e))) <= 1,
            where ewt is a vector of positive error weights computed as
            ewt = atol + max(y1, y2) * rtol. rtol and atol can be either
            vectors the same length as y0 or scalars. Both default to 1.0e-8.
        - h0 : float, optional
            The step size to be attempted on the first step.
            Defaults to 0.5
        - mxstep : int, optional
            Maximum number of (internally defined) steps allowed for each
            integration point in t. Defaults to 10e4
        - adaptive: string, optional
            Specifies the strategy of integration. Can take three values:
            -- "fixed" = use fixed step size and order strategy.
            -- "step"  = use adaptive step size but fixed order strategy.
            -- "order" = use adaptive step size and adaptive order strategy.
            Defaults to "order"
        - p: int, optional
            The order of extrapolation if adaptive is not "order", and the
            starting order otherwise. Defaults to 4
        - nworkers: int, optional
            The number of workers working in parallel. If nworkers==None,
            then the number of workers is set to the number of CPUs on the
            running machine. Defaults to None.
    '''
    if len(t) > 2:
        seq = lambda t: 4*t - 2     # {2,6,10,14,...} sequence for dense output
    else:
        seq = lambda t: 2*t         # harmonic sequence for midpoint method

    method = midpoint_adapt_order if adaptive == "order" else midpoint_fixed_step

    return extrapolation_parallel(method, func, y0, t, args=args,
        full_output=full_output, rtol=rtol, atol=atol, h0=h0, mxstep=mxstep,
        adaptive=adaptive, p=p, seq=seq, nworkers=nworkers)
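# ----------------------------------------------------------------------
# Editor's sketch of how this module might be driven (not part of the
# original file; the test problem is hypothetical): integrate the scalar
# IVP dy/dt = -y, y(0) = 1 over [0, 1] and compare against exp(-1).
if __name__ == '__main__':
    def rhs(y, t):
        # right hand side; returns a non-scalar ndarray as required
        return -y

    ys = ex_midpoint_parallel(rhs, np.array([1.0]), [0.0, 1.0],
                              rtol=1e-8, atol=1e-8, adaptive="order")
    print ys[-1]  # expected to be close to exp(-1) ~= 0.3679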
mit
6,856,311,973,943,909,000
35.4379
90
0.476667
false
3.127253
false
false
false
sirpercival/kivy
kivy/uix/textinput.py
1
92223
# -*- encoding: utf8 -*-
'''
Text Input
==========

.. versionadded:: 1.0.4

.. image:: images/textinput-mono.jpg
.. image:: images/textinput-multi.jpg

The :class:`TextInput` widget provides a box of editable plain text.

Unicode, multiline, cursor navigation, selection and clipboard features
are supported.

.. note::

    Two different coordinate systems are used with TextInput:

        - (x, y) - coordinates in pixels, mostly used for rendering on screen.
        - (row, col) - cursor index in characters / lines, used for selection
          and cursor movement.

Usage example
-------------

To create a multiline textinput ('enter' key adds a new line)::

    from kivy.uix.textinput import TextInput
    textinput = TextInput(text='Hello world')

To create a singleline textinput, set the multiline property to False ('enter'
key will defocus the textinput and emit on_text_validate event)::

    def on_enter(instance, value):
        print('User pressed enter in', instance)

    textinput = TextInput(text='Hello world', multiline=False)
    textinput.bind(on_text_validate=on_enter)

The textinput's text is stored on its :attr:`TextInput.text` property. To run a
callback when the text changes::

    def on_text(instance, value):
        print('The widget', instance, 'has:', value)

    textinput = TextInput()
    textinput.bind(text=on_text)

You can 'focus' a textinput, meaning that the input box will be highlighted
and keyboard focus will be requested::

    textinput = TextInput(focus=True)

The textinput is defocused if the 'escape' key is pressed, or if another
widget requests the keyboard. You can bind a callback to the focus property
to get notified of focus changes::

    def on_focus(instance, value):
        if value:
            print('User focused', instance)
        else:
            print('User defocused', instance)

    textinput = TextInput()
    textinput.bind(focus=on_focus)

Selection
---------

The selection is automatically updated when the cursor position changes.
You can get the currently selected text from the
:attr:`TextInput.selection_text` property.

Filtering
---------

You can control which text can be added to the :class:`TextInput` by
overwriting :meth:`TextInput.insert_text`. Every string that is typed, pasted
or inserted by any other means into the :class:`TextInput` is passed through
this function. By overwriting it you can reject or change unwanted characters.

For example, to write only in capitalized characters::

    class CapitalInput(TextInput):

        def insert_text(self, substring, from_undo=False):
            s = substring.upper()
            return super(CapitalInput, self).insert_text(s,\
 from_undo=from_undo)

Or to only allow floats (0 - 9 and a single period)::

    class FloatInput(TextInput):

        pat = re.compile('[^0-9]')

        def insert_text(self, substring, from_undo=False):
            pat = self.pat
            if '.' in self.text:
                s = re.sub(pat, '', substring)
            else:
                s = '.'.join([re.sub(pat, '', s) for s in\
 substring.split('.', 1)])
            return super(FloatInput, self).insert_text(s, from_undo=from_undo)

Default shortcuts
-----------------

=============== ========================================================
   Shortcuts    Description
--------------- --------------------------------------------------------
Left            Move cursor to left
Right           Move cursor to right
Up              Move cursor up
Down            Move cursor down
Home            Move cursor to the beginning of the line
End             Move cursor to the end of the line
PageUp          Move cursor to 3 lines before
PageDown        Move cursor to 3 lines after
Backspace       Delete the selection or character before the cursor
Del             Delete the selection or character after the cursor
Shift + <dir>   Start a text selection. Dir can be Up, Down, Left, Right
Control + c     Copy selection
Control + x     Cut selection
Control + v     Paste selection
Control + a     Select all the content
Control + z     undo
Control + r     redo
=============== ========================================================

'''


__all__ = ('TextInput', )

import re
import sys
from functools import partial
from os import environ
from weakref import ref

from kivy.animation import Animation
from kivy.base import EventLoop
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.config import Config
from kivy.compat import PY2
from kivy.logger import Logger
from kivy.metrics import inch
from kivy.utils import boundary, platform

from kivy.core.text import Label
from kivy.graphics import Color, Rectangle
from kivy.graphics.texture import Texture
from kivy.uix.widget import Widget
from kivy.uix.bubble import Bubble
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.image import Image

from kivy.properties import StringProperty, NumericProperty, \
    BooleanProperty, AliasProperty, \
    ListProperty, ObjectProperty, VariableListProperty, OptionProperty

Cache_register = Cache.register
Cache_append = Cache.append
Cache_get = Cache.get
Cache_remove = Cache.remove
Cache_register('textinput.label', timeout=60.)
Cache_register('textinput.width', timeout=60.)

FL_IS_NEWLINE = 0x01

# late binding
Clipboard = None
_platform = platform

# for reloading, we need to keep a list of textinput to retrigger the
# rendering
_textinput_list = []

# cache the result
_is_osx = sys.platform == 'darwin'

# When we are generating documentation, Config doesn't exist
_is_desktop = False
if Config:
    _is_desktop = Config.getboolean('kivy', 'desktop')

# register an observer to clear the textinput cache when OpenGL reloads
if 'KIVY_DOC' not in environ:

    def _textinput_clear_cache(*l):
        Cache_remove('textinput.label')
        Cache_remove('textinput.width')
        for wr in _textinput_list[:]:
            textinput = wr()
            if textinput is None:
                _textinput_list.remove(wr)
            else:
                textinput._trigger_refresh_text()

    from kivy.graphics.context import get_context
    get_context().add_reload_observer(_textinput_clear_cache, True)


class Selector(ButtonBehavior, Image):
    # Internal class for managing the selection Handles.

    def on_touch_down(self, touch):
        self._touch_diff = self.top - touch.y
        return super(Selector, self).on_touch_down(touch)


class TextInputCutCopyPaste(Bubble):
    # Internal class used for showing the little bubble popup when
    # copy/cut/paste happen.

    textinput = ObjectProperty(None)
    ''' Holds a reference to the TextInput this Bubble belongs to.
    '''

    but_cut = ObjectProperty(None)
    but_copy = ObjectProperty(None)
    but_paste = ObjectProperty(None)
    but_selectall = ObjectProperty(None)

    def __init__(self, **kwargs):
        self.mode = 'normal'
        super(TextInputCutCopyPaste, self).__init__(**kwargs)
        Clock.schedule_interval(self._check_parent, .5)

    def on_textinput(self, instance, value):
        if value and not Clipboard and _platform == 'android':
            value._ensure_clipboard()

    def _check_parent(self, dt):
        # this prevents the Bubble from staying on the screen when the
        # attached textinput is no longer on the screen.
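        # Editor's note: the walk below climbs the widget tree until it
        # reaches a node that is its own parent (in Kivy the root window
        # typically reports itself as its own parent) or falls off the
        # top; in the latter case the textinput is no longer displayed,
        # so the bubble is cleaned up.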
        parent = self.textinput
        while parent is not None:
            if parent == parent.parent:
                break
            parent = parent.parent
        if parent is None:
            Clock.unschedule(self._check_parent)
            if self.textinput:
                self.textinput._hide_cut_copy_paste()

    def on_parent(self, instance, value):
        parent = self.textinput
        mode = self.mode
        if parent:
            self.clear_widgets()
            if mode == 'paste':
                # show only paste on long touch
                self.but_selectall.opacity = 1
                widget_list = [self.but_selectall, ]
                if not parent.readonly:
                    widget_list.append(self.but_paste)
            elif parent.readonly:
                # show only copy for read only text input
                widget_list = (self.but_copy, )
            else:
                # normal mode
                widget_list = (self.but_cut, self.but_copy, self.but_paste)

            for widget in widget_list:
                self.add_widget(widget)

    def do(self, action):
        textinput = self.textinput

        if action == 'cut':
            textinput._cut(textinput.selection_text)
        elif action == 'copy':
            textinput._copy(textinput.selection_text)
        elif action == 'paste':
            textinput._paste()
        elif action == 'selectall':
            textinput.select_all()
            self.mode = ''
            anim = Animation(opacity=0, d=.333)
            anim.bind(on_complete=lambda *args:
                      self.on_parent(self, self.parent))
            anim.start(self.but_selectall)


class TextInput(Widget):
    '''TextInput class. See module documentation for more information.

    :Events:
        `on_text_validate`
            Fired only in multiline=False mode when the user hits 'enter'.
            This will also unfocus the textinput.
        `on_double_tap`
            Fired when a double tap happens in the text input. The default
            behavior selects the text around the cursor position. More info
            at :meth:`on_double_tap`.
        `on_triple_tap`
            Fired when a triple tap happens in the text input. The default
            behavior selects the line around the cursor position. More info
            at :meth:`on_triple_tap`.
        `on_quad_touch`
            Fired when four fingers are touching the text input. The default
            behavior selects the whole text. More info at
            :meth:`on_quad_touch`.

    .. versionchanged:: 1.7.0
        `on_double_tap`, `on_triple_tap` and `on_quad_touch` events added.
    '''
    __events__ = ('on_text_validate', 'on_double_tap', 'on_triple_tap',
                  'on_quad_touch')

    def __init__(self, **kwargs):
        self._win = None
        self._cursor_blink_time = Clock.get_time()
        self._cursor = [0, 0]
        self._selection = False
        self._selection_finished = True
        self._selection_touch = None
        self.selection_text = u''
        self._selection_from = None
        self._selection_to = None
        self._handle_left = None
        self._handle_right = None
        self._handle_middle = None
        self._bubble = None
        self._lines_flags = []
        self._lines_labels = []
        self._lines_rects = []
        self._hint_text_flags = []
        self._hint_text_labels = []
        self._hint_text_rects = []
        self._label_cached = None
        self._line_options = None
        self._keyboard = None
        self._keyboard_mode = Config.get('kivy', 'keyboard_mode')
        self._command_mode = False
        self._command = ''
        self.reset_undo()
        self._touch_count = 0
        self.interesting_keys = {
            8: 'backspace',
            13: 'enter',
            127: 'del',
            271: 'enter',
            273: 'cursor_up',
            274: 'cursor_down',
            275: 'cursor_right',
            276: 'cursor_left',
            278: 'cursor_home',
            279: 'cursor_end',
            280: 'cursor_pgup',
            281: 'cursor_pgdown',
            303: 'shift_L',
            304: 'shift_R'}

        super(TextInput, self).__init__(**kwargs)

        self.bind(font_size=self._trigger_refresh_line_options,
                  font_name=self._trigger_refresh_line_options)

        self.bind(padding=self._update_text_options,
                  tab_width=self._update_text_options,
                  font_size=self._update_text_options,
                  font_name=self._update_text_options,
                  size=self._update_text_options,
                  password=self._update_text_options)

        self.bind(pos=self._trigger_update_graphics)

        self._trigger_position_handles = Clock.create_trigger(
            self._position_handles)
        self._trigger_show_handles = Clock.create_trigger(
            self._show_handles, .05)
        self._trigger_refresh_line_options()
        self._trigger_refresh_text()

        self.bind(pos=self._trigger_position_handles,
                  size=self._trigger_position_handles)

        # when the gl context is reloaded, trigger the text rendering again.
        _textinput_list.append(ref(self, TextInput._reload_remove_observer))

    def on_disabled(self, instance, value):
        if value:
            self.focus = False

    def on_text_validate(self):
        pass

    def cursor_index(self, cursor=None):
        '''Return the cursor index in the text/value.
        '''
        if not cursor:
            cursor = self.cursor
        try:
            l = self._lines
            if len(l) == 0:
                return 0
            lf = self._lines_flags
            index, cr = cursor
            for row in range(cr):
                if row >= len(l):
                    continue
                index += len(l[row])
                if lf[row] & FL_IS_NEWLINE:
                    index += 1
            if lf[cr] & FL_IS_NEWLINE:
                index += 1
            return index
        except IndexError:
            return 0

    def cursor_offset(self):
        '''Get the cursor x offset on the current line.
        '''
        offset = 0
        row = self.cursor_row
        col = self.cursor_col
        _lines = self._lines
        if col and row < len(_lines):
            offset = self._get_text_width(
                _lines[row][:col], self.tab_width,
                self._label_cached)
        return offset

    def get_cursor_from_index(self, index):
        '''Return the (row, col) of the cursor from text index.
        '''
        index = boundary(index, 0, len(self.text))
        if index <= 0:
            return 0, 0
        lf = self._lines_flags
        l = self._lines
        i = 0
        for row in range(len(l)):
            ni = i + len(l[row])
            if lf[row] & FL_IS_NEWLINE:
                ni += 1
                i += 1
            if ni >= index:
                return index - i, row
            i = ni
        return index, row

    def select_text(self, start, end):
        ''' Select a portion of text displayed in this TextInput.
        .. versionadded:: 1.4.0

        :Parameters:
            `start`
                Index of textinput.text from where to start selection
            `end`
                Index of textinput.text till which the selection should be
                displayed
        '''
        if end < start:
            raise Exception('end must not be less than start')
        m = len(self.text)
        self._selection_from = boundary(start, 0, m)
        self._selection_to = boundary(end, 0, m)
        self._selection_finished = True
        self._update_selection(True)
        self._update_graphics_selection()

    def select_all(self):
        ''' Select all of the text displayed in this TextInput.

        .. versionadded:: 1.4.0
        '''
        self.select_text(0, len(self.text))

    re_indent = re.compile('^(\s*|)')

    def _auto_indent(self, substring):
        index = self.cursor_index()
        _text = self._get_text(encode=False)
        if index > 0:
            line_start = _text.rfind('\n', 0, index)
            if line_start > -1:
                line = _text[line_start + 1:index]
                indent = self.re_indent.match(line).group()
                substring += indent
        return substring

    def insert_text(self, substring, from_undo=False):
        '''Insert new text at the current cursor position. Override this
        function in order to pre-process text for input validation.
        '''
        if self.readonly or not substring:
            return

        self._hide_handles(self._win)

        # check for command modes
        if ord(substring[0]) == 1:
            self._command_mode = True
            self._command = ''
        if ord(substring[0]) == 2:
            self._command_mode = False
            self._command = self._command[1:]

        if self._command_mode:
            self._command += substring
            return

        _command = self._command
        if _command and ord(substring[0]) == 2:
            from_undo = True
            _command, data = _command.split(':')
            self._command = ''
            if _command == 'DEL':
                count = int(data)
                end = self.cursor_index()
                self._selection_from = max(end - count, 0)
                self._selection_to = end
                self._selection = True
                self.delete_selection(from_undo=True)
                return
            elif _command == 'INSERT':
                substring = data
            elif _command == 'INSERTN':
                from_undo = False
                substring = data

        if not from_undo and self.multiline and self.auto_indent \
                and substring == u'\n':
            substring = self._auto_indent(substring)

        cc, cr = self.cursor
        sci = self.cursor_index
        ci = sci()
        text = self._lines[cr]
        len_str = len(substring)
        new_text = text[:cc] + substring + text[cc:]
        self._set_line_text(cr, new_text)

        wrap = (self._get_text_width(
            new_text,
            self.tab_width,
            self._label_cached) > self.width)
        if len_str > 1 or substring == u'\n' or wrap:
            # Avoid refreshing text on every keystroke.
            # Allows for faster typing of text when the amount of text in
            # TextInput gets large.

            start, finish, lines,\
                lineflags, len_lines = self._get_line_from_cursor(cr, new_text)
            # calling trigger here could lead to wrong cursor positioning
            # and repeating of text when keys are added rapidly in an
            # automated fashion. From Android Keyboard for example.
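            # Editor's note: only the affected paragraph (rows start..finish)
            # is re-laid-out here; refreshing the whole text on every
            # keystroke would make typing in large documents noticeably
            # slow, which is also why the trigger is bypassed (see the
            # comment above).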
            self._refresh_text_from_property('insert', start, finish,
                                             lines, lineflags, len_lines)

        self.cursor = self.get_cursor_from_index(ci + len_str)
        # handle undo and redo
        self._set_unredo_insert(ci, ci + len_str, substring, from_undo)

    def _get_line_from_cursor(self, start, new_text):
        # get current paragraph from cursor position
        finish = start
        lines = self._lines
        linesflags = self._lines_flags
        if start and not linesflags[start]:
            start -= 1
            new_text = u''.join((lines[start], new_text))
        try:
            while not linesflags[finish + 1]:
                new_text = u''.join((new_text, lines[finish + 1]))
                finish += 1
        except IndexError:
            pass
        lines, lineflags = self._split_smart(new_text)
        len_lines = max(1, len(lines))
        return start, finish, lines, lineflags, len_lines

    def _set_unredo_insert(self, ci, sci, substring, from_undo):
        # handle undo and redo
        if from_undo:
            return
        self._undo.append({'undo_command': ('insert', ci, sci),
                           'redo_command': (ci, substring)})
        # reset redo when undo is appended to
        self._redo = []

    def reset_undo(self):
        '''Reset undo and redo lists from memory.

        .. versionadded:: 1.3.0
        '''
        self._redo = self._undo = []

    def do_redo(self):
        '''Do redo operation.

        .. versionadded:: 1.3.0

        This action re-does any command that has been un-done by
        do_undo/ctrl+z. This function is automatically called when
        `ctrl+r` keys are pressed.
        '''
        try:
            x_item = self._redo.pop()
            undo_type = x_item['undo_command'][0]
            _get_cusror_from_index = self.get_cursor_from_index

            if undo_type == 'insert':
                ci, substring = x_item['redo_command']
                self.cursor = _get_cusror_from_index(ci)
                self.insert_text(substring, True)
            elif undo_type == 'bkspc':
                self.cursor = _get_cusror_from_index(x_item['redo_command'])
                self.do_backspace(from_undo=True)
            else:
                # delsel
                ci, sci = x_item['redo_command']
                self._selection_from = ci
                self._selection_to = sci
                self._selection = True
                self.delete_selection(True)
                self.cursor = _get_cusror_from_index(ci)
            self._undo.append(x_item)
        except IndexError:
            # reached the top of undo list
            pass

    def do_undo(self):
        '''Do undo operation.

        .. versionadded:: 1.3.0

        This action un-does any edits that have been made since the last
        call to reset_undo(). This function is automatically called when
        `ctrl+z` keys are pressed.
        '''
        try:
            x_item = self._undo.pop()
            undo_type = x_item['undo_command'][0]
            self.cursor = self.get_cursor_from_index(x_item['undo_command'][1])

            if undo_type == 'insert':
                ci, sci = x_item['undo_command'][1:]
                self._selection_from = ci
                self._selection_to = sci
                self._selection = True
                self.delete_selection(True)
            elif undo_type == 'bkspc':
                substring = x_item['undo_command'][2:][0]
                self.insert_text(substring, True)
            else:
                # delsel
                substring = x_item['undo_command'][2:][0]
                self.insert_text(substring, True)
            self._redo.append(x_item)
        except IndexError:
            # reached the top of undo list
            pass

    def do_backspace(self, from_undo=False, mode='bkspc'):
        '''Do backspace operation from the current cursor position.
        This action might do several things:

            - removing the current selection if available.
            - removing the previous char and move the cursor back.
            - do nothing, if we are at the start.
        '''
        if self.readonly:
            return
        cc, cr = self.cursor
        _lines = self._lines
        text = _lines[cr]
        cursor_index = self.cursor_index()
        text_last_line = _lines[cr - 1]

        if cc == 0 and cr == 0:
            return
        _lines_flags = self._lines_flags
        start = cr
        if cc == 0:
            substring = u'\n' if _lines_flags[cr] else u' '
            new_text = text_last_line + text
            self._set_line_text(cr - 1, new_text)
            self._delete_line(cr)
            start = cr - 1
        else:
            #ch = text[cc-1]
            substring = text[cc - 1]
            new_text = text[:cc - 1] + text[cc:]
            self._set_line_text(cr, new_text)

        # refresh just the current line instead of the whole text
        start, finish, lines, lineflags, len_lines =\
            self._get_line_from_cursor(start, new_text)
        # avoid trigger refresh, leads to issue with
        # keys/text send rapidly through code.
        self._refresh_text_from_property('del', start, finish, lines,
                                         lineflags, len_lines)

        self.cursor = self.get_cursor_from_index(cursor_index - 1)
        # handle undo and redo
        self._set_undo_redo_bkspc(
            cursor_index,
            cursor_index - 1,
            substring, from_undo)

    def _set_undo_redo_bkspc(self, ol_index, new_index, substring, from_undo):
        # handle undo and redo for backspace
        if from_undo:
            return
        self._undo.append({
            'undo_command': ('bkspc', new_index, substring),
            'redo_command': ol_index})
        # reset redo when undo is appended to
        self._redo = []

    def do_cursor_movement(self, action):
        '''Move the cursor relative to its current position.
        Action can be one of :

            - cursor_left: move the cursor to the left
            - cursor_right: move the cursor to the right
            - cursor_up: move the cursor on the previous line
            - cursor_down: move the cursor on the next line
            - cursor_home: move the cursor at the start of the current line
            - cursor_end: move the cursor at the end of current line
            - cursor_pgup: move one "page" before
            - cursor_pgdown: move one "page" after

        .. warning::

            Current page has three lines before/after.
        '''
        pgmove_speed = 3
        col, row = self.cursor
        if action == 'cursor_up':
            row = max(row - 1, 0)
            col = min(len(self._lines[row]), col)
        elif action == 'cursor_down':
            row = min(row + 1, len(self._lines) - 1)
            col = min(len(self._lines[row]), col)
        elif action == 'cursor_left':
            if col == 0:
                if row:
                    row -= 1
                    col = len(self._lines[row])
            else:
                col, row = col - 1, row
        elif action == 'cursor_right':
            if col == len(self._lines[row]):
                if row < len(self._lines) - 1:
                    col = 0
                    row += 1
            else:
                col, row = col + 1, row
        elif action == 'cursor_home':
            col = 0
        elif action == 'cursor_end':
            col = len(self._lines[row])
        elif action == 'cursor_pgup':
            row = max(0, row - pgmove_speed)
            col = min(len(self._lines[row]), col)
        elif action == 'cursor_pgdown':
            row = min(row + pgmove_speed, len(self._lines) - 1)
            col = min(len(self._lines[row]), col)
        self.cursor = (col, row)

    def get_cursor_from_xy(self, x, y):
        '''Return the (row, col) of the cursor from an (x, y) position.
        '''
        padding_left = self.padding[0]
        padding_top = self.padding[1]
        l = self._lines
        dy = self.line_height + self.line_spacing
        cx = x - self.x
        scrl_y = self.scroll_y
        scrl_x = self.scroll_x
        scrl_y = scrl_y / dy if scrl_y > 0 else 0
        cy = (self.top - padding_top + scrl_y * dy) - y
        cy = int(boundary(round(cy / dy - 0.5), 0, len(l) - 1))
        dcx = 0
        _get_text_width = self._get_text_width
        _tab_width = self.tab_width
        _label_cached = self._label_cached
        for i in range(1, len(l[cy]) + 1):
            if _get_text_width(l[cy][:i],
                               _tab_width,
                               _label_cached) + padding_left >= cx + scrl_x:
                break
            dcx = i
        cx = dcx
        return cx, cy

    #
    # Selection control
    #
    def cancel_selection(self):
        '''Cancel current selection (if any).
        '''
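        # Editor's note: collapsing both selection endpoints onto the caret
        # index yields an empty selection; the rest of the widget treats
        # _selection_from == _selection_to as "nothing selected".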
        self._selection_from = self._selection_to = self.cursor_index()
        self._selection = False
        self._selection_finished = True
        self._selection_touch = None
        self._trigger_update_graphics()

    def delete_selection(self, from_undo=False):
        '''Delete the current text selection (if any).
        '''
        if self.readonly:
            return
        self._hide_handles(self._win)
        scrl_x = self.scroll_x
        scrl_y = self.scroll_y
        cc, cr = self.cursor
        if not self._selection:
            return
        v = self._get_text(encode=False)
        a, b = self._selection_from, self._selection_to
        if a > b:
            a, b = b, a
        self.cursor = cursor = self.get_cursor_from_index(a)
        start = cursor
        finish = self.get_cursor_from_index(b)
        cur_line = self._lines[start[1]][:start[0]] +\
            self._lines[finish[1]][finish[0]:]
        lines, lineflags = self._split_smart(cur_line)
        len_lines = len(lines)
        if start[1] == finish[1]:
            self._set_line_text(start[1], cur_line)
        else:
            self._refresh_text_from_property('del', start[1], finish[1],
                                             lines, lineflags, len_lines)
        self.scroll_x = scrl_x
        self.scroll_y = scrl_y
        # handle undo and redo for delete selection
        self._set_unredo_delsel(a, b, v[a:b], from_undo)
        self.cancel_selection()

    def _set_unredo_delsel(self, a, b, substring, from_undo):
        # handle undo and redo for delete selection
        if from_undo:
            return

        self._undo.append({
            'undo_command': ('delsel', a, substring),
            'redo_command': (a, b)})
        # reset redo when undo is appended to
        self._redo = []

    def _update_selection(self, finished=False):
        '''Update selection text and order of from/to if finished is True.
        Can be called multiple times until finished is True.
        '''
        a, b = self._selection_from, self._selection_to
        if a > b:
            a, b = b, a
        self._selection_finished = finished
        _selection_text = self._get_text(encode=False)[a:b]
        self.selection_text = ("" if not self.allow_copy else
                               (('*' * (b - a)) if self.password else
                                _selection_text))
        if not finished:
            self._selection = True
        else:
            self._selection = bool(len(_selection_text))
            self._selection_touch = None
        if a == 0:
            # update graphics only on new line
            # allows smoother scrolling, noticeably
            # faster when dealing with large text.
            self._update_graphics_selection()
            #self._trigger_update_graphics()

    #
    # Touch control
    #
    def long_touch(self, dt):
        if self._selection_to == self._selection_from:
            self._show_cut_copy_paste(self._long_touch_pos,
                                      self._win,
                                      mode='paste')

    def on_double_tap(self):
        '''This event is dispatched when a double tap happens
        inside TextInput. The default behavior is to select the
        word around the current cursor position. Override this to provide
        different behavior. Alternatively, you can bind to this
        event to provide additional functionality.
        '''
        ci = self.cursor_index()
        cc = self.cursor_col
        line = self._lines[self.cursor_row]
        len_line = len(line)
        start = max(0, len(line[:cc]) - line[:cc].rfind(u' ') - 1)
        end = line[cc:].find(u' ')
        end = end if end > - 1 else (len_line - cc)
        Clock.schedule_once(lambda dt: self.select_text(ci - start, ci + end))

    def on_triple_tap(self):
        '''This event is dispatched when a triple tap happens
        inside TextInput. The default behavior is to select the
        line around current cursor position. Override this to provide
        different behavior. Alternatively, you can bind to this
        event to provide additional functionality.
        '''
        ci = self.cursor_index()
        cc = self.cursor_col
        line = self._lines[self.cursor_row]
        len_line = len(line)
        Clock.schedule_once(lambda dt:
                            self.select_text(ci - cc, ci + (len_line - cc)))

    def on_quad_touch(self):
        '''This event is dispatched when four fingers are touching
        inside TextInput. The default behavior is to select all text.
        Override this to provide different behavior. Alternatively,
        you can bind to this event to provide additional functionality.
        '''
        Clock.schedule_once(lambda dt: self.select_all())

    def on_touch_down(self, touch):
        if self.disabled:
            return

        touch_pos = touch.pos
        if not self.collide_point(*touch_pos):
            if self._keyboard_mode == 'multi':
                if self.readonly:
                    self.focus = False
            else:
                self.focus = False
            return False
        if not self.focus:
            self.focus = True
        touch.grab(self)
        self._touch_count += 1
        if touch.is_double_tap:
            self.dispatch('on_double_tap')
        if touch.is_triple_tap:
            self.dispatch('on_triple_tap')
        if self._touch_count == 4:
            self.dispatch('on_quad_touch')

        win = self._win
        if not win:
            self._win = win = EventLoop.window
        if not win:
            Logger.warning('Textinput: '
                           'Cannot show bubble, unable to get root window')
            return True

        self._hide_cut_copy_paste(self._win)
        # schedule long touch for paste
        self._long_touch_pos = touch.pos
        Clock.schedule_once(self.long_touch, .5)

        self.cursor = self.get_cursor_from_xy(*touch_pos)
        if not self._selection_touch:
            self.cancel_selection()
            self._selection_touch = touch
            self._selection_from = self._selection_to = self.cursor_index()
            self._update_selection()
        return False

    def on_touch_move(self, touch):
        if touch.grab_current is not self:
            return
        if not self.focus:
            touch.ungrab(self)
            if self._selection_touch is touch:
                self._selection_touch = None
            return False
        if self._selection_touch is touch:
            self.cursor = self.get_cursor_from_xy(touch.x, touch.y)
            self._selection_to = self.cursor_index()
            self._update_selection()
            return True

    def on_touch_up(self, touch):
        if touch.grab_current is not self:
            return
        touch.ungrab(self)
        self._touch_count -= 1

        # schedule long touch for paste
        Clock.unschedule(self.long_touch)

        if not self.focus:
            return False

        if self._selection_touch is touch:
            self._selection_to = self.cursor_index()
            self._update_selection(True)
            # show Bubble
            win = self._win
            if self._selection_to != self._selection_from:
                self._show_cut_copy_paste(touch.pos, win)
            elif self.use_handles:
                self._hide_handles()
                handle_middle = self._handle_middle
                if handle_middle is None:
                    self._handle_middle = handle_middle = Selector(
                        source=self.handle_image_middle,
                        size_hint=(None, None),
                        size=('45dp', '45dp'))
                    handle_middle.bind(on_press=self._handle_pressed,
                                       on_touch_move=self._handle_move,
                                       on_release=self._handle_released)
                if not self._handle_middle.parent and self.text:
                    self._win.add_widget(handle_middle)
                self._position_handles(mode='middle')
            return True

    def _handle_pressed(self, instance):
        self._hide_cut_copy_paste()
        sf, st = self._selection_from, self.selection_to
        if sf > st:
            self._selection_from, self._selection_to = st, sf

    def _handle_released(self, instance):
        sf, st = self._selection_from, self.selection_to
        if sf == st:
            return

        self._update_selection()
        self._show_cut_copy_paste(
            (instance.x + ((1 if instance is self._handle_left else - 1)
             * self._bubble.width / 2) if self._bubble else 0,
             instance.y + self.line_height), self._win)

    def _handle_move(self, instance, touch):
        if touch.grab_current != instance:
            return
        get_cursor = self.get_cursor_from_xy
        handle_right = self._handle_right
        handle_left = self._handle_left
        handle_middle = self._handle_middle

        x, y = self.to_widget(*touch.pos)
        cursor = get_cursor(
            x,
            y + instance._touch_diff + (self.line_height / 2))

        if instance != touch.grab_current:
            return

        if instance == handle_middle:
            self.cursor = cursor
            self._position_handles(mode='middle')
            return

        ci = self.cursor_index(cursor=cursor)
        sf, st = self._selection_from, self.selection_to

        if instance == handle_left:
            self._selection_from = ci
        elif instance == handle_right:
            self._selection_to = ci

        self._trigger_update_graphics()
        self._trigger_position_handles()

    def _position_handles(self, *args, **kwargs):
        if not self.text:
            return
        mode = kwargs.get('mode', 'both')

        lh = self.line_height
        to_win = self.to_window

        handle_middle = self._handle_middle
        if handle_middle:
            hp_mid = self.cursor_pos
            pos = to_win(*hp_mid)
            handle_middle.x = pos[0] - handle_middle.width / 2
            handle_middle.top = pos[1] - lh
        if mode[0] == 'm':
            return

        group = self.canvas.get_group('selection')
        if not group:
            return
        self._win.remove_widget(self._handle_middle)

        handle_left = self._handle_left
        if not handle_left:
            return
        hp_left = group[2].pos
        handle_left.pos = to_win(*hp_left)
        handle_left.x -= handle_left.width
        handle_left.y -= handle_left.height

        handle_right = self._handle_right
        last_rect = group[-1]
        hp_right = last_rect.pos[0], last_rect.pos[1]
        x, y = to_win(*hp_right)
        handle_right.x = x + last_rect.size[0]
        handle_right.y = y - handle_right.height

    def _hide_handles(self, win=None):
        win = win or self._win
        if win is None:
            return
        self._win.remove_widget(self._handle_right)
        self._win.remove_widget(self._handle_left)
        self._win.remove_widget(self._handle_middle)

    def _hide_cut_copy_paste(self, win=None):
        win = win or self._win
        if win is None:
            return
        bubble = self._bubble
        if bubble is not None:
            anim = Animation(opacity=0, d=.225)
            anim.bind(on_complete=lambda *args: win.remove_widget(bubble))
            anim.start(bubble)

    def _show_handles(self, dt):
        if not self.use_handles or not self.text:
            return

        win = self._win
        if not win:
            self._set_window()
            win = self._win

        handle_right = self._handle_right
        handle_left = self._handle_left
        if self._handle_left is None:
            self._handle_left = handle_left = Selector(
                source=self.handle_image_left,
                size_hint=(None, None),
                size=('45dp', '45dp'))
            handle_left.bind(on_press=self._handle_pressed,
                             on_touch_move=self._handle_move,
                             on_release=self._handle_released)
            self._handle_right = handle_right = Selector(
                source=self.handle_image_right,
                size_hint=(None, None),
                size=('45dp', '45dp'))
            handle_right.bind(on_press=self._handle_pressed,
                              on_touch_move=self._handle_move,
                              on_release=self._handle_released)
        else:
            if self._handle_left.parent:
                self._position_handles()
                return
            if not self.parent:
                return

        self._trigger_position_handles()
        if self.selection_from != self.selection_to:
            self._handle_left.opacity = self._handle_right.opacity = 0
            win.add_widget(self._handle_left)
            win.add_widget(self._handle_right)
            anim = Animation(opacity=1, d=.4)
            anim.start(self._handle_right)
            anim.start(self._handle_left)

    def _show_cut_copy_paste(self, pos, win, parent_changed=False,
                             mode='', *l):
        # Show a bubble with cut copy and paste buttons
        if not self.use_bubble:
            return

        bubble = self._bubble
        if bubble is None:
            self._bubble = bubble = TextInputCutCopyPaste(textinput=self)
            self.bind(parent=partial(self._show_cut_copy_paste,
                                     pos, win, True))
        else:
            win.remove_widget(bubble)
            if not self.parent:
                return
        if parent_changed:
            return

        # Search the position from the touch to the window
        lh, ls = self.line_height, self.line_spacing

        x, y = pos
        t_pos = self.to_window(x, y)
        bubble_size = bubble.size
        win_size = win.size
        bubble.pos = (t_pos[0] - bubble_size[0] / 2., t_pos[1] + inch(.25))
        bubble_pos = bubble.pos

        if bubble_pos[0] < 0:
            # bubble beyond left of window
            if bubble.pos[1] > (win_size[1] - bubble_size[1]):
                # bubble above window height
                bubble.pos = (0, (t_pos[1]) - (bubble_size[1] + lh + ls))
                bubble.arrow_pos = 'top_left'
            else:
                bubble.pos = (0, bubble_pos[1])
                bubble.arrow_pos = 'bottom_left'
        elif bubble.right > win_size[0]:
            # bubble beyond right of window
            if bubble_pos[1] > (win_size[1] - bubble_size[1]):
                # bubble above window height
                bubble.pos = (win_size[0] - bubble_size[0],
                              (t_pos[1]) - (bubble_size[1] + lh + ls))
                bubble.arrow_pos = 'top_right'
            else:
                bubble.right = win_size[0]
                bubble.arrow_pos = 'bottom_right'
        else:
            if bubble_pos[1] > (win_size[1] - bubble_size[1]):
                # bubble above window height
                bubble.pos = (bubble_pos[0],
                              (t_pos[1]) - (bubble_size[1] + lh + ls))
                bubble.arrow_pos = 'top_mid'
            else:
                bubble.arrow_pos = 'bottom_mid'

        bubble.mode = mode
        Animation.cancel_all(bubble)
        bubble.opacity = 0
        win.add_widget(bubble)
        Animation(opacity=1, d=.225).start(bubble)

    #
    # Private
    #

    @staticmethod
    def _reload_remove_observer(wr):
        # called when the textinput is deleted
        if wr in _textinput_list:
            _textinput_list.remove(wr)

    def _set_window(self, *largs):
        win = self._win
        if not win:
            self._win = win = EventLoop.window
        if not win:
            # if we received an argument, it could be the previously
            # scheduled call to cancel the focus.
            if len(largs):
                Logger.warning('Textinput: '
                               'Cannot focus the element, unable to get '
                               'root window')
                return
            else:
                #XXX where does `value` come from?
                Clock.schedule_once(partial(self.on_focus, self, largs), 0)
            return

    def on_focus(self, instance, value, *largs):
        self._set_window(*largs)

        if value:
            if self.keyboard_mode != 'managed':
                self._bind_keyboard()
        else:
            if self.keyboard_mode != 'managed':
                self._unbind_keyboard()

    def _unbind_keyboard(self):
        self._set_window()
        win = self._win
        if self._keyboard:
            keyboard = self._keyboard
            keyboard.unbind(
                on_key_down=self._keyboard_on_key_down,
                on_key_up=self._keyboard_on_key_up)
            keyboard.release()
            self._keyboard = None

        self.cancel_selection()
        Clock.unschedule(self._do_blink_cursor)
        self._hide_cut_copy_paste(win)
        self._hide_handles(win)
        self._win = None

    def _bind_keyboard(self):
        self._set_window()
        win = self._win
        self._editable = editable = (not (self.readonly or self.disabled) or
                                     _is_desktop and
                                     self._keyboard_mode == 'system')

        if not _is_desktop and not editable:
            return

        keyboard = win.request_keyboard(
            self._keyboard_released, self, input_type=self.input_type)
        self._keyboard = keyboard
        if editable:
            keyboard.bind(
                on_key_down=self._keyboard_on_key_down,
                on_key_up=self._keyboard_on_key_up)
            Clock.schedule_interval(self._do_blink_cursor, 1 / 2.)
        else:
            # in non-editable mode, we still want shortcuts (e.g. copy)
            keyboard.bind(
                on_key_down=self._keyboard_on_key_down)

    def on_readonly(self, instance, value):
        if not value:
            self.focus = False

    def _ensure_clipboard(self):
        global Clipboard
        if hasattr(self, '_clip_mime_type'):
            return
        if Clipboard is None:
            from kivy.core.clipboard import Clipboard  # NOQA
        if _platform == 'win':
            self._clip_mime_type = 'text/plain;charset=utf-8'
            # windows clipboard uses a utf-16 encoding
            self._encoding = 'utf-16'
        elif _platform == 'linux':
            self._clip_mime_type = 'UTF8_STRING'
            self._encoding = 'utf-8'
        else:
            self._clip_mime_type = 'text/plain'
            self._encoding = 'utf-8'

    def cut(self):
        ''' Copy current selection to clipboard then delete it from TextInput.

        .. versionadded:: 1.8.0
        '''
        self._cut(self.selection_text)

    def _cut(self, data):
        self._copy(data)
        self.delete_selection()

    def copy(self, data=''):
        ''' Copy the value provided in argument `data` into current clipboard.
        If data is not of type string it will be converted to string.
        If no data is provided then the current selection if present is
        copied.
versionadded:: 1.8.0 ''' if data: self._copy(data) return if self.selection_text: self._copy(self.selection_text) def _copy(self, data): # explicitly terminate strings with a null character # so as to avoid putting spurious data after the end. # MS windows issue. self._ensure_clipboard() data = data.encode(self._encoding) + b'\x00' Clipboard.put(data, self._clip_mime_type) def paste(self): ''' Insert text from system :class:`~kivy.core.clipboard.Clipboard` into the :class:`~kivy.uix.textinput.TextInput` at current cursor position. .. versionadded:: 1.8.0 ''' self._paste() def _paste(self): self._ensure_clipboard() _clip_types = Clipboard.get_types() mime_type = self._clip_mime_type if mime_type not in _clip_types: mime_type = 'text/plain' data = Clipboard.get(mime_type) if data is not None: # decode only if we don't have unicode # we would still need to decode from utf-16 (windows) # data is of type bytes in PY3 data = data.decode(self._encoding, 'ignore') # remove null strings mostly a windows issue data = data.replace(u'\x00', u'') self.delete_selection() self.insert_text(data) data = None def _keyboard_released(self): # Callback called when the real keyboard is taken by someone else # called by the window if the keyboard is taken by somebody else # FIXME: handle virtual keyboard. self.focus = False def _get_text_width(self, text, tab_width, _label_cached): # Return the width of a text, according to the current line options kw = self._get_line_options() try: cid = u'{}\0{}'.format(text, kw) except UnicodeDecodeError: cid = '{}\0{}'.format(text, kw) width = Cache_get('textinput.width', cid) if width: return width if not _label_cached: _label_cached = self._label_cached text = text.replace('\t', ' ' * tab_width) if not self.password: width = _label_cached.get_extents(text)[0] else: width = _label_cached.get_extents('*' * len(text))[0] Cache_append('textinput.width', cid, width) return width def _do_blink_cursor(self, dt): # Callback called by the timer to blink the cursor, according to the # last activity in the widget b = (Clock.get_time() - self._cursor_blink_time) self.cursor_blink = int(b * 2) % 2 def on_cursor(self, instance, value): # When the cursor is moved, reset the activity timer, and update all # the graphics. self._cursor_blink_time = Clock.get_time() self._trigger_update_graphics() def _delete_line(self, idx): # Delete current line, and fix cursor position assert(idx < len(self._lines)) self._lines_flags.pop(idx) self._lines_labels.pop(idx) self._lines.pop(idx) self.cursor = self.cursor def _set_line_text(self, line_num, text): # Set current line with other text than the default one. 
self._lines_labels[line_num] = self._create_line_label(text) self._lines[line_num] = text def _trigger_refresh_line_options(self, *largs): Clock.unschedule(self._refresh_line_options) Clock.schedule_once(self._refresh_line_options, 0) def _refresh_line_options(self, *largs): self._line_options = None self._get_line_options() self._refresh_text_from_property() self._refresh_hint_text() self.cursor = self.get_cursor_from_index(len(self.text)) def _trigger_refresh_text(self, *largs): if len(largs) and largs[0] == self: largs = () Clock.unschedule(lambda dt: self._refresh_text_from_property(*largs)) Clock.schedule_once(lambda dt: self._refresh_text_from_property(*largs)) def _update_text_options(self, *largs): Cache_remove('textinput.width') self._trigger_refresh_text() def _refresh_text_from_trigger(self, dt, *largs): self._refresh_text_from_property(*largs) def _refresh_text_from_property(self, *largs): self._refresh_text(self._get_text(encode=False), *largs) def _refresh_text(self, text, *largs): # Refresh all the lines from a new text. # By using cache in internal functions, this method should be fast. mode = 'all' if len(largs) > 1: mode, start, finish, _lines, _lines_flags, len_lines = largs #start = max(0, start) else: _lines, self._lines_flags = self._split_smart(text) _lines_labels = [] _line_rects = [] _create_label = self._create_line_label for x in _lines: lbl = _create_label(x) _lines_labels.append(lbl) _line_rects.append( Rectangle(size=(lbl.size if lbl else (0, 0)))) lbl = None if mode == 'all': self._lines_labels = _lines_labels self._lines_rects = _line_rects self._lines = _lines elif mode == 'del': if finish > start: self._insert_lines(start, finish if start == finish else (finish + 1), len_lines, _lines_flags, _lines, _lines_labels, _line_rects) elif mode == 'insert': self._insert_lines( start, finish if (start == finish and not len_lines) else (finish + 1), len_lines, _lines_flags, _lines, _lines_labels, _line_rects) line_label = _lines_labels[0] min_line_ht = self._label_cached.get_extents('_')[1] if line_label is None: self.line_height = max(1, min_line_ht) else: # with markup texture can be of height `1` self.line_height = max(line_label.height, min_line_ht) #self.line_spacing = 2 # now, if the text change, maybe the cursor is not at the same place as # before. 
so, try to set the cursor on the good place row = self.cursor_row self.cursor = self.get_cursor_from_index(self.cursor_index()) # if we back to a new line, reset the scroll, otherwise, the effect is # ugly if self.cursor_row != row: self.scroll_x = 0 # with the new text don't forget to update graphics again self._trigger_update_graphics() def _insert_lines(self, start, finish, len_lines, _lines_flags, _lines, _lines_labels, _line_rects): self_lines_flags = self._lines_flags _lins_flags = [] _lins_flags.extend(self_lines_flags[:start]) if len_lines: # if not inserting at first line then if start: # make sure line flags restored for first line # _split_smart assumes first line to be not a new line _lines_flags[0] = self_lines_flags[start] _lins_flags.extend(_lines_flags) _lins_flags.extend(self_lines_flags[finish:]) self._lines_flags = _lins_flags _lins_lbls = [] _lins_lbls.extend(self._lines_labels[:start]) if len_lines: _lins_lbls.extend(_lines_labels) _lins_lbls.extend(self._lines_labels[finish:]) self._lines_labels = _lins_lbls _lins_rcts = [] _lins_rcts.extend(self._lines_rects[:start]) if len_lines: _lins_rcts.extend(_line_rects) _lins_rcts.extend(self._lines_rects[finish:]) self._lines_rects = _lins_rcts _lins = [] _lins.extend(self._lines[:start]) if len_lines: _lins.extend(_lines) _lins.extend(self._lines[finish:]) self._lines = _lins def _trigger_update_graphics(self, *largs): Clock.unschedule(self._update_graphics) Clock.schedule_once(self._update_graphics, -1) def _update_graphics(self, *largs): # Update all the graphics according to the current internal values. # # This is a little bit complex, cause we have to : # - handle scroll_x # - handle padding # - create rectangle for the lines matching the viewport # - crop the texture coordinates to match the viewport # # This is the first step of graphics, the second is the selection. self.canvas.clear() add = self.canvas.add lh = self.line_height dy = lh + self.line_spacing # adjust view if the cursor is going outside the bounds sx = self.scroll_x sy = self.scroll_y # draw labels if not self.focus and (not self._lines or ( not self._lines[0] and len(self._lines) == 1)): rects = self._hint_text_rects labels = self._hint_text_labels lines = self._hint_text_lines else: rects = self._lines_rects labels = self._lines_labels lines = self._lines padding_left, padding_top, padding_right, padding_bottom = self.padding x = self.x + padding_left y = self.top - padding_top + sy miny = self.y + padding_bottom maxy = self.top - padding_top for line_num, value in enumerate(lines): if miny <= y <= maxy + dy: texture = labels[line_num] if not texture: y -= dy continue size = list(texture.size) texc = texture.tex_coords[:] # calcul coordinate viewport_pos = sx, 0 vw = self.width - padding_left - padding_right vh = self.height - padding_top - padding_bottom tw, th = list(map(float, size)) oh, ow = tch, tcw = texc[1:3] tcx, tcy = 0, 0 # adjust size/texcoord according to viewport if vw < tw: tcw = (vw / tw) * tcw size[0] = vw if vh < th: tch = (vh / th) * tch size[1] = vh if viewport_pos: tcx, tcy = viewport_pos tcx = tcx / tw * (ow) tcy = tcy / th * oh # cropping mlh = lh if y > maxy: vh = (maxy - y + lh) tch = (vh / float(lh)) * oh tcy = oh - tch size[1] = vh if y - lh < miny: diff = miny - (y - lh) y += diff vh = lh - diff tch = (vh / float(lh)) * oh size[1] = vh texc = ( tcx, tcy + tch, tcx + tcw, tcy + tch, tcx + tcw, tcy, tcx, tcy) # add rectangle. 
r = rects[line_num] r.pos = int(x), int(y - mlh) r.size = size r.texture = texture r.tex_coords = texc add(r) y -= dy self._update_graphics_selection() def _update_graphics_selection(self): if not self._selection: return self.canvas.remove_group('selection') dy = self.line_height + self.line_spacing rects = self._lines_rects padding_top = self.padding[1] padding_bottom = self.padding[3] _top = self.top y = _top - padding_top + self.scroll_y miny = self.y + padding_bottom maxy = _top - padding_top draw_selection = self._draw_selection a, b = self._selection_from, self._selection_to if a > b: a, b = b, a get_cursor_from_index = self.get_cursor_from_index s1c, s1r = get_cursor_from_index(a) s2c, s2r = get_cursor_from_index(b) s2r += 1 # pass only the selection lines[] # passing all the lines can get slow when dealing with a lot of text y -= s1r * dy _lines = self._lines _get_text_width = self._get_text_width tab_width = self.tab_width _label_cached = self._label_cached width = self.width padding_left = self.padding[0] padding_right = self.padding[2] x = self.x canvas_add = self.canvas.add selection_color = self.selection_color for line_num, value in enumerate(_lines[s1r:s2r], start=s1r): if miny <= y <= maxy + dy: r = rects[line_num] draw_selection(r.pos, r.size, line_num, (s1c, s1r), (s2c, s2r - 1), _lines, _get_text_width, tab_width, _label_cached, width, padding_left, padding_right, x, canvas_add, selection_color) y -= dy self._position_handles('both') def _draw_selection(self, *largs): pos, size, line_num, (s1c, s1r), (s2c, s2r),\ _lines, _get_text_width, tab_width, _label_cached, width,\ padding_left, padding_right, x, canvas_add, selection_color = largs # Draw the current selection on the widget. if line_num < s1r or line_num > s2r: return x, y = pos w, h = size x1 = x x2 = x + w if line_num == s1r: lines = _lines[line_num] x1 -= self.scroll_x x1 += _get_text_width(lines[:s1c], tab_width, _label_cached) if line_num == s2r: lines = _lines[line_num] x2 = (x - self.scroll_x) + _get_text_width(lines[:s2c], tab_width, _label_cached) width_minus_padding = width - (padding_right + padding_left) maxx = x + width_minus_padding if x1 > maxx: return x1 = max(x1, x) x2 = min(x2, x + width_minus_padding) canvas_add(Color(*selection_color, group='selection')) canvas_add(Rectangle( pos=(x1, pos[1]), size=(x2 - x1, size[1]), group='selection')) def on_size(self, instance, value): # if the size change, we might do invalid scrolling / text split # size the text maybe be put after size_hint have been resolved. 
self._trigger_refresh_text() self._refresh_hint_text() self.scroll_x = self.scroll_y = 0 def _get_cursor_pos(self): # return the current cursor x/y from the row/col dy = self.line_height + self.line_spacing padding_left = self.padding[0] padding_top = self.padding[1] left = self.x + padding_left top = self.top - padding_top y = top + self.scroll_y y -= self.cursor_row * dy x, y = left + self.cursor_offset() - self.scroll_x, y if x < left: self.scroll_x = 0 x = left if y > top: y = top self.scroll_y = 0 return x, y def _get_line_options(self): # Get or create line options, to be used for Label creation if self._line_options is None: self._line_options = kw = { 'font_size': self.font_size, 'font_name': self.font_name, 'anchor_x': 'left', 'anchor_y': 'top', 'padding_x': 0, 'padding_y': 0, 'padding': (0, 0)} self._label_cached = Label(**kw) return self._line_options def _create_line_label(self, text, hint=False): # Create a label from a text, using line options ntext = text.replace(u'\n', u'').replace(u'\t', u' ' * self.tab_width) if self.password and not hint: # Don't replace hint_text with * ntext = u'*' * len(ntext) kw = self._get_line_options() cid = '%s\0%s' % (ntext, str(kw)) texture = Cache_get('textinput.label', cid) if not texture: # FIXME right now, we can't render very long line... # if we move on "VBO" version as fallback, we won't need to # do this. try to found the maximum text we can handle label = None label_len = len(ntext) ld = None # check for blank line if not ntext: texture = Texture.create(size=(1, 1)) Cache_append('textinput.label', cid, texture) return texture while True: try: label = Label(text=ntext[:label_len], **kw) label.refresh() if ld is not None and ld > 2: ld = int(ld / 2) label_len += ld else: break except: # exception happen when we tried to render the text # reduce it... if ld is None: ld = len(ntext) ld = int(ld / 2) if ld < 2 and label_len: label_len -= 1 label_len -= ld continue # ok, we found it. texture = label.texture Cache_append('textinput.label', cid, texture) return texture def _tokenize(self, text): # Tokenize a text string from some delimiters if text is None: return delimiters = u' ,\'".;:\n\r\t' oldindex = 0 for index, char in enumerate(text): if char not in delimiters: continue if oldindex != index: yield text[oldindex:index] yield text[index:index + 1] oldindex = index + 1 yield text[oldindex:] def _split_smart(self, text): # Do a "smart" split. If autowidth or autosize is set, # we are not doing smart split, just a split on line break. # Otherwise, we are trying to split as soon as possible, to prevent # overflow on the widget. # depend of the options, split the text on line, or word if not self.multiline: lines = text.split(u'\n') lines_flags = [0] + [FL_IS_NEWLINE] * (len(lines) - 1) return lines, lines_flags # no autosize, do wordwrap. x = flags = 0 line = [] lines = [] lines_flags = [] _join = u''.join lines_append, lines_flags_append = lines.append, lines_flags.append padding_left = self.padding[0] padding_right = self.padding[2] width = self.width - padding_left - padding_right text_width = self._get_text_width _tab_width, _label_cached = self.tab_width, self._label_cached # try to add each word on current line. 
for word in self._tokenize(text): is_newline = (word == u'\n') w = text_width(word, _tab_width, _label_cached) # if we have more than the width, or if it's a newline, # push the current line, and create a new one if (x + w > width and line) or is_newline: lines_append(_join(line)) lines_flags_append(flags) flags = 0 line = [] x = 0 if is_newline: flags |= FL_IS_NEWLINE else: x += w line.append(word) if line or flags & FL_IS_NEWLINE: lines_append(_join(line)) lines_flags_append(flags) return lines, lines_flags def _key_down(self, key, repeat=False): displayed_str, internal_str, internal_action, scale = key if internal_action is None: if self._selection: self.delete_selection() self.insert_text(displayed_str) elif internal_action in ('shift', 'shift_L', 'shift_R'): if not self._selection: self._selection_from = self._selection_to = self.cursor_index() self._selection = True self._selection_finished = False elif internal_action.startswith('cursor_'): cc, cr = self.cursor self.do_cursor_movement(internal_action) if self._selection and not self._selection_finished: self._selection_to = self.cursor_index() self._update_selection() else: self.cancel_selection() elif self._selection and internal_action in ('del', 'backspace'): self.delete_selection() elif internal_action == 'del': # Move cursor one char to the right. If that was successful, # do a backspace (effectively deleting char right of cursor) cursor = self.cursor self.do_cursor_movement('cursor_right') if cursor != self.cursor: self.do_backspace(mode='del') elif internal_action == 'backspace': self.do_backspace() elif internal_action == 'enter': if self.multiline: self.insert_text(u'\n') else: self.dispatch('on_text_validate') self.focus = False elif internal_action == 'escape': self.focus = False if internal_action != 'escape': #self._recalc_size() pass def _key_up(self, key, repeat=False): displayed_str, internal_str, internal_action, scale = key if internal_action in ('shift', 'shift_L', 'shift_R'): if self._selection: self._update_selection(True) def _keyboard_on_key_down(self, window, keycode, text, modifiers): # Keycodes on OSX: ctrl, cmd = 64, 1024 key, key_str = keycode # This allows *either* ctrl *or* cmd, but not both. 
        is_shortcut = (modifiers == ['ctrl'] or (
            _is_osx and modifiers == ['meta']))
        is_interesting_key = key in (list(self.interesting_keys.keys()) + [27])

        if not self._editable:
            # duplicated but faster testing for non-editable keys
            if text and not is_interesting_key:
                if is_shortcut and key == ord('c'):
                    self._copy(self.selection_text)
            elif key == 27:
                self.focus = False
            return True

        if text and not is_interesting_key:
            self._hide_handles(self._win)
            self._hide_cut_copy_paste()
            self._win.remove_widget(self._handle_middle)
            if is_shortcut:
                if key == ord('x'):  # cut selection
                    self._cut(self.selection_text)
                elif key == ord('c'):  # copy selection
                    self._copy(self.selection_text)
                elif key == ord('v'):  # paste selection
                    self._paste()
                elif key == ord('a'):  # select all
                    self.select_all()
                elif key == ord('z'):  # undo
                    self.do_undo()
                elif key == ord('r'):  # redo
                    self.do_redo()
            else:
                if self._selection:
                    self.delete_selection()
                self.insert_text(text)
            #self._recalc_size()
            return

        if key == 27:  # escape
            self.focus = False
            return True
        elif key == 9:  # tab
            self.insert_text(u'\t')
            return True

        k = self.interesting_keys.get(key)
        if k:
            key = (None, None, k, 1)
            self._key_down(key)

    def _keyboard_on_key_up(self, window, keycode):
        key, key_str = keycode
        k = self.interesting_keys.get(key)
        if k:
            key = (None, None, k, 1)
            self._key_up(key)

    def on_hint_text(self, instance, value):
        self._refresh_hint_text()

    def _refresh_hint_text(self):
        _lines, self._hint_text_flags = self._split_smart(self.hint_text)
        _hint_text_labels = []
        _hint_text_rects = []
        _create_label = self._create_line_label

        for x in _lines:
            lbl = _create_label(x, hint=True)
            _hint_text_labels.append(lbl)
            _hint_text_rects.append(
                Rectangle(size=(lbl.size if lbl else (0, 0))))
            lbl = None

        self._hint_text_lines = _lines
        self._hint_text_labels = _hint_text_labels
        self._hint_text_rects = _hint_text_rects

        # Remember to update graphics
        self._trigger_update_graphics()

    #
    # Properties
    #

    _lines = ListProperty([])
    _hint_text_lines = ListProperty([])
    _editable = BooleanProperty(True)

    readonly = BooleanProperty(False)
    '''If True, the user will not be able to change the content of a
    textinput.

    .. versionadded:: 1.3.0

    :attr:`readonly` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to False.
    '''

    multiline = BooleanProperty(True)
    '''If True, the widget will be able to show multiple lines of text. If
    False, the "enter" keypress will defocus the textinput instead of adding a
    new line.

    :attr:`multiline` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to True.
    '''

    password = BooleanProperty(False)
    '''If True, the widget will display its characters as the character '*'.

    .. versionadded:: 1.2.0

    :attr:`password` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to False.
    '''

    keyboard_suggestions = BooleanProperty(True)
    '''If True, provides auto suggestions on top of the keyboard. This will
    only work if :attr:`input_type` is set to `text`.

    .. versionadded:: 1.8.0

    :attr:`keyboard_suggestions` is a
    :class:`~kivy.properties.BooleanProperty` and defaults to True.
    '''

    cursor_blink = BooleanProperty(False)
    '''This property is used to blink the cursor graphic. The value of
    :attr:`cursor_blink` is automatically computed. Setting a value on it will
    have no impact.

    :attr:`cursor_blink` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to False.
    '''

    def _get_cursor(self):
        return self._cursor

    def _set_cursor(self, pos):
        if not self._lines:
            self._trigger_refresh_text()
            return
        l = self._lines
        cr = boundary(pos[1], 0, len(l) - 1)
        cc = boundary(pos[0], 0, len(l[cr]))
        cursor = cc, cr
        if self._cursor == cursor:
            return

        self._cursor = cursor

        # adjust scrollview to ensure that the cursor always stays inside our
        # viewport.
        padding_left = self.padding[0]
        padding_right = self.padding[2]
        viewport_width = self.width - padding_left - padding_right
        sx = self.scroll_x
        offset = self.cursor_offset()

        # if offset is outside the current bounds, readjust
        if offset > viewport_width + sx:
            self.scroll_x = offset - viewport_width
        if offset < sx:
            self.scroll_x = offset

        # do the same for Y
        # this algorithm tries to center the cursor as much as possible
        dy = self.line_height + self.line_spacing
        offsety = cr * dy
        sy = self.scroll_y
        padding_top = self.padding[1]
        padding_bottom = self.padding[3]
        viewport_height = self.height - padding_top - padding_bottom - dy
        if offsety > viewport_height + sy:
            sy = offsety - viewport_height
        if offsety < sy:
            sy = offsety
        self.scroll_y = sy

        return True

    cursor = AliasProperty(_get_cursor, _set_cursor)
    '''Tuple of (col, row) values indicating the current cursor position.
    You can set a new (col, row) if you want to move the cursor. The scrolling
    area will be automatically updated to ensure that the cursor is visible
    inside the viewport.

    :attr:`cursor` is an :class:`~kivy.properties.AliasProperty`.
    '''

    def _get_cursor_col(self):
        return self._cursor[0]

    cursor_col = AliasProperty(_get_cursor_col, None, bind=('cursor', ))
    '''Current column of the cursor.

    :attr:`cursor_col` is an :class:`~kivy.properties.AliasProperty` to
    cursor[0], read-only.
    '''

    def _get_cursor_row(self):
        return self._cursor[1]

    cursor_row = AliasProperty(_get_cursor_row, None, bind=('cursor', ))
    '''Current row of the cursor.

    :attr:`cursor_row` is an :class:`~kivy.properties.AliasProperty` to
    cursor[1], read-only.
    '''

    cursor_pos = AliasProperty(_get_cursor_pos, None, bind=(
        'cursor', 'padding', 'pos', 'size', 'focus',
        'scroll_x', 'scroll_y'))
    '''Current position of the cursor, in (x, y).

    :attr:`cursor_pos` is an :class:`~kivy.properties.AliasProperty`,
    read-only.
    '''

    cursor_color = ListProperty([1, 0, 0, 1])
    '''Current color of the cursor, in (r, g, b, a) format.

    .. versionadded:: 1.8.1

    :attr:`cursor_color` is a :class:`~kivy.properties.ListProperty` and
    defaults to [1, 0, 0, 1].
    '''

    line_height = NumericProperty(1)
    '''Height of a line. This property is automatically computed from the
    :attr:`font_name`, :attr:`font_size`. Changing the line_height will have
    no impact.

    .. note::

        :attr:`line_height` is the height of a single line of text.
        Use :attr:`minimum_height`, which also includes padding, to
        get the height required to display the text properly.

    :attr:`line_height` is a :class:`~kivy.properties.NumericProperty`,
    read-only.
    '''

    tab_width = NumericProperty(4)
    '''By default, each tab will be replaced by four spaces on the text
    input widget. You can set a lower or higher value.

    :attr:`tab_width` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 4.
    '''

    padding_x = VariableListProperty([0, 0], length=2)
    '''Horizontal padding of the text: [padding_left, padding_right].

    padding_x also accepts a one argument form [padding_horizontal].

    :attr:`padding_x` is a :class:`~kivy.properties.VariableListProperty` and
    defaults to [0, 0]. This might be changed by the current theme.

    .. deprecated:: 1.7.0
        Use :attr:`padding` instead.
''' def on_padding_x(self, instance, value): self.padding[0] = value[0] self.padding[2] = value[1] padding_y = VariableListProperty([0, 0], length=2) '''Vertical padding of the text: [padding_top, padding_bottom]. padding_y also accepts a one argument form [padding_vertical]. :attr:`padding_y` is a :class:`~kivy.properties.VariableListProperty` and defaults to [0, 0]. This might be changed by the current theme. .. deprecated:: 1.7.0 Use :attr:`padding` instead. ''' def on_padding_y(self, instance, value): self.padding[1] = value[0] self.padding[3] = value[1] padding = VariableListProperty([6, 6, 6, 6]) '''Padding of the text: [padding_left, padding_top, padding_right, padding_bottom]. padding also accepts a two argument form [padding_horizontal, padding_vertical] and a one argument form [padding]. .. versionchanged:: 1.7.0 Replaced AliasProperty with VariableListProperty. :attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and defaults to [6, 6, 6, 6]. ''' scroll_x = NumericProperty(0) '''X scrolling value of the viewport. The scrolling is automatically updated when the cursor is moved or text changed. If there is no user input, the scroll_x and scroll_y properties may be changed. :attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' scroll_y = NumericProperty(0) '''Y scrolling value of the viewport. See :attr:`scroll_x` for more information. :attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' selection_color = ListProperty([0.1843, 0.6549, 0.8313, .5]) '''Current color of the selection, in (r, g, b, a) format. .. warning:: The color should always have an "alpha" component less than 1 since the selection is drawn after the text. :attr:`selection_color` is a :class:`~kivy.properties.ListProperty` and defaults to [0.1843, 0.6549, 0.8313, .5]. ''' border = ListProperty([16, 16, 16, 16]) '''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage` graphics instruction. Used with :attr:`background_normal` and :attr:`background_active`. Can be used for a custom background. .. versionadded:: 1.4.1 It must be a list of four values: (top, right, bottom, left). Read the BorderImage instruction for more information about how to use it. :attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to (16, 16, 16, 16). ''' background_normal = StringProperty( 'atlas://data/images/defaulttheme/textinput') '''Background image of the TextInput when it's not in focus. .. versionadded:: 1.4.1 :attr:`background_normal` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/textinput'. ''' background_disabled_normal = StringProperty( 'atlas://data/images/defaulttheme/textinput_disabled') '''Background image of the TextInput when disabled. .. versionadded:: 1.8.0 :attr:`background_disabled_normal` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/textinput_disabled'. ''' background_active = StringProperty( 'atlas://data/images/defaulttheme/textinput_active') '''Background image of the TextInput when it's in focus. .. versionadded:: 1.4.1 :attr:`background_active` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/textinput_active'. ''' background_disabled_active = StringProperty( 'atlas://data/images/defaulttheme/textinput_disabled_active') '''Background image of the TextInput when it's in focus and disabled. .. 
versionadded:: 1.8.0

    :attr:`background_disabled_active` is a
    :class:`~kivy.properties.StringProperty` and defaults to
    'atlas://data/images/defaulttheme/textinput_disabled_active'.
    '''

    background_color = ListProperty([1, 1, 1, 1])
    '''Current color of the background, in (r, g, b, a) format.

    .. versionadded:: 1.2.0

    :attr:`background_color` is a :class:`~kivy.properties.ListProperty`
    and defaults to [1, 1, 1, 1] (white).
    '''

    foreground_color = ListProperty([0, 0, 0, 1])
    '''Current color of the foreground, in (r, g, b, a) format.

    .. versionadded:: 1.2.0

    :attr:`foreground_color` is a :class:`~kivy.properties.ListProperty`
    and defaults to [0, 0, 0, 1] (black).
    '''

    disabled_foreground_color = ListProperty([0, 0, 0, .5])
    '''Current color of the foreground when disabled, in (r, g, b, a) format.

    .. versionadded:: 1.8.0

    :attr:`disabled_foreground_color` is a
    :class:`~kivy.properties.ListProperty` and
    defaults to [0, 0, 0, .5] (50% transparent black).
    '''

    use_bubble = BooleanProperty(not _is_desktop)
    '''Indicates whether the cut/copy/paste bubble is used.

    .. versionadded:: 1.7.0

    :attr:`use_bubble` is a :class:`~kivy.properties.BooleanProperty`
    and defaults to True on mobile OS's, False on desktop OS's.
    '''

    use_handles = BooleanProperty(not _is_desktop)
    '''Indicates whether the selection handles are displayed.

    .. versionadded:: 1.8.0

    :attr:`use_handles` is a :class:`~kivy.properties.BooleanProperty`
    and defaults to True on mobile OS's, False on desktop OS's.
    '''

    def get_sel_from(self):
        return self._selection_from

    selection_from = AliasProperty(get_sel_from, None)
    '''If a selection is in progress or complete, this property will represent
    the cursor index where the selection started.

    .. versionchanged:: 1.4.0

    :attr:`selection_from` is an :class:`~kivy.properties.AliasProperty` and
    defaults to None, readonly.
    '''

    def get_sel_to(self):
        return self._selection_to

    selection_to = AliasProperty(get_sel_to, None)
    '''If a selection is in progress or complete, this property will represent
    the cursor index where the selection ends.

    .. versionchanged:: 1.4.0

    :attr:`selection_to` is an :class:`~kivy.properties.AliasProperty` and
    defaults to None, readonly.
    '''

    selection_text = StringProperty(u'')
    '''Current content selection.

    :attr:`selection_text` is a :class:`~kivy.properties.StringProperty`
    and defaults to '', readonly.
    '''

    def on_selection_text(self, instance, value):
        if value and self.use_handles:
            self._trigger_show_handles()

    focus = BooleanProperty(False)
    '''If focus is True, the keyboard will be requested and you can start
    entering text into the textinput.

    :attr:`focus` is a :class:`~kivy.properties.BooleanProperty` and defaults
    to False.

    .. Note::
        Selection is cancelled when TextInput is focused. If you need to
        show selection when TextInput is focused, you should delay
        (use Clock.schedule) the call to the functions for selecting
        text (select_all, select_text).
    '''

    def _get_text(self, encode=True):
        lf = self._lines_flags
        l = self._lines
        len_l = len(l)

        if len(lf) < len_l:
            lf.append(1)

        text = u''.join([(u'\n' if (lf[i] & FL_IS_NEWLINE) else u'') + l[i]
                        for i in range(len_l)])

        if PY2 and encode and type(text) is not str:
            text = text.encode('utf-8')
        return text

    def _set_text(self, text):
        if PY2 and type(text) is str:
            text = text.decode('utf-8')

        if self._get_text(encode=False) == text:
            return

        self._refresh_text(text)
        self.cursor = self.get_cursor_from_index(len(text))

    text = AliasProperty(_get_text, _set_text, bind=('_lines', ))
    '''Text of the widget.
Creation of a simple hello world::

        widget = TextInput(text='Hello world')

    If you want to create the widget with a unicode string, use::

        widget = TextInput(text=u'My unicode string')

    :attr:`text` is a :class:`~kivy.properties.StringProperty`.
    '''

    font_name = StringProperty('DroidSans')
    '''Filename of the font to use. The path can be absolute or relative.
    Relative paths are resolved by the :func:`~kivy.resources.resource_find`
    function.

    .. warning::

        Depending on your text provider, the font file may be ignored.
        However, you can mostly use this without problems.

        If the font used lacks the glyphs for the particular language/symbols
        you are using, you will see '[]' blank box characters instead of the
        actual glyphs. The solution is to use a font that has the glyphs you
        need to display. For example, to display |unicodechar|, use a font
        like freesans.ttf that has the glyph.

        .. |unicodechar| image:: images/unicode-char.png

    :attr:`font_name` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'DroidSans'.
    '''

    font_size = NumericProperty('15sp')
    '''Font size of the text in pixels.

    :attr:`font_size` is a :class:`~kivy.properties.NumericProperty` and
    defaults to '15sp'.
    '''

    hint_text = StringProperty('')
    '''Hint text of the widget.

    Shown if text is '' and focus is False.

    .. versionadded:: 1.6.0

    :attr:`hint_text` is a :class:`~kivy.properties.StringProperty` and
    defaults to ''.
    '''

    hint_text_color = ListProperty([0.5, 0.5, 0.5, 1.0])
    '''Current color of the hint_text text, in (r, g, b, a) format.

    .. versionadded:: 1.6.0

    :attr:`hint_text_color` is a :class:`~kivy.properties.ListProperty` and
    defaults to [0.5, 0.5, 0.5, 1.0] (grey).
    '''

    auto_indent = BooleanProperty(False)
    '''Automatically indent multiline text.

    .. versionadded:: 1.7.0

    :attr:`auto_indent` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to False.
    '''

    allow_copy = BooleanProperty(True)
    '''Decides whether to allow copying the text.

    .. versionadded:: 1.8.0

    :attr:`allow_copy` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to True.
    '''

    def _get_min_height(self):
        # add the vertical (top/bottom) padding to the total text height
        return (len(self._lines) * (self.line_height + self.line_spacing)
                + self.padding[1] + self.padding[3])

    minimum_height = AliasProperty(_get_min_height, None,
                                   bind=('_lines', 'line_spacing', 'padding',
                                         'font_size', 'font_name', 'password',
                                         'hint_text'))
    '''Minimum height of the content inside the TextInput.

    .. versionadded:: 1.8.0

    :attr:`minimum_height` is a readonly
    :class:`~kivy.properties.AliasProperty`.
    '''

    line_spacing = NumericProperty(0)
    '''Space taken up between the lines.

    .. versionadded:: 1.8.0

    :attr:`line_spacing` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.
    '''

    input_type = OptionProperty('text', options=('text', 'number', 'url',
                                                 'mail', 'datetime', 'tel',
                                                 'address'))
    '''The kind of input keyboard to request.

    .. versionadded:: 1.8.0

    :attr:`input_type` is an :class:`~kivy.properties.OptionsProperty` and
    defaults to 'text'. Can be one of 'text', 'number', 'url', 'mail',
    'datetime', 'tel', 'address'.
    '''

    handle_image_middle = StringProperty(
        'atlas://data/images/defaulttheme/selector_middle')
    '''Image used to display the middle handle on the TextInput for cursor
    positioning.

    .. versionadded:: 1.8.0

    :attr:`handle_image_middle` is a :class:`~kivy.properties.StringProperty`
    and defaults to 'atlas://data/images/defaulttheme/selector_middle'.
''' def on_handle_image_middle(self, instance, value): if self._handle_middle: self._handle_middle.source = value handle_image_left = StringProperty( 'atlas://data/images/defaulttheme/selector_left') '''Image used to display the Left handle on the TextInput for selection. .. versionadded:: 1.8.0 :attr:`handle_image_left` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/selector_left'. ''' def on_handle_image_left(self, instance, value): if self._handle_left: self._handle_left.source = value handle_image_right = StringProperty( 'atlas://data/images/defaulttheme/selector_right') '''Image used to display the Right handle on the TextInput for selection. .. versionadded:: 1.8.0 :attr:`handle_image_right` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/selector_right'. ''' def on_handle_image_right(self, instance, value): if self._handle_right: self._handle_right.source = value keyboard_mode = OptionProperty('auto', options=('auto', 'managed')) '''How the keyboard visibility should be managed (auto will have standard behaviour to show/hide on focus, managed requires setting keyboard_visible manually, or calling the helper functions ``show_keyboard()`` and ``hide_keyboard()``. .. versionadded:: 1.8.0 :attr:`keyboard_mode` is an :class:`~kivy.properties.OptionsProperty` and defaults to 'auto'. Can be one of 'auto' or 'managed'. ''' def show_keyboard(self): """ Convenience function to show the keyboard in managed mode """ if self.keyboard_mode == "managed": self._bind_keyboard() def hide_keyboard(self): """ Convenience function to hide the keyboard in managed mode """ if self.keyboard_mode == "managed": self._unbind_keyboard() if __name__ == '__main__': from kivy.app import App from kivy.uix.boxlayout import BoxLayout class TextInputApp(App): def build(self): root = BoxLayout(orientation='vertical') textinput = TextInput(multiline=True, use_bubble=True, use_handles=True) textinput.text = __doc__ root.add_widget(textinput) textinput2 = TextInput(multiline=False, text='monoline textinput', size_hint=(1, None), height=30) root.add_widget(textinput2) return root TextInputApp().run()
mit
-7,725,666,435,607,477,000
33.735593
80
0.553452
false
4.001519
false
false
false
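A brief, hedged usage sketch of the managed keyboard mode shown in the TextInput code above; it assumes a running Kivy app (so a window exists to serve the keyboard request), and the widget name `ti` is illustrative:

from kivy.uix.textinput import TextInput

# keyboard_mode='managed' disables the automatic show/hide on focus,
# so the helper methods below must be called explicitly.
ti = TextInput(keyboard_mode='managed', multiline=False)
ti.show_keyboard()  # request the soft keyboard
ti.hide_keyboard()  # release it again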
ninepints/hootvetica
food/management/commands/__init__.py
1
2086
import uuid

from django.db.models import Q

from food.models import Category, Item, WeeklyClosure, OneTimeClosure


def get_closures(current_date):
    weekly_closures = set(
        vals['location_id'] for vals in
        WeeklyClosure.objects.filter(
            weekday=current_date.weekday())
        .values('location_id'))
    onetime_closures = set(
        vals['location_id'] for vals in
        OneTimeClosure.objects.filter(
            start_date__lte=current_date,
            end_date__gte=current_date)
        .values('location_id'))
    return (weekly_closures, onetime_closures)


def open_locations(locations, current_time):
    count = locations.update(open=True, last_modified=current_time)
    Item.objects.filter(parent__parent__in=locations).update(
        status='AVA', last_modified=current_time)
    do_hardcoded_menu_insertions(locations, current_time)
    return count


def close_locations(locations, current_time):
    count = locations.update(open=False, message='',
                             last_modified=current_time)
    do_hardcoded_menu_deletions(locations)
    return count


def do_hardcoded_menu_insertions(locations, current_time):
    if current_time.weekday() == 6:
        Item.objects.filter(parent__name='Chicken').update(status='OUT')
        Item.objects.bulk_create(
            [Item(
                uid=uuid.uuid4().hex, parent=cat, name='HBCB',
                status='AVA', last_modified=current_time)
             for cat in Category.objects.filter(
                 name='Chicken', parent__in=locations)])
    Item.objects.bulk_create(
        [Item(
            uid=uuid.uuid4().hex, parent=cat, name='Specialty',
            status='AVA', last_modified=current_time)
         for cat in Category.objects.filter(
             name='Pizza', parent__in=locations)])


def do_hardcoded_menu_deletions(locations):
    Item.objects.filter(parent__parent__in=locations).filter(
        Q(name='HBCB') | Q(name='Specialty')).delete()
mit
2,817,346,170,367,118,300
36.927273
80
0.610738
false
3.827523
false
false
false
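A hedged sketch of how a scheduled management command might drive the helpers above. The `Location` model and its primary-key field are assumptions (the module only hints at them through `location_id` and the `locations` querysets); `get_closures`, `open_locations` and `close_locations` come from the module itself:

import datetime

from food.management.commands import (
    get_closures, open_locations, close_locations)
from food.models import Location  # assumed model, not defined above

now = datetime.datetime.now()
weekly, onetime = get_closures(now.date())
closed_ids = weekly | onetime  # union of both closure kinds

# open everything that is not closed today, close the rest
open_locations(Location.objects.exclude(pk__in=closed_ids), now)
close_locations(Location.objects.filter(pk__in=closed_ids), now)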
openstack/python-openstackclient
openstackclient/common/sdk_utils.py
2
2358
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


def get_osc_show_columns_for_sdk_resource(
    sdk_resource,
    osc_column_map,
    invisible_columns=None
):
    """Get and filter the display and attribute columns for an SDK resource.

    Common utility function for preparing the output of an OSC show command.
    Some of the columns may need to get renamed, others made invisible.

    :param sdk_resource: An SDK resource
    :param osc_column_map: A hash of mappings for display column names
    :param invisible_columns: A list of invisible column names

    :returns: Two tuples containing the names of the display and attribute
        columns
    """

    if getattr(sdk_resource, 'allow_get', None) is not None:
        resource_dict = sdk_resource.to_dict(
            body=True, headers=False, ignore_none=False)
    else:
        resource_dict = sdk_resource

    # Build the OSC column names to display for the SDK resource.
    attr_map = {}
    display_columns = list(resource_dict.keys())
    invisible_columns = [] if invisible_columns is None else invisible_columns
    for col_name in invisible_columns:
        if col_name in display_columns:
            display_columns.remove(col_name)
    for sdk_attr, osc_attr in osc_column_map.items():
        if sdk_attr in display_columns:
            attr_map[osc_attr] = sdk_attr
            display_columns.remove(sdk_attr)
        if osc_attr not in display_columns:
            display_columns.append(osc_attr)
    sorted_display_columns = sorted(display_columns)

    # Build the SDK attribute names for the OSC column names.
    attr_columns = []
    for column in sorted_display_columns:
        new_column = attr_map[column] if column in attr_map else column
        attr_columns.append(new_column)

    return tuple(sorted_display_columns), tuple(attr_columns)
apache-2.0
-8,352,966,954,439,167,000
39.655172
78
0.692536
false
4.003396
false
false
false
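A small usage sketch of get_osc_show_columns_for_sdk_resource. Because the function also accepts a plain dict, the example runs without a real SDK resource; the field names are invented:

from openstackclient.common.sdk_utils import (
    get_osc_show_columns_for_sdk_resource)

resource = {'id': 'abc', 'display_name': 'demo', 'location': None}

display, attrs = get_osc_show_columns_for_sdk_resource(
    resource,
    osc_column_map={'display_name': 'name'},  # rename for display
    invisible_columns=['location'],           # drop from output entirely
)
print(display)  # ('id', 'name')
print(attrs)    # ('id', 'display_name')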
mishbahr/django-connected
connected_accounts/providers/instagram.py
1
1535
from django.utils.translation import ugettext_lazy as _

from connected_accounts.conf import settings
from connected_accounts.provider_pool import providers

from .base import OAuth2Provider, ProviderAccount


class InstagramAccount(ProviderAccount):
    PROFILE_URL = 'http://instagram.com/'

    def get_profile_url(self):
        return self.PROFILE_URL + self.account.extra_data.get('username', '')

    def get_avatar_url(self):
        return self.account.extra_data.get('profile_picture')

    def to_str(self):
        default = super(InstagramAccount, self).to_str()
        return self.account.extra_data.get('username', default)

    def extract_common_fields(self):
        data = self.account.extra_data
        return dict(username=data.get('username'),
                    name=data.get('full_name'))


class InstagramProvider(OAuth2Provider):
    id = 'instagram'
    name = _('Instagram')
    account_class = InstagramAccount

    access_token_url = 'https://api.instagram.com/oauth/access_token'
    authorization_url = 'https://api.instagram.com/oauth/authorize'
    profile_url = 'https://api.instagram.com/v1/users/self'

    consumer_key = settings.CONNECTED_ACCOUNTS_INSTAGRAM_CONSUMER_KEY
    consumer_secret = settings.CONNECTED_ACCOUNTS_INSTAGRAM_CONSUMER_SECRET
    scope = settings.CONNECTED_ACCOUNTS_INSTAGRAM_SCOPE

    def extract_uid(self, data):
        return str(data['data']['id'])

    def extract_extra_data(self, data):
        return data.get('data', {})


providers.register(InstagramProvider)
bsd-3-clause
-4,676,531,674,048,964,000
30.979167
77
0.700326
false
3.681055
false
false
false
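An illustrative sketch of the two extraction hooks above, run against the shape of an Instagram /users/self response; the payload values are invented and it assumes the OAuth2Provider base class can be instantiated without arguments:

from connected_accounts.providers.instagram import InstagramProvider

profile = {'data': {'id': 1574083, 'username': 'snoopdogg',
                    'full_name': 'Snoop Dogg'}}

provider = InstagramProvider()
print(provider.extract_uid(profile))         # '1574083'
print(provider.extract_extra_data(profile))  # the inner 'data' dict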
sunset1995/py360convert
py360convert/c2e.py
1
1865
import numpy as np

from . import utils


def c2e(cubemap, h, w, mode='bilinear', cube_format='dice'):
    if mode == 'bilinear':
        order = 1
    elif mode == 'nearest':
        order = 0
    else:
        raise NotImplementedError('unknown mode')

    if cube_format == 'horizon':
        pass
    elif cube_format == 'list':
        cubemap = utils.cube_list2h(cubemap)
    elif cube_format == 'dict':
        cubemap = utils.cube_dict2h(cubemap)
    elif cube_format == 'dice':
        cubemap = utils.cube_dice2h(cubemap)
    else:
        raise NotImplementedError('unknown cube_format')
    assert len(cubemap.shape) == 3
    assert cubemap.shape[0] * 6 == cubemap.shape[1]
    assert w % 8 == 0
    face_w = cubemap.shape[0]

    uv = utils.equirect_uvgrid(h, w)
    u, v = np.split(uv, 2, axis=-1)
    u = u[..., 0]
    v = v[..., 0]
    cube_faces = np.stack(np.split(cubemap, 6, 1), 0)

    # Get face id to each pixel: 0F 1R 2B 3L 4U 5D
    tp = utils.equirect_facetype(h, w)
    coor_x = np.zeros((h, w))
    coor_y = np.zeros((h, w))

    for i in range(4):
        mask = (tp == i)
        coor_x[mask] = 0.5 * np.tan(u[mask] - np.pi * i / 2)
        coor_y[mask] = -0.5 * np.tan(v[mask]) / np.cos(u[mask] - np.pi * i / 2)

    mask = (tp == 4)
    c = 0.5 * np.tan(np.pi / 2 - v[mask])
    coor_x[mask] = c * np.sin(u[mask])
    coor_y[mask] = c * np.cos(u[mask])

    mask = (tp == 5)
    c = 0.5 * np.tan(np.pi / 2 - np.abs(v[mask]))
    coor_x[mask] = c * np.sin(u[mask])
    coor_y[mask] = -c * np.cos(u[mask])

    # Final renormalize
    coor_x = (np.clip(coor_x, -0.5, 0.5) + 0.5) * face_w
    coor_y = (np.clip(coor_y, -0.5, 0.5) + 0.5) * face_w

    equirec = np.stack([
        utils.sample_cubefaces(cube_faces[..., i], tp, coor_y, coor_x,
                               order=order)
        for i in range(cube_faces.shape[3])
    ], axis=-1)

    return equirec
mit
-4,961,470,892,452,482,000
28.140625
83
0.538338
false
2.601116
false
false
false
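A usage sketch for c2e: a random cubemap in 'dice' layout (3 face heights tall, 4 face widths wide) converted to a 512x1024 equirectangular image. The top-level py360convert import is an assumption about the package's public API, and the width respects the w % 8 == 0 assertion in the code above:

import numpy as np

from py360convert import c2e  # assumed re-export of the function above

face_w = 256
dice = np.random.rand(3 * face_w, 4 * face_w, 3)  # H x W x C dice layout

equirec = c2e(dice, h=512, w=1024, mode='bilinear', cube_format='dice')
print(equirec.shape)  # (512, 1024, 3)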
mad-lab/transit
src/pytransit/draw_trash.py
1
12680
# Copyright 2015. # Michael A. DeJesus, Chaitra Ambadipudi, and Thomas R. Ioerger. # # # This file is part of TRANSIT. # # TRANSIT is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License. # # # TRANSIT is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with TRANSIT. If not, see <http://www.gnu.org/licenses/>. import pytransit.view_trash as view_trash from math import * import os import platform import numpy from PIL import Image, ImageDraw, ImageFont def normalize(X, old_min, old_max, new_min, new_max): old_range = (old_max - old_min) new_range = (new_max - new_min) if old_range == 0: return new_min else: return (((X - old_min) * new_range) / old_range) + new_min linuxFonts = [] linuxFonts.append("/usr/share/fonts/truetype/ttf-dejavu/DejaVuSans-Bold.ttf") linuxFonts.append("/usr/share/fonts/dejavu-lgc/DejaVuLGCSerifCondensed-Bold.ttf") linuxFonts.append("/usr/share/fonts/dejavu-lgc/DejaVuLGCSansCondensed-Bold.ttf") winFonts = [] winFonts.append("consolab.ttf") winFonts.append("courb.ttf") winFonts.append("arial.ttf") fontsize = 16 font = ImageFont.load_default() if platform.system() == "Linux": for fontpath in linuxFonts: if os.path.isfile(fontpath): font = ImageFont.truetype(fontpath, fontsize) break elif platform.system() == "Windows": for fontpath in winFonts: try: font = ImageFont.truetype(fontpath, fontsize) break except: pass def draw_reads(draw, reads, ta_sites, start_x=0, start_y=0, width=400, height=100, start=0, end=500, min_read=0, max_read=500, lwd=2): TRUNC_READS = [min(rd, max_read) for rd in reads] NORM_READS = [normalize(rd, 0, max_read, 0, max_read) for rd in TRUNC_READS] new_min_w = start_x new_max_w = start_x + width #- self.padding_r new_min_h = start_y new_max_h = start_y + height for i,TA in enumerate(ta_sites): TApos = normalize(TA, start, end, new_min_w, new_max_w) if NORM_READS[i] == 0: continue read_h = normalize(NORM_READS[i], 0, max_read, new_min_h, new_max_h) # height of read line Y1 = start_y Y2 = start_y - (read_h-start_y) draw.line([(TApos, Y1), (TApos, Y2)], width=lwd, fill=(255,0,0)) def draw_ta_sites(draw, ta_sites, start_x=0, start_y=0, width=200, height=0, start=0, end=500, lwd=2): new_min_w = start_x new_max_w = start_x + width #- self.padding_r for i,TA in enumerate(ta_sites): TApos = normalize(TA, start, end, new_min_w, new_max_w) draw.line([(TApos, start_y+0), (TApos, start_y + height)], width=lwd, fill="black") def draw_scale(draw, start_x, start_y, height, max_read): #print("scale", start_x, start_y, height) MIDREAD = int(max_read/2.0) top_text_w, top_text_h = draw.textsize(str(max_read), font=font) draw.text((start_x, start_y), str(max_read), font=font, fill="black") draw.text((start_x, start_y + height/2.0), str(MIDREAD), font=font, fill="black") bottom_text_w, bottom_text_h = draw.textsize(str(MIDREAD), font=font) draw.text((start_x+bottom_text_w-(top_text_w/2.0), start_y+height), "0", font=font, fill="black") def draw_features(draw, GENES, orf2data, start, end, start_x, start_y, width, height): padding_h = 3 text_w, text_h = draw.textsize("RV0001", font=font) gene_h = height - text_h triangle_size = 10 for gene in GENES: if gene not in orf2data: continue 
gene_start = orf2data[gene][2] gene_end = orf2data[gene][3] strand = orf2data[gene][4] name = orf2data[gene][0] new_min = start_x new_max = start_x + width norm_start = normalize(max(gene_start, start), start, end, new_min, new_max) norm_end = normalize(min(gene_end, end), start, end, new_min, new_max) color = "gray" if gene.startswith("ES-"): color = "red" elif gene.startswith("GD-"): color = "yellow" elif gene.startswith("NE-"): color = "blue" elif gene.startswith("GA-"): color = "green" if strand == "-": if gene_start >= start: draw.rectangle(((norm_start, start_y+5),(norm_end,start_y+gene_h-5)), fill=color) else: draw.rectangle(((norm_start, start_y+5),(norm_end,start_y+gene_h-5)), fill=color) else: if gene_end <= end: draw.rectangle(((norm_start, start_y+5),(norm_end, start_y+gene_h-5)), fill=color) else: draw.rectangle(((norm_start, start_y+5),(norm_end, start_y+gene_h-5)), fill=color) if name == "-": name = gene if not name.startswith("non-coding"): name_text_w, name_text_h = draw.textsize(name, font=font) if abs(norm_start-norm_end) >= name_text_w: draw.text(( norm_start + (abs(norm_start-norm_end) - name_text_w)/2.0 , start_y+gene_h+text_h), name, font=font, fill="black") def draw_genes(draw, GENES, orf2data, start, end, start_x, start_y, width, height, doTriangle=True): padding_h = 3 text_w, text_h = draw.textsize("RV0001", font=font) gene_h = height - text_h triangle_size = 10 if not doTriangle: triangle_size = 0 for gene in GENES: if gene not in orf2data: continue gene_start = orf2data[gene][2] gene_end = orf2data[gene][3] strand = orf2data[gene][4] name = orf2data[gene][0] new_min = start_x new_max = start_x + width norm_start = normalize(max(gene_start, start), start, end, new_min, new_max) norm_end = normalize(min(gene_end, end), start, end, new_min, new_max) if strand == "-": if gene_start >= start: draw.rectangle(((norm_start+triangle_size, start_y+5),(norm_end,start_y+gene_h-5)), fill="blue") if doTriangle: draw.polygon([(norm_start+triangle_size, start_y),(norm_start+triangle_size,start_y+gene_h), (norm_start,start_y+gene_h/2.0)], fill="blue" ) else: draw.rectangle(((norm_start, start_y+5),(norm_end,start_y+gene_h-5)), fill="blue") else: if gene_end <= end: draw.rectangle(((norm_start, start_y+5),(norm_end-triangle_size, start_y+gene_h-5)), fill="blue") if doTriangle: draw.polygon([(norm_end-triangle_size, start_y),(norm_end-triangle_size,start_y+gene_h), (norm_end,start_y+gene_h/2.0)], fill="blue" ) else: draw.rectangle(((norm_start, start_y+5),(norm_end, start_y+gene_h-5)), fill="blue") if name == "-": name = gene if not name.startswith("non-coding"): name_text_w, name_text_h = draw.textsize(name, font=font) if abs(norm_start-norm_end) >= name_text_w: draw.text(( norm_start + (abs(norm_start-norm_end) - name_text_w)/2.0 , start_y+gene_h+text_h), name, font=font, fill="black") def get_dynamic_height(N): #Set rest of heights and widths read_h = 100 gene_h = 50 ta_h = 20 padding_h = 3 canvas_h = read_h*N + ta_h + gene_h + padding_h + padding_h + 80 return (canvas_h) def draw_canvas(fulldata, position, hash, orf2data, feature_hashes, feature_data, labels=[], min_read=0, scale=[500], globalScale = False, start=1, end=500, canvas_h=-1, canvas_w=1000): temp_image = Image.new("RGB",(200, 200),"white") temp_draw = ImageDraw.Draw(temp_image) #Set main draw object N = len(fulldata) Nfeat = len(feature_hashes) #Set Labels if not labels: labels= ["Read Counts"]*N GENES = [] FEATURES = [[] for j in range(len(feature_hashes))] TA_SITES = [] READS = [] nc_count = 1 for j,data in 
enumerate(fulldata): #print(j) temp = [] for i,read in enumerate(data): pos = position[i] if start <= pos <= end: gene = hash.get(pos,["non-coding"])[0] if gene == "non-coding" and len(GENES) > 0 and not GENES[-1].startswith("non-coding"): gene+="_%d" % nc_count nc_count +=1 if j ==0: if gene not in GENES: GENES.append(gene) TA_SITES.append(pos) for f,f_hash in enumerate(feature_hashes): feat = f_hash.get(pos,["non-coding"])[0] if feat not in FEATURES[f]: FEATURES[f].append(feat) temp.append(read) READS.append(temp) max_reads = [] if globalScale: max_reads = [int(numpy.max(READS))] * len(READS) else: for j,s in enumerate(scale): #print(j,s) if s < 0: max_reads.append(int(numpy.max(READS[j]))) else: max_reads.append(s) #Get dynamic text widths #print("Labels:") max_label_w = 0 for L in labels: label_text_w, label_text_h = temp_draw.textsize(L, font=font) max_label_w = max(label_text_w, max_label_w) #print(L) scale_text_w, scale_text_h = temp_draw.textsize(str(max(max_reads)), font=font) #Set rest of heights and widths read_h = 100 gene_h = 50 ta_h = 20 padding_w = 3 padding_h = 3 read_w = canvas_w - (max_label_w + scale_text_w + padding_w + padding_w + 30) if canvas_h == -1: canvas_h = read_h*N + ta_h + gene_h + padding_h + padding_h + 80 + (gene_h+padding_h+50)*(Nfeat) image = Image.new("RGB",(canvas_w, canvas_h),"white") draw = ImageDraw.Draw(image) lwd = 2 #print(READS) #print("start", start) #print("end", end) #print(len(READS), len(TA_SITES)) #print("") #for rd in READS: # print(rd) #print("") start_x = max_label_w + padding_w + 21 draw.line([(start_x, 0), (start_x, canvas_h)], width=lwd, fill="black") start_y = 0 half = 100*0.5 start_x += 5 for j in range(len(fulldata)): temp_label_text_w, temp_label_text_h = temp_draw.textsize(labels[j], font=font) label_text_x = (start_x/2.0) - (temp_label_text_w/2.0) start_y+=read_h+padding_h #draw.text((10, start_y - half), labels[j], font=font, fill="black") draw.text((label_text_x, start_y - half), labels[j], font=font, fill="black") draw_reads(draw, READS[j], TA_SITES, start_x, start_y, read_w, read_h, start, end, min_read, max_reads[j]) draw_scale(draw, start_x+read_w+padding_w+2, start_y-100+10, 70, max_reads[j]) start_y+=10 #start_x+=5 #TA sites temp_label_text_w, temp_label_text_h = temp_draw.textsize('TA Sites', font=font) label_text_x = (start_x/2.0) - (temp_label_text_w/2.0) #draw.text((30, start_y),'TA Sites', font=font, fill="black") draw.text((label_text_x, start_y),'TA Sites', font=font, fill="black") draw_ta_sites(draw, TA_SITES, start_x, start_y, read_w, ta_h, start, end) #Genes temp_label_text_w, temp_label_text_h = temp_draw.textsize('Genes', font=font) label_text_x = (start_x/2.0) - (temp_label_text_w/2.0) start_y += 50 #draw.text((30, start_y+10),'Genes', font=font, fill="black") draw.text((label_text_x, start_y+10),'Genes', font=font, fill="black") width = read_w draw_genes(draw, GENES, orf2data, start, end, start_x, start_y, width, gene_h) start_y += gene_h -20#+ padding_h #Features: for f in range(len(FEATURES)): start_y += gene_h + padding_h + 25 temp_label_text_w, temp_label_text_h = temp_draw.textsize('Feature-%d' % (f+1), font=font) label_text_x = (start_x/2.0) - (temp_label_text_w/2.0) draw.text((label_text_x, start_y+10),'Feature-%d' % (f+1), font=font, fill="black") width = read_w #print(FEATURES[f]) #draw_genes(draw, FEATURES[f], feature_data[f], start, end, start_x, start_y, width, gene_h)) draw_features(draw, FEATURES[f], feature_data[f], start, end, start_x, start_y, width, gene_h) start_y +=10 return(image)
gpl-3.0
3,052,072,579,063,662,000
31.429668
185
0.579101
false
2.989156
false
false
false
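A quick worked check of the normalize() helper that the drawing code above leans on for every coordinate mapping, including its guarded zero-range case; this assumes the module's GUI dependencies are importable:

from pytransit.draw_trash import normalize

# midpoint of [100, 200] maps to the midpoint of [0, 400]
print(normalize(150, 100, 200, 0, 400))  # 200.0

# collapsed old range: the guard returns new_min instead of dividing by zero
print(normalize(150, 100, 100, 0, 400))  # 0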
mitsuhiko/fungiform
setup.py
1
1459
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Fungiform
~~~~~~~~~

A form handling system that previously was used for Pocoo's Zine
and Plurk's Solace software. Unbundled into a separate library that
is framework independent.

This is still a preview release. Check the source for more information.

:copyright: (c) 2010 by Armin Ronacher, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup


setup(
    name = 'Fungiform',
    version = '0.2',
    url = 'http://github.com/mitsuhiko/fungiform',
    license = 'BSD License',
    author = 'Armin Ronacher',
    author_email = 'armin.ronacher@active-4.com',
    description = 'form library',
    long_description = __doc__,
    keywords = 'form library',
    packages = ['fungiform', 'fungiform.tests'],
    platforms = 'any',
    zip_safe = False,
    test_suite = 'fungiform.tests.suite',
    include_package_data = True,
    classifiers = [
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Development Status :: 4 - Beta'
    ],
)
bsd-3-clause
3,290,813,701,191,853,600
29.395833
76
0.632625
false
3.975477
false
false
false
googleapis/googleapis-gen
google/devtools/cloudtrace/v1/devtools-cloudtrace-v1-py/google/cloud/trace_v1/types/trace.py
1
12909
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore

from google.protobuf import timestamp_pb2  # type: ignore


__protobuf__ = proto.module(
    package='google.devtools.cloudtrace.v1',
    manifest={
        'Trace',
        'Traces',
        'TraceSpan',
        'ListTracesRequest',
        'ListTracesResponse',
        'GetTraceRequest',
        'PatchTracesRequest',
    },
)


class Trace(proto.Message):
    r"""A trace describes how long it takes for an application to
    perform an operation. It consists of a set of spans, each of
    which represent a single timed event within the operation.

    Attributes:
        project_id (str):
            Project ID of the Cloud project where the
            trace data is stored.
        trace_id (str):
            Globally unique identifier for the trace. This identifier
            is a 128-bit numeric value formatted as a 32-byte hex
            string. For example, ``382d4f4c6b7bb2f4a972559d9085001d``.
        spans (Sequence[google.cloud.trace_v1.types.TraceSpan]):
            Collection of spans in the trace.
    """

    project_id = proto.Field(
        proto.STRING,
        number=1,
    )
    trace_id = proto.Field(
        proto.STRING,
        number=2,
    )
    spans = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message='TraceSpan',
    )


class Traces(proto.Message):
    r"""List of new or updated traces.

    Attributes:
        traces (Sequence[google.cloud.trace_v1.types.Trace]):
            List of traces.
    """

    traces = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message='Trace',
    )


class TraceSpan(proto.Message):
    r"""A span represents a single timed event within a trace. Spans
    can be nested and form a trace tree. Often, a trace contains a
    root span that describes the end-to-end latency of an operation
    and, optionally, one or more subspans for its suboperations.
    Spans do not need to be contiguous. There may be gaps between
    spans in a trace.

    Attributes:
        span_id (int):
            Identifier for the span. Must be a 64-bit integer other
            than 0 and unique within a trace. For example,
            ``2205310701640571284``.
        kind (google.cloud.trace_v1.types.TraceSpan.SpanKind):
            Distinguishes between spans generated in a particular
            context. For example, two spans with the same name may be
            distinguished using ``RPC_CLIENT`` and ``RPC_SERVER`` to
            identify queueing latency associated with the span.
        name (str):
            Name of the span. Must be less than 128 bytes. The span
            name is sanitized and displayed in the Stackdriver Trace
            tool in the Google Cloud Platform Console.
            The name may be a method name or some other per-call site
            name. For the same executable and the same call point, a
            best practice is to use a consistent name, which makes it
            easier to correlate cross-trace spans.
        start_time (google.protobuf.timestamp_pb2.Timestamp):
            Start time of the span in nanoseconds from
            the UNIX epoch.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            End time of the span in nanoseconds from the
            UNIX epoch.
        parent_span_id (int):
            Optional. ID of the parent span, if any.
        labels (Sequence[google.cloud.trace_v1.types.TraceSpan.LabelsEntry]):
            Collection of labels associated with the span. Label keys
            must be less than 128 bytes. Label values must be less than
            16 kilobytes (10MB for ``/stacktrace`` values).

            Some predefined label keys exist, or you may create your
            own. When creating your own, we recommend the following
            formats:

            -  ``/category/product/key`` for agents of well-known
               products (e.g. ``/db/mongodb/read_size``).
            -  ``short_host/path/key`` for domain-specific keys (e.g.
               ``foo.com/myproduct/bar``)

            Predefined labels include:

            -  ``/agent``
            -  ``/component``
            -  ``/error/message``
            -  ``/error/name``
            -  ``/http/client_city``
            -  ``/http/client_country``
            -  ``/http/client_protocol``
            -  ``/http/client_region``
            -  ``/http/host``
            -  ``/http/method``
            -  ``/http/path``
            -  ``/http/redirected_url``
            -  ``/http/request/size``
            -  ``/http/response/size``
            -  ``/http/route``
            -  ``/http/status_code``
            -  ``/http/url``
            -  ``/http/user_agent``
            -  ``/pid``
            -  ``/stacktrace``
            -  ``/tid``
    """
    class SpanKind(proto.Enum):
        r"""Type of span. Can be used to specify additional
        relationships between spans in addition to a parent/child
        relationship.
        """
        SPAN_KIND_UNSPECIFIED = 0
        RPC_SERVER = 1
        RPC_CLIENT = 2

    span_id = proto.Field(
        proto.FIXED64,
        number=1,
    )
    kind = proto.Field(
        proto.ENUM,
        number=2,
        enum=SpanKind,
    )
    name = proto.Field(
        proto.STRING,
        number=3,
    )
    start_time = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )
    end_time = proto.Field(
        proto.MESSAGE,
        number=5,
        message=timestamp_pb2.Timestamp,
    )
    parent_span_id = proto.Field(
        proto.FIXED64,
        number=6,
    )
    labels = proto.MapField(
        proto.STRING,
        proto.STRING,
        number=7,
    )


class ListTracesRequest(proto.Message):
    r"""The request message for the ``ListTraces`` method. All fields are
    required unless specified.

    Attributes:
        project_id (str):
            Required. ID of the Cloud project where the
            trace data is stored.
        view (google.cloud.trace_v1.types.ListTracesRequest.ViewType):
            Optional. Type of data returned for traces in the list.
            Default is ``MINIMAL``.
        page_size (int):
            Optional. Maximum number of traces to return. If not
            specified or <= 0, the implementation selects a reasonable
            value. The implementation may return fewer traces than the
            requested page size.
        page_token (str):
            Token identifying the page of results to return. If
            provided, use the value of the ``next_page_token`` field
            from a previous request.
        start_time (google.protobuf.timestamp_pb2.Timestamp):
            Start of the time interval (inclusive) during
            which the trace data was collected from the
            application.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            End of the time interval (inclusive) during
            which the trace data was collected from the
            application.
        filter (str):
            Optional. A filter against labels for the request.

            By default, searches use prefix matching. To specify exact
            match, prepend a plus symbol (``+``) to the search term.
            Multiple terms are ANDed. Syntax:

            -  ``root:NAME_PREFIX`` or ``NAME_PREFIX``: Return traces
               where any root span starts with ``NAME_PREFIX``.
            -  ``+root:NAME`` or ``+NAME``: Return traces where any
               root span's name is exactly ``NAME``.
            -  ``span:NAME_PREFIX``: Return traces where any span
               starts with ``NAME_PREFIX``.
            -  ``+span:NAME``: Return traces where any span's name is
               exactly ``NAME``.
            -  ``latency:DURATION``: Return traces whose overall
               latency is greater or equal to than ``DURATION``.
               Accepted units are nanoseconds (``ns``), milliseconds
               (``ms``), and seconds (``s``). Default is ``ms``. For
               example, ``latency:24ms`` returns traces whose overall
               latency is greater than or equal to 24 milliseconds.
            -  ``label:LABEL_KEY``: Return all traces containing the
               specified label key (exact match, case-sensitive)
               regardless of the key:value pair's value (including
               empty values).
            -  ``LABEL_KEY:VALUE_PREFIX``: Return all traces containing
               the specified label key (exact match, case-sensitive)
               whose value starts with ``VALUE_PREFIX``. Both a key and
               a value must be specified.
            -  ``+LABEL_KEY:VALUE``: Return all traces containing a
               key:value pair exactly matching the specified text. Both
               a key and a value must be specified.
            -  ``method:VALUE``: Equivalent to ``/http/method:VALUE``.
            -  ``url:VALUE``: Equivalent to ``/http/url:VALUE``.
        order_by (str):
            Optional. Field used to sort the returned traces. Can be
            one of the following:

            -  ``trace_id``
            -  ``name`` (``name`` field of root span in the trace)
            -  ``duration`` (difference between ``end_time`` and
               ``start_time`` fields of the root span)
            -  ``start`` (``start_time`` field of the root span)

            Descending order can be specified by appending ``desc`` to
            the sort field (for example, ``name desc``).

            Only one sort field is permitted.
    """
    class ViewType(proto.Enum):
        r"""Type of data returned for traces in the list."""
        VIEW_TYPE_UNSPECIFIED = 0
        MINIMAL = 1
        ROOTSPAN = 2
        COMPLETE = 3

    project_id = proto.Field(
        proto.STRING,
        number=1,
    )
    view = proto.Field(
        proto.ENUM,
        number=2,
        enum=ViewType,
    )
    page_size = proto.Field(
        proto.INT32,
        number=3,
    )
    page_token = proto.Field(
        proto.STRING,
        number=4,
    )
    start_time = proto.Field(
        proto.MESSAGE,
        number=5,
        message=timestamp_pb2.Timestamp,
    )
    end_time = proto.Field(
        proto.MESSAGE,
        number=6,
        message=timestamp_pb2.Timestamp,
    )
    filter = proto.Field(
        proto.STRING,
        number=7,
    )
    order_by = proto.Field(
        proto.STRING,
        number=8,
    )


class ListTracesResponse(proto.Message):
    r"""The response message for the ``ListTraces`` method.

    Attributes:
        traces (Sequence[google.cloud.trace_v1.types.Trace]):
            List of trace records as specified by the
            view parameter.
        next_page_token (str):
            If defined, indicates that there are more
            traces that match the request and that this
            value should be passed to the next request to
            continue retrieving additional traces.
    """

    @property
    def raw_page(self):
        return self

    traces = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message='Trace',
    )
    next_page_token = proto.Field(
        proto.STRING,
        number=2,
    )


class GetTraceRequest(proto.Message):
    r"""The request message for the ``GetTrace`` method.

    Attributes:
        project_id (str):
            Required. ID of the Cloud project where the
            trace data is stored.
        trace_id (str):
            Required. ID of the trace to return.
    """

    project_id = proto.Field(
        proto.STRING,
        number=1,
    )
    trace_id = proto.Field(
        proto.STRING,
        number=2,
    )


class PatchTracesRequest(proto.Message):
    r"""The request message for the ``PatchTraces`` method.

    Attributes:
        project_id (str):
            Required. ID of the Cloud project where the
            trace data is stored.
        traces (google.cloud.trace_v1.types.Traces):
            Required. The body of the message.
    """

    project_id = proto.Field(
        proto.STRING,
        number=1,
    )
    traces = proto.Field(
        proto.MESSAGE,
        number=2,
        message='Traces',
    )


__all__ = tuple(sorted(__protobuf__.manifest))
apache-2.0
8,235,140,834,633,207,000
32.270619
77
0.577736
false
4.285857
false
false
false
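The proto-plus message classes in the trace.py record above are plain data classes once the package is importable. Below is a minimal usage sketch, assuming the google-cloud-trace distribution is installed; the project ID, span values, and filter string are made-up examples, not values from the record:

from google.cloud.trace_v1 import types

# Build a span; span_id must be a non-zero 64-bit integer (assumed value).
span = types.TraceSpan(
    span_id=2205310701640571284,
    kind=types.TraceSpan.SpanKind.RPC_SERVER,
    name="/checkout",
    labels={"/http/method": "POST"},  # one of the predefined label keys
)

# Wrap it in a Trace; trace_id is a 32-character hex string.
trace = types.Trace(
    project_id="my-project",  # hypothetical project ID
    trace_id="382d4f4c6b7bb2f4a972559d9085001d",
    spans=[span],
)

# A ListTracesRequest using the filter syntax documented in the docstring.
request = types.ListTracesRequest(
    project_id="my-project",
    view=types.ListTracesRequest.ViewType.ROOTSPAN,
    filter="+root:/checkout latency:24ms",
)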
googleapis/googleapis-gen
google/cloud/bigquery/storage/v1beta1/bigquery-storage-v1beta1-py/google/cloud/bigquery/storage_v1beta1/services/big_query_storage/transports/base.py
1
10654
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources

import google.auth  # type: ignore
import google.api_core  # type: ignore
from google.api_core import exceptions as core_exceptions  # type: ignore
from google.api_core import gapic_v1  # type: ignore
from google.api_core import retry as retries  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.bigquery.storage_v1beta1.types import storage
from google.protobuf import empty_pb2  # type: ignore

try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-bigquery-storage',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()

try:
    # google.auth.__version__ was added in 1.26.0
    _GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
    try:  # try pkg_resources if it is available
        _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
    except pkg_resources.DistributionNotFound:  # pragma: NO COVER
        _GOOGLE_AUTH_VERSION = None


class BigQueryStorageTransport(abc.ABC):
    """Abstract transport class for BigQueryStorage."""

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/bigquery',
        'https://www.googleapis.com/auth/bigquery.readonly',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'bigquerystorage.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials is service account credentials, then always try
        # to use self signed JWT.
        if (always_use_jwt_access
                and isinstance(credentials, service_account.Credentials)
                and hasattr(service_account.Credentials,
                            "with_always_use_jwt_access")):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    # TODO(busunkim): This method is in the base transport
    # to avoid duplicating code across the transport classes. These functions
    # should be deleted once the minimum required versions of google-auth is
    # increased.

    # TODO: Remove this function once google-auth >= 1.25.0 is required
    @classmethod
    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""

        scopes_kwargs = {}

        if _GOOGLE_AUTH_VERSION and (
            packaging.version.parse(_GOOGLE_AUTH_VERSION)
            >= packaging.version.parse("1.25.0")
        ):
            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
        else:
            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}

        return scopes_kwargs

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.create_read_session: gapic_v1.method.wrap_method(
                self.create_read_session,
                default_retry=retries.Retry(
                    initial=0.1, maximum=60.0, multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.read_rows: gapic_v1.method.wrap_method(
                self.read_rows,
                default_retry=retries.Retry(
                    initial=0.1, maximum=60.0, multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=86400.0,
                ),
                default_timeout=86400.0,
                client_info=client_info,
            ),
            self.batch_create_read_session_streams: gapic_v1.method.wrap_method(
                self.batch_create_read_session_streams,
                default_retry=retries.Retry(
                    initial=0.1, maximum=60.0, multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.finalize_stream: gapic_v1.method.wrap_method(
                self.finalize_stream,
                default_retry=retries.Retry(
                    initial=0.1, maximum=60.0, multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.split_read_stream: gapic_v1.method.wrap_method(
                self.split_read_stream,
                default_retry=retries.Retry(
                    initial=0.1, maximum=60.0, multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
        }

    @property
    def create_read_session(self) -> Callable[
            [storage.CreateReadSessionRequest],
            Union[
                storage.ReadSession,
                Awaitable[storage.ReadSession]
            ]]:
        raise NotImplementedError()

    @property
    def read_rows(self) -> Callable[
            [storage.ReadRowsRequest],
            Union[
                storage.ReadRowsResponse,
                Awaitable[storage.ReadRowsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def batch_create_read_session_streams(self) -> Callable[
            [storage.BatchCreateReadSessionStreamsRequest],
            Union[
                storage.BatchCreateReadSessionStreamsResponse,
                Awaitable[storage.BatchCreateReadSessionStreamsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def finalize_stream(self) -> Callable[
            [storage.FinalizeStreamRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    @property
    def split_read_stream(self) -> Callable[
            [storage.SplitReadStreamRequest],
            Union[
                storage.SplitReadStreamResponse,
                Awaitable[storage.SplitReadStreamResponse]
            ]]:
        raise NotImplementedError()


__all__ = (
    'BigQueryStorageTransport',
)
apache-2.0
-6,510,122,743,847,912,000
39.819923
161
0.598085
false
4.489676
false
false
false
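The retry policies built in _prep_wrapped_messages above all follow one pattern: exponential backoff, retried only on specific exception types, bounded by an overall deadline. A standalone sketch of that pattern, assuming google-api-core is installed; read_rows_retry and fetch_rows are illustrative names, not part of the record:

from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries

# Same shape as the read_rows policy above: back off exponentially,
# retry only on ServiceUnavailable, and give up after 24 hours overall.
read_rows_retry = retries.Retry(
    initial=0.1,       # first backoff of 100 ms
    maximum=60.0,      # each backoff capped at 60 s
    multiplier=1.3,    # backoff growth factor
    predicate=retries.if_exception_type(
        core_exceptions.ServiceUnavailable,
    ),
    deadline=86400.0,  # total time budget in seconds
)

# A Retry object is also usable as a decorator, so any flaky callable
# can reuse the same policy:
@read_rows_retry
def fetch_rows():
    ...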
freeflightsim/fg-flying-club
flying-club.appspot.com/app/AuthHandler.py
1
4541
# -*- coding: utf-8 -*-

import os
import uuid
import datetime

from google.appengine.ext import webapp
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.api import mail
from google.appengine.ext.webapp import template
from django.utils import simplejson as json
from google.appengine.api import urlfetch
import urllib

import conf
import app.FlyingClub
import app.CoreHandler
from app.models import Comment, Crew


class AuthHandler(webapp.RequestHandler):

    ###################################################################################################
    ## Get Actions
    ###################################################################################################
    def get(self, section=None, page=None):

        #sessID = self.do_cookie_check()
        section = 'auth'
        template_vars = {}
        App = app.FlyingClub.FlyingClub(section, page)
        template_vars['app'] = App
        #tvars['appo'] = Appo
        #tvars['conf'] = conf
        #tvars['user'] = None
        #template_vars['crewID'] = crewID
        #f 'sessIdent' in self.request.cookies:
        #sessIdent = self.request.cookies['sessIdent']
        #lse:
        #	sessIdent = None

        ## Setup Section and Page
        #if section == None:
        #	section = "index"
        #template_vars['section'] = section
        #template_vars['page'] = page

        ## Get Comments
        q = db.GqlQuery("SELECT * FROM Comment " +
                        "WHERE section = :1 " +
                        "ORDER BY dated DESC",
                        section)
        results = q.fetch(50)
        #template_vars['comments'] = results

        ## Application Object
        #template_vars['page_title'] = Appo.title("/%s/" % section)

        ## Setup User + Auth
        #user = users.get_current_user()
        #if not user:
        #	template_vars['user'] = None
        #	template_vars['login_url'] = users.create_login_url("/set_session/")
        #else:
        #	template_vars['user'] = user
        #	template_vars['logout_url'] = users.create_logout_url("/subscribe/")

        ## Sign In Section
        #if section == 'ssignin' :
        #	if sessID:
        #		self.redirect("/profile/")
        #		return
        #template_vars['page_title'] = 'Sign In with OpenId'

        #if section == 'sdo_logout':
        #	cook_str = 'sessID=%s; expires=Fri, 31-Dec-1980 23:59:59 GMT; Path=/;' % ''
        #	self.response.headers.add_header('Set-Cookie', cook_str)
        #	self.redirect("/")
        #	return

        #if section == 'sprofile':
        #	if not sessID:
        #		self.redirect("/signin/")
        #		return
        #template_vars['welcome'] = True if self.request.get("welcome") == '1' else False
        #template_vars['page_title'] = 'My Profile'

        main_template = '%s.html' % (section)
        path = '/%s/' % (section)
        #template_vars['path'] = path

        template_path = os.path.join(os.path.dirname(__file__),
                                     '../templates/pages/%s' % main_template)
        self.response.out.write(template.render(template_path, template_vars))

    ###################################################################################################
    ## Post Actions
    ###################################################################################################
    def post(self, page=None):

        if page == 'rpx':
            token = self.request.get('token')
            url = 'https://rpxnow.com/api/v2/auth_info'
            args = {
                'format': 'json',
                'apiKey': conf.RPX_API_KEY,
                'token': token
            }
            r = urlfetch.fetch(
                url=url,
                payload=urllib.urlencode(args),
                method=urlfetch.POST,
                headers={'Content-Type': 'application/x-www-form-urlencoded'}
            )
            data = json.loads(r.content)

            if data['stat'] == 'ok':
                welcome = 0
                unique_identifier = data['profile']['identifier']

                q = db.GqlQuery("select * from Crew where ident= :1", unique_identifier)
                crew = q.get()
                if not crew:
                    crew = Crew(ident=unique_identifier)
                    crew.name = data['profile']['preferredUsername']
                    if data['profile'].has_key('email'):
                        crew.email = data['profile']['email']
                    crew.put()
                    welcome = 1
                    subject = "New Login: %s" % crew.name
                    body = "New login on schedule"
                else:
                    subject = "Return Login: %s" % crew.name
                    body = "New login on schedule"

                sessID = str(crew.key())
                cook_str = 'crewID=%s; expires=Fri, 31-Dec-2020 23:59:59 GMT; Path=/;' % crew.key().id()
                self.response.headers.add_header('Set-Cookie', cook_str)

                mail.send_mail(
                    sender=conf.EMAIL,
                    to="Dev <dev@freeflightsim.org>",
                    subject=subject,
                    body=body
                )
                self.redirect("/profile/?welcome=%s" % welcome)
                return
        else:
            print page
            #self.redirect("/")
gpl-2.0
-6,413,474,163,439,239,000
24.227778
100
0.566175
false
3.168876
false
false
false
amitdhiman000/dais
politics/migrations/0001_initial.py
1
4585
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-18 20:58
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('user', '0002_auto_20161215_0806'),
    ]

    operations = [
        migrations.CreateModel(
            name='Leader',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='*****', max_length=50)),
            ],
            options={
                'verbose_name_plural': 'Leaders',
                'verbose_name': 'Leader',
            },
        ),
        migrations.CreateModel(
            name='LegislativeConstituency',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='*****', max_length=50)),
            ],
            options={
                'verbose_name_plural': 'LegislativeConstituencies',
                'verbose_name': 'LegislativeConstituency',
            },
        ),
        migrations.CreateModel(
            name='MemberLegislative',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('constituency', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.LegislativeConstituency')),
                ('leader', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.Leader')),
            ],
            options={
                'verbose_name_plural': 'MemberLegislatives',
                'verbose_name': 'MemberLegislative',
            },
        ),
        migrations.CreateModel(
            name='MemberParliament',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
            options={
                'verbose_name_plural': 'MemberParliaments',
                'verbose_name': 'MemberParliament',
            },
        ),
        migrations.CreateModel(
            name='ParliamentConstituency',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='*****', max_length=50)),
                ('lc', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.LegislativeConstituency')),
                ('state', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.State')),
            ],
            options={
                'verbose_name_plural': 'ParliamentConstituencies',
                'verbose_name': 'ParliamentConstituency',
            },
        ),
        migrations.CreateModel(
            name='Party',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('short_name', models.CharField(default='***', max_length=10)),
                ('full_name', models.CharField(default='*****', max_length=50)),
            ],
            options={
                'verbose_name_plural': 'Parties',
                'verbose_name': 'Party',
            },
        ),
        migrations.AddField(
            model_name='memberparliament',
            name='constituency',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.ParliamentConstituency'),
        ),
        migrations.AddField(
            model_name='memberparliament',
            name='leader',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.Leader'),
        ),
        migrations.AddField(
            model_name='legislativeconstituency',
            name='pc',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.ParliamentConstituency'),
        ),
        migrations.AddField(
            model_name='legislativeconstituency',
            name='state',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.State'),
        ),
        migrations.AddField(
            model_name='leader',
            name='party',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.Party'),
        ),
    ]
apache-2.0
-7,740,220,461,772,860,000
39.9375
136
0.551145
false
4.198718
false
false
false
gandelman-a/neutron-lbaas
neutron_lbaas/tests/tempest/lib/common/utils/linux/remote_client.py
2
6497
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
import re
import time

import six

from neutron_lbaas.tests.tempest.lib import config
from neutron_lbaas.tests.tempest.lib import exceptions
from neutron_lbaas.tests.tempest.lib.common import ssh

CONF = config.CONF


class RemoteClient(object):

    # NOTE(afazekas): It should always get an address instead of server
    def __init__(self, server, username, password=None, pkey=None):
        ssh_timeout = CONF.compute.ssh_timeout
        network = CONF.compute.network_for_ssh
        ip_version = CONF.compute.ip_version_for_ssh
        ssh_channel_timeout = CONF.compute.ssh_channel_timeout
        if isinstance(server, six.string_types):
            ip_address = server
        else:
            addresses = server['addresses'][network]
            for address in addresses:
                if address['version'] == ip_version:
                    ip_address = address['addr']
                    break
            else:
                raise exceptions.ServerUnreachable()
        self.ssh_client = ssh.Client(ip_address, username, password,
                                     ssh_timeout, pkey=pkey,
                                     channel_timeout=ssh_channel_timeout)

    def exec_command(self, cmd):
        # Shell options below add more clearness on failures,
        # path is extended for some non-cirros guest oses (centos7)
        cmd = "set -eu -o pipefail; PATH=$PATH:/sbin; " + cmd
        return self.ssh_client.exec_command(cmd)

    def validate_authentication(self):
        """Validate ssh connection and authentication

        This method raises an Exception when the validation fails.
        """
        self.ssh_client.test_connection_auth()

    def hostname_equals_servername(self, expected_hostname):
        # Get host name using command "hostname"
        actual_hostname = self.exec_command("hostname").rstrip()
        return expected_hostname == actual_hostname

    def get_ram_size_in_mb(self):
        output = self.exec_command('free -m | grep Mem')
        if output:
            return output.split()[1]

    def get_number_of_vcpus(self):
        command = 'cat /proc/cpuinfo | grep processor | wc -l'
        output = self.exec_command(command)
        return int(output)

    def get_partitions(self):
        # Return the contents of /proc/partitions
        command = 'cat /proc/partitions'
        output = self.exec_command(command)
        return output

    def get_boot_time(self):
        cmd = 'cut -f1 -d. /proc/uptime'
        boot_secs = self.exec_command(cmd)
        boot_time = time.time() - int(boot_secs)
        return time.localtime(boot_time)

    def write_to_console(self, message):
        message = re.sub("([$\\`])", "\\\\\\\\\\1", message)
        # usually to /dev/ttyS0
        cmd = 'sudo sh -c "echo \\"%s\\" >/dev/console"' % message
        return self.exec_command(cmd)

    def ping_host(self, host, count=CONF.compute.ping_count,
                  size=CONF.compute.ping_size):
        addr = netaddr.IPAddress(host)
        cmd = 'ping6' if addr.version == 6 else 'ping'
        cmd += ' -c{0} -w{0} -s{1} {2}'.format(count, size, host)
        return self.exec_command(cmd)

    def get_mac_address(self):
        cmd = "ip addr | awk '/ether/ {print $2}'"
        return self.exec_command(cmd)

    def get_nic_name(self, address):
        cmd = "ip -o addr | awk '/%s/ {print $2}'" % address
        return self.exec_command(cmd)

    def get_ip_list(self):
        cmd = "ip address"
        return self.exec_command(cmd)

    def assign_static_ip(self, nic, addr):
        cmd = "sudo ip addr add {ip}/{mask} dev {nic}".format(
            ip=addr, mask=CONF.network.tenant_network_mask_bits,
            nic=nic
        )
        return self.exec_command(cmd)

    def turn_nic_on(self, nic):
        cmd = "sudo ip link set {nic} up".format(nic=nic)
        return self.exec_command(cmd)

    def get_pids(self, pr_name):
        # Get pid(s) of a process/program
        cmd = "ps -ef | grep %s | grep -v 'grep' | awk {'print $1'}" % pr_name
        return self.exec_command(cmd).split('\n')

    def get_dns_servers(self):
        cmd = 'cat /etc/resolv.conf'
        resolve_file = self.exec_command(cmd).strip().split('\n')
        entries = (l.split() for l in resolve_file)
        dns_servers = [l[1] for l in entries
                       if len(l) and l[0] == 'nameserver']
        return dns_servers

    def send_signal(self, pid, signum):
        cmd = 'sudo /bin/kill -{sig} {pid}'.format(pid=pid, sig=signum)
        return self.exec_command(cmd)

    def _renew_lease_udhcpc(self, fixed_ip=None):
        """Renews DHCP lease via udhcpc client. """
        file_path = '/var/run/udhcpc.'
        nic_name = self.get_nic_name(fixed_ip)
        nic_name = nic_name.strip().lower()
        pid = self.exec_command('cat {path}{nic}.pid'.
                                format(path=file_path, nic=nic_name))
        pid = pid.strip()
        self.send_signal(pid, 'USR1')

    def _renew_lease_dhclient(self, fixed_ip=None):
        """Renews DHCP lease via dhclient client. """
        cmd = "sudo /sbin/dhclient -r && sudo /sbin/dhclient"
        self.exec_command(cmd)

    def renew_lease(self, fixed_ip=None):
        """Wrapper method for renewing DHCP lease via given client

        Supporting:
        * udhcpc
        * dhclient
        """
        # TODO(yfried): add support for dhcpcd
        supported_clients = ['udhcpc', 'dhclient']
        dhcp_client = CONF.scenario.dhcp_client
        if dhcp_client not in supported_clients:
            raise exceptions.InvalidConfiguration('%s DHCP client unsupported'
                                                  % dhcp_client)
        if dhcp_client == 'udhcpc' and not fixed_ip:
            raise ValueError("need to set 'fixed_ip' for udhcpc client")
        return getattr(self, '_renew_lease_' + dhcp_client)(fixed_ip=fixed_ip)
apache-2.0
-1,462,144,387,955,705,300
37.217647
78
0.59843
false
3.725344
false
false
false
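A hedged usage sketch for the RemoteClient above, driving it against a raw IP address (the branch that skips the server['addresses'] lookup). The address, username, and key path are placeholder values, and a tempest-style config must already be loaded so the CONF lookups in __init__ succeed:

# Placeholder credentials; in a real run these come from test config.
with open("id_rsa") as f:              # hypothetical private key file
    key_material = f.read()

client = RemoteClient("192.0.2.10", "cirros", pkey=key_material)
client.validate_authentication()       # raises if SSH auth fails

print(client.get_number_of_vcpus())    # e.g. 1
print(client.get_dns_servers())        # e.g. ['192.0.2.1']
print(client.ping_host("192.0.2.1"))   # ping output from the guest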
zxjsdp/NodeFinderGUI
setup.py
1
1280
from setuptools import setup, find_packages
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='NodeFinderGUI',
    version='0.5.0',
    description=('GUI Tool for node related operations in '
                 'phylogenetic analyses.'),
    author='Haofei Jin',
    author_email='zxjsdp@gmail.com',
    url='https://github.com/zxjsdp/NodeFinderGUI',
    license='Apache',
    keywords='node phylogenetic tools calibration clade',
    packages=['nodefinder_gui'],
    install_requires=[],
    # $ pip install -e .[dev,test]
    extras_require={
        'dev': ['pytest', 'tox', 'sphinx'],
        'test': ['pytest'],
    },
    long_description=long_description,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
)
apache-2.0
-9,152,846,158,618,841,000
31.820513
63
0.609375
false
3.914373
false
false
false
AndiDog/git-cola
cola/widgets/merge.py
1
8995
from __future__ import division, absolute_import, unicode_literals

from qtpy import QtWidgets
from qtpy.QtCore import Qt

from ..i18n import N_
from ..interaction import Interaction
from ..qtutils import get
from .. import cmds
from .. import icons
from .. import qtutils
from . import completion
from . import standard
from . import defs


def local_merge(context):
    """Provides a dialog for merging branches"""
    view = Merge(context, qtutils.active_window())
    view.show()
    view.raise_()
    return view


class Merge(standard.Dialog):
    """Provides a dialog for merging branches."""

    def __init__(self, context, parent=None, ref=None):
        standard.Dialog.__init__(self, parent=parent)
        self.context = context
        self.cfg = cfg = context.cfg
        self.model = model = context.model
        if parent is not None:
            self.setWindowModality(Qt.WindowModal)

        # Widgets
        self.title_label = QtWidgets.QLabel()
        self.revision_label = QtWidgets.QLabel()
        self.revision_label.setText(N_('Revision to Merge'))

        self.revision = completion.GitRefLineEdit(context)
        self.revision.setToolTip(N_('Revision to Merge'))
        if ref:
            self.revision.set_value(ref)

        self.radio_local = qtutils.radio(text=N_('Local Branch'),
                                         checked=True)
        self.radio_remote = qtutils.radio(text=N_('Tracking Branch'))
        self.radio_tag = qtutils.radio(text=N_('Tag'))

        self.revisions = QtWidgets.QListWidget()
        self.revisions.setAlternatingRowColors(True)

        self.button_viz = qtutils.create_button(text=N_('Visualize'),
                                                icon=icons.visualize())

        tooltip = N_('Squash the merged commits into a single commit')
        self.checkbox_squash = qtutils.checkbox(text=N_('Squash'),
                                                tooltip=tooltip)

        tooltip = N_('Always create a merge commit when enabled, '
                     'even when the merge is a fast-forward update')
        self.checkbox_noff = qtutils.checkbox(text=N_('No fast forward'),
                                              tooltip=tooltip, checked=False)
        self.checkbox_noff_state = False

        tooltip = N_('Commit the merge if there are no conflicts.  '
                     'Uncheck to leave the merge uncommitted')
        self.checkbox_commit = qtutils.checkbox(text=N_('Commit'),
                                                tooltip=tooltip, checked=True)
        self.checkbox_commit_state = True

        text = N_('Create Signed Commit')
        checked = cfg.get('cola.signcommits', False)
        tooltip = N_('GPG-sign the merge commit')
        self.checkbox_sign = qtutils.checkbox(text=text, checked=checked,
                                              tooltip=tooltip)
        self.button_close = qtutils.close_button()

        icon = icons.merge()
        self.button_merge = qtutils.create_button(text=N_('Merge'), icon=icon,
                                                  default=True)

        # Layouts
        self.revlayt = qtutils.hbox(defs.no_margin, defs.spacing,
                                    self.revision_label, self.revision,
                                    qtutils.STRETCH, self.title_label)

        self.radiolayt = qtutils.hbox(defs.no_margin, defs.spacing,
                                      self.radio_local, self.radio_remote,
                                      self.radio_tag)

        self.buttonlayt = qtutils.hbox(defs.no_margin, defs.button_spacing,
                                       self.button_close, qtutils.STRETCH,
                                       self.checkbox_squash,
                                       self.checkbox_noff,
                                       self.checkbox_commit,
                                       self.checkbox_sign,
                                       self.button_viz, self.button_merge)

        self.mainlayt = qtutils.vbox(defs.margin, defs.spacing,
                                     self.radiolayt, self.revisions,
                                     self.revlayt, self.buttonlayt)
        self.setLayout(self.mainlayt)

        # Signal/slot connections
        # pylint: disable=no-member
        self.revision.textChanged.connect(self.update_title)
        self.revision.enter.connect(self.merge_revision)
        self.revisions.itemSelectionChanged.connect(self.revision_selected)
        qtutils.connect_released(self.radio_local, self.update_revisions)
        qtutils.connect_released(self.radio_remote, self.update_revisions)
        qtutils.connect_released(self.radio_tag, self.update_revisions)
        qtutils.connect_button(self.button_merge, self.merge_revision)
        qtutils.connect_button(self.checkbox_squash, self.toggle_squash)
        qtutils.connect_button(self.button_viz, self.viz_revision)
        qtutils.connect_button(self.button_close, self.reject)

        # Observer messages
        model.add_observer(model.message_updated, self.update_all)
        self.update_all()

        self.init_size(parent=parent)
        self.revision.setFocus()

    def update_all(self):
        """Set the branch name for the window title and label."""
        self.update_title()
        self.update_revisions()

    def update_title(self, _txt=None):
        branch = self.model.currentbranch
        revision = self.revision.text()
        if revision:
            txt = (N_('Merge "%(revision)s" into "%(branch)s"') %
                   dict(revision=revision, branch=branch))
        else:
            txt = N_('Merge into "%s"') % branch
        self.button_merge.setEnabled(bool(revision))
        self.title_label.setText(txt)
        self.setWindowTitle(txt)

    def toggle_squash(self):
        """Toggles the commit checkbox based on the squash checkbox."""
        if get(self.checkbox_squash):
            self.checkbox_commit_state = self.checkbox_commit.checkState()
            self.checkbox_commit.setCheckState(Qt.Unchecked)
            self.checkbox_commit.setDisabled(True)
            self.checkbox_noff_state = self.checkbox_noff.checkState()
            self.checkbox_noff.setCheckState(Qt.Unchecked)
            self.checkbox_noff.setDisabled(True)
        else:
            self.checkbox_noff.setDisabled(False)
            oldstateff = self.checkbox_noff_state
            self.checkbox_noff.setCheckState(oldstateff)
            self.checkbox_commit.setDisabled(False)
            oldstate = self.checkbox_commit_state
            self.checkbox_commit.setCheckState(oldstate)

    def update_revisions(self):
        """Update the revision list whenever a radio button is clicked"""
        self.revisions.clear()
        self.revisions.addItems(self.current_revisions())

    def revision_selected(self):
        """Update the revision field when a list item is selected"""
        revlist = self.current_revisions()
        widget = self.revisions
        revision = qtutils.selected_item(widget, revlist)
        if revision is not None:
            self.revision.setText(revision)

    def current_revisions(self):
        """Retrieve candidate items to merge"""
        if get(self.radio_local):
            return self.model.local_branches
        elif get(self.radio_remote):
            return self.model.remote_branches
        elif get(self.radio_tag):
            return self.model.tags
        return []

    def viz_revision(self):
        """Launch a gitk-like viewer on the selected revision"""
        revision = self.revision.text()
        if not revision:
            Interaction.information(
                N_('No Revision Specified'),
                N_('You must specify a revision to view.'))
            return
        cmds.do(cmds.VisualizeRevision, self.context, revision)

    def merge_revision(self):
        """Merge the selected revision/branch"""
        revision = self.revision.text()
        if not revision:
            Interaction.information(
                N_('No Revision Specified'),
                N_('You must specify a revision to merge.'))
            return

        noff = get(self.checkbox_noff)
        no_commit = not get(self.checkbox_commit)
        squash = get(self.checkbox_squash)
        sign = get(self.checkbox_sign)
        context = self.context
        cmds.do(cmds.Merge, context, revision, no_commit, squash, noff, sign)
        self.accept()

    def export_state(self):
        """Export persistent settings"""
        state = super(Merge, self).export_state()
        state['no-ff'] = get(self.checkbox_noff)
        state['sign'] = get(self.checkbox_sign)
        state['commit'] = get(self.checkbox_commit)
        return state

    def apply_state(self, state):
        """Apply persistent settings"""
        result = super(Merge, self).apply_state(state)
        self.checkbox_noff.setChecked(state.get('no-ff', False))
        self.checkbox_sign.setChecked(state.get('sign', False))
        self.checkbox_commit.setChecked(state.get('commit', True))
        return result
gpl-2.0
5,567,192,275,756,753,000
39.518018
80
0.597332
false
4.236929
false
false
false
tyiannak/pySLRF
slrf.py
1
4205
import numpy
import scipy.signal
import scipy.interpolate
from matplotlib import pyplot as plt
from breezylidar import URG04LX


def flags2segs(Flags, window):
    '''
    ARGUMENTS:
     - Flags:    a sequence of class flags (per time window)
     - window:   window duration (in seconds)

    RETURNS:
     - segs:     a sequence of segment limits: segs[i, 0] and segs[i, 1]
                 are the start and end points of segment i
     - classes:  a sequence of class flags: classes[i] is the class ID of
                 the i-th segment
    '''
    preFlag = 0
    curFlag = 0
    numOfSegments = 0

    curVal = Flags[curFlag]
    segsList = []
    classes = []
    while (curFlag < len(Flags) - 1):
        stop = 0
        preFlag = curFlag
        preVal = curVal
        while (stop == 0):
            curFlag = curFlag + 1
            tempVal = Flags[curFlag]
            if ((tempVal != curVal) | (curFlag == len(Flags) - 1)):  # stop
                numOfSegments = numOfSegments + 1
                stop = 1
                curSegment = curVal
                curVal = Flags[curFlag]
                segsList.append((curFlag * window))
                classes.append(preVal)
    segs = numpy.zeros((len(segsList), 2))
    for i in range(len(segsList)):
        if i > 0:
            segs[i, 0] = segsList[i - 1]
        segs[i, 1] = segsList[i]
    return (segs, classes)


def preProcess(angleRange, Scan):
    Scan = numpy.array(Scan)
    Scan = scipy.signal.medfilt(Scan, 3)
    Scan = scipy.signal.medfilt(Scan, 5)
    #f = scipy.interpolate.interp1d(angleRange, Scan, kind='cubic')
    I = Scan == 0
    segs, classes = flags2segs(I, 1)
    Scan2 = numpy.copy(Scan)
    for i in range(1, segs.shape[0] - 1):
        if classes[i]:
            a1 = angleRange[segs[i - 1, 0]:segs[i - 1, 1]]
            a2 = angleRange[segs[i + 1, 0]:segs[i + 1, 1]]
            a1 = a1[-1::]
            a2 = a2[0:1]
            A = numpy.concatenate((a1, a2))
            b1 = Scan[segs[i - 1, 0]:segs[i - 1, 1]]
            b2 = Scan[segs[i + 1, 0]:segs[i + 1, 1]]
            b1 = b1[-1::]
            b2 = b2[0:1]
            B = numpy.concatenate((b1, b2))
            #f = scipy.interpolate.interp1d(A, B, kind='cubic')
            f = scipy.interpolate.interp1d(A, B)
            Scan2[segs[i, 0]: segs[i, 1]] = f(angleRange[segs[i, 0]: segs[i, 1]])
    Scan2[Scan2 < 0] = 0
    Scan2 = scipy.signal.medfilt(Scan2, 3)
    Scan2 = scipy.signal.medfilt(Scan2, 5)
    return Scan, Scan2


laser = URG04LX('/dev/ttyACM0')
count = 0
angleRange = numpy.arange(-120, 120, 0.352)
print angleRange.shape
plt.figure(figsize=(6 * 3.13, 4 * 3.13))
while True:
    count += 1
    Scan = laser.getScan()
    Scan, Scan2 = preProcess(angleRange, Scan)
    if count == 1:
        diffScan = numpy.zeros(Scan.shape)
        diffScan2 = numpy.zeros(Scan2.shape)
    else:
        diffScan = numpy.abs(Scan - ScanPrev)
        diffScan2 = numpy.abs(Scan2 - ScanPrev2)
    diffScan = scipy.signal.medfilt(diffScan, 3)
    diffScan = scipy.signal.medfilt(diffScan, 15)
    diffScan2 = scipy.signal.medfilt(diffScan2, 3)
    diffScan2 = scipy.signal.medfilt(diffScan2, 15)

    X = numpy.cos(numpy.deg2rad(angleRange)) * Scan
    Y = numpy.sin(numpy.deg2rad(angleRange)) * Scan
    X2 = numpy.cos(numpy.deg2rad(angleRange)) * Scan2
    Y2 = numpy.sin(numpy.deg2rad(angleRange)) * Scan2

    plt.clf()
    ax = plt.subplot(2, 3, 1)
    plt.plot(angleRange, Scan)
    plt.plot(angleRange, Scan2, 'r')
    plt.title(count)
    plt.ylim([-120, 120])
    plt.ylim([0, 6000])
    ax.set_ylim([0, 6000])
    ax = plt.subplot(2, 3, 2, aspect='equal')
    plt.plot(X, Y, '*')
    ax.set_xlim([-3000, 3000])
    ax.set_ylim([-3000, 3000])
    ax = plt.subplot(2, 3, 3, aspect='equal')
    plt.plot(X2, Y2, '*')
    ax.set_xlim([-3000, 3000])
    ax.set_ylim([-3000, 3000])
    ax = plt.subplot(2, 3, 4)
    plt.plot(angleRange, diffScan)
    plt.plot(angleRange, diffScan2, 'r')
    plt.title(count)
    plt.ylim([-120, 120])
    plt.ylim([0, 6000])
    ax.set_ylim([0, 6000])
    plt.draw()
    plt.show(block=False)

    ScanPrev = Scan
    ScanPrev2 = Scan2
apache-2.0
1,125,670,961,743,430,300
28.612676
117
0.557669
false
3.05374
false
false
false
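flags2segs() in the slrf.py record above converts a per-window flag sequence into (start, end) segment limits plus one class label per run. A quick sanity check, assuming the function is imported without executing the script body (which opens /dev/ttyACM0); the expected output in the comments follows from tracing the loop by hand:

flags = [0, 0, 0, 1, 1, 0, 0]   # two runs of 0s around a run of 1s
segs, classes = flags2segs(flags, window=1)

print(segs)     # [[0. 3.] [3. 5.] [5. 6.]] -- per-run [start, end]
print(classes)  # [0, 1, 0] -- the flag value of each run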
cbetheridge/simpleclassroom
views/views.py
1
4252
""" Contains all of the page view handlers for the app. These handlers should be GET handlers indended for the serving of HTTP. AJAX or otherwise action-based handlers should be stored in another module. """ import json from django.core.urlresolvers import reverse from django.http import HttpResponse from django.shortcuts import redirect from django.template import RequestContext from django.template.loader import get_template from django.utils import html from django.views.decorators.csrf import ensure_csrf_cookie from django.views.decorators.http import require_GET from models.classroom import Classroom from models.classroom import Student def _make_class_anchor_html(class_data): """Creates an escaped html anchor tag for a link. Args: class_data: A dick with keys [class_id, class_name] Returns: A string html anchor tag, rendered safe from html injection attacks. """ classroom_url = reverse(display_students) anchor_text = '<a href={}?Id={}>{}</a>'.format( classroom_url, class_data['class_id'], html.escape(class_data['class_name'])) return anchor_text @ensure_csrf_cookie @require_GET def display_classrooms(request): """Displays a list of Classrooms. Args: request: django.http.HttpRequest object. Returns: An django.http.HttpResponse object. """ template = get_template('classrooms.html') db_classes = Classroom.objects.all() classes_repr = [c.get_jsonable_repr() for c in db_classes] db_students = Student.objects.all() students_list = [] for student in db_students: students_list.append({'id': student.pk, 'name': student.full_name}) context = RequestContext(request, { 'stored_classes': json.dumps(classes_repr), 'stored_students': json.dumps(students_list)}) return HttpResponse(template.render(context)) @ensure_csrf_cookie @require_GET def display_students(request): """Displays a list of Students. GET data params: id: (Optional) A classroom ID number. If 'id' is not set or if 'id' is set to the string 'all', an HTTPResponse of all students will be returned. Args: request: django.http.HttpRequest object Returns: An django.http.HttpResponse object. """ template = get_template('student_list.html') params = request.GET if request.GET else None s_query = Student.objects.all() if (not params or 'Id' not in params or str(params['Id']).lower() == 'all'): class_name = 'All Classes' class_desc = 'All Classes' else: # TODO(cbetheridge@gmail.com): Better handling of one vs many ID params. class_ids = [params['Id']] s_query = s_query.filter(membership__classroom__pk__in=class_ids) class_objs = Classroom.objects.filter(pk__in=class_ids) if len(class_ids) > 1: class_a_names = [] for classroom in class_objs: class_data = classroom.get_jsonable_repr() class_a_names.append(_make_class_anchor_html(class_data)) class_name = ', '.join(class_a_names) class_desc = 'Multiple' else: class_name = class_objs[0].name class_desc = class_objs[0].desc s_query = s_query.order_by('pk') s_query = s_query.distinct() students = [s.get_jsonable_repr() for s in list(s_query)] context = RequestContext(request, { 'class_name': class_name, 'class_desc': class_desc, 'class_student_data': json.dumps(students)}) return HttpResponse(template.render(context)) @ensure_csrf_cookie @require_GET def display_student_details(request): """Displays detailed information about a student. GET data params: Id: A student ID number. Args: request: django.http.HttpRequest object """ template = get_template('student_details.html') params = request.GET if request.GET else None # TODO(cbetheridge@gmail.com): standardize IDs to lower case. 
if (not params or 'Id' not in params or not params['Id']): return redirect('student list') student_obj = Student.objects.get(pk=params['Id']) student_data = student_obj.get_jsonable_repr() classes = [_make_class_anchor_html(c) for c in student_obj.class_list] student_data['class_list'] = classes context = RequestContext(request, student_data) return HttpResponse(template.render(context))
mit
-7,785,976,437,739,663,000
28.123288
79
0.699671
false
3.60339
false
false
false
ARM-software/armnn
python/pyarmnn/examples/example_utils.py
1
6828
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT

from urllib.parse import urlparse
import os
from PIL import Image
import pyarmnn as ann
import numpy as np
import requests
import argparse
import warnings


def parse_command_line(desc: str = ""):
    """Adds arguments to the script.

    Args:
        desc (str): Script description.

    Returns:
        Namespace: Arguments to the script command.
    """
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("-v", "--verbose", help="Increase output verbosity",
                        action="store_true")
    return parser.parse_args()


def __create_network(model_file: str, backends: list, parser=None):
    """Creates a network based on a file and parser type.

    Args:
        model_file (str): Path of the model file.
        backends (list): List of backends to use when running inference.
        parser_type: Parser instance. (pyarmnn.ITFliteParser/pyarmnn.IOnnxParser...)

    Returns:
        int: Network ID.
        int: Graph ID.
        IParser: TF Lite parser instance.
        IRuntime: Runtime object instance.
    """
    args = parse_command_line()
    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    if parser is None:
        # try to determine what parser to create based on model extension
        _, ext = os.path.splitext(model_file)
        if ext == ".onnx":
            parser = ann.IOnnxParser()
        elif ext == ".tflite":
            parser = ann.ITfLiteParser()
    assert (parser is not None)

    network = parser.CreateNetworkFromBinaryFile(model_file)

    preferred_backends = []
    for b in backends:
        preferred_backends.append(ann.BackendId(b))

    opt_network, messages = ann.Optimize(network, preferred_backends,
                                         runtime.GetDeviceSpec(),
                                         ann.OptimizerOptions())
    if args.verbose:
        for m in messages:
            warnings.warn(m)

    net_id, w = runtime.LoadNetwork(opt_network)
    if args.verbose and w:
        warnings.warn(w)

    return net_id, parser, runtime


def create_tflite_network(model_file: str, backends: list = ['CpuAcc', 'CpuRef']):
    """Creates a network from a tflite model file.

    Args:
        model_file (str): Path of the model file.
        backends (list): List of backends to use when running inference.

    Returns:
        int: Network ID.
        int: Graph ID.
        ITFliteParser: TF Lite parser instance.
        IRuntime: Runtime object instance.
    """
    net_id, parser, runtime = __create_network(model_file, backends,
                                               ann.ITfLiteParser())
    graph_id = parser.GetSubgraphCount() - 1

    return net_id, graph_id, parser, runtime


def create_onnx_network(model_file: str, backends: list = ['CpuAcc', 'CpuRef']):
    """Creates a network from an onnx model file.

    Args:
        model_file (str): Path of the model file.
        backends (list): List of backends to use when running inference.

    Returns:
        int: Network ID.
        IOnnxParser: ONNX parser instance.
        IRuntime: Runtime object instance.
    """
    return __create_network(model_file, backends, ann.IOnnxParser())


def preprocess_default(img: Image, width: int, height: int, data_type,
                       scale: float, mean: list, stddev: list):
    """Default preprocessing image function.

    Args:
        img (PIL.Image): PIL.Image object instance.
        width (int): Width to resize to.
        height (int): Height to resize to.
        data_type: Data Type to cast the image to.
        scale (float): Scaling value.
        mean (list): RGB mean offset.
        stddev (list): RGB standard deviation.

    Returns:
        np.array: Resized and preprocessed image.
    """
    img = img.resize((width, height), Image.BILINEAR)
    img = img.convert('RGB')
    img = np.array(img)
    img = np.reshape(img, (-1, 3))  # reshape to [RGB][RGB]...
    img = ((img / scale) - mean) / stddev
    img = img.flatten().astype(data_type)

    return img


def load_images(image_files: list, input_width: int, input_height: int,
                data_type=np.uint8, scale: float = 1.,
                mean: list = [0., 0., 0.], stddev: list = [1., 1., 1.],
                preprocess_fn=preprocess_default):
    """Loads images, resizes and performs any additional preprocessing to run
    inference.

    Args:
        img (list): List of PIL.Image object instances.
        input_width (int): Width to resize to.
        input_height (int): Height to resize to.
        data_type: Data Type to cast the image to.
        scale (float): Scaling value.
        mean (list): RGB mean offset.
        stddev (list): RGB standard deviation.
        preprocess_fn: Preprocessing function.

    Returns:
        np.array: Resized and preprocessed images.
    """
    images = []
    for i in image_files:
        img = Image.open(i)
        img = preprocess_fn(img, input_width, input_height,
                            data_type, scale, mean, stddev)
        images.append(img)
    return images


def load_labels(label_file: str):
    """Loads a labels file containing a label per line.

    Args:
        label_file (str): Labels file path.

    Returns:
        list: List of labels read from a file.
    """
    with open(label_file, 'r') as f:
        labels = [l.rstrip() for l in f]
        return labels


def print_top_n(N: int, results: list, labels: list, prob: list):
    """Prints TOP-N results

    Args:
        N (int): Result count to print.
        results (list): Top prediction indices.
        labels (list): A list of labels for every class.
        prob (list): A list of probabilities for every class.

    Returns:
        None
    """
    assert (len(results) >= 1 and len(results) == len(labels) == len(prob))
    for i in range(min(len(results), N)):
        print("class={0} ; value={1}".format(labels[results[i]],
                                             prob[results[i]]))


def download_file(url: str, force: bool = False, filename: str = None,
                  dest: str = "tmp"):
    """Downloads a file.

    Args:
        url (str): File url.
        force (bool): Forces to download the file even if it exists.
        filename (str): Renames the file when set.

    Returns:
        str: Path to the downloaded file.
    """
    if filename is None:  # extract filename from url when None
        filename = urlparse(url)
        filename = os.path.basename(filename.path)

    if dest is not None:
        if not os.path.exists(dest):
            os.makedirs(dest)
        filename = os.path.join(dest, filename)

    print("Downloading '{0}' from '{1}' ...".format(filename, url))
    if not os.path.exists(filename) or force is True:
        r = requests.get(url)
        with open(filename, 'wb') as f:
            f.write(r.content)
        print("Finished.")
    else:
        print("File already exists.")

    return filename
mit
-5,464,050,138,903,942,000
29.895928
96
0.616139
false
3.89726
false
false
false
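A hedged end-to-end sketch of the helpers in the example_utils.py record above for a TFLite classifier; every path, URL, and input shape here is a placeholder, not a real artifact:

# Fetch a model and labels (placeholder URL and file names).
model_path = download_file("https://example.com/mobilenet.tflite", dest="tmp")
labels = load_labels("labels.txt")

# Parse, optimize, and load the network onto the preferred backends.
net_id, graph_id, parser, runtime = create_tflite_network(model_path)

# A 224x224 uint8 input is assumed; adjust to the model's real input shape.
images = load_images(["cat.png"], input_width=224, input_height=224)

# After running inference to obtain top indices `results` and scores `prob`:
# print_top_n(3, results, labels, prob)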
symbooglix/boogie-runner
prepare-smoke-tests.py
1
6375
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
"""
Script to run a Symbooglix's AxiomAndEntryRequiresCheckTransformPass pass
on a set of boogie programs (from a program List) in preparation for
running a smoke test to check that all the assumptions leading to an
entry point are satisfiable.
"""
import argparse
import logging
import multiprocessing
import os
import pprint
from BoogieRunner import ProgramListLoader
from BoogieRunner import EntryPointFinder
import traceback
import yaml
import signal
import sys
import subprocess

_logger = None
futureToRunner = None

def handleInterrupt(signum, frame):
  logging.info('Received signal {}'.format(signum))
  if futureToRunner != None:
    cancel(futureToRunner)

def entryPoint(args):
  global _logger, futureToRunner
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument("-l", "--log-level", type=str, default="info",
                      dest="log_level",
                      choices=['debug', 'info', 'warning', 'error'])
  parser.add_argument("--rprefix", default=os.getcwd(),
                      help="Prefix for relative paths for program_list")
  parser.add_argument("-j", "--jobs", default=multiprocessing.cpu_count(),
                      type=int, help="jobs to run in parallel")
  parser.add_argument("input_program_list",
                      help="File containing list of Boogie programs")
  parser.add_argument("output_dir",
                      help="Directory to create working transformed programs in")
  parser.add_argument("output_program_list")
  parser.add_argument("--spr-path", dest='spr_path', required=True,
                      help="Path to Symbooglix pass runner tool (spr)")

  # Options to set the entry point
  group = parser.add_mutually_exclusive_group(required=True)
  group.add_argument("--entry-point", dest='entry_point', default=None,
                     help="Entry point name")
  group.add_argument("--entry-point-from-bool-attribute",
                     dest='entry_point_from_bool_attribute', default=None,
                     help="Get entry point from bool attribute on procedure "
                          "e.g. {:entry_point}")

  pargs = parser.parse_args(args)

  logLevel = getattr(logging, pargs.log_level.upper(), None)
  if logLevel == logging.DEBUG:
    logFormat = ('%(levelname)s:%(threadName)s: %(filename)s:%(lineno)d '
                 '%(funcName)s() : %(message)s')
  else:
    logFormat = '%(levelname)s:%(threadName)s: %(message)s'

  logging.basicConfig(level=logLevel, format=logFormat)
  _logger = logging.getLogger(__name__)

  if pargs.jobs < 1:
    _logger.error('Jobs must be >= 1')
    return 1

  # Check paths that must exist
  for pathToCheck in [pargs.input_program_list, pargs.spr_path]:
    if not os.path.exists(pathToCheck):
      _logger.error('"{}" does not exist'.format(pathToCheck))
      return 1

  # Check paths that must not already exist
  for pathToCheck in [pargs.output_program_list, pargs.output_dir]:
    if os.path.exists(pathToCheck):
      _logger.error('Refusing to overwrite "{}"'.format(pathToCheck))
      return 1

  # Load list of programs
  programList = None
  try:
    _logger.debug('Loading program_list from "{}"'.format(pargs.input_program_list))
    programList = ProgramListLoader.load(pargs.input_program_list, pargs.rprefix)
  except (ProgramListLoader.ProgramListLoaderException) as e:
    _logger.error(e)
    _logger.debug(traceback.format_exc())
    return 1

  # Compute list index to entry point name mapping
  entryPoints = []
  _logger.info('Getting program entry points...')
  for programPath in programList:
    if pargs.entry_point != None:
      entryPoints.append(pargs.entry_point)
    else:
      assert pargs.entry_point_from_bool_attribute != None
      entryPointName = EntryPointFinder.findEntryPointWithBooleanAttribute(
        pargs.entry_point_from_bool_attribute, programPath)
      assert entryPointName != None
      entryPoints.append(entryPointName)

  # Generate new programs
  _logger.info('Generating new programs')
  tasks = []
  os.mkdir(pargs.output_dir)
  for index, (programPath, entryPoint) in enumerate(zip(programList, entryPoints)):
    outputPath = os.path.join(pargs.output_dir, 'program-{}.bpl'.format(index))
    tasks.append(ProgramGenTask(programPath, entryPoint, outputPath, pargs.spr_path))

  # Run
  if pargs.jobs == 1:
    for t in tasks:
      t.run()
  else:
    signal.signal(signal.SIGINT, handleInterrupt)
    signal.signal(signal.SIGTERM, handleInterrupt)
    import concurrent.futures
    with concurrent.futures.ThreadPoolExecutor(max_workers=pargs.jobs) as executor:
      futureToRunner = {executor.submit(t.run): t for t in tasks}
      for future in concurrent.futures.as_completed(futureToRunner):
        r = futureToRunner[future]
        _logger.info('{} runner finished'.format(r.outputPath))
        if future.exception():
          e = future.exception()
          cancel(futureToRunner)
          _logger.error(e)
          return 1
        if r.exitCode != 0:
          _logger.error('Tool failed')
          cancel(futureToRunner)
          return 1

  # Write program list
  with open(pargs.output_program_list, 'w') as f:
    for t in tasks:
      if t.exitCode != None:
        f.writelines('# Generated from {}\n'.format(t.programPath))
        f.writelines('{}\n\n'.format(t.outputPath))
      else:
        f.writelines('# Skipping program "{}" due to failure\n'.format(t.programPath))

  _logger.info('Finished')
  return 0

def cancel(futureToRunner):
  _logger.warning('Cancelling futures')
  for future in futureToRunner.keys():
    future.cancel()

class ProgramGenTask:
  def __init__(self, programPath, entryPoint, outputPath, sprPath):
    assert isinstance(programPath, str)
    assert isinstance(entryPoint, str)
    assert isinstance(outputPath, str)
    assert isinstance(sprPath, str)
    assert os.path.exists(programPath)
    self.programPath = programPath
    self.entryPoint = entryPoint
    self.outputPath = outputPath
    self.sprPath = sprPath
    self.exitCode = None

  def run(self):
    cmdLine = [self.sprPath,
               '-e', self.entryPoint,
               '-p', 'Transform.AxiomAndEntryRequiresCheckTransformPass',
               '-o', self.outputPath,
               self.programPath
              ]
    _logger.debug('Running:\n{}'.format(pprint.pformat(cmdLine)))
    exitCode = subprocess.call(cmdLine)
    _logger.debug('Finished')
    self.exitCode = exitCode

if __name__ == '__main__':
  try:
    sys.exit(entryPoint(sys.argv[1:]))
  except KeyboardInterrupt:
    sys.exit(2)
bsd-3-clause
-8,607,544,325,035,027,000
34.614525
128
0.694431
false
3.689236
false
false
false
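ProgramGenTask in the record above is self-contained enough to drive directly. A sketch with made-up paths, assuming it runs in the same namespace as the definitions above so that assigning the module-level _logger (normally done by entryPoint) takes effect:

import logging

logging.basicConfig(level=logging.DEBUG)
_logger = logging.getLogger(__name__)   # run() logs through this global

task = ProgramGenTask(
    programPath="programs/example.bpl",  # must already exist on disk
    entryPoint="main",
    outputPath="out/program-0.bpl",
    sprPath="/usr/local/bin/spr",        # hypothetical spr location
)
task.run()                               # shells out to spr
print("spr exit code:", task.exitCode)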
datawire/mdk
functionaltests/webservers/djangoserver/settings.py
1
3262
""" Django settings for djangoserver project. Generated by 'django-admin startproject' using Django 1.9.9. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'vo%5*42w1e@@eh%7)oss9t#fh9q8@a3(h2#$qg+63@0w#5wy-s' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'djangoserver.apps.MyMDKAppConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'mdk.django.MDKSessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'djangoserver.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'djangoserver.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/'
apache-2.0
610,415,732,512,119,600
25.737705
91
0.692213
false
3.518878
false
false
false
erikdejonge/newsrivr
daemons/hn.py
1
17357
""" This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from __future__ import division from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import from builtins import open from builtins import int from future import standard_library standard_library.install_aliases() from builtins import chr from builtins import str from builtins import range from past.utils import old_div from xml.sax.saxutils import escape import urllib.request, urllib.parse, urllib.error, re, os, urllib.parse import html.parser, feedparser from BeautifulSoup import BeautifulSoup, Comment from pprint import pprint import codecs import sys import html.entities streamWriter = codecs.lookup("utf-8")[-1] sys.stdout = streamWriter(sys.stdout) HN_RSS_FEED = "http://news.ycombinator.com/rss" negative_str = "([A-Z,a-z,0-9,-,_ ]*comments[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*comment[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*bcomments[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*meta[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*footer[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*footnote[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*foot[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*bottom[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*klasbox[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*side[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*inner[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*sidebar[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*hide[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*component[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*reactie[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*ad[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*ads[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*transcript[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*react[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*transcript[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*transcriptText[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*error[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*related[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*also[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*share[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*sideblock[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*policy[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*related[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*social[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*reflist[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*postmetadata[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ ]*references[A-Z,a-z,0-9,-,_ ]*)|" negative_str += "([A-Z,a-z,0-9,-,_ 
]*promo[A-Z,a-z,0-9,-,_ ]*)" NEGATIVE = re.compile(negative_str) super_negative_str = "([A-Z,a-z,0-9,-,_ ]*comment[A-Z,a-z,0-9,-,_ ]*)|" super_negative_str += "([A-Z,a-z,0-9,-,_ ]*voting[A-Z,a-z,0-9,-,_ ]*)|" super_negative_str += "([A-Z,a-z,0-9,-,_ ]*reactie[A-Z,a-z,0-9,-,_ ]*)|" super_negative_str += "([A-Z,a-z,0-9,-,_ ]*reaction[A-Z,a-z,0-9,-,_ ]*)|" super_negative_str += "([A-Z,a-z,0-9,-,_ ]*idgedragregelsusercontent[A-Z,a-z,0-9,-,_ ]*)|" super_negative_str += "([A-Z,a-z,0-9,-,_ ]*vote[A-Z,a-z,0-9,-,_ ]*)" SUPERNEGATIVE = re.compile(super_negative_str) positive_str = "([A-Z,a-z,0-9,-,_ ]*summary[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*post[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*hentry[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*entry[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*content[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*text[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*tekst[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*venue[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*venueInfo[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*venueDetails[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*body[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*bodycontent[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*content permalink[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*wrapper[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*article[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*articleblock[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*text[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*tekst[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*lead[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*leadarticle[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*story[A-Z,a-z,0-9,-,_ ]*)|" positive_str += "([A-Z,a-z,0-9,-,_ ]*permalink[A-Z,a-z,0-9,-,_ ]*)" POSITIVE = re.compile(positive_str) PUNCTUATION = re.compile("""[!"#$%&\"()*+,-./:;<=>?@[\\]^_`{|}~]""") MAXLINKS = 50 def latin1_to_ascii (unicrap): xlate={0xc0:'A', 0xc1:'A', 0xc2:'A', 0xc3:'A', 0xc4:'A', 0xc5:'A', 0xc6:'Ae', 0xc7:'C', 0xc8:'E', 0xc9:'E', 0xca:'E', 0xcb:'E', 0xcc:'I', 0xcd:'I', 0xce:'I', 0xcf:'I', 0xd0:'Th', 0xd1:'N', 0xd2:'O', 0xd3:'O', 0xd4:'O', 0xd5:'O', 0xd6:'O', 0xd8:'O', 0xd9:'U', 0xda:'U', 0xdb:'U', 0xdc:'U', 0xdd:'Y', 0xde:'th', 0xdf:'ss', 0xe0:'a', 0xe1:'a', 0xe2:'a', 0xe3:'a', 0xe4:'a', 0xe5:'a', 0xe6:'ae', 0xe7:'c', 0xe8:'e', 0xe9:'e', 0xea:'e', 0xeb:'e', 0xec:'i', 0xed:'i', 0xee:'i', 0xef:'i', 0xf0:'th', 0xf1:'n', 0xf2:'o', 0xf3:'o', 0xf4:'o', 0xf5:'o', 0xf6:'o', 0xf8:'o', 0xf9:'u', 0xfa:'u', 0xfb:'u', 0xfc:'u', 0xfd:'y', 0xfe:'th', 0xff:'y', 0xa1:'!', 0xa2:'{cent}', 0xa3:'{pound}', 0xa4:'{currency}', 0xa5:'{yen}', 0xa6:'|', 0xa7:'{section}', 0xa8:'{umlaut}', 0xa9:'{C}', 0xaa:'{^a}', 0xab:'<<', 0xac:'{not}', 0xad:'-', 0xae:'{R}', 0xaf:'_', 0xb0:'{degrees}', 0xb1:'{+/-}', 0xb2:'{^2}', 0xb3:'{^3}', 0xb4:"'", 0xb5:'{micro}', 0xb6:'{paragraph}', 0xb7:'*', 0xb8:'{cedilla}', 0xb9:'{^1}', 0xba:'{^o}', 0xbb:'>>', 0xbc:'{1/4}', 0xbd:'{1/2}', 0xbe:'{3/4}', 0xbf:'?', 0xd7:'*', 0xf7:'/' } r = '' for i in unicrap: if ord(i) in xlate: r += xlate[ord(i)] elif ord(i) >= 0x80: pass else: r += str(i) return r def toUTF8(data): try: data = data.encode("utf-8") except: data = latin1_to_ascii(data) return data def text2simpleHtml(data): data = data.replace("<h1"," 
<b").replace("</h1>","</b><br><br>") data = data.replace("<h2"," <b").replace("</h2>","</b><br>") data = data.replace("<h3>","").replace("</h3>","<br>") VALID_TAGS = ["strong", "b", "i", "table", "th", "tr", "td", "a", "code", "em", "p", "ul", "li", "br"] soup = BeautifulSoup(data) for tag in soup.findAll(True): if tag.name not in VALID_TAGS: tag.hidden = True return soup.renderContents() def _text(node): return " ".join(node.findAll(text=True)) def get_link_density(elem): link_length = len("".join([i.text or "" for i in elem.findAll("a")])) text_length = len(_text(elem)) return old_div(float(link_length), max(text_length, 1)) def removeFrontBreaks(s): try: soup = BeautifulSoup(s) whitespace = True for tag in soup.findAll(True): tagname = str(tag.name) if tagname!="br": whitespace=False if tagname!="p": whitespace=False if tagname=="br" or tagname=="p" and whitespace: tag.extract() return str(soup).strip() except Exception as e: clog(e) return s def convertentity(m): """Convert a HTML entity into normal string (ISO-8859-1)""" if m.group(1)=='#': try: return chr(int(m.group(2))) except ValueError: return '&#%s;' % m.group(2) try: return html.entities.entitydefs[m.group(2)] except KeyError: return '&%s;' % m.group(2) def unquotehtml(s): """Convert a HTML quoted string into normal string (ISO-8859-1). Works with &#XX; and with &nbsp; &gt; etc.""" return re.sub(r'&(#?)(.+?);',convertentity,s) def getNumLinks(s): try: cnt = 0 soup = BeautifulSoup(s) for a in soup.findAll("a"): if "href" in a: #print a cnt += 1 return cnt except: return 0 def removeEmptyParas(html): foundempty = False soup = BeautifulSoup(html) for p in soup.findAll("p"): if "id" in p: if "error_" in p["id"]: p.extract() if 0==len(p.text.strip().replace("\n", "")): if foundempty: p.extract() foundempty = True else: foundempty = False return soup.renderContents() def removeEmptyLis(html): soup = BeautifulSoup(html) for li in soup.findAll("li"): for a in li.findAll("a"): if len(a.contents)>0: if len(a.contents[0])<5: a.extract() if len(li.renderContents().strip())==0: li.extract() else: for x in li.findAll(): if len(x.renderContents().strip())==0: li.extract() for ul in soup.findAll("ul"): if 0==len(ul.findAll("li")): ul.extract() return soup.renderContents() def removeExtraBreaks(s): try: l = [] brcnt = 0 soup = BeautifulSoup(s) for tag in soup.findAll(): if tag.name=="p": if len(tag.text.strip().replace("\n", ""))<1: tag.extract() brcnt += 1 if tag.name=="br": brcnt += 1 if brcnt>1: tag.extract() else: brcnt = 0 return str(soup) except Exception as e: clog(e) return s def grabContent(link, html): if "&gt;" in html: html = unquotehtml(html) html = "<!DOCTYPE html><html><head><meta charset=\"utf-8\"></head><body>"+html+"</body></html>" #open("usedforscoring.html", "w").write(html) #exit(1) replaceBrs = re.compile("<br */? *>[ \r\n]*<br */? 
*>") html = re.sub(replaceBrs, "</p><p>", html) try: soup = BeautifulSoup(html) except html.parser.HTMLParseError as e: try: soup = BeautifulSoup(text2simpleHtml(html)) except html.parser.HTMLParseError: return "" #print str(soup) # REMOVE SCRIPTS for s in soup.findAll("div"): if get_link_density(s)>0.5 and len(s.renderContents())>1000: s.extract() if "id" in s: if SUPERNEGATIVE.match(str(s["id"]).lower()): s.extract() if "class" in s: if SUPERNEGATIVE.match(str(s["class"]).lower()): s.extract() for s in soup.findAll("script"): s.extract() for a in soup.findAll("a"): if "href" in a: if "javascript:" in a["href"]: a.extract() if "onclick" in a: if "return " in a["onclick"]: a.extract() allParagraphs = soup.findAll("p") topParent = None parents = [] for paragraph in allParagraphs: parent = paragraph.parent if (parent not in parents): parents.append(parent) parent.score = 0 if ("class" in parent): if (NEGATIVE.match(parent["class"].lower())): #print parent["class"] if len(parent.findAll('a'))>MAXLINKS: parent.score -= 500 parent.score -= 50 if (POSITIVE.match(parent["class"].lower())): if len(parent.findAll('a'))<MAXLINKS: parent.score += 25 else: parent.score -= 150 parent.score += 50 if ("id" in parent): if (NEGATIVE.match(parent["id"].lower())): #print parent["id"] if len(parent.findAll('a'))>MAXLINKS: parent.score -= 500 parent.score -= 50 if (POSITIVE.match(parent["id"].lower())): if len(parent.findAll('a'))<MAXLINKS: parent.score += 25 else: parent.score -= 150 parent.score += 50 if (parent.score == None): parent.score = 0 innerText = paragraph.renderContents() #"".join(paragraph.findAll(text=True)) if (len(innerText) > 10): parent.score += 1 if (len(innerText) > 300): parent.score += 2 parent.score += innerText.count(",")*3 parent.score += innerText.count(".")*3 for parent in parents: #print parent.score #print str(parent ) #print "-------------" if ((not topParent) or (parent.score > topParent.score)): topParent = parent if (not topParent): return "" # REMOVE LINK"D STYLES styleLinks = soup.findAll("link", attrs={"type" : "text/css"}) for s in styleLinks: s.extract() # REMOVE ON PAGE STYLES for s in soup.findAll("style"): s.extract() # CLEAN STYLES FROM ELEMENTS IN TOP PARENT for ele in topParent.findAll(True): del(ele["style"]) del(ele["class"]) #print str(ele) #print "-----" killDivs(topParent) clean(topParent, "form") clean(topParent, "object") clean(topParent, "iframe") fixLinks(topParent, link) for s in topParent.findAll("ul"): if get_link_density(s)>0.3: s.extract() lis = topParent.findAll("li") if len(lis)>50: for li in lis: li.extract() for li in lis: if len(li)>1: contents = str(li.contents[1]).replace("\n", "").replace("&nbsp;", "").replace("<br>", "").replace("<br/>", "").replace("<br />", "").replace("<p></p>", "") #print "c", contents if len(contents)==0: li.extract() comments = topParent.findAll(text=lambda text:isinstance(text, Comment)) [comment.extract() for comment in comments] html2 = topParent.renderContents() html2 = removeFrontBreaks(html2) html2 = html2.replace("\n", " ") for i in range(0, 10): html2 = html2.replace(" ", " ") html2 = html2.replace("<div></div>", "") html2 = html2.replace("<p>\xc2\xa0</p>", "") html2 = html2.replace("<p></p>", "<br/>") html2 = html2.replace("<p><br /></p>", "") #html2 = html2.replace("\xc2\xa9", "")# html2 = re.sub(r'&copy; (\w+.\w+)', "", html2) html2 = re.sub(r'&copy; (\w+)', "", html2) html2 = re.sub(r'\xc2\xa9 (\w+.\w+)', "", html2) html2 = re.sub(r'\xc2\xa9 (\w+)', "", html2) #if getNumLinks(html2)>25: # html2 = "html 
ignored, more then 25 links" #print get_link_density(BeautifulSoup(html2)) html2 = removeEmptyLis(html2) html2 = toUTF8(text2simpleHtml(html2)).replace("a href", "a target='blank' href") html2 = removeEmptyParas(html2) html2 = removeExtraBreaks(html2) html2 = html2.replace("</strong>", "</strong><br/>") html2 = html2.replace("</b>", "</b><br/>") #detect return html2 def fixLinks(parent, link): tags = parent.findAll(True) for t in tags: if ("href" in t): t["href"] = urllib.parse.urljoin(link, t["href"]) if ("src" in t): t["src"] = urllib.parse.urljoin(link, t["src"]) def clean(top, tag, minWords=10000): tags = top.findAll(tag) for t in tags: if (t.renderContents().count(" ") < minWords): t.extract() def killDivs(parent): divs = parent.findAll("div") for d in divs: p = len(d.findAll("p")) img = len(d.findAll("img")) li = len(d.findAll("li")) a = len(d.findAll("a")) embed = len(d.findAll("embed")) pre = len(d.findAll("pre")) #code = len(d.findAll("code")) if (d.renderContents().count(",") < 10): if (pre == 0):# and (code == 0)): if ((img > p ) or (li > p) or (a > p) or (p == 0) or (embed > 0)): d.extract() def upgradeLink(link): link = link.encode("utf-8") if (not (link.startswith("http://news.ycombinator.com") or link.endswith(".pdf"))): linkFile = "upgraded/" + re.sub(PUNCTUATION, "_", link) if (os.path.exists(linkFile)): return open(linkFile).read() else: content = "" try: html = urllib.request.urlopen(link).read() content = grabContent(link, html) filp = open(linkFile, "w") filp.write(content) filp.close() except IOError: pass return content else: return "" def upgradeFeed(feedUrl): feedData = urllib.request.urlopen(feedUrl).read() upgradedLinks = [] parsedFeed = feedparser.parse(feedData) for entry in parsedFeed.entries: upgradedLinks.append((entry, upgradeLink(entry.link))) rss = """<rss version="2.0"> <channel> <title>Hacker News</title> <link>http://news.ycombinator.com/</link> <description>Links for the intellectually curious, ranked by readers.</description> """ for entry, content in upgradedLinks: rss += u""" <item> <title>%s</title> <link>%s</link> <comments>%s</comments> <description> <![CDATA[<a href="%s">Comments</a><br/>%s<br/><a href="%s">Comments</a>]]> </description> </item> """ % (entry.title, escape(entry.link), escape(entry.comments), entry.comments, content.decode("utf-8"), entry.comments) rss += """ </channel> </rss>""" return rss def clog(s): from time import gmtime, strftime s= str(s) print('\033[%93m'+strftime("%Y-%m-%d %H:%M:%S", gmtime())+": "+s+'\033[%0m') if __name__ == "__main__": c = open("usedforscoring.html", "r").read() soup = BeautifulSoup(grabContent('x', c)) clog(soup.prettify())
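The extractor above keys many of its decisions off get_link_density(): the fraction of a block's text that sits inside <a> tags. A self-contained sketch of the same heuristic using only the standard library's html.parser (the script itself uses BeautifulSoup 3); names here are illustrative:

from html.parser import HTMLParser

class LinkDensity(HTMLParser):
    def __init__(self):
        super().__init__()
        self.depth = 0        # nesting depth inside <a> tags
        self.link_chars = 0
        self.total_chars = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            self.depth += 1

    def handle_endtag(self, tag):
        if tag == 'a' and self.depth:
            self.depth -= 1

    def handle_data(self, data):
        self.total_chars += len(data)
        if self.depth:
            self.link_chars += len(data)

def link_density(html_text):
    p = LinkDensity()
    p.feed(html_text)
    return p.link_chars / max(p.total_chars, 1)

# A nav-like block scores high; running article prose scores low.
print(link_density('<div><a href="#">one</a> <a href="#">two</a></div>'))
print(link_density('<p>Plenty of running prose with <a href="#">one link</a>.</p>'))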
gpl-2.0
-2,107,189,339,879,482,400
30.965009
159
0.567667
false
2.371823
false
false
false
johnoleary/Farkel
game.py
1
4955
####################
### Farkel Game ####
####################

import dice
import player

dice_list = [dice.Dice() for i in range(6)]

### Set up players ###
number_of_players = input("How many players? ")
player_list = []
for i in range(number_of_players):
    player_list.append(player.Player(raw_input("What is player "+str(i+1)+"'s name?\n")))
print player_list[0].name

### Set up variables for game play ###
game_over = False

##########
## GAME ##
##########

def check_validity_of_selection(choosen_dice):
    if len(choosen_dice) == 0:
        return False
    for i in range(len(choosen_dice)):
        if choosen_dice[i] > 6 or choosen_dice[i] < 1:
            return False
    amounts_of_dice = [[0]*6, [0]*6]  # First list is values of dice_list. Second is from chosen dice.
    for i in range(len(dice_list)):
        if not dice_list[i].set_aside:
            amounts_of_dice[0][dice_list[i].current_side-1] += 1
    for i in range(len(choosen_dice)):
        amounts_of_dice[1][choosen_dice[i]-1] += 1
    for i in range(6):
        if amounts_of_dice[0][i] < amounts_of_dice[1][i]:
            return False
    return True

## This needs to be seriously fleshed out
def score_dice(choosen_dice):
    running_score = 0
    amounts_of_dice = [0]*6  ## Stores amount of each side shown.
    for i in range(len(choosen_dice)):
        amounts_of_dice[choosen_dice[i]-1] += 1

    ## Check for special patterns
    groupsOfThree = 0
    groupsOfTwo = 0
    straight = 0
    for i in range(6):
        if amounts_of_dice[i] == 3:
            groupsOfThree += 1
        if amounts_of_dice[i] == 2:
            groupsOfTwo += 1
        if amounts_of_dice[i] == 1:
            straight += 1

    # Add this in later
    if groupsOfThree == 2:
        pass
    elif groupsOfTwo == 3:
        pass
    elif straight == 6:
        pass

    for i in range(6):
        if amounts_of_dice[i] == 6:
            if i == 0:
                running_score += 1000*2
            else:
                running_score += (i+1)*100*2
            amounts_of_dice[i] = amounts_of_dice[i] - 6
        if amounts_of_dice[i] >= 3:
            if i == 0:
                running_score += 1000
            else:
                running_score += (i+1)*100
            amounts_of_dice[i] = amounts_of_dice[i] - 3
    running_score += 100*amounts_of_dice[0]
    running_score += 50*amounts_of_dice[4]
    return running_score

def choose_dice(dice_list):
    # Sets aside any dice that are pulled out.
    valid_entry = False
    while not valid_entry:
        tempList = raw_input("Which dice would you like to pull out? ")
        dice_to_keep = map(int, tempList.split())
        valid_entry = check_validity_of_selection(dice_to_keep)
        if not valid_entry:
            print "I'm sorry, that is not a valid entry. Try again."
    # Going to need a way to check that the chosen dice can be legally chosen...
    #
    for i in range(len(dice_to_keep)):
        for j in range(len(dice_list)):
            if dice_list[j].current_side == dice_to_keep[i] and not dice_list[j].set_aside:
                dice_list[j].set_aside = True
                break
    return dice_to_keep

def run_turn(player, starting_score):
    this_rounds_score = starting_score
    print player_list[player].name+"'s turn."
    print "\n=== Roll ==="
    dice_aside = 0
    for i in range(6):
        if not dice_list[i].set_aside:
            print dice_list[i].roll()
        else:
            dice_aside += 1
    if dice_aside > 0:
        print "--- Set Aside ---"
        for i in range(6):
            if dice_list[i].set_aside:
                print dice_list[i].current_side
    temp_score = score_dice(choose_dice(dice_list))
    this_rounds_score += temp_score
    if temp_score > 0:
        dice_remaining = 0
        for i in range(6):
            if not dice_list[i].set_aside:
                dice_remaining += 1
        if dice_remaining == 0:
            print "You must roll again for removing all dice. Good luck!"
            for i in range(6):
                dice_list[i].set_aside = False
            run_turn(player, this_rounds_score)
        else:
            if 0 == input("Type 1 to continue rolling, type 0 to stop. "):
                player_list[player].current_score += this_rounds_score
            else:
                run_turn(player, this_rounds_score)
    else:
        print "That's unfortunate."

current_player = 0
final_round = False
final_round_countdown = number_of_players  # Used to allow each player to get one more chance to play after someone gets 5000

###### Main Loop ######
while not game_over:
    ## Loop for each turn
    run_turn(current_player, 0)
    print player_list[current_player].name+" has a total of "+str(player_list[current_player].current_score)+" points."

    ## Check if a player has enough points to win
    if player_list[current_player].current_score >= 5000:
        final_round = True

    ## Prepare everything for the next iteration
    if final_round:
        final_round_countdown -= 1
        if final_round_countdown <= 0:
            game_over = True
    current_player += 1
    current_player = current_player % number_of_players  ## To cycle through the players
    for i in range(6):
        dice_list[i].set_aside = False

### Finish the Game ###
winning_player_idx = -1
top_score = 0
for i in range(number_of_players):
    if player_list[i].current_score > top_score:
        winning_player_idx = i
        top_score = player_list[i].current_score
# Need to figure out how to handle a tie

print player_list[winning_player_idx].name+" wins with "+str(top_score)+" points!"
mit
8,157,945,392,773,263,000
24.152284
124
0.655298
false
2.703219
false
false
false