| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
happz/ducky | ducky/asm/lexer.py | Python | mit | 2,836 | 0.012694 |
import ply.lex
#
# Lexer setup
#
instructions = (
'NOP', 'INT', 'IPI', 'RETINT', 'CALL', 'RET', 'CLI', 'STI', 'HLT', 'RST', 'IDLE',
'PUSH', 'POP', 'INC', 'DEC', 'ADD', 'SUB', 'CMP', 'J', 'AND', 'OR', 'XOR', 'NOT',
'SHL', 'SHR', 'SHRS', 'LW', 'LS', 'LB', 'LI', 'LIU', 'LA', 'STW', 'STS', 'STB',
'MOV', 'SWP', 'MUL', 'UDIV', 'MOD', 'CMPU', 'CAS', 'SIS', 'DIV',
'BE', 'BNE', 'BS', 'BNS', 'BZ', 'BNZ', 'BO', 'BNO', "BL", "BLE", "BGE", "BG",
'SETE', 'SETNE', 'SETZ', 'SETNZ', 'SETO', 'SETNO', 'SETS', 'SETNS', "SETL", "SETLE", "SETGE", "SETG",
'SELE', 'SELNE', 'SELZ', 'SELNZ', 'SELS', 'SELNS', 'SELO', 'SELNO', "SELL", "SELLE", "SELGE", "SELG",
'LPM', 'CTR', 'CTW', 'FPTC'
)
math_instructions = (
'PUSHW', 'SAVEW', 'POPW', 'LOADW', 'POPUW', 'LOADUW', 'SAVE', 'LOAD',
'INCL', 'DECL', 'ADDL', 'MULL', 'DIVL', 'MODL', 'UDIVL', 'UMODL',
'DUP', 'DUP2', 'SWPL', 'DROP', 'SYMDIVL', 'SYMMODL',
'PUSHL', 'POPL'
)
directives = (
'data', 'text',
'type', 'global',
'ascii', 'byte', 'short', 'space', 'string', 'word',
'section',
'align', 'file',
'set'
)
# Construct list of tokens, and map of reserved words
tokens = instructions + math_instructions + (
'COMMA', 'COLON', 'HASH', 'LBRAC', 'RBRAC', 'DOT', 'PLUS',
'SCONST', 'ICONST',
'ID', 'REGISTER'
)
reserved_map = {
# Special registers
'sp': 'REGISTER',
'fp': 'REGISTER',
# Special instructions
'shiftl': 'SHL',
'shiftr': 'SHR',
'shiftrs': 'SHRS'
}
reserved_map.update({i.lower(): i for i in instructions})
reserved_map.update({i.lower(): i for i in math_instructions})
tokens = tokens + tuple([i.upper() for i in directives])
reserved_map.update({'.' + i: i.upper() for i in directives})
reserved_map.update({i: i.upper() for i in directives})
reserved_map.update({'r%d' % i: 'REGISTER' for i in range(0, 32)})
# Newlines
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count('\n')
# Tokens
t_COMMA = r','
t_COLON = r':'
t_HASH = r'\#'
t_LBRAC = r'\['
t_RBRAC = r'\]'
t_DOT = r'\.'
t_PLUS = r'\+'
t_SCONST = r'\"([^\\\n]|(\\.))*?\"'
t_ICONST = r'-?(?:(?:0x[0-9a-fA-F][0-9a-fA-F]*)|(?:[0-9][0-9]*))'
def t_ID(t):
r'[a-zA-Z_\.][a-zA-Z0-9_\.]*'
t.type = reserved_map.get(t.value, 'ID')
return t
t_ignore = " \t"
def t_error(t):
from ..errors import AssemblyIllegalCharError
loc = t.lexer.location.copy()
loc.lineno = t.lineno - loc.lineno
loc.column = t.lexer.parser.lexpos_to_lineno(t.lexpos)
raise AssemblyIllegalCharError(c = t.value[0], location = loc, line = t.lexer.parser.lineno_to_line(t.lineno))
class AssemblyLexer(object):
def __init__(self):
self._lexer = ply.lex.lex()
def token(self, *args, **kwargs):
return self._lexer.token(*args, **kwargs)
def input(self, *args, **kwargs):
return self._lexer.input(*args, **kwargs)
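# A minimal driving sketch (not part of the original module; the sample line
# is made up) showing how the reserved_map lookup in t_ID turns bare words
# into instruction and REGISTER tokens:
if __name__ == '__main__':
    asm_lexer = AssemblyLexer()
    asm_lexer.input("main: li r1, 0x10")
    while True:
        tok = asm_lexer.token()
        if tok is None:
            break
        # e.g. LexToken(ID,'main',...), LexToken(COLON,...), LexToken(LI,'li',...),
        # LexToken(REGISTER,'r1',...), LexToken(COMMA,...), LexToken(ICONST,'0x10',...)
        print(tok)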
|
blackspiraldev/jira-python | jira/packages/requests_oauth/__init__.py | Python | bsd-2-clause | 235 | 0 |
# requests-oauth 0.4.0
# Hacked to support RSA-SHA1 encryption for Atlassian OAuth.
# Original author: Miguel Araujo
# Forked from https://github.com/maraujop/requests_oauth
# Original license: 3-clause BSD
from hook import OAuthHook
|
firebitsbr/pwn_plug_sources | src/wifizoo/wifiglobals.py | Python | gpl-3.0 | 8,025 | 0.04486 |
# WifiZoo
# complaints to Hernan Ochoa (hernan@gmail.com)
import curses.ascii
from scapy import *
import datetime
import WifiZooEntities
import os
class WifiGlobals:
def __init__(self):
self.APdict = {}
self.AccessPointsList = []
self.ProbeRequestsListBySSID = []
self.ProbeRequestsListBySRC = []
self._hasPrismHeaders = 1
self._logdir = "./logs/"
self._Cookie = None
self._CookiesList = []
self._pktCounters = {}
self._OUIList = ''
def incrementCounter(self, proto):
if self._pktCounters.has_key(proto):
n = self._pktCounters[proto]
self._pktCounters[proto] = n+1
else:
self._pktCounters[proto] = 1
def getCounter(self, proto):
return self._pktCounters[proto]
def getAllCounters(self):
return self._pktCounters
def getMACVendor(self, aMAC):
if len(aMAC) < 8:
return 'Unknown'
if self._OUIList == '':
f = open("oui_list.txt", "rb")
self._OUIList = f.read()
f.close()
lines = self._OUIList.split('\n')
myMAC = aMAC[0:8]
myMAC = myMAC.lower()
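# The lookup below assumes oui_list.txt lines shaped like
# "00-1A-2B<TAB>(hex)<TAB>Vendor Name": the first 8 characters are the
# dash-separated OUI prefix and the third tab-separated field is the vendor.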
for line in lines:
if len(line) > 8:
vendorname = line.split('\t')[2]
vendormac = line[0:8].replace('-',':').lower()
if vendormac == myMAC:
return vendorname
return 'Unknown'
def addCookie(self, aCookie):
self._CookiesList.append(aCookie)
return
def getCookiesList(self):
return self._CookiesList
def setCookie(self, aCookie):
self._Cookie = aCookie
def getCookie(self):
return self._Cookie
def setHasPrismHeaders(self, aBoolean):
self._hasPrismHeaders = aBoolean
def hasPrismHeaders(self):
return self._hasPrismHeaders
def getClients(self):
return self.APdict
def logDir(self):
if not os.path.isdir( self._logdir ):
os.mkdir( self._logdir )
return "./logs/"
def getProbeRequestsBySSID(self):
return self.ProbeRequestsListBySSID
def addProbeRequest(self, aProbeRequest):
# add ProbeRequest by SSID
found = 0
if len(aProbeRequest.getSSID()) > 0:
for pr in self.ProbeRequestsListBySSID:
if pr.getSSID() == aProbeRequest.getSSID():
# TODO: change the LASTSEEN thing
found = 1
# if SSID was not seen before, add it to the list
if len(aProbeRequest.getSSID()) > 0 and found == 0:
self.ProbeRequestsListBySSID.append( aProbeRequest )
#add ProbeRequest by SRC
if len(aProbeRequest.getSSID()) == 0:
for pr in self.ProbeRequestsListBySRC:
if aProbeRequest.getSRC() == pr.getSRC() and pr.getSSID() == "<Empty>":
return
aProbeRequest.setSSID( "<Empty>" )
self.ProbeRequestsListBySRC.append( aProbeRequest )
return
else:
for pr in self.ProbeRequestsListBySRC:
if pr.getSRC() == aProbeRequest.getSRC() and pr.getSSID() == aProbeRequest.getSSID():
return
# add proberequests with different src or ssid
self.ProbeRequestsListBySRC.append( aProbeRequest )
def dumpProbeRequests(self):
if len(self.ProbeRequestsListBySSID) >= 1:
prf = open( self.logDir() + "probereqsuniqssid.log", "wb" )
for pr in self.ProbeRequestsListBySSID:
prf.write("ssid=" + pr.getSSID() + " dst=" + pr.getDST() + " src=" + pr.getSRC() + " bssid=" + pr.getBSSID() + " (ch: " + str(pr.getChannel()) + ")" + "\n")
prf.close()
if len(self.ProbeRequestsListBySRC) >= 1:
prf = open( self.logDir() + "probereqbysrc.log", "wb" )
setup = """
digraph ProbeReqGraph {
compound=true;
ranksep=1.25;
rankdir="LR";
label="Probe Requests by SRC and SSID";
node [shape=ellipse, fontsize=12];
bgcolor=white;
edge[arrowsize=1, color=black];
"""
prf.write(setup + "\n\n")
for pr in self.ProbeRequestsListBySRC:
prf.write( "\"" + pr.getSSID() + "\"" + " -> " + "\"" + pr.getSRC() + "\"" + "\r\n" )
prf.write("}\n\n")
prf.close()
def getAPList(self):
return self.AccessPointsList
def getAPbyBSSID(self, aBSSID):
for ap in self.AccessPointsList:
if ap.getBSSID() == aBSSID:
return ap
return None
def addAccessPoint(self, bssid, ssid, channel, isprotected):
apFound = 0
for ap in self.AccessPointsList:
if ap.getBSSID() == bssid:
apFound = 1
# could modify this to 'update' SSID of bssid, but mmm
if apFound == 1:
return 0
anAP = WifiZooEntities.AccessPoint()
anAP.setBSSID( bssid )
anAP.setSSID( ssid )
anAP.setChannel( channel )
anAP.setProtected( isprotected )
# I assume it was found NOW, right before this function was called
anAP.setFoundWhen( datetime.datetime.now() )
self.AccessPointsList.append(anAP)
return 1
def dumpAccessPointsList(self, outfile='ssids.log'):
if len(self.AccessPointsList) < 1:
return
sf = open( self.logDir() + outfile , "wb" )
# first dump OPEN networks
for ap in self.AccessPointsList:
if not ap.isProtected():
sf.write( str(ap.getBSSID()) + " -> " + str(ap.getSSID()) + " (ch:" + str(ap.getChannel()) + ")" + " (Encryption:Open)" + " (when: " + str(ap.getFoundWhenString()) + ")" + "\n" )
# now protected networks
for ap in self.AccessPointsList:
if ap.isProtected():
sf.write( str(ap.getBSSID()) + " -> " + str(ap.getSSID()) + " (ch:" + str(ap.getChannel()) + ")" + " (Encryption:YES)" + " (when: " + str(ap.getFoundWhenString()) + ")" + "\n" )
sf.close()
return
def addClients(self, src, dst, bssid):
bssidfound = 0
dump = 0
for x in self.APdict.keys():
if x == bssid:
bssidfound = 1
clientList = self.APdict[ x ]
srcFound = 0
dstFound = 0
for client in clientList:
if client == src:
srcFound = 1
if client == dst:
dstFound = 1
if srcFound == 0:
if src != "ff:ff:ff:ff:ff:ff" and src != bssid:
dump = 1
clientList.append(src)
if dstFound == 0:
if dst != "ff:ff:ff:ff:ff:ff" and dst != bssid:
dump = 1
clientList.append(dst)
self.APdict[ x ] = clientList
if bssidfound == 0:
alist = []
if src != 'ff:ff:ff:ff:ff:ff' and src != bssid:
dump = 1
alist.append( src )
if dst != 'ff:ff:ff:ff:ff:ff' and src != dst and dst != bssid:
dump = 1
alist.append( dst )
self.APdict[ bssid ] = alist
# add this 'nameless' bssid also to the list of access points
#self.addAccessPoint(bssid, '<addedbyClient>', 0, 0)
if dump == 1:
fdump = open(self.logDir()+"clients.log", "wb")
#fdump.write("--DUMP-----" + "-"*30 + "\n")
fdump.write("digraph APgraph {\n\n")
setup = """
compound=true;
ranksep=1.25;
rankdir="LR";
label="802.11 bssids->clients";
node [shape=ellipse, fontsize=12];
bgcolor=white;
edge[arrowsize=1, color=black];
"""
fdump.write(setup + "\n\n")
for apmac in self.APdict.keys():
clientList = self.APdict[ apmac ]
for client in clientList:
#fdump.write("\"" + apmac + "\" -> \"" + client + "\"\n")
ssid = self.getSSID(apmac)
fdump.write("\"" + apmac + " (" + ssid + ")\" -> \"" + client + "\"\n")
fdump.write("\n }\n")
#fdump.write("-----------" + "-"*30 + "\n")
fdump.close()
def getSSID(self, bssid):
aSsid = 'Unknown'
for ap in self.AccessPointsList:
if ap.getBSSID() == bssid:
aSsid = ap.getSSID()
return aSsid
# my weird version
def isAlpha(self, c):
if c != '\x0A' and c != '\x0D':
if curses.ascii.isctrl(c):
return 0
return 1
def getSrcDstBssid(self, pkt):
bssid = ''
src = ''
dst = ''
#0 = mgmt, 1=control, 2=data
p = pkt.getlayer(Dot11)
# is it a DATA packet?
t = p.type
if t == 2:
# if packet FROMDS then dst,bssid,src
# if packet TODS then bssid,src,dst
# toDS
if p.FCfield & 1:
#print "toDS"
bssid = str(p.addr1)
src = str(p.addr2)
dst = str(p.addr3)
# fromDS
elif p.FCfield & 2:
#print "fromDS"
dst = str(p.addr1)
bssid = str(p.addr2)
src = str(p.addr3)
# if bits are 0 & 0, then ad-hoc network
# if bits are 1 & 1, then WDS system
# TODO
return (src,dst,bssid)
Info = WifiGlobals()
|
deerwalk/voltdb | tests/scripts/valleak.py | Python | agpl-3.0 | 3,161 | 0.003796 |
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Exit with an error code if Valgrind finds "definitely lost" memory.
import os
import subprocess
import sys
import tempfile
VALGRIND = "valgrind"
def valleak(executable):
"""Returns (error, stdout, stderr).
error == 0 if successful, an integer > 0 if there are memory leaks or errors."""
valgrind_output = tempfile.NamedTemporaryFile()
valgrind_command = (
VALGRIND,
"--leak-check=full",
"--log-file-exactly=" + valgrind_output.name,
"--error-exitcode=1",
executable)
process = subprocess.Popen(
valgrind_command,
bufsize = -1,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
close_fds = True)
process.stdin.close()
stdout = process.stdout.read()
stderr = process.stderr.read()
error = process.wait()
valgrind_error_file = open(valgrind_output.name)
valgrind_error = valgrind_error_file.read()
valgrind_error_file.close()
valgrind_output.close()
# Find the last summary block in the valgrind report
# This ignores forks
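# (valgrind prints summary lines like "==12345== ERROR SUMMARY: 0 errors
# from 0 contexts", so matching on "== ERROR SUMMARY:" skips the PID prefix)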
summary_start = valgrind_error.rindex("== ERROR SUMMARY:")
summary = valgrind_error[summary_start:]
append_valgrind = False
if error == 0:
assert "== ERROR SUMMARY: 0 errors" in summary
# Check for memory leaks
if "== definitely lost:" in summary:
error = 1
append_valgrind = True
elif "== ERROR SUMMARY: 0 errors" not in summary:
# We also have valgrind errors: append the log to stderr
append_valgrind = True
if append_valgrind:
stderr = stderr + "\n\n" + valgrind_error
return (error, stdout, stderr)
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.stderr.write("valleak.py [executable]\n")
sys.exit(1)
exe = sys.argv[1]
error, stdin, stderr = valleak(exe)
sys.stdout.write(stdin)
sys.stderr.write(stderr)
sys.exit(error)
|
liorvh/CuckooSploit | lib/cuckoo/core/scheduler.py | Python | gpl-3.0 | 21,948 | 0.000456 |
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import time
import shutil
import logging
import Queue
from threading import Thread, Lock
from lib.cuckoo.common.config import Config
from lib.cuckoo.common.constants import CUCKOO_ROOT
from lib.cuckoo.common.exceptions import CuckooMachineError, CuckooGuestError
from lib.cuckoo.common.exceptions import CuckooOperationalError
from lib.cuckoo.common.exceptions import CuckooCriticalError
from lib.cuckoo.common.objects import File
from lib.cuckoo.common.utils import create_folder
from lib.cuckoo.core.database import Database, TASK_COMPLETED, TASK_REPORTED
from lib.cuckoo.core.guest import GuestManager
from lib.cuckoo.core.plugins import list_plugins, RunAuxiliary, RunProcessing
from lib.cuckoo.core.plugins import RunSignatures, RunReporting
from lib.cuckoo.core.resultserver import ResultServer
log = logging.getLogger(__name__)
machinery = None
machine_lock = Lock()
latest_symlink_lock = Lock()
active_analysis_count = 0
class CuckooDeadMachine(Exception):
"""Exception thrown when a machine turns dead.
When this exception has been thrown, the analysis task will start again,
and will try to use another machine, when available.
"""
pass
class AnalysisManager(Thread):
"""Analysis Manager.
This class handles the full analysis process for a given task. It takes
care of selecting the analysis machine, preparing the configuration and
interacting with the guest agent and analyzer components to launch and
complete the analysis and store, process and report its results.
"""
def __init__(self, task, error_queue):
"""@param task: task object containing the details for the analysis."""
Thread.__init__(self)
Thread.daemon = True
self.task = task
self.errors = error_queue
self.cfg = Config()
self.storage = ""
self.binary = ""
self.machine = None
def init_storage(self):
"""Initialize analysis storage folder."""
self.storage = os.path.join(CUCKOO_ROOT,
"storage",
"analyses",
str(self.task.id))
# If the analysis storage folder already exists, we need to abort the
# analysis or previous results will be overwritten and lost.
if os.path.exists(self.storage):
log.error("Analysis results folder already exists at path \"%s\","
" analysis aborted", self.storage)
return False
# If we're not able to create the analysis storage folder, we have to
# abort the analysis.
try:
create_folder(folder=self.storage)
except CuckooOperationalError:
log.error("Unable to create analysis folder %s", self.storage)
return False
return True
def check_file(self):
"""Checks the integrity of the file to be analyzed."""
sample = Database().view_sample(self.task.sample_id)
sha256 = File(self.task.target).get_sha256()
if sha256 != sample.sha256:
log.error("Target file has been modified after submission: \"%s\"", self.task.target)
return False
return True
def store_file(self):
"""Store a copy of the file being analyzed."""
if not os.path.exists(self.task.target):
log.error("The file to analyze does not exist at path \"%s\", "
"analysis aborted", self.task.target)
return False
sha256 = File(self.task.target).get_sha256()
self.binary = os.path.join(CUCKOO_ROOT, "storage", "binaries", sha256)
if os.path.exists(self.binary):
log.info("File already exists at \"%s\"", self.binary)
else:
# TODO: do we really need to abort the analysis in case we are not
# able to store a copy of the file?
try:
shutil.copy(self.task.target, self.binary)
except (IOError, shutil.Error) as e:
log.error("Unable to store file from \"%s\" to \"%s\", "
"analysis aborted", self.task.target, self.binary)
return False
try:
new_binary_path = os.path.join(self.storage, "binary")
if hasattr(os, "symlink"):
os.symlink(self.binary, new_binary_path)
else:
shutil.copy(self.binary, new_binary_path)
except (AttributeError, OSError) as e:
log.error("Unable to create symlink/copy from \"%s\" to "
"\"%s\": %s", self.binary, self.storage, e)
return True
def acquire_machine(self):
"""Acquire an analysis machine from the pool of available ones."""
machine = None
# Start a loop to acquire a machine to run the analysis on.
while True:
machine_lock.acquire()
# In some cases it's possible that we enter this loop without
# having any available machines. We should make sure this is not
# such a case, or the analysis task will fail completely.
if not machinery.availables():
machine_lock.release()
time.sleep(1)
continue
# If the user specified a specific machine ID, a platform to be
# used or machine tags acquire the machine accordingly.
try:
machine = machinery.acquire(machine_id=self.task.machine,
platform=self.task.platform,
tags=self.task.tags)
finally:
machine_lock.release()
# If no machine is available at this moment, wait for one second
# and try again.
if not machine:
log.debug("Task #%d: no machine available yet", self.task.id)
time.sleep(1)
else:
log.info("Task #%d: acquired machine %s (label=%s)",
self.task.id, machine.name, machine.label)
break
self.machine = machine
def build_options(self):
"""Generate analysis options.
@return: options dict.
"""
options = {}
options["id"] = self.task.id
options["ip"] = self.machine.resultserver_ip
options["port"] = self.machine.resultserver_port
options["category"] = self.task.category
options["target"] = self.task.target
options["package"] = self.task.package
options["options"] = self.task.options
options["enforce_timeout"] = self.task.enforce_timeout
options["clock"] = self.task.clock
options["terminate_processes"] = self.cfg.cuckoo.terminate_processes
if not self.task.timeout or self.task.timeout == 0:
options["timeout"] = self.cfg.timeouts.default
else:
options["timeout"] = self.task.timeout
if self.task.category == "file":
options["file_name"] = File(self.task.target).get_name()
options["file_type"] = File(self.task.target).get_type()
return options
def launch_analysis(self):
"""Start analysis."""
succeeded = False
dead_machine = False
log.info("Starting analysis of %s \"%s\" (task=%d)",
self.task.category.upper(), self.task.target, self.task.id)
# Initialize the analysis folders.
if not self.init_storage():
return False
if self.task.category == "file":
# Check whether the file has been changed for some unknown reason.
# And fail this analysis if it has been modified.
if not self.check_file():
return False
# Store a copy of the original file.
if not self.store_file():
return False
# Acquire analysis machine.
try:
self.
|
EdDev/vdsm | tests/osinfo_test.py | Python | gpl-2.0 | 1,439 | 0 |
#
# Copyright 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import tempfile
from testlib import VdsmTestCase
from testlib import permutations, expandPermutations
from vdsm import osinfo
@expandPermutations
class TestOsinfo(VdsmTestCase):
@permutations([
[b'', ''],
[b'\n', ''],
[b'a', 'a'],
[b'a\n', 'a'],
[b'a\nb', 'a']
])
def test_kernel_args(self, test_input, expected_result):
with tempfile.NamedTemporaryFile() as f:
f.write(test_input)
f.flush()
self.assertEqual(osinfo.kernel_args(f.name), expected_result)
|
USGSDenverPychron/pychron | pychron/envisage/tasks/actions.py | Python | apache-2.0 | 14,692 | 0.000204 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
import sys
from pyface.action.action import Action
from pyface.confirmation_dialog import confirm
from pyface.constant import YES
from pyface.message_dialog import information
from pyface.tasks.action.task_action import TaskAction
from pyface.tasks.task_window_layout import TaskWindowLayout
from traits.api import Any, List
from pychron.envisage.resources import icon
from pychron.envisage.ui_actions import UIAction, UITaskAction
# ===============================================================================
# help
# ===============================================================================
# from pychron.envisage.user_login import login_file
def restart():
os.execl(sys.executable, *([sys.executable] + sys.argv))
def get_key_binding(k_id):
from pychron.envisage.key_bindings import user_key_map
try:
return user_key_map[k_id][0]
except KeyError:
pass
class myTaskAction(TaskAction):
task_ids = List
def _task_changed(self):
if self.task:
if self.task.id in self.task_ids:
enabled = True
if self.enabled_name:
if self.object:
enabled = bool(
self._get_attr(self.object, self.enabled_name, False)
)
if enabled:
self._enabled = True
else:
self._enabled = False
def _enabled_update(self):
"""
reimplement ListeningAction's _enabled_update
"""
if self.enabled_name:
if self.object:
self.enabled = bool(
self._get_attr(self.object, self.enabled_name, False)
)
else:
self.enabled = False
elif self._enabled is not None:
self.enabled = self._enabled
else:
self.enabled = bool(self.object)
class PAction(UIAction):
def __init__(self, *args, **kw):
super(PAction, self).__init__(*args, **kw)
acc = get_key_binding(self.id)
self.accelerator = acc or self.accelerator
class PTaskAction(UITaskAction):
def __init__(self, *args, **kw):
super(PTaskAction, self).__init__(*args, **kw)
acc = get_key_binding(self.id)
self.accelerator = acc or self.accelerator
class DemoAction(Action):
name = "Demo"
accelerator = "Shift+Ctrl+0"
def perform(self, event):
app = event.task.application
app.info("Demo message: {}".format("Hello version 2.0"))
class StartupTestsAction(Action):
name = "Run Startup Tests"
def perform(self, event):
app = event.task.application
app.do_startup_tests(
force_show_results=True, cancel_auto_close=True, can_cancel=False
)
class KeyBindingsAction(PAction):
name = "Edit Key Bindings"
def perform(self, event):
from pychron.envisage.key_bindings import edit_key_bindings
edit_key_bindings()
class UserAction(PAction):
def _get_current_user(self, event):
app = event.task.application
args = app.id.split(".")
cuser = args[-1]
base_id = ".".join(args[:-1])
return base_id, cuser
class SwitchUserAction(UserAction):
name = "Switch User"
image = icon("user_suit")
def perform(self, event):
pass
# from pychron.envisage.user_login import get_user
#
# base_id, cuser = self._get_current_user(event)
# user = get_user(current=cuser)
# if user:
# # from pychron.paths import paths
# # set login file
# with open(login_file, 'w') as wfile:
# wfile.write(user)
# restart()
class CopyPreferencesAction(UserAction):
name = "Copy Preferences"
def perform(self, event):
pass
# from pychron.envisage.user_login import get_src_dest_user
#
# base_id, cuser = self._get_current_user(event)
# src_name, dest_names = get_src_dest_user(cuser)
#
# if src_name:
#
# for di in dest_names:
# dest_id = '{}.{}'.format(base_id, di)
# src_id = '{}.{}'.format(base_id, src_name)
#
# root = os.path.join(os.path.expanduser('~'), '.enthought')
#
# src_dir = os.path.join(root, src_id)
# dest_dir = os.path.join(root, dest_id)
# if not os.path.isdir(dest_dir):
# os.mkdir(dest_dir)
#
# name = 'preferences.ini'
# dest = os.path.join(dest_dir, name)
# src = os.path.join(src_dir, name)
# shutil.copyfile(src, dest)
class RestartAction(PAction):
name = "Restart"
image = icon("system-restart")
def perform(self, event):
restart()
class WebAction(PAction):
def _open_url(self, url):
import webbrowser
import requests
try:
requests.get(url)
except BaseException as e:
print("web action url:{} exception:{}".format(url, e))
return
webbrowser.open_new(url)
return True
class IssueAction(WebAction):
name = "Add Request/Report Bug"
image = icon("bug")
def perform(self, event):
"""
goto issues page add an request or report bug
"""
app = event.task.window.application
name = app.preferences.get("pychron.general.organization")
if not name:
information(
event.task.window.control,
'Please set an "Organziation" in General Preferences',
)
return
url = "https://github.com/{}/pychron/issues/new".format(name)
self._open_url(url)
class SettingsAction(Action):
def perform(self, event):
app = event.task.window.application
name = app.preferences.get("pychron.general.remote")
if not name:
information(
event.task.window.control,
'Please set an "Laboratory Repo" in General Preferences',
)
return
from pychron.envisage.settings_repo import SettingsRepoManager
from pychron.paths import paths
root = os.path.join(paths.root_dir, ".lab")
exists = os.path.isdir(os.path.join(root, ".git"))
if exists:
repo = SettingsRepoManager()
repo.path = root
repo.open_repo(root)
repo.pull()
else:
url = "https://github.com/{}".format(name)
repo = SettingsRepoManager.clone_from(url, root)
self._perform(repo)
def _perform(self, repo):
raise NotImplementedError
class ApplySettingsAction(SettingsAction):
name = "Apply Settings..."
def _perform(self, repo):
"""
select and apply settings from the laboratory's repository
:param repo:
:return:
"""
repo.apply_settings()
class ShareSettingsAction(SettingsAction):
name = "Share Settings..."
def _perform(self, repo):
"""
save current settings to the laboratory's repository
:param repo:
:return:
"""
repo.share_settings()
class NoteAction(WebAction):
name = "Add
|
cemsbr/expyrimenter | tests/test_executor.py | Python | gpl-3.0 | 4,989 | 0.0002 |
import unittest
from mock import Mock, patch
from expyrimenter import Executor
from expyrimenter.runnable import Runnable
from subprocess import CalledProcessError
from concurrent.futures import ThreadPoolExecutor
import re
class TestExecutor(unittest.TestCase):
output = 'TestExecutor output'
outputs = ['TestExecutor 1', 'TestExecutor 2']
def test_runnable_output(self):
executor = Executor()
with patch.object(Runnable, 'run', return_value=TestExecutor.output):
executor.run(Runnable())
executor.wait()
results = executor.results
self.assertEqual(1, len(results))
self.assertEqual(TestExecutor.output, results[0])
def test_runnable_outputs(self):
executor = Executor()
runnable = Runnable()
with patch.object(Runnable, 'run', side_effect=TestExecutor.outputs):
executor.run(runnable)
executor.run(runnable)
executor.wait()
results = executor.results
self.assertListEqual(TestExecutor.outputs, results)
def test_function_output(self):
executor = Executor()
executor.run_function(background_function)
executor.wait()
output = executor.results[0]
self.assertEqual(TestExecutor.output, output)
def test_function_outputs(self):
executor = Executor()
runnable = Runnable()
with patch.object(Runnable, 'run', side_effect=TestExecutor.outputs):
executor.run(runnable)
executor.run(runnable)
executor.wait()
results = executor.results
self.assertListEqual(TestExecutor.outputs, results)
def test_against_runnable_memory_leak(self):
executor = Executor()
with patch.object(Runnable, 'run'):
executor.run(Runnable())
executor.wait()
self.assertEqual(0, len(executor._future_runnables))
def test_against_function_memory_leak(self):
executor = Executor()
executor.run_function(background_function)
executor.wait()
self.assertEqual(0, len(executor._function_titles))
def test_if_shutdown_shutdowns_executor(self):
executor = Executor()
executor._executor = Mock()
executor.shutdown()
executor._executor.shutdown.called_once_with()
def test_if_shutdown_clears_function_resources(self):
executor = Executor()
executor._function_titles = Mock()
executor.shutdown()
executor._function_titles.clear.assert_called_once_with()
def test_if_shutdown_clears_runnable_resources(self):
executor = Executor()
executor._future_runnables = Mock()
executor.shutdown()
executor._future_runnables.clear.assert_called_once_with()
def test_exception_logging(self):
executor = Executor()
executor._log = Mock()
with patch.object(Runnable, 'run', side_effect=Exception):
executor.run(Runnable)
executor.wait()
self.assertEqual(1, executor._log.error.call_count)
@patch.object(ThreadPoolExecutor, '__init__', return_value=None)
def test_specified_max_workers(self, pool_mock):
max = 42
Executor(max)
pool_mock.assert_called_once_with(42)
def test_calledprocesserror_logging(self):
executor = Executor()
executor._log = Mock()
exception = CalledProcessError(returncode=1, cmd='command')
with patch.object(Runnable, 'run', side_effect=exception):
executor.run(Runnable)
executor.wait()
self.assertEqual(1, executor._log.error.call_count)
def test_if_logged_title_is_hidden_if_it_equals_command(self):
command = 'command'
runnable = Runnable()
runnable.title = command
exception = CalledProcessError(returncode=1, cmd=command)
runnable.run = Mock(side_effect=exception)
executor = Executor()
executor._log = Mock()
executor.run(runnable)
executor.wait()
executor._log.error.assert_called_once_with(Matcher(has_not_title))
def test_logged_title_when_it_differs_from_command(self):
command, title = 'command', 'title'
runnable = Runnable()
runnable.title = title
exception = CalledProcessError(returncode=1, cmd=command)
runnable.run = Mock(side_effect=exception)
executor = Executor()
executor._log = Mock()
executor.run(runnable)
executor.wait()
executor._log.error.assert_called_once_with(Matcher(has_title))
def has_title(msg):
return re.match("(?ims).*Title", msg) is not None
def has_not_title(msg):
return re.match("(?ims).*Title", msg) is None
class Matcher:
def __init__(self, compare):
self.compare = compare
def __eq__(self, msg):
return self.compare(msg)
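# Note: Matcher works because mock compares recorded call arguments with ==,
# so passing a Matcher lets assert_called_once_with accept any log message
# for which the wrapped compare() predicate returns True.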
def background_function():
return TestExecutor.output
if __name__ == '__main__':
unittest.main()
|
wooey/Wooey | wooey/tasks.py | Python | bsd-3-clause | 10,721 | 0.002332 |
from __future__ import absolute_import
import os
import subprocess
import sys
import tarfile
import tempfile
import traceback
import zipfile
from threading import Thread
import six
from django.utils.text import get_valid_filename
from django.core.files import File
from django.conf import settings
from celery import Task
from celery import app
from celery.schedules import crontab
from celery.signals import worker_process_init
from .backend import utils
from . import settings as wooey_settings
try:
from Queue import Empty, Queue
except ImportError:
from queue import Empty, Queue # python 3.x
ON_POSIX = 'posix' in sys.builtin_module_names
celery_app = app.app_or_default()
def enqueue_output(out, q):
for line in iter(out.readline, b''):
q.put(line.decode('utf-8'))
try:
out.close()
except IOError:
pass
def output_monitor_queue(queue, out):
p = Thread(target=enqueue_output, args=(out, queue))
p.start()
return p
def update_from_output_queue(queue, out):
lines = []
while True:
try:
line = queue.get_nowait()
lines.append(line)
except Empty:
break
out += ''.join(map(str, lines))
return out
@worker_process_init.connect
def configure_workers(*args, **kwargs):
# this sets up Django on nodes started by the worker daemon.
import django
django.setup()
class WooeyTask(Task):
pass
# def after_return(self, status, retval, task_id, args, kwargs, einfo):
# job, created = WooeyJob.objects.get_or_create(wooey_celery_id=task_id)
# job.content_type.wooey_celery_state = status
# job.save()
def get_latest_script(script_version):
"""Downloads the latest script version to the local storage.
:param script_version: :py:class:`~wooey.models.core.ScriptVersion`
:return: boolean
Returns true if a new version was downloaded.
"""
script_path = script_version.script_path
local_storage = utils.get_storage(local=True)
script_exists = local_storage.exists(script_path.name)
if not script_exists:
local_storage.save(script_path.name, script_path.file)
return True
else:
# If script exists, make sure the version is valid, otherwise fetch a new one
script_contents = local_storage.open(script_path.name).read()
script_checksum = utils.get_checksum(buff=script_contents)
if script_checksum != script_version.checksum:
tf = tempfile.TemporaryFile()
with tf:
tf.write(script_contents)
tf.seek(0)
local_storage.delete(script_path.name)
local_storage.save(script_path.name, tf)
return True
return False
@celery_app.task(base=WooeyTask)
def submit_script(**kwargs):
job_id = kwargs.pop('wooey_job')
resubmit = kwargs.pop('wooey_resubmit', False)
from .models import WooeyJob, UserFile
job = WooeyJob.objects.get(pk=job_id)
stdout, stderr = '', ''
try:
command = utils.get_job_commands(job=job)
if resubmit:
# clone ourselves, setting pk=None seems hackish but it works
job.pk = None
# This is where the script works from -- it is what is after the media_root since that may change between
# setups/where our user uploads are stored.
cwd = job.get_output_path()
abscwd = os.path.abspath(os.path.join(settings.MEDIA_ROOT, cwd))
job.command = ' '.join(command)
job.save_path = cwd
utils.mkdirs(abscwd)
# make sure we have the script, otherwise download it. This can happen if we have an ephemeral file system or are
# executing jobs on a worker node.
get_latest_script(job.script_version)
job.status = WooeyJob.RUNNING
job.save()
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=abscwd, bufsize=0)
# We need to capture the IO in separate reader threads, otherwise the reads will block one another
# i.e. a check against stderr will sit waiting on stderr before returning
# we use Queues to communicate
qout, qerr = Queue(), Queue()
pout = output_monitor_queue(qout, proc.stdout)
perr = output_monitor_queue(qerr, proc.stderr)
prev_std = None
def check_output(job, stdout, stderr, prev_std):
# Check for updates from either (non-blocking)
stdout = update_from_output_queue(qout, stdout)
stderr = update_from_output_queue(qerr, stderr)
# If there are changes, update the db
if (stdout, stderr) != prev_std:
job.update_realtime(stdout=stdout, stderr=stderr)
prev_std = (stdout, stderr)
return stdout, stderr, prev_std
# Loop until the process is complete + both stdout/stderr have EOFd
while proc.poll() is None or pout.is_alive() or perr.is_alive():
stdout, stderr, prev_std = check_output(job, stdout, stderr, prev_std)
# Catch any remaining output
try:
proc.stdout.flush()
except ValueError: # Handle if stdout is closed
pass
stdout, stderr, prev_std = check_output(job, stdout, stderr, prev_std)
return_code = proc.returncode
# tar/zip up the generated content for bulk downloads
def get_valid_file(cwd, name, ext):
out = os.path.join(cwd, name)
index = 0
while os.path.exists(six.u('{}.{}').format(out, ext)):
index += 1
out = os.path.join(cwd, six.u('{}_{}').format(name, index))
return six.u('{}.{}').format(out, ext)
# fetch the job again in case the database connection was lost during the job or something else changed.
job = WooeyJob.objects.get(pk=job_id)
# if there are files generated, make zip/tar files for download
if len(os.listdir(abscwd)):
tar_out = get_valid_file(abscwd, get_valid_filename(job.job_name), 'tar.gz')
tar = tarfile.open(tar_out, "w:gz")
tar_name = os.path.splitext(os.path.splitext(os.path.split(tar_out)[1])[0])[0]
tar.add(abscwd, arcname=tar_name)
tar.close()
zip_out = get_valid_file(abscwd, get_valid_filename(job.job_name), 'zip')
zip = zipfile.ZipFile(zip_out, "w")
arcname = os.path.splitext(os.path.split(zip_out)[1])[0]
zip.write(abscwd, arcname=arcname)
base_dir = os.path.split(zip_out)[0]
for root, folders, filenames in os.walk(base_dir):
for filename in filenames:
path = os.path.join(root, filename)
archive_name = path.replace(base_dir, '')
if archive_name.startswith(os.path.sep):
archive_name = archive_name.replace(os.path.sep, '', 1)
archive_name = os.path.join(arcname, archive_name)
if path == tar_out:
continue
if path == zip_out:
continue
try:
zip.write(path, arcname=archive_name)
except:
stderr = '{}\n{}'.format(stderr, traceback.format_exc())
try:
zip.close()
except:
stderr = '{}\n{}'.format(stderr, traceback.format_exc())
# save all the files generated as well to our default storage for ephemeral storage setups
if wooey_settings.WOOEY_EPHEMERAL_FILES:
for root, folders, files in os.walk(abscwd):
for filename in files:
filepath = os.path.join(root, filename)
s3path = os.path.join(root[root.find(cwd):], filename)
remote = utils.get_storage(local=False)
exists = remote.exists(s3path)
filesize = remote.size(s3path) if exists else 0
if not exists or (ex
|
segasai/astrolibpy | my_utils/from_hex.py | Python | gpl-3.0 | 1,118 | 0.027728 |
# Copyright (C) 2009-2010 Sergey Koposov
# This file is part of astrolibpy
#
# astrolibpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# astrolibpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with astrolibpy. If not, see <http://www.gnu.org/licenses/>.
import numpy, re
def from_hex(arr, delim=':'):
r=re.compile('\s*(\-?)(.+)%s(.+)%s(.+)'%(delim,delim))
ret=[]
for a in arr:
m = r.search(a)
sign = m.group(1)=='-'
if sign:
sign=-1
else:
sign=1
i1 = int(m.group(2))
i2 = int(m.group(3))
i3 = float(m.group(4))
val = sign*(int(i1)+int(i2)/60.+(float(i3))/3600.)
ret.append(val)
return numpy.array(ret)
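# A quick worked example (not part of the original module): each field is
# split on the delimiter and recombined as sign * (h + m/60 + s/3600).
if __name__ == '__main__':
    # "-10:20:30" -> -(10 + 20/60. + 30/3600.) = -10.34166..., "5:30:00" -> 5.5
    print(from_hex(['-10:20:30', '5:30:00']))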
|
perkinslr/pypyjs | tools/rematcher.py | Python | mit | 4,972 | 0.000402 |
import os
import sys
from time import time as clock
from rpython.rlib import jit
from rpython.rlib import rrandom
from rpython.jit.codewriter.policy import JitPolicy
# The regex is built up from a combination of individual Regex objects.
# Each is responsible for implementing a specific operator.
class Regex(object):
_immutable_fields_ = ["empty"]
def __init__(self, empty):
self.empty = empty
self.marked = 0
def reset(self):
self.marked = 0
def shift(self, c, mark):
marked = self._shift(c, mark)
self.marked = marked
return marked
class Char(Regex):
_immutable_fields_ = ["c"]
def __init__(self, c):
Regex.__init__(self, 0)
self.c = c
def _shift(self, c, mark):
return mark & (c == self.c)
class Epsilon(Regex):
def __init__(self):
Regex.__init__(self, empty=1)
def _shift(self, c, mark):
return 0
class Binary(Regex):
_immutable_fields_ = ["left", "right"]
def __init__(self, left, right, empty):
Regex.__init__(self, empty)
self.left = left
self.right = right
def reset(self):
self.left.reset()
self.right.reset()
Regex.reset(self)
class Alternative(Binary):
def __init__(self, left, right):
empty = left.empty | right.empty
Binary.__init__(self, left, right, empty)
def _shift(self, c, mark):
marked_left = self.left.shift(c, mark)
marked_right = self.right.shift(c, mark)
return marked_left | marked_right
class Repetition(Regex):
_immutable_fields_ = ["re"]
def __init__(self, re):
Regex.__init__(self, 1)
self.re = re
def _shift(self, c, mark):
return self.re.shift(c, mark | self.marked)
def reset(self):
self.re.reset()
Regex.reset(self)
class Sequence(Binary):
def __init__(self, left, right):
empty = left.empty & right.empty
Binary.__init__(self, left, right, empty)
def _shift(self, c, mark):
old_marked_left = self.left.marked
marked_left = self.left.shift(c, mark)
marked_right = self.right.shift(
c, old_marked_left | (mark & self.left.empty))
return (marked_left & self.right.empty) | marked_right
# The matching loop just shifts each character from the input string
# into the regex object. If it's "marked" by the time we hit the
# end of the string, then it matches.
jitdriver = jit.JitDriver(reds="auto", greens=["re"])
def match(re, s):
if not s:
return re.empty
result = re.shift(s[0], 1)
i = 1
while i < len(s):
jitdriver.jit_merge_point(re=re)
result = re.shift(s[i], 0)
i += 1
re.reset()
return result
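# A tiny illustrative check (not in the original file) of the marked-regex
# mechanics above: (a|b)*a accepts "ba" and rejects "bb".
def _selftest():
    pattern = Sequence(Repetition(Alternative(Char("a"), Char("b"))), Char("a"))
    assert match(pattern, "ba") == 1
    assert match(pattern, "bb") == 0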
def entry_point(argv):
# Adjust the amount of work we do based on command-line arguments.
# NUM_INPUTS increases the number of loop iterations.
# INPUT_LENGTH increases the amount of work done per loop iteration.
NUM_INPUTS = 1000
INPUT_LENGTH = 50
if len(argv) > 1:
NUM_INPUTS = int(argv[1])
if len(argv) > 2:
INPUT_LENGTH = int(argv[2])
if len(argv) > 3:
raise RuntimeError("too many arguments")
# Build up the regex pattern.
# Target pattern: (a|b)*a(a|b){20}a(a|b)*
# For now we use the same pattern every time, but it must be
# dynamically constructed or it gets eliminated at compile-time.
prefix = Sequence(Repetition(Alternative(Char("a"), Char("b"))), Char("a"))
suffix = Sequence(Char("a"), Repetition(Alternative(Char("a"), Char("b"))))
pattern = prefix
for _ in xrange(20):
pattern = Sequence(pattern, Alternative(Char("a"), Char("b")))
pattern = Sequence(pattern, suffix)
# Generate "random input" to match against the pattern.
# Ideally this would come from the outside world, but stdio
# on pypy.js doesn't seem to work just yet.
print "Generating", NUM_INPUTS, "strings of length", INPUT_LENGTH, "..."
inputs = [None] * NUM_INPUTS
r = rrandom.Random(42)
for i in xrange(len(inputs)):
s = []
for _ in xrange(INPUT_LENGTH):
if r.random() > 0.5:
s.append("a")
else:
s.append("b")
inputs[i] = "".join(s)
# Run each input string through the regex.
# Time how long it takes for the total run.
print "Matching all strings against the regex..."
ts = clock()
for i in xrange(len(inputs)):
# No output, we just want to exercise the loop.
matched = match(pattern, inputs[i])
tdiff = clock() - ts
print "Done!"
print "Matching time for %d strings: %f" % (len(inputs), tdiff)
print "Performed %f matches per second." % (len(inputs) / tdiff,)
return 0
def jitpolicy(driver):
return JitPolicy()
def target(*args):
return entry_point, None
if __name__ == "__main__":
sys.exit(entry_point(sys.argv))
|
tecknicaltom/xhtml2pdf | xhtml2pdf/util.py | Python | apache-2.0 | 27,717 | 0.003355 |
# -*- coding: utf-8 -*-
from reportlab.lib.colors import Color, CMYKColor, getAllNamedColors, toColor, \
HexColor
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
from reportlab.lib.units import inch, cm
import base64
import httplib
import logging
import mimetypes
import os.path
import re
import reportlab
import shutil
import string
import sys
import tempfile
import types
import urllib
import urllib2
import urlparse
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
rgb_re = re.compile("^.*?rgb[(]([0-9]+).*?([0-9]+).*?([0-9]+)[)].*?[ ]*$")
if not(reportlab.Version[0] == "2" and reportlab.Version[2] >= "1"):
raise ImportError("Reportlab Version 2.1+ is needed!")
REPORTLAB22 = (reportlab.Version[0] == "2" and reportlab.Version[2] >= "2")
# print "***", reportlab.Version, REPORTLAB22, reportlab.__file__
log = logging.getLogger("xhtml2pdf")
try:
import cStringIO as StringIO
except:
import StringIO
try:
import pyPdf
except:
pyPdf = None
try:
from reportlab.graphics import renderPM
except:
renderPM = None
try:
from reportlab.graphics import renderSVG
except:
renderSVG = None
#===============================================================================
# Memoize decorator
#===============================================================================
class memoized(object):
"""
A kwargs-aware memoizer, better than the one in python :)
Don't pass in too large kwargs, since this turns them into a tuple of tuples
Also, avoid mutable types (as usual for memoizers)
What this does is to create a dictionary of {(*parameters):return value},
and uses it as a cache for subsequent calls to the same method.
It is especially useful for functions that don't rely on external variables
and that are called often. It's a perfect match for our getSize etc...
"""
def __init__(self, func):
self.cache = {}
self.func = func
self.__doc__ = self.func.__doc__ # To avoid great confusion
self.__name__ = self.func.__name__ # This also avoids great confusion
def __call__(self, *args, **kwargs):
# Make sure the following line is not actually slower than what you're
# trying to memoize
args_plus = tuple(kwargs.items())
key = (args, args_plus)
if key not in self.cache:
res = self.func(*args, **kwargs)
self.cache[key] = res
return self.cache[key]
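# A small usage sketch for the memoizer above (_scale is hypothetical, not
# part of xhtml2pdf): repeated calls with the same positional and keyword
# arguments are answered from self.cache.
@memoized
def _scale(value, factor=1.0):
    # hypothetical helper, used only to illustrate the decorator
    return value * factor
_scale(12, factor=2.0)  # computed once, cached under ((12,), (('factor', 2.0),))
_scale(12, factor=2.0)  # second call is served from the cache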
def ErrorMsg():
"""
Helper to get a nice traceback as string
"""
import traceback, sys
type = value = tb = limit = None
type, value, tb = sys.exc_info()
list = traceback.format_tb(tb, limit) + traceback.format_exception_only(type, value)
return "Traceback (innermost last):\n" + "%-20s %s" % (
string.join(list[: - 1], ""),
list[ - 1])
def toList(value):
if type(value) not in (types.ListType, types.TupleType):
return [value]
return list(value)
#def _toColor(arg, default=None):
# '''try to map an arbitrary arg to a color instance'''
# if isinstance(arg, Color):
# return arg
# tArg = type(arg)
# if tArg in (types.ListType, types.TupleType):
# assert 3 <= len(arg) <= 4, 'Can only convert 3 and 4 sequences to color'
# assert 0 <= min(arg) and max(arg) <= 1
# return len(arg) == 3 and Color(arg[0], arg[1], arg[2]) or CMYKColor(arg[0], arg[1], arg[2], arg[3])
# elif tArg == types.StringType:
# C = getAllNamedColors()
# s = arg.lower()
# if C.has_key(s): return C[s]
# try:
# return toColor(eval(arg))
# except:
# pass
# try:
# return HexColor(arg)
# except:
# if default is None:
# raise ValueError('Invalid color value %r' % arg)
# return default
@memoized
def getColor(value, default=None):
"""
Convert to color value.
This returns a Color object instance from a text bit.
"""
if isinstance(value, Color):
return value
value = str(value).strip().lower()
if value == "transparent" or value == "none":
return default
if value in COLOR_BY_NAME:
return COLOR_BY_NAME[value]
if value.startswith("#") and len(value) == 4:
value = "#" + value[1] + value[1] + value[2] + value[2] + value[3] + value[3]
elif rgb_re.search(value):
# e.g., value = "<css function: rgb(153, 51, 153)>", go figure:
r, g, b = [int(x) for x in rgb_re.search(value).groups()]
value = "#%02x%02x%02x" % (r, g, b)
else:
# Shrug
pass
return toColor(value, default) # Calling the reportlab function
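# Illustrative calls (not in the original module), grounded in the branches above:
#   getColor("#f0a")               # shorthand hex is expanded to "#ff00aa" first
#   getColor("rgb(153, 51, 153)")  # rewritten to "#993399" via rgb_re
#   getColor("transparent", None)  # returns the supplied default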
def getBorderStyle(value, default=None):
# log.debug(value)
if value and (str(value).lower() not in ("none", "hidden")):
return value
return default
mm = cm / 10.0
dpi96 = (1.0 / 96.0 * inch)
_absoluteSizeTable = {
"1": 50.0 / 100.0,
"xx-small": 50.0 / 100.0,
"x-small": 50.0 / 100.0,
"2": 75.0 / 100.0,
"small": 75.0 / 100.0,
"3": 100.0 / 100.0,
"medium": 100.0 / 100.0,
"4": 125.0 / 100.0,
"large": 125.0 / 100.0,
"5": 150.0 / 100.0,
"x-large": 150.0 / 100.0,
"6": 175.0 / 100.0,
"xx-large": 175.0 / 100.0,
"7": 200.0 / 100.0,
"xxx-large": 200.0 / 100.0,
#"xx-small" : 3./5.,
#"x-small": 3./4.,
#"small": 8./9.,
#"medium": 1./1.,
#"large": 6./5.,
#"x-large": 3./2.,
#"xx-large": 2./1.,
#"xxx-large": 3./1.,
}
_relativeSizeTable = {
"larger": 1.25,
"smaller": 0.75,
"+4": 200.0 / 100.0,
"+3": 175.0 / 100.0,
"+2": 150.0 / 100.0,
"+1": 125.0 / 100.0,
"-1": 75.0 / 100.0,
"-2": 50.0 / 100.0,
"-3": 25.0 / 100.0,
}
MIN_FONT_SIZE = 1.0
@memoized
def getSize(value, relative=0, base=None, default=0.0):
"""
Converts strings to standard sizes.
That is the function taking a string of CSS size ('12pt', '1cm' and so on)
and converts it into a float in a standard unit (in our case, points).
>>> getSize('12pt')
12.0
>>> getSize('1cm')
28.346456692913385
"""
try:
original = value
if value is None:
return relative
elif type(value) is types.FloatType:
return value
elif isinstance(value, int):
return float(value)
elif type(value) in (types.TupleType, types.ListType):
value = "".join(value)
value = str(value).strip().lower().replace(",", ".")
if value[-2:] == 'cm':
return float(value[:-2].strip()) * cm
elif value[-2:] == 'mm':
return (float(value[:-2].strip()) * mm) # 1mm = 0.1cm
elif value[-2:] == 'in':
return float(value[:-2].strip()) * inch # 1pt == 1/72inch
elif value[-2:] == 'inch':
return float(value[:-4].strip()) * inch # 1pt == 1/72inch
elif value[-2:] == 'pt':
return float(value[:-2].strip())
elif value[-2:] == 'pc':
return float(value[:-2].strip()) * 12.0 # 1pc == 12pt
elif value[-2:] == 'px':
return float(value[:-2].strip()) * dpi96 # XXX W3C says, use 96pdi http://www.w3.org/TR/CSS21/syndata.html#length-units
elif value[-1:] == 'i': # 1pt == 1/72inch
return float(value[:-1].strip()) * inch
elif value in ("none", "0", "auto"):
return 0.0
elif relative:
if value[-2:] == 'em': # XXX
return (float(value[:-2].strip()) * relative) # 1em = 1 * fontSize
elif value[-2:] == 'ex': # XXX
return (float(value[:-2].
|
iw3hxn/LibrERP | l10n_it_sale/models/inherit_sale_order.py | Python | agpl-3.0 | 2,854 | 0.002803 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class sale_order(orm.Model):
_inherit = "sale.order"
_columns = {
'cig': fields.char('CIG', size=15, help="Codice identificativo di gara"),
'cup': fields.char('CUP', size=15, help="Codice unico di Progetto")
}
#-----------------------------------------------------------------------------
# AVOID COPYING THE cig/cup NUMBER
#-----------------------------------------------------------------------------
def copy(self, cr, uid, id, default={}, context=None):
default = default or {}
default.update({
'cig': '',
'cup': '',
})
if 'cig' not in default:
default.update({
'cig': False
})
if 'cup' not in default:
default.update({
'cup': False
})
return super(sale_order, self).copy(cr, uid, id, default, context)
def _prepare_invoice(self, cr, uid, order, lines, context):
invoice_vals = super(sale_order, self)._prepare_invoice(cr, uid, order, lines, context)
invoice_vals.update({
'cig': order.cig,
'cup': order.cup,
})
return invoice_vals
def _prepare_order_picking(self, cr, uid, order, context=None):
picking_vals = super(sale_order, self)._prepare_order_picking(cr, uid, order, context)
picking_vals.update({
'cig': order.cig,
'cup': order.cup,
})
return picking_vals
# it is better to use a hook function here, in the hope of speeding this up
def _inv_get(self, cr, uid, order, context=None):
return {
'carriage_condition_id': order.carriage_condition_id.id,
'goods_description_id': order.goods_description_id.id,
'cig': order.cig or '',
'cup': order.cup or ''
}
|
sandeepdsouza93/TensorFlow-15712 | tensorflow/python/client/timeline_test.py | Python | apache-2.0 | 7,065 | 0.00368 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.Timeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow as tf
from tensorflow.python.client import timeline
class TimelineTest(tf.test.TestCase):
def _validateTrace(self, chrome_trace_format):
# Check that the supplied string is valid JSON.
trace = json.loads(chrome_trace_format)
# It should have a top-level key containing events.
self.assertTrue('traceEvents' in trace)
# Every event in the list should have a 'ph' field.
for event in trace['traceEvents']:
self.assertTrue('ph' in event)
def testSimpleTimeline(self):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with tf.device('/cpu:0'):
with tf.Session() as sess:
sess.run(
tf.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
def testTimelineCpu(self):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with self.test_session(use_gpu=False) as sess:
const1 = tf.constant(1.0, name='const1')
const2 = tf.constant(2.0, name='const2')
result = tf.add(const1, const2) + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/cpu:0' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False,
show_dataflow=False)
self._validateTrace(ctf)
def testTimelineGpu(self):
if not tf.test.is_gpu_available(cuda_only=True):
return
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with self.test_session(force_gpu=True) as sess:
const1 = tf.constant(1.0, name='const1')
const2 = tf.constant(2.0, name='const2')
result = tf.add(const1, const2) + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/gpu:0' in devices)
self.assertTrue('/gpu:0/stream:all' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False,
show_dataflow=False)
self._validateTrace(ctf)
def testAnalysisAndAllocations(self):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
config = tf.ConfigProto(device_count={'CPU': 3})
with tf.Session(config=config) as sess:
with tf.device('/cpu:0'):
const1 = tf.constant(1.0, name='const1')
with tf.device('/cpu:1'):
const2 = tf.constant(2.0, name='const2')
with tf.device('/cpu:2'):
result = const1 + const2 + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
tl = timeline.Timeline(run_metadata.step_stats)
step_analysis = tl.analyze_step_stats()
ctf = step_analysis.chrome_trace.format_to_string()
self._validateTrace(ctf)
maximums = step_analysis.allocator_maximums
self.assertTrue('cpu' in maximums)
cpu_max = maximums['cpu']
# At least const1 + const2, both float32s (4 bytes each)
self.assertGreater(cpu_max.num_bytes, 8)
self.assertGreater(cpu_max.timestamp, 0)
self.assertTrue('const1' in cpu_max.tensors)
self.assertTrue('const2' in cpu_max.tensors)
def testManyCPUs(self):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
config = tf.ConfigProto(device_count={'CPU': 3})
with tf.Session(config=config) as sess:
with tf.device('/cpu:0'):
const1 = tf.constant(1.0, name='const1')
with tf.device('/cpu:1'):
const2 = tf.constant(2.0, name='const2')
with tf.device('/cpu:2'):
result = const1 + const2 + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/cpu:0' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/cpu:1' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/cpu:2' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False,
show_dataflow=False)
self._validateTrace(ctf)
if __name__ == '__main__':
tf.test.main()
|
idning/redis-rdb-tools
|
tests/parser_tests.py
|
Python
|
mit
| 14,949
| 0.009967
|
import unittest
import os
import math
from rdbtools import RdbCallback, RdbParser
class RedisParserTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_rdb(self):
r = load_rdb('empty_database.rdb')
self.assert_('start_rdb' in r.methods_called)
self.assert_('end_rdb' in r.methods_called)
self.assertEquals(len(r.databases), 0, msg = "didn't expect any databases")
def test_multiple_databases(self):
r = load_rdb('multiple_databases.rdb')
self.assert_(len(r.databases), 2)
self.assert_(1 not in r.databases)
self.assertEquals(r.databases[0]["key_in_zeroth_database"], "zero")
self.assertEquals(r.databases[2]["key_in_second_database"], "second")
def test_keys_with_expiry(self):
r = load_rdb('keys_with_expiry.rdb')
        expiry = r.expiry[0]['expires_ms_precision']
self.assertEquals(expiry.year, 2022)
self.assertEquals(expiry.month, 12)
self.assertEquals(expiry.day, 25)
self.assertEquals(expiry.hour, 10)
self.assertEquals(expiry.minute, 11)
self.assertEquals(expiry.second, 12)
self.assertEquals(expiry.microsecond, 573000)
def test_integer_keys(self):
r = load_rdb('integer_keys.rdb')
        self.assertEquals(r.databases[0][125], "Positive 8 bit integer")
self.assertEquals(r.databases[0][0xABAB], "Positive 16 bit integer")
self.assertEquals(r.databases[0][0x0AEDD325], "Positive 32 bit integer")
def test_negative_integer_keys(self):
r = load_rdb('integer_keys.rdb')
self.assertEquals(r.databases[0][-123], "Negative 8 bit integer")
self.assertEquals(r.databases[0][-0x7325], "Negative 16 bit integer")
self.assertEquals(r.databases[0][-0x0AEDD325], "Negative 32 bit integer")
def test_string_key_with_compression(self):
r = load_rdb('easily_compressible_string_key.rdb')
key = "".join('a' for x in range(0, 200))
value = "Key that redis should compress easily"
self.assertEquals(r.databases[0][key], value)
def test_zipmap_thats_compresses_easily(self):
r = load_rdb('zipmap_that_compresses_easily.rdb')
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["a"], "aa")
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aa"], "aaaa")
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aaaaa"], "aaaaaaaaaaaaaa")
def test_zipmap_that_doesnt_compress(self):
r = load_rdb('zipmap_that_doesnt_compress.rdb')
self.assertEquals(r.databases[0]["zimap_doesnt_compress"]["MKD1G6"], 2)
self.assertEquals(r.databases[0]["zimap_doesnt_compress"]["YNNXK"], "F7TI")
def test_zipmap_with_big_values(self):
''' See issue https://github.com/sripathikrishnan/redis-rdb-tools/issues/2
Values with length around 253/254/255 bytes are treated specially in the parser
This test exercises those boundary conditions
In order to test a bug with large ziplists, it is necessary to start
Redis with "hash-max-ziplist-value 21000", create this rdb file,
and run the test. That forces the 20kbyte value to be stored as a
ziplist with a length encoding of 5 bytes.
'''
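        # Hypothetical reproduction sketch (not part of the original fixture
        # generation, shown only for orientation):
        #   redis-cli config set hash-max-ziplist-value 21000
        #   redis-cli hset zipmap_with_big_values 20kbytes <20000-byte value>
        #   redis-cli save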
r = load_rdb('zipmap_with_big_values.rdb')
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["253bytes"]), 253)
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["254bytes"]), 254)
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["255bytes"]), 255)
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["300bytes"]), 300)
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["20kbytes"]), 20000)
def test_hash_as_ziplist(self):
'''In redis dump version = 4, hashmaps are stored as ziplists'''
r = load_rdb('hash_as_ziplist.rdb')
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["a"], "aa")
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aa"], "aaaa")
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aaaaa"], "aaaaaaaaaaaaaa")
def test_dictionary(self):
r = load_rdb('dictionary.rdb')
self.assertEquals(r.lengths[0]["force_dictionary"], 1000)
self.assertEquals(r.databases[0]["force_dictionary"]["ZMU5WEJDG7KU89AOG5LJT6K7HMNB3DEI43M6EYTJ83VRJ6XNXQ"],
"T63SOS8DQJF0Q0VJEZ0D1IQFCYTIPSBOUIAI9SB0OV57MQR1FI")
self.assertEquals(r.databases[0]["force_dictionary"]["UHS5ESW4HLK8XOGTM39IK1SJEUGVV9WOPK6JYA5QBZSJU84491"],
"6VULTCV52FXJ8MGVSFTZVAGK2JXZMGQ5F8OVJI0X6GEDDR27RZ")
def test_ziplist_that_compresses_easily(self):
r = load_rdb('ziplist_that_compresses_easily.rdb')
self.assertEquals(r.lengths[0]["ziplist_compresses_easily"], 6)
for idx, length in enumerate([6, 12, 18, 24, 30, 36]) :
self.assertEquals(("".join("a" for x in xrange(length))), r.databases[0]["ziplist_compresses_easily"][idx])
def test_ziplist_that_doesnt_compress(self):
r = load_rdb('ziplist_that_doesnt_compress.rdb')
self.assertEquals(r.lengths[0]["ziplist_doesnt_compress"], 2)
self.assert_("aj2410" in r.databases[0]["ziplist_doesnt_compress"])
self.assert_("cc953a17a8e096e76a44169ad3f9ac87c5f8248a403274416179aa9fbd852344"
in r.databases[0]["ziplist_doesnt_compress"])
def test_ziplist_with_integers(self):
r = load_rdb('ziplist_with_integers.rdb')
expected_numbers = []
for x in range(0,13):
expected_numbers.append(x)
expected_numbers += [-2, 13, 25, -61, 63, 16380, -16000, 65535, -65523, 4194304, 0x7fffffffffffffff]
self.assertEquals(r.lengths[0]["ziplist_with_integers"], len(expected_numbers))
for num in expected_numbers :
self.assert_(num in r.databases[0]["ziplist_with_integers"], "Cannot find %d" % num)
def test_linkedlist(self):
r = load_rdb('linkedlist.rdb')
self.assertEquals(r.lengths[0]["force_linkedlist"], 1000)
self.assert_("JYY4GIFI0ETHKP4VAJF5333082J4R1UPNPLE329YT0EYPGHSJQ" in r.databases[0]["force_linkedlist"])
self.assert_("TKBXHJOX9Q99ICF4V78XTCA2Y1UYW6ERL35JCIL1O0KSGXS58S" in r.databases[0]["force_linkedlist"])
def test_intset_16(self):
r = load_rdb('intset_16.rdb')
self.assertEquals(r.lengths[0]["intset_16"], 3)
for num in (0x7ffe, 0x7ffd, 0x7ffc) :
self.assert_(num in r.databases[0]["intset_16"])
def test_intset_32(self):
r = load_rdb('intset_32.rdb')
self.assertEquals(r.lengths[0]["intset_32"], 3)
for num in (0x7ffefffe, 0x7ffefffd, 0x7ffefffc) :
self.assert_(num in r.databases[0]["intset_32"])
def test_intset_64(self):
r = load_rdb('intset_64.rdb')
self.assertEquals(r.lengths[0]["intset_64"], 3)
for num in (0x7ffefffefffefffe, 0x7ffefffefffefffd, 0x7ffefffefffefffc) :
self.assert_(num in r.databases[0]["intset_64"])
def test_regular_set(self):
r = load_rdb('regular_set.rdb')
self.assertEquals(r.lengths[0]["regular_set"], 6)
for member in ("alpha", "beta", "gamma", "delta", "phi", "kappa") :
self.assert_(member in r.databases[0]["regular_set"], msg=('%s missing' % member))
def test_sorted_set_as_ziplist(self):
r = load_rdb('sorted_set_as_ziplist.rdb')
self.assertEquals(r.lengths[0]["sorted_set_as_ziplist"], 3)
zset = r.databases[0]["sorted_set_as_ziplist"]
self.assert_(floateq(zset['8b6ba6718a786daefa69438148361901'], 1))
self.assert_(floateq(zset['cb7a24bb7528f934b841b34c3a73e0c7'], 2.37))
self.assert_(floateq(zset['523af537946b79c4f8369ed39ba78605'], 3.423))
def test_filtering_by_keys(self):
r = load_rdb('parser_filters.rdb', filters={"keys":"k[0-
|
HPC-buildtest/buildtest-framework
|
docs/scripting_examples/ex1.py
|
Python
|
mit
| 300
| 0.003333
|
import os
from buildtest.defaults import BUILDTEST_ROOT
from buildtest.menu.build import discover_buildspecs
included_bp, excluded_bp = discover_buildspecs(
    buildspec=[os.path.join(BUILDTEST_ROOT, "tutorials")]
)
print(f"discovered_buildspec: {included_bp} excluded bui
|
ldspec: {excluded_bp}")
|
fs714/drcontroller
|
drcontroller/db/init_db.py
|
Python
|
apache-2.0
| 38
| 0
|
from db_Dao import init_db
init_db()
| |
dNG-git/pas_upnp
|
src/dNG/data/upnp/services/abstract_service.py
|
Python
|
gpl-2.0
| 20,705
| 0.008211
|
# -*- coding: utf-8 -*-
"""
direct PAS
Python Application Services
----------------------------------------------------------------------------
(C) direct Netware Group - All rights reserved
https://www.direct-netware.de/redirect?pas;upnp
The following license agreement remains valid unless any additions or
changes are being made by direct Netware Group in a written form.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
----------------------------------------------------------------------------
https://www.direct-netware.de/redirect?licenses;gpl
----------------------------------------------------------------------------
#echo(pasUPnPVersion)#
#echo(__FILEPATH__)#
"""
from dNG.data.text.link import Link
from dNG.data.upnp.client_settings_mixin import ClientSettingsMixin
from dNG.data.upnp.service import Service
from dNG.data.upnp.upnp_exception import UpnpException
from dNG.data.upnp.variable import Variable
from dNG.plugins.hook import Hook
from dNG.runtime.not_implemented_exception import NotImplementedException
from dNG.runtime.type_exception import TypeException
class AbstractService(Service, ClientSettingsMixin):
"""
An extended, abstract service implementation for server services.
:author: direct Netware Group et al.
:copyright: direct Netware Group - All rights reserved
:package: pas
:subpackage: upnp
:since: v0.2.00
:license: https://www.direct-netware.de/redirect?licenses;gpl
GNU General Public License 2
"""
def __init__(self):
"""
Constructor __init__(AbstractService)
:since: v0.2.00
"""
Service.__init__(self)
ClientSettingsMixin.__init__(self)
self.configid = None
"""
UPnP configId value
"""
self.host_service = False
"""
UPnP service is managed by host
"""
self.type = None
"""
UPnP service type
"""
self.udn = None
"""
UPnP UDN value
"""
self.upnp_domain = None
"""
UPnP service specification domain
"""
self.version = None
"""
UPnP service type version
"""
#
def add_host_action(self, action, argument_variables = None, return_variable = None, result_variables = None):
"""
Adds the given host service action.
:param action: SOAP action
:param argument_variables: Argument variables definition
:param return_variable: Return variable definition
:param result_variables: Result variables definition
:since: v0.2.00
"""
if (action not in self.actions):
if (argument_variables is None): argument_variables = [ ]
elif (type(argument_variables) is not list): raise TypeException("Given argument variables definition is invalid")
if (return_variable is None): return_variable = { }
elif (type(return_variable) is not dict): raise TypeException("Given return variables definition is invalid")
if (result_variables is None): result_variables = [ ]
elif (type(result_variables) is not list): raise TypeException("Given result variables definition is invalid")
self.actions[action] = { "argument_variables": argument_variables,
"return_variable": return_variable,
"result_variables": result_variables
}
#
#
def add_host_variable(self, name, definition):
"""
Adds the given host service variable.
:param name: Variable name
:param definition: Variable definition
:since: v0.2.00
"""
if (name not in self.variables):
if (type(definition) is not dict): raise TypeException("Given variable definition is invalid")
self.variables[name] = definition
#
#
def get_name(self):
"""
Returns the UPnP service name (URN without version).
:return: (str) Service name
:since: v0.2.00
"""
return ("{0}:service:{1}".format(self.upnp_domain, self.type) if (self.host_service) else Service.get_name(self))
#
def get_service_id(self):
"""
Returns the UPnP serviceId value.
:return: (str) UPnP serviceId value
:since: v0.2.00
"""
return (self.service_id if (self.host_service) else Service.get_service_id(self))
#
def get_service_id_urn(self):
"""
Returns the UPnP serviceId value.
:return: (str) UPnP serviceId URN
:since: v0.2.00
"""
return ("{0}:serviceId:{1}".format(self.upnp_domain, self.service_id) if (self.host_service) else Service.get_service_id_urn(self))
#
def get_type(self):
"""
Returns the UPnP service type.
:return: (str) Service type
:since: v0.2.00
"""
return (self.type if (self.host_service) else Service.get_type(self))
#
def get_udn(self):
"""
Returns the UPnP UDN value.
:return: (str) UPnP device UDN
:since: v0.2.00
"""
return (self.udn if (self.host_service) else Service.get_udn(self))
#
def get_upnp_domain(self):
"""
Returns the UPnP service specification domain.
:return: (str) UPnP device UUID
:since: v0.2.00
"""
return (self.upnp_domain if (self.host_service) else Service.get_upnp_domain(self))
#
def get_urn(self):
"""
Returns the UPnP serviceType value.
:return: (str) UPnP URN
:since: v0.2.00
"""
return ("{0}:service:{1}:{2}".format(self.get_upnp_domain(), self.get_type(), self.get_version())
if (self.host_service) else
Service.get_urn(self)
)
#
def get_version(self):
"""
Returns the UPnP service type version.
:return: (str) Service type version
:since: v0.2.00
"""
return (self.version if (self.host_service) else Service.get_version(self))
#
def get_xml(self):
"""
Returns the UPnP SCPD.
:return: (str) UPnP SCPD XML
:since: v0.2.00
"""
xml_resource = self._get_xml(self._init_xml_resource())
return xml_resource.export_cache(True)
#
def _get_xml(self, xml_resource):
"""
Returns the UPnP SCPD.
:param xml_resource: XML resource
:return: (object) UPnP SCPD XML resource
:since: v0.2.00
"""
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}._get_xml()- (#echo(__LINE__)#)", self, context = "pas_upnp")
client_settings = self.get_client_settings()
if (not client_settings.get("upnp_xml_cdata_encoded", False)): xml_resource.set_cdata_encoding(False)
attributes = { "xmlns": "urn:schemas-upnp-org:service-1-0" }
if (self.configid is not None): attributes['configId'] = self.configid
xml_resource.add_node("scpd", attributes = attributes)
xml_resource.set_cached_node("scpd")
spec_version = (self.get_spec_version()
if (client_settings.get("upnp_spec_versioning_supported", True)) else
( 1, 0 )
)
xml_resource.add_node("scpd specVersion major", spec_version[0])
xml_resource.add_node("scpd specVersion minor", spec_version[1])
if (len(self.actions) > 0):
position = 0
for action_name in self.actions:
xml_base_path = "scpd actionList action#{0:d}".format(position)
xml_resource.add_node(xml_base_path)
xml_resource.set_cached_node(xml_base_path)
action = self.a
|
markbrough/iati-country-tester
|
segment_ro.py
|
Python
|
mit
| 3,383
| 0.005912
|
#!/usr/bin/env python
# Takes apart large IATI XML files and outputs one file per reporting org.
# Copyright 2013 Mark Brough.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License v3.0 as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
from lxml import etree
import unicodecsv
import sys
import os
# FIXME: if there are multiple countries/countries+regions, then don't
# output to the same file.
def segment_file(prefix, filename, output_directory):
print "Segmenting file", filename
doc=etree.parse(os.path.join(filename))
extorgs = set(doc.xpath("//iati-activity/reporting-org/@ref"))
print "Found orgs", list(extorgs)
out = {}
iatiactivities = doc.xpath('//iati-activities')[0]
for org in extorgs:
out[org] = {
'title': prefix.upper() + " Activity file " + org,
'data': etree.Element('iati-activities')
}
for attribute, attribute_value in iatiactivities.items():
out[org]['data'].set(attribute, attribute_value)
activities = doc.xpath('//iati-activity')
for activity in activities:
if (activity.xpath("reporting-org/@ref")) and (activity.xpath("reporting-org/@ref")[0] != ""):
org = activity.xpath("reporting-org/@ref")[0]
out[org]['orgname'] = activity.xpath("reporting-org/text()")[0] if activity.xpath("reporting-org/text()") else ""
out[org]['orgtype'] = activity.xpath("reporting-org/@type")[0] if activity.xpath("reporting-org/@type") else ""
out[org]['data'].append(activity)
# Create metadata file...
fieldnames = ['org', 'orgname', 'orgtype', 'official', 'filename', 'url',
'package_name', 'package_title']
metadata_file = open(output_directory + 'metadata.csv', 'w')
metadata = unicodecsv.DictWriter(metadata_file, fieldnames)
metadata.writeheader()
for org, data in out.items():
print "Writing data for", org
# Check not empty
if data['data'].xpath('//iati-activity'):
d = etree.ElementTree(data['data'])
d.write(output_directory+prefix+"-"+org+".xml",
pretty_print=True,
xml_declaration=True,
encoding="UTF-8")
metadata.writerow({
'org':org,
'orgname':data['orgname'],
'orgtype':data['orgtype'],
'filename':prefix+"-"+org
|
+'.xml',
'package_name': prefix+"-"+org,
'package_title': data['title']})
print "Finished writing data, find the files in", output_directory
metadata_file.close()
if __name__ == '__main__':
arguments = sys.argv
arguments.pop(0)
prefix = arguments[0]
arguments.pop(0)
filenames = arguments
output_directory = 'data/'
if not filenames:
print "No filenames"
else:
for filename in filenames:
segment_file(prefix, filename, output_directory)
|
sharmaking/CoIntegrationAnalysis
|
windowController.py
|
Python
|
mit
| 579
| 0.03255
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#windowController.py
from PyQt4 import QtGui
import sys, multiprocessing
import mainWindow, windowListerner
class QWindowsController(multiprocessing.Process):
def __init__(self, messageBox):
super(QWindowsController, self).__init__()
self.messageBox = messageBox
def run(self):
app = QtGui.QApplication(sys.argv)
QMain = mainWindow.QMainWindow()
        # UI message handling thread
        wListerner = windowListerner.QWindowListerner(QMain, self.messageBox)
wListerner.start()
        # Show the main window
QMain.show()
sys.exit(app.exec_())
|
alexmojaki/birdseye
|
birdseye/__main__.py
|
Python
|
mit
| 72
| 0
|
from birdseye.server import main
if __name__ == '__main__':
main()
|
mattvonrocketstein/smash
|
smashlib/ipy3x/html/services/kernelspecs/tests/test_kernelspecs_api.py
|
Python
|
mit
| 4,428
| 0.001355
|
# coding: utf-8
"""Test the kernel specs webservice API."""
import errno
import io
import json
import os
import shutil
pjoin = os.path.join
import requests
from IPython.kernel.kernelspec import NATIVE_KERNEL_NAME
from IPython.html.utils import url_path_join
from IPython.html.tests.launchnotebook import NotebookTestBase, assert_http_error
# Copied from IPython.kernel.tests.test_kernelspec so updating that doesn't
# break these tests
sample_kernel_json = {'argv': ['cat', '{connection_file}'],
'display_name': 'Test kernel',
}
some_resource = u"The very model of a modern major general"
class KernelSpecAPI(object):
"""Wrapper for notebook API calls."""
def __init__(self, base_url):
self.base_url = base_url
def _req(self, verb, path, body=None):
response = requests.request(verb,
url_path_join(self.base_url, path),
data=body,
)
response.raise_for_status()
return response
def list(self):
return self._req('GET', 'api/kernelspecs')
def kernel_spec_info(self, name):
return self._req('GET', url_path_join('api/kernelspecs', name))
def kernel_resource(self, name, path):
        return self._req('GET', url_path_join('kernelspecs', name, path))
class APITest(NotebookTestBase):
"""Test the kernelspec web service API"""
def setUp(self):
ipydir = self.ipython_dir.name
sample_kernel_dir = pjoin(ipydir, 'kernels', 'sample')
try:
os.makedirs(sample_kernel_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
        with open(pjoin(sample_kernel_dir, 'kernel.json'), 'w') as f:
json.dump(sample_kernel_json, f)
with io.open(pjoin(sample_kernel_dir, 'resource.txt'), 'w',
encoding='utf-8') as f:
f.write(some_resource)
self.ks_api = KernelSpecAPI(self.base_url())
def test_list_kernelspecs_bad(self):
"""Can list kernelspecs when one is invalid"""
bad_kernel_dir = pjoin(self.ipython_dir.name, 'kernels', 'bad')
try:
os.makedirs(bad_kernel_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
with open(pjoin(bad_kernel_dir, 'kernel.json'), 'w') as f:
f.write("garbage")
model = self.ks_api.list().json()
assert isinstance(model, dict)
self.assertEqual(model['default'], NATIVE_KERNEL_NAME)
specs = model['kernelspecs']
assert isinstance(specs, dict)
# 2: the sample kernelspec created in setUp, and the native Python
# kernel
self.assertGreaterEqual(len(specs), 2)
shutil.rmtree(bad_kernel_dir)
def test_list_kernelspecs(self):
model = self.ks_api.list().json()
assert isinstance(model, dict)
self.assertEqual(model['default'], NATIVE_KERNEL_NAME)
specs = model['kernelspecs']
assert isinstance(specs, dict)
# 2: the sample kernelspec created in setUp, and the native Python
# kernel
self.assertGreaterEqual(len(specs), 2)
def is_sample_kernelspec(s):
return s['name'] == 'sample' and s['display_name'] == 'Test kernel'
def is_default_kernelspec(s):
return s['name'] == NATIVE_KERNEL_NAME and s['display_name'].startswith("IPython")
assert any(is_sample_kernelspec(s) for s in specs.values()), specs
assert any(is_default_kernelspec(s) for s in specs.values()), specs
def test_get_kernelspec(self):
spec = self.ks_api.kernel_spec_info(
'Sample').json() # Case insensitive
self.assertEqual(spec['display_name'], 'Test kernel')
def test_get_nonexistant_kernelspec(self):
with assert_http_error(404):
self.ks_api.kernel_spec_info('nonexistant')
def test_get_kernel_resource_file(self):
res = self.ks_api.kernel_resource('sAmple', 'resource.txt')
self.assertEqual(res.text, some_resource)
def test_get_nonexistant_resource(self):
with assert_http_error(404):
self.ks_api.kernel_resource('nonexistant', 'resource.txt')
with assert_http_error(404):
self.ks_api.kernel_resource('sample', 'nonexistant.txt')
|
FedoraScientific/salome-paravis
|
test/VisuPrs/StreamLines/F7.py
|
Python
|
lgpl-2.1
| 1,521
| 0.001972
|
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/StreamLines/F7 case
# Create Stream Lines for all fields of the given MED file
import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis
# Directory for saving snapshots
picturedir = get_picture_dir("StreamLines/F7")
# Create presentations
myParavis = paravis.myParavis
file = datadir + "occ4050.med"
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "\nCreatePrsForFile..."
CreatePrsForFile(myParavis, file, [PrsTypeEnum.STREAMLINES], picturedir, pictureext)
|
Drowrin/Weeabot
|
cogs/anilist.py
|
Python
|
mit
| 4,392
| 0.003189
|
from datetime import datetime, timedelta
import dateutil.parser
import discord
from discord.ext import commands
import utils
def not_season_or_year(ctx):
now = datetime.now()
return AniList.seasons[now.month // 3] not in ctx.message.content or str(now.year) not in ctx.message.content
class AniList(utils.SessionCog):
"""Commands that access AniList. Mostly just for seasonal anime."""
daynames = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
seasons = ["winter", "spring", "summer", "fall"]
season_colors = {
'winter': discord.Colour.lighter_grey(),
'spring': discord.Colour.green(),
'summer': discord.Colour.gold(),
'fall': discord.Colour.orange()
}
types = ["tv", "tv short"]
@commands.command()
@utils.cooldown_reset_if(not_season_or_year)
@commands.cooldown(1, 1800, commands.BucketType.channel)
async def anime_list(self, season=None, year=None):
"""Lists anime airing in a given season, or the current season if none is specified.
Can take both year and season because of the rollover into winter season."""
def datestr(da: datetime):
if da is None:
return "Not Listed"
return dateutil.parser.parse(da).strftime("%m/%d/%Y")
token = await self.check_token()
now = datetime.now()
season = season or self.seasons[now.month // 3]
year = year or now.year
days = [[], [], [], [], [], [], []]
m = await self.bot.say("collecting info")
for t in self.types:
params = {"access_token": token, "year": year, "season": season, "type": t}
url = "https://anilist.co/api/browse/anime"
async with self.session.get(url, params=params) as r:
js = await r.json()
if r.status != 200:
await self.bot.edit_message(m, f"error in api call: response {r.status}\n{r.reason}\n{js['error_message']}")
return
for anime in js:
if not anime["adult"]:
url = f"https://anilist.co/api/anime/{anime['id']}"
async with self.session.get(url, params={"access_token": token}) as r2:
anime = await r2.json()
                            d = dateutil.parser.parse(anime["start_date"])
days[d.weekday()].append(anime)
anilist_url = f'http://anilist.co/browse/anime?sort=start_date-desc&year={year}&season={season}'
e: discord.Embed = discord.Embed(
title=f"{season.title()} {year} Anime",
url=anilist_url,
color=self.season_colors[season]
)
for day, shows in enumerate(days):
shows = sorted(shows, key=lambda a: a['start_date_fuzzy'])
value = [
f"""*{anime['title_romaji']}*
{datestr(anime['start_date'])} — {datestr(anime['end_date'])}
{f"Time until next episode: {utils.down_to_minutes(timedelta(seconds=anime['airing']['countdown']))}"
if anime['airing'] is not None and 'countdown' in anime['airing'] else ''
}
"""
for anime in shows
]
pages = [[]]
for v in value:
if len('\n'.join(pages[-1])) + len(v) < 1024:
pages[-1].append(v)
else:
pages.append([v])
e.add_field(name=self.daynames[day], value='\n'.join(pages[0]), inline=False)
for p in pages[1:]:
e.add_field(name='\N{ZERO WIDTH SPACE}', value='\n'.join(p), inline=False)
await self.bot.delete_message(m)
await self.bot.say(embed=e)
async def check_token(self):
params = {"client_id": utils.tokens['anilist_id'], "client_secret": utils.tokens['anilist_secret'], "grant_type": "client_credentials"}
url = "https://anilist.co/api/auth/access_token"
async with self.session.post(url, params=params) as r:
if r.status != 200:
await self.bot.say(f"error in check_token call: response {r.status}")
return
token = (await r.json())["access_token"]
return token
def setup(bot):
bot.add_cog(AniList(bot))
|
tschijnmo/drudge
|
drudge/_tceparser.py
|
Python
|
mit
| 5,439
| 0
|
"""
Tensor Contraction Engine output parser.
This module provides parsers of the output of the Tensor Contraction Engine of
So Hirata into Tensor objects in drudge.
"""
import collections
import itertools
import re
from sympy import nsimplify, sympify, Symbol
from drudge import Term
#
# The driver function
# -------------------
#
def parse_tce_out(tce_out, range_cb, base_cb):
"""Parse a TCE output into a list of terms.
A list of terms, and a dictionary of free symbols will be returned.
"""
lines = []
for line in tce_out.splitlines():
stripped = line.strip()
if len(stripped) > 0:
lines.append(stripped)
continue
free_vars = collections.defaultdict(set)
return list(itertools.chain.from_iterable(
_parse_tce_line(line, range_cb, base_cb, free_vars)
for line in lines
)), free_vars
#
# Internal functions
# ------------------
#
def _parse_tce_line(line, range_cb, base_cb, free_vars):
"""Parse a TCE output line into a list of terms.
"""
# Get the initial part in the bracket and the actual term specification
# part after it.
match_res = re.match(
r'^\s*\[(?P<factors>.*)\](?P<term>[^\[\]]+)$',
line
)
if match_res is None:
raise ValueError('Invalid TCE output line', line)
factors_str = match_res.group('factors').strip()
term_str = match_res.group('term').strip()
# Get the actual term in its raw form.
raw_term = _parse_term(term_str, range_cb, base_cb, free_vars)
# Generates the actual list of terms based on the factors, possibly with
# permutations.
return _gen_terms(factors_str, raw_term)
#
# Some constants for the TCE output format
#
_SUM_BASE = 'Sum'
#
# Parsing the term specification
#
def _parse_term(term_str, range_cb, base_cb, free_vars):
"""Parse the term string after the square bracket into a Term.
"""
# First break the string into indexed values.
summed_vars, idxed_vals = _break_into_idxed(term_str)
sums = tuple((Symbol(i), range_cb(i)) for i in summed_vars)
dumms = {i[0] for i in sums}
amp = sympify('1')
for base, indices in idxed_vals:
indices_symbs = tuple(Symbol(i) for i in indices)
for i, j in zip(indices_symbs, indices):
if i not in dumms:
free_vars[range_cb(j)].add(i)
continue
base_symb = base_cb(base, indices_symbs)
amp *= base_symb[indices_symbs]
continue
return Term(sums=sums, amp=amp, vecs=())
def _break_into_idxed(term_str):
"""Break the term string into pairs of indexed base and indices.
Both the base and the indices variables are going to be simple strings in
the return value.
"""
# First break it into fields separated by the multiplication asterisk.
fields = (i for i in re.split(r'\s*\*\s*', term_str) if len(i) > 0)
# Parse the fields one-by-one.
idxed_vals = []
for field in fields:
# Break the field into the base part and the indices part.
match_res = re.match(
r'(?P<base>\w+)\s*\((?P<indices>.*)\)', field
)
if match_res is None:
raise ValueError('Invalid indexed value', field)
# Generate the final result.
idxed_vals.append((
match_res.group('base'),
tuple(match_res.group('indices').split())
))
continue
# Summation always comes first in TCE output.
if idxed_vals[0][0] == _SUM_BASE:
return idxed_vals[0][1], idxed_vals[1:]
else:
return (), idxed_vals
#
# Final term generation based on the raw term
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
def _gen_terms(factors_str, raw_term):
"""Generate the actual terms based on the initial factor string.
    The raw term should be a term directly parsed from the term specification
part of the TCE line. This function will use the factors string in the
square bracket to turn it into a list of terms for the final value of the
line.
"""
# The regular expression for a factor.
factor_regex = r'\s*'.join([
r'(?P<sign>[+-])',
r'(?P<factor_number>[0-9.]+)',
r'(?:\*\s*P\((?P<perm_from>[^=>]*)=>(?P<perm_to>[^)]*)\))?',
    ]) + r'\s*'
mismatch_regex = r'.'
regex = '(?P<factor>{})|(?P<mismatch>{})'.format(
factor_regex, mismatch_regex
)
# Iterate over the factors.
terms = []
for match_res in re.finditer(regex, factors_str):
# Test if the result matches a factor.
if match_res.group('factor') is None:
raise ValueError('Invalid factor string', factors_str)
# The value of the factor.
factor_value = nsimplify(''.join(
match_res.group('sign', 'factor_number')
), rational=True)
# Get the substitution for the permutation of the indices.
if match_res.group('perm_from') is not None:
from_vars = match_res.group('perm_from').split()
to_vars = match_res.group('perm_to').split()
subs = {
Symbol(from_var): Symbol(to_var)
for from_var, to_var in zip(from_vars, to_vars)
}
else:
subs = {}
# Add the result.
terms.append(raw_term.subst(subs).scale(factor_value))
# Continue to the next factor.
continue
return terms
|
prplfoundation/prpl-hypervisor
|
bin/board-control.py
|
Python
|
isc
| 1,093
| 0.013724
|
import serial
from time import sleep
import base64
import sys
def readSerial():
while True:
response = ser.readline();
return response
# main
ser = serial.Serial(port='/dev/ttyACM0', baudrate=115200, timeout=3)
ser.isOpen()
# Wait UART Listener VM to be done.
while(1):
message = readSerial()
if 'Listener' in message:
break
#Requires the keycode to the Litener VM
ser.write('\n'.encode())
ser.flush()
#Receive the keyCode
while(1):
message = readSerial()
if 'keyCode' in message:
hex_keyCode = message[9:-1]
break
print "KeyCode: ", hex_keyCode
binary_keyCode = base64.b16decode(hex_keyCode.upper())
while(1):
print "ARM Commands: "
print "1 - Start"
print "2 - Stop"
c = '0'
while c!='1' and c!='2':
c = raw_input('Input:')
print 'Sending the arm command...'
for i in range(0, len(binary_keyCode)):
ser.write(binary_keyCode[i])
ser.flush()
ser.write(c.encode())
ser.write('\n'.encode())
ser.flush()
print 'Board response: %s' % readSerial()
|
hehongliang/tensorflow
|
tensorflow/contrib/cluster_resolver/python/training/slurm_cluster_resolver.py
|
Python
|
apache-2.0
| 8,923
| 0.003474
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Slurm workload manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import subprocess
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec
class SlurmClusterResolver(ClusterResolver):
"""Cluster Resolver for system with Slurm workload manager.
This is an implementation of cluster resolvers for Slurm clusters. This allows
the specification of jobs and task counts, number of tasks per node, number of
GPUs on each node and number of GPUs for each task, It retrieves system
attributes by Slurm environment variables, resolves allocated computing node
names, construct a cluster and return a Cluster Resolver object which an be
use for distributed TensorFlow.
"""
def _resolve_hostnames(self):
"""Resolve host names of nodes allocated in current jobs.
Returns:
A list of node names as strings.
"""
hostlist = (subprocess.check_output(['scontrol', 'show', 'hostname']).
decode('utf-8').strip().split('\n'))
return hostlist
def __init__(self,
jobs,
port_base=8888,
gpus_per_node=1,
gpus_per_task=1,
tasks_per_node=None,
auto_set_gpu=True,
rpc_layer='grpc'):
"""Creates a new SlurmClusterResolver object.
    This takes in parameters and creates a SlurmClusterResolver object. It uses
    those parameters to check on which nodes the processes will reside and
    resolves their hostnames. Using the number of GPUs on each node and the
    number of GPUs for each task, it offsets the port number for each process
    and allocates GPUs to tasks by setting environment variables. The resolver
    currently supports homogeneous tasks and default Slurm process allocation.
Args:
jobs: Dictionary with job names as key and number of tasks in the job as
value
port_base: The first port number to start with for processes on a node.
gpus_per_node: Number of GPUs available on each node.
gpus_per_task: Number of GPUs to be used for each task.
tasks_per_node: Number of tasks to run on each node, if not set defaults
to Slurm's output environment variable SLURM_NTASKS_PER_NODE.
auto_set_gpu: Set the visible CUDA devices automatically while resolving
the cluster by setting CUDA_VISIBLE_DEVICES environment variable.
        Defaults to True.
rpc_layer: (Optional) The protocol TensorFlow uses to communicate between
nodes. Defaults to 'grpc'.
Returns:
A ClusterResolver object which can be used with distributed TensorFlow.
Raises:
      RuntimeError: If more GPUs per node are requested than available, or more
        tasks are requested than there are assigned tasks.
"""
# check if launched by mpirun
if 'OMPI_COMM_WORLD_RANK' in os.environ:
self._rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
num_tasks = int(os.environ['OMPI_COMM_WORLD_SIZE'])
else:
self._rank = int(os.environ['SLURM_PROCID'])
num_tasks = int(os.environ['SLURM_NTASKS'])
self._jobs = collections.OrderedDict(sorted(jobs.items()))
self._port_base = port_base
# user specification overrides SLURM specification
if tasks_per_node is not None:
self._tasks_per_node = tasks_per_node
elif tasks_per_node is None and 'SLURM_NTASKS_PER_NODE' in os.environ:
self._tasks_per_node = int(os.environ['SLURM_NTASKS_PER_NODE'])
else:
      raise RuntimeError('Neither `tasks_per_node` nor '
'SLURM_NTASKS_PER_NODE is set.')
self._gpus_per_node = gpus_per_node
self._gpus_per_task = gpus_per_task
self._auto_set_gpu = auto_set_gpu
self.task_type = None
self.task_index = None
self.rpc_layer = rpc_layer
self._gpu_allocation = []
self._cluster_allocation = {}
if self._tasks_per_node * self._gpus_per_task > self._gpus_per_node:
      raise RuntimeError('Requested more GPUs per node than available.')
if sum(self._jobs.values()) != num_tasks:
      raise RuntimeError('Requested more tasks than assigned tasks.')
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest instance group info.
This returns a ClusterSpec object for use based on information from the
specified initialization parameters and Slurm environment variables. The
cluster specification is resolved each time this function is called. The
    resolver extracts the hostnames of the nodes via scontrol and packs tasks
    onto nodes in that order until each node has the specified number of tasks.
    GPUs on nodes are allocated to tasks per the specification by setting the
    CUDA_VISIBLE_DEVICES environment variable.
Returns:
A ClusterSpec containing host information retrieved from Slurm's
environment variables.
"""
hostlist = self._resolve_hostnames()
task_list = []
self._gpu_allocation = []
self._cluster_allocation = {}
for host in hostlist:
for port_offset, gpu_offset in zip(
range(self._tasks_per_node),
range(0, self._gpus_per_node, self._gpus_per_task)):
host_addr = '%s:%d' % (host, self._port_base + port_offset)
task_list.append(host_addr)
gpu_id_list = []
for gpu_id in range(gpu_offset, gpu_offset + self._gpus_per_task):
gpu_id_list.append(str(gpu_id))
self._gpu_allocation.append(','.join(gpu_id_list))
cluster_rank_offset_start = 0
cluster_rank_offset_end = 0
for task_type, num_tasks in self._jobs.items():
cluster_rank_offset_end = cluster_rank_offset_start + num_tasks
self._cluster_allocation[task_type] = (
task_list[cluster_rank_offset_start:cluster_rank_offset_end])
if cluster_rank_offset_start <= self._rank < cluster_rank_offset_end:
self.task_type = task_type
self.task_index = self._rank - cluster_rank_offset_start
cluster_rank_offset_start = cluster_rank_offset_end
if self._auto_set_gpu is True:
os.environ['CUDA_VISIBLE_DEVICES'] = self._gpu_allocation[self._rank]
return ClusterSpec(self._cluster_allocation)
def get_task_info(self):
"""Returns job name and task_index for the process which calls this.
This returns the job name and task index for the process which calls this
function according to its rank and cluster specification. The job name and
task index are set after a cluster is constructed by cluster_spec otherwise
defaults to None.
Returns:
      A string specifying the job name the process belongs to and an integer
      specifying the task index the process has within that job.
"""
return self.task_type, self.task_index
def master(self, task_type=None, task_index=None, rpc_layer=None):
"""Returns the master string for connecting to a TensorFlow master.
Args:
task_type: (Optional) Overrides the default auto-selected task type.
      task_index: (Optional) Overrides the default auto-selected task index.
rpc_layer: (Optional) Overrides the default RPC protocol TensorFlow uses
to communicate across nodes.
Returns:
A connection string for connecting to a TensorFlow master.
"""
task_type = task_type if task_type is not None else self.task_typ
|
kickstandproject/wildcard
|
wildcard/dashboards/admin/info/tables.py
|
Python
|
apache-2.0
| 2,073
| 0
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import tables
class ServiceFilterAction(tables.FilterAction):
def filter(self, table, services, filter_string):
q = filter_string.lower()
def comp(service):
if q in service.type.lower():
return True
return False
return filter(comp, services)
def get_stats(service):
return template.loader.render_to_string('admin/services/_stats.html',
{'service': service})
def get_enabled(service, reverse=False):
options = ["Enabled", "Disabled"]
if reverse:
        options.reverse()
# if not configured in this region, neither option makes sense
if service.host:
return options[0] if not service.disabled else options[1]
return None
class ServicesTable(tables.DataTable):
id = tables.Column('id', verbose_name=_('Id'), hidden=True)
name = tables.Column("name", verbose_name=_('Name'))
service_type = tables.Column('__unicode__', verbose_name=_('Service'))
    host = tables.Column('host', verbose_name=_('Host'))
enabled = tables.Column(get_enabled,
verbose_name=_('Enabled'),
status=True)
class Meta:
name = "services"
verbose_name = _("Services")
table_actions = (ServiceFilterAction,)
multi_select = False
status_columns = ["enabled"]
|
pbabik/OGCServer
|
conf/fcgi_app.py
|
Python
|
bsd-3-clause
| 198
| 0.010101
|
from ogcserver.cgiserver import Handler
from jon import fcgi
class OGCServerHandler(Handler):
configpath = '/path/to/ogcserver.conf'
fcgi.Server({fcgi.FCGI_RESPONDER: OGCServerHandler}).run()
|
diblaze/TDP002
|
2.3/testdemo/test/demo_test.py
|
Python
|
mit
| 614
| 0.006515
|
#!/usr/bin/env python
"""
Test module for demo.py.
Runs various tests on the demo module. Simply run this module to test
the demo.py module.
"""
import test
import demo
def test_echo():
print("In echo test")
echo = demo.echo("hej")
test.assert_equal("hej", echo)
test.assert_not_equal(None, echo)
def test_add():
print("In add test")
added = demo.add("hej ", "hopp")
test.assert_equal("hej hopp", added)
test.assert_not_equal("hej", added)
def run_module_tests():
test.run_tests([test_echo,
test_add])
if __name__ == "__main__":
run_module_tests()
| |
ThomasA/pywt
|
pywt/tests/test_functions.py
|
Python
|
mit
| 1,468
| 0
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from numpy.testing import (run_module_suite, assert_almost_equal,
assert_allclose)
import pywt
def test_centrfreq():
# db1 is Haar function, frequency=1
w = pywt.Wavelet('db1')
expected = 1
    result = pywt.centfrq(w, precision=12)
assert_almost_equal(result, expected, decimal=3)
# db2, frequency=2/3
w = pywt.Wavelet('db2')
expected = 2/3.
result = pywt.centfrq(w, precision=12)
assert_almost_equal(result, expected)
def test_scal2frq_scale():
scale = 2
delta = 1
w = pywt.Wavelet('db1')
expected = 1. / scale
result = pywt.scal2frq(w, scale, delta, precision=12)
    assert_almost_equal(result, expected, decimal=3)
def test_scal2frq_delta():
scale = 1
delta = 2
w = pywt.Wavelet('db1')
expected = 1. / delta
result = pywt.scal2frq(w, scale, delta, precision=12)
assert_almost_equal(result, expected, decimal=3)
def test_intwave_orthogonal():
w = pywt.Wavelet('db1')
int_psi, x = pywt.intwave(w, precision=12)
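    # db1 (Haar) has psi = +1 on [0, 0.5) and -1 on [0.5, 1), so its running
    # integral is x for x < 0.5 and (1 - x) for x >= 0.5; checked below.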
ix = x < 0.5
# For x < 0.5, the integral is equal to x
assert_allclose(int_psi[ix], x[ix])
# For x > 0.5, the integral is equal to (1 - x)
# Ignore last point here, there x > 1 and something goes wrong
assert_allclose(int_psi[~ix][:-1], 1 - x[~ix][:-1], atol=1e-10)
if __name__ == '__main__':
run_module_suite()
|
kennethreitz-archive/mead
|
mead/core/tools/date_time.py
|
Python
|
isc
| 3,456
| 0.000289
|
# Part of Mead. See LICENSE file for full copyright and licensing details.
import datetime, re
def datetime_convert(time):
"""
    Convert time to YYYY-MM-DD HH:MM:SS
"""
_time = str(time)
retime = re.compile(r'\W+')
_list = retime.split(_time)
if len(_list) >= 6:
year = int(_list[0])
mounth = int(_list[1])
day = int(_list[2])
hour = int(_list[3])
minute = int(_list[4])
second = int(_list[5])
time = datetime.datetime(year, mounth, day, hour, minute, second)
return time
else:
try:
hour = int(_list[0])
minute = int(_list[1])
second = int(_list[2])
time = datetime.datetime(100, 1, 1, hour, minute, second)
return time
except IndexError:
hour = int(_list[0])
minute = int(_list[1])
time = datetime.datetime(hour, minute)
return time
def date_convert(date):
"""
Convert date to YYYY-MM-DD
"""
_date = str(date)
redate = re.compile(r'\W+')
_list = redate.split(_date)
try:
day = int(_list[0])
mounth = int(_list[1])
year = int(_list[2])
date = datetime.date(year, mounth, day)
return date
except ValueError:
day = int(_list[2])
mounth = int(_list[1])
year = int(_list[0])
date = datetime.date(year, mounth, day)
return date
def time_convert(time):
"""
Convert time to HH:MM:SS
"""
_time = str(time)
retime = re.compile(r'\W+')
_list = retime.split(_time)
try:
hour = int(_list[0])
minute = int(_list[1])
second = int(_list[2])
time = datetime.time(hour, minute, second)
return time
except IndexError:
hour = int(_list[0])
minute = int(_list[1])
time = datetime.time(hour, minute)
return time
def convert_in_second(time):
if time:
_time = str(time)
retime = re.compile(r'\W+')
_list = retime.split(_time)
try:
hour = int(_list[0]) * 3600
minute = int(_list[1]) * 60
second = int(_list[2])
time = hour + minute + second
return time
except IndexError:
hour = int(_list[0]) * 3600
minute = int(_list[1]) * 60
time = hour + minute
return time
else:
time = 0
return time
def add_time(time, retard):
"""
    Add a delay (HH:MM) to the given time
"""
time = datetime_convert(time)
if retard:
_time = str(retard)
retime = re.compile(r'\W+')
_list = retime.split(_time)
hour = int(_list[0]) * 3600
minute = int(_list[1]) * 60
time2 = hour + minute
new_time = time + datetime.timedelta(0, time2)
else:
new_time = time
return new_time.time()
def remove_time(time, retard):
    """
    Subtract a delay (HH:MM) from the given time
    """
time = datetime_convert(time)
if retard:
_time = str(retard)
retime = re.compile(r'\W+')
_list = retime.split(_time)
hour = int(_list[0]) * 3600
minute = int(_list[1]) * 60
time2 = hour + minute
new_time = time - datetime.timedelta(0, time2)
else:
new_time = time
return new_time.time()
def format_date(date, format=None):
"""
Format date
"""
newdate = date.strftime(format)
return newdate
|
0xPhoeniX/MazeWalker
|
MazeTracer/PyScripts/post_regopenkeyexa.py
|
Python
|
lgpl-3.0
| 1,056
| 0.001894
|
import ctypes
import json
def post_analyzer(HKEY_hKey,
LPCTSTR_lpSubKey,
DWORD_ulOptions,
REGSAM_samDesired,
PHKEY_phkResult,
**kwargs):
lpSubKey = ctypes.c_char_p.from_address(LPCTSTR_lpSubKey)
hKey = ctypes.c_void_p.from_address(HKEY_hKey)
res = []
if (lpSubKey and lpSubKey.value):
result = {'name': 'lpSubKey', 'data': lpSubKey.value}
res.append(result)
if hKey and hKey.value:
result = {}
result['name'] = 'hKey'
if hKey.value == 0x80000000:
            result['data'] = 'HKCR'
elif hKey.value == 0x80000001:
result['data'] = 'HKCU'
elif hKey.value == 0x80000002:
result['data'] = 'HKLM'
elif hKey.value == 0x80000003:
result['data'] = 'HKU'
elif hKey.value == 0x80000005:
result['data'] = 'HKCC'
else:
result['data'] = '0x%x' % hKey.value
        res.append(result)
return json.dumps(res)
|
detiber/lib_openshift
|
lib_openshift/models/v1_cluster_role_list.py
|
Python
|
apache-2.0
| 6,290
| 0.001272
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1ClusterRoleList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'kind': 'str',
'api_version': 'str',
'metadata': 'UnversionedListMeta',
'items': 'list[V1ClusterRole]'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'kind': 'kind',
'api_version': 'apiVersion',
'metadata': 'metadata',
'items': 'items'
}
def __init__(self, kind=None, api_version=None, metadata=None, items=None):
"""
V1ClusterRoleList - a model defined in Swagger
"""
self._kind = kind
self._api_version = api_version
self._metadata = metadata
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1ClusterRoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1ClusterRoleList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1ClusterRoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1ClusterRoleList.
:type: str
"""
self._kind = kind
@property
def api_version(self):
"""
        Gets the api_version of this V1ClusterRoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#resources
:return: The api_version of this V1ClusterRoleList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1ClusterRoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1ClusterRoleList.
:type: str
"""
self._api_version = api_version
@property
def metadata(self):
"""
Gets the metadata of this V1ClusterRoleList.
Standard object's metadata.
:return: The metadata of this V1ClusterRoleList.
:rtype: UnversionedListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1ClusterRoleList.
Standard object's metadata.
:param metadata: The metadata of this V1ClusterRoleList.
:type: UnversionedListMeta
"""
self._metadata = metadata
@property
def items(self):
"""
Gets the items of this V1ClusterRoleList.
Items is a list of ClusterRoles
:return: The items of this V1ClusterRoleList.
:rtype: list[V1ClusterRole]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1ClusterRoleList.
Items is a list of ClusterRoles
:param items: The items of this V1ClusterRoleList.
:type: list[V1ClusterRole]
"""
self._items = items
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1ClusterRoleList.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
ViMiao/PythonLearning
|
ProgrammingPython/C01/dump_db_classes.py
|
Python
|
gpl-3.0
| 177
| 0.00565
|
import shelve
|
db = shelve.open('class-shelve')
for key in db:
    print(key, '=>\n ', db[key].name, db[key].pay)
bob = db['bob']
print(bob.lastNa
|
me())
print(db['tom'].lastName())
|
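The dump script above reads a pre-existing 'class-shelve' file. A minimal sketch of how such a shelve might be populated, assuming a hypothetical Person class that only mirrors the attributes the dump accesses (name, pay, lastName()); in practice the class should live in an importable module rather than the writing script itself, so that shelve can unpickle the instances later:
import shelve
# Hypothetical stand-in for the original classes; keep it in its own module (e.g. person.py).
class Person:
    def __init__(self, name, pay):
        self.name = name
        self.pay = pay
    def lastName(self):
        return self.name.split()[-1]
db = shelve.open('class-shelve')
db['bob'] = Person('Bob Smith', 30000)
db['tom'] = Person('Tom Jones', 50000)
db.close()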
ekarulf/pymp
|
setup.py
|
Python
|
mit
| 461
| 0.043384
|
fro
|
m setuptools import setup, find_packages
setup(
name = "pymp",
version = "0.1",
url = 'http://www.fort-awesome.net/wiki/pymp',
license = 'MIT',
description = "A very specific case when Python's multiprocessing library doesn't work",
author = 'Erik Karulf',
# Below this line is tasty Kool-Aide provided by the Cargo Cult
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools
|
'],
)
|
metamarcdw/PyBitmessage-I2P
|
src/i2p/test/test_socket.py
|
Python
|
mit
| 12,449
| 0.019038
|
# --------------------------------------------------------
# test_socket.py: Unit tests for socket, select.
# --------------------------------------------------------
# Make sure we can import i2p
import sys; sys.path += ['../../']
import traceback, time, thread, threading, random, copy
from i2p import socket, select
def test_passed(s, msg='OK'):
"""Notify user that the given unit test passed."""
print ' ' + (s + ':').ljust(50) + msg
def verify_html(s):
"""Raise an error if s does not end with </html>"""
assert s.strip().lower()[-7:] == '</html>'
def resolve_test(name='duck.i2p'):
"""Unit test for resolve."""
try:
rname = socket.resolve(name)
except:
print 'Unit test failed for socket.resolve'
traceback.print_exc(); sys.exit()
test_passed('socket.resolve', 'See below')
print ' Use hosts.txt to verify that ' + name + '=' + \
rname[:15] + '...'
def stream_client(dest):
"""Sub-unit test for socket.socket in SOCK_STREAM mode."""
S = socket.socket('Alice', socket.SOCK_STREAM)
S.connect(dest)
S.send('GET / HTTP/1.0\r\n\r\n') # Send request
f = S.makefile() # File object
while True: # Read header
line = f.readline().strip() # Read a line
if line == '': break # Content begins
s = f.read() # Get content
f.close()
S.close()
def stream_client_test():
"""Unit test for socket.socket in SOCK_STREAM mode."""
url = 'duck.i2p'
stream_client('http://' + url + '/')
stream_client(url)
stream_client(url + '/')
stream_client('http://' + url)
stream_client(socket.resolve('http://' + url + '/'))
test_passed('socket.socket stream client')
def packet_test(raw=True):
"""Unit test for socket.socket in SOCK_DGRAM or SOCK_RAW modes."""
tr
|
y:
multithread_wait_time = 500.0
may_need_increase = False
kwargs = {'in_depth': 0, 'out_depth': 0}
if raw:
C = socket.socket('Carola', socket.SOCK_RAW, **kwargs)
D = socket.socket('Davey', socket.SOCK_RAW, **kwargs)
else:
C = socket.socket('Carol', socket.SOCK_DGRAM, **kwargs)
|
D = socket.socket('Dave', socket.SOCK_DGRAM, **kwargs)
global C_recv, D_recv, C_got, D_got, __lock
C_recv = [] # Packets C *should* receive
D_recv = [] # Packets D *should* receive
C_got = [] # Packets C actually got
D_got = [] # Packets D actually got
n = 50 # Create n threads
m = 40 # Each thread sends m packets
global __done_count
__done_count = 0
__lock = threading.Lock()
# Use C and D to send and read in many different threads.
def f():
# This code is run in each separate thread
global C_recv, D_recv, C_got, D_got, __lock, __done_count
for i in range(m):
# Random binary string of length 2-80.
index_list = range(random.randrange(2, 80))
s = ''.join([chr(random.randrange(256)) for j in index_list])
if random.randrange(2) == 0:
# Send packet from C to D, and log it.
C.sendto(s, 0, D.dest)
__lock.acquire()
D_recv += [s]
__lock.release()
else:
# Send packet from D to C, and log it.
D.sendto(s, 0, C.dest)
__lock.acquire()
C_recv += [s]
__lock.release()
time.sleep(0.01*random.uniform(0.0,1.0))
# Read any available packets.
try: (p, fromaddr) = C.recvfrom(1000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None and not raw: assert fromaddr == D.dest
__lock.acquire()
if p != None: C_got += [p]
__lock.release()
try: (p, fromaddr) = D.recvfrom(1000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None and not raw: assert fromaddr == C.dest
__lock.acquire()
if p != None: D_got += [p]
__lock.release()
__lock.acquire()
__done_count += 1
__lock.release()
# Create n threads.
for i in range(n):
threading.Thread(target=f).start()
# Wait for them to finish.
while __done_count < n: time.sleep(0.01)
# Read any left-over received packets.
end_time = time.time() + multithread_wait_time
while time.time() < end_time:
# Read any available packets.
try: (p, fromaddr) = C.recvfrom(1000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None and not raw: assert fromaddr == D.dest
if p != None: C_got += [p]
try: (p, fromaddr) = D.recvfrom(1000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None and not raw: assert fromaddr == C.dest
if p != None: D_got += [p]
if len(C_got) == len(C_recv) and len(D_got) == len(D_recv):
break
if time.time() >= end_time:
may_need_increase = True
C_got.sort()
D_got.sort()
C_recv.sort()
D_recv.sort()
assert C_got == C_recv
assert D_got == D_recv
C.close()
D.close()
except:
if raw:
print 'Unit test failed for socket.socket (SOCK_RAW).'
print 'Raw packets are not reliable.'
else:
print 'Unit test failed for socket.socket (SOCK_DGRAM).'
print 'Datagram packets are not reliable.'
if may_need_increase:
print 'Try increasing multithread_wait_time.'
traceback.print_exc(); sys.exit()
if raw:
test_passed('socket.socket (SOCK_RAW)')
else:
test_passed('socket.socket (SOCK_DGRAM)')
def stream_test():
"""Multithreaded unit test for socket.socket (SOCK_STREAM)."""
try:
multithread_wait_time = 200.0
may_need_increase = False
kwargs = {'in_depth':0, 'out_depth':0}
C = socket.socket('Carolic', socket.SOCK_STREAM, **kwargs)
D = socket.socket('David', socket.SOCK_STREAM, **kwargs)
Cout = socket.socket('Carolic', socket.SOCK_STREAM, **kwargs)
Dout = socket.socket('David', socket.SOCK_STREAM, **kwargs)
assert C.dest == Cout.dest
assert D.dest == Dout.dest
C.listen(5)
D.listen(5)
Cout.connect(D.dest)
Dout.connect(C.dest)
(Cin, ignoredest) = C.accept()
(Din, ignoredest) = D.accept()
global C_recv, D_recv, C_got, D_got, __lock
C_recv = [] # String data C *should* receive
D_recv = [] # String data D *should* receive
C_got = [] # String data C actually got
D_got = [] # String data D actually got
n = 50 # Create n threads
m = 40 # Each thread sends m strings
global __done_count
__done_count = 0
__lock = threading.Lock()
# Use C and D to send and read in many different threads.
def f():
# This code is run in each separate thread
global C_recv, D_recv, C_got, D_got, __lock, __done_count
for i in range(m):
# Random binary string of length 2-80.
index_list = range(random.randrange(2, 80))
s = ''.join([chr(random.randrange(256)) for j in index_list])
if random.randrange(2) == 0:
# Send packet from C to D, and log it.
__lock.acquire()
Cout.send(s)
D_recv += [s]
__lock.release()
else:
# Send packet from D to C, and log it.
__lock.acquire()
Dout.send(s)
C_recv += [s]
__lock.release()
time.sleep(0.01*random.uniform(0.0,1.0))
# Read any available string data, non-blocking.
__lock.acquire()
try: p = Cin.recv(100000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None: C_got += [p]
__lock.release()
__lock.acquire()
try: p = Din.recv(100000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None: D_got += [p]
__lock.release()
__lock.acquire()
__done_count += 1
__lock.release()
# Create n threads.
for i in range(n):
threading.Thread(target=f).start()
# Wait for them to finish.
while __done_count < n: time.sleep(0.01)
# Read
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/usages_operations.py
|
Python
|
mit
| 4,282
| 0.002102
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class UsagesOperations(object):
"""UsagesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API
|
version. Constant value: "2017-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize
|
= deserializer
self.api_version = "2017-11-01"
self.config = config
def list(
self, location, custom_headers=None, raw=False, **operation_config):
"""List network usages for a subscription.
:param location: The location where resource usage is queried.
:type location: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Usage
:rtype:
~azure.mgmt.network.v2017_11_01.models.UsagePaged[~azure.mgmt.network.v2017_11_01.models.Usage]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages'
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.UsagePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.UsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
|
I-sektionen/i-portalen
|
wsgi/iportalen_django/articles/migrations/0013_auto_20151021_0155.py
|
Python
|
mit
| 598
| 0.00335
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0012_article_organisations'),
]
operations = [
migrations.AlterField(
|
model_name='article',
name='organisations',
field=models.ManyToManyField(default=None, help_text='Organisation/organisationer som artikeln hör till',
blank=True, to='organisations.Organisat
|
ion', verbose_name='organisationer'),
),
]
|
fmichea/lpbm
|
lpbm/exceptions.py
|
Python
|
bsd-3-clause
| 2,013
| 0.000497
|
# lpbm/exceptions.py - All the errors that can be raised in the program.
# Author: Franck Michea < franck.michea@gmail.com >
# License: New BSD License (See LICENSE)
class GeneralOptionError(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
msg = 'Could not find or call any function for `--{}` options.'
return msg.format(self.name)
class IdOptionError(GeneralOptionError):
pass
class IdOptionMissingError(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
        return 'Id must be specified when using option `--{}`.'.format(self.name)
class ObjectNotFound(Exception):
|
def __init__(self, id, name):
self.id, self.name = id, name
def __str__(self):
return 'There is no {} with this id ({}).'.format(self.name, self.id)
# Field Errors
class FieldReadOnlyError(Exception):
def __str__(self):
return 'Cannot assign read-only value.'
class FieldRequiredError(Exception):
def __str__(self):
return 'Field is required and cannot be set to empty value None.'
class ConfigOptionArgsError(Exception):
def __
|
str__(self):
msgs = [
'ConfigOptionField.__init__ takes one or two arguments.',
'See documentation for more details.',
]
return ' '.join(msgs)
# Model Errors
class AttributeNotAFieldError(Exception):
def __init__(self, attr_name):
self.attr_name = attr_name
def __str__(self):
msg = 'Attribute `{attr_name}` is not a field. You must implement '
msg += '`interactive_{attr_name}` if you want it to be interactive.'
return msg.format(attr_name=self.attr_name)
class ModelDoesNotExistError(Exception):
def __init__(self, object_name, id):
self.object_name, self.id = object_name, id
def __str__(self):
return 'There is no such {object_name} (id = {id}).'.format(
object_name=self.object_name, id=self.id
)
|
dialounke/pylayers
|
pylayers/antprop/diff.py
|
Python
|
mit
| 13,499
| 0.029632
|
"""
.. currentmodule:: pylayers.antprop.diff
.. autosummary::
:members:
"""
from __future__ import print_function
import doctest
import os
import glob
import numpy as np
import scipy.special as sps
import matplotlib.pyplot as plt
import pdb
def diff(fGHz,phi0,phi,si,sd,N,mat0,matN,beta=np.pi/2):
    """ Luebbers diffraction coefficient
Parameters
----------
fGHz
phi0
phi
si
sd
N
mat0
matN
beta : float
skew incidence angle (rad)
Examples
--------
>>> import numpy as np
>>> from pylayers.antprop.slab import *
>>> fGHz = 3.
>>> N = 320/180.
>>> #phi = 40*np.pi/180.
>>> phi0 = np.linspace(0,N*np.pi,500)
>>> phi = np.linspace(0,3*np.pi/2,100)
>>> dm = MatDB()
>>> mat0 = dm['WOOD']
>>> matN = dm['WOOD']
>>> si = 1
>>> sd = 1
    >>> Ds,Dh,D1,D2,D3,D4 = diff(fGHz,phi0,phi,si,sd,N,mat0,matN)
"""
if not isinstance(fGHz,np.ndarray):
fGHz = np.array([fGHz])
if not isinstance(phi0,np.ndarray):
phi0 = np.array([phi0])
if not isinstance(phi,np.ndarray):
phi = np.array([phi])
if not isinstance(si,np.ndarray):
si = np.array([si])
if not isinstance(sd,np.ndarray):
sd = np.array([sd])
if not isinstance(N,np.ndarray):
N = np.array([N])
if not isinstance(beta,np.ndarray):
beta = np.array([beta])
fGHz = fGHz[:,None,None,None,None,None,None]
phi0 = phi0[None,:,None,None,None,None,None]
phi = phi[None,None,:,None,None,None,None]
si = si[None,None,None,:,None,None,None]
sd = sd[None,None,None,None,:,None,None]
N = N[None,None,None,None,None,:,None]
beta = beta[None,None,None,None,None,None,:]
L = si*sd/(si+sd)
k = 2*np.pi*fGHz/0.3
#--------------------------------------------------
# R on faces 'o' and 'n'
#--------------------------------------------------
tho = np.empty((phi0.shape[1],phi.shape[2],N.shape[5]))[None,:,:,None,None,:,None]
thn = np.empty((phi0.shape[1],phi.shape[2],N.shape[5]))[None,:,:,None,None,:,None]
PHI0 = phi0 * np.ones(phi.shape)*np.ones(N.shape)
PHI = np.ones(phi0.shape)*phi*np.ones(N.shape)
BN = np.ones(phi0.shape)*np.ones(phi.shape)*N
c1 = PHI>PHI0
c2 = ~c1
tho[c1] = PHI0[c1]
thn[c1] = BN[c1]*np.pi-PHI[c1]
tho[c2] = PH
|
I[c2]
thn[c2] = BN[c2]*np.pi-PHI0[c2]
er0 = np.real(mat0['epr'])
err0 = np.imag(mat0['epr'])
ur0 = np.real(mat0['mur'])
urr0 = np.imag(mat0['mur'])
sigma0 = mat0['sigma']
deltah0 = mat0['roughness']
|
erN = np.real(matN['epr'])
errN = np.imag(matN['epr'])
urN = np.real(mat0['mur'])
urrN = np.imag(mat0['mur'])
sigmaN = matN['sigma']
deltahN = matN['roughness']
Rsofto,Rhardo = R(tho,k,er0,err0,sigma0,ur0,urr0,deltah0)
Rsoftn,Rhardn = R(thn,k,erN,errN,sigmaN,urN,urrN,deltahN)
#--------------------------------------------------
    # grazing angles Go and Gn
#--------------------------------------------------
Gsofto,Gsoftn = G(N,phi0,Rsofto,Rsoftn)
Ghardo,Ghardn = G(N,phi0,Rhardo,Rhardn)
#--------------------------------------------------
    # compute the 4 terms of the diffraction coefficient
#--------------------------------------------------
sign = 1.0
D1 = Dfunc(sign,k,N,phi-phi0,si,sd,beta)
sign = -1.0
D2 = Dfunc(sign,k,N,phi-phi0,si,sd,beta)
sign = +1.0
D3 = Dfunc(sign,k,N,phi+phi0,si,sd,beta)
sign = -1.0
D4 = Dfunc(sign,k,N,phi+phi0,si,sd,beta)
#--------------------------------------
#n>=1 : exterior wedge
#--------------------------------------
Dsoft =np.empty(np.shape(D1),dtype=complex)
Dhard =np.empty(np.shape(D1),dtype=complex)
#c1 = BN>=1.0
Dsoft = D1+D2+Rsoftn*D3+Rsofto*D4
Dhard = D1+D2+Rhardn*D3+Rhardo*D4
# Dsoft = D2-D4
# Dhard = D2+D4
#Dsoft = D1+D2-D3-D4
#Dhard = D1+D2+D3+D4
# Dsoft = Gsoftn*(D1+Rsoftn*D3)+Gsofto*(D2+Rsofto*D4)
# Dhard = Ghardn*(D1+Rhardn*D3)+Ghardo*(D2+Rhardo*D4)
# c1 = abs(Gsoftn+1.0) < 1e-6
# c2 = abs(Gsofto+1.0) < 1e-6
# c3 = abs(Ghardn+1.0) < 1e-6
# c4 = abs(Ghardo+1.0) < 1e-6
#
# Dsoft[c1]= 0.5*(D1[c1]+D3[c1])+Gsofto[c1]*(D2[c1]+Rsofto[c1]*D4[c1])
# Dsoft[c2]= Gsoftn[c2]*(D1[c2]+Rsoftn[c2]*D3[c2])+0.5*(D2[c2]+D4[c2])
# Dhard[c3]= 0.5*(D1[c3]+D3[c3])+Ghardo[c3]*(D2[c3]+Rhardo[c3]*D4[c3])
# Dhard[c4]= Ghardn[c4]*(D1[c4]+Rhardn[c4]*D3[c4])+0.5*(D2[c4]+D4[c4])
#--------------------------------------
    # handle the cases where Go (or Gn) = -1
#--------------------------------------
# if (abs(Gsoftn+1.0) < 1e-6):
# DTsoft = 0.5*(D1+D3)+Gsofto*(D2+Rsofto*D4)
#
# if (abs(Gsofto+1.0)<1e-6):
# DTsoft = Gsoftn*(D1+Rsoftn*D3)+0.5*(D2+D4)
#
# if (abs(Ghardn+1.0) < 1.0e-6):
# DThard = 0.5*(D1+D3)+Ghardo*(D2+Rhardo*D4)
#
# if (abs(Ghardo+1.0)<1e-6):
# DThard = Ghardn*(D1+Rhardn*D3)+0.5*(D2+D4)
#
##--------------------------------------
    ## case n<1 : interior wedge
##--------------------------------------
# else:
#
# thoz = N*np.pi-tho
# thnz = N*np.pi-thn
#
#
# [Rsoftnz,Rhardnz] = R(thnz,k,ero,erro,condo,uro,deltaho)
# [Rsoftoz,Rhardoz] = R(thoz,k,ern,errn,condn,urn,deltahn)
#
# DTsoft = Rsoftoz*Rsoftnz*D1+Rsoftn*D3+(Rsofto*Rsoftn*D2+Rsofto*D4)
#
# DThard = Rhardoz*Rhardnz*D1+Rhardn*D3+(Rhardo*Rhardn*D2+Rhardo*D4)
return Dsoft,Dhard,D1,D2,D3,D4
def G(N,phi0,Ro,Rn):
""" grazing angle correction
Parameters
----------
N : wedge parameter
phi0 : incidence angle (rad)
Ro : R coefficient on face o
Rn : R coefficient on face n
    Luebbers 89, "A heuristic UTD slope diffraction coefficient for
rough lossy wedges"
"""
if not isinstance(phi0,np.ndarray):
phi0 = np.array([phi0])
if not isinstance(N,np.ndarray):
N = np.array([N])
PHI0 = phi0 * np.ones(Ro.shape)
BN = N * np.ones(Ro.shape)
# face o
Go = np.ones(np.shape(Ro))
c1 = (abs(PHI0) < 1.0e-6) * (abs(Ro+1.0)>1.0e-6)
c2 = (abs(PHI0) < 1.0e-6) * (abs(Ro+1.0)<1.0e-6)
c3 = abs(PHI0-BN*np.pi) < 1.0e-6
Go[c1] = 1.0/(1.0+Ro[c1])
Go[c2] = -1.
Go[c3] = 0.5
# face n
Gn = np.ones(np.shape(Rn))
c1 = (abs(PHI0-BN*np.pi) < 1.0e-6) * (abs(Rn+1.0)>1.0e-6)
c2 = (abs(PHI0-BN*np.pi) < 1.0e-6) * (abs(Rn+1.0)<1.0e-6)
c3 = abs(PHI0) < 1.0e-6
Gn[c1] = 1.0/(1.0+Rn[c1])
Gn[c2] = -1.
Gn[c3] = 0.5
return Go,Gn
def Dfunc(sign,k,N,dphi,si,sd,beta=np.pi/2):
"""
Parameters
----------
sign : int
+1 | -1
k : wave number
N : wedge parameter
dphi : phi-phi0 or phi+phi0
si : distance source-D
sd : distance D-observation
beta : skew incidence angle
Reference
---------
    [1] Kouyoumjian, Pathak, "A uniform geometrical theory of diffraction for an edge
    in a perfectly conducting surface", Proc. IEEE, vol. 62, no. 11, Nov. 1974
Notes
-----
    Di = exp(-j*pi/4) / (2*n*sqrt(2*pi*k)) * 1/(tan(dphi/n)*sin(beta)) * F(kLa)    ([1] eq 25)
"""
cste = (1.0-1.0*1j)*(1.0/(4.0*N*np.sqrt(k*np.pi)*np.sin(beta)))
rnn = (dphi+np.pi*sign)/(2.0*N*np.pi)
nn = np.zeros(np.shape(rnn))
nn[rnn>0.5] = 1
nn[rnn>1.5] = 2
nn[rnn<-0.5] = -1
nn[rnn<-1.5] = -2
# KLA ref[1] eq 27
L = (si*sd)/(1.*(si+sd))
AC = np.cos( (2.0*N*nn*np.pi-dphi) / 2.0 )
A = 2*AC**2
KLA = k * L * A
epsi = AC*2.0
angle = (np.pi+sign*dphi)/(2.0*N)
tan = np.tan(angle)
Di = np.empty(KLA.shape)
Fkla,ys,yL = FreF(KLA)
# 4.56 Mac Namara
Di = -cste*Fkla/tan
c5 = np.where(np.abs(tan)<1e-9)
BL = np.ones(Di.shape)*L
Di[c5] = 0.5*np.sqrt(BL[c5])
return(Di)
def FresnelI(x) :
""" calculates Fresnel integral
Parameters
----------
x : array
real argument
"""
v = np.zeros(x.shape,dtype=complex)
y = np.abs(x)
z = .25*y
u1 = np.where(z>1)
u2 = np.where(z<=1)
y1 = y[u1]
y2 = y[u2]
d1 = np.cos
|
jtoppins/beaker
|
Server/bkr/server/alembic/versions/2e171e6198e6_add_data_migration_table.py
|
Python
|
gpl-2.0
| 863
| 0.005794
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Add data_migration table
Revision ID: 2e171e6198e6
Revises: 15d3fad78656
Create Date: 2016-08-03 11:11:55.680872
"""
# revision identifiers, used by Alembic.
revision = '2e171e6198e6'
down_revision = '15d3fad78656'
from alembic import op
from sqlalchemy im
|
port Column, Integer, Unicode, DateTime
def upgrade():
op.create_table('data_m
|
igration',
Column('id', Integer, primary_key=True),
Column('name', Unicode(255), nullable=False, unique=True),
Column('finish_time', DateTime),
mysql_engine='InnoDB')
def downgrade():
op.drop_table('data_migration')
|
nestauk/inet
|
inet/sources/companies_house.py
|
Python
|
mit
| 1,473
| 0
|
# -*- coding: utf-8 -*-
import logging
import chwrapper
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class CompaniesHouseClient():
def __init__(self):
self._ch = chwrapper.Search()
def get_company_data(self, k, v):
"""Search companies house for the data"""
try:
pc = v['postal_code']
        except KeyError:
logger.warn("No postal code found for {}".format(k))
return []
r = self._ch.search_companies(k, items_per_page=200)
items = r.json()['items']
data = []
for item in items:
try:
if item['address'].get('postal_code') == pc:
data.append(item)
except AttributeError:
                logger.info("No address item for {}".format(k))
return data
def get_directors(self, k, v):
"""Return directors data for a c
|
ompany number."""
try:
company_number = v.get('company_data')[0].get('company_number')
except IndexError as e:
            logger.warn("No company data found: %s", e)
|
return []
if not company_number:
            logger.warn("No company number found for {}".format(k))
return []
r = self._ch.officers(company_number)
items = r.json()['items']
data = []
for item in items:
data.append(item)
return data
ch_client = CompaniesHouseClient()
|
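The module above exposes a ready-made ch_client instance. A minimal usage sketch with hypothetical inputs shaped like the dicts the methods read ('postal_code' and 'company_data' keys); it assumes the import path matches the file's location in the package and that a Companies House API key is configured for chwrapper:
from inet.sources.companies_house import ch_client
# Hypothetical company name and previously collected record.
name = "Acme Widgets Ltd"
record = {"postal_code": "EC1A 1BB"}
# Search hits whose registered postal code matches the one already on record.
matches = ch_client.get_company_data(name, record)
# get_directors() expects the earlier matches under the 'company_data' key.
officers = ch_client.get_directors(name, {"company_data": matches})
print(len(matches), len(officers))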
kburts/django-playlist
|
django_playlist/songs/urls.py
|
Python
|
mit
| 465
| 0.021505
|
from django.conf.urls import patterns, url
f
|
rom .views import PlaylistList, PlaylistDetail, SongList, SongUpdate
urlpatterns = patterns('songs.views',
url(r'^playlists$', PlaylistList.as_view(), name='playlists_list'),
url(r'^playlists/(?P<playlist_pk>[0-9]+)/$', PlaylistDetail.as_view(),
name="playlist_detail"),
url(r'^songs$', SongList.as_view(), name='songs_list'),
url(r'^songs
|
/(?P<song_pk>[0-9]+)/$', SongUpdate.as_view(),
name='songs_update'),
)
|
robmcmullen/peppy
|
peppy/i18n/ar.py
|
Python
|
gpl-2.0
| 2,474
| 0.017785
|
# -*- coding: utf-8 -*-
#This is generated code - do not edit
encoding = 'utf-8'
dict = {
'&About...': '&\xd8\xb9\xd9\x86...',
'&Delete Window': '&\xd8\xa7\xd8\xad\xd8\xb0\xd9\x81 \xd8\xa7\xd9\x84\xd9\x86\xd8\xa7\xd9\x81\xd8\xb0\xd8\xa9',
'&Describe Action': '&\xd8\xa3\xd9\x88\xd8\xb5\xd9\x81 \xd8\xa7\xd9\x84\xd8\xb9\xd9\x85\xd9\x84\xd9\x8a\xd8\xa9',
'&Execute Action': '&\xd9\x86\xd9\x81\xd8\xb0 \xd8\xa7\xd9\x84\xd8\xb9\xd9\x85\xd9\x84\xd9\x8a\xd8\xa9',
'&Folding': '&\xd8\xa7\xd9\x84\xd8\xb7\xd9\x8a',
'&Help': '&\xd9\x85\xd8\xb3\xd8\xa7\xd8\xb9\xd8\xaf\xd8\xa9',
'&Line Numbers': '&\xd8\xb9\xd8\xaf\xd8\xaf \xd8\xa7\xd9\x84\xd8\xb3\xd8\xb7\xd9\x88\xd8\xb1',
'&New Window': '&\xd9\x86\xd8\xa7\xd9\x81\xd8\xb0\xd8\xa9 \xd8\xac\xd8\xaf\xd9\x8a\xd8\xaf\xd8\xa9',
'&Preferences...': '&\xd8\xa7\xd9\x84\xd8\xaa\xd9\x81
|
\xd8\xb6\xd9\x8a\xd9\x84\xd8\xa7\xd8\xaa...',
'&Revert': '&\xd8\xa5\xd8\xb3\xd8\xaa\xd8\xb1\xd8\xac\xd8\xb9',
'&Save...': '&\xd8\xad\xd9\x81\xd8\xb8...',
'&Show Toolbars': '&\xd
|
8\xb9\xd8\xb1\xd8\xb6 \xd8\xb4\xd8\xb1\xd9\x8a\xd8\xb7 \xd8\xa7\xd9\x84\xd8\xa3\xd8\xaf\xd9\x88\xd8\xa7\xd8\xa9',
'&Word Count': '&\xd8\xb9\xd8\xaf \xd8\xa7\xd9\x84\xd9\x83\xd9\x84\xd9\x85\xd8\xa7\xd8\xaa',
'About this program': '\xd8\xad\xd9\x88\xd9\x92\xd9\x84 \xd9\x87\xd8\xb0\xd8\xa7 \xd8\xa7\xd9\x84\xd8\xa8\xd8\xb1\xd9\x86\xd8\xa7\xd9\x85\xd8\xac',
'Actions': '\xd8\xa5\xd8\xac\xd8\xb1\xd8\xa7\xd8\xa1\xd8\xa7\xd8\xaa',
'Attributes': '\xd8\xa7\xd9\x84\xd8\xb5\xd9\x91\xd9\x81\xd8\xa7\xd8\xaa',
'Background': '\xd8\xa7\xd9\x84\xd8\xae\xd9\x84\xd9\x81\xd9\x8a\xd9\x91\xd8\xa9',
'Cancel': '\xd8\xa5\xd9\x84\xd8\xba\xd8\xa7\xef\xba\x80',
'Case': '\xd8\xa7\xd9\x84\xd8\xad\xd8\xa7\xd9\x84\xd8\xa9',
'Clear Playlist': '\xd9\x85\xd8\xb3\xd8\xad \xd9\x82\xd8\xa7\xd8\xa6\xd9\x85\xd8\xa9 \xd8\xa7\xd9\x84\xd8\xaa\xd8\xb4\xd8\xba\xd9\x8a\xd9\x84',
'Close Tab': '\xd8\xa3\xd8\xba\xd9\x84\xd9\x82 \xd8\xa7\xd9\x84\xd9\x84\xd8\xb3\xd8\xa7\xd9\x86',
'Close the current tab': '\xd8\xa3\xd8\xba\xd9\x84\xd9\x82 \xd8\xa7\xd9\x84\xd9\x84\xd8\xb3\xd8\xa7\xd9\x86 \xd8\xa7\xd9\x84\xd8\xad\xd8\xa7\xd9\x84\xd9\x8a',
'Color': '\xd8\xa7\xd9\x84\xd9\x84\xd9\x88\xd9\x86',
'Contrast': '\xd8\xa7\xd9\x84\xd8\xaa\xd8\xa8\xd8\xa7\xd9\x8a\xd9\x86',
'Copy': '\xd9\x86\xd8\xb3\xd8\xae',
'Cut': '\xd9\x82\xd8\xb5',
'Debug': '\xd8\xaa\xd9\x86\xd9\x82\xd9\x8a\xd8\xad',
'Documents': '\xd8\xa7\xd9\x84\xd9\x85\xd8\xb3\xd8\xaa\xd9\x86\xd8\xaf\xd8\xa7\xd8\xaa',
'E&xit': '&\xd8\xae\xd8\xb1\xd9\x88\xd8\xac',
}
|
GiulioRossetti/ndlib
|
ndlib/models/compartments/NodeNumericalAttribute.py
|
Python
|
bsd-2-clause
| 2,351
| 0.005104
|
from ndlib.models.compartments.Compartment import Compartiment
import networkx as nx
import numpy as np
import operator
__author__ = 'Giulio Rossetti'
__license__ = "BSD-2-Clause"
__email__ = "giulio.rossetti@gmail.com"
class NodeNumericalAttribute(Compartiment):
def __init__(self, attribute, value=None, op=None, probability=1, **kwargs):
super(self.__clas
|
s__, self).__init__(kwargs)
self.__available_operators = {"==": operator.__eq__, "<": operator.__lt__,
">": operator.__gt__, "<=": operator.__le__,
|
">=": operator.__ge__, "!=": operator.__ne__,
"IN": (operator.__ge__, operator.__le__)}
self.attribute = attribute
self.attribute_range = value
self.probability = probability
self.operator = op
if self.attribute_range is None:
raise ValueError("A valid attribute value must be provided")
if self.operator is not None and self.operator in self.__available_operators:
if self.operator == "IN":
if not isinstance(self.attribute_range, list) or self.attribute_range[1] < self.attribute_range[0]:
raise ValueError("A range list is required to test IN condition")
else:
if not isinstance(self.attribute_range, int):
if not isinstance(self.attribute_range, float):
raise ValueError("A numeric value is required to test the selected condition")
else:
            raise ValueError("The operator provided '%s' is not valid" % self.operator)
def execute(self, node, graph, status, status_map, *args, **kwargs):
val = nx.get_node_attributes(graph, self.attribute)[node]
p = np.random.random_sample()
if self.operator == "IN":
condition = self.__available_operators[self.operator][0](val, self.attribute_range[0]) and \
self.__available_operators[self.operator][1](val, self.attribute_range[1])
else:
condition = self.__available_operators[self.operator](val, self.attribute_range)
test = condition and p <= self.probability
if test:
return self.compose(node, graph, status, status_map, kwargs)
return False
|
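A minimal sketch of how the compartment above might be instantiated, based only on the constructor signature shown (attribute, value, op, probability); the graph, node attributes and model wiring that ndlib normally supplies are assumed and not shown:
from ndlib.models.compartments.NodeNumericalAttribute import NodeNumericalAttribute
# Fires for nodes whose 'age' attribute is >= 18, with probability 0.6.
c_age = NodeNumericalAttribute("age", value=18, op=">=", probability=0.6)
# Range test: fires when 'income' lies in [20000, 50000]; the "IN" operator
# requires an ordered two-element list, as enforced in __init__ above.
c_income = NodeNumericalAttribute("income", value=[20000, 50000], op="IN")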
UltimateNate/TURPG
|
PyAnimationEngine.py
|
Python
|
gpl-3.0
| 2,064
| 0.009205
|
# PyAnimation - Animation, in a terminal.
# Copyright (C) 2015 Nathaniel Olsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should
|
have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import os
EngineVersion = "0.3-indev"
def clear():
os.system('clear')
# The Legacy wait for legacy or outdated programs.
def legacywait():
time.sleep(
|
0.4)
def legacywait2():
time.sleep(0.2)
def legacywait3():
time.sleep(0.1)
# The new wait.
def waitpoint1():
time.sleep(0.1)
def waitpoint2():
time.sleep(0.2)
def waitpoint3():
time.sleep(0.3)
def waitpoint4():
time.sleep(0.4)
def waitpoint5():
time.sleep(0.5)
def waitpoint6():
time.sleep(0.6)
def wait1():
time.sleep(1)
def wait2():
time.sleep(2)
def wait3():
time.sleep(3)
def loading_screen():
while True:
print("L")
waitpoint1()
clear()
print("Lo")
waitpoint1()
clear()
print("Loa")
waitpoint1()
clear()
print("Load")
waitpoint1()
clear()
print("Loadi")
waitpoint1()
clear()
print("Loadi")
waitpoint1()
clear()
print("Loadin")
waitpoint1()
clear()
print("Loading")
waitpoint1()
clear()
print("Loadin")
waitpoint1()
clear()
print("Loadi")
waitpoint1()
clear()
print("Load")
waitpoint1()
clear()
print("Loa")
waitpoint1()
clear()
print("Lo")
waitpoint1()
clear()
print("L")
waitpoint1()
clear()
|
ArtezGDA/text-IO
|
Martijn/format.py
|
Python
|
mit
| 170
| 0.058824
|
import my_data_file
d = my_data_file.my_data
print "Hello my name i
|
s %s and i am %d years of age and my coolnes is %d " % (d [ 'naam' ], d [ 'age' ], d ['cool
|
heid'])
|
twm/yarrharr
|
yarrharr/application.py
|
Python
|
gpl-3.0
| 20,459
| 0.000978
|
# Copyright © 2013, 2015, 2016, 2017, 2018, 2020, 2022 Tom Most <twm@freecog.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this Program, or any covered work, by linking or
# combining it with OpenSSL (or a modified version of that library),
# containing parts covered by the terms of the OpenSSL License, the
# licensors of this Program grant you additional permission to convey
# the resulting work. Corresponding Source for a non-source form of
# such a combination shall include the source code for the parts of
# OpenSSL used as well as that of the covered work.
"""
Yarrharr production server via Twisted Web
"""
import io
import json
import logging
import os
import re
import sys
from base64 import b64encode
import attr
from django.conf import settings
from django.dispatch import receiver
from twisted.internet import defer
from twisted.internet.endpoints import serverFromString
from twisted.logger import (
FileLogObserver,
FilteringLogObserver,
ILogFilterPredicate,
Logger,
LogLevel,
PredicateResult,
formatEvent,
globalLogBeginner,
globalLogPublisher,
)
from twisted.python.filepath import FilePath
from twisted.web.resource import ErrorPage, NoResource, Resource
from twisted.web.server import Site
from twisted.web.static import File
from twisted.web.wsgi import WSGIResource
from zope.interface import implementer
from . import __version__
from .signals import schedule_changed
from .wsgi import application
log = Logger()
@attr.s
class CSPReport(object):
url = attr.ib()
referrer = attr.ib()
resource = attr.ib()
violatedDirective = attr.ib()
effectiveDirective = attr.ib()
source = attr.ib()
sample = attr.ib()
status = attr.ib()
policy = attr.ib()
disposition = attr.ib()
def __str__(self):
bits = []
for a in attr.fields(self.__class__):
value = getattr(self, a.name)
if value is None:
continue
bits.append("{}={!r}".format(a.name, value))
return "\n".join(bits)
@classmethod
def fromJSON(cls, data):
"""
Construct a :class:`CSPReport` from the serialization of a violation
per CSP Level 3 §5.3.
"""
if {"source-file", "line-number", "column-number"} <= data.keys():
source = "{source-file} {line-number}:{column-number}".format_map(data)
elif {"source-file", "line-number"} <= data.keys():
source = "{source-file} {line-number}".format_map(data)
else:
source = data.get("source-file")
return cls(
url=data["document-uri"],
referrer=data["referrer"] or None, # Always seems to be an empty string.
resource=data["blocked-uri"],
violatedDirective=data.get("violated-directive"),
effectiveDirective=data.get("effective-directive"),
policy=data["original-policy"],
disposition=data.get("disposition"),
status=data.get("status-code"),
sample=data.get("script-sample") or None,
source=source,
)
class CSPReportLogger(Resource):
isLeaf = True
_log = Logger()
def render(self, request):
if request.method != b"POST":
request.setResponseCode(405)
request.setHeader("Allow", "POST")
return b"HTTP 405: Method Not Allowed\n"
if request.requestHeaders.getRawHeaders("Content-Type") != ["application/csp-report"]:
request.setResponseCode(415)
return b"HTTP 415: Only application/csp-report requests are accepted\n"
# Process the JSON text produced per
# https://w3c.github.io/webappsec-csp/#deprecated-serialize-violation
report = CSPReport.fromJSON(json.load(io.TextIOWrapper(request.content, encoding="utf-8"))["csp-report"])
if report.sample and report.sample.startswith(";(function installGlobalHook(window) {"):
# This seems to be a misbehavior in some Firefox extension.
# I cannot reproduce it with a clean profile.
return b""
if report.sample and report.sample == "call to eval() or related function blocked by CSP":
# This is caused by Tridactyl due to a Firefox issue. It's quite
# chatty so we'll disable for now, even thou
|
gh the message is
# generated by the browser and might indicate a script injection.
# See <https://github.com/cmcaine/tridactyl/issues/109> and
# <https://bugzilla.mozilla.org/show_bug.cgi?id=1267027>.
return b""
self._log.debug(
"Content Security Policy violation reported by {userAgent!r}:\n{report}",
|
userAgent=", ".join(request.requestHeaders.getRawHeaders("User-Agent", [])),
report=report,
)
return b"" # Browser ignores the response.
class FallbackResource(Resource):
"""
Resource which falls back to an alternative resource tree if it doesn't
have a matching child resource.
"""
def __init__(self, fallback):
Resource.__init__(self)
self.fallback = fallback
def render(self, request):
"""
Render this path with the fallback resource.
"""
return self.fallback.render(request)
def getChild(self, path, request):
"""
Dispatch unhandled requests to the fallback resource.
"""
# Mutate the request path such that it's like FallbackResource didn't handle
# the request at all. This is a bit of a nasty hack, since we're
# relying on the t.w.server implementation's behavior to not break when
# we do this. A better way would be to create a wrapper for the request object
request.postpath.insert(0, request.prepath.pop())
return self.fallback
class Static(Resource):
"""
Serve up Yarrharr's static assets directory. The files in this directory
have names like::
In development, the files are served uncompressed and named like so::
main-afffb00fd22ca3ce0250.js
The second dot-delimited section is a hash of the file's contents or source
material. As the filename changes each time the content does, these files
are served with a long max-age and the ``immutable`` flag in the
`Cache-Control`_ header.
In production, each file has two pre-compressed variants: one with
a ``.gz`` extension, and one with a ``.br`` extension. For example::
main-afffb00fd22ca3ce0250.js
main-afffb00fd22ca3ce0250.js.br
main-afffb00fd22ca3ce0250.js.gz
The actual serving of the files is done by `twisted.web.static.File`, which
is fancy and supports range requests, conditional gets, etc.
.. note::
Several features used here are only available to HTTPS origins.
Cache-Control: immutable and Brotli compression both are in Firefox.
.. _cache-control: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
"""
_dir = FilePath(settings.STATIC_ROOT)
_validName = re.compile(rb"^[a-zA-Z0-9]+-[a-zA-Z0-9]+(\.[a-z]+)+$")
# NOTE: RFC 7231 § 5.3.4 is not completely clear about whether
# content-coding tokens are case-sensitive or not. The "identity" token
# appears in EBNF and is therefore definitely case-insensitive, but the
# other tokens only appear in IANA registry tables in lowercase form. In
# contrast, the transfer-coding possibilities are clearly defined in
|
MindPass/Code
|
Interface_graphique/PyQt/application/classeGestion.py
|
Python
|
gpl-3.0
| 24,892
| 0.028687
|
import sqlite3
import sys
"""<Mindpass is an intelligent password manager written in Python3
that checks your mailbox for logins and passwords that you do not remember.>
Copyright (C) <2016> <Cantaluppi Thibaut, Garchery Martial, Domain Alexandre, Boulmane Yassine>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""
sys.path.append('../fenetres/')
from functools import partial
from PyQt5 import QtWidgets, QtGui, QtCore
from fenetreGestion import Ui_fenetreGestion
from requetes import *
import numpy as np
import colorsys
bdd = "../../../Traitement_mails/bdd.sq3"
def print_(arg):
"""
Args:
        arg: the value to display
    Returns: the value to display, followed by a series of '-' to space out the output
"""
print(arg)
print("-------------------------------------")
class LineEditWithFocusOut(QtWidgets.QLineEdit):
"""docstring for LineEditWithFocusOut
    Re-implementation of QLineEdit(), overriding its behaviour on
    a focusOut event. Here we update the identifiant column of the
    sites_reconnus_"+self.nom_table+" table.
"""
def __init__(self, nom_table):
super().__init__()
self.nom_table = nom_table
def focusOutEvent(self, arg):
QtWidgets.QLineEdit.focusOutEvent(self, arg)
        # self.id holds the id of the LineEdit, set in afficher_ligne_site()
requete= "UPDATE sites_reconnus_"+self.nom_table+" SET identifiant =? WHERE rowid=?"
bdd_update(requete, (self.text(), self.id +1))
if self.text() == "":
self.setPlaceholderText("Ajouter un pseudo")
class LigneSite(object):
"""docstring for LigneSite"""
def __init__(self, y, site_web, identifiant, mdp, categorie, objet, nom_table):
self.position = y
self.objet = objet
self.nom_site = site_web
self.nom_mdp = mdp
self.nom_cat = categorie
self.nom_table = nom_table
self.ligne = QtWidgets.QHBoxLayout()
self.site_web =QtWidgets.QLabel()
self.site_web.setAlignment(QtCore.Qt.AlignCenter)
self.site_web.setObjectName("site_web")
self.site_web.setText(site_web)
self.ligne.addWidget(self.site_web)
self.identifiant = LineEditWithFocusOut(self.nom_table)
self.identifiant.setAlignment(QtCore.Qt.AlignCenter)
self.identifiant.setObjectName('identifiant')
self.identifiant.id = y
if identifiant is None or identifiant == "":
self.identifiant.setPlaceholderText("Ajouter un pseudo")
else:
self.identifiant.setText(identifiant)
self.ligne.addWidget(self.identifiant)
self.mdp = QtWidgets.QComboBox()
self.mdp.setObjectName("mdp")
        self.afficher_combo_pwd() # populate the combobox items from the database
self.ligne.addWidget(self.mdp)
self.categorie = QtWidgets.QComboBox()
self.categorie.setObjectName("categorie")
        self.afficher_combo_cat() # populate the combobox items from the database
self.ligne.addWidget(self.categorie)
self.ligne.setStretch(0, 2)
self.ligne.setStretch(1, 2)
self.ligne.setStretch(2, 2)
self.ligne.setStretch(3, 2)
self.categorie.currentIndexChanged.connect(self.changement_cat)
self.mdp.currentIndexChanged.connect(self.changement_pwd)
def changement_cat(self, event):
requete ="SELECT categorie FROM sites_reconnus_"+self.nom_table+" WHERE rowid=?"
ancienne_categorie = toliste(bdd_select(requete, (self.position+1,)))[0]
        # Add the site_web under the corresponding category
requete= "UPDATE sites_reconnus_"+self.nom_table+" SET categorie=? WHERE rowid=?"
bdd_update(requete, (self.categorie.currentText(), self.position +1))
print("Catégorie changée en"+ self.categorie.currentText())
for k in range(len(self.objet.cats)):
if(self.objet.cats[k].nom == self.categorie.currentText()):
liste_label_name =[]
for element in self.objet.cats[k].labels:
liste_label_name.append(element.text())
if(self.categorie.currentText() not in liste_label_name):
label = QtWidgets.QLabel()
font = QtGui.QFont()
font.setPointSize(9)
font.setItalic(True)
label.setFont(font)
label.setObjectName("sites_lies_cat")
label.setText(self.site_web.text())
self.objet.cats[k].labels.append(label)
self.objet.cats[k].verticalLayout_groupBox.addWidget(label)
break
        # Update the groupBox of the old category
for k in range(len(self.objet.cats)):
if(self.objet.cats[k].nom == ancienne_categorie):
for label in self.objet.cats[k].labels:
label.deleteLater()
self.objet.cats[k].labels = []
requete ="SELECT site_web FROM sites_reconnus_"+self.nom_table+" WHERE categorie=?"
sites_lies= toliste(bdd_select(requete, (ancienne_categorie,)))
self.objet.cats[k].affichage_sites_lies(sites_lies)
        # Update the label whose category was changed
for pwd in self.objet.pwds:
for label in pwd.labels:
if(label.texte == self.nom_site):
pwd.update(label, self.categorie.currentText())
                    # Update the colour of the groupBox_pwd containing the associated label
pwd.update_color_groupBox()
def changement_pwd(self):
requete ="SELECT mdp FROM sites_reconnus_"+self.nom_table+" WHERE rowid=?"
ancien_mdp = toliste(bdd_select(requete, (self.position+1,)))[0]
        # Add the site_web under the corresponding password
requete= "UPDATE sites_reconnus_"+self.nom_table+" SET mdp=? WHERE rowid=?"
nouveau_mdp = self.mdp.currentText()
bdd_u
|
pdate(requete, (nouveau_mdp , self.position +1))
print("Mdp changée en"+ nouveau_mdp)
for k in range(len(self.objet.pwds)):
if(self.objet.pwds[k].nom == nouveau_mdp):
liste_label_name =[]
for eleme
|
nt in self.objet.pwds[k].labels:
liste_label_name.append(element.text())
if(nouveau_mdp not in liste_label_name):
self.objet.pwds[k].label(self.site_web.text())
break
        # Update the groupBox of the old password
for k in range(len(self.objet.pwds)):
if(self.objet.pwds[k].nom == ancien_mdp):
for label in self.objet.pwds[k].labels:
label.deleteLater()
self.objet.pwds[k].labels = []
requete ="SELECT site_web FROM sites_reconnus_"+self.nom_table+" WHERE mdp=?"
sites_lies= toliste(bdd_select(requete, (ancien_mdp,)))
self.objet.pwds[k].affichage_sites_lies(sites_lies)
for pwd in self.objet.pwds:
if(pwd.nom == ancien_mdp):
pwd.update_color_groupBox()
elif(pwd.nom == nouveau_mdp):
pwd.update_color_groupBox()
def update_pwd_combobox(self, complet):
print(self.mdp.maxCount())
def afficher_combo_pwd(self):
requete= "SELECT mdp FROM mdps_"+self.nom_table+""
tab = bdd_select(requete)
result = []
for k in range(len(tab)):
result.append(tab[k][0])
self.mdp.addItem(self.nom_mdp)
for pwd in result:
if pwd and pwd != self.nom_mdp:
self.mdp.addItem(pwd)
if(self.nom_mdp and self.nom_mdp != ""):
self.mdp.addItem("")
def afficher_combo_cat(self):
requete= "SELECT nom_categorie FROM categories_"+self.nom_table
tab = bdd_select(requete)
result = []
for k in range(len(tab)):
result.append(tab[k][0])
self.categorie.addItem(self.nom_cat)
for cat in result:
if cat and cat != self.nom_cat:
self.categorie.addItem(cat)
if(self.nom_cat and self.nom_cat != ""):
self.categorie.addItem("")
class Ligne(object):
"""docstring for ligneCategorie
    (objet) is the object containing all the elements of the window.
    Allows accessing and modifying those elements.
"""
def __init__(self, position, nom, sites_lies, objet, nom_table):
self.position = position
self.nom = nom
self.sites_lies = sites_lies
self.objet = o
|
hubo1016/vlcp
|
vlcp/protocol/openflow/defs/openflow13.py
|
Python
|
apache-2.0
| 130,995
| 0.031612
|
'''
/* Copyright (c) 2008 The Board of Trustees of The Leland Stanford
* Junior University
* Copyright (c) 2011, 2012 Open Networking Foundation
*
* We are making the OpenFlow specification and associated documentation
* (Software) available for public use and benefit with the expectation
* that others will use, modify and enhance the Software and contribute
* those enhancements back to the community. However, since we would
* like to make the Software available for broadest use, with as few
* restrictions as possible permission is hereby granted, free of
* charge, to any person obtaining a copy of this Software to deal in
* the Software under the copyrights without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* The name and trademarks of copyright holder(s) may NOT be used in
* advertising or publicity pertaining to the Software or any
* derivatives without specific, written prior permission.
*/
Created on 2015/7/14
:author: hubo
'''
from .common import *
from . import common
from namedstruct.namedstruct import rawtype as _rawtype
from namedstruct.namedstruct import StructDefWarning
import warnings as _warnings
with _warnings.catch_warnings():
_warnings.filterwarnings('ignore', '^padding', StructDefWarning)
ofp_port_no = enum('ofp_port_no',
globals(),
uint32,
OFPP_MAX = 0xffffff00,
OFPP_IN_PORT = 0xfffffff8,
OFPP_TABLE = 0xfffffff9,
OFPP_NORMAL = 0xfffffffa,
OFPP_FLOOD = 0xfffffffb,
OFPP_ALL = 0xfffffffc,
OFPP_CONTROLLER = 0xfffffffd,
OFPP_LOCAL = 0xfffffffe,
OFPP_ANY = 0xffffffff)
ofp_error_type = ofp_error_type.extend(globals(),
OFPET_BAD_INSTRUCTION = 3, #/* Error in instruction list. */
OFPET_BAD_MATCH = 4, #/* Error in match. */
OFPET_FLOW_MOD_FAILED = 5, #/* Problem modifying flow entry. */
OFPET_GROUP_MOD_FAILED = 6, #/* Problem modifying group entry. */
OFPET_PORT_MOD_FAILED = 7, #/* Port mod request failed. */
OFPET_TABLE_MOD_FAILED = 8, #/* Table mod request failed. */
OFPET_QUEUE_OP_FAILED = 9, #/* Queue operation failed. */
OFPET_SWITCH_CONFIG_FAILED = 10, #/* Switch config request failed. */
OFPET_ROLE_REQUEST_FAILED = 11, #/* Controller Role request failed. */
OFPET_METER_MOD_FAILED = 12, #/* Error in meter. */
OFPET_TABLE_FEATURES_FAILED = 13,# /* Setting table features failed. */
OFPET_EXPERIMENTER = 0xffff #/* Experimenter error messages. */
)
ofp_type = ofp_type.extend(globals(),
OFPT_EXPERIMENTER = 4, #/* Symmetric message */
# /* Switch configuration messages. */
OFPT_FEATURES_REQUEST = 5, #/* Controller/switch message */
OFPT_FEATURES_REPLY = 6, #/* Controller/switch message */
OFPT_GET_CONFIG_REQUEST = 7, #/* Controller/switch message */
OFPT_GET_CONFIG_REPLY = 8, #/* Controller/switch message */
OFPT_SET_CONFIG = 9, #/* Controller/switch message */
# /* Asynchronous messages. */
OFPT_PACKET_IN = 10, #/* Async message */
OFPT_FLOW_REMOVED = 11, #/* Async message */
OFPT_PORT_STATUS = 12, #/* Async message */
# /* Controller command messages. */
OFPT_PACKET_OUT = 13, #/* Controller/switch message */
OFPT_FLOW_MOD = 14, #/* Controller/switch message */
OFPT_GROUP_MOD = 15, #/* Controller/switch message */
OFPT_PORT_MOD = 16, #/* Controller/switch message */
OFPT_TABLE_MOD = 17, #/* Controller/switch message */
# /* Multipart messages. */
OFPT_MULTIPART_REQUEST = 18, #/* Controller/switch message */
OFPT_MULTIPART_REPLY = 19, #/* Controller/switch message */
# /* Barrier messages. */
OFPT_BARRIER_REQUEST = 20, #/* Controller/switch message */
OFPT_BARRIER_REPLY = 21, #/* Controller/switch message */
# /* Queue Configuration messages. */
OFPT_QUEUE_GET_CONFIG_REQUEST = 22, #/* Controller/switch message */
OFPT_QUEUE_GET_CONFIG_REPLY = 23, #/* Controller/switch message */
# /* Controller role change request messages. */
OFPT_ROLE_REQUEST = 24, #/* Controller/switch message */
OFPT_ROLE_REPLY = 25, #/* Controller/switch message */
# /* Asynchronous message configuration. */
OFPT_GET_ASYNC_REQUEST = 26, #/* Controller/switch message */
OFPT_GET_ASYNC_REPLY = 27, #/* Controller/switch message */
OFPT_SET_ASYNC = 28, #/* Controller/switch message */
# /* Meters and rate limiters configuration messages. */
OFPT_METER_MOD =
|
29, #/* Controller/switch message */
)
ofp_type_reply_set = set([OFPT_ECHO_REPLY, OFPT_FEATURES_REPLY, OFPT_GET_CONFIG_REPLY, OFPT_MULTIPART_REPLY, OFPT_BARRIER_REPLY, OFPT_QUEUE_GET_CONFIG_REPLY, OFPT_ROLE_REPLY, OFPT_GET_ASYNC_REPLY])
ofp_type_asyncmessage_set = set([OFPT_PACKET_IN, OFPT_FLOW_REMOVED, OFPT_PORT_STATUS])
OFP_VERSION =
|
OFP13_VERSION
ofp_msg = nstruct(name = 'ofp_msg',
base = common.ofp_msg_mutable,
criteria = lambda x: x.header.version == OFP_VERSION,
init = packvalue(OFP_VERSION, 'header', 'version'),
classifyby = (OFP_VERSION,),
classifier = lambda x: x.header.type,
extend = {('header', 'type') : ofp_type})
'''
/* Switch configuration. */
'''
ofp_switch_config = nstruct((ofp_config_flags, 'flags'),
(uint16, 'miss_send_len'),
name = 'ofp_switch_config',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_GET_CONFIG_REPLY or x.header.type == OFPT_SET_CONFIG,
classifyby = (OFPT_SET_CONFIG, OFPT_GET_CONFIG_REPLY),
init = packvalue(OFPT_SET_CONFIG, 'header','type'))
'''
/* Configure/Modify behavior of a flow table */
'''
ofp_table_mod = nstruct(
(ofp_table, 'table_id'), # /* ID of the table, OFPTT_ALL indicates all tables */
(uint8[3],), # /* Pad to 32 bits */
(ofp_table_config, 'config'), # /* Bitmap of OFPTC_* flags */
name = 'ofp_table_mod',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_TABLE_MOD,
classifyby = (OFPT_TABLE_MOD,),
init = packvalue(OFPT_TABLE_MOD, 'header', 'type')
)
'''
/* Capabilities supported by the datapath. */
'''
ofp_capabilities = ofp_capabilities.extend(globals(),
OFPC_GROUP_STATS = 1 << 3, # /* Group statistics. */
OFPC_PORT_BLOCKED = 1 << 8 # /* Switch will block looping ports. */
)
'''
/* Current state of the physical port. These are not configurable from
* the contr
|
TomAugspurger/pandas
|
pandas/tests/base/test_conversion.py
|
Python
|
bsd-3-clause
| 14,519
| 0.000551
|
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_timedelta64_dtype
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import CategoricalIndex, Series, Timedelta, Timestamp
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
IntervalArray,
PandasArray,
PeriodArray,
SparseArray,
TimedeltaArray,
)
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_iterable(self, index_or_series, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_iterable_object_and_category(
self, index_or_series, method, dtype, rdtype, obj
):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
def test_iterable_map(self, index_or_series, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta("1 days"), Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timedelta)
assert res == exp
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == "M"
assert res == exp
@pytest.mark.parametrize(
"array, expected_type, dtype",
[
|
(np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
(np.array(["a", "b"]), np.ndarray, "object"),
(pd.Categorical(["a", "b"]), pd.Categorical, "category"),
|
(
pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
DatetimeArray,
"datetime64[ns, US/Central]",
),
(
pd.PeriodIndex([2018, 2019], freq="A"),
PeriodArray,
pd.core.dtypes.dtypes.PeriodDtype("A-DEC"),
),
(pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval",),
# This test is currently failing for datetime64[ns] and timedelta64[ns].
# The NumPy type system is sufficient for representing these types, so
# we just use NumPy for Series / DataFrame columns of these types (so
# we get consolidation and so on).
# However, DatetimeIndex and TimedeltaIndex use the DateLikeArray
# abstraction for code reuse.
# At the moment, we've judged that allowing this test to fail is more
# practical than overriding Series._values to special case
# Series[M8[ns]] and Series[m8[ns]] to return a DateLikeArray.
pytest.param(
pd.DatetimeIndex(["2017", "2018"]),
np.ndarray,
"datetime64[ns]",
marks=[pytest.mark.xfail(reason="datetime _values", strict=True)],
),
pytest.param(
pd.TimedeltaIndex([10 ** 10]),
np.ndarray,
"m8[ns]",
marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)],
),
],
)
def test_values_consistent(array, expected_type, dtype):
l_values = pd.Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
tm.assert_equal(l_values, r_values)
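# Hedged illustration (added; not part of the original test module): the cases above
# assert that Series._values and Index._values expose the same backing container,
# e.g. pd.Series(pd.Categorical(["a", "b"]))._values is a Categorical, while an
# int64 ndarray input stays a plain np.ndarray on both sides.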
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
ser = pd.Series(arr)
result = ser.array
expected = PandasArray(arr)
tm.assert_extension_array_equal(result, expected)
def test_numpy_array_all_dtypes(any_numpy_dtype):
ser = pd.Series(dtype=any_numpy_dtype)
result = ser.array
if is_datetime64_dtype(any_numpy_dtype):
assert isinstance(result, DatetimeArray)
elif is_timedelta64_dtype(any_numpy_dtype):
assert isinstance(result, TimedeltaArray)
else:
assert isinstance(result, PandasArray)
@pytest.mark.parametrize(
"array, attr",
[
(pd.Categorical(["a", "b"]), "_codes"),
(pd.core.arrays.period_array(["2000", "2001"], freq="D"), "_data"),
(pd.core.arrays.integer_array([0, np.nan]), "_data"),
(IntervalArray.from_breaks([0, 1]), "_left"),
(SparseArray([0, 1]), "_sparse_values"),
(DatetimeArray(np.array([1, 2], dtype="datetime64[ns]")), "_data"),
# tz-aware Datetime
(
DatetimeArray(
np.array(
["2000-01-01T12:00:00", "2000-01-02T12:00:00"], dtype="M8[ns]"
),
|
Onager/plaso
|
tests/cli/helpers/storage_file.py
|
Python
|
apache-2.0
| 1,615
| 0.004334
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the storage file CLI arguments helper."""
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import storage_file
from plaso.lib import errors
from tests.cli import test_lib as cli_test_lib
class StorageFileArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
"""Tests for the storage file CLI arguments helper."""
# pylint: disable=no-member,protected-access
_EXPECTED_OUTPUT = """\
usage: cli_helper.py [STORAGE_FILE]
Test argument parser.
positional arguments:
STORAGE_FILE Path to a storage file.
"""
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(
prog='cli_helper.py', description='Test argument parser.',
add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
storage_file.StorageFileArgumentsHelper.AddArguments(argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
test_tool = tools.CLITool()
options = cli_test_lib.TestOptions()
options.storage_file = self._GetTestFilePath(['test.plaso'])
storage_file.StorageFileArgumentsHelper.ParseOptions(options, test_tool)
self.assertEqual(test_tool._storage_file_path, options.storage_file)
with self.assertRaises(errors.BadConfigObject):
storage_file.StorageFileArgumentsHelper.ParseOptions(options, None)
if __name__ == '__main__':
unittest.main()
|
raphaeldore/analyzr
|
analyzr/utils/file.py
|
Python
|
mit
| 2,428
| 0.002471
|
import errno
import os
import sys
from contextlib import contextmanager
@contextmanager
def open_with_error(filename: str, mode: str = "r", encoding: str = "utf-8"):
try:
f = open(filename, mode=mode, encoding=encoding)
except IOError as err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
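# Hedged usage sketch (added; not part of the original module): open_with_error
# yields a (file, error) pair so the caller can branch on the error instead of
# wrapping the open() call in try/except. The path below is hypothetical.
def _open_with_error_example(path: str = "example.txt") -> str:
    with open_with_error(path) as (handle, error):
        if error is not None:
            return "could not open {}: {}".format(path, error)
        return handle.read()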
def get_next_file_path(folder: str, base_filename: str):
"""
Python version of this C# code: http://stackoverflow.com/a/1078898
Given a base file name, creates a unique filename. Check to see if the given file exists, and if it does
tries to find the next available file name by appending numbers to the base filename until a valid filename is
found.
:param folder: Full path to folder. If last path separator is omitted, then the function adds it. Ex:
``C:\\users\\bob\\images\\``
``C:\\users\\bob\\images`` (will add the backslash)
:param base_filename: The base filename of the file. Ex:
``image.png``
:return: The next available filename (Ex: image_2.png).
"""
pattern = "{filename}_{nb}{ext}"
if not folder.endswith(os.path.sep):
folder += os.path.sep
full_path = folder + base_filename
if not os.path.isfile(full_path):
return full_path
filename, file_extension = os.path.splitext(base_filename)
min_nbr, max_nbr = 1, 2
while os.path.isfile(
os.path.join(folder, pattern.format(filename=filename, nb=str(max_nbr), ext=file_extension))
):
min_nbr = max_nbr
max_nbr *= 2
while max_nbr != min_nbr + 1:
pivot = int((max_nbr + min_nbr) / 2)
if os.path.isfile(
os.path.join(folder, pattern.format(filename=filename, nb=str(pivot), ext=file_extension))):
min_nbr = pivot
else:
max_nbr = pivot
return os.path.join(folder, pattern.format(filename=filename, nb=str(max_nbr), ext=file_extension))
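# Hedged usage sketch (added; not part of the original module): the doubling-then-
# bisecting search above returns the original path if it is free, otherwise the
# first available numbered variant. The folder and filename below are hypothetical.
def _get_next_file_path_example() -> str:
    return get_next_file_path("/tmp/analyzr-output", "report.txt")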
def make_sure_path_exists(path: str) -> None:
"""
Makes sure that the path exists. If it does not exist
creates the path (all directories and sub-directories in the given path).
"""
if sys.version_info[:3] >= (3, 4, 1):
os.makedirs(path, exist_ok=True)
else:
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
|
mothyjohn/VELA-CLARA-Controllers
|
General/enums/bin/Release/test.py
|
Python
|
gpl-3.0
| 307
| 0.022801
|
import sys,os
import numpy as np
#os.environ["EPICS_CA_AUTO_ADDR_LIST"] = "NO"
#os.environ["EPICS_CA_ADDR_LIST"] = "192.168.82.10"
#os.environ["EPICS_CA_MAX_ARRAY_BYTES"] = "10000000000"
import velaINJMagnetControl as VIMC
a = VIMC.velaINJMagnetController(True,False)
print( np.array(a.getQuadNames()))
|
franek/weboob
|
modules/youtube/test.py
|
Python
|
agpl-3.0
| 1,394
| 0.002152
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
from weboob.capabilities.video import BaseVideo
class YoutubeTest(BackendTest):
BACKEND = 'youtube'
def test_search(self):
l = list(self.backend.search_videos('lol'))
self.assertTrue(len(l) > 0)
v = l[0]
self.backend.fillobj(v, ('url',))
self.assertTrue(v.url and v.url.startswith('http://'), 'URL for video "%s" not found: %s' % (v.id, v.url))
assert self.backend.get_video(v.shorturl)
self.backend.browser.openurl(v.url)
def test_latest(self):
l = list(self.backend.iter_resources([BaseVideo], [u'latest']))
assert len(l) > 0
|
1upon0/rfid-auth-system
|
GUI/printer/Pillow-2.7.0/Tests/test_lib_pack.py
|
Python
|
apache-2.0
| 5,896
| 0
|
from helper import unittest, PillowTestCase, py3
from PIL import Image
class TestLibPack(PillowTestCase):
def pack(self):
pass # not yet
def test_pack(self):
def pack(mode, rawmode):
if len(mode) == 1:
im = Image.new(mode, (1, 1), 1)
else:
im = Image.new(mode, (1, 1), (1, 2, 3, 4)[:len(mode)])
if py3:
return list(im.tobytes("raw", rawmode))
else:
return [ord(c) for c in im.tobytes("raw", rawmode)]
order = 1 if Image._ENDIAN == '<' else -1
self.assertEqual(pack("1", "1"), [128])
self.assertEqual(pack("1", "1;I"), [0])
self.assertEqual(pack("1", "1;R"), [1])
self.assertEqual(pack("1", "1;IR"), [0])
self.assertEqual(pack("L", "L"), [1])
self.assertEqual(pack("I", "I"), [1, 0, 0, 0][::order])
self.assertEqual(pack("F", "F"), [0, 0, 128, 63][::order])
self.assertEqual(pack("LA", "LA"), [1, 2])
self.assertEqual(pack("RGB", "RGB"), [1, 2, 3])
self.assertEqual(pack("RGB", "RGB;L"), [1, 2, 3])
self.assertEqual(pack("RGB", "BGR"), [3, 2, 1])
self.assertEqual(pack("RGB", "RGBX"), [1, 2, 3, 255]) # 255?
self.assertEqual(pack("RGB", "BGRX"), [3, 2, 1, 0])
self.assertEqual(pack("RGB", "XRGB"), [0, 1, 2, 3])
self.assertEqual(pack("RGB", "XBGR"), [0, 3, 2, 1])
self.assertEqual(pack("RGBX", "RGBX"), [1, 2, 3, 4]) # 4->255?
self.assertEqual(pack("RGBA", "RGBA"), [1, 2, 3, 4])
self.assertEqual(pack("CMYK", "CMYK"), [1, 2, 3, 4])
self.assertEqual(pack("YCbCr", "YCbCr"), [1, 2, 3])
def test_unpack(self):
def unpack(mode, rawmode, bytes_):
im = None
if py3:
data = bytes(range(1, bytes_+1))
else:
data = ''.join(chr(i) for i in range(1, bytes_+1))
im = Image.frombytes(mode, (1, 1), data, "raw", rawmode, 0, 1)
return im.getpixel((0, 0))
def unpack_1(mode, rawmode, value):
assert mode == "1"
im = None
if py3:
im = Image.frombytes(
mode, (8, 1), bytes([value]), "raw", rawmode, 0, 1)
else:
im = Image.frombytes(
mode, (8, 1), chr(value), "raw", rawmode, 0, 1)
return tuple(im.getdata())
X = 255
self.assertEqual(unpack_1("1", "1", 1), (0, 0, 0, 0, 0, 0, 0, X))
self.assertEqual(unpack_1("1", "1;I", 1), (X, X, X, X, X, X, X, 0))
self.assertEqual(unpack_1("1", "1;R", 1), (X, 0, 0, 0, 0, 0, 0, 0))
self.assertEqual(unpack_1("1", "1;IR", 1), (0, X, X, X, X, X, X, X))
self.assertEqual(unpack_1("1", "1", 170), (X, 0, X, 0, X, 0, X, 0))
self.assertEqual(unpack_1("1", "1;I", 170), (0, X, 0, X, 0, X, 0, X))
self.assertEqual(unpack_1("1", "1;R", 170), (0, X, 0, X, 0, X, 0, X))
self.assertEqual(unpack_1("1", "1;IR", 170), (X, 0, X, 0, X, 0, X, 0))
self.assertEqual(unpack("L", "L;2", 1), 0)
self.assertEqual(unpack("L", "L;4", 1), 0)
self.assertEqual(unpack("L", "L", 1), 1)
self.assertEqual(unpack("L", "L;I", 1), 254)
self.assertEqual(unpack("L", "L;R", 1), 128)
self.assertEqual(unpack("L", "L;16", 2), 2) # little endian
self.assertEqual(unpack("L", "L;16B", 2), 1) # big endian
self.assertEqual(unpack("LA", "LA", 2), (1, 2))
self.assertEqual(unpack("LA", "LA;L", 2), (1, 2))
self.assertEqual(unpack("RGB", "RGB", 3), (1, 2, 3))
self.assertEqual(unpack("RGB", "RGB;L", 3), (1, 2, 3))
self.assertEqual(unpack("RGB", "RGB;R", 3), (128, 64, 192))
self.assertEqual(unpack("RGB", "RGB;16B", 6), (1, 3, 5)) # ?
self.assertEqual(unpack("RGB", "BGR", 3), (3, 2, 1))
self.assertEqual(unpack("RGB", "RGB;15", 2), (8, 131, 0))
self.assertEqual(unpack("RGB", "BGR;15", 2), (0, 131, 8))
self.assertEqual(unpack("RGB", "R
|
GB;16", 2), (8, 64, 0))
self.assertEqual(unpack("RGB", "BGR;16", 2), (0, 64, 8))
self.assertEqual(unpack("RGB", "RGB;4B", 2), (17, 0, 34))
self.assertEqual(unpack("RGB", "RGBX", 4), (1, 2, 3))
self.assertEqual(unpack("RGB", "BGRX", 4), (3, 2, 1))
self.assertEqual(unpack("RGB", "XRGB", 4), (2, 3, 4))
self.assertEqual(unpack("RGB", "XBGR", 4), (4, 3, 2))
self.assertEqual(unpack("RGBA", "RGBA", 4), (1, 2, 3, 4))
self.assertEqual(unpack("RGBA", "BGRA", 4), (3, 2, 1, 4))
self.assertEqual(unpack("RGBA", "ARGB", 4), (2, 3, 4, 1))
self.assertEqual(unpack("RGBA", "ABGR", 4), (4, 3, 2, 1))
self.assertEqual(unpack("RGBA", "RGBA;15", 2), (8, 131, 0, 0))
self.assertEqual(unpack("RGBA", "BGRA;15", 2), (0, 131, 8, 0))
self.assertEqual(unpack("RGBA", "RGBA;4B", 2), (17, 0, 34, 0))
self.assertEqual(unpack("RGBX", "RGBX", 4), (1, 2, 3, 4)) # 4->255?
self.assertEqual(unpack("RGBX", "BGRX", 4), (3, 2, 1, 255))
self.assertEqual(unpack("RGBX", "XRGB", 4), (2, 3, 4, 255))
self.assertEqual(unpack("RGBX", "XBGR", 4), (4, 3, 2, 255))
self.assertEqual(unpack("RGBX", "RGB;15", 2), (8, 131, 0, 255))
self.assertEqual(unpack("RGBX", "BGR;15", 2), (0, 131, 8, 255))
self.assertEqual(unpack("RGBX", "RGB;4B", 2), (17, 0, 34, 255))
self.assertEqual(unpack("CMYK", "CMYK", 4), (1, 2, 3, 4))
self.assertEqual(unpack("CMYK", "CMYK;I", 4), (254, 253, 252, 251))
self.assertRaises(ValueError, lambda: unpack("L", "L", 0))
self.assertRaises(ValueError, lambda: unpack("RGB", "RGB", 2))
self.assertRaises(ValueError, lambda: unpack("CMYK", "CMYK", 2))
if __name__ == '__main__':
unittest.main()
# End of file
|
brownian/frescobaldi
|
frescobaldi_app/lydocument.py
|
Python
|
gpl-2.0
| 8,870
| 0.000676
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2013 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Document
Provides a ly.document.Document api for a QTextDocument (or: more specifically
a Frescobaldi document.Document).
This can be used to perform operations from the ly module on a loaded
Frescobaldi document.
You don't need to save a Document instance. Just create it and use it, then
discard it.
"""
from PyQt5.QtGui import QTextCursor
import ly.document
import tokeniter
import highlighter
def cursor(cursor, select_all=False):
"""Return a Cursor for the specified QTextCursor.
The ly Cursor is instantiated with a Document proxying for the
original cursors document.
So you can call all operations in the ly module and they will work on a
Frescobaldi document (which is a subclass of QTextDocument).
If select_all is True, the ly Cursor selects the whole document if the
original cursor has no selection.
"""
if not select_all or cursor.hasSelection():
start, end = cursor.selectionStart(), cursor.selectionEnd()
else:
start, end = 0, None
return Cursor(Document(cursor.document()), start, end)
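# Hedged usage sketch (added; not part of the original module): a typical call site
# wraps the view's QTextCursor and hands the resulting ly.document.Cursor to a tool
# in the ly package; 'view' is a hypothetical editor view object.
#   ly_cursor = cursor(view.textCursor(), select_all=True)
#   # ... pass ly_cursor to any operation from the ly module ...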
class Cursor(ly.document.Cursor):
"""A ly.document.Cursor with an extra cursor() method."""
def cursor(self):
"""Return a QTextCursor with the same selection."""
c = QTextCursor(self.document.document)
c.movePosition(QTextCursor.End) if self.end is None else c.setPosition(self.end)
c.setPosition(self.start, QTextCursor.KeepAnchor)
return c
class Document(ly.document.DocumentBase):
"""Document proxies a loaded Frescobaldi document (QTextDocument).
This is used to let the tools in the ly module operate on Frescobaldi
documents.
Creating a Document is very fast, you do not need to save it. When
applying the changes, Document starts an editblock, so that the
operations appears as one undo-item.
It is recommended to not nest calls to QTextCursor.beginEditBlock(), as
the highlighter is not called to update the tokens until the last
endEditBlock() is called.
Therefore Document provides a simple mechanism for combining several
change operations via the combine_undo attribute.
If combine_undo is None (the default), the first time changes are applied
QTextCursor.beginEditBlock() will be called, but subsequent times
QTextCursor.joinPreviousEditBlock() will be used. So the highlighter
updates the tokens between the operations, but they will appear as one
undo-item.
If you want to combine the very first operation already with an earlier
change, set combine_undo to True before the changes are applied (e.g.
before entering or exiting the context).
If you do not want to combine operations into a single undo-item at all,
set combine_undo to False.
(Of course you can nest calls to QTextCursor.beginEditBlock(), but in
that case the tokens will not be updated between your operations. If
your operations do not depend on the tokens, it is no problem
whatsoever. The tokens *are* updated after the last call to
QTextCursor.endEditBlock().)
"""
def __init__(self, document):
self._d = document
super(Document, self).__init__()
self.combine_undo = None
def __len__(self):
"""Return the number of blocks"""
return self._d.blockCount()
def __getitem__(self, index):
"""Return the block at the specified index."""
return self._d.findBlockByNumber(index)
@property
def document(self):
"""Return the QTextDocument we were instantiated with."""
return self._d
@property
def filename(self):
"""Return the document's local filename, if any."""
return self.document.url().toLocalFile()
def plaintext(self):
"""The document contents as a plain text string."""
return self._d.toPlainText()
def setplaintext(self, text):
"""Sets the document contents to the text string."""
self._d.setPlainText(text)
def size(self):
"""Return the number of characters in the document."""
return self._d.characterCount()
def block(self, position):
"""Return the text block at the specified character position.
The text block itself has no methods, but it can be used as an
argument to other methods of this class.
(Blocks do have to support the '==' operator.)
"""
return self._d.findBlock(position)
def index(self, block):
"""Return the linenumber of the block (starting with 0)."""
return block.blockNumber()
def position(self, block):
"""Return the position of the specified block."""
return block.position()
def text(self, block):
"""Return the text of the specified block."""
return block.text()
def next_block(self, block):
"""Return the next block, which may be invalid."""
return block.next()
def previous_block(self, block):
"""Return the previous block, which may be invalid."""
return block.previous()
def isvalid(self, block):
"""Return True if the block is a valid block."""
return block.isValid()
def apply_changes(self):
"""Apply the changes and update the tokens."""
c = QTextCursor(self._d)
# record a sensible position for undo
c.setPosition(self._changes_list[-1][0])
c.joinPreviousEditBlock() if self.combine_undo else c.beginEditBlock()
try:
for start, end, text in self._changes_list:
c.movePosition(QTextCursor.End) if end is None else c.setPosition(end)
c.setPosition(start, QTextCursor.KeepAnchor)
c.insertText(text)
finally:
c.endEditBlock()
if self.combine_undo is None:
self.combine_undo = True
def tokens(self, block):
"""Return the tuple of tokens of the specified block."""
return tokeniter.tokens(block)
def initial_state(self):
"""Return the state at the beginning of the document."""
return highlighter.highlighter(self._d).initialState()
def state(self, block):
"""Return the state at the start of the specified block."""
return tokeniter.state(block)
def state_end(self, block):
"""Return the state at the end of the specified block."""
return tokeniter.state_end(block)
class Runner(ly.document.Runner):
"""A Runner that adds a cursor() method, returning a QTextCursor."""
def cursor(self, start=0, end=None):
"""Returns a QTextCursor for the last token.
If start is given the cursor will start at position start in the token
(from the beginning of the token). Start defaults to 0.
If end is given, the cursor will end at that position in the token (from
the beginning of the token). End defaults to the length of the token.
"""
if end is None:
end = len(self.token())
c = QTextCursor(self.document.document)
c.setPosition(self.position() + start)
c.setPosition(self.position() + end, QTextCursor.KeepAnchor)
return c
class Source(ly.document.Source):
"""A Source that adds a cursor() method, returni
|
yangdw/PyRepo
|
src/annotation/haven/haven/autoreload.py
|
Python
|
mit
| 5,900
| 0.002712
|
# Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team (team@cherrypy.org)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CherryPy Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, time, signal, traceback
import thread
try:
import termios
except ImportError:
termios = None
RUN_RELOADER = True
_mtimes = {}
_win = (sys.platform == "win32")
_error_files = []
def code_changed():
global _mtimes, _win
filenames = []
for m in list(sys.modules.values()):
try:
filenames.append(m.__file__)
except AttributeError:
pass
for filename in filenames + _error_files:
if not filename:
continue
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
if not os.path.exists(filename):
continue # File might be in an egg, so it can't be reloaded.
stat = os.stat(filename)
mtime = stat.st_mtime
if _win:
mtime -= stat.st_ctime
if filename not in _mtimes:
_mtimes[filename] = mtime
continue
if mtime != _mtimes[filename]:
_mtimes = {}
try:
del _error_files[_error_files.index(filename)]
except ValueError:
pass
return True
return False
def check_errors(fn):
def wrapper(*args, **kwargs):
try:
fn(*args, **kwargs)
except (ImportError, IndentationError, NameError, SyntaxError,
TypeError, AttributeError):
et, ev, tb = sys.exc_info()
if getattr(ev, 'filename', None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def ensure_echo_on():
if termios:
fd = sys.stdin
if fd.isatty():
attr_list = termios.tcgetattr(fd)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, 'SIGTTOU'):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(fd, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def reloader_thread():
ensure_echo_on()
while RUN_RELOADER:
if code_changed():
sys.exit(3) # force reload
time.sleep(1)
def restart_with_reloader():
while True:
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
if exit_code != 3:
return exit_code
def python_reloader(main_func, args, kwargs):
if os.environ.get("RUN_MAIN") == "true":
thread.start_new_thread(main_func, args, kwargs)
try:
reloader_thread()
except KeyboardInterrupt:
pass
else:
try:
exit_code = restart_with_reloader()
if exit_code < 0:
os.kill(os.getpid(), -exit_code)
else:
sys.exit(exit_code)
except KeyboardInterrupt:
pass
def jython_reloader(main_func, args, kwargs):
from _systemrestart import SystemRestart
thread.start_new_thread(main_func, args)
while True:
if code_changed():
raise SystemRestart
time.sleep(1)
def main(main_func, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
if sys.platform.startswith('java'):
reloader = jython_reloader
else:
reloader = python_reloader
wrapped_main_func = check_errors(main_func)
reloader(wrapped_main_func, args, kwargs)
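if __name__ == '__main__':
    # Hedged usage sketch (added; not part of the original module): hand a
    # long-running callable to main(); the child process is restarted with exit
    # code 3 whenever code_changed() detects an edited source file.
    def _serve_forever():  # hypothetical entry point, for illustration only
        while RUN_RELOADER:
            time.sleep(1)
    main(_serve_forever)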
|
kidmose/python-course
|
mandelbrot/tests/__init__.py
|
Python
|
mit
| 674
| 0.004451
|
"""
Test suite for module.
Holds constants and methods shared among multiple tests.
See submodules for individual tests.
"""
import os
import shutil
PARAM_LIST = {
'pre_min': 100,
'pre_max': 101,
'Pre': 10,
'pim_min': -101,
'pim_max': -100,
'Pim': 14,
'T': 16,
'I': 20,
}
OUTPUT_DIR = "test-output"
def purge_output_dir(path=OUTPUT_DIR):
delete_output_dir(path=path)
if os.path.exists(path):
raise Exception("Failed to removed test output folder")
os.makedirs(path)
def delete_output_dir(path=OUTPUT_DIR):
if os.path.isfile(path):
os.remove(path)
if os.path.isdir(path):
shutil.rmtree(path)
|
toddsifleet/equals
|
equals/constraints/containing.py
|
Python
|
mit
| 440
| 0
|
from .base import Base
class Containing(Base):
_description = 'containing: {}'
def _check(self, value):
# This will check list like objects
for v in self.args:
if v not in value:
return False
# This will check dictionary like objects
for k, v in self.kwargs.items():
if k not in value or not value[k] == v:
return False
return True
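# Hedged usage sketch (added; not part of the original module, and assuming the Base
# class stores positional and keyword arguments on self.args / self.kwargs, which is
# what _check reads):
def _containing_example():
    assert Containing(1, 2)._check([0, 1, 2, 3])        # sequence holds both values
    assert Containing(a=1)._check({'a': 1, 'b': 2})     # mapping item matches
    assert not Containing(a=2)._check({'a': 1})         # mismatched value fails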
|
defance/edx-platform
|
common/djangoapps/enrollment/management/tests/test_enroll_user_in_course.py
|
Python
|
agpl-3.0
| 2,563
| 0.00039
|
""" Test the change_enrollment command line script."""
import ddt
import unittest
from uuid import uuid4
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from enrollment.api import get_enrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class EnrollManagementCommandTest(SharedModuleStoreTestCase):
"""
Test the enroll_user_in_course management command
"""
@classmethod
def setUpClass(cls):
super(EnrollManagementCommandTest, cls).setUpClass()
cls.course = CourseFactory.create(org='fooX', number='007')
def setUp(self):
super(EnrollManagementCommandTest, self).setUp()
self.course_id = unicode(self.course.id)
self.username = 'ralph' + uuid4().hex
self.user_email = self.username + '@example.com'
UserFactory(username=self.username, email=self.user_email)
def test_enroll_user(self):
command_args = [
'--course', self.course_id,
'--email', self.user_email,
]
call_command(
'enroll_user_in_course',
*command_args
)
user_enroll = get_enrollment(self.username, self.course_id)
self.assertTrue(user_enroll['is_active'])
def test_enroll_user_twice(self):
"""
Ensures the command is idempotent.
"""
command_args = [
'--course', self.course_id,
'--email', self.user_email,
]
for _ in range(2):
call_command(
'enroll_user_in_course',
*command_args
)
# Second run does not impact the first run (i.e., the
# user is still enrolled, no exception was raised, etc)
user_enroll = get_enrollment(self.username, self.course_id)
self.assertTrue(user_enroll['is_active'])
@ddt.data(['--email', 'foo'], ['--course', 'bar'], ['--bad-param', 'baz'])
def test_not_enough_args(self, arg):
"""
When the command is missing certain arguments, it should
raise an exception
"""
command_args = arg
with self.assertRaises(CommandError):
call_command(
'enroll_user_in_course',
*command_args
)
|
oinopion/pipeye
|
pipeye/urls.py
|
Python
|
bsd-2-clause
| 820
| 0.004878
|
from django.conf.urls import patterns, include, url
from django.core.urlresolvers import reverse_lazy
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'pipeye.views.home', name='home'),
url(r'^watches/', include('pipeye.watches.urls')),
url(r'^packages/', include('pipeye.packages.urls')),
url(r'^accounts/', include('pipeye.accounts.urls')),
# github login
url(r'^login/$', 'social_auth.views.auth',
{'backend': 'github'}, name='login'),
url(r'^login/complete/(?P<backend>\w+)/$',
'social_auth.views.complete', name='login_complete'),
url(r'^logout/$', 'django.contrib.auth.views.logout',
{'next_page': reverse_lazy('home')}, name='logout'),
# admin
url(r'^admin/', include(admin.site.urls)),
)
|
siliconchris1973/fairytale
|
RASPI-stuff/python-codeline/fairytale/main.py
|
Python
|
apache-2.0
| 3,954
| 0.006576
|
#!/usr/bin/env python3
# encoding: utf-8
"""
main.py
The entry point for the book reader application.
"""
__version_info__ = (0, 0, 1)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "c.guenther@mac.com"
import time
import sqlite3
import pdb
import signal
import sys, os
import rfid
import config
import RPi.GPIO as GPIO
from player import Player
from status_light import StatusLight
from threading import Thread
class BookReader(object):
"""The main class that controls the player, the GPIO pins and the RFID reader"""
def __init__(self):
"""Initialize all the things"""
self.rfid_reader = rfid.Reader(**config.serial)
# setup signal handlers. SIGINT for KeyboardInterrupt
# and SIGTERM for when running from supervisord
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
self.status_light = StatusLight(config.status_light_pin)
thread = Thread(target=self.status_light.start)
thread.start()
self.setup_db()
self.player = Player(config.mpd_conn, self.status_light)
self.setup_gpio()
def setup_db(self):
"""Setup a connection to the SQLite db"""
self.db_conn = sqlite3.connect(config.db_file)
self.db_cursor = self.db_conn.cursor()
def setup_gpio(self):
"""Setup all GPIO pins"""
GPIO.setmode(GPIO.BCM)
# input pins for buttons
for pin in config.gpio_pins:
GPIO.setup(pin['pin_id'], GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(pin['pin_id'], GPIO.FALLING, callback=getattr(self.player, pin['callback']), bouncetime=pin['bounce_time'])
def signal_handler(self, signal, frame):
"""When quiting, stop playback, close the player and release GPIO pins"""
self.player.close()
self.status_light.exit()
GPIO.cleanup()
sys.exit(0)
def loop(self):
"""The main event loop. Th
|
is is where we look for new RFID cards on the RFID reader. If one is
|
present and different from the book that's currently playing, in which case:
1. Stop playback of the current book if one is playing
2. Start playing
"""
while True:
if self.player.is_playing():
self.on_playing()
elif self.player.finished_book():
# when at the end of a book, delete its progress from the db
# so we can listen to it again
self.db_cursor.execute(
'DELETE FROM progress WHERE book_id = %d' % self.player.book.book_id)
self.db_conn.commit()
self.player.book.reset()
rfid_card = self.rfid_reader.read()
if not rfid_card:
continue
book_id = rfid_card.get_id()
if book_id and book_id != self.player.book.book_id: # a change in book id
progress = self.db_cursor.execute(
'SELECT * FROM progress WHERE book_id = "%s"' % book_id).fetchone()
self.player.play(book_id, progress)
def on_playing(self):
"""Executed for each loop execution. Here we update self.player.book with the latest known position
and save the progress to db"""
status = self.player.get_status()
self.player.book.elapsed = float(status['elapsed'])
self.player.book.part = int(status['song']) + 1
#print "%s second of part %s" % (self.player.book.elapsed, self.player.book.part)
self.db_cursor.execute(
'INSERT OR REPLACE INTO progress (book_id, part, elapsed) VALUES (%s, %d, %f)' %\
(self.player.book.book_id, self.player.book.part, self.player.book.elapsed))
self.db_conn.commit()
if __name__ == '__main__':
reader = BookReader()
reader.loop()
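# Hedged note (added; not part of the original module): the SQL statements above
# assume a progress table shaped roughly like
#   CREATE TABLE progress (book_id TEXT PRIMARY KEY, part INTEGER, elapsed REAL);
# so that INSERT OR REPLACE acts as an upsert keyed on book_id.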
|
Newterm/florence
|
editor/toolkit.py
|
Python
|
gpl-2.0
| 16,795
| 0.050491
|
#!/usr/bin/python
import cairo
import gtk
import copy
def abs(x):
if x < 0:
return -x
else:
return x
class object:
def __init__(self, name, x, y, w, h):
self.name = name
self.x = x
self.y = y
self.w = w
self.h = h
# hugging vector
self.dx = 0
self.dy = 0
# 0 = normal ; 1 = active ; 2 = selected
self.status = 0
self.moving = False
self.offsetx = 0
self.offsety = 0
self.anchor = 0
self.dirty = False
def get_extents(self):
return self.x, self.y, self.w, self.h
def get_extents_after_hug(self):
# 173
# 506
# 284
if self.anchor == 1:
return self.x + self.dx, self.y + self.dy, self.w - self.dx, self.h - self.dy
elif self.anchor == 2:
return self.x + self.dx, self.y, self.w - self.dx, self.h + self.dy
elif self.anchor == 3:
return self.x, self.y + self.dy, self.w + self.dx, self.h - self.dy
elif self.anchor == 4:
return self.x, self.y, self.w + self.dx, self.h + self.dy
elif self.anchor == 5:
return self.x + self.dx, self.y, self.w - self.dx, self.h
elif self.anchor == 6:
return self.x, self.y, self.w + self.dx, self.h
elif self.anchor == 7:
return self.x, self.y + self.dy, self.w, self.h - self.dy
elif self.anchor == 8:
return self.x, self.y, self.w, self.h + self.dy
else:
return self.x + self.dx, self.y + self.dy, self.w, self.h
def inbox(self, x, y, bx, by, bw=10, bh=10):
x2 = bx + bw
y2 = by + bh
return ( x > bx ) and ( y > by ) and ( x < x2 ) and ( y < y2 )
def hit(self, x, y):
return self.inbox(x, y, self.x, self.y, self.w, self.h)
def collide(self, ax, ay, ax2, ay2):
if ax < ax2:
x = ax
x2 = ax2
else:
x = ax2
x2 = ax
if ay < ay2:
y = ay
y2 = ay2
else:
y = ay2
y2 = ay
ret = ( ( ( self.x <= x ) and ( (self.x+self.w) >= x ) ) or ( ( self.x >= x ) and ( self.x <= x2 ) ) )
ret = ret and ( ( ( self.y <= y ) and ( (self.y+self.h) >= y ) ) or ( ( self.y >= y ) and ( self.y <= y2 ) ) )
return ret
def activate(self):
if self.status < 1:
self.dirty = True
self.status = 1
def deactivate(self):
if self.status == 1:
self.dirty = True
self.status = 0
def select(self):
if self.status != 2:
self.dirty = True
self.status = 2
def deselect(self):
if self.status != 0:
self.dirty = True
self.status = 0
def onpress(self, x, y):
self.moving = True
self.offsetx = x - self.x
self.offsety = y - self.y
# 173
# 506
# 284
if ( self.offsetx <= 10 ) and ( self.offsety <= 10 ):
self.anchor = 1
elif ( self.offsetx <= 10 ) and ( self.offsety >= ( self.h - 10 ) ):
self.anchor = 2
elif ( self.offsety <= 10 ) and ( self.offsetx >= ( self.w - 10 ) ):
self.anchor = 3
elif ( self.offsetx >= ( self.w - 10 ) ) and ( self.offsety >= ( self.h - 10 ) ):
self.anchor = 4
elif self.inbox( self.offsetx, self.offsety, 0, (self.h/2)-5 ):
self.anchor = 5
elif self.inbox( self.offsetx, self.offsety, self.w-10, (self.h/2)-5 ):
self.anchor = 6
elif self.inbox( self.offsetx, self.offsety, (self.w/2)-5, 0 ):
self.anchor = 7
elif self.inbox( self.offsetx, self.offsety, (self.w/2)-5, self.h-10 ):
self.anchor = 8
else:
self.anchor = 0
def onrelease(self):
self.moving = False
if self.anchor == 1:
self.x = self.x + self.dx
self.y = self.y + self.dy
self.w = self.w - self.dx
self.h = self.h - self.dy
elif self.anchor == 2:
self.x = self.x + self.dx
self.w = self.w - self.dx
self.h = self.h + self.dy
elif self.anchor == 3:
self.y = self.y + self.dy
self.w = self.w + self.dx
self.h = self.h - self.dy
elif self.anchor == 4:
self.w = self.w + self.dx
self.h = self.h + self.dy
elif self.anchor == 5:
self.x = self.x + self.dx
self.w = self.w - self.dx
elif self.anchor == 6:
self.w = self.w + self.dx
elif self.anchor == 7:
self.y = self.y + self.dy
self.h = self.h - self.dy
elif self.anchor == 8:
self.h = self.h + self.dy
else:
self.x = self.x + self.dx
self.y = self.y + self.dy
self.dx = 0
self.dy = 0
def onmotion(self, x, y):
if self.moving:
oldx = self.x
oldy = self.y
oldw = self.w
oldh = self.h
oldoffx = self.offsetx
oldoffy = self.offsety
# 173
# 506
# 284
if self.anchor == 1:
self.w = self.w + self.x
self.x = x - self.offsetx
self.w = self.w - self.x
self.h = self.h + self.y
self.y = y - self.offsety
self.h = self.h - self.y
elif self.anchor == 2:
self.w = self.w + self.x
self.x = x - self.offsetx
self.w = self.w - self.x
self.h = self.h - self.offsety
self.offsety = y - self.y
self.h = self.h + self.offsety
elif self.anchor == 3:
self.h = self.h + self.y
self.y = y - self.offsety
self.h = self.h - self.y
self.w = self.w - self.offsetx
self.offsetx = x - self.x
self.w = self.w + self.offsetx
elif self.anchor == 4:
self.w = self.w - self.offsetx
self.offsetx = x - self.x
self.w = self.w + self.offsetx
self.h = self.h - self.offsety
self.offsety = y - self.y
self.h = self.h + self.offsety
elif self.anchor == 5:
self.w = self.w + self.x
self.x = x - self.offsetx
self.w = self.w - self.x
elif self.anchor == 6:
self.w = self.w - self.offsetx
self.offsetx = x - self.x
self.w = self.w + self.offsetx
elif self.anchor == 7:
self.h = self.h + self.y
self.y = y - self.offsety
self.h = self.h - self.y
elif self.anchor == 8:
self.h = self.h - self.offsety
self.offsety = y - self.y
self.h = self.h + self.offsety
else:
self.x = x - self.offsetx
self.y = y - self.offsety
if self.w < 10:
self.w = 10
if self.x != oldx:
self.x = oldx + oldw - 10
if self.offsetx != oldoffx:
self.offsetx = oldoffx - oldw + 10
if self.h < 10:
self.h = 10
if self.y != oldy:
self.y = oldy + oldh - 10
if self.offsety != oldoffy:
self.offsety = oldoffy - oldh + 10
self.dirty = True
elif self.hit(x, y):
if self.status == 0:
self.dirty = True
self.status = 1
else:
if self.status == 1:
self.dirty = True
self.status = 0
def onleave(self):
if self.status == 1:
self.dirty = True
self.status = 0
def draw(self, crctx):
a = 1
if self.moving:
a = 0.7
# hug
if ( self.dx != 0 ) or ( self.dy != 0 ):
tmp = cairo.ImageSurface(cairo.FORMAT_A8, 16, 16)
cr2 = cairo.Context(tmp)
cr2.set_source_rgba(0, 0, 0, 1)
cr2.set_line_width(8)
cr2.move_to(0, 0)
cr2.line_to(16, 16)
cr2.stroke()
cr2.move_to(12, -4)
cr2.line_to(20, 4)
cr2.stroke()
cr2.move_to(-4, 12)
cr2.line_to(4, 20)
cr2.stroke()
pat = cairo.SurfacePattern(tmp)
pat.set_extend(cairo.EXTEND_REPEAT)
crctx.set_source(pat)
x, y, w, h = self.get_extents_after_hug()
crctx.rectangle(x + 2, y + 2, w - 4, h - 4)
crctx.set_line_width(4)
crctx.stroke()
crctx.set_source_rgba(0.7, 0.7, 0.7, a)
crctx.rectangle(self.x, self.y, self.w, self.h)
crctx.fill()
crctx.set_line_width(2)
if self.status > 0:
if self.status == 1:
crctx.set_source_rgba(0.5, 0.5, 0.5, a)
elif self.status == 2:
crctx.set_source_rgba(0.1, 0, 0.5, a)
crctx.rectangle(self.x, self.y, self.w, self.h)
crctx.stroke()
# corner anchors
crctx.rectangle(self.x, self.y, 10, 10)
crctx.fill()
crctx.rectangle(self.x+self.w-10, self.y, 10, 10)
crctx.fill()
crctx.rectangle(self.x, self.y+self.h-10, 10, 10)
crctx.fill()
crctx.rectangle(self.x+self.w-10, self.y+self.h-10, 10, 10)
crctx.fill()
#edge anchors
crctx.rectangle(self.x, self.y+(self.h/2)-5, 10, 10)
crctx.fill()
crctx.rectangle(self.x+self.w-10, self.y+(self.h/2)-5, 10, 10)
crctx.fill()
crctx.rectangle(self.x+(self.w/2)-5, self.y, 10, 10)
crctx.fill()
crctx.rectangle(self.x+(self.w/2)-5, self.y+self.h-10, 10, 10)
crctx.fill()
else:
crctx.set_source_rgba(0, 0, 0, 1)
crctx.rectangle(self.x, self.y, self.w, self.h)
crctx.stroke()
xbearing, ybearing, width, height, xadvance, yadvance = crctx.text_extents ( self.name )
crctx.move_to( self.x + ( self.w / 2 ) + 0.5 - xbearing - ( width / 2 ), self.y + (self.h / 2 ) +
|
rhdedgar/openshift-tools
|
openshift_tools/monitoring/ocutil.py
|
Python
|
apache-2.0
| 4,378
| 0.001827
|
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
"""
Interface to OpenShift oc command
"""
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shlex
import atexit
import shutil
import string
import random
import yaml
import subprocess
# pylint: disable=bare-except
def cleanup_file(inc_file):
""" clean up """
try:
os.unlink(inc_file)
except:
pass
class OCUtil(object):
""" Wrapper for interfacing with OpenShift 'oc' utility """
def __init__(self, namespace='default', config_file='/tmp/admin.kubeconfig', verbose=False, logger=None):
"""
Take initial values for running 'oc'
Ensure to set non-default namespace if that is what is desired
"""
self.namespace = namespace
self.config_file = config_file
self.verbose = verbose
self.copy_kubeconfig()
self.logger = logger
def copy_kubeconfig(self):
""" make a copy of the kubeconfig """
file_name = os.path.join(
'/tmp',
''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(7))
)
shutil.copy(self.config_file, file_name)
atexit.register(cleanup_file, file_name)
self.config_file = file_name
def _run_cmd(self, cmd, base_cmd='oc', ):
""" Actually execute the command """
cmd = " ".join([base_cmd, '--config', self.config_file, '-n', self.namespace, cmd])
if self.logger:
self.logger.debug("ocutil._run_cmd( {} )".format(cmd))
cmd = shlex.split(cmd)
if self.verbose:
print "Running command: {}".format(str(cmd))
try:
return subprocess.check_output(cmd)
except subprocess.CalledProcessError as err:
if self.logger:
self.logger.exception('Error from server: %s' % err.output)
raise err
def _run_cmd_yaml(self, cmd, base_cmd='oc', yaml_cmd='-o yaml'):
""" Actually execute the command and expects yaml """
return yaml.safe_load(self._run_cmd(" ".join([cmd, yaml_cmd]), base_cmd=base_cmd))
def run_user_cmd(self, cmd, base_cmd='oc'):
""" Runs a custom user command """
return self._run_cmd(cmd, base_cmd=base_cmd)
def run_user_cmd_yaml(self, cmd, base_cmd='oc', yaml_cmd='-o yaml'):
"""Runs a custom user command and expects yaml"""
return self._run_cmd_yaml(cmd, base_cmd=base_cmd, yaml_cmd=yaml_cmd)
def get_secrets(self, name):
""" Get secrets from object 'name' """
return self._run_cmd_yaml("get secrets {}".format(name))
def get_endpoint(self, name):
""" Get endpoint details """
return self._run_cmd_yaml("get endpoints {}".format(name))
def get_service(self, name):
""" Get service details """
return self._run_cmd_yaml("get service {}".format(name))
def get_rc(self, name):
""" Get replication controller details """
return self._run_cmd_yaml("get rc {}".format(name))
def get_dc(self, name):
""" Get deployment config details """
return self._run_cmd_yaml("get dc {}".format(name))
def get_route(self, name):
""" Get routes details """
return self._run_cmd_yaml("get route {}".format(name))
def get_pods(self):
""" Get all the pods in the namespace """
return self._run_cmd_yaml("get pods")
def get_projects(self):
""" Get all projects in the cluster """
return self._run_cmd_yaml("get projects")
def get_nodes(self):
""" Get all the nodes in the cluster """
return self._run_cmd_yaml("get nodes")
def get_log(self, name):
""" Gets the log for the specified container """
return self._run_cmd("logs {}".format(name))
|
woodymit/millstone
|
genome_designer/pipeline/variant_calling/freebayes.py
|
Python
|
mit
| 10,343
| 0.002127
|
"""Wrapper for running Freebayes.
"""
import collections
import errno
import fileinput
import glob
import tempfile
import os
import shutil
import subprocess
import vcf
from celery import task
from django.conf import settings
from main.models import Dataset
from main.model_utils import get_dataset_with_type
from pipeline.read_alignment_util import ensure_bwa_index
from pipeline.variant_calling.common import add_vcf_dataset
from pipeline.variant_calling.common import process_vcf_dataset
from pipeline.variant_calling.common import get_common_tool_params
from pipeline.variant_calling.constants import TOOL_FREEBAYES
from pipeline.variant_effects import run_snpeff
from utils import uppercase_underscore
VCF_AF_HEADER = '##FORMAT=<ID=AF,Number=1,Type=Float,Description="Alternate allele observation frequency, AO/(RO+AO)">'
def freebayes_regions(ref_genome,
region_size=settings.FREEBAYES_REGION_SIZE):
"""
Use bamtools (installed as part of freebayes) to intelligently
generate regions that will be run in freebayes in parallel.
ref_genome: the reference genome object
region_size: how many bases each parallelized region 'chunk' will be
"""
ref_genome_fasta = get_dataset_with_type(ref_genome,
Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
# ensure that the fasta file has an index
ensure_bwa_index(ref_genome_fasta)
ref_genome_faidx = ref_genome_fasta + '.fai'
regions = []
with open(ref_genome_faidx) as faidx_fh:
for line in faidx_fh:
fields = line.strip().split('\t')
chr_name, chr_len = fields[:2]
chr_len = int(chr_len)
end = 0
while end < chr_len:
start = end
end = start + region_size
if end > chr_len:
end = chr_len
regions.append('{chr_name}:{start}-{end}'.format(
chr_name=chr_name,
start=start,
end=end))
start = end
return regions
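# Hedged worked example (added; not part of the original module): for a single
# 250,000 bp contig named "chr1" and region_size=100000, the loop above yields
#   ['chr1:0-100000', 'chr1:100000-200000', 'chr1:200000-250000']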
def run_freebayes(fasta_ref, sample_alignments, vcf_output_dir,
vcf_output_filename, alignment_type, region=None, **kwargs):
"""Run freebayes using the bam alignment files keyed by the alignment_type
for all Genomes of the passed in ReferenceGenome.
NOTE: If a Genome doesn't have a bam alignment file with this
alignment_type, then it won't be used.
Returns:
Boolean, True if successfully made it to the end, else False.
"""
print 'RUNNING FREEBAYES...'
bam_files = [
get_dataset_with_type(sa, alignment_type).get_absolute_location()
for sa in sample_alignments]
# Build up the bam part of the freebayes binary call.
bam_part = []
for bam_file in bam_files:
bam_part.append('--bam')
bam_part.append(bam_file)
# Determine alignment ploidy (haploid or diploid).
alignment_group = sample_alignments[0].alignment_group
if alignment_group.alignment_options['call_as_haploid']:
alignment_ploidy = 1
else:
alignment_ploidy = 2
other_args_part = [
'--fasta-reference', fasta_ref,
'--pvar', '0.001',
'--ploidy', str(alignment_ploidy),
'--min-alternate-fraction', '.3',
'--no-population-priors',
# '--binomial-obs-priors-off',
'--use-mapping-quality',
'--min-base-quality', '25',
'--min-mapping-quality', '30'
]
if region:
other_args_part.extend(['--region', region])
# Build the full command and execute it for all bam files at once.
full_command = (
['%s/freebayes/freebayes' % settings.TOOLS_DIR] +
bam_part +
other_args_part)
print ' '.join(full_command)
# Run Freebayes.
with open(vcf_output_filename + '.error', 'w') as error_output_fh:
with open(vcf_output_filename, 'w') as fh:
subprocess.check_call(
full_command, stdout=fh, stderr=error_output_fh)
# add the allele frequency FORMAT field to the vcf.
process_freebayes_region_vcf(vcf_output_filename)
return True # success
def process_freebayes_region_vcf(vcf_output_filename):
"""
Processes vcf before region merging.
IF AO and RO are available for an allele, also add alt allele
percentages (AF), as percentage of total depth can be a good way to filter
het/hom calls.
"""
# store the modified VCF in this temporary file, then move it to overwrite
# the original file when done adding this field.
temp_fh = tempfile.NamedTemporaryFile(delete=False)
with open(vcf_output_filename, 'r') as vcf_input_fh:
vcf_reader = vcf.Reader(vcf_input_fh)
# Generate extra header row for AF = AO/(RO+AO).
vcf_reader._header_lines.append(VCF_AF_HEADER)
key, val = vcf.parser._vcf_metadata_parser().read_format(VCF_AF_HEADER)
vcf_reader.formats[key] = val
# A list of all the FORMAT genotype keys, in order
format_keys = vcf_reader.formats.keys()
vcf_writer = vcf.Writer(temp_fh, vcf_reader)
# Write the old records with the new AF FORMAT field
for record in vcf_reader:
# This simply appends ':AF' to the record format field
record.add_format('AF')
# check if there are multiple alternate alleles
multi_alts = len(record.ALT) > 1
for sample in record.samples:
# Get alt allele frequencies for each alternate allele.
try:
# TODO: Right now, summing multiple alternate alleles because
# we turn arrays into strings in the UI.
if multi_alts:
total_obs = float(sum(sample['AO']) + sample['RO'])
if total_obs > 0:
af = sum([float(ao) / total_obs for ao in sample['AO']])
# if a single alternate allele:
else:
total_obs = float(sample['AO'] + sample['RO'])
if total_obs > 0:
af = float(sample['AO']) / total_obs
except:
af = 0.0
# new namedtuple with the additional format field
CallData = collections.namedtuple(
'CallData',
sample.data._fields+('AF',))
sample.data = CallData(*sample.data, AF=af)
vcf_writer.write_record(record)
# close the writer and move the temp file over the original to replace it
vcf_writer.close()
shutil.move(temp_fh.name, vcf_output_filename)
print 'moved from {} to {}'.format(temp_fh.name, vcf_output_filename)
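# Hedged worked example (added; not part of the original module): for a biallelic
# call with RO=6 and AO=18, total_obs = 24.0 and the AF value written above is
# 18 / 24 = 0.75.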
def merge_freebayes_parallel(alignment_group):
"""
Merge, sort, and make unique all regional freebayes variant calls after
parallel execution.
Returns the Dataset pointing to the merged vcf file. If no freebayes files,
returns None.
"""
# First, grab all freebayes parallel vcf files.
common_params = get_common_tool_params(alignment_group)
partial_freebayes_vcf_output_dir = os.path.join(
common_params['output_dir'], 'freebayes')
# Glob all the parial (region-specific) vcf files.
# Assert that there is at least one.
vcf_output_filename_prefix = os.path.join(partial_freebayes_vcf_output_dir,
uppercase_underscore(common_params['alignment_type']) +
'.partial.*.vcf')
vcf_files = glob.glob(vcf_output_filename_prefix)
if not len(vcf_files):
return None
# Generate output filename.
vcf_ouput_filename_merged = os.path.join(partial_freebayes_vcf_output_dir,
uppercase_underscore(common_params['alignment_type']) + '.vcf')
vcf_ouput_filename_merged_fh = open(vcf_ouput_filename_merged, 'w')
streamsort_cmd = ' '.join([
settings.VCFSTREAMSORT_BINARY,
'-w 1000 | ',
settings.VCFUNIQ_BINARY])
# create a pipe to write to that will sort all the sub-vcfs
stream_merge_proc = subprocess.Popen(strea
|
nyarasha/firemix
|
plugins/radial_wipe.py
|
Python
|
gpl-3.0
| 1,645
| 0.001824
|
# This file is part of Firemix.
#
# Copyright 2013-2016 Jonathan Evans <jon@craftyjon.com>
#
# Firemix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Firemix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Firemix. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import math
from lib.transition import Transition
from lib.buffer_utils import BufferUtils
class RadialWipe(Transition):
"""
Implements a radial wipe (Iris) transition
"""
def __init__(self, app):
Transition.__init__(self, app)
def __str__(self):
return "Radial Wipe"
def reset(self):
locations = self._app.scene.get_all_pixel_locations()
locations -= self._app.scene.center_point()
#locations -= locations[np.random.randint(0, len(locations) - 1)]
locations = np.square(locations)
self.distances = locations.T[0] + locations.T[1]
self.distances /= max(self.distances)
def get(self, start, end, progress):
buffer = np.where(self.distances < progress, end.T, start.T)
buffer[1][np.abs(self.distances - progress) < 0.02] += 0.5 # we can apply effects to transition line here
return buffer.T
|
kuenishi/vHut
|
src/admin_server/vhut/agent/vhut/vhutac.py
|
Python
|
gpl-2.0
| 12,562
| 0.006064
|
# -*- coding: utf-8 -*-
'''
Copyright 2011 NTT Software Corporation.
All Rights Reserved.
@author NTT Software Corporation.
@version 1.0.0
$Date: 2010-08-31 09:54:14 +0900 (火, 31 8 2010) $
$Revision: 435 $
$Author: NTT Software Corporation. $
'''
import os
import sys
from optparse import OptionParser
from nw import NwHandler
import csv
import logging.config
# Key
KEY_ENV_VHUT_HOME = 'VHUT_HOME'
# Configuration file
PATH_CONFIG = os.environ[KEY_ENV_VHUT_HOME]+"/agent/conf/vhuta.conf"
# NW configuration DB file
PATH_DATA_NW = os.environ[KEY_ENV_VHUT_HOME]+"/agent/data/nw.db"
def main():
usage = "%prog --action ACTION [parameter options]"
psr = OptionParser(usage=usage)
psr.add_option('--role', action='store', type='string', default=False, dest='role', help="server's role (manager/node)")
psr.add_option('--public_if', action='store', type='string', default=False, dest='public_if', help="server's public network interface")
psr.add_option('--private_if', action='store', type='string', default=False, dest='private_if', help="server's private network interface")
psr.add_option('--private_network', action='store', type='string', default=False, dest='private_network', help="server's own network address")
psr.add_option('--private_netmask', action='store', type='string', default=False, dest='private_netmask', help="server's own network netmask")
psr.add_option('--log', action='store', type='string', default=False, dest='log', help='logfile path')
psr.add_option('--loglevel', action='store', type='string', default=False, dest='loglevel', help='loglevel (DEBUG/INFO/WARING/ERROR/CRITICAL)')
# psr.add_option('--action', action='store', type='string', dest='action', help='network management action (wipe/revive/show/add_network/del_network/add_ip/del_ip/add_nat/del_nat/set_filter/add_instance_bridge/del_instance_bridge/init_network)')
psr.add_option('--action', action='store', type='string', dest='action', help='network management action (init/clear/show/add_network/del_network/add_ip/del_ip/add_nat/del_nat/set_filter/import)')
psr.add_option('--vlan' , action='store', type='int', dest='vlan', help='VLAN ID')
psr.add_option('--network', action='store', type='string', dest='network', help='network address for VLAN')
psr.add_option('--netmask', action='store', type='string', dest='netmask', help='netmask for VLAN')
psr.add_option('--gateway', action='store', type='string', dest='gateway', help='gateway address for VLAN')
psr.add_option('--broadcast', action='store', type='string', dest='broadcast', help='broadcat address for VLAN')
psr.add_option('--nameserver', action='store', type='string', dest='nameserver', help='nameserver address for VLAN')
psr.add_option('--dhcp', action='store', type='string', dest='dhcp', help='dhcp address for VLAN')
psr.add_option('--username', action='store', type='string', dest='username', help='user name of VLAN')
psr.add_option('--ip' , action='store', type='string', dest='ip', help="instance's IP address")
psr.add_option('--mac', action='store', type='string', dest='mac', help="instance's MAC address")
psr.add_option('--publicip', action='store', type='string', dest='publicip', help='public IP address binding by NAT')
psr.add_option('--privateip', action='store', type='string', dest='privateip', help='private IP address binding by NAT')
psr.add_option('--bridge', action='store', type='string', dest='bridge', help='instance bridge prefix name')
psr.add_option('--filtertype', action='store', type='string', dest='filtertype', help='netfilter filter action type (open/close)')
psr.add_option('--destname', action='store', type='string', dest='destname', help='netfilter filter destination user name')
psr.add_option('--sourcename', action='store', type='string',default=False, dest='sourcename', help='netfilter filter source user name')
psr.add_option('--sourcenet', action='store', type='string', default=False, dest='sourcenet', help='netfilter filter source network')
psr.add_option('--protocol', action='store', type='string', default=False, dest='protocol', help='netfilter filter protocol name')
psr.add_option('--minport', action='store', type='string', default=False, dest='minport', help='netfilter filter port range min')
psr.add_option('--maxport', action='store', type='string', default=False, dest='maxport', help='netfilter filter port range max')
psr.add_option('--csv', action='store', type='string', default=False, dest='csv', help='import csv file path')
psr.add_option('--nodump', action="store_true", dest="nodump", default=False, help='do not write db flag')
(opts, args) = psr.parse_args(sys.argv)
nwa = NwHandler(PATH_CONFIG, PATH_DATA_NW)
if opts.action:
if opts.action == 'import':
if opts.csv:
reader = csv.DictReader(file(opts.csv, "rb"))
for network in reader:
if nwa.add_network(network["vlan"], network["address"], network["mask"], network["broadcast"], network["gateway"], network["dns"], network["dhcp"], network["name"], get_nodump(opts)):
print "%s is added." % network["name"]
else:
print "%s is faild!" % network["name"]
exit(1)
print "init network: done."
else:
print "We need those options: --csv."
elif opts.action == 'init':
if nwa.init(False, get_nodump(opts)):
print "init: done."
else:
print "init: failed!"
elif opts.action == 'clear':
if nwa.init(True, get_nodump(opts)):
print "clear: done."
else:
print "clear: failed!"
# elif opts.action == 'revive':
# if nwa.revive():
# print "revive: done."
# else:
# print "revive: failed!"
# exit(1)
elif opts.action == 'show':
config_print(nwa.get_config())
elif opts.action == 'add_network':
if opts.vlan and opts.network and opts.netmask and opts.broadcast and opts.gateway and opts.nameserver and opts.username:
if nwa.add_network(opts.vlan, opts.network, opts.netmask, opts.broadcast, opts.gateway, opts.nameserver, opts.username, get_nodump(opts)):
print "add network: done."
else:
print "add network: failed!"
else:
print "We need those options: --vlan, --network, --netmask,--broadcast, --gateway, --nameserver, --dhcp, --username."
exit(1)
elif opts.action == 'del_network':
if opts.vlan:
if nwa.del_network(opts.vlan, get_nodump(opts)):
print "del network: done."
else:
print "del network: failed!"
else:
print "We need those options: --vlan."
exit(1)
elif opts.action == 'add_ip':
if opts.ip and opts.mac:
if nwa.add_ip(opts.ip, opts.mac, get_nodump(opts)):
print "add ip: done."
else:
print "add ip: failed!"
else:
print "We need those options: --ip, --mac."
exit(1)
elif opts.action == 'del_ip':
if opts.ip and opts.mac:
if nwa.del_ip(opts.ip, opts.mac, get_nodump(opts)):
print "del ip: done."
else:
print "del ip: failed!"
else:
print "We need those options: --ip, --mac."
elif opts.action == 'add_nat':
if opts.publicip and opts.privateip:
if nwa.add_nat(opts.privateip, opts.publicip, get_nodump(opts)):
print "add nat: done."
else:
print "add nat: failed!"
else:
p
|
pburdet/hyperspy
|
hyperspy/_signals/eels.py
|
Python
|
gpl-3.0
| 48,505
| 0.000309
|
# -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numbers
import warnings
import numpy as np
import traits.api as t
from scipy import constants
from hyperspy._signals.spectrum import Spectrum
from hyperspy.misc.elements import elements as elements_db
import hyperspy.axes
from hyperspy.decorators import only_interactive
from hyperspy.gui.eels import TEMParametersUI
from hyperspy.defaults_parser import preferences
import hyperspy.gui.messages as messagesui
from hyperspy.misc.progressbar import progressbar
from hyperspy.components import PowerLaw
from hyperspy.misc.utils import isiterable, closest_power_of_two, underline
from hyperspy.misc.utils import without_nans
class EELSSpectrum(Spectrum):
_signal_type = "EELS"
def __init__(self, *args, **kwards):
Spectrum.__init__(self, *args, **kwards)
# Attributes defaults
self.subshells = set()
self.elements = set()
self.edges = list()
if hasattr(self.metadata, 'Sample') and \
hasattr(self.metadata.Sample, 'elements'):
print('Elemental composition read from file')
self.add_elements(self.metadata.Sample.elements)
self.metadata.Signal.binned = True
def add_elements(self, elements, include_pre_edges=False):
"""Declare the elemental composition of the sample.
The ionisation edges of the elements present in the current
energy range will be added automatically.
Parameters
----------
elements : tuple of strings
The symbol of the elements. Note this input must always be
in the form of a tuple. Meaning: add_elements(('C',)) will
work, while add_elements(('C')) will NOT work.
include_pre_edges : bool
If True, the ionization edges with an onset below the lower
            energy limit of the SI will be included
Examples
--------
>>> s = signals.EELSSpectrum(np.arange(1024))
>>> s.add_elements(('C', 'O'))
Adding C_K subshell
Adding O_K subshell
Raises
------
ValueError
"""
if not isiterable(elements) or isinstance(elements, basestring):
raise ValueError(
"Input must be in the form of a tuple. For example, "
"if `s` is the variable containing this EELS spectrum:\n "
">>> s.add_elements(('C',))\n"
"See the docstring for more information.")
for element in elements:
if element in elements_db:
self.elements.add(element)
else:
raise ValueError(
"%s is not a valid symbol of a chemical element"
% element)
if not hasattr(self.metadata, 'Sample'):
self.metadata.add_node('Sample')
self.metadata.Sample.elements = list(self.elements)
if self.elements:
self.generate_subshells(include_pre_edges)
def generate_subshells(self, include_pre_edges=False):
"""Calculate the subshells for the current energy range for the
elements present in self.elements
Parameters
----------
include_pre_edges : bool
If True, the ionization edges with an onset below the lower
            energy limit of the SI will be included
"""
Eaxis = self.axes_manager.signal_axes[0].axis
if not include_pre_edges:
start_energy = Eaxis[0]
else:
start_energy = 0.
end_energy = Eaxis[-1]
for element in self.elements:
e_shells = list()
for shell in elements_db[element]['Atomic_properties']['Binding_energies']:
if shell[-1] != 'a':
if start_energy <= \
elements_db[element]['Atomic_properties']['Binding_energies'][shell][
'onset_energy (eV)'] \
<= end_energy:
subshell = '%s_%s' % (element, shell)
if subshell not in self.subshells:
print "Adding %s subshell" % (subshell)
self.subshells.add(
'%s_%s' % (element, shell))
e_shells.append(subshell)
def estimate_zero_loss_peak_centre(self, mask=None):
"""Estimate the posision of the zero-loss peak.
This function provides just a coarse estimation of the position
of the zero-loss peak centre by computing the position of the maximum
of the spectra. For subpixel accuracy use `estimate_shift1D`.
Parameters
----------
mask : Signal of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
            current signal. Where mask is True the shift is not computed
            and set to nan.
Returns
-------
zlpc : Signal subclass
The estimated position of the maximum of the ZLP peak.
Notes
-----
        This function only works when the zero-loss peak is the most
        intense feature in the spectrum. If it is not, in most cases
        the spectrum can be cropped to meet this criterion.
        Alternatively use `estimate_shift1D`.
See Also
--------
estimate_shift1D, align_zero_loss_peak
"""
self._check_signal_dimension_equals_one()
self._check_navigation_mask(mask)
zlpc = self.valuemax(-1)
if self.axes_manager.navigation_dimension == 1:
zlpc = zlpc.as_spectrum(0)
elif self.axes_manager.navigation_dimension > 1:
zlpc = zlpc.as_image((0, 1))
if mask is not None:
zlpc.data[mask.data] = np.nan
return zlpc
def align_zero_loss_peak(
self,
calibrate=True,
also_align=[],
print_stats=True,
subpixel=True,
mask=None,
**kwargs):
"""Align the zero-loss peak.
This function first aligns the spectra using the result of
`estimate_zero_loss_peak_centre` and afterward, if subpixel is True,
proceeds to align with subpixel accuracy using `align1D`. The offset
        is automatically corrected if `calibrate` is True.
Parameters
----------
calibrate : bool
If True, set the offset of the spectral axis so that the
zero-loss peak is at position zero.
also_align : list of signals
A list containing other spectra of identical dimensions to
align using the shifts applied to the current spectrum.
If `calibrate` is True, the calibration is also applied to
the spectra in the list.
print_stats : bool
            If True, print summary statistics of the ZLP maximum before
            the alignment.
subpixel : bool
If True, perform the alignment with subpixel accuracy
using cross-correlation.
mask : Signal of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
See Also
--------
estimate_zero_loss_peak_centre, align1D, estimate_shift1D.
Notes
-----
Any extra keyword arguments ar
|
nadrees/PyRosalind
|
Python Village/INI4.py
|
Python
|
unlicense
| 243
| 0.004115
|
__author__ = 'Nathen'
# get min and max bounds
a, b = map(lambda i: int(i), input('Nums: ').split(' '))
# create generator of all odd nums
nums = [x for x in range(a, b + 1) if x % 2 == 1]
# sum nums
ans = sum(nums)
# print answer
print(ans)
|
rohanraja/cgt_distributed
|
examples/char.py
|
Python
|
mit
| 12,474
| 0.010742
|
"""
A nearly direct translation of Andrej's code
https://github.com/karpathy/char-rnn
"""
from __future__ import division
import cgt
from cgt import nn, utils, profiler
import numpy as np, numpy.random as nr
import os.path as osp
import argparse
from time import time
from StringIO import StringIO
from param_collection import ParamCollection
# via https://github.com/karpathy/char-rnn/blob/master/model/GRU.lua
# via http://arxiv.org/pdf/1412.3555v1.pdf
def make_deep_gru(size_input, size_mem, n_layers, size_output, size_batch):
inputs = [cgt.matrix() for i_layer in xrange(n_layers+1)]
outputs = []
for i_layer in xrange(n_layers):
prev_h = inputs[i_layer+1] # note that inputs[0] is the external input, so we add 1
x = inputs[0] if i_layer==0 else outputs[i_layer-1]
size_x = size_input if i_layer==0 else size_mem
update_gate = cgt.sigmoid(
nn.Affine(size_x, size_mem,name="i2u")(x)
+ nn.Affine(size_mem, size_mem, name="h2u")(prev_h))
reset_gate = cgt.sigmoid(
nn.Affine(size_x, size_mem,name="i2r")(x)
+ nn.Affine(size_mem, size_mem, name="h2r")(prev_h))
gated_hidden = reset_gate * prev_h
p2 = nn.Affine(size_mem, size_mem)(gated_hidden)
p1 = nn.Affine(size_x, size_mem)(x)
hidden_target = cgt.tanh(p1+p2)
next_h = (1.0-update_gate)*prev_h + update_gate*hidden_target
outputs.append(next_h)
category_activations = nn.Affine(size_mem, size_output,name="pred")(outputs[-1])
logprobs = nn.logsoftmax(category_activations)
outputs.append(logprobs)
return nn.Module(inputs, outputs)
def make_deep_lstm(size_input, size_mem, n_layers, size_output, size_batch):
inputs = [cgt.matrix(fixed_shape=(size_batch, size_input))]
for _ in xrange(2*n_layers):
inputs.append(cgt.matrix(fixed_shape=(size_batch, size_mem)))
outputs = []
for i_layer in xrange(n_layers):
prev_h = inputs[i_layer*2]
prev_c = inputs[i_layer*2+1]
if i_layer==0:
x = inputs[0]
size_x = size_input
else:
x = outputs[(i_layer-1)*2]
size_x = size_mem
input_sums = nn.Affine(size_x, 4*size_mem)(x) + nn.Affine(size_x, 4*size_mem)(prev_h)
sigmoid_chunk = cgt.sigmoid(input_sums[:,0:3*size_mem])
in_gate = sigmoid_chunk[:,0:size_mem]
forget_gate = sigmoid_chunk[:,size_mem:2*size_mem]
out_gate = sigmoid_chunk[:,2*size_mem:3*size_mem]
in_transform = cgt.tanh(input_sums[:,3*size_mem:4*size_mem])
next_c = forget_gate*prev_c + in_gate * in_transform
next_h = out_gate*cgt.tanh(next_c)
outputs.append(next_c)
outputs.append(next_h)
category_activations = nn.Affine(size_mem, size_output)(outputs[-1])
logprobs = nn.logsoftmax(category_activations)
outputs.append(logprobs)
return nn.Module(inputs, outputs)
def flatcat(xs):
return cgt.concatenate([x.flatten() for x in xs])
def cat_sample(ps):
"""
sample from categorical distribution
ps is a 2D array whose rows are vectors of probabilities
"""
r = nr.rand(len(ps))
out = np.zeros(len(ps),dtype='i4')
cumsums = np.cumsum(ps, axis=1)
for (irow,csrow) in enumerate(cumsums):
for (icol, csel) in enumerate(csrow):
if csel > r[irow]:
out[irow] = icol
break
return out
def rmsprop_update(grad, state):
state.sqgrad[:] *= state.decay_rate
state.count *= state.decay_rate
np.square(grad, out=state.scratch) # scratch=g^2
state.sqgrad += state.scratch
state.count += 1
    np.sqrt(state.sqgrad, out=state.scratch) # scratch = sqrt of accumulated squared grads
np.divide(state.scratch, np.sqrt(state.count), out=state.scratch) # scratch = rms
np.divide(grad, state.scratch, out=state.scratch) # scratch = grad/rms
np.multiply(state.scratch, state.step_size, out=state.scratch)
state.theta[:] -= state.scratch
def make_loss_and_grad_and_step(arch, size_input, size_output, size_mem, size_batch, n_layers, n_unroll):
# symbolic variables
x_tnk = cgt.tensor3()
targ_tnk = cgt.tensor3()
make_network = make_deep_lstm if arch=="lstm" else make_deep_gru
network = make_network(size_input, size_mem, n_layers, size_output, size_batch)
init_hiddens = [cgt.matrix() for _ in xrange(get_num_hiddens(arch, n_layers))]
# TODO fixed sizes
cur_hiddens = init_hiddens
loss = 0
for t in xrange(n_unroll):
outputs = network([x_tnk[t]] + cur_hiddens)
cur_hiddens, prediction_logprobs = outputs[:-1], outputs[-1]
# loss = loss + nn.categorical_negloglik(prediction_probs, targ_tnk[t]).sum()
loss = loss - (prediction_logprobs*targ_tnk[t]).sum()
cur_hiddens = outputs[:-1]
final_hiddens = cur_hiddens
loss = loss / (n_unroll * size_batch)
params = network.get_parameters()
gradloss = cgt.grad(loss, params)
flatgrad = flatcat(gradloss)
with utils.Message("compiling loss+grad"):
f_loss_and_grad = cgt.function([x_tnk, targ_tnk] + init_hiddens, [loss, flatgrad] + final_hiddens)
f_loss = cgt.function([x_tnk, targ_tnk] + init_hiddens, loss)
assert len(init_hiddens) == len(final_hiddens)
x_nk = cgt.matrix('x')
outputs = network([x_nk] + init_hiddens)
f_step = cgt.function([x_nk]+init_hiddens, outputs)
# print "node count", cgt.count_nodes(flatgrad)
return network, f_loss, f_loss_and_grad, f_step
class Table(dict):
"dictionary-like object that exposes its keys as attributes"
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
def make_rmsprop_state(theta, step_size, decay_rate):
    return Table(theta=theta, sqgrad=np.zeros_like(theta)+1e-6, scratch=np.empty_like(theta),
        step_size=step_size, decay_rate=decay_rate, count=0)
class Loader(object):
def __init__(self, data_dir, size_batch, n_unroll, split_fractions):
input_file = osp.join(data_dir,"input.txt")
preproc_file = osp.join(data_dir, "preproc.
|
npz")
run_preproc = not osp.exists(preproc_file) or osp.getmtime(input_file) > osp.getmtime(preproc_file)
if run_preproc:
text_to_tensor(input_file, preproc_file)
data_file = np.load(preproc_file)
self.char2ind = {char:ind for (ind,char) in enumerate(data_file["chars"])}
data = data_file["inds"]
data = data[:data.shape[0] - (data.shape[0] % size_batch)].reshape(size_batch, -1).T # inds_tn
n_batches = (data.shape[0]-1) // n_unroll
data = data[:n_batches*n_unroll+1] # now t-1 is divisble by batch size
self.n_unroll = n_unroll
self.data = data
self.n_train_batches = int(n_batches*split_fractions[0])
self.n_test_batches = int(n_batches*split_fractions[1])
self.n_val_batches = n_batches - self.n_train_batches - self.n_test_batches
print "%i train batches, %i test batches, %i val batches"%(self.n_train_batches, self.n_test_batches, self.n_val_batches)
@property
def size_vocab(self):
return len(self.char2ind)
def train_batches_iter(self):
for i in xrange(self.n_train_batches):
start = i*self.n_unroll
stop = (i+1)*self.n_unroll
yield ind2onehot(self.data[start:stop], self.size_vocab), ind2onehot(self.data[start+1:stop+1], self.size_vocab) # XXX
# XXX move elsewhere
def ind2onehot(inds, n_cls):
inds = np.asarray(inds)
out = np.zeros(inds.shape+(n_cls,),cgt.floatX)
out.flat[np.arange(inds.size)*n_cls + inds.ravel()] = 1
return out
def text_to_tensor(text_file, preproc_file):
with open(text_file,"r") as fh:
text = fh.read()
char2ind = {}
inds = []
for char in text:
ind = char2ind.get(char, -1)
if ind == -1:
ind = len(char2ind)
char2ind[char] = ind
inds.append(ind)
np.savez(preproc_file, inds = inds, chars = sorted(char2ind, key = lambda char : char2ind[char]))
def get_num_hiddens(arch, n_layers):
return {"lstm" : 2 * n_layers, "gru" : n_layers
|
nnic/home-assistant
|
homeassistant/components/verisure.py
|
Python
|
mit
| 5,074
| 0
|
"""
components.verisure
~~~~~~~~~~~~~~~~~~~
Provides support for verisure components.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/verisure/
"""
import logging
import time
from datetime import timedelta
from homeassistant import bootstrap
from homeassistant.const import (
ATTR_DISCOVERED, ATTR_SERVICE, CONF_PASSWORD, CONF_USERNAME,
EVENT_PLATFORM_DISCOVERED)
from homeassistant.helpers import validate_config
from homeassistant.loader import get_component
from homeassistant.util import Throttle
DOMAIN = "verisure"
DISCOVER_SENSORS = 'verisure.sensors'
DISCOVER_SWITCHES = 'verisure.switches'
DISCOVER_ALARMS = 'verisure.alarm_control_panel'
DISCOVER_LOCKS = 'verisure.lock'
DEPENDENCIES = ['alarm_control_panel']
REQUIREMENTS = ['vsure==0.5.1']
_LOGGER = logging.getLogger(__name__)
MY_PAGES = None
ALARM_STATUS = {}
SMARTPLUG_STATUS = {}
CLIMATE_STATUS = {}
LOCK_STATUS = {}
MOUSEDETECTION_STATUS = {}
VERISURE_LOGIN_ERROR = None
VERISURE_ERROR = None
SHOW_THERMOMETERS = True
SHOW_HYGROMETERS = True
SHOW_ALARM = True
SHOW_SMARTPLUGS = True
SHOW_LOCKS = True
SHOW_MOUSEDETECTION = True
CODE_DIGITS = 4
# if wrong password was given don't try again
WRONG_PASSWORD_GIVEN = False
MIN_TIME_BETWEEN_REQUESTS = timedelta(seconds=1)
|
def setup(hass, config):
""" Setup the Verisure component. """
if not validate_config(config,
{DOMAIN: [CONF_USERNAME, CONF_PASSWORD]},
_LOGGER):
return False
from verisure import MyPages, LoginError, Error
global SHOW_THERMOMETERS, SHOW_HYGROMETERS,\
SHOW_ALARM, SHOW_SMARTPLUGS, SHOW_LOCKS, SHOW_MOUSEDETECTION,\
CODE_DIGITS
SHOW_THERMOMETERS = int(config[DOMAIN].get('thermometers', '1'))
SHOW_HYGROMETERS = int(config[DOMAIN].get('hygrometers', '1'))
SHOW_ALARM = int(config[DOMAIN].get('alarm', '1'))
SHOW_SMARTPLUGS = int(config[DOMAIN].get('smartplugs', '1'))
SHOW_LOCKS = int(config[DOMAIN].get('locks', '1'))
SHOW_MOUSEDETECTION = int(config[DOMAIN].get('mouse', '1'))
CODE_DIGITS = int(config[DOMAIN].get('code_digits', '4'))
global MY_PAGES
MY_PAGES = MyPages(
config[DOMAIN][CONF_USERNAME],
config[DOMAIN][CONF_PASSWORD])
global VERISURE_LOGIN_ERROR, VERISURE_ERROR
VERISURE_LOGIN_ERROR = LoginError
VERISURE_ERROR = Error
try:
MY_PAGES.login()
except (ConnectionError, Error) as ex:
_LOGGER.error('Could not log in to verisure mypages, %s', ex)
return False
update_alarm()
update_climate()
update_smartplug()
update_lock()
update_mousedetection()
# Load components for the devices in the ISY controller that we support
for comp_name, discovery in ((('sensor', DISCOVER_SENSORS),
('switch', DISCOVER_SWITCHES),
('alarm_control_panel', DISCOVER_ALARMS),
('lock', DISCOVER_LOCKS))):
component = get_component(comp_name)
_LOGGER.info(config[DOMAIN])
bootstrap.setup_component(hass, component.DOMAIN, config)
hass.bus.fire(EVENT_PLATFORM_DISCOVERED,
{ATTR_SERVICE: discovery,
ATTR_DISCOVERED: {}})
return True
def reconnect():
""" Reconnect to verisure mypages. """
try:
time.sleep(1)
MY_PAGES.login()
except VERISURE_LOGIN_ERROR as ex:
_LOGGER.error("Could not login to Verisure mypages, %s", ex)
global WRONG_PASSWORD_GIVEN
WRONG_PASSWORD_GIVEN = True
except (ConnectionError, VERISURE_ERROR) as ex:
_LOGGER.error("Could not login to Verisure mypages, %s", ex)
@Throttle(MIN_TIME_BETWEEN_REQUESTS)
def update_alarm():
""" Updates the status of alarms. """
update_component(MY_PAGES.alarm.get, ALARM_STATUS)
@Throttle(MIN_TIME_BETWEEN_REQUESTS)
def update_climate():
""" Updates the status of climate sensors. """
update_component(MY_PAGES.climate.get, CLIMATE_STATUS)
@Throttle(MIN_TIME_BETWEEN_REQUESTS)
def update_smartplug():
""" Updates the status of smartplugs. """
update_component(MY_PAGES.smartplug.get, SMARTPLUG_STATUS)
def update_lock():
""" Updates the status of alarms. """
update_component(MY_PAGES.lock.get, LOCK_STATUS)
def update_mousedetection():
""" Updates the status of mouse detectors. """
update_component(MY_PAGES.mousedetection.get, MOUSEDETECTION_STATUS)
def update_component(get_function, status):
""" Updates the status of verisure components. """
if WRONG_PASSWORD_GIVEN:
_LOGGER.error('Wrong password')
return
try:
for overview in get_function():
try:
status[overview.id] = overview
except AttributeError:
status[overview.deviceLabel] = overview
except (ConnectionError, VERISURE_ERROR) as ex:
_LOGGER.error('Caught connection error %s, tries to reconnect', ex)
reconnect()
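For orientation, a minimal sketch of the configuration this component reads in setup() above; the key names mirror the config[DOMAIN].get(...) calls in this file, and the credential values are placeholders, not real settings.

# Hypothetical configuration dict shaped like what setup() expects (placeholders only).
example_config = {
    'verisure': {
        'username': 'user@example.com',   # CONF_USERNAME (placeholder)
        'password': 'not-a-real-password',  # CONF_PASSWORD (placeholder)
        'alarm': '1',          # SHOW_ALARM
        'thermometers': '1',   # SHOW_THERMOMETERS
        'hygrometers': '0',    # SHOW_HYGROMETERS
        'smartplugs': '1',     # SHOW_SMARTPLUGS
        'locks': '1',          # SHOW_LOCKS
        'mouse': '0',          # SHOW_MOUSEDETECTION
        'code_digits': '4',    # CODE_DIGITS
    }
}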
|
zhkzyth/a-super-fast-crawler
|
crawler.py
|
Python
|
mit
| 9,133
| 0.00478
|
#!/usr/bin/env python
# encoding: utf-8
"""
crawler.py
~~~~~~~~~~~~~
Main module: the concrete implementation of the crawler.
"""
import re
import time
import logging
import threading
import traceback
from hashlib import md5
from bs4 import BeautifulSoup
from datetime import datetime
from collections import deque
from locale import getdefaultlocale
from urlparse import urljoin,urlparse
from database import Database
from webPage import WebPage
from threadPool import ThreadPool
log = logging.getLogger('spider')
class Crawler(threading.Thread):
def __init__(self, args, queue):
threading.Thread.__init__(self)
        # maximum crawl depth (number of page levels)
        self.depth = args['depth']
        # current crawl depth, starting from 1
        self.currentDepth = 1
        # keyword, decoded with the console's default encoding
        self.keyword = args['keyword'].decode(getdefaultlocale()[1])
        # database
        self.database = Database(db="bt_tornado")
        # thread pool with the given number of threads
        self.threadPool = ThreadPool(args['threadNum'])
        # links already visited
        self.visitedHrefs = set()
        # links not yet visited
        self.unvisitedHrefs = deque()
        # seed the queue of links to visit
        for url in args['url']:
            self.unvisitedHrefs.append(url)
        # flag marking whether the crawler has started working
        self.isCrawling = False
# allow or deny crawl url
self.entryFilter = args['entryFilter']
# allow to output back url
self.yieldFilter = args['yieldFilter']
#
self.callbackFilter = args['callbackFilter']
#
self.db = args['db']
self.collection = args['collection']
# communication queue
self.queue = queue
def run(self):
print '\nStart Crawling\n'
if not self._isDatabaseAvaliable():
print 'Error: Unable to open database file.\n'
else:
self.isCrawling = True
self.threadPool.startThreads()
while self.currentDepth < self.depth+1:
                # assign tasks: the thread pool downloads every page of the current depth concurrently (non-blocking)
                self._assignCurrentDepthTasks ()
                # wait until the thread pool finishes all tasks; once the pool drains, one depth level has been crawled
                # self.threadPool.taskJoin() could replace the loop below, but then Ctrl-C interrupt would not work
while self.threadPool.getTaskLeft():
time.sleep(8)
print 'Depth %d Finish. Totally visited %d links. \n' % (
self.currentDepth, len(self.visitedHrefs))
log.info('Depth %d Finish. Total visited Links: %d\n' % (
self.currentDepth, len(self.visitedHrefs)))
self.currentDepth += 1
self.stop()
def stop(self):
self.isCrawling = False
self.threadPool.stopThreads()
self.database.close()
#use queue to communicate between threads
self.queue.get()
self.queue.task_done()
def getAlreadyVisitedNum(self):
        # visitedHrefs holds links already handed to the task queue, so some of them may still be in progress.
        # The real number of visited links is therefore len(visitedHrefs) minus the tasks still pending.
return len(self.visitedHrefs) - self.threadPool.getTaskLeft()
def _assignCurrentDepthTasks(self):
while self.unvisitedHrefs:
url = self.unvisitedHrefs.popleft()
if not self.__entry_filter(url):
self.visitedHrefs.add(url)
continue
            # push the task onto the thread pool's task queue
            self.threadPool.putTask(self._taskHandler, url)
            # mark the link as visited (or about to be visited) so the same link is never crawled twice
self.visitedHrefs.add(url)
def _callback_filter(self, webPage):
#parse the web page to do sth
url , pageSource = webPage.getDatas()
for tmp in self.callbackFilter['List']:
if re.compile(tmp,re.I|re.U).search(url):
self.callbackFilter['func'](webPage)
def _taskHandler(self, url):
        # fetch the page source first, then save it; both are blocking operations, so they run in worker threads
webPage = WebPage(url)
tmp = webPage.fetch()
if tmp:
self._callback_filter(webPage)
self._saveTaskResults(webPage)
self._addUnvisitedHrefs(webPage)
def _saveTaskResults(self, webPage):
url, pageSource = webPage.getDatas()
_id = md5(url).hexdigest()
try:
if self.__yield_filter(url):
query = {"id": _id}
document = {"id": _id, "url":url, "createTime": datetime.now()}
self.database.saveData(query=query, collection=self.collection, document=document)
except Exception, e:
log.error(' URL: %s ' % url + traceback.format_exc())
def _addUnvisitedHrefs(self, webPage):
        '''Add unvisited links: put every valid url into the unvisitedHrefs queue.'''
        # filter the links: 1. keep only http/https pages; 2. make sure each link is visited only once
url, pageSource = webPage.getDatas()
hrefs = self._getAllHrefsFromPage(url, pageSource)
for href in hrefs:
if self._isHttpOrHttpsProtocol(href):
if not self._isHrefRepeated(href):
self.unvisitedHrefs.append(href)
def _getAllHrefsFro
|
mPage(self, url, pageSource):
        '''Parse the html source and collect every link on the page. Returns a list of links.'''
hrefs = []
soup = BeautifulSoup(pageSource)
results = soup.find_all('a',href=True)
for a in results:
            # the link must be encoded as utf8, because Chinese file links such as http://aa.com/文件.pdf
            # are not url-encoded automatically by bs4, which would otherwise raise an encode exception
            href = a.get('href').encode('utf8')
            if not href.startswith('http'):
                href = urljoin(url, href)  # resolve relative links
hrefs.append(href)
return hrefs
def _isHttpOrHttpsProtocol(self, href):
protocal = urlparse(href).scheme
if protocal == 'http' or protocal == 'https':
return True
return False
def _isHrefRepeated(self, href):
if href in self.visitedHrefs or href in self.unvisitedHrefs:
return True
return False
def _isDatabaseAvaliable(self):
if self.database.isConn():
return True
return False
def __entry_filter(self, checkURL):
'''
        Entry filter.
        Decides which urls the crawler is allowed to enter and scrape.
        @param checkURL: the url handed to the filter for checking
        @type checkURL: string
        @return: True if the check passes, otherwise False
        @rtype: bool
'''
        # if a filter is defined, apply it
        if self.entryFilter:
            if self.entryFilter['Type'] == 'allow':  # allow mode: pass if any rule matches, otherwise reject
result = False
for rule in self.entryFilter['List']:
pattern = re.compile(rule, re.I | re.U)
if pattern.search(checkURL):
result = True
break
return result
            elif self.entryFilter['Type'] == 'deny':  # deny mode: reject if any rule matches, otherwise pass
result = True
for rule in self.entryFilter['List']:
pattern = re.compile(rule, re.I | re.U)
if pattern.search(checkURL):
result = False
break
return result
        # with no filter defined, allow by default
return True
def __yield_filter(self, checkURL):
'''
        Yield filter.
        Decides which urls the crawler is allowed to yield back.
        @param checkURL: the url handed to the filter for checking
        @type checkURL: string
        @return: True if the check passes, otherwise False
        @rtype: bool
'''
        # if a filter is defined, apply it
        if self.yieldFilter:
            if self.yieldFilter['Type'] == 'allow':  # allow mode: pass if any rule matches, otherwise reject
result = False
for rule in self.yieldFilter['List']:
pattern = re.compile(rule, re.I | re.U)
if pattern.search(checkURL):
result = True
break
return result
            elif self.yieldFilter['Type'] == 'deny':  # deny mode: reject if any rule matches, otherwise pass
result = True
for rule in self.yieldFilter['List']:
pattern = re.compile(rule, re.I | re.U)
if pattern.search(checkURL):
result = False
break
return result
        # with no filter defined, allow by default
return True
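A minimal sketch of the filter configuration the two methods above expect: the 'Type'/'List' keys mirror how self.entryFilter and self.yieldFilter are read in this file, while the regular expressions themselves are made up for illustration.

# Hypothetical filter portion of the args dict passed to Crawler(args, queue).
example_filters = {
    'entryFilter': {'Type': 'allow', 'List': [r'^https?://example\.com/']},  # crawl only this host (made-up pattern)
    'yieldFilter': {'Type': 'deny', 'List': [r'\.(jpg|png|css|js)$']},       # do not yield static assets (made-up pattern)
}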
|
kamyu104/GoogleCodeJam-2014
|
Round 1B/the-repeater.py
|
Python
|
mit
| 2,367
| 0.00169
|
# Copyright (c) 2016 kamyu. All rights reserved.
#
# Google Code Jam 2014 Round 1B - Problem A. The Repeater
# https://code.google.com/codejam/contest/2994486/dashboard#s=p0
#
# Time:  O(X * N), N is the number of strings,
# X is the number of characters in the frequency string.
# Space: O(X * N)
#
from random import randint
def find_kth_largest(nums, k):
def partition_around_pivot(left, right, pivot_idx, nums):
pivot_value = nums[pivot_idx]
new_pivot_idx = left
nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
        for i in xrange(left, right):
if nums[i] > pivot_value:
nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
new_pivot_idx += 1
nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
return new_pivot_idx
left, right = 0, len(nums) - 1
while left <= right:
pivot_idx = randint(left, right)
new_pivot_idx = partition_around_pivot(left, right, pivot_idx, nums)
if new_pivot_idx == k - 1:
return nums[new_pivot_idx]
elif new_pivot_idx > k - 1:
right = new_pivot_idx - 1
else: # new_pivot_idx < k - 1.
left = new_pivot_idx + 1
def run_length_encoding(s):
encode_str = [[1, s[0]]]
for i in xrange(1, len(s)):
if s[i] != encode_str[-1][1]:
encode_str.append([1, s[i]])
else:
encode_str[-1][0] += 1
return encode_str
def the_repeater():
strs = []
for _ in xrange(input()):
strs.append(run_length_encoding(raw_input().strip()))
for s in strs:
if len(s) != len(strs[0]):
return "Fegla Won"
for i in xrange(len(s)):
if s[i][1] != strs[0][i][1]:
return "Fegla Won"
move = 0
for j in xrange(len(strs[0])): # X times.
freqs = [strs[i][j][0] for i in xrange(len(strs))] # N times.
# Median minimizes the sum of absolute deviations.
# freqs.sort() # O(NlogN)
# median = freqs[len(freqs)/2]
median = find_kth_largest(freqs, len(freqs)/2 + 1) # O(N) on average.
for freq in freqs:
move += abs(freq - median)
return move
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, the_repeater())
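A small sanity check of the quickselect-based median used above (illustrative only; it assumes find_kth_largest from this file is in scope). The median is chosen because it minimizes the sum of absolute deviations, which is exactly the move count accumulated in the_repeater().

# freqs sorted descending is [9, 7, 5, 3, 1]; the 3rd largest (the median) is 5.
freqs = [5, 1, 3, 9, 7]
median = find_kth_largest(freqs[:], len(freqs) // 2 + 1)   # -> 5 (copy passed in, since the helper reorders its input)
moves = sum(abs(f - median) for f in freqs)                # 0 + 4 + 2 + 4 + 2 = 12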
|
ketancmaheshwari/hello-goog
|
src/python/collectionsexample.py
|
Python
|
apache-2.0
| 718
| 0.001393
|
#!/bin/env python
import itertools
import collections
def read_table(filename):
with open(filename) as fp:
|
header = next(fp).split()
rows = [line.split()[1:] for line in fp if line.strip()]
columns = zip(*rows)
data = dict(zip(header, columns))
return data
table = read_table("../../data/colldata.txt")
pots = sorted(table)
alphabet = "+-?"
for num in range(2, len(table) + 1):
for group in itertools.combinations(pots, num):
patterns = zip(*[table[p] for p in group])
counts = collections.Counter(patterns)
for poss in itertools.product(alphabet, repeat=num):
print ', '.join(group) + ':',
print ''.join(poss), counts[poss]
|
|
fxia22/ASM_xf
|
PythonD/site_python/Numeric/MA/__init__.py
|
Python
|
gpl-2.0
| 119
| 0
|
from MA_version import version as __version__
from MA_version import version_info as __version_info__
from MA import *
|
BdEINSALyon/resa
|
permissions/views.py
|
Python
|
gpl-3.0
| 589
| 0.005093
|
import requests
from django.shortcuts import render
# Create your views here.
from django.template.response import TemplateResponse
from account.models import OAuthToken
def list_azure_groups(request):
token = OAuthToken.objects.filter(user=request.user, service__name='microsoft').last()
if token is None:
return ''
r = requests.get('https://graph.microsoft.com/v1.0/groups',
                     headers={'Authorization': 'Bearer {}'.format(token.auth_token)})
return TemplateResponse(request, 'permissions/azure.html', context={'groups': r.json()['value']})
|
OWASP/django-DefectDojo
|
dojo/admin.py
|
Python
|
bsd-3-clause
| 103
| 0
|
from auditlog.models import LogEntry
from django.contrib import admin
admin.site.unregister(LogEntry)
| |
wkentaro/fcn
|
fcn/external/fcn.berkeleyvision.org/nyud-fcn32s-hha/solve.py
|
Python
|
mit
| 616
| 0.001623
|
import caffe
import surgery, score
import numpy as np
import os
import sys
import setproctitle
setproctitle.setproctitle(os.path.basename(os.getcwd()))
weights = '../ilsvrc-nets/vgg16-fcn.caffemodel'
# init
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()
solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)
# surgeries
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)
# scoring
test = np.loadtxt('../data/nyud/test.txt', dtype=str)
for _ in range(50):
solver.step(2000)
    score.seg_tests(solver, False, test, layer='score')
|
kubeflow/pipelines
|
backend/api/python_http_client/test/test_api_run_storage_state.py
|
Python
|
apache-2.0
| 1,436
| 0.003482
|
# coding: utf-8
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition.
Contact: kubeflow-pipelines@google.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kfp_server_api
from kfp_server_api.models.api_run_storage_state import ApiRunStorageState # noqa: E501
from kfp_server_api.rest import ApiException
class TestApiRunStorageState(unittest.TestCase):
"""ApiRunStorageState unit test stubs"""
def setUp(self):
pass
    def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ApiRunStorageState
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kfp_server_api.models.api_run_storage_state.ApiRunStorageState() # noqa: E501
if include_optional :
return ApiRunStorageState(
)
else :
return ApiRunStorageState(
)
def testApiRunStorageState(self):
"""Test ApiRunStorageState"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
bitmazk/django-unshorten
|
unshorten/tests/rate_limit_tests.py
|
Python
|
mit
| 1,841
| 0
|
"""Tests for the simple rate limiting classes of the ``unshorten`` app."""
from mock import Mock
from django.conf import settings
from django.test import TestCase
from mixer.backend.django import mixer
from ..backend import RateLimit
from ..models import APICallDayHistory
class SimpleRateLimitTestCase(TestCase):
"""Tests for the ``SimpleRateLimit`` class."""
|
longMessage = True
def setUp(self):
self.history = mixer.blend('unshorten.APICallDayHistory',
amount_api_calls=2500)
self.request = Mock(user=self.history.user)
def test_is_rate_limit_exceeded(self):
"""Test for the ``is_rate_limit_exceeded`` method."""
rate_limit = RateLimit(self.request)
self.assertEqual(rate_limit.is_rate_limit_exceeded(), False, msg=(
'Rate limit should not be exceeded.'))
rate_limit = RateLimit(self.request)
self.history.amount_api_calls = settings.UNSHORTEN_DAILY_LIMIT
self.history.save()
self.assertEqual(rate_limit.is_rate_limit_exceeded(), True, msg=(
'Rate limit should be exceeded.'))
rate_limit = RateLimit(self.request)
self.history.delete()
self.assertEqual(rate_limit.is_rate_limit_exceeded(), False, msg=(
'Rate limit should not be exceeded if no history is logged.'))
def test_log_api_call(self):
"""Test for the ``log_api_call`` method."""
rate_limit = RateLimit(self.request)
history = rate_limit.log_api_call()
self.assertEqual(APICallDayHistory.objects.all().count(), 1, msg=(
'Should create a APICallDayHistory object.'))
self.assertEqual(
history.amount_api_calls, self.history.amount_api_calls + 1, msg=(
'The amount of api calls should have increased.'))
|
radumas/qgis2web
|
leafletScriptStrings.py
|
Python
|
gpl-2.0
| 20,814
| 0.002114
|
from utils import scaleToZoom
def jsonScript(layer):
json = """
<script src="data/json_{layer}.js\"></script>""".format(layer=layer)
return json
def scaleDependentLayerScript(layer, layerName):
min = layer.minimumScale()
max = layer.maximumScale()
scaleDependentLayer = """
if (map.getZoom() <= {min} && map.getZoom() >= {max}) {{
feature_group.addLayer(json_{layerName}JSON);
console.log("show");
//restackLayers();
}} else if (map.getZoom() > {min} || map.getZoom() < {max}) {{
feature_group.removeLayer(json_{layerName}JSON);
console.log("hide");
//restackLayers();
}}""".format(min=scaleToZoom(min), max=scaleToZoom(max), layerName=layerName)
return scaleDependentLayer
def scaleDependentScript(layers):
scaleDependent = """
map.on("zoomend", function(e) {"""
scaleDependent += layers
scaleDependent += """
});"""
scaleDependent += layers
return scaleDependent
def openScript():
openScript = """
<script>"""
return openScript
def crsScript(crsAuthId, crsProj4):
crs = """
var crs = new L.Proj.CRS('{crsAuthId}', '{crsProj4}', {{
resolutions: [2800, 1400, 700, 350, 175, 84, 42, 21, 11.2, 5.6, 2.8, 1.4, 0.7, 0.35, 0.14, 0.07],
}});""".format(crsAuthId=crsAuthId, crsProj4=crsProj4)
return crs
def mapScript(extent, matchCRS, crsAuthId, measure, maxZoom, minZoom, bounds):
map = """
var map = L.map('map', {"""
if extent == "Canvas extent" and matchCRS and crsAuthId != 'EPSG:4326':
map += """
crs: crs,
continuousWorld: false,
worldCopyJump: false, """
if measure:
map += """
measureControl:true,"""
map += """
zoomControl:true, maxZoom:""" + unicode(maxZoom) + """, minZoom:""" + unicode(minZoom) + """
})"""
if extent == "Canvas extent":
map += """.fitBounds(""" + bounds + """);"""
map += """
var hash = new L.Hash(map);
var additional_attrib = '<a href="https://github.com/tomchadwin/qgis2web" target ="_blank">qgis2web</a>';"""
return map
def featureGroupsScript():
featureGroups = """
var feature_group = new L.featureGroup([]);
var raster_group = new L.LayerGroup([]);"""
return featureGroups
def basemapsScript(basemap, attribution):
basemaps = """
var basemap = L.tileLayer('{basemap}', {{
attribution: additional_attrib + ' {attribution}'
}});
basemap.addTo(map);""".format(basemap=basemap, attribution=attribution)
return basemaps
def layerOrderScript():
layerOrder = """
var layerOrder=new Array();
function restackLayers() {
for (index = 0; index < layerOrder.length; index++) {
feature_group.removeLayer(layerOrder[index]);
feature_group.addLayer(layerOrder[index]);
}
}
layerControl = L.control.layers({},{},{collapsed:false});"""
return layerOrder
def popFuncsScript(table):
popFuncs = """
var popupContent = {table};
layer.bindPopup(popupContent);""".format(table=table)
return popFuncs
def popupScript(safeLayerName, popFuncs):
popup = """
function pop_{safeLayerName}(feature, layer) {{{popFuncs}
}}""".format(safeLayerName=safeLayerName, popFuncs=popFuncs)
return popup
def pointToLayerScript(radius, borderWidth, borderStyle, colorName, borderColor, borderOpacity, opacity, labeltext):
pointToLayer = """
pointToLayer: function (feature, latlng) {{
return L.circleMarker(latlng, {{
radius: {radius},
fillColor: '{colorName}',
color: '{borderColor}',
weight: {borderWidth},
opacity: {borderOpacity},
dashArray: '{dashArray}',
fillOpacity: {opacity}
}}){labeltext}""".format(radius=radius,
colorName=colorName,
borderColor=borderColor,
borderWidth=borderWidth * 4,
borderOpacity=borderOpacity if borderStyle != 0 else 0,
dashArray=getLineStyle(borderStyle, borderWidth),
opacity=opacity,
labeltext=labeltext)
return pointToLayer
def pointStyleScript(pointToLayer, popFuncs):
pointStyle = """{pointToLayer}
}},
onEachFeature: function (feature, layer) {{{popFuncs}
}}""".format(pointToLayer=pointToLayer, popFuncs=popFuncs)
return pointStyle
def wfsScript(scriptTag):
wfs = """
<script src='{scriptTag}'></script>""".format(scriptTag=scriptTag)
return wfs
def jsonPointScript(safeLayerName, pointToLayer, usedFields):
if usedFields != 0:
jsonPoint = """
var json_{safeLayerName}JSON = new L.geoJson(json_{safeLayerName}, {{
onEachFeature: pop_{safeLayerName}, {pointToLayer}
}}
}});
layerOrder[layerOrder.length] = json_{safeLayerName}JSON;""".format(safeLayerName=safeLayerName, pointToLayer=pointToLayer)
else:
jsonPoint = """
var json_{safeLayerName}JSON = new L.geoJson(json_{safeLayerName}, {{
        {pointToLayer}
}}
}});
|
layerOrder[layerOrder.length] = json_{safeLayerName}JSON;""".format(safeLayerName=safeLayerName, pointToLayer=pointToLayer)
return jsonPoint
def clusterScript(safeLayerName):
cluster = """
var cluster_group{safeLayerName}JSON = new L.MarkerClusterGroup({{showCoverageOnHover: false}});
cluster_group{safeLayerName}JSON.addLayer(json_{safeLayerName}JSON);""".format(safeLayerName=safeLayerName)
return cluster
def categorizedPointStylesScript(symbol, opacity, borderOpacity):
styleValues = """
radius: '{radius}',
fillColor: '{fillColor}',
color: '{color}',
weight: {borderWidth},
opacity: {borderOpacity},
dashArray: '{dashArray}',
fillOpacity: '{opacity}',
}};
break;""".format(radius=symbol.size() * 2,
fillColor=symbol.color().name(),
color=symbol.symbolLayer(0).borderColor().name(),
borderWidth=symbol.symbolLayer(0).outlineWidth() * 4,
borderOpacity=borderOpacity if symbol.symbolLayer(0).outlineStyle() != 0 else 0,
dashArray=getLineStyle(symbol.symbolLayer(0).outlineStyle(), symbol.symbolLayer(0).outlineWidth()),
opacity=opacity)
return styleValues
def simpleLineStyleScript(radius, colorName, penStyle, opacity):
lineStyle = """
return {{
weight: {radius},
color: '{colorName}',
dashArray: '{penStyle}',
opacity: {opacity}
}};""".format(radius=radius * 4,
colorName=colorName,
penStyle=penStyle,
opacity=opacity)
return lineStyle
def singlePolyStyleScript(radius, colorName, borderOpacity, fillColor, penStyle, opacity):
polyStyle = """
return {{
weight: {radius},
color: '{colorName}',
fillColor: '{fillColor}',
dashArray: '{penStyle}',
opacity: {borderOpacity},
fillOpacity: {opacity}
}};""".format(radius=radius,
colorName=colorName,
fillColor=fillColor,
penStyle=penStyle,
borderOpacity=borderOpacity,
opacity=opacity)
return polyStyle
def nonPointStylePopupsScript(lineStyle, popFuncs):
nonPointStylePopups = """
style
|
xueyaodeai/DjangoWebsite
|
superlists/wsgi.py
|
Python
|
mit
| 398
| 0
|
"""
WSGI config for superlists project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "superlists.settings")
application = get_wsgi_application()
|
jnewland/home-assistant
|
homeassistant/components/gearbest/__init__.py
|
Python
|
apache-2.0
| 30
| 0
|
"""The gearbest com
|
ponent."
|
""
|
voutilad/courtlistener
|
cl/search/migrations/0033_auto_20160819_1214.py
|
Python
|
agpl-3.0
| 473
| 0.002114
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
        ('search', '0032_auto_20160819_1209'),
]
operations = [
migrations.AlterField(
            model_name='docket',
name='nature_of_suit',
field=models.CharField(help_text=b'The nature of suit code from PACER.', max_length=1000, blank=True),
),
]
|
intel/ipmctl
|
BaseTools/Source/Python/Ecc/Database.py
|
Python
|
bsd-3-clause
| 14,225
| 0.005413
|
## @file
# This file is used to create a database used by ECC tool
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import sqlite3
import Common.LongFilePathOs as os, time
import Common.EdkLogger as EdkLogger
import CommonDataClass.DataClass as DataClass
from Table.TableDataModel import TableDataModel
from Table.TableFile import TableFile
from Table.TableFunction import TableFunction
from Table.TablePcd import TablePcd
from Table.TableIdentifier import TableIdentifier
from Table.TableReport import TableReport
from MetaFileWorkspace.MetaFileTable import ModuleTable
from MetaFileWorkspace.MetaFileTable import PackageTable
from MetaFileWorkspace.MetaFileTable import PlatformTable
from Table.TableFdf import TableFdf
##
# Static definitions
#
DATABASE_PATH = "Ecc.db"
## Database
#
# This class defined the ECC databse
# During the phase of initialization, the database will create all tables and
# insert all records of table DataModel
#
# @param object: Inherited from object class
# @param DbPath: A string for the path of the ECC database
#
# @var Conn: Connection of the ECC database
# @var Cur: Cursor of the connection
# @var TblDataModel: Local instance for TableDataModel
#
class Database(object):
def __init__(self, DbPath):
self.DbPath = DbPath
self.Conn = None
self.Cur = None
self.TblDataModel = None
self.TblFile = None
self.TblFunction = None
self.TblIdentifier = None
self.TblPcd = None
self.TblReport = None
self.TblInf = None
self.TblDec = None
self.TblDsc = None
self.TblFdf = None
## Initialize ECC database
#
# 1. Delete all old existing tables
# 2. Create new tables
# 3. Initialize table DataModel
#
def InitDatabase(self, NewDatabase = True):
EdkLogger.verbose("\nInitialize ECC database started ...")
#
# Drop all old existing tables
#
if NewDatabase:
if os.path.exists(self.DbPath):
os.remove(self.DbPath)
self.Conn = sqlite3.connect(self.DbPath, isolation_level = 'DEFERRED')
self.Conn.execute("PRAGMA page_size=4096")
self.Conn.execute("PRAGMA synchronous=OFF")
# to avoid non-ascii charater conversion error
self.Conn.text_factory = str
self.Cur = self.Conn.cursor()
self.TblDataModel = TableDataModel(self.Cur)
self.TblFile = TableFile(self.Cur)
self.TblFunction = TableFunction(self.Cur)
self.TblIdentifier = TableIdentifier(self.Cur)
self.TblPcd = TablePcd(self.Cur)
self.TblReport = TableReport(self.Cur)
self.TblInf = ModuleTable(self.Cur)
self.TblDec = PackageTable(self.Cur)
self.TblDsc = PlatformTable(self.Cur)
self.TblFdf = TableFdf(self.Cur)
#
# Create new tables
#
if NewDatabase:
self.TblDataModel.Create()
self.TblFile.Create()
self.TblFunction.Create()
self.TblPcd.Create()
self.TblReport.Create()
self.TblInf.Create()
self.TblDec.Create()
self.TblDsc.Create()
|
self.TblFdf.Create()
|
#
# Init each table's ID
#
self.TblDataModel.InitID()
self.TblFile.InitID()
self.TblFunction.InitID()
self.TblPcd.InitID()
self.TblReport.InitID()
self.TblInf.InitID()
self.TblDec.InitID()
self.TblDsc.InitID()
self.TblFdf.InitID()
#
# Initialize table DataModel
#
if NewDatabase:
self.TblDataModel.InitTable()
EdkLogger.verbose("Initialize ECC database ... DONE!")
## Query a table
#
# @param Table: The instance of the table to be queried
#
def QueryTable(self, Table):
Table.Query()
## Close entire database
#
# Commit all first
# Close the connection and cursor
#
def Close(self):
#
# Commit to file
#
self.Conn.commit()
#
# Close connection and cursor
#
self.Cur.close()
self.Conn.close()
## Insert one file information
#
# Insert one file's information to the database
# 1. Create a record in TableFile
# 2. Create functions one by one
# 2.1 Create variables of function one by one
# 2.2 Create pcds of function one by one
# 3. Create variables one by one
# 4. Create pcds one by one
#
def InsertOneFile(self, File):
#
# Insert a record for file
#
FileID = self.TblFile.Insert(File.Name, File.ExtName, File.Path, File.FullPath, Model = File.Model, TimeStamp = File.TimeStamp)
if File.Model == DataClass.MODEL_FILE_C or File.Model == DataClass.MODEL_FILE_H:
IdTable = TableIdentifier(self.Cur)
IdTable.Table = "Identifier%s" % FileID
IdTable.Create()
#
# Insert function of file
#
for Function in File.FunctionList:
FunctionID = self.TblFunction.Insert(Function.Header, Function.Modifier, Function.Name, Function.ReturnStatement, \
Function.StartLine, Function.StartColumn, Function.EndLine, Function.EndColumn, \
Function.BodyStartLine, Function.BodyStartColumn, FileID, \
Function.FunNameStartLine, Function.FunNameStartColumn)
#
# Insert Identifier of function
#
for Identifier in Function.IdentifierList:
IdentifierID = IdTable.Insert(Identifier.Modifier, Identifier.Type, Identifier.Name, Identifier.Value, Identifier.Model, \
FileID, FunctionID, Identifier.StartLine, Identifier.StartColumn, Identifier.EndLine, Identifier.EndColumn)
#
# Insert Pcd of function
#
for Pcd in Function.PcdList:
PcdID = self.TblPcd.Insert(Pcd.CName, Pcd.TokenSpaceGuidCName, Pcd.Token, Pcd.DatumType, Pcd.Model, \
FileID, FunctionID, Pcd.StartLine, Pcd.StartColumn, Pcd.EndLine, Pcd.EndColumn)
#
# Insert Identifier of file
#
for Identifier in File.IdentifierList:
IdentifierID = IdTable.Insert(Identifier.Modifier, Identifier.Type, Identifier.Name, Identifier.Value, Identifier.Model, \
FileID, -1, Identifier.StartLine, Identifier.StartColumn, Identifier.EndLine, Identifier.EndColumn)
#
# Insert Pcd of file
#
for Pcd in File.PcdList:
PcdID = self.TblPcd.Insert(Pcd.CName, Pcd.TokenSpaceGuidCName, Pcd.Token, Pcd.DatumType, Pcd.Model, \
FileID, -1, Pcd.StartLine, Pcd.StartColumn, Pcd.EndLine, Pcd.EndColumn)
EdkLogger.verbose("Insert information from file %s ... DONE!" % File.FullPath)
## UpdateIdentifierBelongsToFunction
#
# Update the field "BelongsToFunction" for each Indentifier
#
#
def UpdateIdentifierBelongsToFunction_disabled(self):
EdkLogger.verbose("Update 'BelongsToFunction' for Identifiers started ...")
SqlCommand = """select ID, BelongsToFile, StartL
|
guilhermerc/fournir-au-public
|
tools/fann_input_norm.py
|
Python
|
gpl-3.0
| 2,101
| 0.005236
|
#! /usr/bin/env python3
import sys
import csv
import menu_map
import datetime
db_input = sys.argv[1]
def normalize_elem(min_, max_, value):
    '''Map value from the range [min_, max_] onto [-0.5, 0.5].'''
return (value - min_)/(max_ - min_) - 0.5
def normalize(array, min_, max_):
    '''Normalize every element of array from [min_, max_] onto [-0.5, 0.5], in place.'''
for i in range(0, len(array)):
array[i] = normalize_elem(min_, max_, array[i])
return array
with open(db_input, newline='\n') as db_in:
day_of_the_week = []
month = []
menu = []
temp_avg = []
rain_acc = []
nutri_week = []
vacation = []
strike = []
    total_enrolled = []
target = []
traindb = csv.reader(db_in, delimiter='\t', quotechar='"')
for row in traindb:
date = row[0].split('-')
date_info = datetime.date(int(date[0]), int(date[1]), int(date[2])).timetuple()
day_of_the_week.append(float(date_info.tm_wday))
month.append(float(date_info.tm_mon))
menu.append(menu_map.map[row[1]])
        temp_avg.append(float(row[4]))
rain_acc.append(float(row[5]))
nutri_week.append(float(row[6]))
vacation.append(float(row[7]))
strike.append(float(row[8]))
total_enrolled.append(float(row[9]))
target.append(float(row[3]))
# normalizing values into -0.5 - 0.5 range
day_of_the_week = normalize(day_of_the_week, 0, 6)
month = normalize(month, 1, 12)
menu = normalize(menu, 0, 27)
temp_avg = normalize(temp_avg, 5, 40)
rain_acc = normalize(rain_acc, 0, 100)
nutri_week = normalize(nutri_week, 0, 1)
vacation = normalize(vacation, 0, 1)
strike = normalize(strike, 0, 1)
total_enrolled = normalize(total_enrolled, 30000, 50000)
target = normalize(target, 1000, 13000)
# input normalization for fann using
print(str(len(target)) + ' 9 1')
for i in range(0, len(target)):
print(str(day_of_the_week[i]) + ' ' + str(month[i]) + ' ' + str(menu[i]) + ' ' + str(temp_avg[i]) + ' ' + str(rain_acc[i]) + ' ' + str(nutri_week[i]) + ' ' + str(vacation[i]) + ' ' + str(strike[i]) + ' ' + str(total_enrolled[i]))
print(str(target[i]))
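A quick check of the [-0.5, 0.5] mapping used above, with the same 0..6 range as day_of_the_week (assumes normalize_elem from this script is in scope).

assert normalize_elem(0, 6, 0) == -0.5   # lower bound maps to -0.5
assert normalize_elem(0, 6, 3) == 0.0    # midpoint maps to 0.0
assert normalize_elem(0, 6, 6) == 0.5    # upper bound maps to 0.5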
|
sagnik17/Movie-Recommendation-System
|
mrs/recsys/cf.py
|
Python
|
gpl-3.0
| 387
| 0.005168
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# create a class for the correlation matrix and another class to find out similar users who have rated a particular movie w.r.t. a particular user based on correlation
import numpy as np
import pandas as pd
class Correlation:
|
"""
"""
def pearson(self, rating_matrix):
return pd.DataFrame(rating_matrix.T).corr().as_matrix()
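A small usage sketch, under the assumption that the rating matrix has users as rows and movies as columns (made-up values); because of the transpose inside pearson(), DataFrame.corr() then correlates users, giving a user-by-user matrix. Depending on the pandas version, as_matrix() may need to be replaced by .values.

import numpy as np
ratings = np.array([[5.0, 3.0, 1.0],    # user 0 (made-up ratings)
                    [4.0, 2.0, 1.0],    # user 1
                    [1.0, 5.0, 4.0]])   # user 2
corr = Correlation().pearson(ratings)   # 3x3 user-user Pearson correlation matrix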
|
vliangzy/sillypool
|
sillypool/spider/parser/xpath.py
|
Python
|
apache-2.0
| 2,885
| 0.000365
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 liangzy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from lxml.etree import HTML
from sillypool.settings import QQWRY_PATH
from sillypool.database.models import Proxy
from sillypool.libs.exception import ParserTypeNotSupportError
from sillypool.libs.iplocator import IPLocator
class Parser:
def __init__(self):
self.ip_locator = IPLocator(QQWRY_PATH)
def parse(self, response, url_config, url):
if url_config['type'] == 'xpath':
return self.parse_xpath(response, url_config, url)
else:
raise ParserTypeNotSupportError(url_config['type'])
def parse_xpath(self, response, url_config, url):
proxy_list = []
root = HTML(response)
proxy_all = root.xpath(url_config['pattern'])
for proxy in proxy_all:
try:
ip = proxy.xpath(url_config['position']['ip'])[0].text
country, address = self.ip_locator.get_ip_address(self.ip_locator.str2ip(ip))
proxy = Proxy(
ip=proxy.xpath(url_config['position']['ip'])[0].text,
port=proxy.xpath(url_config['position']['port'])[0].text,
country=self.judge_country(country),
area=address,
crawl_time=datetime.datetime.utcnow()
)
proxy_list.append(proxy)
except OSError as e:
logging.error("parser error: " + url)
break
except Exception as e:
logging.error(e)
logging.error('proxy: ' + proxy)
return proxy_list
@staticmethod
def judge_country(country):
china_area = ['河北', '山东', '辽宁', '黑龙江', '吉林',
'甘肃', '青海', '河南', '江苏', '湖北',
'湖南', '江西', '浙江', '广东', '云南',
'福建', '台湾', '海南', '山西', '四川',
'陕西', '贵州', '安徽', '重庆', '北京',
'上海',
|
'天津', '广西', '内蒙', '西藏',
'新疆', '宁夏', '香港', '澳门']
for area in china_area:
if area in country:
return "中
|
国"
return country
|
Trust-Code/trust-addons
|
trust_sale/models/__init__.py
|
Python
|
agpl-3.0
| 1,413
| 0
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version.
|
#
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
#
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from . import sale_order
|
autosportlabs/RaceCapture_App
|
spacer.py
|
Python
|
gpl-3.0
| 1,123
| 0.005343
|
#
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
from kivy.uix.widget import Widget
class HorizontalSpacer(Widget):
def __init__(self, **kwargs):
super(HorizontalSpacer, self).__init__( **kwargs)
self.size_hint_y = None
self.height=0
class VerticalSpacer(Widget):
def __init__(self, **kwargs):
super(VerticalSpacer, self).__init__( **kwargs)
self.size_hint_x = None
self.width=0
|
excelly/xpy-ml
|
omop/simu_flow.py
|
Python
|
apache-2.0
| 2,109
| 0.009483
|
import sys
import os
import shutil as sh
import logging as log
import multiprocessing as mp
import ex.util as eu
from OMOP import OMOP
import base
eu.InitLog(log.INFO)
dat_dest="D:/Documents/DataSet/omop/simulation/"
# dat_dest="~/h/data/omop/simulation/"
def DoTask(configs):
modifier=configs[0]
    folder=base.Simulate(
modifier, validation=True, n_drug=10, n_cond=10, n_person=500,
cond_alt=configs[1], ob_alt=configs[2], drug_alt=configs[3],
dexposure_alt=configs[4],doutcome_alt=configs[5],ind_alt=configs[6],
no_simu=True)
    ds=OMOP(modifier, folder)
ds.CreateDB()
ds.OrderDB()
ds.IndexDB()
ds.JoinDrugCond(simu=True)
ds.ExpandCondOccur(simu=True)
ds.GenCountTable()
return(folder)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1].startswith('s'):
log.info('''OMOP Single threaded simulation.''')
parallel=False
else:
log.info('''OMOP Parallel simulation.''')
parallel=True
log.warn("A Numpy bug may make char arrays wrong in matlab. To fix A, use A=reshape(A, size(A,2), size(A,1))'")
tasks=[# ("TEST", False, False, False, False, False, False),
# ("TEST_C", True, False, False, False, False, False),
# ("TEST_OB", False, True, False, False, False, False),
# ("TEST_D", False, False, True, False, False, False),
# ("TEST_DE", False, False, False, True, False, False),
# ("TEST_DO", False, False, False, False, True, False),
# ("TEST_IN", False, False, False, False, False, True),
("TEST_C_D_DO", True, False, True, False, True, False),
("TEST_D_DO", False, False, True, False, True, False),
]
if parallel:
pool_size=min((mp.cpu_count() - 1, len(tasks), 5))
p=mp.Pool(max(2,pool_size))
folders=p.map(DoTask, tasks)
else:
folders=[DoTask(task) for task in tasks]
for folder in folders:
os.system("mv {0}/*.mat {1}".format(folder, dat_dest))
os.system("cp {0}/*.db3 {1}".format(folder, dat_dest))
|
nagaozen/my-os-customizations
|
home/nagaozen/.gnome2/gedit/plugins/better-defaults/__init__.py
|
Python
|
gpl-3.0
| 6,370
| 0.027002
|
# -*- coding: utf-8 -*-
# Gedit Better Defaults plugin
# Copyright (C) 2017 Fabio Zendhi Nagao
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gedit
import gtk
import re
ui_str = """
<ui>
<menubar name="MenuBar">
<menu name="EditMenu" action="Edit">
<placeholder name="EditOps_4">
<menuitem action="DuplicateLine" name="Duplicate line"/>
</placeholder>
</menu>
</menubar>
</ui>
"""
class BetterDefaultsWindowHelper:
def __init__(self, plugin, window):
self._window = window
self._plugin = plugin
self.install_ui()
for view in self._window.get_views():
self.activate_view(view)
for doc in self._window.get_documents():
self.activate_doc(doc)
self._tab_added_id = self._window.connect("tab_added", self.on_tab_added)
# self._key_press_id = self._window.connect("key-press-event", self.on_key_press_event)
def deactivate(self):
# self._window.disconnect(self._key_press_id)
self._window.disconnect(self._tab_added_id)
for doc in self._window.get_documents():
self.deactivate_doc(doc)
for view in self._window.get_views():
self.deactivate_view(view)
self.uninstall_ui()
self._window = None
self._plugin = None
def update_ui(self):
pass
# # TODO: Use key press and button press events instead of update_ui
# doc = self._window.get_active_document()
# if doc:
# bounds = doc.get_selection_bounds()
# if bounds:
# content = doc.get_text(*bounds).decode("utf-8")
# highlightable = re.compile(r"[\S\{\}\[\]\(\)]+", flags=re.UNICODE)
# if highlightable.search(content):
# doc.set_search_text(content, gedit.SEARCH_CASE_SENSITIVE)
# else:
# doc.set_search_text("", gedit.SEARCH_CASE_SENSITIVE)
# else:
# doc.set_search_text("", gedit.SEARCH_CASE_SENSITIVE)
def install_ui(self):
manager = self._window.get_ui_manager()
self._action_group = gtk.ActionGroup("BetterDefaultsPluginActions")
self._action_group.add_actions([
( "DuplicateLine", None, _("Duplicate line"), "<Ctrl><Shift>d", _("Duplicate Line"), self.duplicate_line )
])
manager.insert_action_group(self._action_group, -1)
self._ui_id = manager.add_ui_from_string(ui_str)
def uninstall_ui(self):
manager = self._window.get_ui_manager()
manager.remove_ui(self._ui_id)
manager.remove_action_group(self._action_group)
manager.ensure_update()
def activate_view(self, view):
        view.set_smart_home_end(True)
view.set_data("vscrolling_helper", (0.0, 0.0))
size_allocate_id = view.connect("size-allocate", self.on_size_allocate)
view.set_data("on_size_allocate_id", size_allocate_id)
va = view.get_vadjustment()
value_change_id = va.connect("value_changed", self.on_value_changed)
view.set_data("on_value_changed_id", value_change_id)
def deactivate_view(self, view):
va = view.get_vadjustment()
        va.disconnect( view.get_data("on_value_changed_id") )
view.disconnect( view.get_data("on_size_allocate_id") )
view.set_smart_home_end(False)
def activate_doc(self, doc):
save_id = doc.connect("save", self.on_document_save)
doc.set_data("on_save_id", save_id)
def deactivate_doc(self, doc):
        doc.disconnect( doc.get_data("on_save_id") )
def on_tab_added(self, w, t):
self.activate_view(t.get_view())
self.activate_doc(t.get_document())
def on_document_save(self, doc):
piter = doc.get_end_iter()
if piter.starts_line():
while piter.backward_char():
if not piter.ends_line():
piter.forward_to_line_end()
break
doc.delete(piter, doc.get_end_iter())
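    # Note (added for clarity): the two handlers below implement "scroll past end";
    # on_size_allocate grows the vertical adjustment's upper bound by half a page,
    # and on_value_changed keeps the stored scroll position in sync with it.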
def on_size_allocate(self, view, allocation):
va = view.get_vadjustment()
vsz = va.get_upper() + ( va.get_page_size() / 2 )
if va.get_upper() > va.get_page_size():
va.set_upper(vsz)
if va.get_value() < view.get_data("vscrolling_helper")[1]:
va.set_value(view.get_data("vscrolling_helper")[1])
view.set_data("vscrolling_helper", (vsz, va.get_value()))
def on_value_changed(self, adjustment):
view = self._window.get_active_view()
va = view.get_vadjustment()
if( va.get_upper() == view.get_data("vscrolling_helper")[0] ):
view.set_data( "vscrolling_helper", ( view.get_data("vscrolling_helper")[0], va.get_value() ) )
def duplicate_line(self, action):
doc = self._window.get_active_document()
doc.begin_user_action()
liter = doc.get_iter_at_mark(doc.get_insert())
liter.set_line_offset(0);
riter = doc.get_iter_at_mark(doc.get_insert())
f = riter.forward_line()
line = doc.get_slice(liter, riter, True)
if f:
doc.insert(riter, line)
else:
doc.insert(riter, '\n' + line)
doc.end_user_action()
def enclose_selected(self, l, r):
doc = self._window.get_active_document()
(a, b) = doc.get_selection_bounds()
doc.insert(b, r)
(a, b) = doc.get_selection_bounds()
doc.insert(a, l)
def on_key_press_event(self, window, event):
doc = self._window.get_active_document()
bounds = doc.get_selection_bounds()
if bounds:
c = event.keyval
if c == 123:
self.enclose_selected('{', '}')
elif c == 91:
self.enclose_selected('[', ']')
elif c == 40:
self.enclose_selected('(', ')')
elif c == 60:
self.enclose_selected('<', '>')
elif c == 65111:
self.enclose_selected('"', '"')
elif c == 65105:
self.enclose_selected("'", "'")
if c in [123, 91, 40, 60, 65111, 65105]:
return True
class BetterDefaultsPlugin(gedit.Plugin):
WINDOW_DATA_KEY = "BetterDefaultsPluginWindowData"
def __init__(self):
gedit.Plugin.__init__(self)
def activate(self, window):
helper = BetterDefaultsWindowHelper(self, window)
window.set_data(self.WINDOW_DATA_KEY, helper)
def deactivate(self, window):
window.get_data(self.WINDOW_DATA_KEY).deactivate()
window.set_data(self.WINDOW_DATA_KEY, None)
def update_ui(self, window):
window.get_data(self.WINDOW_DATA_KEY).update_ui()
|
TemosEngenharia/RPI-IO
|
RPi_IO/rpi_io.py
|
Python
|
agpl-3.0
| 6,968
| 0.004167
|
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
# Modules pins and description
M1A = session.query(Module).filter(Module.name == 'M1A').first()
M1B = session.query(Module).filter(Module.name == 'M1B').first()
M1C = session.query(Module).filter(Module.name == 'M1C').first()
M2A = session.query(Module).filter(Module.name == 'M2A').first()
M2B = session.query(Module).filter(Module.name == 'M2B').first()
M2C = session.query(Module).filter(Module.name == 'M2C').first()
M3A = session.query(Module).filter(Module.name == 'M3A').first()
M3B = session.query(Module).filter(Module.name == 'M3B').first()
M3C = session.query(Module).filter(Module.name == 'M3C').first()
M4A = session.query(Module).filter(Module.name == 'M4A').first()
M4B = session.query(Module).filter(Module.name == 'M4B').first()
M4C = session.query(Module).filter(Module.name == 'M4C').first()
M5A = session.query(Module).filter(Module.name == 'M5A').first()
M5B = session.query(Module).filter(Module.name == 'M5B').first()
M5C = session.query(Module).filter(Module.name == 'M5C').first()
M6A = session.query(Module).filter(Module.name == 'M6A').first()
M6B = session.query(Module).filter(Module.name == 'M6B').first()
M6C = session.query(Module).filter(Module.name == 'M6C').first()
M7A = session.query(Module).filter(Module.name == 'M7A').first()
M7B = session.query(Module).filter(Module.name == 'M7B').first()
M7C = session.query(Module).filter(Module.name == 'M7C').first()
# Startup inputs BCM pin
input_pins = [0, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25]
# Startup outputs BCM pin
output_pins = [26, 27]
def main():
# Set up GPIO using BCM numbering
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(26, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(27, GPIO.OUT, initial=GPIO.LOW)
def modo0():
for pin in input_pins:
try:
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
except:
print u"Erro de ativação do Pino BCM %s", pin
stdout.flush()
for pin in output_pins:
try:
GPIO.setup(pin, GPIO.OUT, initial=GPIO.LOW)
except:
print u"Erro na ativação do Pino BCM %s", pin
stdout.flush()
return(True)
def modo1():
global M1A, M1B, M1C
global M2A, M2B, M2C
global M3A, M3B, M3C
global M4A, M4B, M4C
global M5A, M5B, M5C
global M6A, M6B, M6C
global M7A, M7B, M7C
try:
GPIO.output(26, GPIO.HIGH)
except:
        print u'Error setting the level of BCM pin 26'
try:
GPIO.output(27, GPIO.LOW)
except:
        print u'Error setting the level of BCM pin 27'
sleep(5)
discovery_mods(M1A, M1B, M1C)
discovery_mods(M2A, M2B, M2C)
discovery_mods(M3A, M3B, M3C)
discovery_mods(M4A, M4B, M4C)
discovery_mods(M5A, M5B, M5C)
discovery_mods(M6A, M6B, M6C)
discovery_mods(M7A, M7B, M7C)
def modo3():
try:
GPIO.output(26, GPIO.HIGH)
except:
        print u'Error setting the level of BCM pin 26'
try:
GPIO.output(27, GPIO.HIGH)
except:
        print u'Error setting the level of BCM pin 27'
return True
def switch_on(_M):
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if GPIO.gpio_function(_M.gpio) == 0:
GPIO.setup(_M.gpio, GPIO.OUT, initial=GPIO.LOW)
GPIO.output(_M.gpio, GPIO.HIGH)
_M.status = True
session.commit()
else:
        print 'ERROR! This pin is set as an input'
def switch_off(_M):
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if GPIO.gpio_function(_M.gpio) == 0:
GPIO.setup(_M.gpio, GPIO.OUT, initial=GPIO.HIGH)
GPIO.output(_M.gpio, GPIO.LOW)
_M.status = False
session.commit()
else:
        print 'ERROR! This pin is set as an input'
def reset_pin(_M, _time):
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if GPIO.gpio_function(_M.gpio) == 0:
switch_on(_M)
sleep(_time)
switch_off(_M)
else:
        print 'ERROR! This pin is set as an input'
def softreset(_host):
from subprocess import call
call(["net", "rpc", "shutdown", "-r", "-I", "192.168.1.21", "-U", "Administrador%SemParar"])
def discovery_mods(_MA, _MB, _MC):
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if GPIO.input(_MA.gpio) == 0 and GPIO.input(_MB.gpio) == 1:
GPIO.setup(_MA.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(_MB.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(_MC.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
_MA.io_type = 'input'
_MA.rpull = False
_MB.io_type = 'input'
_MB.rpull = False
_MC.io_type = 'input'
_MC.rpull = False
session.commit()
elif GPIO.input(_MA.gpio) == 1 and GPIO.input(_MB.gpio) == 0:
GPIO.setup(_MA.gpio, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(_MB.gpio, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(_MC.gpio, GPIO.OUT, initial=GPIO.LOW)
_MA.io_type = 'output'
_MA.status = False
_MB.io_type = 'output'
_MB.status = False
_MC.io_type = 'output'
_MC.status = False
session.commit()
else:
GPIO.setup(_MA.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(_MB.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(_MC.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
_MA.io_type = 'empty'
_MA.rpull = False
_MB.io_type = 'empty'
_MB.rpull = False
_MC.io_type = 'empty'
_MC.rpull = False
session.commit()
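# Note (added for clarity, derived from the branches above): pin A low / pin B high
# marks the module block as inputs, pin A high / pin B low marks it as outputs,
# and any other combination leaves the slot marked 'empty'.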
def cleanup_pins():
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.cleanup()
if __name__ == "__main__":
main()
|
python-xlib/python-xlib
|
examples/xfixes-selection-notify.py
|
Python
|
lgpl-2.1
| 2,764
| 0.002894
|
#!/usr/bin/python3
#
# examples/xfixes-selection-notify.py -- demonstrate the XFIXES extension
# SelectionNotify event.
#
# Copyright (C) 2019
# Tony Crisci <tony@dubstepdish.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
# Python 2/3 compatibility.
from __future__ import print_function
import sys
import os
import time
# Change path so we find Xlib
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from Xlib.display import Display
from Xlib.ext import xfixes
def main(argv):
if len(sys.argv) != 2:
sys.exit('usage: {0} SELECTION\n\n'
'SELECTION is typically PRIMARY, SECONDARY or CLIPBOARD.\n'
.format(sys.argv[0]))
display = Display()
sel_name = sys.argv[1]
sel_atom = display.get_atom(sel_name)
if not display.has_extension('XFIXES'):
if display.query_extension('XFIXES') is None:
print('XFIXES extension not supported', file=sys.stderr)
return 1
xfixes_version = display.xfixes_query_version()
print('Found XFIXES version %s.%s' % (
xfixes_version.major_version,
xfixes_version.minor_version,
), file=sys.stderr)
screen = display.screen()
mask = xfixes.XFixesSetSelectionOwnerNotifyMask | \
xfixes.XFixesSelectionWindowDestroyNotifyMask | \
xfixes.XFixesSelectionClientCloseNotifyMask
display.xfixes_select_selection_input(screen.root, sel_atom, mask)
while True:
e = display.next_event()
print(e)
if (e.type, e.sub_code) == display.extension_event.SetSelectionOwnerNotify:
print('SetSelectionOwner: owner=0x{0:08x}'.format(e.owner.id))
elif (e.type, e.sub_code) == display.extension_event.SelectionWindowDestroyNotify:
print('SelectionWindowDestroy: owner=0x{0:08x}'.format(e.owner.id))
elif (e.type, e.sub_code) == display.extension_event.SelectionClientCloseNotify:
print('SelectionClientClose: owner=0x{0:08x}'.format(e.owner.id))
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
qrkourier/ansible
|
lib/ansible/plugins/action/dellos6.py
|
Python
|
gpl-3.0
| 4,351
| 0.001379
|
# 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.dellos6 import dellos6_argument_spec
from ansible.module_utils.basic import AnsibleFallbackNotFound
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
provider = self.load_provider()
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'dellos6'
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
pc.become = provider['authorize'] or False
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
# make sure we are in the right cli context which should be
# enable mode and not config module
rc, out, err = connection.exec_command('prompt()')
while str(out).strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
connection.exec_command('exit')
rc, out, err = connection.exec_command('prompt()')
task_vars['ansible_socket'] = socket_path
if self._play_context.become_method == 'enable':
self._play_context.become = False
self._play_context.become_method = None
result = super(ActionModule, self).run(tmp, task_vars)
return result
def load_provider(self):
provider = self._task.args.get('provider', {})
for key, value in iteritems(dellos6_argument_spec):
if key != 'provider' and key not in provider:
if key in self._task.args:
provider[key] = self._task.args[key]
elif 'fallback' in value:
provider[key] = self._fallback(value['fallback'])
elif key not in provider:
provider[key] = None
return provider
def _fallback(self, fallback):
strategy = fallback[0]
args = []
kwargs = {}
for item in fallback[1:]:
if isinstance(item, dict):
kwargs = item
else:
args = item
try:
return strategy(*args, **kwargs)
except AnsibleFallbackNotFound:
pass
|
melvinodsa/odsatransform
|
Code/Test_Reverse_Transform.py
|
Python
|
apache-2.0
| 2,316
| 0.007772
|
## @file Test_Reverse_Transform.py
#
# This script file has a simple implementation of the reverse odsa transform algorithm
## reverse_jumps function
#
# make reverse jumps to the elaborated output
def reverse_jumps(letter, index, input_text):
output = input_text
for x in xrange(0, len(letter)):
output = output[:output.index(letter[x])] + output[output.index(letter[x])+1:]
output = output[:index[x]]+[letter[x]]+output[index[x]:]
return output
## elloborate function
#
# elaborate the output text from the letter/index map
def elloborate(letter, index):
output = []
for x in xrange(0, len(letter)):
for y in xrange(index[x],index[x+1]):
output += letter[x]
return output
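# Worked example (illustrative values, not taken from the repo's test data):
# elloborate(['a', 'b'], [0, 3, 5]) expands to ['a', 'a', 'a', 'b', 'b'], and
# reverse_jumps(['b'], [1], ['a', 'a', 'a', 'b', 'b']) moves that 'b' back to
# index 1, giving ['a', 'b', 'a', 'a', 'b'].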
#main program
fo = open('../Test/Transform.kt','rb')
lines = []
for line in fo:
lines.append(line.strip())
fo.close()
fo = open('../Test/TestOutput.txt','wb')
fo.write('')
fo.close()
inSize = 0
outSize = 0
len_letter_map = 0
for x in xrange(0,len(lines)/4):
letter_map = lines[4*x].split(' ')
index_map = map(int, lines[4*x+1].split(' '))
letter_transform = lines[4*x+2].split(' ')
index_transform = map(int, lines[4*x+3].split(' '))
output = elloborate(letter_map, index_map)
output = reverse_jumps(letter_transform, index_transform, output)
fo = open('../Test/TestOutput.txt','a')
fo.write(''.join(output))
fo.close()
inSize += len(output)
outSize += len(letter_map)+len(letter_transform)+len(index_map)+len(index_transform)
len_letter_map += len(letter_map)
if x == (len(lines)/4)-1 and not len(lines)%4 == 0:
letter_map = lines[4*(x+1)].split(' ')
index_map = map(int, lines[4*(x+1)+1].split(' '))
output = elloborate(letter_map, index_map)
fo = open('../Test/TestOutput.txt','a')
        fo.write(''.join(output))
fo.close()
inSize += len(output)
        outSize += len(letter_map)+len(letter_transform)+len(index_map)+len(index_transform)
len_letter_map += len(letter_map)
print 'Input size =', inSize, ' bytes.'
print 'Output size =', outSize, ' bytes.'
print 'Actual file size =', ((outSize*2)+len_letter_map+4), ' bytes'
print 'Efficiency =', (100 - (outSize)*100/inSize), '%'
print 'Actual efficiency =', (100 - ((outSize*2)+len_letter_map+4)*100/inSize), '%'
|
xolox/python-coloredlogs
|
coloredlogs/syslog.py
|
Python
|
mit
| 11,849
| 0.001772
|
# Easy to use system logging for Python's logging module.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: December 10, 2020
# URL: https://coloredlogs.readthedocs.io
"""
Easy to use UNIX system logging for Python's :mod:`logging` module.
Admittedly system logging has little to do with colored terminal output, however:
- The `coloredlogs` package is my attempt to do Python logging right and system
logging is an important part of that equation.
- I've seen a surprising number of quirks and mistakes in system logging done
in Python, for example including ``%(asctime)s`` in a format string (the
system logging daemon is responsible for adding timestamps and thus you end
up with duplicate timestamps that make the logs awful to read :-).
- The ``%(programname)s`` filter originated in my system logging code and I
wanted it in `coloredlogs` so the step to include this module wasn't that big.
- As a bonus this Python module now has a test suite and proper documentation.
So there :-P. Go take a look at :func:`enable_system_logging()`.
"""
# Standard library modules.
import logging
import logging.handlers
import os
import socket
import sys
# External dependencies.
from humanfriendly import coerce_boolean
from humanfriendly.compat import on_macos, on_windows
# Modules included in our package.
from coloredlogs import (
DEFAULT_LOG_LEVEL,
ProgramNameFilter,
adjust_level,
find_program_name,
level_to_number,
replace_handler,
)
LOG_DEVICE_MACOSX = '/var/run/syslog'
"""The pathname of the log device on Mac OS X (a string)."""
LOG_DEVICE_UNIX = '/dev/log'
"""The pathname of the log device on Linux and most other UNIX systems (a string)."""
DEFAULT_LOG_FORMAT = '%(programname)s[%(process)d]: %(levelname)s %(message)s'
"""
The default format for log messages sent to the system log (a string).
The ``%(programname)s`` format requires :class:`~coloredlogs.ProgramNameFilter`
but :func:`enable_system_logging()` takes care of this for you.
The ``name[pid]:`` construct (specifically the colon) in the format allows
rsyslogd_ to extract the ``$programname`` from each log message, which in turn
allows configuration files in ``/etc/rsyslog.d/*.conf`` to filter these log
messages to a separate log file (if the need arises).
.. _rsyslogd: https://en.wikipedia.org/wiki/Rsyslog
"""
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
class SystemLogging(object):
"""Context manager to enable system logging."""
def __init__(self, *args, **kw):
"""
Initialize a :class:`SystemLogging` object.
:param args: Positional arguments to :func:`enable_system_logging()`.
:param kw: Keyword arguments to :func:`enable_system_logging()`.
"""
self.args = args
self.kw = kw
self.handler = None
def __enter__(self):
"""Enable system logging when entering the context."""
if self.handler is None:
self.handler = enable_system_logging(*self.args, **self.kw)
return self.handler
def __exit__(self, exc_type=None, exc_value=None, traceback=None):
"""
Disable system logging when leaving the context.
.. note:: If an exception is being handled when we leave the context a
warning message including traceback is logged *before* system
logging is disabled.
"""
if self.handler is not None:
if exc_type is not None:
logger.warning("Disabling system logging due to unhandled exception!", exc_info=True)
(self.kw.get('logger') or logging.getLogger()).removeHandler(self.handler)
self.handler = None
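# A minimal usage sketch (added for illustration; it relies only on the public
# names defined in this module and passes `programname` straight through to
# enable_system_logging()):
#
#   import logging
#   from coloredlogs.syslog import SystemLogging
#
#   with SystemLogging(programname='my-app'):
#       logging.getLogger(__name__).info("This message also goes to the system log")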
def enable_system_logging(programname=None, fmt=None, logger=None, reconfigure=True, **kw):
"""
Redirect :mod:`logging` messages to the system log (e.g. ``/var/log/syslog``).
:param programname: The program name to embed in log messages (a string, defaults
to the result of :func:`~coloredlogs.find_program_name()`).
:param fmt: The log format for system log messages (a string, defaults to
:data:`DEFAULT_LOG_FORMAT`).
:param logger: The logger to which the :class:`~logging.handlers.SysLogHandler`
should be connected (defaults to the root logger).
:param level: The logging level for the :class:`~logging.handlers.SysLogHandler`
(defaults to :data:`.DEFAULT_LOG_LEVEL`). This value is coerced
using :func:`~coloredlogs.level_to_number()`.
:param reconfigure: If :data:`True` (the default) multiple calls to
:func:`enable_system_logging()` will each override
the previous configuration.
:param kw: Refer to :func:`connect_to_syslog()`.
:returns: A :class:`~logging.handlers.SysLogHandler` object or
:data:`None`. If an existing handler is found and `reconfigure`
is :data:`False` the existing handler object is returned. If the
connection to the system logging daemon fails :data:`None` is
returned.
As of release 15.0 this function uses :func:`is_syslog_supported()` to
check whether system logging is supported and appropriate before it's
enabled.
.. note:: When the logger's effective level is too restrictive it is
relaxed (refer to `notes about log levels`_ for details).
"""
# Check whether system logging is supported / appropriate.
if not is_syslog_supported():
return None
# Provide defaults for omitted arguments.
programname = programname or find_program_name()
logger = logger or logging.getLogger()
fmt = fmt or DEFAULT_LOG_FORMAT
level = level_to_number(kw.get('level', DEFAULT_LOG_LEVEL))
# Check whether system logging is already enabled.
handler, logger = replace_handler(logger, match_syslog_handler, reconfigure)
# Make sure reconfiguration is allowed or not relevant.
if not (handler and not reconfigure):
# Create a system logging handler.
handler = connect_to_syslog(**kw)
# Make sure the handler was successfully created.
if handler:
# Enable the use of %(programname)s.
ProgramNameFilter.install(handler=handler, fmt=fmt, programname=programname)
# Connect the formatter, handler and logger.
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
# Adjust the level of the selected logger.
adjust_level(logger, level)
return handler
def connect_to_syslog(address=None, facility=None, level=None):
"""
Create a :class:`~logging.handlers.SysLogHandler`.
:param address: The device file or network address of the system logging
daemon (a string or tuple, defaults to the result of
:func:`find_syslog_address()`).
:param facility: Refer to :class:`~logging.handlers.SysLogHandler`.
Defaults to ``LOG_USER``.
:param level: The logging level for the :class:`~logging.handlers.SysLogHandler`
(defaults to :data:`.DEFAULT_LOG_LEVEL`). This value is coerced
                   using :func:`~coloredlogs.level_to_number()`.
:returns: A :class:`~logging.handlers.SysLogHandler` object or :data:`None` (if the
system logging daemon is unavailable).
The process of connecting to the system logging daemon goes as follows:
    - The following two socket types are tried (in decreasing preference):
1. :data:`~socket.SOCK_RAW` avoids truncation of log messages but may
not be supported.
2. :data:`~socket.SOCK_STREAM` (TCP) supports longer messages than the
default (which is UDP).
"""
if not address:
address = find_syslog_address()
if facility is None:
facility = logging.handlers.SysLogHandler.LOG_USER
if level is None:
level = DEFAULT_LOG_LEVEL
for socktype in socket.SOCK_RAW, socket.SOCK_STREAM, None:
kw = dict(facility=facility, address=address)
if sockt
|
hying-caritas/ibsuite
|
ibpy/ibpy/image.py
|
Python
|
gpl-2.0
| 12,386
| 0.00549
|
#
# Copyright 2008 Huang Ying <huang.ying.caritas@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
import os
import tempfile
import Image, ImageFilter
import config
from util import *
class PageImageRef(object):
def __init__(self, page_num, sub_page_num = 0,
image = None, file_name = None):
object.__init__(self)
self.page_num = page_num
self.sub_page_num = sub_page_num
self.image = image
self.file_name = file_name
def __del__(self):
if self.file_name and not config.debug:
os.unlink(self.file_name)
def clear(self):
self.file_name = None
self.image = None
def derive(self, image = None, file_name = None):
return PageImageRef(self.page_num, self.sub_page_num,
image, file_name)
def get_image(self):
if self.image:
return self.image
elif self.file_name:
self.image = Image.open(self.file_name)
return self.image
def get_file_name(self, ext = 'pgm'):
if self.file_name:
return self.file_name
self.file_name = temp_file_name('.'+ext)
if self.image:
self.image.save(self.file_name)
return self.file_name
class NullConv(object):
def __init__(self, config):
object.__init__(self)
def convert(self, pimg_ref, out_file_name = None):
return pimg_ref
class PreCrop(object):
def __init__(self, config):
object.__init__(self)
self.trim_left = config.trim_left
self.trim_top = config.trim_top
self.trim_right = config.trim_right
self.trim_bottom = config.trim_bottom
def convert(self, pimg_ref, out_file_name = None):
if self.trim_left < 0.01 and self.trim_top < 0.01 and \
self.trim_right < 0.01 and self.trim_bottom < 0.01:
return pimg_ref
img = pimg_ref.get_image()
iw, ih = img.size
left = nround(self.trim_left * iw)
right = iw - nround(self.trim_right * iw)
top = nround(self.trim_top * ih)
bottom = ih - nround(self.trim_bottom * ih)
img = img.crop((left, top, right, bottom))
return pimg_ref.derive(img)
class Dilate(object):
def __init__(self, config):
object.__init__(self)
def convert(self, pimg_ref, out_file_name = None):
img = pimg_ref.get_image()
img = img.filter(ImageFilter.MinFilter(3))
return pimg_ref.derive(img)
def create_dilate(config):
if config.dilate:
return Dilate(config)
else:
return NullConv(config)
class Unpaper(object):
def __init__(self, config):
object.__init__(self)
def convert(self, pimg_ref, out_file_name = None):
if out_file_name is None:
out_file_name = temp_file_name('.pgm')
check_call(['unpaper', '-q', '--no-deskew',
pimg_ref.get_file_name(), out_file_name])
return pimg_ref.derive(file_name = out_file_name)
class RowCondense(object):
def __init__(self, config):
object.__init__(self)
self.unpaper_keep_size = config.unpaper_keep_size
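    # Note (added for clarity, summarizing convert() below): blank left/right margins
    # and the rows outside the first/last inked band are dropped, the remaining
    # ink/blank row bands are re-stacked, and blank bands next to very thin ink bands
    # are shrunk to a small minimum height; unpaper_keep_size keeps the original canvas size.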
def convert(self, pimg_ref, out_file_name = None):
img = pimg_ref.get_image()
iw, ih = img.size
ethr = max(ih/500, 1)
def not_empty(h):
return sum(h[:-32]) > ethr
top = 0
bottom = ih
left = -1
right = iw
for x in range(1, iw+1):
ir = img.crop((x - 1, 0, x, ih))
if not_empty(ir.histogram()):
left = x - 1
break
if left == -1:
if self.unpaper_keep_size:
nimg = img
else:
nimg = img.crop((0, 0, 2, ih))
return pimg_ref.derive(nimg)
        for x in range(iw-1, left, -1):
ir = img.crop((x, 0, x+1, ih))
if not_empty(ir.histogram()):
right = x+1
break
rows = []
pe = True
for y in range(1, ih+1):
ic = img.crop((left, y-1, right, y))
ce = not not_empty(ic.histogram())
if pe != ce:
rows.append(y-1)
pe = ce
if not pe:
rows.append(ih)
if len(rows) == 0:
if self.unpaper_keep_size:
nimg = img
else:
nimg = img.crop((0, 0, 2, ih))
return pimg_ref.derive(nimg)
minh_empty = max(ih / 100, 5)
for i in range(len(rows)-3, 1, -2):
if rows[i+1] - rows[i] < minh_empty:
del rows[i+1]
del rows[i]
minh_ink = max(ih / 100, 5)
nh = 0
for i in range(0, len(rows) - 2, 2):
inkh = rows[i+1] - rows[i]
ninkh = rows[i+3] - rows[i+2]
nh = nh + inkh
if inkh < minh_ink or ninkh < minh_ink:
nh = nh + minh_empty
else:
nh = nh + rows[i+2] - rows[i+1]
nh += rows[-1] - rows[-2]
nw = right - left
if self.unpaper_keep_size:
nw, nh = iw, ih
nimg = Image.new("L", (nw, nh))
nimg.paste(255, [0, 0, nw, nh])
else:
nimg = Image.new("L", (nw, nh))
cy = 0
for i in range(0, len(rows) - 2, 2):
inkh = rows[i+1] - rows[i]
ninkh = rows[i+3] - rows[i+2]
nimg.paste(img.crop((left, rows[i], right, rows[i+1])), (0, cy))
cy = cy + inkh
if inkh < minh_ink or ninkh < minh_ink:
eh = minh_empty
else:
eh = rows[i+2] - rows[i+1]
nimg.paste(255, (0, cy, nw, cy + eh))
cy = cy + eh
        nimg.paste(img.crop((left, rows[-2], right, rows[-1])), (0, cy))
return pimg_ref.derive(nimg)
class ColumnCondense(object):
def __init__(self, config):
object.__init__(self)
self.unpaper_keep_size = config.unpaper_keep_size
def convert(self, pimg_ref, out_file_name = None):
img = pimg_ref.get_image()
iw, ih = img.size
ethr = max(iw/500, 1)
def not_empty(h):
return sum(h[:-32]) > ethr
top = -1
bottom = ih
left = 0
right = iw
for y in range(1, ih+1):
ir = img.crop((0, y - 1, iw, y))
if not_empty(ir.histogram()):
top = y - 1
break
if top == -1:
if self.unpaper_keep_size:
nimg = img
else:
nimg = img.crop((0, 0, iw, 2))
return pimg_ref.derive(nimg)
for y in range(ih-1, top, -1):
ir = img.crop((0, y, iw, y+1))
if not_empty(ir.histogram()):
bottom = y+1
break
cols = []
pe = True
for x in range(1, iw+1):
ic = img.crop((x-1, top, x, bottom))
ce = not not_empty(ic.histogram())
if pe != ce:
cols.append(x-1)
pe = ce
if not pe:
cols.append(iw)
if len(cols) == 0:
if self.unpaper_keep_size:
nimg = img
else:
nimg = img.crop((0, 0, iw, 2))
return pimg_ref.derive(nimg)
minw_empty = max(iw / 100, 5)
for i in range(len(cols)-3, 1, -2):
if cols[i+1] - cols[i] < minw_empty:
del cols[i+1]
del cols[i]
minw_ink = max(iw / 100, 5)
nw = 0
for i in range(0, len(cols) - 2, 2):
inkw = cols[i+1] - cols[i]
ninkw = cols[i+3] - cols[i+2]
nw = nw + inkw
if inkw < minw_ink or ninkw < minw_ink:
nw = nw + minw_empty
else:
nw = nw + cols[i+2] - cols[i+1]
nw += cols[-1] - cols[-2]
nh = bottom
|