repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
JQIamo/artiq | artiq/frontend/artiq_flash.py | Python | lgpl-3.0 | 5,247 | 0 | #!/usr/bin/env python3
# Copyright (C) 2015 Robert Jordens <jordens@gmail.com>
import argparse
import os
import subprocess
import tempfile
import shutil
from artiq import __artiq_dir__ as artiq_dir
from artiq.frontend.bit2bin import bit2bin
def scripts_path():
p = ["share", "openocd", "scripts"]
if os.name == "nt":
p.insert(0, "Library")
p = os.path.abspath(os.path.join(
os.path.dirname(shutil.which("openocd")),
"..", *p))
return p
def get_argparser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="ARTIQ flashing/deployment tool",
epilog="""\
Valid actions:
* proxy: load the flash proxy gateware bitstream
* gateware: write gateware bitstream to flash
* bios: write bios to flash
* runtime: write runtime to flash
* storage: write storage image to flash
* load: load gateware bitstream into device (volatile but fast)
* start: trigger the target to (re)load its gateware bitstream from flash
Prerequisites:
* Connect the board through its/a JTAG adapter.
* Have OpenOCD installed and in your $PATH.
* Have access to the JTAG adapter's devices. Udev rules from OpenOCD:
'sudo cp openocd/contrib/99-openocd.rules /etc/udev/rules.d'
and replug the device. Ensure you are member of the
plugdev group: 'sudo adduser $USER plugdev' and re-login.
""")
parser.add_argument("-t", "--target", default="kc705",
help="target board, default: %(default)s")
parser.add_argument("-m", "--adapter", default="nist_clock",
help="target adapter, default: %(default)s")
parser.add_argument("--target-file", default=None,
help="use alternative OpenOCD target file")
parser.add_argument("-f", "--storage", help="write file to storage area")
parser.add_argument("-d", "--dir", help="look for files in this directory")
parser.add_argument("action", metavar="ACTION", nargs="*",
default="proxy gateware bios runtime start".split(),
help="actions to perform, default: %(de | fault)s")
return parser
def main():
| parser = get_argparser()
opts = parser.parse_args()
config = {
"kc705": {
"chip": "xc7k325t",
"start": "xc7_program xc7.tap",
"gateware": 0x000000,
"bios": 0xaf0000,
"runtime": 0xb00000,
"storage": 0xb80000,
},
}[opts.target]
if opts.dir is None:
opts.dir = os.path.join(artiq_dir, "binaries",
"{}-{}".format(opts.target, opts.adapter))
if not os.path.exists(opts.dir) and opts.action != ["start"]:
raise SystemExit("Binaries directory '{}' does not exist"
.format(opts.dir))
conv = False
prog = []
prog.append("init")
for action in opts.action:
if action == "proxy":
proxy_base = "bscan_spi_{}.bit".format(config["chip"])
proxy = None
for p in [opts.dir, os.path.expanduser("~/.migen"),
"/usr/local/share/migen", "/usr/share/migen"]:
proxy_ = os.path.join(p, proxy_base)
if os.access(proxy_, os.R_OK):
proxy = "jtagspi_init 0 {{{}}}".format(proxy_)
break
if not proxy:
raise SystemExit(
"proxy gateware bitstream {} not found".format(proxy_base))
prog.append(proxy)
elif action == "gateware":
bin = os.path.join(opts.dir, "top.bin")
if not os.access(bin, os.R_OK):
bin_handle, bin = tempfile.mkstemp()
bit = os.path.join(opts.dir, "top.bit")
conv = True
prog.append("jtagspi_program {{{}}} 0x{:x}".format(
bin, config["gateware"]))
elif action == "bios":
prog.append("jtagspi_program {{{}}} 0x{:x}".format(
os.path.join(opts.dir, "bios.bin"), config["bios"]))
elif action == "runtime":
prog.append("jtagspi_program {{{}}} 0x{:x}".format(
os.path.join(opts.dir, "runtime.fbi"), config["runtime"]))
elif action == "storage":
prog.append("jtagspi_program {{{}}} 0x{:x}".format(
opts.storage, config["storage"]))
elif action == "load":
prog.append("pld load 0 {{{}}}".format(
os.path.join(opts.dir, "top.bit")))
elif action == "start":
prog.append(config["start"])
else:
raise ValueError("invalid action", action)
prog.append("exit")
try:
if conv:
bit2bin(bit, bin_handle)
if opts.target_file is None:
target_file = os.path.join("board", opts.target + ".cfg")
else:
target_file = opts.target_file
subprocess.check_call([
"openocd",
"-s", scripts_path(),
"-f", target_file,
"-c", "; ".join(prog),
])
finally:
if conv:
os.unlink(bin)
if __name__ == "__main__":
main()
|
CarnegieHall/metadata-matching | idMatching_flyers.py | Python | mit | 4,920 | 0.004675 | # !/usr/local/bin/python3.4.2
# ----Copyright (c) 2016 Carnegie Hall | The MIT License (MIT)----
# ----For the full license terms, please visit https://github.com/CarnegieHall/quality-control/blob/master/LICENSE----
# run script with 5 arguments:
# argument 0 is the script name
# argument 1 is the path to the Isilon HDD volume containing the assets
# argument 2 is the path to the metadata spreadsheet [~/Carnegie_Hall_Flyers.csv]
# argument 3 is the path ~/OPAS_ID_exports/OPAS_flyers_IDs_titles.csv
# argument 4 is the path to the folder y | ou want to save your unmatched performance IDs to
# argument 5 is the harddrive ID/volume that will be added to the output filename (E.g. ABH_20150901)
import csv
import glob
import itertools
import json
import os
from os.path import isfile, join, split
import sys
filePath_1 = str(sys.argv[1])
filePath_2 = str(sys.argv[2])
filePath_3 = str(sys.argv[3])
filePath_4 = str(sys.argv[4])
fileDict = {}
flyerDict = {}
titleDict = | {}
##matchedList = []
unmatchedIDs = []
#Set a variable to equal the harddrive volume number, which is extracted from the file path
volume = sys.argv[len(sys.argv)-1]
#Extract filenames from the full file path and build dictionary
for full_filePath in glob.glob(os.path.join(filePath_1, '*.tif')):
file_name = os.path.basename(full_filePath)
file_flyerID = os.path.basename(full_filePath).split('_')[0]
fileDict[str(file_name)] = {}
fileDict[str(file_name)]['File Name'] = file_name
fileDict[str(file_name)]['Source Unique ID'] = file_flyerID
with open(filePath_2, 'rU') as f:
with open(filePath_3, encoding='utf-8') as g:
flyerData = csv.reader(f, dialect='excel', delimiter=',')
next(flyerData, None) # skip the headers
titleData = csv.reader(g, dialect='excel', delimiter=',')
for row in titleData:
event_id = row[0]
titleMatch_id = ''.join(['CONC', event_id])
text = row[1]
if not text:
text = '[No title available]'
titleDict[titleMatch_id] = text
for row in flyerData:
opas_id = row[0]
source_unique_id = row[1].strip()
collection = row[2]
if collection == 'Main Hall Flyers':
cortexFolder = 'CH_FLYERS_01'
event = row[3]
entities = row[4]
event_date = row[5]
event_year = row[6]
note = row[7]
try:
if opas_id != '':
opas_id = ''.join(['CONC', opas_id])
if event_year:
title = ''.join([titleDict[opas_id], ', ', event_year])
else:
opas_id = ''.join([cortexFolder])
title = event
flyerDict[str(source_unique_id)] = {}
flyerDict[str(source_unique_id)]['OPAS ID'] = opas_id
flyerDict[str(source_unique_id)]['Collection'] = collection
flyerDict[str(source_unique_id)]['Date 1 (YYYY/mm/dd)'] = event_date
flyerDict[str(source_unique_id)]['Note'] = note
flyerDict[str(source_unique_id)]['Title'] = title
#If OPAS ID from metadata spreadsheet is NOT in OPAS ID export, it will cause a KeyError
#This exception catches those errors, and adds the IDs to a list of unmatched IDs
#Since we added "CONC" to the OPAS ID above, we remove it here (opas_id[4:]) to allow for easier OPAS QC
except KeyError:
if opas_id not in unmatchedIDs:
unmatchedIDs.append(opas_id[4:])
##print (json.dumps(flyerDict, indent=4))
for key in fileDict:
file_flyerID = fileDict[key]['Source Unique ID']
if file_flyerID in flyerDict.keys():
fileDict[key]['OPAS ID'] = flyerDict[file_flyerID]['OPAS ID']
fileDict[key]['Collection'] = flyerDict[file_flyerID]['Collection']
fileDict[key]['Date 1 (YYYY/mm/dd)'] = flyerDict[file_flyerID]['Date 1 (YYYY/mm/dd)']
fileDict[key]['Note'] = flyerDict[file_flyerID]['Note']
fileDict[key]['Title'] = flyerDict[file_flyerID]['Title']
matchedFiles_name = ''.join([str(filePath_1), '/Central_OPASmatchedFiles_flyers_', volume, '.csv'])
unmatchedIDs_name = ''.join([str(filePath_4), '/unmatched_flyer_IDs_', volume, '.txt'])
# This writes the nested dictionary to a CSV file
fields = ['OPAS ID', 'Source Unique ID', 'Collection', 'Title', 'Date 1 (YYYY/mm/dd)', 'Note', 'File Name']
with open(matchedFiles_name, 'w', newline='') as csvfile:
w = csv.DictWriter(csvfile, fields)
w.writeheader()
for k in fileDict:
w.writerow({field: fileDict[k].get(field) for field in fields})
#This saves the unmatched OPAS IDs as a text file, so you can check the issues in OPAS
with open(unmatchedIDs_name, 'w') as h:
h.write(','.join(str(opas_id) for opas_id in unmatchedIDs)) |
sideshownick/Snaking_Networks | MyPython/gen2.py | Python | gpl-2.0 | 4,649 | 0.068187 | #!python
#from Numeric import *
from numpy import *
from scipy import *
from scipy import zeros
from scipy.linalg import *
from scipy import sparse
import os
#import time
#from pylab import save
from random import random
#global size
#global ratioC
def generate(size,ratioC):
N=2*size**2 #=size*(size+1) + size*(size-1) #=H + V
fval=zeros([4*(size+10)**2])
for i in range(0,(2*size+1)**2):
if random() < ratioC:
fval[i]=1
matC=[]
matR=[]
#horizontal components:
for cy in range(0,size+1):
for cx in range(1,size-1):
if random() < ratioC:
#os.system('echo "%d %d 1" >> matrixC.mat'%(cx+cy*(size-1)+1,cx+cy*(size-1)))
matC.append([cx+cy*(size-1)+1,cx+cy*(size-1)])
else:
#os.system('echo "%d %d 1" >> matrixR.mat'%(cx+cy*(size-1)+1,cx+cy*(size-1)))
matR.append([cx+cy*(size-1)+1,cx+cy*(size-1)])
#vertical components:
for cy in range(0,size):
for cx in range(1,size):
if random() < ratioC:
#os.system('echo "%d %d 1" >> matrixC.mat'%(cx+cy*(size-1)+size-1,cx+cy*(size-1)))
matC.append([cx+cy*(size-1)+size-1,cx+cy*(size-1)])
else:
#os.system('echo "%d %d 1" >> matrixR.mat'%(cx+cy*(size-1)+size-1,cx+cy*(size-1)))
matR.append([cx+cy*(size-1)+size-1,cx+cy*(size-1)])
#boundary:
#note there are (s+1)(s-1) internal nodes, plus 2 boundary nodes
for cy in range(0,size+1):
cx=1
#boundary 1
if random() < ratioC:
#os.system('echo "%d %d 1" >> matrixC.mat'%((size-1)*(size)+2,cy*(size-1)+1)) |
matC.append([(size+1)*(size-1)+1, cy*(size-1)+(cx-1)])
else:
#os.system('echo "%d %d 1" >> matrixR.mat'%((size-1)*(size)+2,cy*(size-1)+1))
matR.append([(size+1)*(size-1)+1, cy*(size-1)+(cx-1)])
#boundary 2
cx=size-1
if random() < ratioC:
#os.system('echo "%d %d 1" >> matrixC.mat'%((size-1)*(size)+1,(cy+1)*(size-1)))
m | atC.append([(size+1)*(size-1)+2, cy*(size-1)+1])
else:
#os.system('echo "%d %d 1" >> matrixR.mat'%((size-1)*(size)+1,(cy+1)*(size-1)))
matR.append([(size+1)*(size-1)+2, cy*(size-1)+1])
size1=2*size-1
size2=size+size+1
size1a=2*size-3
size2a=size+size-1
size0=size+1
spread=0.0
N=(size+1)*(size-1)
Np=(size+1)*(size-1)+2
LC=mat(zeros([Np, Np]))
LR=mat(zeros([Np, Np]))
LCi=[]
LCj=[]
LCv=[]
LRi=[]
LRj=[]
LRv=[]
for line in matC: #file('matrixC.mat'):
values = line[0], line[1], 1 #.split()
LC[int(values[0])-1,int(values[1])-1]-=(double(values[2]))
LC[int(values[1])-1,int(values[0])-1]-=(double(values[2]))
LC[int(values[1])-1,int(values[1])-1]+=(double(values[2]))
LC[int(values[0])-1,int(values[0])-1]+=(double(values[2]))
#os.system('echo "%d %d %f" >> matrixC.txt' %(double(values[0]),double(values[1]),double(values[2])+randvar))
if int(values[0]) < N+1 and int(values[1]) < N+1:
LCi.append(int(values[0])-1)
LCj.append(int(values[1])-1)
LCv.append(-(double(values[2])))
LCi.append(int(values[1])-1)
LCj.append(int(values[0])-1)
LCv.append(-(double(values[2])))
if int(values[0]) < N+1:
LCi.append(int(values[0])-1)
LCj.append(int(values[0])-1)
LCv.append(double(values[2]))
if int(values[1]) < N+1:
LCi.append(int(values[1])-1)
LCj.append(int(values[1])-1)
LCv.append(double(values[2]))
for line in matR: #file('matrixR.mat'):
values = line[0], line[1], 1 #.split()
LR[int(values[0])-1,int(values[1])-1]-=(double(values[2]))
LR[int(values[1])-1,int(values[0])-1]-=(double(values[2]))
LR[int(values[1])-1,int(values[1])-1]+=(double(values[2]))
LR[int(values[0])-1,int(values[0])-1]+=(double(values[2]))
#os.system('echo "%d %d %f" >> matrixR.txt' %(double(values[0]),double(values[1]),double(values[2])+randvar))
if int(values[0]) < N+1 and int(values[1]) < N+1:
LRi.append(int(values[0])-1)
LRj.append(int(values[1])-1)
LRv.append(-(double(values[2])))
LRi.append(int(values[1])-1)
LRj.append(int(values[0])-1)
LRv.append(-(double(values[2])))
if int(values[0]) < N+1:
LRi.append(int(values[0])-1)
LRj.append(int(values[0])-1)
LRv.append(double(values[2]))
if int(values[1]) < N+1:
LRi.append(int(values[1])-1)
LRj.append(int(values[1])-1)
LRv.append(double(values[2]))
LC2 = sparse.coo_matrix((LCv,(LCi,LCj)),shape=(N,N)).tocsr()
LR2 = sparse.coo_matrix((LRv,(LRi,LRj)),shape=(N,N)).tocsr()
LC1=LC2#.ensure_sorted_indices()
LR1=LR2#.ensure_sorted_indices()
return LC, LR, LC1, LR1
if __name__ == '__main__':
print generate(4, 0.4)
|
drincruz/slidedecker | config.py | Python | mit | 838 | 0.004773 | # Statement for enabling the development environment
DEBUG = True
# Define the application directory
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Define the database - we are wor | king with
# SQLite for this example
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'app.db')
DATABASE_CONNECT_OPTIONS = {}
# Application threads. A common general assumption is
# using 2 per available processor cores - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2
# Enable protection agains *Cross-site | Request Forgery (CSRF)*
CSRF_ENABLED = True
# Use a secure, unique and absolutely secret key for
# signing the data.
CSRF_SESSION_KEY = "secret"
# Secret key for signing cookies
SECRET_KEY = "secret"
# Version Info
VERSION = '0.3.7'
|
tombstone/models | research/neural_gpu/wmt_utils.py | Python | apache-2.0 | 16,455 | 0.008508 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is | distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for downloading data from WMT, tokenizing, vocabularies."""
from __future__ import print_function
import gzip
import os
import | re
import tarfile
from six.moves import urllib
import tensorflow as tf
# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"
_GO = b"_GO"
_EOS = b"_EOS"
_UNK = b"_CHAR_UNK"
_SPACE = b"_SPACE"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK, _SPACE]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
SPACE_ID = 4
# Regular expressions used to tokenize.
_CHAR_MARKER = "_CHAR_"
_CHAR_MARKER_LEN = len(_CHAR_MARKER)
_SPEC_CHARS = "" + chr(226) + chr(153) + chr(128)
_PUNCTUATION = "][.,!?\"':;%$#@&*+}{|><=/^~)(_`,0123456789" + _SPEC_CHARS + "-"
_WORD_SPLIT = re.compile("([" + _PUNCTUATION + "])")
_OLD_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
# URLs for WMT data.
_WMT_ENFR_TRAIN_URL = "http://www.statmt.org/wmt10/training-giga-fren.tar"
_WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/dev-v2.tgz"
def maybe_download(directory, filename, url):
"""Download filename from url unless it's already in directory."""
if not tf.gfile.Exists(directory):
print("Creating directory %s" % directory)
os.mkdir(directory)
filepath = os.path.join(directory, filename)
if not tf.gfile.Exists(filepath):
print("Downloading %s to %s" % (url, filepath))
filepath, _ = urllib.request.urlretrieve(url, filepath)
statinfo = os.stat(filepath)
print("Successfully downloaded", filename, statinfo.st_size, "bytes")
return filepath
def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path."""
print("Unpacking %s to %s" % (gz_path, new_path))
with gzip.open(gz_path, "rb") as gz_file:
with open(new_path, "wb") as new_file:
for line in gz_file:
new_file.write(line)
def get_wmt_enfr_train_set(directory):
"""Download the WMT en-fr training corpus to directory unless it's there."""
train_path = os.path.join(directory, "giga-fren.release2.fixed")
if not (tf.gfile.Exists(train_path +".fr") and
tf.gfile.Exists(train_path +".en")):
corpus_file = maybe_download(directory, "training-giga-fren.tar",
_WMT_ENFR_TRAIN_URL)
print("Extracting tar file %s" % corpus_file)
with tarfile.open(corpus_file, "r") as corpus_tar:
corpus_tar.extractall(directory)
gunzip_file(train_path + ".fr.gz", train_path + ".fr")
gunzip_file(train_path + ".en.gz", train_path + ".en")
return train_path
def get_wmt_enfr_dev_set(directory):
"""Download the WMT en-fr training corpus to directory unless it's there."""
dev_name = "newstest2013"
dev_path = os.path.join(directory, dev_name)
if not (tf.gfile.Exists(dev_path + ".fr") and
tf.gfile.Exists(dev_path + ".en")):
dev_file = maybe_download(directory, "dev-v2.tgz", _WMT_ENFR_DEV_URL)
print("Extracting tgz file %s" % dev_file)
with tarfile.open(dev_file, "r:gz") as dev_tar:
fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr")
en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en")
fr_dev_file.name = dev_name + ".fr" # Extract without "dev/" prefix.
en_dev_file.name = dev_name + ".en"
dev_tar.extract(fr_dev_file, directory)
dev_tar.extract(en_dev_file, directory)
return dev_path
def is_char(token):
if len(token) > _CHAR_MARKER_LEN:
if token[:_CHAR_MARKER_LEN] == _CHAR_MARKER:
return True
return False
def basic_detokenizer(tokens):
"""Reverse the process of the basic tokenizer below."""
result = []
previous_nospace = True
for t in tokens:
if is_char(t):
result.append(t[_CHAR_MARKER_LEN:])
previous_nospace = True
elif t == _SPACE:
result.append(" ")
previous_nospace = True
elif previous_nospace:
result.append(t)
previous_nospace = False
else:
result.extend([" ", t])
previous_nospace = False
return "".join(result)
old_style = False
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
if old_style:
for space_separated_fragment in sentence.strip().split():
words.extend(re.split(_OLD_WORD_SPLIT, space_separated_fragment))
return [w for w in words if w]
for space_separated_fragment in sentence.strip().split():
tokens = [t for t in re.split(_WORD_SPLIT, space_separated_fragment) if t]
first_is_char = False
for i, t in enumerate(tokens):
if len(t) == 1 and t in _PUNCTUATION:
tokens[i] = _CHAR_MARKER + t
if i == 0:
first_is_char = True
if words and words[-1] != _SPACE and (first_is_char or is_char(words[-1])):
tokens = [_SPACE] + tokens
spaced_tokens = []
for i, tok in enumerate(tokens):
spaced_tokens.append(tokens[i])
if i < len(tokens) - 1:
if tok != _SPACE and not (is_char(tok) or is_char(tokens[i+1])):
spaced_tokens.append(_SPACE)
words.extend(spaced_tokens)
return words
def space_tokenizer(sentence):
return sentence.strip().split()
def is_pos_tag(token):
"""Check if token is a part-of-speech tag."""
return(token in ["CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR",
"JJS", "LS", "MD", "NN", "NNS", "NNP", "NNPS", "PDT",
"POS", "PRP", "PRP$", "RB", "RBR", "RBS", "RP", "SYM", "TO",
"UH", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP",
"WP$", "WRB", ".", ",", ":", ")", "-LRB-", "(", "-RRB-",
"HYPH", "$", "``", "''", "ADD", "AFX", "QTR", "BES", "-DFL-",
"GW", "HVS", "NFP"])
def parse_constraints(inpt, res):
ntags = len(res)
nwords = len(inpt)
npostags = len([x for x in res if is_pos_tag(x)])
nclose = len([x for x in res if x[0] == "/"])
nopen = ntags - nclose - npostags
return (abs(npostags - nwords), abs(nclose - nopen))
def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
tokenizer=None, normalize_digits=False):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
data_path: data file that will be used to create vocabulary.
max_vocabulary_size: limit on the size of the created vocabulary.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not tf.gfile.Exists(vocabulary_path):
print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
vocab, chars = {}, {}
for c in _PUNCTUATION:
chars[c] = 1
# Read French file.
with tf.gfile.GFile(data_path + ".fr", mode="rb") as f:
counter = 0
for line_in in f:
line = " ".join(line_in.split())
counter += 1
if counter % 100000 == 0:
print(" processing fr line %d" % counter)
for c in line:
if c in chars:
chars[c] += 1
else:
chars[c] = 1
tokens = tokenizer(line) if tok |
jordanemedlock/psychtruths | temboo/core/Library/Amazon/Marketplace/Orders/__init__.py | Python | apache-2.0 | 1,089 | 0.00551 | from temboo.Library.Amazon.Marketplace.Orders.GetOrder import GetOrder, GetOrderInputSet, GetOrderResultSet, GetOrderChoreographyExecution
from temboo.Library.Amazon.Marketplace.Orders.GetServiceStatus import GetServiceStatus, GetServiceStatusInputSet, GetServiceStatusResultSet, GetServiceStatusChoreographyExecution
from temboo.Library.Amazon.Marketplace.Orders.ListOrderItems import ListOrderItems, ListOrderItemsInputSet, ListOrderItemsResultSet, ListOrderItemsChoreographyExecution
from temboo.Library.Amazon.Marketplace.Orders.ListOrders import ListOrders, ListOrdersInputSet, ListOrdersResultSet, ListOrdersChoreographyExecution
from temboo.Library.Amazon.Marketplace.Orders.ListOrdersWithBuyerE | mail import ListOrdersWithBuyerEmail, ListOrdersWithBuyerEmailInputSet, ListOrdersWithBuyerEmailResultSet, ListOrdersWithBuyerEmailChoreographyExecution
from temboo.Library.Amazon.Ma | rketplace.Orders.ListOrdersWithSellerOrderId import ListOrdersWithSellerOrderId, ListOrdersWithSellerOrderIdInputSet, ListOrdersWithSellerOrderIdResultSet, ListOrdersWithSellerOrderIdChoreographyExecution
|
jtoppins/beaker | IntegrationTests/src/bkr/inttest/client/test_job_delete.py | Python | gpl-2.0 | 5,026 | 0.002786 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
from unittest2 import SkipTest
from turbogears.database import session
from bkr.inttest import data_setup, with_transaction, start_process, \
stop_process, CONFIG_FILE, edit_file
from bkr.inttest.client import run_client, create_client_config, ClientError, \
ClientTestCase
class JobDeleteTest(ClientTestCase):
@with_transaction
def setUp(self):
self.user = data_setup.create_user(password=u'asdf')
self.job = data_setup.create_completed_job(owner=self.user)
self.client_config = create_client_config(username=self.user.user_name,
password='asdf')
def test_delete_group_job(self):
with session.begin():
group = data_setup.create_group()
user = data_setup.create_user(password='password')
u | ser2 = data_setup.create_user()
group.add_member(user)
group.add_member(user2)
self.job.group = group
self.j | ob.owner = user2
client_config = create_client_config(username=user.user_name,
password='password')
out = run_client(['bkr', 'job-delete', self.job.t_id],
config=client_config)
self.assert_(out.startswith('Jobs deleted:'), out)
self.assert_(self.job.t_id in out, out)
def test_delete_job(self):
out = run_client(['bkr', 'job-delete', self.job.t_id],
config=self.client_config)
self.assert_(out.startswith('Jobs deleted:'), out)
self.assert_(self.job.t_id in out, out)
def test_delete_others_job(self):
with session.begin():
other_user = data_setup.create_user(password=u'asdf')
other_job = data_setup.create_completed_job(owner=other_user)
try:
out = run_client(['bkr', 'job-delete', other_job.t_id],
config=self.client_config)
fail('should raise')
except ClientError, e:
self.assert_("don't have permission" in e.stderr_output)
def test_cant_delete_group_mates_job(self):
# The test_delete_group_job case above is similar, but here the job is
# *not* declared as a group job, therefore we don't have permission to
# delete it.
with session.begin():
group = data_setup.create_group()
mate = data_setup.create_user(password=u'asdf')
test_job = data_setup.create_completed_job(owner=mate)
group.add_member(self.user)
group.add_member(mate)
try:
run_client(['bkr', 'job-delete', test_job.t_id],
config=self.client_config)
self.fail('We should not have permission to delete %s' % \
test_job.t_id)
except ClientError, e:
self.assertIn("You don't have permission to delete job %s" %
test_job.t_id, e.stderr_output)
def test_delete_job_with_admin(self):
with session.begin():
other_user = data_setup.create_user(password=u'asdf')
tag = data_setup.create_retention_tag(name=u'myblahtag')
job1 = data_setup.create_completed_job(owner=other_user)
job2 = data_setup.create_completed_job(owner=other_user, \
retention_tag=tag.tag)
# As the default admin user
# Admin can delete other's job with job ID
out = run_client(['bkr', 'job-delete', job1.t_id])
self.assert_(out.startswith('Jobs deleted:'), out)
self.assert_(job1.t_id in out, out)
# Admin can not delete other's job with tags
out = run_client(['bkr', 'job-delete', '-t%s' % tag.tag])
self.assert_(out.startswith('Jobs deleted:'), out)
self.assert_(job2.t_id not in out, out)
# https://bugzilla.redhat.com/show_bug.cgi?id=595512
def test_invalid_taskspec(self):
try:
run_client(['bkr', 'job-delete', '12345'])
fail('should raise')
except ClientError, e:
self.assert_('Invalid taskspec' in e.stderr_output)
# https://bugzilla.redhat.com/show_bug.cgi?id=990943
def test_zero_value_completeDays(self):
try:
run_client(['bkr', 'job-delete', '--completeDays', '0'])
self.fail('Must raise')
except ClientError as e:
self.assertIn('Please pass a positive integer to completeDays', e.stderr_output)
# https://bugzilla.redhat.com/show_bug.cgi?id=990943
def test_negative_value_completeDays(self):
try:
run_client(['bkr', 'job-delete', '--completeDays', '-1'])
self.fail('Must raise')
except ClientError as e:
self.assertIn('Please pass a positive integer to completeDays', e.stderr_output)
|
zlorb/mitmproxy | test/mitmproxy/tools/console/test_commander.py | Python | mit | 2,995 | 0 |
from mitmproxy.tools.console.commander import commander
from mitmproxy.test import taddons
class TestListCompleter:
def test_cycle(self):
tests = [
[
"",
["a", "b", "c"],
["a", "b", "c", "a"]
],
[
"xxx",
["a", "b", "c"],
["xxx", "xxx", "xxx"]
],
[
"b",
["a", "b", "ba", "bb", "c"],
["b", "ba", "bb", "b"]
],
]
for start, options, cycle in tests:
c = commander.ListCompleter(start, options)
for expected in cycle:
assert c.cyc | le() == expected
class TestCommandBuffer:
def test | _backspace(self):
tests = [
[("", 0), ("", 0)],
[("1", 0), ("1", 0)],
[("1", 1), ("", 0)],
[("123", 3), ("12", 2)],
[("123", 2), ("13", 1)],
[("123", 0), ("123", 0)],
]
with taddons.context() as tctx:
for start, output in tests:
cb = commander.CommandBuffer(tctx.master)
cb.text, cb.cursor = start[0], start[1]
cb.backspace()
assert cb.text == output[0]
assert cb.cursor == output[1]
def test_left(self):
cursors = [3, 2, 1, 0, 0]
with taddons.context() as tctx:
cb = commander.CommandBuffer(tctx.master)
cb.text, cb.cursor = "abcd", 4
for c in cursors:
cb.left()
assert cb.cursor == c
def test_right(self):
cursors = [1, 2, 3, 4, 4]
with taddons.context() as tctx:
cb = commander.CommandBuffer(tctx.master)
cb.text, cb.cursor = "abcd", 0
for c in cursors:
cb.right()
assert cb.cursor == c
def test_insert(self):
tests = [
[("", 0), ("x", 1)],
[("a", 0), ("xa", 1)],
[("xa", 2), ("xax", 3)],
]
with taddons.context() as tctx:
for start, output in tests:
cb = commander.CommandBuffer(tctx.master)
cb.text, cb.cursor = start[0], start[1]
cb.insert("x")
assert cb.text == output[0]
assert cb.cursor == output[1]
def test_cycle_completion(self):
with taddons.context() as tctx:
cb = commander.CommandBuffer(tctx.master)
cb.text = "foo bar"
cb.cursor = len(cb.text)
cb.cycle_completion()
def test_render(self):
with taddons.context() as tctx:
cb = commander.CommandBuffer(tctx.master)
cb.text = "foo"
assert cb.render()
def test_flatten(self):
with taddons.context() as tctx:
cb = commander.CommandBuffer(tctx.master)
assert cb.flatten("foo bar") == "foo bar"
|
Aracthor/cpp-maker | scripts/definitions.py | Python | mit | 3,275 | 0.00458 | #!/usr/bin/python3
## definitions.py for cpp-maker in /home/aracthor/programs/projects/cpp-maker
##
## Made by Aracthor
##
## Started on Mon Sep 7 10:09:33 2015 Aracthor
## Last Update Wed Sep 9 10:33:00 2015 Aracthor
##
def boolean_input(name, default):
question = name + " ? "
if (default == True):
question += "[Y/n]"
else:
question += "[y/N]"
result = input(question)
if result == "y" or result == "Y":
return True
elif result == "n" or result == "N":
return False
elif result == "":
return default
else:
return boolean_input(name, default)
def string_input(name):
return input(name)
# Unexhaustive, of course
NATIVE_TYPES=["bool", "char", "short", "int", "long", "float", "double"]
class Member:
def __init__(self):
self.valid = True
self.pure_type = ""
self.return_type = ""
self.name = ""
self.getter = True
self.include = None
def askUserForDefinition(self):
self.pure_type = string_input("Type: ")
self.valid = (self.pure_type != "")
| if self.valid:
self.name = string_input("Name: ")
self.getter = boolean_input("Getter", True)
self.calcReturnType()
def calcReturnType(self):
self.return_type = self.pure_type
object_type = self.pure_type
const = (object_type[: | 5] == "const")
if const:
object_type = object_type[6:]
object_pure_type = object_type.replace("&", "").replace("*", "")
pointer_or_ref = object_type != object_pure_type
if object_pure_type not in NATIVE_TYPES:
self.include = object_pure_type.replace("::", "/")
if not const:
self.return_type = "const " + self.return_type
if object_pure_type == object_type:
self.return_type += "&"
else:
if pointer_or_ref and not const:
self.return_type = "const " + self.return_type
elif const:
self.return_type = self.return_type[6:]
def isValid(self):
return self.valid
class Definition:
def __init__(self):
self.project = None
self.interface = False
self.default_constructor = False
self.copy_constructor = False
self.mother_class = None
self.final = False
self.members = []
self.getters = []
def askUserForDefinition(self, options):
if "emacs" in options:
self.project = string_input("Project: ")
self.interface = boolean_input("Interface", False)
self.mother_class = string_input("Mother class: ")
if not self.interface:
self.default_constructor = boolean_input("Default constructor", True)
self.copy_constructor = boolean_input("Copy constructor", False)
self.final = boolean_input("Final", False)
member = Member()
member.askUserForDefinition()
while (member.isValid()):
self.members.append(member)
if member.getter:
self.getters.append(member)
member = Member()
member.askUserForDefinition()
|
Endika/hr | hr_experience/models/hr_academic.py | Python | agpl-3.0 | 1,388 | 0 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
# This module copyright (C) 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# a | long with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class hr_academic(models.Model):
_name = 'hr.academic'
_inherit = 'hr.curriculum'
diploma | = fields.Char(string='Diploma', translate=True)
study_field = fields.Char(string='Field of study', translate=True,)
activities = fields.Text(string='Activities and associations',
translate=True)
|
Justasic/StackSmash | StackSmash/apps/blog/admin.py | Python | bsd-2-clause | 509 | 0 | from django.contrib import admin
from StackSmash.apps.blog.models import Post, Comment
class PostAdmin(admin.ModelAdmin):
list_display | = ['title']
list_filter = ['listed', 'pub_date']
search_fields = ['title', 'content']
date_heirachy = 'pub_date'
save_on_top = True
p | repopulated_fields = {"slug": ("title",)}
class CommentAdmin(admin.ModelAdmin):
display_fields = ["post", "author", "created"]
admin.site.register(Post, PostAdmin)
admin.site.register(Comment, CommentAdmin)
|
jdzero/foundation | foundation/backend/views/__init__.py | Python | mit | 46 | 0 | f | rom .base import *
from .controller import *
| |
billychasen/billots | billots/src/model/crypto.py | Python | mit | 1,409 | 0.002129 | # Copyright (c) 2017-present, Billy Chasen.
# See LICENSE for details.
# Created by Billy Chasen on 8/17/17.
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA512
from Crypto.Signature import PKCS1_v1_5
from billots.src.utils.utils import Utils
class Crypto:
def __init__(self, key_size = 4096):
self.key_size = key_size
def generate_keys(self):
"""
Generate a new private/public keypair
"""
keys = RS | A.generate(self.key_size)
return {"public": keys.publickey().exportKey(),
"private": keys.exportKey()}
@staticmethod
def hash(val):
"""
Hash the value with SHA512
"""
h = SHA512.new()
h.update(Utils.safe_enc(val))
return h.hexdigest()
@staticmethod
def sign(private_key, data):
"""
Sign something with a pri | vate key
"""
key = RSA.importKey(private_key)
hashed = SHA512.new(Utils.safe_enc(data))
signer = PKCS1_v1_5.new(key)
return signer.sign(hashed)
@staticmethod
def verify_signed(public_key, signature, data):
"""
Verify a signature
"""
key = RSA.importKey(public_key)
hashed = SHA512.new(Utils.safe_enc(data))
verifier = PKCS1_v1_5.new(key)
if verifier.verify(hashed, signature):
return True
return False
|
loopingz/nuxeo-drive | nuxeo-drive-client/nxdrive/tests/test_shared_folders.py | Python | lgpl-2.1 | 3,409 | 0.00088 | from nxdrive.tests.common_unit_test import UnitTestCase
from nxdrive.client import RemoteDocumentClient
from nxdrive.client import LocalClient
class TestSharedFolders(UnitTestCase):
def test_move_sync_root_child_to_user_workspace(self):
"""See https://jira.nuxeo.com/browse/NXP-14870"""
admin_remote_client = self.root_remote_client
user1_workspace_path = ('/default-domain/UserWorkspaces/'
'nuxeoDriveTestUser-user-1')
try:
# Get remote and local clients
remote_user1 = RemoteDocumentClient(
self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
self.version, password=self.password_1,
upload_tmp_dir=self.upload_tmp_dir)
remote_user2 = RemoteDocumentClient(
self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
self.version, password=self.password_2,
upload_tmp_dir=self.upload_tmp_dir)
local_user2 = LocalClient(self.local_nxdrive | _folder_2)
# Make sure personal workspace is created for user1
remote_user1.make_file_in_user_workspace('File in user workspace',
| filename='UWFile.txt')
# As user1 register personal workspace as a sync root
remote_user1.register_as_root(user1_workspace_path)
# As user1 create a parent folder in user1's personal workspace
remote_user1.make_folder(user1_workspace_path, 'Parent')
# As user1 grant Everything permission to user2 on parent folder
parent_folder_path = user1_workspace_path + '/Parent'
op_input = "doc:" + parent_folder_path
admin_remote_client.execute("Document.SetACE", op_input=op_input, user="nuxeoDriveTestUser_user_2",
permission="Everything", grant="true")
# As user1 create a child folder in parent folder
remote_user1.make_folder(parent_folder_path, 'Child')
# As user2 register parent folder as a sync root
remote_user2.register_as_root(parent_folder_path)
remote_user2.unregister_as_root(self.workspace)
# Start engine for user2
self.engine_2.start()
# Wait for synchronization
self.wait_sync(wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True)
# Check locally synchronized content
self.assertEquals(len(local_user2.get_children_info('/')), 1)
self.assertTrue(local_user2.exists('/Parent'))
self.assertTrue(local_user2.exists('/Parent/Child'))
# As user1 move child folder to user1's personal workspace
remote_user1.move(parent_folder_path + '/Child',
user1_workspace_path)
# Wait for synchronization
self.wait_sync(wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True)
# Check locally synchronized content
self.assertFalse(local_user2.exists('/Parent/Child'))
finally:
# Cleanup user1 personal workspace
if admin_remote_client.exists(user1_workspace_path):
admin_remote_client.delete(user1_workspace_path,
use_trash=False)
|
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/model_selection/plot_train_error_vs_test_error.py | Python | mit | 2,578 | 0.001164 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example with an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0 # only the top 10 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt. | subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show est | imated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
|
mlecours/fake-switches | fake_switches/brocade/command_processor/config_vlan.py | Python | apache-2.0 | 3,733 | 0.003482 | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fake_switches.command_processing.base_command_processor import BaseCommandProcessor
from fake_switches.switch_configuration import split_port_name, VlanPort
class ConfigVlanCommandProcessor(BaseCommandProcessor):
def __init__(self, switch_configuration, terminal_controller, logger, piping_processor, vlan):
BaseCommandProcessor.__init__(self, switch_configuration, terminal_controller, logger, piping_processor)
self.vlan = vlan
def get_prompt(self):
return "SSH@%s(config-vlan-%s)#" % (self.switch_configuration.name, self.vlan.number)
def do_untagged(self, *args):
port = self.switch_configuration.get_port_by_partial_name(" ".join(args))
if port is not None:
if port.trunk_vlans is None:
port.access_vlan = self.vlan.number
else:
port.trunk_native_vlan = self.vlan.number
else:
self.write_line("Invalid input -> %s" % " ".join(args[1:]))
self.write_line("Type ? for a list")
def do_no_untagged(self, *args):
port = self.switch_configuration.get_port_by_partial_name(" ".join(args))
if port.trunk_vlans is None:
port.access_vlan = None
else:
port.trunk_native_vlan = None
def do_tagged(self, *args):
port = self.switch_configuration.get_port_by_partial_name(" ".join(args))
if port is not None:
if port.trunk_vlans is None:
port.trunk_vlans = []
port.trunk_native_vlan = port.access_vlan or 1
port.access_vlan = None
if self.vlan.number not in port.trunk_vlans:
port.trunk_vlans.append(self.vlan.number)
else:
self.write_line("Invalid input -> %s" % " ".join(args[1:]))
self.write_line("Type ? for a list")
def do_no_tagged(self, *args):
port = self.switch_configuration.get_port_by_partial_name(" ".join(args))
port.trunk_vlans.remove(self.vlan.number)
if len(port.trunk_vlans) == 0:
port.trunk_vlans = None
if port.trunk_native_vlan and port.trunk_native_vlan != 1:
port.access_vlan = port.trunk_native_vlan
port.trunk_native_vlan = None
def do_router_interface(self, * | args):
actual_ve = next(
(p for p in self.switch_configuration.ports if isinstance(p, VlanPort) and p.vlan_id == self.vlan.number),
False)
if not actual_ve:
name = " ".join(args)
self.switch_configuration.add_port(self.switch_configuration.new("VlanPort", self.v | lan.number,
name))
else:
self.write_line("Error: VLAN: %s already has router-interface %s" % (
self.vlan.number, split_port_name(actual_ve.name)[1]))
def do_no_router_interface(self, *_):
self.switch_configuration.remove_port(next(
p for p in self.switch_configuration.ports if isinstance(p, VlanPort) and p.vlan_id == self.vlan.number))
def do_exit(self):
self.is_done = True
|
SSSD/sssd | src/tests/multihost/alltests/test_krb5.py | Python | gpl-3.0 | 6,587 | 0 | """ Automation of Krb5 tests
:subsystemteam: sst_idm_sssd
:upstream: yes
"""
from __future__ import print_function
import pytest
from sssd.testlib.common.utils import sssdTools
from sssd.testlib.common.expect import pexpect_ssh
from sssd.testlib.common.exceptions import SSHLoginException
@pytest.mark.usefixtures('setup_sssd_krb', 'create_posix_usersgroups')
@pytest.mark.krb5
class TestKrbWithLogin(object):
@pytest.mark.tier1
def test_0001_krb5_not_working_based_on_k5login(self,
multihost,
localusers,
backupsssdconf):
"""
:title: krb5: access_provider = krb5 is not
working in RHEL8 while restricting logins
based on .k5login file
:id: dfc177ff-58a7-4697-8d23-e444928c7092
:casecomponent: authselect
:customerscenario: True
:requirement: IDM-SSSD-REQ :: Authselect replaced authconfig
:bugzilla:
https://bugzilla.redhat.com/show_bug.cgi?id=1734094
"""
multihost.client[0].run_command(f'authselect '
f'select sssd '
f'with-files-access-provider')
multihost.client[0].service_sssd('stop')
client_tool = sssdTools(multihost.client[0])
domain_params = {'id_provider': 'files',
'access_provider': 'krb5'}
client_tool.sssd_conf('domain/example1', domain_params)
dmain_delete = {"ldap_user_home_directory": "/home/%u",
"ldap_uri": multihost.master[0].sys_hostname,
"ldap_search_base": "dc=example,dc=test",
"ldap_tls_cacert": "/etc/openldap/cacerts/cacert.pem",
"use_fully_qualified_names": "True"}
client_tool.sssd_conf('domain/example1', dmain_delete, action='delete')
multihost.client[0].service_sssd('start')
user = 'user5000'
client_hostname = multihost.client[0].sys_hostname
multihost.client[0].run_command(f'touch /home/{user}/.k5login')
multihost.client[0].run_command(f'chown {user} /home/{user}/.k5login')
multihost.client[0].run_command(f'chgrp {user} /home/{user}/.k5login')
multihost.client[0].run_command(f'chmod 664 /home/{user}/.k5login')
multihost.client[0].service_sssd('restart')
client = pexpect_ssh(client_hostname, user, 'Secret123', debug=False)
with pytest.raises(Exception):
client.login(login_timeout=10, sync_multiplier=1,
auto_prompt_reset=False)
multihost.client[0].run_command(f'rm -vf /home/{user}/.k5login')
multihost.client[0].service_sssd('restart')
client = pexpect_ssh(client_hostname, user, 'Secret123', debug=False)
try:
client.login(login_timeout=30, sync_multiplier=5,
auto_prompt_reset=False)
except SSHLoginException:
pytest.fail("%s failed to login" % user)
else:
client.logout()
multihost.client[0].run_command('authselect select sssd')
@pytest.mark.tier1_2
def test_0002_generating_lot_of(self, multihost, backupsssdconf):
"""
:title: SSSD is generating lot of LDAP
queries in a very large environment
:bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=17 | 72513
:id: 74a60320-e48b-11eb-ba19-845cf3eff344
:requirement: IDM-SSSD-REQ : LDAP Provider
:steps:
1. Start SSSD with any configuration
2. Call 'getent passwd username@domain'
3. Check the entry is present in data and timestamp cache
4. Now stop SSSD and remove the timestamp cache
5. Start SSSD and call 'sss_cache -E'
6. Call 'getent passwd username@domain'
7. Do the ldbsearch checks again
:expectedresult | s:
1. Should succeed
2. Should succeed
3. Should succeed
4. Should succeed
5. Should succeed
6. Should succeed
7. Should succeed
"""
multihost.client[0].service_sssd('restart')
cmd = multihost.client[0].run_command('getent passwd '
'foo1@example1')
assert 'foo1@example1' in cmd.stdout_text
multihost.client[0].run_command("yum install -y ldb-tools")
sssd_client = multihost.client[0]
cmd_search1 = sssd_client.run_command("ldbsearch "
"-H /var/lib/sss/db/"
"cache_example1.ldb -b "
"name=foo1@example1,"
"cn=users,cn=example1,"
"cn=sysdb")
assert 'name=foo1@example1,cn=users,cn=example1,cn=sysdb' \
in cmd_search1.stdout_text
cmd_search2 = sssd_client.run_command('ldbsearch -H '
'/var/lib/sss/db/'
'timestamps_example1.ldb '
'-b name=foo1@example1,'
'cn=users,cn=example1,'
'cn=sysdb')
assert "dn: name=foo1@example1,cn=users,cn=example1,cn=sysdb" in \
cmd_search2.stdout_text
multihost.client[0].run_command("rm -vf /var/lib/sss/db/"
"timestamps_example1.ldb")
multihost.client[0].service_sssd('restart')
multihost.client[0].run_command("sss_cache -E")
cmd = multihost.client[0].run_command('getent passwd '
'foo1@example1')
assert 'foo1@example1' in cmd.stdout_text
cmd_search1 = multihost.client[0].run_command(
"ldbsearch -H /var/lib/sss/db/cache_example1.ldb -b "
"name=foo1@example1,cn=users,cn=example1,cn=sysdb")
assert 'name=foo1@example1,cn=users,cn=example1,cn=sysdb' in \
cmd_search1.stdout_text
cmd_search2 = multihost.client[0].run_command(
'ldbsearch -H /var/lib/sss/db/timestamps_example1.ldb -b '
'name=foo1@example1,cn=users,cn=example1,cn=sysdb')
assert "dn: name=foo1@example1,cn=users,cn=example1,cn=sysdb" \
in cmd_search2.stdout_text
|
mitdbg/modeldb | client/verta/verta/endpoint/autoscaling/__init__.py | Python | mit | 239 | 0 | # -*- coding: utf-8 -*-
"""Autoscaling configuration for endpoints."""
from verta | ._internal_utils import documentation
from ._autoscaling import Autoscaling
documentation.reassign_module(
[Autoscaling | ],
module_name=__name__,
)
|
hnakamur/ansible | lib/ansible/plugins/lookup/__init__.py | Python | gpl-3.0 | 1,681 | 0.001785 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['LookupBase']
class LookupBase:
def __init__(self, loader=None, **kwargs):
self._loader = loader
self._display = display
def _flatten(self, terms):
ret = []
for term in terms:
if isinstance(term, (list, tuple)):
ret.extend(term)
else:
ret.append(term)
return ret
def _combine(self, a, b):
results = []
for x in a:
for y in b:
| results.append(self._flatten([x,y]))
return results
def _flatten_hash_to_list(self, terms):
ret = []
for ke | y in terms:
ret.append({'key': key, 'value': terms[key]})
return ret
|
KamLii/Databaes | Crate/forms.py | Python | mit | 302 | 0.003311 | from django import forms
from Crate.models import Discussion
class ReportForm(forms.Form):
| report = forms.CharField(label="Enter your report", max_length=500, widget=forms.Textarea)
class DiscussionForm(forms.ModelForm):
| class Meta:
model = Discussion
fields = ['comment']
|
magreiner/orchestration-tools | template_testing.py | Python | apache-2.0 | 859 | 0 | #!/bin/python3
# Testscript for template generation and deploying
from cloud_provider.amazon import Amazon
from template.template import CloudFormationTemplate
from pprint import pprint
if __name__ == "__main__":
# Amazon Settings
region = "eu-west-1"
stack_name = 'TestStack'
# Template settings
template_file = '/tmp/template.txt'
template_json_source_file = 'test-cluster | .json'
# Create template
cfn_template = CloudFormationTemplate()
cfn_template.load_json_source(template_json_source_file)
cfn_template.save_template_file(template_file)
# ppr | int(cfn_template.source)
# Connect to Amazon CloudFormation
aws = Amazon(region)
# Deploy CloudFormation Template
aws.deploy_stack(stack_name, template_file=template_file)
# Delete Stack if error occured
# aws.delete_stack(stack_name)
|
NESCent/Chimp-Recs-FieldObservations | ObservationParsing/src/obsparser/app.py | Python | cc0-1.0 | 4,869 | 0.010885 | '''
Created on Feb 8, 2011
@author: vgapeyev
'''
import csv, re
__nonspace_whitespace = re.compile(r"[\t\n\r\f\v]")
__long_whitespace = re.compile(r"[ ]{2,}")
def normalize_whitespace(str):
str = re.sub(__nonspace_whitespace, " ", str)
str = re.sub(__long_whitespace, " ", str)
return str
def empty_line(line):
if line.isspace():
return True
else:
return False
def at_beginning(matchobj, str):
if not matchobj:
return False
prefix = str[:matchobj.start()]
return prefix == "" or prefix.isspace()
def likely_chimp_name(prov_time, prov_rest):
return (prov_time == "PM" or prov_time == "AM") \
and prov_rest[0] == " " \
and prov_rest[1].isalpha()
def pad_zero(time):
if time.isdigit() and len(time) == 3:
return "0" + time
else:
return time
def pick_time(line):
# timepat_spec = r"(?P<time>\d\d\d\d)"
# timepat_spec = r"(?P<time>AM|PM|(\d{4}(\s*(-|until)\s*\d{4})?(\s*(AM|PM))?))"
timepat_spec = r"(?P<time>AM|PM|(\d{3,4}(\s*(-|until)\s*\d{3,4})?(\s*(AM|PM))?))"
timepat = re.compile(timepat_spec)
time_match = re.search(timepat, line)
if time_match and at_beginning(time_match, line):
time = time_match.group("time")
rest = line[time_match.end("time"):]
if not likely_chimp_name(time, rest):
return (pad_zero(time), rest.lstrip())
else: return ("", line)
else:
return ("", line)
def pick_recnum(line):
# pat_spec = r"N-(?P<animal>[a-zA-Z]+)-(?P<num>\d+)"
# pat_spec = r"N-(?P<animal>[a-zA-Z]+)-(?P<num>\d+\w*)"
pat_spec = r"[Nn]\s*(- | |_|=)=?\s*(?P<animal>[a-zA-Z]+)\s*(-|_)?\s*(?P<num>\d+\w*)"
pat = re.compile(pat_spec)
match = re.search(pat, line)
if match and at_beginning(match, line):
equip = "N"
animal = match.group("animal").upper()
num = match.group("num")
rest = line[match.end():]
| return ((equip, animal, num), rest.lstrip())
else:
return (("", "", ""), line)
def parse_line(line):
(time, line) = pick_time(line)
(recnum, line) = pick_recnum(line)
text = normalize_whitespace(line.strip())
return (time, recnum[0], recnum[1], recnum[2], text)
def parse_one_file(src_file, dest_file):
#print "Parsing %s" % src_file
#print "Output to %s" % dest_file
fin = open(src_file)
fout = open(dest_file, "w")
csv_writer = csv.writer(fout)
count = 0
for line in fin:
count = count + 1
if not empty_line(line):
(time, equip, animal, num, text) = parse_line(line)
csv_writer.writerow([count, time, equip, animal, num, text])
fin.close()
fout.close()
__txt_fmt = "%-60.60s"
__csv_fmt = "%3.3s %5s %1.1s %3.3s %3s |%-120.120s|"
def display_parse(txt_fname, csv_fname):
txt_file = open(txt_fname)
csv_file = open(csv_fname)
csv_reader = csv.reader(csv_file)
txt_num = 1
for csv_line in csv_reader:
csv_num = int(csv_line[0])
while txt_num < csv_num:
txt_num = txt_num + 1
print __csv_fmt % ("", "", "", "", "", "",),
print ("#"+__txt_fmt+"#") % txt_file.readline().rstrip()
txt_num = txt_num + 1
print __csv_fmt % tuple(csv_line),
print ("$"+__txt_fmt+"$") % txt_file.readline().rstrip()
txt_file.close()
csv_file.close()
def compare_parses(old_fname, new_fname):
old_file, new_file = open(old_fname), open(new_fname)
old_reader, new_reader = csv.reader(old_file), csv.reader(new_file)
for (old_line, new_line) in zip(old_reader, new_reader):
if old_line != new_line:
print ("o"+__csv_fmt) % tuple(old_line)
print ("n"+__csv_fmt) % tuple(new_line)
print ""
old_file.close(), new_file.close()
def main():
import optparse, sys
p = optparse.OptionParser()
p.set_usage("%prog source_file dest_file")
p.set_description("TODO description")
opt, args = p.parse_args()
if len(args) != 2:
sys.stderr.write(p.get_usage())
raise SystemExit(1)
src_file = args[0]
dest_file = args[1]
parse_one_file(src_file, dest_file)
def main_hardwired(base):
infile = "test_data/inputs/%s.txt" % base
outfile = "test_data/outputs/%s.csv" % base
parse_one_file(infile, outfile)
def display_hardwired(base):
infile = "test_data/inputs/%s.txt" % base
outfile = "test_data/work/%s.csv" % base
display_parse(infile, outfile)
def compare_hardwired(base):
workfile = "test_data/work/%s.csv" % base
outfile = "test_data/outputs/%s.csv" % base
compare_parses(workfile, outfile)
if __name__ == '__main__':
#main()
#main_hardwired("1971-07-15")
#display_hardwired("1971-07-14")
compare_hardwired("1971-07-14") |
bitmazk/django-unshorten | manage.py | Python | mit | 259 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "unshorten.tests.setti | ngs")
from djang | o.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
wittrup/crap | python/keyboard.py | Python | mit | 3,048 | 0.006234 | import ctypes
from ctypes import wintypes
import time
user32 = ctypes.WinDLL('user32', use_last_error=True)
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_UNICODE = 0x0004
KEYEVENTF_SCANCODE = 0x0008
MAPVK_VK_TO_VSC = 0
# msdn.microsoft.com/en-us/library/dd375731
VK_TAB = 0x09
VK_SHIFT = 0x10
VK_CONTROL = 0x11
VK_MENU = 0x12
key_V = 0x56
# C struct definitions
wintypes.ULONG_PTR = wintypes.WPARAM
class MOUSEINPUT(ctypes.Structure):
_fields_ = (("dx", wintypes.LONG),
("dy", wintypes.LONG),
("mouseData", wintypes.DWORD),
("dwFlags", wintypes.DWORD),
("time", wintypes.DWORD),
("dwExtraInfo", wintypes.ULONG_PTR))
class KEYBDINPUT(ctypes.Structure):
_fields_ = (("wVk", wintypes.WORD),
("wScan", wintypes.WORD),
("dwFlags", wintypes.DWORD),
("time", wintypes.DWORD),
("dwExtraInfo", wintypes.ULONG_PTR))
def __init__(self, *args, **kwds):
super(KEYBDINPUT, self).__init__(*args, **kwds)
# some programs use the scan code even if KEYEVENTF_SCANCODE
# isn't set in dwFflags, so attempt to map the correct code.
if not self.dwFlags & KEYEVENTF_UNICODE:
self.wScan = user32.MapVirtualKeyExW(self.wVk,
MAPVK_VK_TO_VSC, 0)
class HARDWAREINPUT(ctypes.Structure):
_fields_ = (("uMsg", wintypes.DWORD),
("wParamL", wintypes.WORD),
("wParamH", wintypes.WORD))
class INPUT(ctypes.Structure):
class _INPUT(ctypes.Union):
_fields_ = (("ki", KEYBDINPUT),
("mi", MOUSEINPUT),
("hi", HARDWAREINPUT))
_anonymous_ = ("_input",)
_fields_ = (("type", wintypes.DWORD),
("_input", _INPUT))
LPINPUT = ctypes.POINTER(INPUT)
def _check_count(result, func, args):
if result == 0:
raise ctypes.WinError(ctypes.get_last_error())
return args
user32.SendInput.errcheck = _check_count
user32.SendInput.argtypes = (wintypes.UINT, # nInputs
| LPINPUT, # pInputs
ctypes.c_int) # cbSize
# Functions
def PressKey(hexKeyCode):
x = INPUT(type=INPUT_KEYBOARD,
ki=KEYBDINPUT(wVk=hexKeyCode))
user32.SendInput(1, ctypes.byref(x), ctypes.sizeof(x))
def ReleaseKey(hexKeyCode):
x = INPUT(type=INPUT_KEYBOARD,
| ki=KEYBDINPUT(wVk=hexKeyCode,
dwFlags=KEYEVENTF_KEYUP))
user32.SendInput(1, ctypes.byref(x), ctypes.sizeof(x))
def AltTab():
"""Press Alt+Tab and hold Alt key for 2 seconds
in order to see the overlay.
"""
PressKey(VK_MENU) # Alt
PressKey(VK_TAB) # Tab
ReleaseKey(VK_TAB) # Tab~
time.sleep(2)
ReleaseKey(VK_MENU) # Alt~
if __name__ == "__main__":
AltTab() |
popazerty/EG-2 | lib/python/Components/Converter/ConditionalShowHide.py | Python | gpl-2.0 | 1,460 | 0.037671 | from enigma import eTimer
from Converter import Converter
class ConditionalShowHide(Converter, object):
def __init__(self, argstr):
Converter.__init__(self, argstr)
args = argstr.split(',')
self.invert = "Invert" in args
self.blink = "Blink" in args
if self.blink:
self.blinktime = len(args) == 2 and args[1].isdigit() and int(args[1]) or 500
self.timer = eTimer()
self.timer.callback.append(self.blinkFunc)
else:
self.timer = None
d | ef blinkFunc(self):
if self.blinking:
for x in self.downstream_elements:
x.visible = not x.visible
def startBlinking(self):
self.blinking = True
self.timer.start(self.blinktime)
def stopBlinking(self):
self.blinking = False
for x in self.downstream_elements:
| if x.visible:
x.hide()
self.timer.stop()
def calcVisibility(self):
b = self.source.boolean
if b is None:
return True
b ^= self.invert
return b
def changed(self, what):
vis = self.calcVisibility()
if self.blink:
if vis:
self.startBlinking()
else:
self.stopBlinking()
else:
for x in self.downstream_elements:
x.visible = vis
def connectDownstream(self, downstream):
Converter.connectDownstream(self, downstream)
vis = self.calcVisibility()
if self.blink:
if vis:
self.startBlinking()
else:
self.stopBlinking()
else:
downstream.visible = self.calcVisibility()
def destroy(self):
if self.timer:
self.timer.callback.remove(self.blinkFunc)
|
ee08b397/LeetCode-4 | 070 Text Justification.py | Python | mit | 3,264 | 0.007353 | """
Given an array of words and a length L, format the text such that each line has exactly L characters and is fully (left
and right) justified.
You should pack your words in a greedy approach; that is, pack as many words as you can in each line. Pad extra spaces
' ' when necessary so that each line has exactly L characters.
|
Extra spaces between words should be distributed as evenly as possible. If the number of spaces o | n a line do not divide
evenly between words, the empty slots on the left will be assigned more spaces than the slots on the right.
For the last line of text, it should be left justified and no extra space is inserted between words.
For example,
words: ["This", "is", "an", "example", "of", "text", "justification."]
L: 16.
Return the formatted lines as:
[
"This is an",
"example of text",
"justification. "
]
Note: Each word is guaranteed not to exceed L in length.
click to show corner cases.
Corner Cases:
A line other than the last line might contain only one word. What should you do in this case?
In this case, that line should be left-justified.
"""
__author__ = 'Danyang'
class Solution:
def fullJustify(self, words, L):
"""
:param words: a list of str
:param L: int
:return: a list of str
"""
result = []
self.break_line(words, L, result)
return self.distribute_space(L, result)
def break_line(self, words, L, result):
if not words:
return
cur_length = -1
lst = []
i = 0
while i<len(words):
word = words[i]
cur_length += 1 # space in left justified
cur_length += len(word)
if cur_length>L: break
lst.append(word)
i += 1
result.append(lst)
self.break_line(words[i:], L, result)
def distribute_space(self, L, result):
new_result = []
for ind, line in enumerate(result):
word_cnt = len(line)
str_builder = []
space_cnt = L-sum(len(word) for word in line)
hole_cnt = word_cnt-1
if ind<len(result)-1:
if hole_cnt>0:
space = space_cnt/hole_cnt
remain = space_cnt%hole_cnt
for word in line[:-1]:
str_builder.append(word)
str_builder.append(" "*space)
if remain>0:
str_builder.append(" ")
remain -= 1
str_builder.append(line[-1])
else:
str_builder.append(line[-1])
str_builder.append(" "*space_cnt)
else: # last line, special handling
str_builder = [" ".join(line)]
str_builder.append(" "*(space_cnt-hole_cnt))
new_result.append("".join(str_builder))
return new_result
if __name__=="__main__":
print Solution().fullJustify(["This", "is", "an", "example", "of", "text", "justification."], 16)
print Solution().fullJustify(["What","must","be","shall","be."], 12) |
luo2chun1lei2/AgileEditor | ve/src/ViewDialogPreferences.py | Python | gpl-2.0 | 3,229 | 0.011375 | #-*- coding:utf-8 -*-
###########################################################
# 项目各种选项的对话框。
import os, string, logging
import ConfigParser
from gi.repository import Gtk, Gdk, GtkSource
from VeUtils import *
from ModelProject import ModelProject
from VeEventPipe import VeEventPipe
###########################################################
class ViewDialogPreferences(Gtk.Dialog):
# 显示当前项目各种配置,并可以进行修改。
def __init__(self, parent, setting):
self.parent = parent
self.setting = setting
Gtk.Dialog.__init__(self, "项目设定", parent, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
self.set_default_size(300, 400)
vbox = Gtk.VBox(spacing = 10)
###############################
## 样式
lbl_prj_name = Gtk.Label("样式")
lbl_prj_name.set_justify(Gtk.Justification.LEFT)
vbox.pack_start(lbl_prj_name, False, True, 0)
self.cmb_style = self._init_styles()
vbox.pack_start(self.cmb_style, True, True, 0)
###############################
## 语言(应该每个文件一个)TODO
lbl_src_path = Gtk.Label("代码路径")
vbox.pack_start(lbl_src_path, True, True, 0)
self.picker_src_path = Gtk.FileChooserButton.new('请选择一个文件夹 ',
Gtk.FileChooserAction.SELECT_FOLDER)
vbox.pack_start(self.picker_src_path, True, True, 1.0)
###############################
box = self.get_content_area()
box.add(vbox)
self.show_all()
def _init_styles(self):
styles = GtkSource.StyleSchemeManager.get_default().get_scheme_ids()
#styleScheme = styleSchemeManager.get_scheme("cobalt")
| #if styleScheme is not None:
# self.styleScheme = styleScheme # 不能丢弃
#src_buffer.set_style_scheme(self.styleScheme)
model = Gtk.ListStore(str)
found_index = -1
for i in range(len(styles)):
model.append([styles[i]])
if styles[i] == self.setting['style']:
found_index = i
cmb = Gtk.ComboBox.new_with_mod | el(model)
cell_render = Gtk.CellRendererText.new()
cmb.pack_start(cell_render, True)
cmb.add_attribute(cell_render, "text", 0)
cmb.set_active(found_index)
cmb.connect("changed", self.on_style_changed)
return cmb
def on_style_changed(self, combobox):
self.setting['style'] = combobox.get_active()
return True
@staticmethod
def show(parent, setting):
dialog = ViewDialogPreferences(parent, setting)
prj_name = None
prj_src_path = None
response = dialog.run()
if response == Gtk.ResponseType.OK:
pass
# 收集设定?TODO
dialog.destroy()
# 返回信息。
return setting
|
usrlocalben/pydux | test/test_apply_middleware.py | Python | mit | 1,791 | 0.002233 | from __future__ import absolute_import
import unittest
import mock
from pydux import create_store, apply_middleware
from .helpers.reducers import reducers
from .helpers.action_creators import add_todo, add_todo_if_empty
from .helpers.middleware import thunk
class TestApplyMiddleware(unittest.TestCase):
def test_wraps_dispatch_method_with_middleware_once(self):
def test(spy_on_methods):
def apply(methods):
spy_on_methods(methods)
| return lambda next: lambda action: next(action)
return apply
spy = mock.MagicMock()
store = apply_middleware(test(spy), thunk)(cre | ate_store)(reducers['todos'])
store['dispatch'](add_todo('Use Redux'))
store['dispatch'](add_todo('Flux FTW!'))
self.assertEqual(spy.call_count, 1)
args, kwargs = spy.call_args
self.assertEqual(sorted(list(args[0].keys())),
sorted(['get_state', 'dispatch']))
self.assertEqual(store['get_state'](),
[dict(id=1, text='Use Redux'), dict(id=2, text='Flux FTW!')])
def test_works_with_thunk_middleware(self):
store = apply_middleware(thunk)(create_store)(reducers['todos'])
store.dispatch(add_todo_if_empty('Hello'))
self.assertEqual(store['get_state'](), [
{
'id': 1,
'text': 'Hello'
}
])
store['dispatch'](add_todo('World'))
self.assertEqual(store['get_state'](), [
{
'id': 1,
'text': 'Hello'
},
{
'id': 2,
'text': 'World'
}
])
##TODO: add_todo_async
if __name__ == '__main__':
unittest.main()
|
adrianratnapala/elm0 | n0run.py | Python | isc | 10,558 | 0.022163 | #!/usr/bin/python3
"""
n0run.py -- a unit test runner.
n0run runs 0unit based tests programs and then does wierd things far beyond
the power of 0unit. We use n0run when we want to make sure that things fail
when and "how" they are supposed to, even if the "how" means the error
propagates all the way to the top level of the program.
Copyright (C) 2012, Adrian Ratnapala, under the ISC license. See file LICENSE.
"""
import sys
# errors -----------------------------------------------------
def warn(msg, x = None , errno=1, txt='warning') :
ERROR="\033[31m\033[1m0run {}: \033[0m".format(txt)
if x : sys.stderr.write(ERROR+"{}: {}\n".format(msg, x))
else : sys.stderr.write(ERROR+"{}\n".format(msg))
return errno
def die(msg, x = None , errno=1) :
warn("[%d] %s" % (errno, msg), x, errno, 'error')
sys.exit(errno)
class Error(Exception) : pass
class Fail( Error ) :
def __init__( s, msg, errno, command ) :
Error.__init__( s, msg )
s.errno = errno
s.msg = msg
s.command = command
s.args = (errno, msg, command)
class NoMatch(Error) : pass
class DuplicateTest(Error) : pass
# util -------------------------------------------------------
def lines_without_ansi(po) :
import re
ansi = re.compile(b"\033\[?.*?[@-~]")
endl = re.compile(b"\r?\n")
for line in po :
yield endl.sub( b'', ansi.sub(b'', line ) )
# data gathering ---------------------------------------------
def Maker(base=object) : # turns a function into a class with only a constructor
def dec(init) :
class cls() :
__doc__=init.__doc__
__init__ = init
cls.__name__=init.__name__
return cls
return dec
@Maker()
def RunData(s, command, source) :
import os
from subprocess import Popen, PIPE
s.command = command
s.source = source
test_dir = os.environ.get('TEST_DIR', None)
popen = Popen(s.command, stdout=PIPE, stderr=PIPE, cwd=test_dir)
s.out, s.err = popen.communicate()
s.errno = popen.wait()
# source -----------------------------------------------------
def scan_source(filename, def_re = None, cb = (lambda l,m : None) ) :
"""
Scans a named source file for lines matching the regex /def_re/. This
regex sould include a match object with name "n". The set of all
matches a returned, identified by the "n" match. For each match, an
optional callback function /cb(line,match)/, where line is the entire
line of text (FIX?: a python string) and "m" the match object resulting
from the regex.
If the regex "def_re" is omitted, a default is used that matches lines
of roughly the form
[static] int test_SOME_NAME([signature])
where items in are optional.
"""
import re
if not def_re :
storage_class = br"(static\s+)?"
type_and_name = br"int\s+(?P<n>test_[_a-zA-Z0-9]*)";
args=br"\(.*\)";
def_re = re.compile(b"\s*" + storage_class +
type_and_name + b"\s*" +
args );
tests = set()
with open(filename, "rb") as f:
for line in f:
m = def_re.match(line)
if not m : continue
cb(line, m)
tests.add( m.group('n').strip().decode('utf-8') )
return tests
# output scanning --------------------------------------------
def compile_matchers(sources) :
def showok(name, line) :
from sys import stdout
stdout.buffer.write(b'\033[32m\033[1mOK:\033[0m '+line+b'\n')
def compile_one(s) :
from re import compile
if type(s) != tuple :
raise Exception('match spec must be a tuple of' +
'the form (name, byte-regex, [act])')
n, b, *f = s
f = f[0] if f else s | howok
if not isinstance(b, bytes) :
raise Exception('regex must be bytes.')
return n, compile(b), f
return list(map(compile_one, sources))
match_passe | d = compile_matchers([ ('passed', b'^passed: (?P<n>test\S*)'), ])
match_failed = compile_matchers([ ('FAILED', b'^FAILED: [^:]+:[0-9]+:(?P<n>test\S*)') ])
match_allpassed = compile_matchers([ (None, b'^All [0-9]+ tests passed$')])
def scan_output(po, matchers = match_passed ) :
out = {}
err = []
# m=match name, s=set of names per mname, r=regex, a = callback ("act")
msra = [ (m, out.setdefault(m, set()), r, a) for m, r, a in matchers]
for line in lines_without_ansi(po) :
if not line.strip() : continue
for (mname, s,re,act) in msra :
m = re.match(line)
if m : break
else :
err.append(NoMatch("unmatched output line", line))
continue
if not mname:
continue
name = m.group('n').decode('utf-8');
if name in out :
err.append( DuplicateTest(
"Test '{}' found twice!".format( name)),
(name, mname) )
s.add(name)
act(name, line)
return out, err
# runner -----------------------------------------------------
class Runner :
matchers = match_passed + match_allpassed
command_pre = ['valgrind', '-q', '--leak-check=yes']
def __init__(s, command, source) :
s.data = RunData(s.command_pre + list(command), source)
s.check_output()
s.lines = s.data.out.split(b'\n')
# any one of these might be overriden
def scan_source(s) : return scan_source(s.data.source)
def scan_output(s) : return scan_output(s.lines, s.matchers)
def check_output(s):
if s.data.err != b'' :
sys.stderr.buffer.write(s.data.err)
yield Fail("test program wrote to stderr",
-1, s.data.command)
if s.data.errno :
yield Fail("test program failed",
s.data.errno, s.data.command)
class Fail_Runner(Runner) :
matchers = match_passed + match_failed
command_pre = ['valgrind', '-q']
def __init__(s, command, source, xerrno=None) :
s.xerrno = xerrno
Runner.__init__(s, command, source)
def check_output(s) :
if s.data.errno == 0 :
s.errno = -1
yield Fail("test program should have failed "
"but did not", -1, s.data.command)
if s.xerrno is None :
return
if s.data.errno != s.xerrno :
yield Fail("test program failed but not with %d" % s.xerrno,
s.data.errno, s.data.command)
def scan_output(s) :
out, oe = scan_output(s.lines, s.matchers)
err, ee = scan_output(s.data.err.split(b'\n'), s.err_matchers)
# FIX: check for duplicates.
out.update(err)
return out, oe + ee;
# CLI --------------------------------------------------------
def parse_argv(argv=None):
argv = argv or sys.argv
if(len(argv) < 3) :
die("usage: {} source_file test_command "
"... args to test command ...")
prog, source, *command = argv
assert(len(command) >= 1)
return command, source
def cli_scan_source(r) :
try : |
moradin/renderdoc | util/test/tests/Vulkan/VK_SPIRV_13_Shaders.py | Python | mit | 3,571 | 0.006161 | import renderdoc as rd
import rdtest
class VK_SPIRV_13_Shaders(rdtest.TestCase):
demos_test_name = 'VK_SPIRV_13_Shaders'
def check_capture(self):
action = self.find_action("Draw")
self.check(action is not None)
self.controller.SetFrameEvent(action.eventId, False)
pipe: rd.PipeState = self.controller.GetPipelineState()
refl: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Vertex)
disasm: str = self.controller.DisassembleShader(pipe.GetGraphicsPipelineObject(), refl, "")
if (refl.inputSignature[0].varName != 'pos' or refl.inputSignature[0].compCount != 3):
raise rdtest.TestFailureException("Vertex shader input 'pos' not reflected corre | ctly")
if (refl.inputSignature[1].varName != 'col' or refl.inputSignature[1].compCount != 4):
raise rdtest.TestFailureException( | "Vertex shader input 'col' not reflected correctly")
if (refl.inputSignature[2].varName != 'uv' or refl.inputSignature[2].compCount != 2):
raise rdtest.TestFailureException("Vertex shader input 'uv' not reflected correctly")
if (refl.outputSignature[0].varName != 'opos' or refl.outputSignature[0].compCount != 4 or refl.outputSignature[0].systemValue != rd.ShaderBuiltin.Position):
raise rdtest.TestFailureException("Vertex shader output 'opos' not reflected correctly")
if (refl.outputSignature[1].varName != 'outcol' or refl.outputSignature[1].compCount != 4):
raise rdtest.TestFailureException("Vertex shader output 'outcol' not reflected correctly")
if 'vertmain' not in disasm:
raise rdtest.TestFailureException("Vertex shader disassembly failed, entry point not found")
refl: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Fragment)
disasm: str = self.controller.DisassembleShader(pipe.GetGraphicsPipelineObject(), refl, "")
if (refl.inputSignature[0].varName != 'incol' or refl.inputSignature[0].compCount != 4):
raise rdtest.TestFailureException("Fragment shader input 'incol' not reflected correctly")
if (refl.outputSignature[0].varName != 'ocol' or refl.outputSignature[0].compCount != 4 or refl.outputSignature[0].systemValue != rd.ShaderBuiltin.ColorOutput):
raise rdtest.TestFailureException("Fragment shader output 'ocol' not reflected correctly")
if 'fragmain' not in disasm:
raise rdtest.TestFailureException("Fragment shader disassembly failed, entry point not found")
rdtest.log.success("shader reflection and disassembly as expected")
postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut, 0, action.numIndices)
postvs_ref = {
0: {
'vtx': 0,
'idx': 0,
'opos': [-0.5, 0.5, 0.0, 1.0],
'outcol': [0.0, 1.0, 0.0, 1.0],
},
1: {
'vtx': 1,
'idx': 1,
'opos': [0.0, -0.5, 0.0, 1.0],
'outcol': [0.0, 1.0, 0.0, 1.0],
},
2: {
'vtx': 2,
'idx': 2,
'opos': [0.5, 0.5, 0.0, 1.0],
'outcol': [0.0, 1.0, 0.0, 1.0],
},
}
self.check_mesh_data(postvs_ref, postvs_data)
rdtest.log.success("vertex output is as expected")
self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, 0.5, 0.5, [0.0, 1.0, 0.0, 1.0])
rdtest.log.success("picked value is as expected")
|
duedil-ltd/pyfilesystem | fs/__init__.py | Python | bsd-3-clause | 1,781 | 0.005053 | """
fs: a filesystem abstraction.
This module provides an abstract base class 'FS' that defines a consistent
interface to different kinds of filesystem, along with a range of concrete
implementations of this interface such as:
OSFS: access the local filesystem, through the 'os' module
TempFS: a temporary filesystem that's automatically cleared on exit
MemoryFS: a filesystem that exists only in memory
ZipFS: access a zipfile like a filesystem
SFTPFS: access files on a SFTP server
S3FS: access files stored in Amazon S3
"""
__version__ = "0.5.0"
__author__ = "Will McGugan (will@willmcgugan.com)"
# provide these by default so people can use 'fs.path.basename' etc.
from fs import errors
from fs import path
_thread_synchronize_default = True
def set_thread_synchronize_default(sync):
"""Sets the default thread synchronisation flag.
FS objects are made thread-safe through the use of a per-FS threading Lock
object. Since this can introduce an small overhead it can be disabled with
this function if the code is single-threaded.
:param sync: Set whether to use thread synchronisation for new FS objects
"""
global _thread_synchronization_default
_thread_synchronization_default = sync
# Store some ide | ntifiers in the fs namespace
import os
SEEK_CUR = os.SEEK_CUR
SEEK_END = os.SEEK_END
SEEK_SET = os.SEEK_SET
# Allow clean use of logging throughout the lib
import logging | as _logging
class _NullHandler(_logging.Handler):
def emit(self,record):
pass
_logging.getLogger("fs").addHandler(_NullHandler())
def getLogger(name):
"""Get a logger object for use within the pyfilesystem library."""
assert name.startswith("fs.")
return _logging.getLogger(name)
|
sunlightlabs/upwardly | src/movingup/utils/douglaspeucker.py | Python | bsd-3-clause | 2,902 | 0.006547 | # pure-Python Douglas-Peucker line simplification/generalization
#
# this code was written by Schuyler Erle <schuyler@nocat.net> and is
# made available in the public domain.
#
# the code was ported from a freely-licensed example at
# http://www.3dsoftware.com/Cartography/Programming/PolyLineReduction/
#
# the original page is no longer available, but is mirrored at
# http://www.mappinghacks.com/code/PolyLineReduction/
"""
>>> line = [(0,0),(1,0),(2,0),(2,1),(2,2),(1,2),(0,2),(0,1),(0,0)]
>>> simplify_points(line, 1.0)
[(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)]
>>> line = [(0,0),(0.5,0.5),(1,0),(1.25,-0.25),(1.5,.5)]
>>> simplify_points(line, 0.25)
[(0, 0), (0.5, 0.5), (1.25, -0.25), (1.5, 0.5)]
"""
import math
def simplify_p | oints (pts, tolerance):
anch | or = 0
floater = len(pts) - 1
stack = []
keep = set()
stack.append((anchor, floater))
while stack:
anchor, floater = stack.pop()
# initialize line segment
if pts[floater] != pts[anchor]:
anchorX = float(pts[floater][0] - pts[anchor][0])
anchorY = float(pts[floater][1] - pts[anchor][1])
seg_len = math.sqrt(anchorX ** 2 + anchorY ** 2)
# get the unit vector
anchorX /= seg_len
anchorY /= seg_len
else:
anchorX = anchorY = seg_len = 0.0
# inner loop:
max_dist = 0.0
farthest = anchor + 1
for i in range(anchor + 1, floater):
dist_to_seg = 0.0
# compare to anchor
vecX = float(pts[i][0] - pts[anchor][0])
vecY = float(pts[i][1] - pts[anchor][1])
seg_len = math.sqrt( vecX ** 2 + vecY ** 2 )
# dot product:
proj = vecX * anchorX + vecY * anchorY
if proj < 0.0:
dist_to_seg = seg_len
else:
# compare to floater
vecX = float(pts[i][0] - pts[floater][0])
vecY = float(pts[i][1] - pts[floater][1])
seg_len = math.sqrt( vecX ** 2 + vecY ** 2 )
# dot product:
proj = vecX * (-anchorX) + vecY * (-anchorY)
if proj < 0.0:
dist_to_seg = seg_len
else: # calculate perpendicular distance to line (pythagorean theorem):
dist_to_seg = math.sqrt(abs(seg_len ** 2 - proj ** 2))
if max_dist < dist_to_seg:
max_dist = dist_to_seg
farthest = i
if max_dist <= tolerance: # use line segment
keep.add(anchor)
keep.add(floater)
else:
stack.append((anchor, farthest))
stack.append((farthest, floater))
keep = list(keep)
keep.sort()
return [pts[i] for i in keep]
if __name__ == "__main__":
import doctest
doctest.testmod() |
stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/SGIS/texture_color_mask.py | Python | lgpl-3.0 | 714 | 0.026611 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw. | GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENS | ION_NAME = 'GL_SGIS_texture_color_mask'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_SGIS_texture_color_mask',error_checker=_errors._error_checker)
GL_TEXTURE_COLOR_WRITEMASK_SGIS=_C('GL_TEXTURE_COLOR_WRITEMASK_SGIS',0x81EF)
@_f
@_p.types(None,_cs.GLboolean,_cs.GLboolean,_cs.GLboolean,_cs.GLboolean)
def glTextureColorMaskSGIS(red,green,blue,alpha):pass
|
danielnyga/dnutils | src/dnutils/logs.py | Python | mit | 18,264 | 0.002245 | import json
import logging
import os
import re
import sys
import tempfile
import atexit
import warnings
import colored
import datetime
from .tools import ifnone
from .debug import _caller
from .threads import RLock, interrupted, Lock
from .tools import jsonify
import portalocker
FLock = portalocker.Lock
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
class FileHandler(logging.FileHandler):
def __init__(self, filename, mode='a', encoding=None, delay=False):
logging.File | Handler.__init__(self, filename, mode=mode, encoding=encoding, delay=delay)
self.timeformatstr = '%Y-%m-%d %H:%M:%S'
def emit(self, record):
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except Exception:
self.handleError(record)
def format(self, record):
return '{} - {} - {}' | .format(datetime.datetime.fromtimestamp(record.created).strftime(self.timeformatstr),
record.levelname,
' '.join(' '.join(map(str, record.msg)).split('\n')))
StreamHandler = logging.StreamHandler
_expose_basedir = '.exposure'
_exposures = None
_writelockname = '.%s.lock'
_MAX_EXPOSURES = 9999
exposure_dir = None
def set_exposure_dir(d):
global exposure_dir
exposure_dir = d
def tmpdir():
'''
Returns the path for temporary files.
On Unix systems, eg. mostly ``/tmp``
:return:
'''
with tempfile.NamedTemporaryFile(delete=True) as f:
return os.path.dirname(f.name)
class ExposureEmptyError(Exception): pass
class ExposureLockedError(Exception): pass
def active_exposures(name='/*'):
'''
Generates the names of all exposures that are currently active (system-wide).
:param name: a pattern that the list of exposure names can be filtered (supports the wildcard character *)
:return:
'''
tmp = tmpdir()
rootdir = ifnone(exposure_dir, tmp)
rootdir = os.path.join(rootdir, _expose_basedir)
for root, dirs, files in os.walk(rootdir):
for f in files:
if re.match(r'\.\w+\.lock', f): # skip file locks
continue
try:
tmplock = FLock(os.path.join(root, _writelockname % f), timeout=0, fail_when_locked=True)
tmplock.acquire()
except portalocker.LockException:
expname = '/'.join([root.replace(rootdir, ''), f])
tokens = expname.split('/')
patterns = name.split('/')
ok = False
for idx, pat in enumerate(patterns):
try:
repattern = '^%s$' % re.escape(pat).replace(r'\*', r'.*?')
ok = re.match(repattern, tokens[idx]) is not None
except IndexError:
ok = False
if not ok: break
else:
if ok:
yield expname
else:
tmplock.release()
class ExposureManager:
'''
Manages all instances of exposures.
'''
def __init__(self, basedir=None):
self.exposures = {}
basedir = ifnone(basedir, tmpdir())
self.basedir = os.path.join(basedir, _expose_basedir)
atexit.register(_cleanup_exposures)
self._lock = RLock()
def _create(self, name):
'''
Create a new exposure with name ``name``.
:param name:
:return:
'''
e = Exposure(name, self.basedir)
self.exposures[name] = e
return e
def get(self, name):
with self._lock:
if not name in self.exposures:
self.exposures[name] = Exposure(name, self.basedir)
return self.exposures.get(name)
def delete(self):
with self._lock:
for name, exposure in self.exposures.items():
exposure.delete()
def _cleanup_exposures(*_):
_exposures.delete()
# def exposures(basedir='.'):
# global _exposures
# _exposures = ExposureManager(basedir)
def expose(name, *data, ignore_errors=False):
'''
Expose the data ``data`` under the exposure name ``name``.
:param name:
:param data:
:return:
'''
global _exposures
if _exposures is None:
_exposures = ExposureManager(exposure_dir)
e = _exposures.get(name)
if data:
if len(data) == 1:
data = data[0]
e.dump(data, ignore_errors=ignore_errors)
return e.name
def inspect(name):
'''
Inspect the exposure with the name ``name``.
:param name:
:return:
'''
global _exposures
if _exposures is None:
_exposures = ExposureManager(exposure_dir)
if name in _exposures.exposures:
e = _exposures.exposures[name]
else:
e = _exposures.get(name)
try:
return e.load()
except IOError:
return None
def exposure(name):
'''
Get the exposure object with the given name.
:param name:
:return:
'''
global _exposures
if _exposures is None:
_exposures = ExposureManager(exposure_dir)
e = _exposures.get(name)
return e
class Exposure:
'''
This class implements a data structure for easy and lightweight exposure of
parts of a program's state. An exposure is, in essence, a read/write
wrapper around a regular file, which is being json data written to and read from.
'''
def __init__(self, name, basedir=None):
self._lock = RLock()
if sum([1 for c in name if c == '#']):
raise ValueError('exposure name may contain maximally one hash symbol: "%s"' % name)
self.flock = None
self.counter = 0
counter = 1
while 1:
name_ = name.replace('#', str(counter))
self._init(name_, basedir)
if not self.acquire(blocking=False):
if '#' not in name or counter >= _MAX_EXPOSURES:
raise ExposureLockedError()
counter += 1
else:
self.release()
break
def _init(self, name, basedir):
if basedir is None:
basedir = os.path.join(tmpdir(), _expose_basedir)
if not os.path.exists(basedir):
os.mkdir(basedir)
dirs = list(os.path.split(name))
if not dirs[0].startswith('/'):
raise ValueError('exposure names must start with "/"')
else:
dirs[0] = dirs[0].replace('/', '')
fname = dirs[-1]
fullpath = basedir
for d in dirs[:-1]:
fullpath = os.path.join(fullpath, d)
if not os.path.exists(fullpath):
os.mkdir(fullpath)
self.abspath = os.path.abspath(fullpath)
self.filepath = os.path.join(self.abspath, fname)
self.filename = fname
self.flockname = os.path.join(self.abspath, _writelockname % self.filename)
# acquire the lock if write access is required
self.flock = FLock(self.flockname, timeout=0, fail_when_locked=True)
self.name = name
def acquire(self, blocking=True, timeout=None):
'''
Acquire the exposure.
An exposure may only be acquired by one process at a time and acts like a re-entrant lock.
:param blocking:
:param timeout:
:return:
'''
with self._lock:
if self.counter > 0: # exposure can be re-entered
self.counter += 1
return True
if not blocking:
timeout = 0
elif blocking and timeout is None:
timeout = .5
ret = None
while ret is None and not interrupted():
with warnings.catch_warnings():
try:
ret = self.flock.acquire(timeout, fail_when_locked=False)
except portalocker.LockException:
if not blocking: break |
hal0x2328/neo-python | neo/Network/nodeweight.py | Python | mit | 2,320 | 0.002586 | from datetime import datetime
class NodeWeight:
SPEED_RECORD_COUNT = 3
SPEED_INIT_VALUE = 100 * 1024 ^ 2 # Start with a big speed of 100 MB/s
REQUEST_TIME_RECORD_COUNT = 3
def __init__(self, nodeid):
self.id: int = nodeid
self.speed = [self.SPEED_INIT_VALUE] * self.SPEED_RECORD_COUNT
self.timeout_count = 0
self.error_response_count = 0
now = dateti | me.utcnow().timestamp() * 1000 # milliseconds
self.request_time = [now] * self.REQUEST_TIME_RECORD_COUNT
def append_new_speed(self, speed) -> None:
# remove oldest
self.speed.pop(-1)
# add new
self.speed.insert(0, speed)
def append_new_request_time(self) -> None:
self.request_time.pop(-1)
now = datetime.utcnow().timestamp() * 1000 # m | illiseconds
self.request_time.insert(0, now)
def _avg_speed(self) -> float:
return sum(self.speed) / self.SPEED_RECORD_COUNT
def _avg_request_time(self) -> float:
avg_request_time = 0
now = datetime.utcnow().timestamp() * 1000 # milliseconds
for t in self.request_time:
avg_request_time += now - t
avg_request_time = avg_request_time / self.REQUEST_TIME_RECORD_COUNT
return avg_request_time
def weight(self):
# nodes with the highest speed and the longest time between querying for data have the highest weight
# and will be accessed first unless their error/timeout count is higher. This distributes load across nodes
weight = self._avg_speed() + self._avg_request_time()
# punish errors and timeouts harder than slower speeds and more recent access
if self.error_response_count:
weight /= self.error_response_count + 1 # make sure we at least always divide by 2
if self.timeout_count:
weight /= self.timeout_count + 1
return weight
def __lt__(self, other):
return self.weight() < other.weight()
def __repr__(self):
# return f"<{self.__class__.__name__} at {hex(id(self))}> w:{self.weight():.2f} r:{self.error_response_count} t:{self.timeout_count}"
return f"{self.id} {self._avg_speed():.2f} {self._avg_request_time():.2f} w:{self.weight():.2f} r:{self.error_response_count} t:{self.timeout_count}"
|
pokey/smartAutocomplete | pythonServer/iterateRuns.py | Python | mit | 3,283 | 0.005483 | #!/usr/bin/python
from execrunner import o, selo, selmo, cmdList, f
import subprocess, argparse
# Dataset prefix
dp = '/u/nlp/data/smart-autocomplete/datasets/'
constants = {
'python': '/u/nlp/packages/python-2.7.4/bin/python2.7',
'statePath': '/u/nlp/data/smart-autocomplete/state'
}
def main():
parser = \
argparse.ArgumentParser(description="iterate runs on nlp cluster")
parser.add_argument("-s", "--nlpServer", default='jacob.stanford.edu',
help="url of nlp cluster")
parser.add_argument("-d", "--srcDir", default='~/src/smartAutocomplete',
help="location of smartAutocomplete src on nlp cluster")
parser.add_argument("-v", "--addToView", default='')
args = parser.parse_args()
constants['srcDir'] = args.srcDir
constants['nlpServer'] = args.nlpServer
constants['addToView'] = args.addToView
cmds = \
cmdList('{python} server/runBenchmarks.py'.format(**constants),
selo('startAt', 2, 0, .5, .9),
selmo(('trainTokens', 'weightTraining', 'maxSamples'), 0,
(100000, None, 500), (1000000, 10000, 1000)),
selmo(('dataset', 'extension'), 1,
(dp + 'node', 'js'),
(dp + 'django', 'py'),
(dp + 'english-large-jokes', None),
(dp + 'english-small-sanity-check', None),
(dp + 'javascript-large-d3subset', 'js'),
(dp + 'javascript-medium-emile', 'js'),
(dp + 'python-large-web-py-subset', 'py'),
(dp + 'python-medium-si | ngularity-chess', 'py')),
selo('features', -2,
# KN
['scope', 'ngram'],
# Ngrams
['simple', 'prev', 'prevTwo', 'prevThree'],
# Basic features
['simple', 'path', 'filetype', 'prev', 'prevTwo', 'prevThree',
'prevPrev'],
# Experiment
['simple', 'path', 'prev'],
| # Individual features
['simple'],
['path'],
['filetype'],
['prev'],
['prevTwo'],
['prevThree'],
['prevPrev'],
['prevForm'],
['lineStart'],
['indentLevel'],
['dirH'],
['linePrefix'],
['scope'],
['ngram'],
# All features
None),
f("onlyIdentifiers"),
o("samplePeriod", 50),
o("statePath", constants["statePath"]),
selo("learningAlgorithm", 0, "counter", "weights", "perceptron",
"perceptronMixer", "bucketPerceptronMixer", "naiveBayes",
"bma", "grid"))
proc = subprocess.Popen('ssh {nlpServer} {python} '
'{srcDir}/server/populateTasks.py {srcDir} {addToView}'
.format(**constants), shell=True,
stdin=subprocess.PIPE)
proc.communicate('\n'.join(cmds)+'\n')
if __name__ == "__main__":
main()
|
jaantoots/bridgeview | render/render.py | Python | gpl-3.0 | 18,995 | 0.000211 | """Provides methods for rendering the labelled model."""
import json
import os
import hashlib
import glob
import numpy as np
import bpy # pylint: disable=import-error
from . import helpers
class Render():
"""Configure and render the scene.
Parameters are read from conf_file. During testing and setup one
can be generated with the default parameters. It is possible to
place the sun and the camera randomly and create the
renders. However, generating the sun and camera positions
beforehand allows doing all the visual renders first and the
semantic renders only afterwards (recommended, as semantic and
depth renders may break visual rendering setup).
Blender file should be set up with the correct settings: sky,
clouds, mist and Cycles parameters. Only Cycles samples and film
exposure are set from the configuration file with the expectation
that these might be necessary for finetuning the quality.
Configuration options:
landscape (list): List of object names that are not part of the
bridge. The first item is used for choosing the camera
position. Additional elements are excluded when calculating
the bounding sphere automatically. If spheres are provided in
a file, additional elements are optional.
sun_theta (list): Range of sun's polar angle.
sun_size (float): Size of the sun (sharpness of shadows).
sun_strength (float): Strength of sunlight (balance with exposure
determines relative brightness of sky.
sun_color (list): RGBA of the color of sunlight (#FFFFF8 seems
good and the scene is also influenced by the sky texture which
means there is no need to worry about the color too much).
camera_distance_factor (dict: mean, sigma): Relative camera
distance (in terms of bounding sphere radius) is from a
Gaussian distribution with given mean and sigma.
camera_clearance (list or float): Camera clearance above
landscape. List is used as a uniform range. Float is used as a
lower limit and camera_theta is used otherwise.
camera_floor (float): Absolute floor (Z position) for the camera
position. It might be useful to set this to the water level if
applicable.
camera_lens (dict: mean, log_sigma): Camera lens focal length is
drawn from lognormal distribution with the given mean (in mm)
and log_sigma.
camera_theta (list): Range of camera's polar angle. This is only
looked at if camera_clearance is not provided or is a float.
camera_noise (float): Noise to add to the camera angle to increase
viewpoint variety.
resolution (list: x, y): Resolution of rendered images.
film_exposure (float): Film exposure for visual renders (see note
at sun_strength).
cycles_samples (int): Number of samples to render, higher numbers
decrease noise but take longer.
clamp_indirect (float): Limit speckles caused by high intensity
reflections. If this is set to 0 (disables), get random white
pixels due to noisy reflections.
compositing_mist (float): Mist intensity, from no mist to
completely white surroundings at some distance. Conservative
values are recommended.
sky (dict): Sky configuration (see help for set_sky).
spheres (dict: name, (dict: centre, radius)): Positions of spheres
to use for positioning the camera.
lines (dict: name, (dict: start, end)): Lines to use for choosing
camera positions. Most other camera configuration parameters
are irrelevant when using this, but camera_sigma is required.
camera_sigma (float): Sigma of polar angle around horizontal when
choosing camera rotation using lines (otherwise irrelevant).
camera_location_noise (float): Noise to add to camera location
when using lines (otherwise irrelevant).
"""
def __init__(self, objects: list, conf_file=None):
"""Create Render object for specified Blender objects."""
# Load configuration
self.opts = {}
if conf_file is not None:
with open(conf_file) as file:
self.opts = json.load(file)
self._default()
# Initialise objects, terrain should be the first item in landscape
self.objects = objects[:]
self.landscape = None
self.landscape_tree = None
landscape_list = helpers.all_instances(
self.opts['landscape'][0], self.objects)
if len(landscape_list) > 0:
self.landscape = landscape_list[0]
self.landscape_tree = helpers.landscape_tree(self.landscape)
# Remove landscape for bounding sphere calculation
for obj_name in self.opts['landscape']:
for obj in helpers.all_instances(obj_name, self.objects):
self.objects.remove(obj)
# Initialise bounding spheres | for camera views
if self.opts.get('spheres') is None:
sphere = helpers.BoundingSphere()
self.opts['spheres'] = {}
self.opts['spheres']['default'] = sphere.find(self.objects)
# Convert camera lines if provided
if self.opts.get('lines') is not None:
| self.opts['lines'] = {name: {point: np.array(coords)
for point, coords in line.items()}
for name, line in self.opts['lines'].items()}
# Initialise things
self.sun = self.new_sun()
self.camera = self.new_camera()
def _default(self):
"""Read default configuration parameters if not given."""
default_file = os.path.join(os.path.dirname(__file__), 'render.json')
with open(default_file) as file:
defaults = json.load(file)
# This guarantees that all parameters exist. Only need to test
# for existence of "spheres" and "lines" in opts as these can
# genuinely be expected to be unset since that determines how
# camera positions are generated.
for key, value in defaults.items():
if self.opts.get(key) is None:
self.opts[key] = value
def write_conf(self, conf_file: str):
"""Write current configuration to file."""
with open(conf_file, 'w') as file:
json.dump(self.opts, file)
    def new_sun(self):
        """Add a new sun to the scene and set its parameters.

        Returns:
            The newly created Blender sun lamp object.
        """
        bpy.ops.object.lamp_add(type='SUN')
        # lamp_add leaves the new lamp as the active object
        sun = bpy.context.object
        # Set the parameters (size, and emission strength/colour from opts)
        sun.data.shadow_soft_size = self.opts['sun_size']
        emission = sun.data.node_tree.nodes['Emission']
        emission.inputs['Strength'].default_value = self.opts['sun_strength']
        emission.inputs['Color'].default_value = self.opts['sun_color']
        return sun
def random_sun(self):
"""Generate a random rotation for the sun."""
theta = np.random.uniform(self.opts['sun_theta'][0],
self.opts['sun_theta'][1])
phi = np.random.uniform(0, 2*np.pi)
return [theta, 0, phi]
    def place_sun(self, rotation=None):
        """Place the sun at specified angle.

        Args:
            rotation: Euler angles [x, y, z]; when omitted a random
                rotation from :meth:`random_sun` is used.

        Returns:
            The sun lamp object.
        """
        if rotation is None:
            rotation = self.random_sun()
        self.sun.rotation_euler = rotation
        self.set_sky() # Set sun direction and randomise clouds
        return self.sun
    def new_camera(self):
        """Add a camera to the scene and set the resolution for rendering.

        The camera becomes the active scene camera; render resolution is
        taken from ``opts['resolution']`` and the far clip plane from
        ``opts['camera_clip_end']``.

        Returns:
            The newly created Blender camera object.
        """
        bpy.ops.object.camera_add()
        camera = bpy.context.object
        bpy.data.scenes[0].camera = camera
        bpy.data.scenes[0].render.resolution_x = self.opts['resolution'][0]
        bpy.data.scenes[0].render.resolution_y = self.opts['resolution'][1]
        # Render at full configured resolution (no downscaling)
        bpy.data.scenes[0].render.resolution_percentage = 100
        camera.data.clip_end = self.opts['camera_clip_end']
        return camera
def random_camera(self):
"""Generate a random camera position with the objects in view."""
# Random focal length (approx median, relative sigma)
focal_length = np.random.lognormal(
np.log |
pfalcon/ScratchABit | plugins/cpu/arm_32_arm_capstone.py | Python | gpl-3.0 | 891 | 0.001122 | # ScratchABit - interactive disassembler
#
# Copyright (c) 2018 Paul Sokolovsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import capstone
import _any_capstone
# Module-level Capstone disassembler for 32-bit ARM (ARM mode, not Thumb).
# (Fixed dataset artifact: "capstone.C | S_ARCH_ARM" rejoined.)
dis = capstone.Cs(capstone.CS_ARCH_ARM, capstone.CS_MODE_ARM)


def PROCESSOR_ENTRY():
    """Entry point expected by ScratchABit's plugin loader.

    Returns a generic Capstone-backed Processor wrapping the ARM engine.
    """
    return _any_capstone.Processor("arm_32", dis)
|
atrsoftgmbh/atrshmlog | python/src/tests/t_clock_id.py | Python | apache-2.0 | 594 | 0.006734 | #!/usr/bin/python3
#
# $Id:$
#
# We test a bit of the atrshmlog here.
#
# This is for the first starter, so only the basic things.
import sys
import atrshmlog
# Attach to the shared-memory log, then exercise get/set of the clock id:
# switch to id 2, read it back, restore id 1, and read back again.
r = atrshmlog.attach()

clock_id = atrshmlog.get_clock_id()
print('clock id : ' + str(clock_id) + ' : ')

old_id = atrshmlog.set_clock_id(2)
print('clock id : ' + str(old_id) + ' : ')

clock_id = atrshmlog.get_clock_id()
print('clock id : ' + str(clock_id) + ' : ')

old_id = atrshmlog.set_clock_id(1)
print('clock id : ' + str(old_id) + ' : ')

clock_id = atrshmlog.get_clock_id()
print('clock id : ' + str(clock_id) + ' : ')

print(' ')

sys.exit(0)
# end of test
|
mhbu50/frappe | frappe/model/document.py | Python | mit | 43,429 | 0.028 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
import time
from frappe import _, msgprint, is_whitelisted
from frappe.utils import flt, cstr, now, get_datetime_str, file_lock, date_diff
from frappe.model.base_document import BaseDocument, get_controller
from frappe.model.naming import set_new_name, gen_new_name_for_cancelled_doc
from werkzeug.exceptions import NotFound, Forbidden
import hashlib, json
from frappe.model import optional_fields, table_fields
from frappe.model.workflow import validate_workflow
from frappe.model.workflow import set_workflow_state_on_action
from frappe.utils.global_search import update_global_search
from frappe.integrations.doctype.webhook import run_webhooks
from frappe.desk.form.document_follow import follow_document
from frappe.core.doctype.server_script.server_script_utils import run_server_script_for_doc_event
from frappe.utils.data import get_absolute_url
# once_only validation
# methods
def get_doc(*args, **kwargs):
	"""Return a `frappe.model.Document` object.

	:param arg1: Document dict or DocType name.
	:param arg2: [optional] document name.
	:param for_update: [optional] select document for update.

	Accepts several call styles::

		user = get_doc("User", "test@example.com")         # load from db
		user = get_doc({"doctype": "User", ...})            # new from dict
		user = get_doc(doctype='User', email_id='...')      # new from kwargs
		user = get_doc("User", "t@e.com", for_update=True)  # locked select
	"""
	if args:
		first = args[0]
		if isinstance(first, BaseDocument):
			# already a constructed document - return as-is
			return first
		if isinstance(first, str):
			doctype = first
		elif isinstance(first, dict):
			# a dict carries the doctype along with field values
			kwargs = first
		else:
			raise ValueError('First non keyword argument must be a string or dict')

	if len(args) < 2 and kwargs:
		if 'doctype' not in kwargs:
			raise ValueError('"doctype" is a required key')
		doctype = kwargs['doctype']

	controller = get_controller(doctype)
	if controller:
		return controller(*args, **kwargs)

	raise ImportError(doctype)
class Document(BaseDocument):
"""All controllers inherit from `Document`."""
def __init__(self, *args, **kwargs):
"""Constructor.
:param arg1: DocType name as string or document **dict**
:param arg2: Document name, if `arg1` is DocType name.
If DocType name and document name are passed, the object will load
all values (including child documents) from the database.
"""
self.doctype = self.name = None
self._default_new_do | cs = {}
self.flags = frappe._dict()
if args and args[0] and isinstance(args[0], str):
# first arugment is doctype
if len(args)==1:
# single
self.doctype = self.name = args[0]
else:
self.doctype = args[0]
if isinstance(args[1], dict):
# filter
self.name = frappe.db.get_value(args[0], | args[1], "name")
if self.name is None:
frappe.throw(_("{0} {1} not found").format(_(args[0]), args[1]),
frappe.DoesNotExistError)
else:
self.name = args[1]
if 'for_update' in kwargs:
self.flags.for_update = kwargs.get('for_update')
self.load_from_db()
return
if args and args[0] and isinstance(args[0], dict):
# first argument is a dict
kwargs = args[0]
if kwargs:
# init base document
super(Document, self).__init__(kwargs)
self.init_valid_columns()
else:
# incorrect arguments. let's not proceed.
raise ValueError('Illegal arguments')
	@staticmethod
	def whitelist(fn):
		"""Decorator: Whitelist method to be called remotely via REST API."""
		# Register with frappe's whitelist, then return fn unchanged so it
		# still behaves as an ordinary (stackable) decorator.
		frappe.whitelist()(fn)
		return fn
	def reload(self):
		"""Reload document (and child tables) from the database,
		discarding any unsaved in-memory changes."""
		self.load_from_db()
	def load_from_db(self):
		"""Load document and children from database and create properties
		from fields.

		Single doctypes are assembled from the singles store; regular
		doctypes are fetched by name (raising `frappe.DoesNotExistError`
		when missing) and each child-table field is populated from its
		child doctype ordered by `idx`.
		"""
		if not getattr(self, "_metaclass", False) and self.meta.issingle:
			# Single doctype: values live in the singles table, not a row.
			single_doc = frappe.db.get_singles_dict(self.doctype)
			if not single_doc:
				# Nothing stored yet - fall back to a fresh default document.
				single_doc = frappe.new_doc(self.doctype).as_dict()
				single_doc["name"] = self.doctype
				del single_doc["__islocal"]

			super(Document, self).__init__(single_doc)
			self.init_valid_columns()
			self._fix_numeric_types()

		else:
			d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1, for_update=self.flags.for_update)
			if not d:
				frappe.throw(_("{0} {1} not found").format(_(self.doctype), self.name), frappe.DoesNotExistError)

			super(Document, self).__init__(d)

		if self.name=="DocType" and self.doctype=="DocType":
			# Bootstrapping case: DocType's own meta cannot be consulted.
			from frappe.model.meta import DOCTYPE_TABLE_FIELDS
			table_fields = DOCTYPE_TABLE_FIELDS
		else:
			table_fields = self.meta.get_table_fields()

		for df in table_fields:
			children = frappe.db.get_values(df.options,
				{"parent": self.name, "parenttype": self.doctype, "parentfield": df.fieldname},
				"*", as_dict=True, order_by="idx asc")
			if children:
				self.set(df.fieldname, children)
			else:
				self.set(df.fieldname, [])

		# sometimes __setup__ can depend on child values, hence calling again at the end
		if hasattr(self, "__setup__"):
			self.__setup__()
def get_latest(self):
if not getattr(self, "latest", None):
self.latest = frappe.get_doc(self.doctype, self.name)
return self.latest
	def check_permission(self, permtype='read', permlevel=None):
		"""Raise `frappe.PermissionError` if not permitted.

		:param permtype: permission type to check (`read`, `write`, ...)
		:param permlevel: optional label forwarded to the error in place
			of permtype."""
		if not self.has_permission(permtype):
			self.raise_no_permission_to(permlevel or permtype)
	def has_permission(self, permtype="read", verbose=False):
		"""Call `frappe.has_permission` if `self.flags.ignore_permissions`
		is not set.

		:param permtype: one of `read`, `write`, `submit`, `cancel`, `delete`"""
		if self.flags.ignore_permissions:
			# Explicit opt-out, e.g. for system-triggered operations.
			return True
		return frappe.has_permission(self.doctype, permtype, self, verbose=verbose)
	def raise_no_permission_to(self, perm_type):
		"""Raise `frappe.PermissionError`.

		:param perm_type: the permission that was missing."""
		# NOTE(review): perm_type is not interpolated into the message;
		# only the doctype is shown - confirm this is intentional.
		frappe.flags.error_message = _('Insufficient Permission for {0}').format(self.doctype)
		raise frappe.PermissionError
def insert(self, ignore_permissions=None, ignore_links=None, ignore_if_duplicate=False,
ignore_mandatory=None, set_name=None, set_child_names=True):
"""Insert the document in the database (as a new document).
This will check for user permissions and execute `before_insert`,
`validate`, `on_update`, `after_insert` methods if they are written.
:param ignore_permissions: Do not check permissions if True."""
if self.flags.in_print:
return
self.flags.notifications_executed = []
if ignore_permissions!=None:
self.flags.ignore_permissions = ignore_permissions
if ignore_links!=None:
self.flags.ignore_links = ignore_links
if ignore_mandatory!=None:
self.flags.ignore_mandatory = ignore_mandatory
self.set("__islocal", True)
self.check_permission("create")
self._set_defaults()
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self.run_method("before_insert")
self._validate_links()
self.set_new_name(set_name=set_name, set_child_names=set_child_names)
self.set_parent_in_children()
self.validate_higher_perm_levels()
self.flags.in_insert = True
self.run_before_save_methods()
self._validate()
self.set_docstatus()
self.flags.in_insert = False
# run validate, on update etc.
# parent
if getattr(self.meta, "issingle", 0):
self.update_single(self.get_valid_dict())
else:
try:
self.db_insert()
except frappe.DuplicateEntryError as e:
if not ignore_if_duplicate:
raise e
# children
for d in self.get_all_children():
d.db_insert()
self.run_method("after_insert")
self.flags.in_insert = True
if self.get("amended_from"):
self.copy_attachments_from_amended_from()
# flag to prevent creation of event update log for create and update both
# during document creation
self.flags.update_log_for_doc_creation = True
self.run_post_save_methods()
self.flags.in_insert = False
# delete __islocal
if hasattr |
kakunbsc/enigma2 | lib/python/Screens/InfoBar.py | Python | gpl-2.0 | 9,340 | 0.029229 | from Tools.Profile import profile
# workaround for required config entry dependencies.
from Screens.MovieSelection import MovieSelection
from Screen import Screen
profile("LOAD:enigma")
from enigma import iPlayableService
profile("LOAD:InfoBarGenerics")
from Screens.InfoBarGenerics import InfoBarShowHide, \
InfoBarNumberZap, InfoBarChannelSelection, InfoBarMenu, InfoBarRdsDecoder, \
InfoBarEPG, InfoBarSeek, InfoBarInstantRecord, \
InfoBarAudioSelection, InfoBarAdditionalInfo, InfoBarNotifications, InfoBarDish, InfoBarUnhandledKey, \
InfoBarSubserviceSelection, InfoBarShowMovies, InfoBarTimeshift, \
InfoBarServiceNotifications, InfoBarPVRState, InfoBarCueSheetSupport, InfoBarSimpleEventView, \
InfoBarSummarySupport, InfoBarMoviePlayerSummarySupport, InfoBarTimeshiftState, InfoBarTeletextPlugin, InfoBarExtensions, \
InfoBarSubtitleSupport, InfoBarPiP, InfoBarPlugins, InfoBarServiceErrorPopupSupport, InfoBarJobman
profile("LOAD:InitBar_Components")
from Components.ActionMap import HelpableActionMap
from Components.config import config
from Components.ServiceEventTracker import ServiceEventTracker, InfoBarBase
profile("LOAD:HelpableScreen")
from Screens.HelpMenu import HelpableScreen
from DB.dbpBluePanel import dbpBluePanelOpen
from DB.dbpBluePanel import dbpPluginsPanelOpen
from DB.dbpEpgPanel import dbpEpgPanelOpen
class InfoBar(InfoBarBase, InfoBarShowHide,
	InfoBarNumberZap, InfoBarChannelSelection, InfoBarMenu, InfoBarEPG, InfoBarRdsDecoder, dbpBluePanelOpen, dbpEpgPanelOpen, dbpPluginsPanelOpen,
	InfoBarInstantRecord, InfoBarAudioSelection,
	HelpableScreen, InfoBarAdditionalInfo, InfoBarNotifications, InfoBarDish, InfoBarUnhandledKey,
	InfoBarSubserviceSelection, InfoBarTimeshift, InfoBarSeek,
	InfoBarSummarySupport, InfoBarTimeshiftState, InfoBarTeletextPlugin, InfoBarExtensions,
	InfoBarPiP, InfoBarPlugins, InfoBarSubtitleSupport, InfoBarServiceErrorPopupSupport, InfoBarJobman,
	Screen):
	"""Main live-TV infobar screen (singleton) assembling all infobar mixins."""

	ALLOW_SUSPEND = True
	instance = None

	def __init__(self, session):
		Screen.__init__(self, session)
		self["actions"] = HelpableActionMap(self, "InfobarActions",
			{
				"showMovies": (self.showMovies, _("Play recorded movies...")),
				"showRadio": (self.showRadio, _("Show the radio player...")),
				"showTv": (self.showTv, _("Show the tv player...")),
			}, prio=2)

		self.allowPiP = True

		# The mixins do not cooperate via super(); initialise each explicitly.
		# (Fixed dataset artifact: "Inf | oBarJobman" rejoined.)
		for x in HelpableScreen, \
			InfoBarBase, InfoBarShowHide, \
			InfoBarNumberZap, InfoBarChannelSelection, InfoBarMenu, InfoBarEPG, InfoBarRdsDecoder, dbpBluePanelOpen, dbpEpgPanelOpen, dbpPluginsPanelOpen, \
			InfoBarInstantRecord, InfoBarAudioSelection, InfoBarUnhandledKey, \
			InfoBarAdditionalInfo, InfoBarNotifications, InfoBarDish, InfoBarSubserviceSelection, \
			InfoBarTimeshift, InfoBarSeek, InfoBarSummarySupport, InfoBarTimeshiftState, \
			InfoBarTeletextPlugin, InfoBarExtensions, InfoBarPiP, InfoBarSubtitleSupport, InfoBarJobman, \
			InfoBarPlugins, InfoBarServiceErrorPopupSupport:
			x.__init__(self)

		# (Fixed dataset artifact: action key was corrupted to '" | showMovies"'.)
		self.helpList.append((self["actions"], "InfobarActions", [("showMovies", _("view recordings..."))]))
		self.helpList.append((self["actions"], "InfobarActions", [("showRadio", _("hear radio..."))]))

		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evUpdatedEventInfo: self.__eventInfoChanged
			})

		self.current_begin_time=0
		assert InfoBar.instance is None, "class InfoBar is a singleton class and just one instance of this class is allowed!"
		InfoBar.instance = self

	def __onClose(self):
		# NOTE(review): not registered via self.onClose.append here - confirm
		# whether the singleton slot is ever released on close.
		InfoBar.instance = None

	def __eventInfoChanged(self):
		# Show the infobar when the running EPG event changes (if configured).
		if self.execing:
			service = self.session.nav.getCurrentService()
			old_begin_time = self.current_begin_time
			info = service and service.info()
			ptr = info and info.getEvent(0)
			self.current_begin_time = ptr and ptr.getBeginTime() or 0
			if config.usage.show_infobar_on_event_change.value:
				if old_begin_time and old_begin_time != self.current_begin_time:
					self.doShow()

	def __checkServiceStarted(self):
		self.__serviceStarted(True)
		self.onExecBegin.remove(self.__checkServiceStarted)

	def serviceStarted(self): #override from InfoBarShowHide
		new = self.servicelist.newServicePlayed()
		if self.execing:
			InfoBarShowHide.serviceStarted(self)
			self.current_begin_time=0
		elif not self.__checkServiceStarted in self.onShown and new:
			self.onShown.append(self.__checkServiceStarted)

	def __checkServiceStarted(self):
		# NOTE(review): this redefinition shadows the earlier
		# __checkServiceStarted above (kept as in upstream enigma2).
		self.serviceStarted()
		self.onShown.remove(self.__checkServiceStarted)

	def showTv(self):
		self.showTvChannelList(True)

	def showRadio(self):
		if config.usage.e1like_radio_mode.value:
			self.showRadioChannelList(True)
		else:
			self.rds_display.hide() # in InfoBarRdsDecoder
			from Screens.ChannelSelection import ChannelSelectionRadio
			self.session.openWithCallback(self.ChannelSelectionRadioClosed, ChannelSelectionRadio, self)

	def ChannelSelectionRadioClosed(self, *arg):
		self.rds_display.show() # in InfoBarRdsDecoder

	def showMovies(self):
		from Screens.MovieSelection import MovieSelection
		self.session.openWithCallback(self.movieSelected, MovieSelection)

	def movieSelected(self, service):
		if service is not None:
			self.session.open(MoviePlayer, service)
class MoviePlayer(InfoBarBase, InfoBarShowHide, \
InfoBarMenu, \
InfoBarSeek, InfoBarShowMovies, InfoBarAudioSelection, HelpableScreen, InfoBarNotifications,
InfoBarServiceNotifications, InfoBarPVRState, InfoBarCueSheetSupport, InfoBarSimpleEventView,
InfoBarMoviePlayerSummarySupport, InfoBarSubtitleSupport, Screen, InfoBarTeletextPlugin,
InfoBarServiceErrorPopupSupport, InfoBarExtensions, InfoBarPlugins, InfoBarPiP):
ENABLE_RESUME_SUPPORT = True
ALLOW_SUSPEND = True
def __init__(self, session, service):
Screen.__init__(self, session)
self["actions"] = HelpableActionMap(self, "MoviePlayerActions",
{
"leavePlayer": (self.leavePlayer, _("leave movie player..."))
})
self.allowPiP = False
for x in HelpableScreen, InfoBarShowHide, InfoBarMenu, \
InfoBarBase, InfoBarSeek, InfoBarShowMovies, \
InfoBarAudioSelection, InfoBarNotifications, InfoBarSimpleEventView, \
InfoBarServiceNotifications, InfoBarPVRState, InfoBarCueSheetSupport, \
InfoBarMoviePlayerSummarySupport, InfoBarSubtitleSupport, \
InfoBarTeletextPlugin, InfoBarServiceErrorPopupSupport, InfoBarExtensions, \
InfoBarPlugins, InfoBarPiP:
x.__init__(self)
self.lastservice = session.nav.getCurrentlyPlayingServiceReference()
session.nav.playService(service)
self.returning = False
self.onClose.append(self.__onClose)
def __onClose(self):
self.session.nav.playService(self.lastservice)
def handleLeave(self, how):
self.is_closing = True
if how == "ask":
if config.usage.setup_level.index < 2: # -expert
list = (
(_("Yes"), "quit"),
(_("No"), "continue")
)
else:
list = (
(_("Yes"), "quit"),
(_("Yes, returning to movie list"), "movielist"),
(_("Yes, and delete this movie"), "quitanddelete"),
(_("No"), "continue"),
(_("No, but restart from begin"), "restart")
)
from Screens.ChoiceBox import ChoiceBox
self.session.openWithCallback(self.leavePlayerConfirmed, ChoiceBox, title=_("Stop playing this movie?"), list = list)
else:
self.leavePlayerConfirmed([True, how])
def leavePlayer(self):
self.handleLeave(config.usage.on_movie_stop.value)
def deleteConfirmed(self, answer):
if answer:
self.leavePlayerConfirmed((True, "quitanddeleteconfirmed"))
def leavePlayerConfirmed(self, answer):
answer = answer and answer[1]
if answer in ("quitanddelete", "quitanddeleteconfirmed"):
ref = self.session.nav.getCurrentlyPlayingServiceReference()
from enigma import eServiceCenter
serviceHandler = eServiceCenter.getInstance()
info = serviceHandler.info(ref)
name = info and info.getName(ref) or _("this recording")
if answer == "quitanddelete":
from Screens.MessageBox import MessageBox
self.session.openWithCallback(self.deleteConfirmed, MessageBox, _("Do you really want to delete %s?") % name)
return
elif answer == "quitanddeleteconfirmed":
offline = serviceHandler.offlineOperations(r |
skarra/PRS | libs/sqlalchemy/util/deprecations.py | Python | agpl-3.0 | 7,169 | 0.000139 | # util/deprecations.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Helpers related to deprecation of functions, methods, classes, other
functionality."""
import re
import warnings
from . import compat
from .langhelpers import decorator
from .langhelpers import inject_docstring_text
from .langhelpers import inject_param_text
from .. import exc
def warn_deprecated(msg, stacklevel=3):
    """Emit *msg* as a :class:`.SADeprecationWarning`.

    :param stacklevel: frames to skip so the warning points at the
        user's calling code rather than this helper.
    """
    warnings.warn(msg, exc.SADeprecationWarning, stacklevel=stacklevel)
def warn_pending_deprecation(msg, stacklevel=3):
    """Emit *msg* as a :class:`.SAPendingDeprecationWarning`.

    :param stacklevel: frames to skip so the warning points at the
        user's calling code rather than this helper.
    """
    warnings.warn(msg, exc.SAPendingDeprecationWarning, stacklevel=stacklevel)
def deprecated_cls(version, message, constructor="__init__"):
    """Class decorator which warns with *message* when *constructor*
    is invoked and adds a ``.. deprecated::`` header to the docstring.

    :param version: version string interpolated into the header.
    :param message: warning text; may contain ``%(func)s``.
    :param constructor: name of the method to wrap (default ``__init__``).
    """
    header = ".. deprecated:: %s %s" % (version, (message or ""))

    def decorate(cls):
        return _decorate_cls_with_warning(
            cls,
            constructor,
            exc.SADeprecationWarning,
            message % dict(func=constructor),
            header,
        )

    return decorate
def deprecated(version, message=None, add_deprecation_to_docstring=True):
    """Decorates a function and issues a deprecation warning on use.

    :param version:
      Issue version in the warning.

    :param message:
      If provided, issue message in the warning.  A sensible default
      is used if not provided.

    :param add_deprecation_to_docstring:
      Default True.  If False, the wrapped function's __doc__ is left
      as-is.  If True, the 'message' is prepended to the docs if
      provided, or sensible default if message is omitted.

    """

    # The header deliberately uses the *user-supplied* message (possibly
    # empty) before the default message is filled in below.
    if add_deprecation_to_docstring:
        header = ".. deprecated:: %s %s" % (version, (message or ""))
    else:
        header = None

    if message is None:
        # Fixed dataset artifact: assignment was corrupted ('message = | "..."').
        message = "Call to deprecated function %(func)s"

    def decorate(fn):
        return _decorate_with_warning(
            fn,
            exc.SADeprecationWarning,
            message % dict(func=fn.__name__),
            header,
        )

    return decorate
def deprecated_params(**specs):
    """Decorates a function to warn on use of certain parameters.

    Each keyword maps a parameter name to a ``(version, message)`` tuple.

    e.g. ::

        @deprecated_params(
            weak_identity_map=(
                "0.7",
                "the :paramref:`.Session.weak_identity_map parameter "
                "is deprecated."
            )
        )

    """

    messages = {}
    for param, (version, message) in specs.items():
        messages[param] = _sanitize_restructured_text(message)

    def decorate(fn):
        spec = compat.inspect_getfullargspec(fn)
        if spec.defaults is not None:
            # Map each defaulted positional parameter to its default so we
            # can tell "explicitly passed" apart from "left at default".
            defaults = dict(
                zip(
                    spec.args[(len(spec.args) - len(spec.defaults)) :],
                    spec.defaults,
                )
            )
            check_defaults = set(defaults).intersection(messages)
            check_kw = set(messages).difference(defaults)
        else:
            check_defaults = ()
            check_kw = set(messages)

        # NOTE(review): has_kw is computed but never used in this function.
        has_kw = spec.varkw is not None

        @decorator
        def warned(fn, *args, **kwargs):
            # Warn for deprecated params passed with non-default values,
            # then for deprecated keyword-only params that were supplied.
            for m in check_defaults:
                if kwargs[m] != defaults[m]:
                    warnings.warn(
                        messages[m], exc.SADeprecationWarning, stacklevel=3
                    )
            for m in check_kw:
                if m in kwargs:
                    warnings.warn(
                        messages[m], exc.SADeprecationWarning, stacklevel=3
                    )
            return fn(*args, **kwargs)

        doc = fn.__doc__ is not None and fn.__doc__ or ""
        if doc:
            # Inject a ".. deprecated::" note into each parameter's docs.
            doc = inject_param_text(
                doc,
                {
                    param: ".. deprecated:: %s %s" % (version, (message or ""))
                    for param, (version, message) in specs.items()
                },
            )
        decorated = warned(fn)
        decorated.__doc__ = doc
        return decorated

    return decorate
def pending_deprecation(
    version, message=None, add_deprecation_to_docstring=True
):
    """Decorates a function and issues a pending deprecation warning on use.

    :param version:
      An approximate future version at which point the pending deprecation
      will become deprecated.  Not used in messaging.

    :param message:
      If provided, issue message in the warning.  A sensible default
      is used if not provided.

    :param add_deprecation_to_docstring:
      Default True.  If False, the wrapped function's __doc__ is left
      as-is.  If True, the 'message' is prepended to the docs if
      provided, or sensible default if message is omitted.

    """
    # Build the docstring header from the *user-supplied* message (possibly
    # empty) before defaulting the warning text below.
    header = (
        ".. deprecated:: %s (pending) %s" % (version, (message or ""))
        if add_deprecation_to_docstring
        else None
    )

    if message is None:
        message = "Call to deprecated function %(func)s"

    def decorate(fn):
        return _decorate_with_warning(
            fn,
            exc.SAPendingDeprecationWarning,
            message % dict(func=fn.__name__),
            header,
        )

    return decorate
def deprecated_option_value(parameter_value, default_value, warning_text):
    """Return *parameter_value*, warning that the option is deprecated;
    fall back to *default_value* when the option was not supplied (None)."""
    if parameter_value is None:
        return default_value
    warn_deprecated(warning_text)
    return parameter_value
def _sanitize_restructured_text(text):
def repl(m):
type_, name = m.group(1, 2)
if type_ in ("func", "meth"):
name += "()"
return name
return re.sub(r"\:(\w+)\:`~?\.?(.+?)`", repl, text)
def _decorate_cls_with_warning(
    cls, constructor, wtype, message, docstring_header=None
):
    """Wrap *constructor* of *cls* with a deprecation warning of type
    *wtype* and inject *docstring_header* into the class docstring.

    Returns the (possibly rebuilt) class object.
    """
    doc = cls.__doc__ is not None and cls.__doc__ or ""
    if docstring_header is not None:
        docstring_header %= dict(func=constructor)
        doc = inject_docstring_text(doc, docstring_header, 1)

    if type(cls) is type:
        # Plain classes: rebuild so __doc__ can be replaced.
        clsdict = dict(cls.__dict__)
        clsdict["__doc__"] = doc
        cls = type(cls.__name__, cls.__bases__, clsdict)
        constructor_fn = clsdict[constructor]
    else:
        # Classes with a custom metaclass allow assigning __doc__ directly.
        cls.__doc__ = doc
        constructor_fn = getattr(cls, constructor)

    setattr(
        cls,
        constructor,
        _decorate_with_warning(constructor_fn, wtype, message, None),
    )
    return cls
def _decorate_with_warning(func, wtype, message, docstring_header=None):
    """Wrap a function with a warnings.warn and augmented docstring."""

    message = _sanitize_restructured_text(message)

    @decorator
    def warned(fn, *args, **kwargs):
        # stacklevel=3 skips this wrapper plus the `decorator` machinery.
        warnings.warn(message, wtype, stacklevel=3)
        return fn(*args, **kwargs)

    doc = func.__doc__ is not None and func.__doc__ or ""
    if docstring_header is not None:
        docstring_header %= dict(func=func.__name__)

        doc = inject_docstring_text(doc, docstring_header, 1)

    decorated = warned(func)
    decorated.__doc__ = doc
    # _sa_warn lets callers trigger the warning without invoking func.
    decorated._sa_warn = lambda: warnings.warn(message, wtype, stacklevel=3)
    return decorated
|
nens/threedi-qgis-plugin | tests/test_spatialalchemy.py | Python | gpl-3.0 | 2,234 | 0.000448 | from geoalchemy2.types import Geometry
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from ThreeDiToolbox.utils.threedi_database import ThreediDatabase
import logging
import os.path
import tempfile
import unittest
logger = logging.getLogger(__name__)
Base = declarative_base()
class User(Base):
    """Plain (non-spatial) table used to sanity-check the test database."""

    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

    def __repr__(self):
        return "<User(name='%s')>" % (self.name)
class GeoTable(Base):
    """Spatialite-backed table with a 4326 POINT column for geometry tests."""

    __tablename__ = "geotable"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    # management=True uses spatialite's AddGeometryColumn machinery.
    geom = Column(
        Geometry(geometry_type="POINT", srid=4326, management=True, spatial_index=True)
    )

    def __repr__(self):
        # Bug fix: repr previously claimed to be a "User" (copy-paste error).
        return "<GeoTable(geom='%s')>" % (self.geom)
class TestSpatialAlchemyWithSpatialite(unittest.TestCase):
    """Round-trip inserts through a temporary spatialite ThreediDatabase."""

    def setUp(self):
        # Fresh spatialite file per test; schema created from Base metadata.
        self.tmp_directory = tempfile.mkdtemp()
        self.file_path = os.path.join(self.tmp_directory, "testdb.sqlite")
        db = ThreediDatabase(
            {"db_file": self.file_path, "db_path": self.file_path}, echo=True
        )
        db.create_db()
        self.engine = db.get_engine()
        self.session = db.get_session()
        Base.metadata.bind = self.engine
        Base.metadata.create_all(self.engine)

    def test_insert_and_get_normal_table(self):
        user = User(name="test")
        self.session.add(user)
        self.session.commit()
        self.assertIsNotNone(user.id)
        self.assertEqual(self.session.query(User).count(), 1)
        user = self.session.query(User).limit(1)[0]
        self.assertEqual(user.name, "test")

    def test_insert_and_get_geo_data(self):
        geo_table = GeoTable(geom="srid=4326;POINT(1.01234567 4.01234567)")
        self.session.add(geo_table)
        self.session.commit()
        self.assertIsNotNone(geo_table.id)
        self.assertEqual(self.session.query(GeoTable).count(), 1)
        geo_table = self.session.query(GeoTable).limit(1)[0]
        self.assertIsNotNone(geo_table.geom)

    def tearDown(self):
        self.session.close_all()
        os.remove(self.file_path)
        # Bug fix: the temporary directory itself was previously leaked.
        os.rmdir(self.tmp_directory)
|
schleichdi2/OpenNfr_E2_Gui-6.0 | lib/python/Screens/FactoryReset.py | Python | gpl-2.0 | 1,747 | 0.017745 | from Screens.MessageBox import MessageBox
from boxbranding import getMachineBrand, getMachineName
from Screens.ParentalControlSetup import ProtectedScreen
from Components.config import config
from Tools.BoundFunction import boundFunction
from Screens.InputBox import PinInput
class FactoryReset(MessageBox, ProtectedScreen):
	"""Yes/No confirmation for a factory reset, optionally guarded by the
	parental-control service PIN."""

	def __init__(self, session):
		MessageBox.__init__(self, session, _("When you do a factory reset, you will lose ALL your configuration data\n"
			"(including bouquets, services, satellite data ...)\n"
			"After completion of factory reset, your %s %s will restart automatically!\n\n"
			"Really do a factory reset?") % (getMachineBrand(), getMachineName()), MessageBox.TYPE_YESNO, default = False)
		self.setTitle(_("Factory reset"))
		self.skinName = "MessageBox"
		if self.isProtected() and config.ParentalControl.servicepin[0].value:
			# Ask for the service PIN before the dialog becomes usable.
			self.onFirstExecBegin.append(boundFunction(self.session.openWithCallback, self.pinEntered, PinInput, pinList=[x.value for x in config.ParentalControl.servicepin], triesEntry=config.ParentalControl.retries.servicepin, title=_("Please enter the correct pin code"), windowTitle=_("Enter pin code")))

	def isProtected(self):
		# Protected when the setup PIN is active and the manufacturer-reset
		# section is marked as PIN-protected in the parental-control config.
		return config.ParentalControl.setuppinactive.value and (not config.ParentalControl.config_sections.main_menu.value or hasattr(self.session, 'infobar') and self.session.infobar is None) and config.ParentalControl.config_sections.manufacturer_reset.value

	def pinEntered(self, result):
		if result is None:
			# User cancelled the PIN dialog.
			self.closeProtectedScreen()
		elif not result:
			# Bug fix: pass the bound method as the callback instead of
			# calling it immediately (was `self.close()`).
			self.session.openWithCallback(self.close, MessageBox, _("The pin code you entered is wrong."), MessageBox.TYPE_ERROR, timeout=3)

	def closeProtectedScreen(self, result=None):
		self.close(None)
Etxea/gestioneide | gestioneide/migrations/0003_auto_20160228_1748.py | Python | gpl-3.0 | 768 | 0.001302 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-28 16:48
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9): adds Alumno.fecha_nacimiento (defaulting
    # to today) and restricts Clase.dia_semana to weekday choices Mon-Fri.

    dependencies = [
        ('gestioneide', '0002_auto_20160223_1743'),
    ]

    operations = [
        migrations.AddField(
            model_name='alumno',
            name='fecha_nacimiento',
            field=models.DateField(blank=True, default=datetime.date.today),
        ),
        migrations.AlterField(
            model_name='clase',
            name='dia_semana',
            field=models.DecimalField(choices=[(1, 'Lunes'), (2, 'Martes'), (3, 'Miercoles'), (4, 'Jueves'), (5, 'Viernes')], decimal_places=0, max_digits=1),
        ),
    ]
|
dvl/imagefy-web | imagefy/core/views.py | Python | mit | 380 | 0.002632 | from allauth.socialaccount.providers.facebook.v | iews import FacebookOAuth2Adapter
from allauth.socialaccount.providers.shopify.views import ShopifyOAuth2Adapter
from rest_auth.registration.views import SocialLoginV | iew
class FacebookLogin(SocialLoginView):
    # REST endpoint for logging in via Facebook OAuth2 (django-rest-auth).
    adapter_class = FacebookOAuth2Adapter
class ShopifyLogin(SocialLoginView):
    # REST endpoint for logging in via Shopify OAuth2 (django-rest-auth).
    adapter_class = ShopifyOAuth2Adapter
|
jr-garcia/Engendro3D | Demos/_base/_model_paths.py | Python | mit | 305 | 0.003279 | import os
# Paths to the demo model assets, relative to this module's parent directory.
# (Fixed dataset artifact: stray "|" marker removed from the maindir line.)
maindir = os.path.join(os.path.dirname(__file__), os.pardir, "models")


def _model_path(*parts):
    """Join *parts* under the shared models directory."""
    return os.path.join(maindir, *parts)


duckMODEL = _model_path("duck", "duck.3DS")
dwarfMODEL = _model_path("dwarf", "dwarf.x")
tubeMODEL = _model_path("cil", "cil.x")
triangleMODEL = _model_path("triangle", "triangle.x")
furthz/colegio | src/register/forms.py | Python | mit | 14,437 | 0.007206 | from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from dal import autocomplete
from django import forms
from django.forms import ModelForm
from profiles.models import Profile
from register.models import Alumno, Apoderado, Personal, Promotor, Director, Cajero, Tesorero, Proveedor, Colegio, Sistemas, Administrativo, \
Direccion, Docente
from utils.forms import ValidProfileFormMixin
from utils.models import TipoDocumento, TipoSexo, Departamento, Provincia, Distrito
class PersonaForm(ModelForm):
direccion = forms.CharField(widget=forms.TextInput(attrs={'tabindex': '13', 'class': 'form-control'}), label="Direccion")
referencia = forms.CharField(widget=forms.TextInput(attrs={'tabindex': '14', 'class': 'form-control'}), label="Referencia")
departamento = forms.ChoiceField(widget=forms.Select(attrs={'class': 'form-control'}), label="Departamento")
provincia = forms.ChoiceField(widget=forms.Select(attrs={'class': 'form-control'}), label="Provincia", required=False)
distrito = forms.ChoiceField(widget=forms.Select(attrs={'class': 'form-control'}), label="Distrito", required=False)
tipo_cel = forms.ChoiceField(widget=forms.Select(attrs={'tabindex': '15', 'class': 'form-control'}), label="Tipo Movil",
required=False)
celular = forms.CharField(widget=forms.NumberInput(attrs={'tabindex': '16', 'class': 'form-control', 'maxlength':'9'}), label="Celular",
required=False)
celulares = forms.MultipleChoiceField(widget=forms.SelectMultiple(attrs={'tabindex': '17', 'class': 'form-control'}), label="Números",
required=False)
    @property
    def ChoiceTipoDocumento(self):
        """Choices (id, descripcion) for every registered document type."""
        choices = [(tipo.id_tipo, tipo.descripcion) for tipo in TipoDocumento.objects.all()]
        return choices
    @property
    def ChoiceTipoSexo(self):
        """Choices (id, descripcion) for every registered sex/gender option."""
        choices = [(sex.id_sexo, sex.descripcion) for sex in TipoSexo.objects.all()]
        return choices
@property
def ChoiceDepartamento(self):
choices = []
choices.append(('-1', 'Seleccione'))
for d in Departamento.objects.all():
choices.append((d.id_departamento, d.descripcion))
return choices
# @property
def ChoiceProvincia(self):
# choices = [(p.id_provincia, p.descripcion) for p in Provincia.objects.filter(departamento__id_departamento=dpto)]
choices = [(p.id_provincia, p.descripcion) for p in
Provincia.objects.all()]
return choices
# @property
def ChoiceDistrito(self):
# choices = [(d.id_distrito, d.descripcion) for d in Distrito.objects.filter(provincia__id_provincia=prov)]
choices = [(d.id_distrito, d.descripcion) for d in Distrito.objects.all()]
return choices
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['tipo_documento'] = forms.ChoiceField(choices=self.ChoiceTipoDocumento,
widget=forms.Select(attrs={'tabindex': '-5', 'class': 'form-control'}))
self.fields['sexo'] = forms.ChoiceField(choices=self.ChoiceTipoSexo,
widget=forms.Select(attrs={'tabindex': '7', 'class': 'form-control'}))
self.fields['departamento'] = forms.ChoiceField(choices = self.ChoiceDepartamento, initial='-1',
widget=forms.Select(attrs={'tabindex': '10', 'class': 'form-control'}))
self.fields['nombre'].widget.attrs = {'tabindex': '1', 'class': 'form-control', 'maxlength': '50'}
self.fields['segundo_nombre'].widget.attrs = {'tabindex': '2', 'class': 'form-control', 'maxlength': '200'}
self.fields['apellido_pa'].widget.attrs = {'tabindex': '3', 'class': 'form-control', 'maxlength': '50'}
self.fields['apellido_ma'].widget.attrs = {'tabindex': '4', 'class': 'form-control', 'maxlength': '50'}
self.fields['numero_documento'].widget.attrs = {'tabindex': '-6' , 'class': 'form-control'}
self.fields['correo'].widget.attrs = {'tabindex': '-7', 'class': 'form-control'}
self.fields['fecha_nac'] = forms.DateField(widget=forms.DateInput, input_formats=['%Y-%m-%d'])
self.fields['fecha_nac'].widget.attrs = {'tabindex': '8', 'class': 'form-control', 'onChange': 'validarFecNac()'}
try:
#cargar los valores guardados en la dirección
if kwargs['instance'].pk is not None:
direc = Direccion.objects.get(persona__id_persona=kwargs['instance'].pk)
self.fields['departamento'].initial = direc.dpto
opciones_provincias = self.ChoiceProvincia()
opciones_distritos = self.ChoiceDistrito()
self.fields['provincia'] = forms.ChoiceField(choices=opciones_provincias,
widget=forms.Select(attrs={'tabindex': '11', 'class': 'form-control'}))
self.fields['distrito'] = forms.ChoiceField(choices=opc | iones_distritos,
widget=forms.Select(attrs={'tabindex': '12', 'class': 'form-control'}))
self.fields['provincia'].initial = direc.provincia
self.fields['distrito'].initial = direc.distrito
self.fields['direccion' | ].initial = direc.calle
self.fields['referencia'].initial = direc.referencia
except:
self.fields['provincia'] = forms.ChoiceField(choices=self.ChoiceProvincia, initial='-1',
widget=forms.Select(
attrs={'tabindex': '10', 'class': 'form-control'}))
self.fields['distrito'] = forms.ChoiceField(choices=self.ChoiceDistrito, initial='-1',
widget=forms.Select(
attrs={'tabindex': '10', 'class': 'form-control'}))
class Meta:
model = Profile
fields = ['nombre', 'segundo_nombre', 'apellido_pa', 'apellido_ma', 'tipo_documento', 'numero_documento',
'sexo', 'correo', 'fecha_nac']
class AlumnoForm(ValidProfileFormMixin, PersonaForm):
    """Registration/edit form for students (Alumno)."""
    # Pseudo-field: only used by the template as a section heading.
    title = forms.CharField(label="Registrar Alumno", required=False)
    class Meta:
        model = Alumno
        fields = ['nombre', 'segundo_nombre', 'apellido_pa', 'apellido_ma', 'tipo_documento', 'numero_documento',
                  'sexo', 'correo', 'fecha_nac', 'colegio_id']
class ApoderadoForm(ValidProfileFormMixin, PersonaForm):
    """Registration/edit form for guardians (Apoderado)."""
    # Pseudo-field: only used by the template as a section heading.
    title = forms.CharField(label="Registrar Apoderado", required=False)
    #alu = forms.CharField(widget=forms.TextInput(attrs={'tabindex': '23', 'class': 'form-control'}), label="Direccion")
    # Autocomplete multi-select for the student(s) linked to this guardian.
    alumno = forms.ModelMultipleChoiceField(queryset=Alumno.objects.all(),required= False,
                         widget=autocomplete.ModelSelect2Multiple(url='registers:alumno_autocomplete',attrs={'tabindex': '27', 'class': 'form-control'}))
    #parentesco = forms.CharField(max_length=30, widget=forms.TextInput(attrs={'class' : 'form-control'}))
    def ChoiceParentesco(self):
        # Fixed list of allowed relationship types, as (value, label) pairs.
        MY_CHOICES = (
            ('Padre', 'Padre'),
            ('Madre', 'Madre'),
            ('Tio', 'Tio'),
            ('Hermano', 'Hermano'),
            ('Pariente', 'Pariente')
        )
        return MY_CHOICES
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # 'parentesco' is built here (not declaratively) so its choices come
        # from ChoiceParentesco() above.
        self.fields['parentesco'] = forms.ChoiceField(choices=self.ChoiceParentesco())
        self.fields['parentesco'].widget.attrs.update({'tabindex': '10', 'class': 'form-control'})
    class Meta:
        model = Apoderado
        fields = ['nombre', 'segundo_nombre', 'apellido_pa', 'apellido_ma', 'tipo_documento',
                  'numero_documento', 'sexo', 'correo', 'fecha_nac']
class PersonalForm(ValidProfileFormMixin, PersonaForm):
class Meta:
model = Personal
fields = ['nombre', 'segundo_nombre', 'apellido_pa', 'apellido_ma', 'tipo_documento', 'numero_docu |
kcsry/wurst | wurst/api/utils.py | Python | mit | 646 | 0.003096 | from rest_framework import serializers
def serializer_factory(model, serializer_class=serializers.ModelSerializer, attrs=None, meta=None):
    """
    Generate a simple serializer for the given model class.
    :param model: Model class
    :param serializer_class: Serializer base class
    :param attrs: Serializer class attrs
    :param meta: Serializer Meta class attrs
    :return: a Serializer class
    """
    attrs = attrs or {}
    meta = meta or {}
    # The Meta inner class must at least declare the model; callers may
    # override/extend everything else via `meta` and `attrs`.
    meta.setdefault("model", model)
    attrs.setdefault("Meta", type(str("Meta"), (object,), meta))
    return type(str("%sSerializer" % model.__name__), (serializer_class,), attrs)
|
olafhauk/mne-python | mne/preprocessing/nirs/_tddr.py | Python | bsd-3-clause | 4,673 | 0 | # Authors: Robert Luke <mail@robertluke.net>
# Frank Fishburn
#
# License: BSD (3-clause)
import numpy as np
from ... import pick_types
from ...io import BaseRaw
from ...utils import _validate_type
from ...io.pick import _picks_to_idx
def temporal_derivative_distribution_repair(raw):
    """Apply temporal derivative distribution repair to data.
    Applies temporal derivative distribution repair (TDDR) to data
    :footcite:`FishburnEtAl2019`. This approach removes baseline shift
    and spike artifacts without the need for any user-supplied parameters.
    Parameters
    ----------
    raw : instance of Raw
        The raw data.
    %(verbose)s
    Returns
    -------
    raw : instance of Raw
        Data with TDDR applied.
    Notes
    -----
    There is a shorter alias ``mne.preprocessing.nirs.tddr`` that can be used
    instead of this function (e.g. if line length is an issue).
    References
    ----------
    .. footbibliography::
    """
    # Work on a copy with the data loaded into memory; the input is untouched.
    raw = raw.copy().load_data()
    _validate_type(raw, BaseRaw, 'raw')
    # TDDR is only defined for optical density channels.
    if not len(pick_types(raw.info, fnirs='fnirs_od')):
        raise RuntimeError('TDDR should be run on optical density data.')
    picks = _picks_to_idx(raw.info, 'fnirs_od', exclude=[])
    # Each optical density channel is repaired independently.
    for pick in picks:
        raw._data[pick] = _TDDR(raw._data[pick], raw.info['sfreq'])
    return raw
# provide a short alias
tddr = temporal_derivative_distribution_repair
# Taken from https://github.com/frankfishburn/TDDR/ (MIT license).
# With permission https://github.com/frankfishburn/TDDR/issues/1.
# The only modification is the name, scipy signal import and flake fixes.
def _TDDR(signal, sample_rate):
# This function is the reference implementation for the TDDR algorithm for
# motion correction of fNIRS data, as described in:
#
# Fishburn F.A., Ludlum R.S., Vaidya C.J., & Medvedev A.V. (2019).
# Temporal Derivative Distribution Repair (TDDR): A motion correction
# method for fNIRS. NeuroImage, 184, 171-179.
# https://doi.org/10.1016/j.neuroimage.2018.09.025
#
# Usage:
# signals_corrected = TDDR( signals , sample_rate );
#
# Inputs:
# signals: A [sample x channel] matrix of uncorrected optical density
# data
# sample_rate: A scalar reflecting the rate of acquisition in Hz
#
# Outputs:
# signals_corrected: A [sample x channel] matrix of corrected optical
# density data
from scipy.signal import butter, filtfilt
signal = np.array(signal)
if len(signal.shape) != 1:
for ch in range(signal.shape[1]):
signal[:, ch] = _TDDR(signal[:, ch], sample_rate)
return signal
# Preprocess: Separate high and low frequencies
filter_cutoff = .5
filter_order = 3
Fc = filter_cutoff * 2 / sample_rate
signal_mean = np.mean(signal)
signal -= signal_mean
if Fc < 1:
fb, fa = butter(filter_order, Fc)
signal_low = filtfilt(fb, fa, signal, padlen=0)
else:
signal_low = signal
signal_high = signal - signal_low
# Initialize
tune = 4.685
D = np.sqrt(np.finfo(signal.dtype).eps)
mu = np.inf
iter = 0
# Step 1. Compute temporal derivative of the signal
deriv = np.diff(signal_low)
# Step 2. Initialize observation weights
w = np.ones(deriv.shape)
# Step 3. Iterative estimation of robust weights
while iter < 50:
iter = iter + 1
mu0 = mu
# Step 3a. Estimate weighted mean
mu = np.sum(w * deriv) / np.sum(w)
# Step 3b. Calculate absolute residuals of estimate
dev = np.abs(deriv - mu)
# Step 3c. Robust estimate of standard deviation of the residuals
sigma = 1.4826 * np.median(dev)
# Step 3d. Scale deviations by standard deviation and tuning parameter
r = dev / (sigma * tune)
# Step 3e. Calculate new weights according to Tukey's biweight function
w = ((1 - r**2) * (r < 1)) ** 2
# Step 3f. Terminate if new estimate is within
# machine-precision of old estimate
if abs(mu - mu0) < D * max(abs(mu), abs(mu0)):
break
# Step 4. Apply robust weights to centered derivative
new_deriv = w * (deriv - mu)
# Step 5. Integrate corrected derivative
signal_low_corrected = np.cumsum(np.insert(new_deriv, 0, 0.0))
# Postprocess: Center the corrected signal
signal_low_corrected = signal_low_corrected - np.mean(signal_low_corrected)
# Postprocess: Merge back with uncorrected high frequency component
signal_corrected = signal_low_corrected + signal_high + signal_mean
return signal_corrected
|
fedora-infra/pkgdb2 | pkgdb2/api/packages.py | Python | gpl-2.0 | 53,012 | 0.000094 | # -*- coding: utf-8 -*-
#
# Copyright © 2013-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
Packages
========
API for package management.
'''
import flask
import itertools
from math import ceil
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.exc import NoResultFound
import pkgdb2.lib as pkgdblib
from pkgdb2 import APP, SESSION, forms, is_admin, packager_login_required
from pkgdb2.api import API, get_limit
from pkgdb2.lib.exceptions import PkgdbException, PkgdbBugzillaException
## Some of the object we use here have inherited methods which apparently
## pylint does not detect.
# pylint: disable=E1101
## Too many variables
# pylint: disable=R0914
## Package
@API.route('/package/new/', methods=['POST'])
@is_admin
def api_package_new():
    '''
    New package
    -----------
    Create a new package.

    ::

        /api/package/new/

    Accepts POST queries only.

    :arg pkgname: String of the package name to be created.
    :arg summary: String of the summary description of the package.
    :arg description: String describing the package (same as in the
        spec file).
    :arg review_url: the URL of the package review on the bugzilla.
    :arg status: status of the package can be one of: 'Approved',
        'Awaiting Review', 'Denied', 'Obsolete', 'Removed'
    :arg branches: one or more branch names of the collection in which
        this package is added.
    :arg poc: FAS username of the point of contact
    :arg upstream_url: the URL of the upstream project
    :arg critpath: boolean specifying if the package is in the critpath
    :kwarg namespace: String of the namespace of the package to create
        (defaults to ``rpms``).
    :kwarg monitoring_status: the new release monitoring status for this
        package (defaults to ``True``, can be ``True``, ``False`` or
        ``nobuild``).
    :kwarg koschei: the koschei integration status for this package
        (defaults to ``False``, can be ``True`` or ``False``).

    Sample response:

    ::

        {
            "output": "ok",
            "messages": ["Package created"]
        }

        {
            "output": "notok",
            "error": ["You're not allowed to add a package"]
        }

    '''
    httpcode = 200
    output = {}

    # Packages may only be added to collections that are still alive.
    collections = pkgdblib.search_collection(
        SESSION, '*', 'Under Development')
    collections.extend(pkgdblib.search_collection(SESSION, '*', 'Active'))
    pkg_status = pkgdblib.get_status(SESSION, 'pkg_status')['pkg_status']
    namespaces = pkgdblib.get_status(SESSION, 'namespaces')['namespaces']
    form = forms.AddPackageForm(
        csrf_enabled=False,
        collections=collections,
        pkg_status_list=pkg_status,
        namespaces=namespaces,
    )
    # Default to the 'rpms' namespace when none was provided.
    if str(form.namespace.data) in ['None', '']:
        form.namespace.data = 'rpms'
    violation = enforce_namespace_policy(form)
    if violation:
        return violation
    if form.validate_on_submit():
        namespace = form.namespace.data
        pkg_name = form.pkgname.data
        pkg_summary = form.summary.data
        pkg_description = form.description.data
        pkg_review_url = form.review_url.data
        pkg_status = form.status.data
        pkg_collection = form.branches.data
        pkg_poc = form.poc.data
        pkg_upstream_url = form.upstream_url.data
        pkg_critpath = form.critpath.data
        monitoring_status = form.monitoring_status.data
        koschei = form.koschei.data
        try:
            message = pkgdblib.add_package(
                SESSION,
                namespace=namespace,
                pkg_name=pkg_name,
                pkg_summary=pkg_summary,
                pkg_description=pkg_description,
                pkg_review_url=pkg_review_url,
                pkg_status=pkg_status,
                pkg_collection=pkg_collection,
                pkg_poc=pkg_poc,
                pkg_upstream_url=pkg_upstream_url,
                pkg_critpath=pkg_critpath,
                monitoring_status=monitoring_status,
                koschei=koschei,
                user=flask.g.fas_user
            )
            SESSION.commit()
            output['output'] = 'ok'
            output['messages'] = [message]
        except PkgdbException as err:
            # Keep the DB consistent on any pkgdb-level failure.
            SESSION.rollback()
            output['output'] = 'notok'
            output['error'] = str(err)
            httpcode = 500
    else:
        output['output'] = 'notok'
        output['error'] = 'Invalid input submitted'
        if form.errors:
            detail = []
            for error in form.errors:
                detail.append('%s: %s' % (error,
                                          '; '.join(form.errors[error])))
            output['error_detail'] = detail
        httpcode = 500
    jsonout = flask.jsonify(output)
    jsonout.status_code = httpcode
    return jsonout
@API.route('/package/edit/', methods=['POST'])
@is_admin
def api_package_edit():
'''
Edit a package
--------------
Edit a package.
::
/api/package/edit/
Accepts POST queries only.
:arg pkgname: String of the package name to be edited.
:arg summary: String of the summary description of the package.
:arg description: String describing the package (same as in the
spec file).
:arg review_url: the URL of the package review on the bugzilla.
:arg status: status of the package can be one of: 'Approved',
'Awaiting Review', 'Denied', 'Obsolete', 'Removed'
:arg upstream_url: the URL of the upstream project
:kwarg namespace: String of the namespace of the package to be edited
(defaults to ``rpms``).
Sample response:
::
{
"output": "ok",
"messages": ["Package edited"]
}
{
"output": "notok",
"error": ["You're not allowed to edit this package"]
}
'''
httpcode = 200
output = {}
pkg_status = pkgdblib.get_status(SESSION, 'pkg_status')['pkg_status']
namespaces = pkgdblib.get_status(SESSION, 'namespaces')['namespaces']
form = forms.EditPackageForm(
csrf_enabled=False,
pkg_status_list=pkg_status,
namespaces=namespaces,
)
if str(form.namespace.data) in ['None', '']:
form.namespace.data = 'rpms'
violation = enforce_namespace_policy(form)
if violation:
return violation
if form.validate_on_submit():
namespace = form.namespace.data
pkg_name = form.pkgname.data
package = None
try:
package = pkgdblib.search_package(
SESSION, namespace, pkg_name, limit=1)[0]
except (NoResultFound, IndexError):
SESSION.rollback()
output['output'] = 'notok'
output['error'] = 'No package of this name found'
httpcode = 500
if package:
pkg_summary = form.summary.data
pkg_description = form.description.data
pkg_review_url = form.review_url.data
pkg_status = form.status.data
if pkg_status == 'None':
pkg_status = None
pkg_upstream_url = form.upstream_url.data
try:
message = pkgdblib.edit_package(
SESSION,
package, |
ros-infrastructure/ros_buildfarm | scripts/devel/build_and_test.py | Python | apache-2.0 | 6,218 | 0.000161 | #!/usr/bin/env python3
# Copyright 2014, 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
from ros_buildfarm.argument import add_argument_build_tool
from ros_buildfarm.argument import add_argument_build_tool_args
from ros_buildfarm.argument import add_argument_build_tool_test_args
from ros_buildfarm.argument import add_argument_require_gpu_support
from ros_buildfarm.argument import add_argument_ros_version
from ros_buildfarm.argument import extract_multiple_remainders
from ros_buildfarm.common import Scope
from ros_buildfarm.workspace import call_build_tool
from ros_buildfarm.workspace import clean_workspace
from ros_buildfarm.workspace import ensure_workspace_exists
def main(argv=sys.argv[1:]):
    """Build a workspace, build its tests, then run them.

    Parses the command line, prepares the workspace (optionally cleaning it
    before/after), and invokes the selected build tool up to three times:
    full build, 'tests' target, and the test run itself.

    :returns: the return code of the last build tool invocation
    """
    parser = argparse.ArgumentParser(
        description='Invoke the build tool on a workspace while enabling and '
                    'running the tests')
    parser.add_argument(
        '--rosdistro-name',
        required=True,
        help='The name of the ROS distro to identify the setup file to be '
             'sourced (if available)')
    add_argument_ros_version(parser)
    add_argument_build_tool(parser, required=True)
    a1 = add_argument_build_tool_args(parser)
    a2 = add_argument_build_tool_test_args(parser)
    parser.add_argument(
        '--workspace-root',
        required=True,
        help='The root path of the workspace to compile')
    parser.add_argument(
        '--parent-result-space', nargs='*',
        help='The paths of the parent result spaces')
    parser.add_argument(
        '--clean-before',
        action='store_true',
        help='The flag if the workspace should be cleaned before the '
             'invocation')
    parser.add_argument(
        '--clean-after',
        action='store_true',
        help='The flag if the workspace should be cleaned after the '
             'invocation')
    add_argument_require_gpu_support(parser)
    remainder_args = extract_multiple_remainders(argv, (a1, a2))
    args = parser.parse_args(argv)
    for k, v in remainder_args.items():
        setattr(args, k, v)

    ensure_workspace_exists(args.workspace_root)
    if args.clean_before:
        clean_workspace(args.workspace_root)

    parent_result_spaces = None
    if args.parent_result_space:
        parent_result_spaces = args.parent_result_space

    try:
        with Scope('SUBSECTION', 'build workspace in isolation'):
            test_results_dir = os.path.join(
                args.workspace_root, 'test_results')
            cmake_args = ['-DBUILD_TESTING=1']
            if args.ros_version == 1:
                cmake_args += [
                    '-DCATKIN_ENABLE_TESTING=1', '-DCATKIN_SKIP_TESTING=0',
                    '-DCATKIN_TEST_RESULTS_DIR=%s' % test_results_dir]
            additional_args = args.build_tool_args or []
            if args.build_tool == 'colcon':
                additional_args += ['--test-result-base', test_results_dir]
            # Serialize make unless the caller already set MAKEFLAGS.
            env = dict(os.environ)
            env.setdefault('MAKEFLAGS', '-j1')
            rc = call_build_tool(
                args.build_tool, args.rosdistro_name, args.workspace_root,
                cmake_clean_cache=True,
                cmake_args=cmake_args, args=additional_args,
                parent_result_spaces=parent_result_spaces, env=env)
        if not rc:
            with Scope('SUBSECTION', 'build tests'):
                additional_args = args.build_tool_args or []
                if args.build_tool == 'colcon':
                    additional_args += ['--cmake-target-skip-unavailable']
                rc = call_build_tool(
                    args.build_tool, args.rosdistro_name, args.workspace_root,
                    cmake_args=cmake_args,
                    make_args=['tests'], args=additional_args,
                    parent_result_spaces=parent_result_spaces, env=env)
        if not rc:
            make_args = ['run_tests']
            additional_args = args.build_tool_args or []
            if args.build_tool == 'colcon':
                cmake_args = None
                make_args = None
                additional_args = ['--test-result-base', test_results_dir]
                additional_args += args.build_tool_test_args or []
            # for workspaces with only plain cmake packages the setup files
            # generated by cmi won't implicitly source the underlays
            if parent_result_spaces is None:
                parent_result_spaces = ['/opt/ros/%s' % args.rosdistro_name]
            if args.build_tool == 'catkin_make_isolated':
                devel_space = os.path.join(
                    args.workspace_root, 'devel_isolated')
                parent_result_spaces.append(devel_space)
            # since catkin_make_isolated doesn't provide a custom
            # environment to run tests this needs to source the devel space
            # and force a CMake run to use the new environment
            with Scope('SUBSECTION', 'run tests'):
                rc = call_build_tool(
                    args.build_tool,
                    args.rosdistro_name, args.workspace_root,
                    cmake_args=cmake_args,
                    force_cmake=args.build_tool == 'catkin_make_isolated',
                    make_args=make_args, args=additional_args,
                    parent_result_spaces=parent_result_spaces, env=env,
                    colcon_verb='test')
    finally:
        # Honor --clean-after even when a build step raised.
        if args.clean_after:
            clean_workspace(args.workspace_root)
    return rc


if __name__ == '__main__':
    sys.exit(main())
|
armab/st2 | st2common/st2common/models/utils/action_alias_utils.py | Python | apache-2.0 | 4,206 | 0.001189 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from st2common.exceptions import content
__all__ = [
'ActionAliasFormatParser'
]
class ActionAliasFormatParser(object):
    """Parse a parameter stream against an action alias format string.

    The format string may contain ``{{ name }}`` placeholders, optionally
    with defaults (``{{ name = value }}``); extra trailing ``key=value``
    pairs in the parameter stream are also accepted for parameters that are
    not part of the format string.
    """

    def __init__(self, alias_format=None, param_stream=None):
        self._format = alias_format or ''
        self._param_stream = param_stream or ''

    def get_extracted_param_value(self):
        """Return a dict mapping parameter names to their extracted values.

        Value precedence, from lowest to highest: defaults embedded in the
        format string, values matched positionally from the stream, extra
        trailing ``key=value`` pairs.

        :raises content.ParseException: if a format is defined but neither a
            value nor a default could be found.
        """
        result = {}
        # As there's a lot of questions about using regular expressions,
        # I'll try to be thorough when documenting this code.
        # We're parsing the arbitrary key-value pairs at the end of the stream
        # to support passing of parameters not specified in the format string,
        # and cutting them from the stream as they're no longer needed.
        # Possible values are quoted strings, a word, or anything inside "{}".
        pairs_match = r'(?:^|\s+)(\S+)=("(.*?)"|\'(.*?)\'|({.*?})|(\S+))'
        extra = re.match(r'.*?((' + pairs_match + r'\s*)*)$',
                         self._param_stream, re.DOTALL)
        if extra:
            kv_pairs = re.findall(pairs_match,
                                  extra.group(1), re.DOTALL)
            self._param_stream = self._param_stream.replace(extra.group(1), '')
        self._param_stream = " %s " % self._param_stream
        # Now we'll match parameters with default values in form of
        # {{ value = parameter }} (and all possible permutations of spaces),
        # compiling them into a list.
        # "test {{ url = http://google.com }} {{ extra = Test }}" will become
        # [ ["url", "http://google.com"], ["extra", "Test"] ]
        params = re.findall(r'{{\s*(.+?)\s*(?:=\s*[\'"]?({.+?}|.+?)[\'"]?)?\s*}}',
                            self._format, re.DOTALL)
        # Now we're transforming our format string into a regular expression,
        # substituting {{ ... }} with regex named groups, so that param_stream
        # matched against this expression yields a dict of params with values.
        param_match = r'["\']?(?P<\2>(?:(?<=\').+?(?=\')|(?<=").+?(?=")|{.+?}|.+?))["\']?'
        reg = re.sub(r'(\s*){{\s*([^=}]+?)\s*}}(?![\'"]?\s+}})',
                     r'\1' + param_match,
                     self._format)
        reg = re.sub(r'(\s*){{\s*(\S+)\s*=\s*(?:{.+?}|.+?)\s*}}',
                     r'(?:\1' + param_match + r')?',
                     reg)
        reg = re.sub(r'(\s*){{\s*(.+?)\s*}}',
                     r'\1' + param_match,
                     reg)
        reg = r'^\s*' + reg + r'\s*$'
        # Now we're matching param_stream against our format string regex,
        # getting a dict of values. We'll also get default values from
        # "params" list if something is not present.
        # Priority, from lowest to highest:
        # 1. Default parameters
        # 2. Matched parameters
        # 3. Extra parameters
        matched_stream = re.match(reg, self._param_stream, re.DOTALL)
        if matched_stream:
            values = matched_stream.groupdict()
            for param in params:
                matched_value = values[param[0]] if matched_stream else None
                result[param[0]] = matched_value or param[1]
        if extra:
            for pair in kv_pairs:
                result[pair[0]] = ''.join(pair[2:])
        if self._format and not (self._param_stream.strip() or any(result.values())):
            raise content.ParseException('No value supplied and no default value found.')
        return result
|
powerpak/pph2_and_rank | pph2_and_rank.py | Python | mit | 11,921 | 0.007557 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""\
pph2_and_rank.py
Submits a list of genes with SNPs to PolyPhen2 and orders them by
a score measuring the overall deleteriousness of the mutations,
normalized for protein length.
LICENSE
Written by Ted Pak for the Roth Laboratory (http://llama.mshri.on.ca).
Released under an MIT license; please see MIT-LICENSE.txt.
"""
USAGE = """\
Usage: %s [-h|--help] [--hg19] [--humdiv] [-q|--quiet] [-s|--sid SID]
[-o|--output OUTPUT] MUT_LIST_1 [MUT_LIST_2 ...]
Options:
-h|--help Displays this message.
--hg19 Uses build 19 of the human genome assembly instead of build 18.
--humdiv Uses the HumDiv PolyPhen2 classifier model instead of HumVar.
-o|--output OUTPUT Prints to file named OUTPUT instead of standard output.
-q|--quiet Do not print status messages to standard error.
-s|--sid SID Specify a pre-existing GGI SID.
Use this to continue from a prior run.
See README.mdown for complete details."""
import sys, urllib, urllib2, csv, time, math, os
from BeautifulSoup import BeautifulSoup
import re
import sqlite3
import socket
socket.setdefaulttimeout(10)
pph2_url = 'http://genetics.bwh.harvard.edu/cgi-bin/ggi/ggi2.cgi'
pph2_result_url = 'http://genetics.bwh.harvard.edu/ggi/pph2/%s/1/pph2-full.txt'
pph2_track_url = 'http://genetics.bwh.harvard.edu/cgi-bin/ggi/ggi2.cgi?_ggi_project=PPHWeb2'\
+ '&sid=%s&_ggi_target_manage=Refresh&_ggi_origin=manage'
# If the list is too big to fit in memory, change this to a filename.
db = sqlite3.connect(':memory:')
c = db.cursor()
d = db.cursor()
accid_cache = {}
quiet = False
steps = ['(1/7) Validating input', '(2/7) Mapping genomic SNPs',
'(3/7) Collecting output', '(4/7) Building MSA and annotating proteins',
'(5/7) Collecting output', '(6/7) Predicting',
'(7/7) Generating reports']
# Attempts to get the terminal width; will only succeed on a *NIX
try:
term_columns = map(int, os.popen('stty size', 'r').read().split())[1]
except Exception:
term_columns = 80
def setup_db():
    """Create the SQLite3 database that will hold interim results.

    Uses the module-level cursor ``c`` on the (in-memory by default)
    connection ``db``; one row per mutation, keyed by gene/accession.
    """
    c.execute('''CREATE TABLE t (
        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
        gene TEXT DEFAULT NULL,
        accid TEXT DEFAULT NULL,
        seqlen INTEGER DEFAULT NULL,
        seqpos INTEGER DEFAULT NULL,
        aa1 TEXT DEFAULT NULL,
        aa2 TEXT DEFAULT NULL,
        pph2_score REAL
    )''')
    # Indexes for the frequent lookups by gene name and accession ID.
    c.execute('CREATE INDEX idx_gene ON t (gene ASC)')
    c.execute('CREATE INDEX idx_accid ON t (accid ASC)')
def write_status(msg, num=None, denom=None):
    """ Writes a progress message to standard error.
        num and denom can be used to add a progress bar.
        Passing msg=True is a sentinel meaning "finish the previous
        status line" (prints ' => Done.'). Suppressed when the
        module-level ``quiet`` flag is set."""
    if quiet: return
    if msg is True:
        return sys.stderr.write(' => Done.\n')
    if num is not None:
        # Erase the current terminal line before redrawing the bar.
        sys.stderr.write('\r' + ''.join([' '] * term_columns) + '\r')
        if denom is not None: msg = "%s => %s" % (msg, progress(num, denom))
    sys.stderr.write(msg)
def progress(curr, finish, width=10):
    """Render a textual progress bar (terminal must support unicode)."""
    blocks = ['▂', '▃', '▄', '▅', '▆', '▇']
    # Clamp to avoid division by zero and negative progress.
    denom = max(finish, 1)
    fraction = max(curr, 0) / float(denom)
    frac_part, whole_part = math.modf(fraction * width)
    filled = int(whole_part)
    # A partially-filled cell is drawn with a proportionally shorter tick.
    tip = '' if fraction >= 1.0 else blocks[int(len(blocks) * frac_part)]
    bar = blocks[-1] * filled + tip + ' ' * (width - filled - 1)
    percent = str(int(fraction * 100)).rjust(3)
    return "%s%% [ %s ] %d/%d" % (percent, bar, curr, denom)
def spin(seconds):
    """Sleep for ``seconds`` seconds, showing a live countdown on stderr."""
    width = len(str(seconds))
    for remaining in range(seconds, 0, -1):
        msg = ' (%s)' % str(remaining).rjust(width)
        sys.stderr.write(msg)
        time.sleep(1)
        # Backspace over the message so the next count overwrites it.
        sys.stderr.write('\b' * len(msg))
def get_seqlen_and_gene(accid):
    """Gets the sequence length and gene name for a SwissProt/UniProt accession ID.

    Returns a two-element list [seqlen, gene_name]; either entry may be None
    if UniProt's record did not contain the corresponding line. Note the
    sequence length is returned as a string (regex group), not an int.
    Successful (fully-resolved) lookups are cached in ``accid_cache``.
    """
    cached = accid_cache.get(accid)
    if cached is not None:
        return cached
    result = [None, None]
    try:
        f = urllib.urlopen("http://www.uniprot.org/uniprot/%s.txt" % accid)
        lines = f.readlines()
    except (socket.timeout, IOError):
        # NOTE(review): RemoteException is not defined in this chunk --
        # presumably declared elsewhere in this file; verify.
        raise RemoteException("UniProt didn't respond to an HTTP request. That's odd.")
    for line in lines:
        # 'SQ   SEQUENCE  <n> AA;' carries the sequence length.
        match = re.match(r'^SQ\s+SEQUENCE\s+(\d+)\s*AA;', line)
        if match: result[0] = match.group(1)
        # 'GN   Name=<gene>;' carries the gene name.
        match = re.match(r'^GN Name=([^;]+);', line)
        if match: result[1] = match.group(1)
        # Only cache once both pieces of information were found.
        if result[0] is not None and result[1] is not None:
            accid_cache[accid] = result
            break
    return result
def submit_to_polyphen2(batch, humvar=True, hg19=False):
""" Submits a batch file of accids + SNPs to PolyPhen2 and returns
the session ID for the job."""
params = urllib.urlencode({
'_ggi_project': 'PPHWeb2',
'_ggi_origin': 'query',
'_ggi_target_pipeline': '1',
'MODELNAME': 'HumVar' if humvar else 'HumDiv',
'UCSCDB': 'hg19' if hg19 else 'hg18',
'SNPFUNC': 'm',
'NOTIFYME': '',
'SNPFILTER': '0',
'_ggi_batch': batch
})
doc = None
while doc is None:
try:
response = urllib2.urlopen(pph2_url, params)
doc = response.read()
except (socket.timeout, IOError): pass
soup = Beautif | ulSoup(doc)
sid_input = soup | .find('input', {'name': 'sid'})
if sid_input is None:
print doc
raise RemoteException("GGI returned a weird page without a SID.")
sid = sid_input['value']
return sid
def poll_for_polyphen2_results(sid):
""" Polls PolyPhen2's GGI web interface for updates on the progress of the job.
Once the job has completed the full result file is returned. """
curr_step = -1
max_tries = 10
tries = 0
wait_msg = "Waiting for PolyPhen2 results => %s"
done_msg = " => Done.\n"
while True:
params = urllib.urlencode({
'_ggi_project': 'PPHWeb2',
'_ggi_origin': 'manage',
'_ggi_target_manage': 'Refresh',
'sid': sid
})
doc = None
while doc is None:
try:
response = urllib2.urlopen(pph2_url, params)
doc = response.read()
except (socket.timeout, IOError): pass
soup = BeautifulSoup(doc)
status_td = soup.find('td', text=re.compile(r'^Batch \d+:'))
if status_td is None:
# We might be done, make sure this page is not an error page
if soup.find('b', text=re.compile(r'^Service Name:')): break
else:
tries += 1
if tries >= max_tries:
raise RemoteException('PolyPhen won\'t let us check the status right now.')
spin(15)
continue
pos_td = status_td.parent.parent.findAll('td')[1]
try: pos = int(pos_td.string)
except ValueError: pos = 0
shortened = re.sub(r'^Batch \d+:\s+', '', str(status_td))
this_step = steps.index(shortened)
if curr_step != this_step:
if curr_step is not -1:
write_status((wait_msg + done_msg) % steps[curr_step], True)
curr_step += 1
while curr_step < this_step: # Write out steps that were completed between refreshes.
write_status((wait_msg + done_msg) % steps[curr_step])
curr_step += 1
maxpos = pos
write_status(wait_msg % shortened, maxpos - pos, maxpos)
spin(15)
if curr_step != -1: write_status((wait_msg + done_msg) % steps[curr_step], True)
curr_step += 1
while curr_step < len(steps): # Write out steps that were completed before last refresh.
write_status((wait_msg + done_msg) % steps[curr_step])
curr_step += 1
result_url = pph2_result_url % sid
while True:
error = False
try:
write_status("Waiting for PolyPhen2 results => Waiting for download", True)
response = urlli |
yippeecw/sfa | sfa/server/xmlrpcapi.py | Python | mit | 6,069 | 0.008898 | #
# SFA XML-RPC and SOAP interfaces
#
import string
import xmlrpclib
# SOAP support is optional
# SOAP support is optional
try:
    import SOAPpy
    from SOAPpy.Parser import parseSOAPRPC
    from SOAPpy.Types import faultType
    from SOAPpy.NS import NS
    # BUG FIX: this import was garbled ("SOAPpy.SOAP | Builder");
    # buildSOAP lives in the SOAPpy.SOAPBuilder module.
    from SOAPpy.SOAPBuilder import buildSOAP
except ImportError:
    SOAPpy = None
####################
#from sfa.util.faults import SfaNotImplemented, SfaAPIError, SfaInvalidAPIMethod, SfaFault
from sfa.util.faults import SfaInvalidAPIMethod, SfaAPIError, SfaFault
from sfa.util.sfalogging import logger
####################
# See "2.2 Characters" in the XML specification:
#
# #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# avoiding
# [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDDF]
# Translation table mapping 7-bit control characters that are illegal in
# XML 1.0 onto '?'.  BUG FIX: the first assignment was garbled by a stray
# " | " between the name and '='.
invalid_xml_ascii = map(chr, range(0x0, 0x8) + [0xB, 0xC] + range(0xE, 0x1F))
xml_escape_table = string.maketrans("".join(invalid_xml_ascii), "?" * len(invalid_xml_ascii))
def xmlrpclib_escape(s, replace = string.replace):
    """
    xmlrpclib does not handle invalid 7-bit control characters. This
    function augments xmlrpclib.escape, which by default only replaces
    '&', '<', and '>' with entities.
    """

    # This is the standard xmlrpclib.escape function.
    # BUG FIX: the replacements had degraded to no-ops (replacing each
    # character with itself -- the entities were HTML-decoded at some
    # point); restore the XML entity escapes the docstring describes.
    s = replace(s, "&", "&amp;")
    s = replace(s, "<", "&lt;")
    s = replace(s, ">", "&gt;")

    # Replace invalid 7-bit control characters with '?'
    return s.translate(xml_escape_table)
def xmlrpclib_dump(self, value, write):
"""
xmlrpclib cannot marshal instances of subclasses of built-in
types. This function overrides xmlrpclib.Marshaller.__dump so that
any value that is an instance of one of its acceptable types is
marshalled as that type.
xmlrpclib also cannot handle invalid 7-bit control characters. See
above.
"""
# Use our escape function
args = [self, value, write]
if isinstance(value, (str, unicode)):
args.append(xmlrpclib_escape)
try:
# Try for an exact match first
f = self.dispatch[type(value)]
except KeyError:
raise
# Try for an isinstance() match
for Type, f in self.dispatch.iteritems():
if isinstance(value, Type):
f(*args)
return
raise TypeError, "cannot marshal %s objects" % type(value)
else:
f(*args)
# Monkey-patch xmlrpclib's Marshaller with our dumper so that subclasses
# of built-in types (and control characters) are handled; the mangled
# attribute name is how Python stores the private `__dump` method.
xmlrpclib.Marshaller._Marshaller__dump = xmlrpclib_dump
class XmlrpcApi:
"""
The XmlrpcApi class implements a basic xmlrpc (or soap) service
"""
protocol = None
def __init__ (self, encoding="utf-8", methods='sfa.methods'):
self.encoding = encoding
self.source = None
# flat list of method names
self.methods_module = methods_module = __import__(methods, fromlist=[methods])
self.methods = methods_module.all
self.logger = logger
def callable(self, method):
"""
Return a new instance of the specified method.
"""
# Look up method
if method not in self.methods:
raise SfaInvalidAPIMethod, method
# Get new instance of method
try:
classname = method.split(".")[-1]
module = __import__(self.methods_module.__name__ + "." + method, globals(), locals(), [classname])
callablemethod = getattr(module, classname)(self)
return getattr(module, classname)(self)
except (ImportError, AttributeError):
self.logger.log_exc("Error importing method: %s" % method)
raise SfaInvalidAPIMethod, method
def call(self, source, method, *args):
"""
Call the named method from the specified source with the
specified arguments.
"""
function = self.callable(method)
function.source = source
self.source = source
return function(*args)
def handle(self, source, data, method_map):
"""
Handle an XML-RPC or SOAP request from the specified source.
"""
# Parse request into method name and arguments
try:
interface = xmlrpclib
self.protocol = 'xmlrpclib'
(args, method) = xmlrpclib.loads(data)
if method_map.has_key(method):
method = method_map[method]
methodresponse = True
except Exception, e:
if SOAPpy is not None:
self.protocol = 'soap'
interface = SOAPpy
(r, header, body, attrs) = parseSOAPRPC(data, header = 1, body = 1, attrs = 1)
method = r._name
args = r._aslist()
# XXX Support named arguments
else:
raise e
try:
result = self.call(source, method, *args)
except SfaFault, fault:
result = fault
self.logger.log_exc("XmlrpcApi.handle has caught Exception")
except Exception, fault:
self.logger.log_exc("XmlrpcApi.handle has caught Exception")
result = SfaAPIError(fault)
# Return result
response = self.prepare_response(result, method)
return response
def prepare_response(self, result, method=""):
"""
convert result to a valid xmlrpc or soap response
"""
if self.protocol == 'xmlrpclib':
if not isinstance(result, SfaFault):
result = (result,)
response = xmlrpclib.dumps(result, methodresponse = True, encoding = self.encoding, allow_none = 1)
elif self.protocol == 'soap':
if isinstance(result, Exception):
result = faultParameter(NS.ENV_T + ":Server", "Method Failed", method)
result._setDetail("Fault %d: %s" % (result.faultCode, result.faultString))
else:
response = buildSOAP(kw = {'%sResponse' % method: {'Result': result}}, encoding = self.encoding)
else:
if isinstance(result, Exception):
raise result
return response
|
import logging
from functools import wraps

from requests.exceptions import HTTPError

from django.utils.decorators import available_attrs
from django.conf import settings

from authclient import _get_user_session_key, SESSION_KEY
from authclient.client import auth_client

logger = logging.getLogger('authclient')
def app_auth_exempt(function=None):
    """Mark a view as exempt from application authentication.

    Usable both as ``@app_auth_exempt`` and ``@app_auth_exempt()``.  The
    wrapped view behaves identically but carries an ``app_auth_exempt``
    marker attribute that middleware can inspect.
    """
    def decorator(view_func):
        @wraps(view_func)
        # BUG FIX: this inner def line was garbled by a stray leading "| ".
        def _wrapped(request, *args, **kwargs):
            return view_func(request, *args, **kwargs)
        _wrapped.app_auth_exempt = True
        return _wrapped
    if function:
        return decorator(function)
    return decorator
def refresh_jwt(view_func):
    """
    Decorator that refreshes the session's JWT after the view runs.

    If the session holds a resource token, it is exchanged for a fresh
    one via the auth service; on HTTP failure the old token is kept and
    the failure is only logged.  (The previous docstring, copied from a
    cache decorator, did not describe this function.)
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view_func(request, *args, **kwargs):
        # Run the view first; the refresh happens on the way out so the
        # response is returned even if the refresh fails.
        response = view_func(request, *args, **kwargs)
        try:
            resource_token = _get_user_session_key(request)
        except KeyError:
            # No token in the session -- nothing to refresh.
            pass
        else:
            try:
                resource_token = auth_client.token_refresh.call(
                    payload={'token': resource_token},
                    headers={'X-APPLICATION': settings.AUTH_API_TOKEN},
                )['resource_token']
            except HTTPError:
                # Best-effort: keep the stale token rather than failing
                # the request.
                logger.debug('Failed to refresh the JWT.')
            else:
                request.session[SESSION_KEY] = resource_token
        return response
    return _wrapped_view_func
|
import pytest

from vk.users import UserCareer
def test_user_career(factory):
    """A full career entry is parsed into a UserCareer with all fields."""
    payload = factory('user_career.json')['career'][0]
    career = UserCareer.from_json(None, payload)

    assert isinstance(career, UserCareer)
    expected = {
        'group': 22822305,
        'company': None,
        'country': 1,
        'city': 2,
        'city_name': None,
        'start': 2006,
        'end': 2014,
        'position': 'Генеральный директор',
    }
    for attr, value in expected.items():
        assert getattr(career, attr) == value
    assert career.id == career.group
def test_without_group_id(factory):
    """Entries lacking 'group_id' fall back to the company hash as id."""
    # BUG FIX: the fixture line ended with a stray " |" (syntax error).
    career_json = factory('user_career.json')['career'][1]
    career = UserCareer.from_json(None, career_json)

    assert 'group_id' not in career_json
    assert career.group is None
    assert career.company == 'Telegram'
    assert career.id == hash(career.company)
# BUG FIX: the function name was garbled ("test_wit | hout_city_id").
def test_without_city_id(factory):
    """Entries may carry 'city_name' instead of 'city_id'."""
    career_json = factory('user_career.json')['career'][0]
    del career_json['city_id']
    career_json['city_name'] = 'Moscow'
    career = UserCareer.from_json(None, career_json)

    assert 'city_id' not in career_json
    assert career.city is None
    assert career.city_name == 'Moscow'
|
# AI Classes
'''
Initiative: Either I win or I lose, if I win I go second, so I know my
opponents moves already- otherwise I have to try and predict them
Move Phase: Number of possible moves per unit depends on speeds and
world mechanics. It can easily be 100+ per unit. If we lost the
initiative then we need to try and predict the opponents moves first
as well.
Shoot Phase: Given disposition we can logically pick the best move given
the previous phase. This approach makes sense, but having a fixed
disposition at all times does not. If a mech will be destroyed we always
pick the reckless move, which is start, but more conditions should be
checked to adjust dispositions for better results. If we are able to run
turns with enough processing power we can calculate the best
disposition by doing every one and every turn thereafter
Dispositions
Reckless: Ignores Heat, always maximises damage dealt but still favours
moves with less damage taken
Aggressive: Aims for dealt/taken ratio 1:2 with shutdown < 30%, avg dam > 2 above
Neutral: Aims for dealt/taken ratio 1:1 with shutdown < 30%, avg dam > 4 above
Defensive: Aims for ratio 2:1 with shutdown < 30%, never shoots above
Cowardly: Never shoots with shutdown > 0%, reverse of reckless
'''
import pickle
from copy import deepcopy
from random import randint
from player import *
import btlib
# Movement types recognised by the AI move generator.
# NOTE: a `global` statement at module scope is a no-op, so it was removed
# (the original even carried a "consider not using a global" comment).
m_types = ['stationary', 'walk', 'run', 'jump']
# Incomplete
'''
class AI_Calc_Instance(AI_Player):
def __init__(self, parent, order, turns, game_data):
self.parent = parent
self.order = order
name = parent + '-child-' + str(order)
super( AI_Calc_Instance, self ).__init__( name, game_data['mechs'],
game_data['disposition'], game_data['World'],
game_data['MAX_TURN_CALCS'] )
self.name = name
self.infer_opponent = game_data['infer']
self.memory = game_data['memory']
#self.best_move = calc_turns( turns,
def calc_turns(self, move_list, enemy_units):
# Finds the best move for each specified disposition
best_move = {}
for disposition in self.all_disps:
best_move[disposition]['moves'] = None
best_move[disposition]['score'] = None
while len(move_list) > 0:
# Remove move set from list
this_move = move_list.pop(0)
for disp in self.all_disps:
# Calc expected result from my and opponents shoot phase
My_Shots, Her_Shots = self.get_shoot_phase( this_move,
enemy_units, disp )
# Evaluate success based on seperate function
this_score = self.evaluate_turn(My_Shots, Her_Shots)
# Note: currently we only store 1 turn
if this_score > best_move[disp]['score']:
best_move[disp]['moves'] = this_move
best_move[disp]['score'] = this_score
'''
# Memory is list of previous turns contained in dicts:
# { 'dmg_dealt': int, 'dmg_taken': int, 'opponent heats': [int, int..] }
# infer_opponent is the disposition prediction based on memory
class AI_Player(Player):
def __init__(self, name, mechs, disposition): #, World, MAX_TURN_CALCS):
super(AI_Player, self).__init__(name, mechs)
unique_id = 0
self.unique_ids = {}
for unit in self.units_live:
unit.unique_id = unique_id
self.unique_ids[unique_id] = unit
unique_id += 1
self.is_ai = True
self.dispositio | n = disposition
self.all_disps = [ 'reckless', 'aggressive', 'neutral', 'defensive',
'cowardly' ]
self.World = None
self.infer_opponent = 'neutral'
self.memory = []
self._all_turn_movements = []
self.my_turn = None
# Per Instance
#self.MAX_TURN_CALCS = MAX_TURN_CALC
#self.filename = 'ai-' + name
    def get_turn(self, Opponent, World):
        """Compute this player's turn: enumerate every possible move set,
        score each against the opponent, and store the winner in
        ``self.my_turn``.

        NOTE(review): `AI_Move_Phase`, `get_all_moves_as_list` and
        `recurse_get_moves` are defined elsewhere -- their contracts are
        inferred from usage here; confirm before refactoring.
        """
        self.World = World
        Move_Phase = AI_Move_Phase( self.units_live, Opponent.units_live, World )
        poss_moves = Move_Phase.moves
        move_list = self.get_all_moves_as_list(poss_moves)
        # This is a huge list of all possible unique moves for entire
        # set of units. List of lists of dicts {move info}
        self._all_turn_movements = [] # clear some place else
        self.recurse_get_moves( [], move_list )
        # NOTE(review): turn_calcs is never used on this path -- leftover
        # from the disabled MAX_TURN_CALCS sampling code kept below.
        turn_calcs = 0
        best_move = self.calc_turns(self._all_turn_movements, Opponent.units_live)
        # Dynamic disposition not yet implemented
        self.my_turn = best_move[self.disposition]
        self.my_turn['moves'] = self.my_turn['moves'][0]
        # Shooting uses duplicates for calcing, we need to remap
        '''
        print self.my_turn
        for Unit in self.my_turn['moves'].keys():
            for Unique in self.units_live:
                if Unit.ai_id == Unique.ai_id:
                    self.my_turn['moves'][Unique] = (
                        self.my_turn['moves'].pop(Unit) )
        '''
        '''
        while len(self._all_turn_movements) > MAX_TURN_CALCS:
        while ( len(self._all_turn_movements) > 0 and
                turn_calcs < self.MAX_TURN_CALCS ):
            turn_calcs += 1
            # choose any random turn option left in list
            choice = randint( 0, len(self._all_turn_movements) - 1 )
            moves = move_list.pop(choice)
            My_Shoot_Phase, Opponent_Shoot_Phase = get_shoot_phase()
        '''
def calc_turns(self, move_list, enemy_units):
# Finds the best move for each specified disposition
best_move = {}
for disposition in self.all_disps:
best_move[disposition] = {}
best_move[disposition]['moves'] = None
best_move[disposition]['score'] = None
while len(move_list) > 0:
# Remove move set from list
this_move = move_list.pop(0)
for disp in self.all_disps:
# Calc expected result from my and opponents shoot phase
My_Shots, Her_Shots = self.get_shoot_phase( this_move,
enemy_units, disp )
# Evaluate success based on seperate function
this_score = self.evaluate_turn(My_Shots, Her_Shots)
# Note: currently we only store 1 turn
if this_score > best_move[disp]['score']:
best_move[disp]['moves'] = this_move
best_move[disp]['score'] = this_score
best_move[disp]['shooting'] = My_Shots
return best_move
# This function is currently entirely rudimentary, but is a placeholder
# for future development. The turn_score is the returned rating.
def evaluate_turn(self, My_Shoot_Phase, Opponent_Shoot_Phase):
net_damage = ( My_Shoot_Phase.total_dam_dealt -
Opponent_Shoot_Phase.total_dam_dealt )
turn_score = net_damage
return turn_score
# moves is a 1-entry list of dicts of single unit positions
def get_shoot_phase(self, moves, enemy_units, disposition):
my_units = [] #deepcopy(self.units_live)
# the keys are objects, this may cause you problems- look at hashing
#if len(moves) != 1:
# print "Error! bad move type for unit: ", moves
# raise ValueError
# Only 1 entry now, from previous giant list
moves = moves[0]
for unit in moves.keys():
# Error Check
m_type = moves[unit].keys()[0]
#if len(moves[unit][m_type]) != 1:
# print "Error! bad move coord for unit: ", moves[unit][m_type]
# raise ValueError
new_loc = moves[unit][m_type] #[0]
# Hopefully this is the obj, hence caps
This_Unit = deepcopy(unit) #my_units[unit]
# Note: calcing dist again here is repetition. Fix sometime.
dist, ang = self. |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'ZerigoDNSDriver'
]
import copy
import base64
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from xml.etree import ElementTree as ET
from libcloud.utils.misc import merge_valid_keys, get_new_obj
from libcloud.utils.xml import findtext, findall
from libcloud.common.base import XmlResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.types import MalformedResponseError
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
API_HOST = 'ns.zerigo.com'
API_VERSION = '1.1'
API_ROOT = '/api/%s/' % (API_VERSION)
VALID_ZONE_EXTRA_PARAMS = ['notes', 'tag-list', 'ns1', 'slave-nameservers']
VALID_RECORD_EXTRA_PARAMS = ['notes', 'ttl', 'priority']
# Number of items per page (maximum limit is 1000)
ITEMS_PER_PAGE = 100
class ZerigoError(LibcloudError):
    """Raised when the Zerigo API reports one or more error messages."""

    def __init__(self, code, errors):
        self.code = code
        self.errors = [] if not errors else errors

    def __str__(self):
        joined = ', '.join(self.errors)
        return 'Errors: %s' % (joined)

    def __repr__(self):
        return ('<ZerigoError response code=%s, errors count=%s>' % (
            self.code, len(self.errors)))
class ZerigoDNSResponse(XmlResponse):
    """Response class mapping Zerigo HTTP errors onto libcloud faults."""

    def success(self):
        # Zerigo signals success with 200/201/202 only.
        return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]

    def parse_error(self):
        """Raise the appropriate fault for a non-success response."""
        status = int(self.status)

        if status == 401:
            if not self.body:
                raise InvalidCredsError(str(self.status) + ': ' + self.error)
            else:
                raise InvalidCredsError(self.body)
        elif status == 404:
            # The connection context tells us which object the request
            # referred to, so we can raise the precise "missing" error.
            context = self.connection.context
            if context['resource'] == 'zone':
                raise ZoneDoesNotExistError(value='', driver=self,
                                            zone_id=context['id'])
            elif context['resource'] == 'record':
                raise RecordDoesNotExistError(value='', driver=self,
                                              record_id=context['id'])
        elif status != 503:
            try:
                body = ET.XML(self.body)
            # BUG FIX: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; catch Exception instead.
            except Exception:
                raise MalformedResponseError('Failed to parse XML',
                                             body=self.body)

            errors = []
            for error in findall(element=body, xpath='error'):
                errors.append(error.text)

            raise ZerigoError(code=status, errors=errors)

        return self.body
class ZerigoDNSConnection(ConnectionUserAndKey):
    """HTTPS connection to the Zerigo API using HTTP basic authentication."""

    host = API_HOST
    secure = True
    responseCls = ZerigoDNSResponse

    def add_default_headers(self, headers):
        # Basic auth: base64("user_id:key")
        credentials = b('%s:%s' % (self.user_id, self.key))
        headers['Authorization'] = 'Basic %s' % (
            base64.b64encode(credentials).decode('utf-8'))
        return headers

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        headers = headers or {}
        params = params or {}

        if method in ("POST", "PUT"):
            # Writes carry an XML payload; note this intentionally replaces
            # any caller-supplied headers (original behavior preserved).
            headers = {'Content-Type': 'application/xml; charset=UTF-8'}

        return super(ZerigoDNSConnection, self).request(action=action,
                                                        params=params,
                                                        data=data,
                                                        method=method,
                                                        headers=headers)
class ZerigoDNSDriver(DNSDriver):
type = Provider.ZERIGO
name = 'Zerigo DNS'
website = 'http://www.zerigo.com/'
connectionCls = ZerigoDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.REDIRECT: 'REDIRECT',
RecordType.TXT: 'TXT',
RecordType.SRV: 'SRV',
RecordType.NAPTR: 'NAPTR',
RecordType.NS: 'NS',
RecordType.PTR: 'PTR',
RecordType.SPF: 'SPF',
RecordType.GEO: 'GEO',
RecordType.URL: 'URL',
}
    def iterate_zones(self):
        # Lazily page through all zones (paging handled by _get_more).
        return self._get_more('zones')
    def iterate_records(self, zone):
        # Lazily page through the records that belong to ``zone``.
        return self._get_more('records', zone=zone)
def get_zone(self, zone_id):
path = API_ROOT + 'zones/%s.xml' % (zone_id)
self.connection.set_context({'resource': 'zone', 'id': zone_id})
data = self.connection.request(path).object
zone = self._to_zone(elem=data)
return zone
def get_record(self, zone_id, record_id):
zone = self.get_zone(zone_id=zone_id)
self.connection.set_context({'resource': 'record', 'id': record_id})
| path = API_ROOT + 'hosts/%s.xml' % (record_id)
data = self.connection.request(path).object
record = self._to_record(elem=data, zone=zone)
return record
    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        Provider API docs:
        https://www.zerigo.com/docs/apis/dns/1.1/zones/create

        @inherits: :class:`DNSDriver.create_zone`
        """
        path = API_ROOT + 'zones.xml'
        zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
                                       extra=extra)
        # POST the zone XML and convert the returned document to a Zone.
        data = self.connection.request(action=path,
                                       data=ET.tostring(zone_elem),
                                       method='POST').object
        zone = self._to_zone(elem=data)
        return zone
    def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
        """
        Update an existing zone.

        Provider API docs:
        https://www.zerigo.com/docs/apis/dns/1.1/zones/update

        @inherits: :class:`DNSDriver.update_zone`
        """
        # Zerigo does not allow renaming a zone.
        if domain:
            raise LibcloudError('Domain cannot be changed', driver=self)

        path = API_ROOT + 'zones/%s.xml' % (zone.id)
        zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
                                       extra=extra)
        response = self.connection.request(action=path,
                                           data=ET.tostring(zone_elem),
                                           method='PUT')
        # NOTE(review): `assert` is stripped under `python -O`; an explicit
        # status check that raises would be safer -- confirm before changing.
        assert response.status == httplib.OK

        # Build an updated local Zone object mirroring the remote change.
        merged = merge_valid_keys(params=copy.deepcopy(zone.extra),
                                  valid_keys=VALID_ZONE_EXTRA_PARAMS,
                                  extra=extra)
        updated_zone = get_new_obj(obj=zone, klass=Zone,
                                   attributes={'type': type,
                                               'ttl': ttl,
                                               'extra': merged})
        return updated_zone
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
Provider API docs:
https://www.zerigo.com/docs/apis/dns/1.1/hosts/create
@inherits: :class:`DNSDriver.create_record`
"""
path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
record_elem = self._to_record_elem(name=name, type=type, data=data,
extra=extra)
response = self.connect |
# -*- coding: iso-8859-1 -*-
# Natural Language Toolkit: York-Toronto-Helsinki Parsed Corpus of Old English Prose (YCOE)
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Selina Dennis <selina@tranzfusion.net>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old
English Prose (YCOE), a 1.5 million word syntactically-annotated
corpus of Old English prose texts. The corpus is distributed by the
Oxford Text Archive: http://www.ota.ahds.ac.uk/ It is not included
with NLTK.
The YCOE corpus is divided into 100 files, each representing
an Old English prose text. Tags used within each text complies
to the YCOE standard: http://www-users.york.ac.uk/~lang22/YCOE/YcoeHome.htm
"""
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
from nltk.tokenize import RegexpTokenizer
from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
from nltk.corpus.reader.tagged import TaggedCorpusReader
from string import split
import os, re
from nltk.internals import deprecated
class YCOECorpusReader(CorpusReader):
    """
    Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old
    English Prose (YCOE), a 1.5 million word syntactically-annotated
    corpus of Old English prose texts.
    """
    def __init__(self, root):
        self._psd_reader = YCOEParseCorpusReader(
            os.path.join(root, 'psd'), '.*', '.psd')
        self._pos_reader = YCOETaggedCorpusReader(
            os.path.join(root, 'pos'), '.*', '.pos')

        # Make sure we have a consistent set of items:
        documents = set(f[:-4] for f in self._psd_reader.files())
        if set(f[:-4] for f in self._pos_reader.files()) != documents:
            raise ValueError('Items in "psd" and "pos" '
                             'subdirectories do not match.')

        files = sorted(['%s.psd' % doc for doc in documents] +
                       ['%s.pos' % doc for doc in documents])
        CorpusReader.__init__(self, root, files)
        self._documents = tuple(sorted(documents))

    def documents(self, files=None):
        """
        Return a list of document identifiers for all documents in
        this corpus, or for the documents with the given file(s) if
        specified.
        """
        if files is None:
            return self._documents
        if isinstance(files, basestring):
            files = [files]
        for f in files:
            if f not in self._files:
                # BUG FIX: the message used to interpolate the whole
                # `files` list instead of the offending id `f`.
                raise KeyError('File id %s not found' % f)
        # Strip off the '.pos' and '.psd' extensions.
        return sorted(set(f[:-4] for f in files))

    def files(self, documents=None):
        """
        Return a list of file identifiers for the files that make up
        this corpus, or that store the given document(s) if specified.
        """
        if documents is None:
            return self._files
        elif isinstance(documents, basestring):
            documents = [documents]
        return sorted(set(['%s.pos' % doc for doc in documents] +
                          ['%s.psd' % doc for doc in documents]))

    def _getfiles(self, documents, subcorpus):
        """
        Helper that selects the appropriate files for a given set of
        documents from a given subcorpus (pos or psd).
        """
        if documents is None:
            documents = self._documents
        else:
            if isinstance(documents, basestring):
                documents = [documents]
            for document in documents:
                if document not in self._documents:
                    if document[-4:] in ('.pos', '.psd'):
                        raise ValueError(
                            'Expected a document identifier, not a file '
                            'identifier. (Use corpus.documents() to get '
                            'a list of document identifiers.')
                    else:
                        raise ValueError('Document identifier %s not found'
                                         % document)
        return ['%s.%s' % (d, subcorpus) for d in documents]

    # Delegate to one of our two sub-readers:
    def words(self, documents=None):
        return self._pos_reader.words(self._getfiles(documents, 'pos'))
    def sents(self, documents=None):
        return self._pos_reader.sents(self._getfiles(documents, 'pos'))
    def paras(self, documents=None):
        return self._pos_reader.paras(self._getfiles(documents, 'pos'))
    def tagged_words(self, documents=None):
        return self._pos_reader.tagged_words(self._getfiles(documents, 'pos'))
    def tagged_sents(self, documents=None):
        return self._pos_reader.tagged_sents(self._getfiles(documents, 'pos'))
    def tagged_paras(self, documents=None):
        return self._pos_reader.tagged_paras(self._getfiles(documents, 'pos'))
    def parsed_sents(self, documents=None):
        return self._psd_reader.parsed_sents(self._getfiles(documents, 'psd'))

    #{ Deprecated since 0.8
    @deprecated("Use .raw() or .words() or .tagged_words() or "
                ".parsed_sents() instead.")
    def read(self, items=None, format='parsed'):
        if format == 'parsed': return self.parsed_sents(items)
        if format == 'raw': return self.raw(items)
        if format == 'tokenized': return self.words(items)
        if format == 'tagged': return self.tagged_words(items)
        if format == 'chunked': raise ValueError('no longer supported')
        raise ValueError('bad format %r' % format)
    @deprecated("Use .parsed_sents() instead.")
    def parsed(self, items=None):
        return self.parsed_sents(items)
    @deprecated("Use .words() instead.")
    def tokenized(self, items=None):
        return self.words(items)
    @deprecated("Use .tagged_words() instead.")
    def tagged(self, items=None):
        return self.tagged_words(items)
    @deprecated("Operation no longer supported.")
    def chunked(self, items=None):
        raise ValueError('format "chunked" no longer supported')
    #}
class YCOEParseCorpusReader(BracketParseCorpusReader):
    """Bracket-parse reader variant that drops (CODE ...) and (ID ...)
    nodes before parsing, and skips trees that become empty as a result."""
    def _parse(self, t):
        stripped = re.sub(r'(?u)\((CODE|ID)[^\)]*\)', '', t)
        if re.match(r'\s*\(\s*\)\s*$', stripped):
            return None
        return BracketParseCorpusReader._parse(self, stripped)
class YCOETaggedCorpusReader(TaggedCorpusReader):
    # NOTE(review): YCOECorpusReader.__init__ instantiates this class with
    # three positional arguments (root, '.*', '.pos') while this signature
    # only accepts two -- confirm the intended signature.
    def __init__(self, root, items):
        # BUG FIX: the pattern read r'\(?<=/\.)...' -- an escaped '(' with
        # an unbalanced ')', which makes re.compile fail.  The intent is a
        # lookbehind splitting sentences after '/.' tokens, while also
        # discarding _CODE and _ID tokens.
        gaps_re = r'(?u)(?<=/\.)\s+|\s*\S*_CODE\s*|\s*\S*_ID\s*'
        sent_tokenizer = RegexpTokenizer(gaps_re, gaps=True)
        TaggedCorpusReader.__init__(self, root, items, sep='_',
                                    sent_tokenizer=sent_tokenizer)
#: A list of all documents and their titles in ycoe.
documents = {
'coadrian.o34': 'Adrian and Ritheus',
'coaelhom.o3': 'Ælfric, Supplemental Homilies',
'coaelive.o3': 'Ælfric\'s Lives of Saints',
'coalcuin': 'Alcuin De virtutibus et vitiis',
'coalex.o23': 'Alexander\'s Letter to Aristotle',
'coapollo.o3': 'Apollonius of Tyre',
'coaugust': 'Augustine',
'cobede.o2': 'Bede\'s History of the English Church',
'cobenrul.o3': 'Benedictine Rule',
'coblick.o23': 'Blickling Homilies',
'coboeth.o2': 'Boethius\' Consolation of Philosophy',
'cobyrhtf.o3': 'Byrhtferth\'s Manual',
'cocanedgD': 'Canons of Edgar (D)',
'cocanedgX': 'Canons of Edgar (X)',
'cocathom1.o3': 'Ælfric\'s Catholic Homilies I',
'cocathom2.o3': 'Ælfric\'s Catholic Homilies II',
'cochad.o24': 'Saint Chad',
'cochdrul': 'Chrodegang of Metz, Rule',
'cochristoph': 'Saint Christopher',
'cochronA.o23': 'Anglo-Saxon Chronicle A',
'cochronC': 'Anglo-Saxon Chronicle C',
'cochronD': 'Anglo-Saxon Chronicle D',
'cochronE.o34': 'Anglo-Saxon Chronicle E',
'cocura.o2': 'Cura Pastoralis',
'cocuraC': 'Cura Pastoralis (Cotton)',
'codicts.o34': 'Dicts of Cato',
'codocu1.o1': 'Documents 1 (O1)',
'codocu2.o12': 'Documents 2 (O1/O2)',
'codocu2.o2': 'Documents 2 (O2)',
'codocu3.o23': 'Documents 3 (O2 |
from random import randint

# Pick the secret number once per run.
secret = randint(1, 10)
print("Welcome!")
guess = 0
while guess != secret:
    g = input("Guess the number: ")
    guess = int(g)
    if guess == secret:
        print("You win!")
    elif guess > secret:
        # Flattened the nested else/if into elif; behavior unchanged.
        print("Too High")
    else:
        # BUG FIX: this message was garbled (' | Too low').
        print("Too low")
print("Game over!")
|
from erukar.system.engine import MaterialGood


# BUG FIX: the class statement and the WeightPerSingle attribute were
# garbled by stray " | " separators.
class AnimalHide(MaterialGood):
    # Display name and description used by the engine.
    BaseName = "Animal Hide"
    BriefDescription = "an animal's hide"
    # Economy attributes: price in base currency, weight per unit.
    BasePricePerSingle = 13
    WeightPerSingle = 0.9
|
"""
URLconf for registration and activation, using the paypal_registration backend
If the default behavior of these views is acceptable to you, simply
use a line like this in your root URLconf to set up the default URLs
for registration::
(r'^accounts/', include('registration.backends.default.urls')),
"""
# BUG FIX: two import lines were garbled ("django.conf.ur | ls" and
# "csr | f_exempt").
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
try:
    from django.views.decorators.csrf import csrf_exempt
except ImportError:
    # Older Django versions keep csrf_exempt in the middleware module.
    from django.contrib.csrf.middleware import csrf_exempt

from registration.views import activate
from registration.views import register
urlpatterns = patterns('',
    # Activation: the static "complete" page is listed first so it is not
    # swallowed by the activation-key pattern below it.
    url(r'^activate/complete/$',
        direct_to_template,
        {'template': 'registration/activation_complete.html'},
        name='registration_activation_complete'),
    url(r'^activate/(?P<activation_key>\w+)/$',
        activate,
        {'backend': 'paypal_registration.backend.PaypalBackend'},
        name='registration_activate'),
    # Registration views, wired to the Paypal backend.
    url(r'^register/$',
        register,
        {'backend': 'paypal_registration.backend.PaypalBackend'},
        name='registration_register'),
    url(r'^register/complete/$',
        direct_to_template,
        {'template': 'registration/registration_complete.html'},
        name='registration_complete'),
    url(r'^register/closed/$',
        direct_to_template,
        {'template': 'registration/registration_closed.html'},
        name='registration_disallowed'),
    # Paypal payment flow: redirect page, return-from-Paypal confirmation
    # (CSRF-exempt because Paypal POSTs to it), and the IPN listener.
    url(r'^pay_with_paypal/(?P<username>\w+)/$',
        'paypal_registration.views.pay_with_paypal',
        name='pay_with_paypal'),
    url(r'^payment_confirmation/$',
        csrf_exempt(direct_to_template),
        {'template': 'registration/confirm_payment_received.html'},
        name="confirm_payment_received"),
    url(r'^paypal_IPN_notify/$',
        'paypal_registration.views.paypal_instant_notify',
        name='paypal_notify'),
    (r'', include('registration.auth_urls')),
    )
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Support for programmatically generating markup streams from Python code using
a very simple syntax. The main entry point to this module is the `tag` object
(which is actually an instance of the ``ElementFactory`` class). You should
rarely (if ever) need to directly import and use any of the other classes in
this module.
Elements can be created with the `tag` object using attribute access. For
example:
>>> doc = tag.p('Some text and ', tag.a('a link', href='http://example.org/'), '.')
>>> doc
<Element "p">
This produces an `Element` instance which can be further modified to add child
nodes and attributes. This is done by "calling" the element: positional
arguments are added as child nodes (alternatively, the `Element.append` method
can be used for that purpose), whereas keywords arguments are added as
attributes:
>>> doc(tag.br)
<Element "p">
>>> print(doc)
<p>Some text and <a href="http://example.org/">a link</a>.<br/></p>
If an attribute name collides with a Python keyword, simply append an underscore
to the name:
>>> doc(class_='intro')
<Element "p">
>>> print(doc)
<p class="intro">Some text and <a href="http://example.org/">a link</a>.<br/></p>
As shown above, an `Element` can easily be directly rendered to XML text by
printing it or using the Python ``str()`` function. This is basically a
shortcut for converting the `Element` to a stream and serializing that
stream:
>>> stream = doc.generate()
>>> stream #doctest: +ELLIPSIS
<genshi.core.Stream object at ...>
>>> print(stream)
<p class="intro">Some text and <a href="http://example.org/">a link</a>.<br/></p>
The `tag` object also allows creating "fragments", which are basically lists
of nodes (elements or text) that don't have a parent element. This can be useful
for creating snippets of markup that are attached to a parent element later (for
example in a template). Fragments are created by calling the `tag` object, which
returns an object of type `Fragment`:
>>> fragment = tag('Hello, ', tag.em('world'), '!')
>>> fragment
<Fragment>
>>> print(fragment)
Hello, <em>world</em>!
"""
from genshi.core import Attrs, Markup, Namespace, QName, Stream, \
START, END, TEXT
__all__ = ['Fragment', 'Element', 'ElementFactory', 'tag']
__docformat__ = 'restructuredtext en'
class Fragment(object):
    """A markup fragment: an ordered list of element and text nodes that
    has no tag of its own.
    """
    __slots__ = ['children']

    def __init__(self):
        """Create an empty fragment."""
        self.children = []

    def __add__(self, other):
        # `frag + node` builds a brand-new fragment containing both operands.
        return Fragment()(self, other)

    def __call__(self, *args):
        """Append every positional argument as a child node and return the
        fragment itself so calls can be chained.
        :see: `append`
        """
        for node in args:
            self.append(node)
        return self

    def __iter__(self):
        return self._generate()

    def __repr__(self):
        return '<%s>' % type(self).__name__

    def __str__(self):
        return str(self.generate())

    def __unicode__(self):
        return str(self.generate())

    def __html__(self):
        return Markup(self.generate())

    def append(self, node):
        """Append an element or string as child node.
        :param node: the node to append; can be an `Element`, `Fragment`, or a
                     `Stream`, or a Python string or number
        """
        if isinstance(node, (Stream, Element, str, int, float)):
            # Known stream/leaf types: skip the generic iterability probe
            # below for speed.
            self.children.append(node)
        elif isinstance(node, Fragment):
            # Fragments are flattened into this one rather than nested.
            self.children.extend(node.children)
        elif node is not None:
            try:
                for item in node:
                    self.append(item)
            except TypeError:
                # Not iterable after all: store it as a single child.
                self.children.append(node)

    def _generate(self):
        # Walk the children lazily, flattening nested fragments and streams
        # into one event sequence; anything else becomes a TEXT event.
        for node in self.children:
            if isinstance(node, Fragment):
                for event in node._generate():
                    yield event
            elif isinstance(node, Stream):
                for event in node:
                    yield event
            else:
                text = node if isinstance(node, str) else str(node)
                yield TEXT, text, (None, -1, -1)

    def generate(self):
        """Return a markup event stream for the fragment.
        :rtype: `Stream`
        """
        return Stream(self._generate())
def _kwargs_to_attrs(kwargs):
    """Convert keyword arguments into an `Attrs` instance.

    Trailing underscores are stripped (so ``class_`` maps to ``class``) and
    remaining underscores become dashes.  Arguments whose value is ``None``
    are dropped, and only the first occurrence of each normalized name is
    kept.
    """
    attrs = []
    seen = set()
    # No need to copy kwargs.items() into a list: the dict is not mutated.
    for name, value in kwargs.items():
        name = name.rstrip('_').replace('_', '-')
        if value is not None and name not in seen:
            attrs.append((QName(name), str(value)))
            seen.add(name)
    return Attrs(attrs)
class Element(Fragment):
"""Simple XML output generator based on the builder pattern.
Construct XML elements by passing the tag name to the constructor:
>>> print((Element('strong')))
<strong/>
Attributes can be specified using keyword arguments. The values of the
arguments will be converted to strings and any special XML characters
escaped:
>>> print((Element('textarea', rows=10)))
<textarea rows="10"/>
>>> print((Element('span', title='1 < 2')))
<span title="1 < 2"/>
>>> print((Element('span', title='"baz"')))
<span title=""baz""/>
The " character is escaped using a numerical entity.
The order in which attributes are rendered is undefined.
If an attribute value evaluates to `None`, that attribute is not included
in the output:
>>> print((Element('a', name=None)))
<a/>
Attribute names that conflict with Python keywords can be specified by
appending an underscore:
>>> print((Element('div', class_='warning')))
<div class="warning"/>
Nested elements can be added to an element using item access notation.
The call notation can also be used for this and for adding attributes
using keyword arguments, as one would do in the constructor.
>>> print((Element('ul')(Element('li'), Element('li'))))
<ul><li/><li/></ul>
>>> print((Element('a')('Label')))
<a>Label</a>
>>> print((Element('a')('Label', href="target")))
<a href="target">Label</a>
Text nodes can be nested in an element by adding strings instead of
elements. Any special characters in the strings are escaped automatically:
>>> print((Element('em')('Hello world')))
<em>Hello world</em>
>>> print((Element('em')(42)))
<em>42</em>
>>> print((Element('em')('1 < 2')))
<em>1 < 2</em>
This technique also allows mixed content:
>>> print((Element('p')('Hello ', Element('b')('world'))))
<p>Hello <b>world</b></p>
Quotes are not escaped inside text nodes:
>>> print((Element('p')('"Hello"')))
<p>"Hello"</p>
Elements can also be combined with other elements or strings using the
addition operator, which results in a `Fragment` object that contains the
operands:
>>> print((Element('br') + 'some text' + Element('br')))
<br/>some text<br/>
Elements with a namespace can be generated using the `Namespace` and/or
`QName` classes:
>>> from genshi.core import Namespace
>>> xhtml = Namespace('http://www.w3.org/1999/xhtml')
>>> print((Element(xhtml.html, lang='en')))
<html xmlns="http://www.w3.org/1999/xhtml" lang="en"/>
"""
__slots__ = ['tag', 'attrib']
def __init__(self, tag_, **attrib):
Fragment.__init__(self)
self.tag = QName(tag_)
self.attrib = _kwargs_to_attrs(at |
maruqu/flask-jsonapi | tests/test_filters_schema.py | Python | bsd-3-clause | 8,296 | 0.001205 | import uuid
import marshmallow_jsonapi
import pytest
from marshmallow_jsonapi import fields
import flask_jsonapi
from flask_jsonapi import filters_schema
class TestFiltersSchemaCreation:
    """Tests for how FilterSchema subclasses collect their filter fields."""

    def test_simple(self):
        # A declared filter field is registered in ``base_filters``.
        class ExampleFiltersSchema(filters_schema.FilterSchema):
            title = filters_schema.ListFilterField()

        assert ExampleFiltersSchema.base_filters == {
            'title': filters_schema.ListFilterField()
        }

    def test_inheriting_fields(self):
        # Fields declared on a base filter schema are inherited by subclasses.
        class ExampleFiltersSchema(filters_schema.FilterSchema):
            title = filters_schema.FilterField()

        class ExampleFiltersSchemaDerived(ExampleFiltersSchema):
            content = filters_schema.FilterField()

        assert ExampleFiltersSchemaDerived.base_filters == {
            'title': filters_schema.FilterField(),
            'content': filters_schema.FilterField(),
        }

    def test_creation_using_schema(self):
        # Filter fields can be generated from a marshmallow schema via Meta;
        # the field's ``attribute`` and type follow the schema declaration.
        class ExampleSchema(marshmallow_jsonapi.Schema):
            id = fields.UUID(required=True)
            body = fields.Str()
            is_active = fields.Boolean(attribute='active')

            class Meta:
                type_ = 'example'

        class ExampleFiltersSchema(filters_schema.FilterSchema):
            class Meta:
                schema = ExampleSchema
                fields = ['id', 'body', 'is_active']

        assert ExampleFiltersSchema.base_filters == {
            'id': filters_schema.FilterField(attribute='id', type_=fields.UUID),
            'body': filters_schema.FilterField(attribute='body'),
            'is_active': filters_schema.FilterField(attribute='active', type_=fields.Boolean),
        }
class TestFiltersSchemaBasic:
    """Tests for parsing ``filter[...]`` query-string parameters."""

    def test_basic(self, app):
        # Declared filters are parsed from the query string; undeclared ones
        # (skipped_filter) are simply absent from the result.
        class ExampleFiltersSchema(filters_schema.FilterSchema):
            basic = filters_schema.FilterField()
            listed = filters_schema.ListFilterField()
            dumb_name = filters_schema.FilterField(attribute='renamed')
            integer = filters_schema.FilterField(type_=fields.Int)
            skipped_filter = filters_schema.FilterField()
        with app.test_request_context('?filter[basic]=text'
                                      '&filter[listed]=first,second'
                                      '&filter[dumb-name]=another'
                                      '&filter[integer]=3'):
            parsed_filters = ExampleFiltersSchema().parse()
            assert parsed_filters == {
                'basic': 'text',
                'listed': ['first', 'second'],
                'renamed': 'another',
                'integer': 3,
            }

    def test_invalid_filter(self, app):
        # Filtering on an undeclared name is rejected.
        class ExampleFiltersSchema(filters_schema.FilterSchema):
            valid = filters_schema.FilterField()
        with app.test_request_context('?filter[invalid]=text'):
            with pytest.raises(flask_jsonapi.exceptions.InvalidFilters):
                ExampleFiltersSchema().parse()

    def test_empty_filter_value(self, app):
        # An empty value for a declared filter is rejected.
        class ExampleFiltersSchema(filters_schema.FilterSchema):
            valid = filters_schema.FilterField()
        with app.test_request_context('?filter[valid]='):
            with pytest.raises(flask_jsonapi.exceptions.InvalidFilters):
                ExampleFiltersSchema().parse()

    def test_parse_value(self, app):
        # Values are deserialized with the declared marshmallow type.
        class ExampleFiltersSchema(filters_schema.FilterSchema):
            identifier = filters_schema.FilterField(type_=fields.UUID)
        with app.test_request_context('?filter[identifier]=11111111-1111-1111-1111-111111111111'):
            parsed_filters = ExampleFiltersSchema().parse()
            assert parsed_filters == {'identifier': uuid.UUID('11111111-1111-1111-1111-111111111111')}

    def test_parse_value_error(self, app):
        # A value that fails type deserialization is rejected.
        class ExampleFiltersSchema(filters_schema.FilterSchema):
            identifier = filters_schema.FilterField(type_=fields.UUID)
        with app.test_request_context('?filter[identifier]=1234'):
            with pytest.raises(flask_jsonapi.exceptions.InvalidFilters):
                ExampleFiltersSchema().parse()

    def test_parse_operator(self, app):
        # ``filter[name][op]`` parses into a ``name__op`` key.
        class ExampleFiltersSchema(filters_schema.FilterSchema):
            basic = filters_schema.FilterField(operators=[filters_schema.Operators.NE])
        with app.test_request_context('?filter[basic][ne]=text'):
            parsed_filters = ExampleFiltersSchema().parse()
            assert parsed_filters == {
                'basic__ne': 'text',
            }

    def test_custom_default_operator(self, app):
        # Without an explicit operator, ``default_operator`` is applied.
        class ExampleFiltersSchema(filters_schema.FilterSchema):
            basic = filters_schema.FilterField(default_operator='like')
        with app.test_request_context('?filter[basic]=text'):
            parsed_filters = ExampleFiltersSchema().parse()
            assert parsed_filters == {'basic__like': 'text'}

    def test_operator_not_allowed(self, app):
        # Operators outside the declared ``operators`` list are rejected.
        class ExampleFiltersSchema(filters_schema.FilterSchema):
            basic = filters_schema.FilterField(
                operators=[filters_schema.Operators.NE, filters_schema.Operators.EQ]
            )
        with app.test_request_context('?filter[basic][like]=text'):
            with pytest.raises(flask_jsonapi.exceptions.InvalidFilters):
                ExampleFiltersSchema().parse()

    def test_default_operator_not_in_operators(self, app):
        # The implicit default operator must itself be allowed.
        class ExampleFiltersSchema(filters_schema.FilterSchema):
            basic = filters_schema.FilterField(
                operators=[filters_schema.Operators.NE]
            )
        with app.test_request_context('?filter[basic]=text'):
            with pytest.raises(flask_jsonapi.exceptions.InvalidFilters):
                ExampleFiltersSchema().parse()

    def test_parse_generated_from_schema(self, app):
        # Schema-generated filters accept dasherized and underscored query
        # names and map them back to the model attributes.
        class ExampleSchema(marshmallow_jsonapi.Schema):
            id = fields.UUID(required=True)
            first_body = fields.Str()
            second_body = fields.Str()
            is_active = fields.Boolean(attribute='active')
            related = fields.Relationship(attribute='related_id')
            other_related = fields.Relationship(id_field='id')

            class Meta:
                type_ = 'example'

        class ExampleFiltersSchema(filters_schema.FilterSchema):
            class Meta:
                schema = ExampleSchema
                fields = ['id', 'first_body', 'second_body', 'is_active', 'related', 'other_related']
        with app.test_request_context('?filter[id]=11111111-1111-1111-1111-111111111111'
                                      '&filter[first-body]=first-text'
                                      '&filter[second_body]=second-text'
                                      '&filter[is-active]=true'
                                      '&filter[related]=456'
                                      '&filter[other-related]=789'):
            parsed_filters = ExampleFiltersSchema().parse()
            assert parsed_filters == {
                'id': uuid.UUID('11111111-1111-1111-1111-111111111111'),
                'first_body': 'first-text',
                'second_body': 'second-text',
                'active': True,
                'related_id': '456',
                'other_related__id': '789',
            }
class TestFiltersSchemaRelationship:
def test_basic(self, app):
class FirstFiltersSchema(filters_schema.FilterSchema):
id = filters_schema.FilterField()
class SecondFiltersSchema(filters_schema.FilterSchema):
attribute = filters_schema.FilterField()
other_relationship = filters_schema.RelationshipFilterField(
FirstFiltersSchema, attribute='renamed_relationship'
)
class ThirdFiltersSchema(filters_schema.FilterSchema):
relationship = filters_schema.RelationshipFilterField(SecondFiltersSchema)
with app.test_request_context('?filter[relationship][other_relationship][id]=123'
'&filter[relationship][attribute]=text'):
parsed_filters = ThirdFiltersSchema().parse()
assert parsed_filters == {
|
blossomica/airmozilla | airmozilla/manage/views/channels.py | Python | bsd-3-clause | 2,361 | 0 | from django.contrib import messages
from django.shortcuts import render, redirect
from django.db import transaction
from airmozilla.main.models import Channel
from airmozilla.manage import forms
from .decorators import (
staff_required,
permission_required,
cancel_redirect
)
@staff_required
@permission_required('main.change_channel')
def channels(request):
    """List every channel in the management interface."""
    context = {'channels': Channel.objects.all()}
    return render(request, 'manage/channels.html', context)
@staff_required
@permission_required('main.add_channel')
@cancel_redirect('manage:channels')
@transaction.atomic
def channel_new(request):
    """Create a new channel.

    On a valid POST the channel is saved and the user is redirected to the
    channel list; otherwise the (possibly bound, invalid) form is rendered.
    """
    use_ace = bool(int(request.GET.get('use_ace', 1)))
    if request.method == 'POST':
        form = forms.ChannelForm(request.POST, instance=Channel())
        if form.is_valid():
            form.save()
            messages.success(request, 'Channel created.')
            return redirect('manage:channels')
    else:
        form = forms.ChannelForm()
    # Reached on GET or on an invalid POST (form then carries the errors).
    return render(request,
                  'manage/channel_new.html',
                  {'form': form,
                   'use_ace': use_ace})
@staff_required
@permission_required('main.change_channel')
@cancel_redirect('manage:channels')
@transaction.atomic
def channel_edit(request, id):
    """Edit an existing channel.

    On a valid POST the channel is saved (and detached from its parent when
    the parent field was cleared) and the user is redirected to the list.
    """
    channel = Channel.objects.get(id=id)
    use_ace = bool(int(request.GET.get('use_ace', 1)))
    if request.method == 'POST':
        form = forms.ChannelForm(request.POST, request.FILES, instance=channel)
        if form.is_valid():
            channel = form.save()
            if channel.parent and not form.cleaned_data['parent']:
                # The form's parent field was cleared: detach the channel.
                channel.parent = None
                channel.save()
            messages.info(request, 'Channel "%s" saved.' % channel.name)
            return redirect('manage:channels')
    else:
        form = forms.ChannelForm(instance=channel)
    return render(request, 'manage/channel_edit.html',
                  {'form': form, 'channel': channel,
                   'use_ace': use_ace})
@staff_required
@permission_required('main.delete_channel')
@transaction.atomic
def channel_remove(request, id):
    """Delete a channel and redirect to the channel list (POST only).

    NOTE(review): a non-POST request falls through and returns None --
    confirm whether a redirect/405 is intended for GET.
    """
    if request.method == 'POST':
        channel = Channel.objects.get(id=id)
        channel.delete()
        # The deleted instance still carries its attributes in memory,
        # so channel.name is safe to use after delete().
        messages.info(request, 'Channel "%s" removed.' % channel.name)
        return redirect('manage:channels')
|
sminteractive/ndb-gae-admin | demo.py | Python | apache-2.0 | 193 | 0 | import sma | dmin as admin
# Set up the app.
# app (an instance of AdminApplication) inherits from webapp2.WSGIApplication.
app = admin.app
app.routes_prefix = '/admin'
app.discover_admins('demo_admin')
|
DevangS/CoralNet | lib/forms.py | Python | bsd-2-clause | 2,225 | 0.001798 | from django import forms
from images.models import Image
class ContactForm(forms.Form):
    """
    Allows a user to send a general email to the site admins.
    """
    email = forms.EmailField(
        label='Your email address',
        help_text="Enter your email address so we can reply to you.",
    )
    subject = forms.CharField(
        label='Subject',
        # Total length of the subject (including any auto-added prefix)
        # should try not to exceed 78 characters.
        # http://stackoverflow.com/questions/1592291/
        max_length=55,
    )
    message = forms.CharField(
        label='Message/Body',
        max_length=5000,
        widget=forms.Textarea(
            attrs={'class': 'large'},
        ),
    )

    def __init__(self, user, *args, **kwargs):
        """Drop the email field for authenticated users, whose address is
        already known."""
        super(ContactForm, self).__init__(*args, **kwargs)
        if user.is_authenticated():
            del self.fields['email']
def clean_comma_separated_image_ids_field(value, source):
    """
    Clean a char field that contains some image ids separated by commas,
    e.g. "5,6,8,9".

    Returns the list of ids (as ints) that are valid integers AND belong to
    images of the given source; all other entries are silently skipped.

    This would preferably go in a custom Field class's clean() method,
    but I don't know how to define a custom Field's clean() method that
    takes a custom parameter (in this case, the source). -Stephen
    """
    id_list = []
    for img_id in value.split(','):
        try:
            id_num = int(img_id)
        except ValueError:
            # The id string can't be interpreted as an int; skip it.
            continue
        # Only accept ids of images belonging to this source, so forged
        # POST data naming other sources' images is silently ignored.
        try:
            Image.objects.get(pk=id_num, source=source)
        except Image.DoesNotExist:
            # The image doesn't exist or isn't in this source; skip it.
            continue
        id_list.append(id_num)
    return id_list
|
gatieme/EnergyEfficient | script/plot/perflogplot.py | Python | gpl-3.0 | 8,694 | 0.024284 | #!/usr/bin/python
# encoding=utf-8
#!coding:utf-8
import re
import sys
import argparse
import commands
import os
import subprocess
import parse
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import openpyxl
class PerfPlotData:
    """One plot series parsed from a perf benchmark log file.

    The class-level mutable defaults of the original were removed: every
    attribute is set per instance in __init__, so instances no longer risk
    touching shared class state.
    """

    def __init__(self, plotName, logFile, xData, yData, color, marker):
        """
        :param plotName: series label identifying the scheduler
            (e.g. bl-switch, iks, hmp, hmpdb)
        :param logFile: path of the log file the data came from
        :param xData: x-axis values
        :param yData: y-axis values
        :param color: matplotlib color code for the curve
        :param marker: matplotlib marker code for the curve
        """
        self.plotName = plotName
        self.logFile = logFile
        self.xData = xData
        self.yData = yData
        self.color = color
        self.marker = marker
def ShowPerfPlot(plotDataList, poly):
    """Render the collected plot series with matplotlib.

    :param plotDataList: list of PerfPlotData series to draw
    :param poly: when True, draw a degree-10 polynomial fit of each series
        instead of the raw points
    """
    #http://blog.csdn.net/kkxgx/article/details/6951959
    #http://www.mamicode.com/info-detail-280610.html
    #http://blog.csdn.net/panda1234lee/article/details/52311593
    # Chinese text support
    mpl.rcParams['font.sans-serif'] = ['SimHei'] # render CJK labels correctly
    mpl.rcParams['axes.unicode_minus'] = False # render the minus sign correctly
    # Auto-adjust label layout; labels are slanted when too crowded
    fig = plt.figure(num = 1, figsize = (6, 5))
    fig.autofmt_xdate( )
    #plt.title("Scheduler Bench Performance...")
    title_str = "scheduler performance (perf/messaging) GROUP=100"
    plt.title(title_str)
    plt.xlabel("loop", size = 14)
    plt.ylabel("time", size = 14)
    plt.grid( ) # enable the grid
    for data in plotDataList :
        # Draw this series' curve
        print len(data.xData), len(data.yData)
        # curve smoothing -- http://rys1314520.iteye.com/blog/1820777
        # curve smoothing -- http://blog.sina.com.cn/s/blog_142e602960102wegx.html
        if poly == True :
            # compute the fitting polynomial
            c = np.polyfit(data.xData, data.yData, 10) # coefficients of the fitted polynomial
            yy = np.polyval(c, data.xData) # evaluate the fit at the x points (unused)
            # draw the fitted curve
            x_new = np.linspace(0, 1000000, 11)
            f_liner = np.polyval(c,x_new)
            plt.plot(x_new, f_liner, color = data.color, linestyle = '--', marker = data.marker, label = data.plotName)
        else :
            plt.plot(data.xData, data.yData, color = data.color, linestyle = '--', marker = data.marker, label = data.plotName)
    plt.legend(loc = "upper left")
    #plt.savefig(title_str + '.png', format = 'png')
    plt.show( )
def ParsePlotData(str) :
    """Parse one log line of the form "<text><int>, <float>".

    Returns the parse Result (index 1 = the int, index 2 = the float), or
    None when the line does not match the format.
    """
    # The parse library is the inverse of str.format(), similar to C's sscanf.
    str_format = "{:s}{:d}, {:f}"
    xydata = parse.parse(str_format, str)
    #print xydata
    return xydata #(xydata[1], xydata[2])
def ReadPlotXData(minData, maxData, step):
    """Build the x-axis sample points: minData up to (but excluding)
    maxData, in increments of step."""
    return range(minData, maxData, step)
def ReadPlotData(filepath, lines, iszero) :
    """Read (x, y) sample pairs from a benchmark log file.

    :param filepath: path of the log file to parse
    :param lines: size hint (bytes) passed to file.readlines() per batch
    :param iszero: when True, seed both series with a 0 origin point
    :returns: tuple (xData, yData) of parsed values

    NOTE(review): the file object is never closed; it relies on GC /
    interpreter exit to release the handle.
    """
    fileobject = open(filepath)
    if iszero == True :
        xData = [ 0 ]
        yData = [ 0 ]
    else :
        xData = [ ]
        yData = [ ]
    while 1 :
        # readlines(sizehint) returns a batch of whole lines, [] at EOF.
        linedata = fileobject.readlines(lines)
        if not linedata:
            break
        for line in linedata:
            #print line
            xyData = ParsePlotData(line)
            if (xyData != None) :
                # ParsePlotData result: [1] = x (int), [2] = y (float).
                #print "data = ", xyData[0], xyData[1]
                xData.append(xyData[1])
                yData.append(xyData[2])
            else :
                # Line didn't match the expected "...<int>, <float>" format.
                #print "line = ", line
                pass
    return (xData, yData)
#def PerfBenchPlotRun(nameTuple, colorTuple, marketTuple, bench, ming, maxg, setg) :
def PerfBenchPlotRun(nameTuple, colorTuple, marketTuple, args) :
plotDataList = []
#for name in nameTuple :
for index in range(len(nameTuple)) :
name = nameTuple[index]
color = colorTuple[index]
marker = markerTuple[index | ]
if (name == "NULL") :
break
resultfile = args.directory + "/" + name + "/perf/" + args.bench + "/" \
+ args.min_group + "-" + args.max_group + "-" + args.step_group + "-" +args.loop + ".log"
print "\n=========================================="
print "resultfile :", resultfile
if ((int(args.min_group) | + int(args.step_group)) > int(args.max_group)) : # 同一个循环多次
iszero = False
else :
iszero = True
(xData, yData) = ReadPlotData(resultfile, 1000, iszero)
print "+++++++", len(xData), len(yData), "+++++++"
print xData
print yData
print "==========================================\n"
plotdata = PerfPlotData(name, resultfile, xData, yData, color, marker)
plotDataList.append(plotdata)
#ShowPerfPlot(plotDataList, False)
filename = args.bench + ".xlsx";
sheetname = args.min_group + "-" + args.max_group + "-" + args.step_group + "-" + args.loop
WriteExcelFile(plotDataList, filename, sheetname)
def WriteExcelFile(plotDataList, filename, sheetname) :
    """
    Write the plot series into a sheet of an existing Excel workbook.

    Row r+1 gets the (uppercased) series name in column 1, followed by that
    series' y values.  NOTE(review): for row index 0 only the x values are
    written as a header row, so the first series' y data never reaches the
    sheet -- confirm whether that is intended.
    """
    #wb = openpyxl.Workbook()
    #ws = wb.active
    wb = openpyxl.load_workbook(filename)
    print wb.get_sheet_names( )
    if sheetname in wb.get_sheet_names( ) :
        ws = wb.get_sheet_by_name(sheetname)
        print sheetname, "in", wb.get_sheet_names( )
    else :
        ws = wb.create_sheet(sheetname)
        print "create", sheetname, "sheet"
    for row in range(len(plotDataList)) :
        ydata = plotDataList[row].yData
        ws.cell(row = row + 1, column = 1).value = str.upper(plotDataList[row].plotName)
        for col in range(len(plotDataList[row].yData)) :
            if row == 0:
                # Header row: x values along the top.
                ws.cell(row = 1, column = col + 2).value = plotDataList[row].xData[col]
            else :
                ws.cell(row = row + 1, column = col + 2).value = ydata[col]
    wb.save(filename)
if __name__ == "__main__" :
#python logplot.py -d ../bench -b messaging -min 10 -max 100 -step 10 -l 5
reload(sys)
sys.setdefaultencoding("utf-8")
if len(sys.argv) > 1: # 如果在程序运行时,传递了命令行参数
pass
# 打印传递的命令行参数的信息
#print "您输入的所有参数共 %d 个,信息为 sys.argv = %s" % (len(sys.argv), sys.argv)
#for i, eachArg in enumerate(sys.argv):
# print "[%d] = %s" % (i, eachArg)
else:
print "Useage : read.py file..."
exit(0)
parser = argparse.ArgumentParser( )
#parser.add_argument("-n", "--name", dest = "name", help = "bl-switch | iks | hmp | hmpdb...")
parser.add_argument("-b", "--bench", dest = "bench", help = "messaging | pipe...")
parser.add_argument("-d", "--dir", dest = "directory", help = "The Directory")
parser.add_argument("-f", "--file", dest = "resultfile", help = "The file you want to read...")
parser.add_argument("-min", "--min_group", dest = "min_group", help = "The min group you give...")
parser.add_argument("-max", "--max_group", dest = "max_group", help = "The max group you give...")
parser.add_argument("-step", "--step_group", dest = "step_group", help = "The step of the group grown you give...")
parser.add_argument("-l", "--loop", dest = "loop", help = "The file you want to read...")
args = parser.parse_args( )
#nameTuple = ( "hmp", "hmpdb")
nameTuple = ( "bl-switch", "iks", "hmp", "hmpdb")
#nameTuple = ( "bl-switch", "iks", "hmp", "hmpdb", "little-cluster", "big-cluster", "big-little-cluster")
#nameTuple = ( "little-cluster", "big-cluster", "big-little-cluster")
# 1)控制颜色
# 颜色之间的对应关系为
# b---blue c---cyan g---green k----black
# m---magenta r---red w---white y----yellow
colorTuple = ( 'b', 'c', 'g', 'k', 'm', 'r', 'y', 'y')
#. Point marker
#, Pixel marker
#o Circle marker
#v Triangle down marker
#^ Triangle up marker
#< Triangle left marker
#> Triangle right marker
#1 Tripod down marker
#2 Tripod up marker
#3 Tripod left marker
#4 Tripod right marker
#s Square marker
#p Pentagon marker
#* Star marker
#h Hexagon marker
#H Rotated hexagon D Diamond marker
#d Thin diamond marker
#| Vertical line (vlinesymbol) marker
#_ Horizontal line (hline symbol) marker
#+ Plus marker
#x Cross (x) marker
markerTuple= ( 'o', '^', '*', 's', 'p', '2', 'h', )
PerfBenchPlotRun |
ivanalejandro0/bitmask_client | src/leap/bitmask/services/mail/emailfirewall.py | Python | gpl-3.0 | 3,225 | 0 | # -*- coding: utf-8 -*-
# emailfirewall.py
# Copyright (C) 2014 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Email firewall implementation.
"""
import os
import subprocess
from abc import ABCMeta, abstractmethod

from leap.bitmask.config import flags
from leap.bitmask.platform_init import IS_LINUX
from leap.bitmask.util import first, force_eval
from leap.bitmask.util.privilege_policies import LinuxPolicyChecker
from leap.common.check import leap_assert
def get_email_firewall():
    """
    Return the email firewall handler for the current platform, or None
    when running inside a docker container.

    :raises NotImplementedError: on non-Linux platforms
    """
    # disable email firewall on a docker container so we can access from an
    # external MUA
    if os.environ.get("LEAP_DOCKERIZED"):
        return None
    if not IS_LINUX:
        error_msg = "Email firewall not implemented for this platform."
        raise NotImplementedError(error_msg)
    firewall = None
    if IS_LINUX:
        firewall = LinuxEmailFirewall
    leap_assert(firewall is not None)
    return firewall()
class EmailFirewall(object):
    """
    Abstract email firewall class; platform-specific subclasses must
    implement `start` and `stop`.
    """
    # Python 2 style abstract-base-class declaration.
    __metaclass__ = ABCMeta

    @abstractmethod
    def start(self):
        """
        Start email firewall.

        :returns: True on success, False otherwise (subclasses override)
        """
        return False

    @abstractmethod
    def stop(self):
        """
        Stop email firewall.

        :returns: True on success, False otherwise (subclasses override)
        """
        return False
class EmailFirewallException(Exception):
    """Raised when an email firewall operation fails."""
    pass
class LinuxEmailFirewall(EmailFirewall):
    """Email firewall implementation that delegates to bitmask-root."""

    class BITMASK_ROOT(object):
        # Callable resolving the bitmask-root path, which depends on
        # whether this is a standalone bundle.
        def __call__(self):
            return ("/usr/local/sbin/bitmask-root" if flags.STANDALONE else
                    "/usr/sbin/bitmask-root")

    def start(self):
        """Start the email firewall for the current user.

        :returns: True on success, False otherwise
        :rtype: bool
        """
        uid = str(os.getuid())
        # Exit codes must be compared with ==; the previous ``is 0`` only
        # worked because of CPython's small-int caching.
        return self._run(["start", uid]) == 0

    def stop(self):
        """Stop the email firewall.

        :returns: True on success, False otherwise
        :rtype: bool
        """
        return self._run(["stop"]) == 0

    def _run(self, cmd):
        """
        Run an email firewall command with bitmask-root
        Might raise:
            NoPkexecAvailable,
            NoPolkitAuthAgentAvailable,
        :param cmd: command to send to bitmask-root fw-email
        :type cmd: [str]
        :returns: exit code of bitmask-root
        :rtype: int
        """
        command = []
        policyChecker = LinuxPolicyChecker()
        pkexec = policyChecker.maybe_pkexec()
        if pkexec:
            command.append(first(pkexec))
        command.append(force_eval(self.BITMASK_ROOT))
        command.append("fw-email")
        command += cmd
        # XXX: will be nice to use twisted ProcessProtocol instead of
        # subprocess to avoid blocking until it finish
        return subprocess.call(command)
|
campadrenalin/python-libcps | dbcps/sinks/redis.py | Python | lgpl-3.0 | 1,395 | 0.004301 | '''
This file is part of the Python CPS library.
The Python CPS library is free software: you can redistribute it
and/or modify it under the terms of the GNU Lesser Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
the Python CPS library is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser Public License for more details.
You should have received a copy of the GNU Lesser Public License
along with the Python CPS library. If not, see
<http://www.gnu.org/licenses/>.
'''
redislib = __import__("redis", {})
from dbcps.sinks.core import Sink
class Redis(Sink):
    '''
    Uses the redis module to store data to a key value store.

    NOTE(review): the doctests below require a Redis server reachable on
    the default localhost:6379 and will fail without one.

    >>> from __future__ import print_function
    >>> s = Redis()
    >>> s['hello'] = 'world'
    >>> print(s['hello'].decode())
    world
    >>> 'hello' in s
    True
    >>> 'hell' in s
    False
    >>> del s['hello']
    >>> 'hello' in s
    False
    >>> s['blueberry'] = 'pancakes'
    >>> del s
    >>> s = Redis()
    >>> print(s['blueberry'].decode())
    pancakes
    '''
    def __init__(self, origin=None, host='localhost', port=6379):
        # Sink.__init__ records the origin; the redis client object is the
        # actual storage backend used for all key/value operations.
        Sink.__init__(self, origin)
        self.backend = redislib.Redis(host, port)
|
pmghalvorsen/gramps_branch | gramps/cli/user.py | Python | gpl-2.0 | 6,799 | 0.005442 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
The User class provides basic interaction with the user.
"""
#------------------------------------------------------------------------
#
# Python Modules
#
#------------------------------------------------------------------------
from __future__ import print_function, unicode_literals
import sys
#------------------------------------------------------------------------
#
# Gramps Modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen import user
#------------------------------------------------------------------------
#
# Private Constants
#
#------------------------------------------------------------------------
_SPINNER = ['|', '/', '-', '\\']
#-------------------------------------------------------------------------
#
# User class
#
#-------------------------------------------------------------------------
class User(user.User):
    """
    This class provides a means to interact with the user via CLI.
    It implements the interface in :class:`.gen.user.User`
    """
    def __init__(self, callback=None, error=None, auto_accept=False, quiet=False):
        """
        Init.

        :param error: If given, notify_error delegates to this callback
        :type error: function(title, error)
        :param auto_accept: if True, every prompt is answered positively
        :param quiet: if True, all progress reporting becomes a no-op
        """
        user.User.__init__(self, callback, error)
        self.steps = 0;
        self.current_step = 0;
        # Python 2 spells input() as raw_input; pick the right one once.
        self._input = raw_input if sys.version_info[0] < 3 else input

        # Stub that accepts any arguments and reports success; used to
        # replace interactive/progress methods below.
        def yes(*args):
            return True

        if auto_accept:
            self.prompt = yes
        if quiet:
            self.begin_progress = self.end_progress = self.step_progress = \
                self._default_callback = yes
    def begin_progress(self, title, message, steps):
        """
        Start showing a progress indicator to the user.

        :param title: the title of the progress meter
        :type title: str
        :param message: the message associated with the progress meter
        :type message: str
        :param steps: the total number of steps for the progress meter.
            a value of 0 indicates that the ending is unknown and the
            meter should just show activity.
        :type steps: int
        :returns: none
        """
        self._fileout.write(message)
        self.steps = steps
        self.current_step = 0;
        if self.steps == 0:
            # Unknown length: show a spinner instead of a percentage.
            self._fileout.write(_SPINNER[self.current_step])
        else:
            self._fileout.write("00%")
    def step_progress(self):
        """
        Advance the progress meter.
        """
        self.current_step += 1
        if self.steps == 0:
            # Cycle through the 4 spinner frames.
            self.current_step %= 4
            self._fileout.write("\r %s " % _SPINNER[self.current_step])
        else:
            percent = int((float(self.current_step) / self.steps) * 100)
            self._fileout.write("\r%02d%%" % percent)
    def end_progress(self):
        """
        Stop showing the progress indicator to the user.
        """
        self._fileout.write("\r100%\n")
    def prompt(self, title, message, accept_label, reject_label):
        """
        Prompt the user with a message to select an alternative.

        :param title: the title of the question, e.g.: "Undo history warning"
        :type title: str
        :param message: the message, e.g.: "Proceeding with the tool will erase
            the undo history. If you think you may want to revert
            running this tool, please stop here and make a backup
            of the DB."
        :type question: str
        :param accept_label: what to call the positive choice, e.g.: "Proceed"
        :type accept_label: str
        :param reject_label: what to call the negative choice, e.g.: "Stop"
        :type reject_label: str
        :returns: the user's answer to the question
        :rtype: bool
        """
        # Strip GUI mnemonic markers ("_Proceed" -> "Proceed").
        accept_label = accept_label.replace("_", "")
        reject_label = reject_label.replace("_", "")
        text = "{t}\n{m} ([{y}]/{n}): ".format(
            t = title,
            m = message,
            y = accept_label,
            n = reject_label)
        print (text, file = self._fileout) # TODO python3 add flush=True
        try:
            # An empty reply (just Enter) counts as accepting the default.
            reply = self._input()
            return reply == "" or reply == accept_label
        except EOFError:
            return False
    def warn(self, title, warning=""):
        """
        Warn the user.

        :param title: the title of the warning
        :type title: str
        :param warning: the warning
        :type warning: str
        :returns: none
        """
        self._fileout.write("%s %s" % (title, warning))
    def notify_error(self, title, error=""):
        """
        Notify the user of an error.

        :param title: the title of the error
        :type title: str
        :param error: the error message
        :type error: str
        :returns: none
        """
        if self.error_function:
            self.error_function(title, error)
        else:
            self._fileout.write("%s %s" % (title, error))
    def notify_db_error(self, error):
        """
        Notify the user of a DB error.

        :param error: the error message
        :type error: str
        :returns: none
        """
        self.notify_error(
            _("Low level database corruption detected"),
            _("Gramps has detected a problem in the underlying "
              "Berkeley database. This can be repaired from "
              "the Family Tree Manager. Select the database and "
              'click on the Repair button') + '\n\n' + error)
    def info(self, msg1, infotext, parent=None, monospaced=False):
        """
        Displays information to the CLI
        """
        self._fileout.write("{} {}\n".format(msg1, infotext))
|
eoogbe/api-client-staging | generated/python/gapic-google-cloud-dlp-v2beta1/setup.py | Python | bsd-3-clause | 1,575 | 0 | """A setup module for the GAPIC DLP API library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages

import sys

# Runtime dependencies. The tight version ceilings keep this generated
# GAPIC layer in lock-step with its companion proto package.
install_requires = [
    'google-gax>=0.15.7, <0.16dev',
    'oauth2client>=2.0.0, <4.0dev',
    'proto-google-cloud-dlp-v2beta1[grpc]>=0.15.4, <0.16dev',
    'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
]

setup(
    name='gapic-google-cloud-dlp-v2beta1',
    version='0.15.4',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # Fix: 'Intended Audience :: Developers' was listed twice.
        'Intended Audience :: Developers',
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the DLP API',
    include_package_data=True,
    long_description=open('README.rst').read(),
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=[
        'google', 'google.cloud', 'google.cloud.gapic',
        'google.cloud.gapic.privacy', 'google.cloud.gapic.privacy.dlp'
    ],
    url='https://github.com/googleapis/googleapis')
|
jiasir/pycs | vulpo/auth.py | Python | mit | 35,320 | 0.00051 | # Copyright 2010 Google Inc.
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Handles authentication required to AWS and GS
"""
import base64
import vulpo
import vulpo.auth_handler
import vulpo.exception
import vulpo.plugin
import vulpo.utils
import copy
import datetime
from email.utils import formatdate
import hmac
import os
import sys
import time
import urllib
import urlparse
import posixpath
from vulpo.auth_handler import AuthHandler
from vulpo.exception import VulpoClientError
try:
from hashlib import sha1 as sha
from hashlib import sha256 as sha256
except ImportError:
import sha
sha256 = None
class HmacKeys(object):
    """Key based Auth handler helper.

    Holds the provider credentials and pre-built HMAC objects used by the
    concrete auth handlers. NOTE(review): this is Python-2 era code - it
    feeds str keys to hmac.new() and uses base64.encodestring(), both of
    which fail on Python 3 (encodestring was removed in 3.9).
    """
    def __init__(self, host, config, provider):
        # Both halves of the credential pair must be present before any
        # signing can happen.
        if provider.access_key is None or provider.secret_key is None:
            raise vulpo.auth_handler.NotReadyToAuthenticate()
        self.host = host
        self.update_provider(provider)
    def update_provider(self, provider):
        # (Re)build the HMAC templates from the provider's secret key.
        self._provider = provider
        self._hmac = hmac.new(self._provider.secret_key, digestmod=sha)
        # SHA-256 may be unavailable on very old Pythons (see module-level
        # try/except import); fall back to SHA-1 in that case.
        if sha256:
            self._hmac_256 = hmac.new(self._provider.secret_key,
                                      digestmod=sha256)
        else:
            self._hmac_256 = None
    def algorithm(self):
        # Advertised algorithm name matching whatever digest is in use.
        if self._hmac_256:
            return 'HmacSHA256'
        else:
            return 'HmacSHA1'
    def _get_hmac(self):
        # Fresh HMAC per signature; the cached templates are only used to
        # detect SHA-256 availability.
        if self._hmac_256:
            digestmod = sha256
        else:
            digestmod = sha
        return hmac.new(self._provider.secret_key,
                        digestmod=digestmod)
    def sign_string(self, string_to_sign):
        # Returns the base64-encoded HMAC digest, stripped of the trailing
        # newline that encodestring appends.
        new_hmac = self._get_hmac()
        new_hmac.update(string_to_sign)
        return base64.encodestring(new_hmac.digest()).strip()
    def __getstate__(self):
        # HMAC objects are not picklable; drop them and rebuild on unpickle.
        pickled_dict = copy.copy(self.__dict__)
        del pickled_dict['_hmac']
        del pickled_dict['_hmac_256']
        return pickled_dict
    def __setstate__(self, dct):
        self.__dict__ = dct
        self.update_provider(self._provider)
class AnonAuthHandler(AuthHandler, HmacKeys):
    """
    Implements Anonymous requests.

    add_auth is a no-op: no credentials are attached to the request.
    """
    capability = ['anon']
    def __init__(self, host, config, provider):
        super(AnonAuthHandler, self).__init__(host, config, provider)
    def add_auth(self, http_request, **kwargs):
        # Anonymous access: nothing to sign, nothing to add.
        pass
class HmacAuthV1Handler(AuthHandler, HmacKeys):
    """ Implements the HMAC request signing used by SCS and GS."""
    capability = ['hmac-v1', 'scs']
    def __init__(self, host, config, provider):
        AuthHandler.__init__(self, host, config, provider)
        HmacKeys.__init__(self, host, config, provider)
        # V1 signing always uses SHA-1; disable the SHA-256 template.
        self._hmac_256 = None
    def update_provider(self, provider):
        super(HmacAuthV1Handler, self).update_provider(provider)
        self._hmac_256 = None
    def add_auth(self, http_request, **kwargs):
        """Sign the request in place by setting its Authorization header."""
        headers = http_request.headers
        method = http_request.method
        auth_path = http_request.auth_path
        # The Date header is part of the canonical string, so make sure it
        # exists before signing.
        if 'Date' not in headers:
            headers['Date'] = formatdate(localtime=True)
        # Temporary (STS-style) credentials also carry a security token.
        if self._provider.security_token:
            key = self._provider.security_token_header
            headers[key] = self._provider.security_token
        string_to_sign = vulpo.utils.canonical_string(method, auth_path,
                                                      headers, None,
                                                      self._provider)
        vulpo.log.debug('StringToSign:\n%s' % string_to_sign)
        b64_hmac = self.sign_string(string_to_sign)
        auth_hdr = self._provider.auth_header
        auth = ("%s %s:%s" % (auth_hdr, self._provider.access_key, b64_hmac))
        vulpo.log.debug('Signature:\n%s' % auth)
        headers['Authorization'] = auth
class HmacAuthV2Handler(AuthHandler, HmacKeys):
    """
    Implements the simplified HMAC authorization used by CloudFront.

    Unlike V1, only the Date header is signed - no canonical string is
    built from the rest of the request.
    """
    capability = ['hmac-v2', 'cloudfront']
    def __init__(self, host, config, provider):
        AuthHandler.__init__(self, host, config, provider)
        HmacKeys.__init__(self, host, config, provider)
        # V2 signing uses SHA-1; disable the SHA-256 template.
        self._hmac_256 = None
    def update_provider(self, provider):
        super(HmacAuthV2Handler, self).update_provider(provider)
        self._hmac_256 = None
    def add_auth(self, http_request, **kwargs):
        """Sign the request in place by setting its Authorization header."""
        headers = http_request.headers
        if 'Date' not in headers:
            headers['Date'] = formatdate(usegmt=True)
        if self._provider.security_token:
            key = self._provider.security_token_header
            headers[key] = self._provider.security_token
        # Only the Date header value is signed.
        b64_hmac = self.sign_string(headers['Date'])
        auth_hdr = self._provider.auth_header
        headers['Authorization'] = ("%s %s:%s" %
                                    (auth_hdr,
                                     self._provider.access_key, b64_hmac))
class HmacAuthV3Handler(AuthHandler, HmacKeys):
    """Implements the new Version 3 HMAC authorization used by Route53."""
    capability = ['hmac-v3', 'route53', 'ses']
    def __init__(self, host, config, provider):
        AuthHandler.__init__(self, host, config, provider)
        HmacKeys.__init__(self, host, config, provider)
    def add_auth(self, http_request, **kwargs):
        """Sign the Date header and set X-Amzn-Authorization in place."""
        headers = http_request.headers
        if 'Date' not in headers:
            headers['Date'] = formatdate(usegmt=True)
        if self._provider.security_token:
            key = self._provider.security_token_header
            headers[key] = self._provider.security_token
        # V3 signs just the Date header value, with whichever digest
        # algorithm() reports (SHA-256 when available).
        b64_hmac = self.sign_string(headers['Date'])
        s = "AWSCS-HTTPS AWSAccessKeyId=%s," % self._provider.access_key
        s += "Algorithm=%s,Signature=%s" % (self.algorithm(), b64_hmac)
        headers['X-Amzn-Authorization'] = s
class HmacAuthV3HTTPHandler(AuthHandler, HmacKeys):
"""
Implements the new Version 3 HMAC authorization used by DynamoDB.
"""
capability = ['hmac-v3-http']
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
def headers_to_sign(self, http_request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
headers_to_sign = {'Host': self.host}
for name, value in http_request.headers.items():
lname = name.lower()
if lname.startswith('x-amz'):
headers_to_sign[name] = value
return headers_to_sign
def canonical_headers(self, headers_to_sign):
"""
Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
l = sorted(['%s:%s' % (n.lower().strip(),
headers_to_sign[n].strip()) for n in headers_to_sign]) |
ChristineLaMuse/mozillians | vendor-local/lib/python/djcelery/backends/database.py | Python | bsd-3-clause | 1,952 | 0 | from __future__ import absolute_import
from celery import current_app
from celery.backends.base import BaseDictBackend
from celery.utils.timeutils import maybe_timedelta
from ..models import TaskMeta, TaskSetMeta
class DatabaseBackend(BaseDictBackend):
    """The database backend.
    Using Django models to store task state.
    """
    # Django models backing individual task results and taskset results.
    TaskModel = TaskMeta
    TaskSetModel = TaskSetMeta
    # How long results are kept before cleanup() deletes them.
    expires = current_app.conf.CELERY_TASK_RESULT_EXPIRES
    create_django_tables = True
    # Polling interval (seconds) used when waiting for a result.
    subpolling_interval = 0.5
    def _store_result(self, task_id, result, status, traceback=None):
        """Store return value and status of an executed task."""
        self.TaskModel._default_manager.store_result(task_id, result, status,
                                                     traceback=traceback)
        return result
    def _save_taskset(self, taskset_id, result):
        """Store the result of an executed taskset."""
        self.TaskSetModel._default_manager.store_result(taskset_id, result)
        return result
    def _get_task_meta_for(self, task_id):
        """Get task metadata for a task by id."""
        return self.TaskModel._default_manager.get_task(task_id).to_dict()
    def _restore_taskset(self, taskset_id):
        """Get taskset metadata for a taskset by id.

        Returns None when the taskset is unknown.
        """
        meta = self.TaskSetModel._default_manager.restore_taskset(taskset_id)
        if meta:
            return meta.to_dict()
    def _delete_taskset(self, taskset_id):
        """Remove a stored taskset result."""
        self.TaskSetModel._default_manager.delete_taskset(taskset_id)
    def _forget(self, task_id):
        """Forget a task's result; missing rows are silently ignored."""
        try:
            self.TaskModel._default_manager.get(task_id=task_id).delete()
        except self.TaskModel.DoesNotExist:
            pass
    def cleanup(self):
        """Delete expired metadata."""
        expires = maybe_timedelta(self.expires)
        for model in self.TaskModel, self.TaskSetModel:
            model._default_manager.delete_expired(expires)
|
sbesson/zeroc-ice | rb/test/Ice/retry/run.py | Python | gpl-2.0 | 807 | 0.008674 | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2013 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys

# Locate the Ice source tree's top-level "scripts" directory by probing
# upward from this script's location for scripts/TestUtil.py.
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
    path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
    raise RuntimeError("can't find toplevel directory!")
# Make the shared test harness importable, then run the standard
# client/server test pairing.
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
TestUtil.clientServerTest()
| |
horance-liu/tensorflow | tensorflow/contrib/layers/python/layers/layers.py | Python | apache-2.0 | 130,351 | 0.003744 | # -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Higher level ops for building layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import convolutional as convolutional_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.layers import normalization as normalization_layers
from tensorflow.python.layers import pooling as pooling_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training import moving_averages
from tensorflow.python.layers.maxout import maxout
# TODO(b/28426988): Replace legacy_* fns migrated from slim.
# TODO(b/28426988): Remove legacy_* when all uses have migrated to new API.
__all__ = ['avg_pool2d',
'avg_pool3d',
'batch_norm',
'bias_add',
'conv2d',
'conv3d',
'conv2d_in_plane',
'conv2d_transpose',
'conv3d_transpose',
'convolution',
'convolution2d',
'convolution2d_in_plane',
'convolution2d_transpose',
'convolution3d',
'convolution3d_transpose',
'dropout',
'elu',
'flatten',
'fully_connected',
'GDN',
'gdn',
'layer_norm',
'linear',
'pool',
'max_pool2d',
'max_pool3d',
'one_hot_encoding',
'relu',
'relu6',
'repeat',
'scale_gradient',
'separable_conv2d',
'separable_convolution2d',
'softmax',
'spatial_softmax',
'stack',
'unit_norm',
'legacy_fully_connected',
'legacy_linear',
'legacy_relu',
'maxout']
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
DATA_FORMAT_NCDHW = 'NCDHW'
DATA_FORMAT_NDHWC = 'NDHWC'
@add_arg_scope
def avg_pool2d(inputs,
               kernel_size,
               stride=2,
               padding='VALID',
               data_format=DATA_FORMAT_NHWC,
               outputs_collections=None,
               scope=None):
  """Adds a 2D average pooling op.

  Pooling is applied per image; batch and channel dimensions are untouched.

  Args:
    inputs: A 4-D tensor, `[batch_size, height, width, channels]` for
      `NHWC` or `[batch_size, channels, height, width]` for `NCHW`.
    kernel_size: Length-2 list `[kernel_height, kernel_width]`, or a single
      int used for both dimensions.
    stride: Length-2 list `[stride_height, stride_width]`, or a single int
      used for both. Both strides must currently be equal.
    padding: Either 'VALID' or 'SAME'.
    data_format: `NHWC` (default) or `NCHW`.
    outputs_collections: Collections to which the output is added.
    scope: Optional scope for name_scope.

  Returns:
    A `Tensor` holding the pooled result.

  Raises:
    ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
  """
  if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
    raise ValueError('data_format has to be either NCHW or NHWC.')
  with ops.name_scope(scope, 'AvgPool2D', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    # Translate the NCHW/NHWC flag into the core layers' naming scheme.
    if data_format and data_format.startswith('NC'):
      df = 'channels_first'
    else:
      df = 'channels_last'
    pooling_layer = pooling_layers.AveragePooling2D(
        pool_size=kernel_size,
        strides=stride,
        padding=padding,
        data_format=df,
        _scope=sc)
    outputs = pooling_layer.apply(inputs)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
@add_arg_scope
def avg_pool3d(inputs,
               kernel_size,
               stride=2,
               padding='VALID',
               data_format=DATA_FORMAT_NDHWC,
               outputs_collections=None,
               scope=None):
  """Adds a 3D average pooling op.

  Pooling is applied per volume; batch and channel dimensions are untouched.

  Args:
    inputs: A 5-D tensor, `[batch_size, depth, height, width, channels]` for
      `NDHWC` or `[batch_size, channels, depth, height, width]` for `NCDHW`.
    kernel_size: Length-3 list `[kernel_depth, kernel_height, kernel_width]`,
      or a single int used for all dimensions.
    stride: Length-3 list `[stride_depth, stride_height, stride_width]`, or
      a single int used for all. All strides must currently be equal.
    padding: Either 'VALID' or 'SAME'.
    data_format: `NDHWC` (default) or `NCDHW`.
    outputs_collections: Collections to which the output is added.
    scope: Optional scope for name_scope.

  Returns:
    A `Tensor` holding the pooled result.

  Raises:
    ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.
  """
  if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):
    raise ValueError('data_format has to be either NCDHW or NDHWC.')
  with ops.name_scope(scope, 'AvgPool3D', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    # Translate the NCDHW/NDHWC flag into the core layers' naming scheme.
    if data_format and data_format.startswith('NC'):
      df = 'channels_first'
    else:
      df = 'channels_last'
    pooling_layer = pooling_layers.AveragePooling3D(
        pool_size=kernel_size,
        strides=stride,
        padding=padding,
        data_format=df,
        _scope=sc)
    outputs = pooling_layer.apply(inputs)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
def _fused_batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
param_regularizers=None,
updates_co |
zetaops/zengine | zengine/tornado_server/server.py | Python | gpl-3.0 | 5,363 | 0.001865 | # -*- coding: utf-8 -*-
"""
tornado websocket proxy for WF worker daemons
"""
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
import json
import os, sys
import traceback
from uuid import uuid4
from tornado import websocket, web, ioloop
from tornado.escape import json_decode, json_encode
from tornado.httpclient import HTTPError
sys.path.insert(0, os.path.realpath(os.path.dirname(__file__)))
from ws_to_queue import QueueManager, log, settings
COOKIE_NAME = 'zopsess'
DEBUG = os.getenv("DEBUG", False)
# blocking_connection = BlockingConnectionForHTTP()
class SocketHandler(websocket.WebSocketHandler):
    """
    websocket handler

    Bridges browser websocket connections to the AMQP queue manager
    (self.application.pc), keyed by the session cookie.
    """
    def check_origin(self, origin):
        """
        Prevents CORS attacks.
        Args:
            origin: HTTP "Origin" header. URL of initiator of the request.
        Returns:
            True if origin is legit, otherwise False
        """
        # FIXME: implement CORS checking
        return True
    def _get_sess_id(self):
        # Session identity comes solely from the zopsess cookie; returns
        # None when the client is not logged in.
        # return self.sess_id;
        sess_id = self.get_cookie(COOKIE_NAME)
        return sess_id
    def open(self):
        """
        Called on new websocket connection.

        Registers the socket with the queue manager when a session cookie
        exists, otherwise tells the client to log in first.
        """
        sess_id = self._get_sess_id()
        if sess_id:
            self.application.pc.websockets[self._get_sess_id()] = self
            self.write_message(json.dumps({"cmd": "status", "status": "open"}))
        else:
            self.write_message(json.dumps({"cmd": "error", "error": "Please login", "code": 401}))
    def on_message(self, message):
        """
        called on new websocket message,

        Forwards the raw message to the queue manager for routing.
        """
        log.debug("WS MSG for %s: %s" % (self._get_sess_id(), message))
        self.application.pc.redirect_incoming_message(self._get_sess_id(), message, self.request)
    def on_close(self):
        """
        remove connection from pool on connection close.
        """
        self.application.pc.unregister_websocket(self._get_sess_id())
# noinspection PyAbstractClass
class HttpHandler(web.RequestHandler):
    """
    login handler class

    Handles plain HTTP requests (login and similar views) by forwarding
    them through the same queue manager used for websocket traffic; the
    response comes back asynchronously via write_message().
    """
    def _handle_headers(self):
        """
        Do response processing

        Sets CORS and content-type headers; rejects disallowed origins
        with HTTP 403 unless DEBUG mode is on.
        """
        origin = self.request.headers.get('Origin')
        if not settings.DEBUG:
            if origin in settings.ALLOWED_ORIGINS or not origin:
                self.set_header('Access-Control-Allow-Origin', origin)
            else:
                log.debug("CORS ERROR: %s not allowed, allowed hosts: %s" % (origin,
                                                                             settings.ALLOWED_ORIGINS))
                raise HTTPError(403, "Origin not in ALLOWED_ORIGINS: %s" % origin)
        else:
            # In DEBUG mode reflect any origin (or wildcard) for convenience.
            self.set_header('Access-Control-Allow-Origin', origin or '*')
        self.set_header('Access-Control-Allow-Credentials', "true")
        self.set_header('Access-Control-Allow-Headers', 'Content-Type')
        self.set_header('Access-Control-Allow-Methods', 'OPTIONS')
        self.set_header('Content-Type', 'application/json')
    @web.asynchronous
    def get(self, view_name):
        """
        only used to display login form
        Args:
            view_name: should be "login"
        """
        self.post(view_name)
    @web.asynchronous
    def post(self, view_name):
        """
        login handler

        Wraps the JSON request body, assigns or reuses the session cookie,
        and routes the payload to the queue manager. The connection stays
        open until write_message() finishes it.
        """
        sess_id = None
        input_data = {}
        # try:
        self._handle_headers()
        # handle input
        input_data = json_decode(self.request.body) if self.request.body else {}
        input_data['path'] = view_name
        # set or get session cookie
        if not self.get_cookie(COOKIE_NAME) or 'username' in input_data:
            # New session (or re-login): issue a fresh session id.
            sess_id = uuid4().hex
            self.set_cookie(COOKIE_NAME, sess_id)  # , domain='127.0.0.1'
        else:
            sess_id = self.get_cookie(COOKIE_NAME)
        # h_sess_id = "HTTP_%s" % sess_id
        input_data = {'data': input_data,
                      '_zops_remote_ip': self.request.remote_ip}
        log.info("New Request for %s: %s" % (sess_id, input_data))
        self.application.pc.register_websocket(sess_id, self)
        self.application.pc.redirect_incoming_message(sess_id,
                                                      json_encode(input_data),
                                                      self.request)
    def write_message(self, output):
        """Send the queued response back to the HTTP client and finish."""
        log.debug("WRITE MESSAGE To CLIENT: %s" % output)
        # if 'login_process' not in output:
        #     # workaround for premature logout bug (empty login form).
        #     # FIXME: find a better way to handle HTTP and SOCKET connections for same sess_id.
        #     return
        self.write(output)
        self.finish()
        self.flush()
# Route table: /ws carries the websocket traffic, any other single path
# segment is handled over plain HTTP (login etc.).
URL_CONFS = [
    (r'/ws', SocketHandler),
    (r'/(\w+)', HttpHandler),
]

app = web.Application(URL_CONFS, debug=DEBUG, autoreload=False)
def runserver(host=None, port=None):
    """Start the Tornado server and block on the IO loop.

    Args:
        host: interface to bind; falls back to $HTTP_HOST, then 0.0.0.0.
        port: port to listen on; falls back to $HTTP_PORT, then 9001.
    """
    bind_host = host or os.getenv('HTTP_HOST', '0.0.0.0')
    bind_port = port or os.getenv('HTTP_PORT', '9001')
    loop = ioloop.IOLoop.instance()
    # Attach the queue (pika) client to the application so handlers can
    # reach it through self.application.pc.
    queue_client = QueueManager(loop)
    app.pc = queue_client
    queue_client.connect()
    app.listen(bind_port, bind_host)
    # Blocks until the loop is stopped.
    loop.start()

if __name__ == '__main__':
    runserver()
|
jordanemedlock/psychtruths | temboo/core/Library/Google/Drive/Comments/Insert.py | Python | apache-2.0 | 5,038 | 0.005161 | # -*- coding: utf-8 -*-
###############################################################################
#
# Insert
# Creates a new comment on the given file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Insert(Choreography):
    # Choreo wrapper for the Google Drive "insert comment" operation.
    def __init__(self, temboo_session):
        """
        Create a new instance of the Insert Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(Insert, self).__init__(temboo_session, '/Library/Google/Drive/Comments/Insert')

    def new_input_set(self):
        # Factory for the input container used by execute_with_results().
        return InsertInputSet()

    def _make_result_set(self, result, path):
        return InsertResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return InsertChoreographyExecution(session, exec_id, path)
class InsertInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Insert
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.

    Authentication: supply either AccessToken, or the
    ClientID/ClientSecret/RefreshToken triple to mint a new token.
    """
    def set_RequestBody(self, value):
        """
        Set the value of the RequestBody input for this Choreo. ((required, json) A JSON representation of fields in a comment resource which shoud contain at least one key for content. See documentation for formatting examples.)
        """
        super(InsertInputSet, self)._set_input('RequestBody', value)
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth2 process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
        """
        super(InsertInputSet, self)._set_input('AccessToken', value)
    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
        """
        super(InsertInputSet, self)._set_input('ClientID', value)
    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
        """
        super(InsertInputSet, self)._set_input('ClientSecret', value)
    def set_Fields(self, value):
        """
        Set the value of the Fields input for this Choreo. ((optional, string) Selector specifying a subset of fields to include in the response.)
        """
        super(InsertInputSet, self)._set_input('Fields', value)
    def set_FileID(self, value):
        """
        Set the value of the FileID input for this Choreo. ((required, string) The ID of the file.)
        """
        super(InsertInputSet, self)._set_input('FileID', value)
    def set_RefreshToken(self, value):
        """
        Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
        """
        super(InsertInputSet, self)._set_input('RefreshToken', value)
class InsertResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the Insert Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    # NOTE(review): the parameter name shadows the builtin `str`; renaming
    # would change the keyword-call interface, so it is left as-is.
    def getJSONFromString(self, str):
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
        """
        return self._output.get('Response', None)
    def get_NewAccessToken(self):
        """
        Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
        """
        return self._output.get('NewAccessToken', None)
class InsertChoreographyExecution(ChoreographyExecution):
    # Execution handle for the Insert Choreo; only customizes the result type.
    def _make_result_set(self, response, path):
        return InsertResultSet(response, path)
|
portableant/open-context-py | opencontext_py/apps/ocitems/versions/models.py | Python | gpl-3.0 | 845 | 0 | import hashlib
from django | .db | import IntegrityError
from django.db import models
# A version is a way of caching item data so changes can be
# undone
# A version is a way of caching item data so changes can be
# undone
class Version(models.Model):
    """Snapshot of an item's serialized state, enabling undo of changes."""
    change_uuid = models.CharField(max_length=50, db_index=True)
    uuid = models.CharField(max_length=50, db_index=True)
    item_type = models.CharField(max_length=50)
    project_uuid = models.CharField(max_length=50)
    source_id = models.CharField(max_length=50)
    # Fix: these two fields referenced the misspelled module name
    # "modles", which raised NameError as soon as the module was imported.
    json_data = models.TextField()  # serialization from Django
    json_ld = models.TextField()  # JSON-LD output
    updated = models.DateTimeField(auto_now=True)

    def save(self, *args, **kwargs):
        """Save the version record.

        NOTE(review): the original docstring claimed this creates a hash-id,
        but no hash is computed here - it only delegates to Model.save().
        """
        super(Version, self).save(*args, **kwargs)

    class Meta:
        db_table = 'oc_versions'
|
paolo-losi/stormed-amqp | stormed/method/connection.py | Python | mit | 2,532 | 0.011058 | from stormed.util import add_method, AmqpError, logger
from stormed.serialization import table2str
from stormed.heartbeat import HeartbeatMonitor
from stormed.frame import status
from stormed.method.codegen import id2class
from stormed.method.constant import id2constant
from stormed.method.codegen.connection import *
@add_method(Start)
def handle(self, conn):
    """Answer the broker's connection.start with start-ok (AMQPLAIN/en_US)."""
    # This client only speaks AMQPLAIN and en_US; bail out early if the
    # broker does not offer them.
    if 'AMQPLAIN' not in self.mechanisms.split(' '):
        raise AmqpError("'AMQPLAIN' not in mechanisms")
    if 'en_US' not in self.locales.split(' '):
        raise AmqpError("'en_US' not in locales")
    # AMQPLAIN credentials are sent as an AMQP field table.
    response = table2str(dict(LOGIN = conn.username,
                              PASSWORD = conn.password))
    client_properties = {'client': 'stormed-amqp'}
    start_ok = StartOk(client_properties=client_properties,
                       mechanism='AMQPLAIN', response=response,
                       locale='en_US')
    conn.write_method(start_ok)
@add_method(Tune)
def handle(self, conn):
    """Answer connection.tune with tune-ok, then open the virtual host."""
    # A frame_max of 0 from the broker means "no limit"; fall back to 64KiB.
    conn.frame_max = self.frame_max or 2**16
    tune_ok = TuneOk(frame_max = conn.frame_max,
                     channel_max = self.channel_max,
                     heartbeat = conn.heartbeat)
    conn.write_method(tune_ok)
    _open = Open(virtual_host = conn.vhost,
                 capabilities = '',
                 insist = 0)
    conn.write_method(_open)
@add_method(OpenOk)
def handle(self, conn):
    """Mark the connection opened, start heartbeats, run the user callback."""
    conn.status = status.OPENED
    if conn.heartbeat:
        HeartbeatMonitor(conn).start()
    # User callbacks must not be able to break protocol handling; log and
    # continue on any error they raise.
    try:
        conn.on_connect()
    except Exception:
        logger.error('ERROR in on_connect() callback', exc_info=True)
@add_method(CloseOk)
def handle(self, conn):
    """Finalize a client-initiated close once the broker acknowledges it."""
    conn.status = status.CLOSED
    conn.invoke_callback()
    conn.reset()
class ConnectionError(object):
    """Details of a connection-level (hard) AMQP error passed to on_error.

    NOTE(review): this shadows the Python 3 builtin ConnectionError;
    renaming would break existing callers, so it is left as-is.
    """
    def __init__(self, reply_code, reply_text, method):
        self.reply_code = reply_code
        self.reply_text = reply_text
        self.method = method
@add_method(Close)
def handle(self, conn):
    """Handle a broker-initiated connection.close (hard error).

    Resolves the offending method (best effort), resets the connection,
    logs the error code, and forwards the details to the user's on_error
    callback when one is registered.
    """
    # Best-effort lookup of the method the broker complained about; the
    # ids may be unknown to our codegen tables.
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit.
    try:
        mod = id2class[self.class_id]
        method = getattr(mod, 'id2method')[self.method_id]
    except Exception:
        method = None
    conn.reset()
    error_code = id2constant.get(self.reply_code, '')
    # logger.warning is the non-deprecated spelling of logger.warn.
    logger.warning('Connection Hard Error. code=%r. %s', error_code,
                   self.reply_text)
    if conn.on_error:
        # User callbacks must not break protocol handling; log and continue.
        try:
            conn.on_error(ConnectionError(error_code, self.reply_text, method))
        except Exception:
            logger.error('ERROR in on_error() callback', exc_info=True)
|
unixorn/haze | haze/commands/awsmetadata.py | Python | apache-2.0 | 1,176 | 0.006803 | #!/usr/bin/env python
# Copyright 2015 Joe Block <jpb@unixorn.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance | with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitation | s under the License.
#
# I have often found it convenient to be able to read what AWS metadata
# for an instance inside bash scripts on the instance.
import argparse
from haze.ec2 import getMetadataKey
# Bash helpers
def awsReadMetadataKey():
    """Print a key from a running instance's metadata.

    Reads --key from the command line and prints the corresponding EC2
    instance metadata value, for use from bash scripts on the instance.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--key",
                        dest="keyname",
                        help="Which metadata key to read")
    cli = parser.parse_args()
    # Fix: use the parenthesized print form so this module parses under
    # both Python 2 (single-argument print statement) and Python 3.
    print(getMetadataKey(name=cli.keyname))

if __name__ == "__main__":
    awsReadMetadataKey()
|
pyocd/pyOCD | pyocd/utility/progress.py | Python | apache-2.0 | 4,303 | 0.003486 | # pyOCD debugger
# Copyright (c) 2017-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import logging
LOG = logging.getLogger(__name__)
class ProgressReport(object):
    """@brief Base progress report class.
    Implements the progress-tracking state machine; rendering is left to
    subclasses via _update() and _finish().
    """

    def __init__(self, file=None):
        self._file = file if file else sys.stdout
        self.prev_progress = 0
        self.backwards_progress = False
        self.done = False
        self.last = 0

    def __call__(self, progress):
        assert progress >= 0.0

        # Clamp runaway values to the top of the range (logging the clamp).
        if progress > 1.0:
            progress = 1.0
            LOG.debug("progress out of bounds: %.3f", progress)

        # A zero value restarts the report.
        if progress == 0.0:
            self._start()

        # Remember whether progress ever moved backwards.
        if progress < self.prev_progress:
            self.backwards_progress = True
        self.prev_progress = progress

        # Keep drawing while the report is still live.
        if not self.done:
            self._update(progress)

        # Reaching 1.0 completes the report.
        if progress >= 1.0:
            self.done = True
            self._finish()
            if self.backwards_progress:
                LOG.debug("Progress went backwards!")

    def _start(self):
        """Reset tracking state for a fresh progress run."""
        self.prev_progress = 0
        self.backwards_progress = False
        self.done = False
        self.last = 0

    def _update(self, progress):
        """Render the current progress; implemented by subclasses."""
        raise NotImplementedError()

    def _finish(self):
        """Render completion; implemented by subclasses."""
        raise NotImplementedError()
class ProgressReportTTY(ProgressReport):
    """@brief Progress report subclass for TTYs.
    The progress bar is fully redrawn onscreen as progress is updated to give the
    impression of animation.
    """
    ## These width constants can't be changed yet without changing the code below to match.
    WIDTH = 50

    def _update(self, progress):
        # Return to column 0 and redraw the whole bar plus percentage in place.
        self._file.write('\r')
        i = int(progress * self.WIDTH)
        # Repaired: this line was corrupted in the source ("progress * | 100").
        self._file.write("[%-50s] %3d%%" % ('=' * i, round(progress * 100)))
        self._file.flush()

    def _finish(self):
        # Leave the completed bar on its own line.
        self._file.write("\n")
class ProgressReportNoTTY(ProgressReport):
    """@brief Progress report subclass for non-TTY output.
    Rather than redrawing, only the delta between the previous and current
    progress is written on each update, so the output is suitable for piping
    to a file or similar destination.
    """
    ## These width constants can't be changed yet without changing the code below to match.
    WIDTH = 40

    def _start(self):
        super(ProgressReportNoTTY, self)._start()
        # Print a fixed ruler line, then open the bar itself.
        self._file.write('[' + '---|' * 9 + '----]\n[')
        self._file.flush()

    def _update(self, progress):
        # Emit only the newly-completed portion of the bar.
        filled = int(progress * self.WIDTH)
        self._file.write('=' * (filled - self.last))
        self._file.flush()
        self.last = filled

    def _finish(self):
        self._file.write("]\n")
        self._file.flush()
def print_progress(file=None):
    """@brief Progress printer factory.

    Checks whether the output file is a TTY and returns an instance of the
    matching ProgressReport subclass.

    @param file The output file. Optional. If not provided, or if set to None,
        then sys.stdout will be used automatically.
    """
    stream = sys.stdout if file is None else file
    try:
        is_tty = os.isatty(stream.fileno())
    except (OSError, AttributeError):
        # Either there is no fileno() or calling it failed; in both cases
        # assume we are not connected to a TTY.
        is_tty = False
    klass = ProgressReportTTY if is_tty else ProgressReportNoTTY
    return klass(stream)
|
LynxyssCZ/Flexget | flexget/plugins/sites/limetorrents.py | Python | mit | 4,028 | 0.002979 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.requests import RequestExc | eption
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability, clean_symbols
from flexget.utils.tools import parse_filesize
log = logging.getLogger('limetorrents')
class Limetorrents(object):
    """
    Limetorrents search plugin.

    Config is either a bare category string/boolean or a dict with
    'category' and 'order_by' keys; see `schema`.
    """

    # Repaired: the 'category' key was corrupted in the source ("categor | y").
    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'category': {'type': 'string', 'enum': ['all', 'anime', 'applications', 'games', 'movies', 'music',
                                                            'tv', 'other'], 'default': 'all'},
                    'order_by': {'type': 'string', 'enum': ['date', 'seeds'], 'default': 'date'}
                },
                'additionalProperties': False
            }
        ]
    }

    base_url = 'https://www.limetorrents.cc/'
    errors = False

    @plugin.internet(log)
    def search(self, task, entry, config):
        """
        Search for entries on Limetorrents.

        Returns a set of Entry objects, one per torrent found for any of the
        entry's search strings.
        """
        if not isinstance(config, dict):
            config = {'category': config}

        # 'date' is the site default, so only non-date orderings go in the URL.
        order_by = ''
        if isinstance(config.get('order_by'), str):
            if config['order_by'] != 'date':
                order_by = '{0}/1'.format(config['order_by'])

        category = 'all'
        if isinstance(config.get('category'), str):
            category = '{0}'.format(config['category'])

        entries = set()

        for search_string in entry.get('search_strings', [entry['title']]):
            # No special characters - use dashes instead of %20
            cleaned_search_string = clean_symbols(search_string).replace(' ', '-')

            query = 'search/{0}/{1}/{2}'.format(category, cleaned_search_string.encode('utf8'), order_by)
            log.debug('Using search: %s; category: %s; ordering: %s', cleaned_search_string, category, order_by or 'default')
            try:
                page = task.requests.get(self.base_url + query)
                log.debug('requesting: %s', page.url)
            except RequestException as e:
                log.error('Limetorrents request failed: %s', e)
                continue

            soup = get_soup(page.content)
            # The csprite_dl14 anchor class marks a download link per result row.
            if soup.find('a', attrs={'class': 'csprite_dl14'}) is not None:
                for link in soup.findAll('a', attrs={'class': 'csprite_dl14'}):
                    row = link.find_parent('tr')

                    info_url = str(link.get('href'))

                    # Get the title from the URL as it's complete versus the actual Title text which gets cut off
                    title = str(link.next_sibling.get('href'))
                    title = title[:title.rfind('-torrent')].replace('-', ' ')
                    title = title[1:]

                    data = row.findAll('td', attrs={'class': 'tdnormal'})
                    size = str(data[1].text).replace(',', '')

                    seeds = int(row.find('td', attrs={'class': 'tdseed'}).text.replace(',', ''))
                    leeches = int(row.find('td', attrs={'class': 'tdleech'}).text.replace(',', ''))

                    size = parse_filesize(size)

                    e = Entry()
                    e['url'] = info_url
                    e['title'] = title
                    e['torrent_seeds'] = seeds
                    e['torrent_leeches'] = leeches
                    e['search_sort'] = torrent_availability(e['torrent_seeds'], e['torrent_leeches'])
                    e['content_size'] = size

                    entries.add(e)

        return entries
@event('plugin.register')
def register_plugin():
    """Register the Limetorrents class with FlexGet as a search plugin (API v2)."""
    plugin.register(Limetorrents, 'limetorrents', interfaces=['search'], api_ver=2)
|
# Prefer setuptools; fall back to the stdlib distutils when it is absent.
try:
    from setuptools import setup
except ImportError:  # narrowed from a bare except: only import failure is expected
    from distutils.core import setup

setup(
    long_description=open("README.rst").read(),
    name="""tinypath""",
    license="""MIT""",
    author="""Karim Bahgat""",
    author_email="""karim.bahgat.norway@gmail.com""",
    py_modules=['tinypath'],
    url="""http://github.com/karimbahgat/tinypath""",
    version="""0.1.1""",
    keywords="""paths files folders organizing""",
    classifiers=['License :: OSI Approved', 'Programming Language :: Python', 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'Intended Audience :: End Users/Desktop'],
    description="""Tinypath is a tiny object-oriented file path module that provides only the most crucial and commonly needed functionality, making it easy to learn and efficient to use.""",
)
|
bbqbailey/geeksHome | geeksHomeApp/load_Sensors.py | Python | gpl-2.0 | 728 | 0.021978 | __author__ = 'superben'
# Columns: Zone, SensorName, SensorLocation, SensorType, SensorState, Header, Pin, GPIO, Direction
Sensors = {}

print("Opening file sensors.txt")
# 'with' guarantees the file handle is closed even on a parse error
# (the original left it open for the life of the process).
with open('sensors.txt', 'r') as f:
    firstTime = True
    for line in f:
        lineSplit = line.split(',')
        lineStripped = [x.strip(' ') for x in lineSplit]
        if firstTime:
            # The first row is the header describing the columns.
            Sensors['Fields'] = lineStripped
            firstTime = False
        else:
            # Remaining rows are keyed by "<zone>-<sensor name>".
            zone = lineStripped[0]
            sensorName = lineStripped[1]
            Sensors[zone + '-' + sensorName] = lineStripped

print(Sensors)
for x in Sensors:
    if x != 'Fields':
        print(x, Sensors[x])
|
from django.utils.functional import LazyObject

__version__ = '1.2.1'

default_app_config = 'constance.apps.ConstanceConfig'


class LazyConfig(LazyObject):
    """Lazily-initialized proxy for the constance Config object.

    Construction of Config is deferred to the first attribute access, so
    importing this package has no side effects of its own.
    """

    def _setup(self):
        from .base import Config
        self._wrapped = Config()


config = LazyConfig()
|
cheneydc/qnupload | src/qnupload/qnupload.py | Python | mit | 4,668 | 0.000214 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This modle will help you put your local file to
QiNiu cloud storage, I use it to share files and
pictures in my blog.
"""
import argparse
# Compatible for Py2 and Py3
try:
import ConfigParser
except ImportError:
import configparser
import os
import qiniu.config
import sys
from qiniu import Auth
from qiniu import BucketManager
conf_file = "/etc/qnupload/qnupload.conf"
def getAuth(accessKey, secretKey):
    """Build a qiniu Auth object from the given access/secret key pair."""
    return Auth(accessKey, secretKey)
def uploadFile(bucketName, filePath, auth, domain, full_name):
    """Upload a local file to the given bucket on the Qiniu server.

    The remote key is the full path when full_name is true, otherwise just
    the file's base name. On success the shareable link is printed.
    """
    file_basename = filePath if full_name else os.path.basename(filePath)
    # Python 2 byte strings need decoding; Python 3 str has no .decode.
    try:
        fileName = file_basename.decode("utf-8")
    except AttributeError:
        fileName = file_basename
    token = auth.upload_token(bucketName)
    ret, resp = qiniu.put_file(token, fileName, filePath)
    if not ret:
        print("Failed to upload file.")
        print(resp)
        return
    print("Upload file: %s" % (filePath,))
    print("Link: %s" % (domain + fileName,))
def getBucket(uploadAuth):
    """Return a BucketManager bound to the given auth object."""
    bucket = BucketManager(uploadAuth)
    return bucket
def checkFile(bucket, filePath, bucketName):
    """Return True when filePath exists locally and is absent from the bucket.

    Prints a diagnostic when the local path is wrong or when the remote
    object already exists.
    """
    if not os.path.exists(filePath):
        print("Wrong file path: %s" % (filePath,))
        return False
    stat_result, _info = bucket.stat(bucketName, filePath)
    if stat_result:
        print("File exists in Qiniu cloud: %s" % (filePath,))
    return stat_result is None
def check_conf(conf_file):
    """Exit with an explanatory template message unless conf_file exists."""
    if os.path.exists(conf_file):
        return
    print("ERROR: Cannot find configure file.")
    print("Please create configure file: %s" % (conf_file,))
    print("[DEFAULT]")
    print("default_bucket_name =")
    print("access_key =")
    print("secret_key =")
    print("domain =")
    sys.exit(1)
def main():
    """Command-line entry point: read config, parse options, upload each file."""
    # Check the configure file
    check_conf(conf_file)
    # Read configure file
    # Compatible for Py2 and Py3: exactly one of ConfigParser/configparser
    # was imported at module level; the other name raises NameError.
    try:
        cf = ConfigParser.ConfigParser()
    except NameError:
        cf = configparser.ConfigParser()
    cf.read(conf_file)
    parser = argparse.ArgumentParser(
        prog="qnupload",  # fixed typo: the original said "quupload"
        description="This is a tool to upload file to Qiniu cloud.")
    parser.add_argument("file",
                        metavar="filepath",
                        nargs='+',
                        help="Specify a file to upload to Qiniu cloud.")
    parser.add_argument("-b", "--bucket",
                        help="A bucket under your Qiniu account.")
    parser.add_argument("-a", "--access-key",
                        help="Your access key.")
    parser.add_argument("-s", "--secret-key",
                        help="Your secret key.")
    parser.add_argument("-d", "--domain",
                        help="The domain of your Qiniu account to share \
                        the file you upload to Qiniu cloud.")
    parser.add_argument("--full-name",
                        action='store_true',
                        help="The file will be named with the path as \
                        its prefix when specify this option. ")
    args = parser.parse_args()

    # Command-line options win; otherwise fall back to the config file.
    if args.bucket is None:
        bucketName = cf.get("DEFAULT", "default_bucket_name")
    else:
        bucketName = args.bucket
    if args.access_key is None:
        access_key = cf.get("DEFAULT", "access_key")
    else:
        access_key = args.access_key
    if args.secret_key is None:
        secret_key = cf.get("DEFAULT", "secret_key")
    else:
        secret_key = args.secret_key
    if args.domain is None:
        domain = cf.get(bucketName, "domain")
    else:
        domain = args.domain
    full_name = args.full_name

    # Parse domain: ensure a trailing slash and a scheme.
    domain = domain + "/"
    if not domain.startswith("http"):
        domain = "http://" + domain

    # Get the full file list from the command line; directories contribute
    # their immediate children (non-recursive).
    fileList = []
    for item in args.file:
        if os.path.isdir(item):
            fileList.extend([item + '/' + f for f in os.listdir(item)])
        elif os.path.isfile(item):
            fileList.append(item)

    uploadAuth = getAuth(access_key, secret_key)
    bucket = getBucket(uploadAuth)
    for filePath in fileList:
        if checkFile(bucket, filePath, bucketName):
            uploadFile(bucketName, filePath, uploadAuth, domain, full_name)


if __name__ == '__main__':
    main()
|
daxtens/planyourpicnic | tools/import_furniture.py | Python | gpl-3.0 | 1,520 | 0.010526 | # Import Public Furniture listing into local database from CSV
import sys
sys.path.append('..')
from settings import *
import psycopg2
import psycopg2.extensions
import csv
class Point(object):
    """A latitude/longitude pair parsed from a "(lat, long)" string."""

    def __init__(self, dataString):
        # Clip the surrounding parentheses and split on ", ".
        parts = dataString[1:-1].split(', ')
        # Input arrives in standard order (lat, long).
        self.latitude = float(parts[0])
        self.longitude = float(parts[1])
def adapt_point(point):
    """Adapt a Point to a PostgreSQL point literal, in SQL (long, lat) order."""
    # store in sql order (long,lat)
    # Repaired: this line was corrupted in the source ("p | sycopg2").
    return psycopg2.extensions.AsIs("'(%s, %s)'" % (psycopg2.extensions.adapt(point.longitude), psycopg2.extensions.adapt(point.latitude)))
psycopg2.extensions.register_adapter(Point, adapt_point)

# Repaired: the connect call was corrupted in the source ("DB_ | DATABASE").
db_conn = psycopg2.connect(host=DB_HOST, database=DB_DATABASE,
                           user=DB_USER, password=DB_PASSWORD, port=DB_PORT)
db_cur = db_conn.cursor()

with open("../data/Public_Furniture_in_the_ACT.csv") as csvfile:
    reader = csv.reader(csvfile)
    # Skip the column-labels header row. next(reader) works on Python 2
    # and 3; the original reader.next() is Python-2 only.
    next(reader)
    for entry in reader:
        # Insert the generic POI first to obtain its generated id...
        db_cur.execute("INSERT INTO pois (location, type) VALUES (%s, %s) RETURNING id",
                       (Point(entry[4]), 'furniture'))
        newid = db_cur.fetchone()[0]
        # ...then the furniture-specific row referencing that id.
        db_cur.execute("INSERT INTO furniture (id, asset_id, feature_type, division_name, location_name, location) VALUES (%s, %s, %s, %s, %s, %s)",
                       (newid, entry[0], entry[1], entry[2], entry[3], Point(entry[4])))

db_conn.commit()
db_cur.close()
db_conn.close()
|
def foo3(x, y):
    """Return x + y (the isinstance assertion is trivially true for any y)."""
    assert isinstance(y, object)
    # Repaired: this line was corrupted in the source ("x | + y").
    i = x + y
    return i
| |
jrajahalme/envoy | docs/conf.py | Python | apache-2.0 | 9,203 | 0.005216 | # -*- coding: utf-8 -*-
#
# envoy documentation build configuration file, created by
# sphinx-quickstart on Sat May 28 10:51:27 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import datetime
import os
from sphinx.directives.code import CodeBlock
import sphinx_rtd_theme
import sys
# https://stackoverflow.com/questions/44761197/how-to-use-substitution-definitions-with-code-blocks
class SubstitutionCodeBlock(CodeBlock):
    """
    Similar to CodeBlock but replaces placeholders with variables. See "substitutions" below.
    """

    def run(self):
        """
        Expand every configured (placeholder, replacement) pair in the body.
        """
        app = self.state.document.settings.env.app
        expanded = []
        for line in self.content:
            for original, replacement in app.config.substitutions:
                line = line.replace(original, replacement)
            expanded.append(line)
        self.content = expanded
        return list(CodeBlock.run(self))
def setup(app):
    """Sphinx extension hook: declare config values and register the directive."""
    app.add_config_value('release_level', '', 'env')
    app.add_config_value('substitutions', [], 'html')
    app.add_directive('substitution-code-block', SubstitutionCodeBlock)
if not os.environ.get('ENVOY_DOCS_RELEASE_LEVEL'):
raise Exception("ENVOY_DOCS_RELEASE_LEVEL env var must be defined")
release_level = os.environ['ENVOY_DOCS_RELEASE_LEVEL']
blob_sha = os.environ['ENVOY_BLOB_SHA']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig']
extlinks = {
'repo': ('https://github.com/envoyproxy/envoy/blob/{}/%s'.format(blob_sha), ''),
'api': ('https://github.com/envoyproxy/envoy/blob/{}/api/%s'.format(blob_sha), ''),
}
# Setup global substitutions
if 'pre-release' in release_level:
substitutions = [('|envoy_docker_image|', 'envoy-dev:{}'.format(blob_sha))]
else:
substitutions = [('|envoy_docker_image|', 'envoy:{}'.format(blob_sha))]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'envoy'
copyright = u'2016-{}, Envoy Project Authors'.format(datetime.now().year)
author = u'Envoy Project Authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
if not os.environ.get('ENVOY_DOCS_VERSION_STRING'):
raise Exception("ENVOY_DOCS_VERSION_STRING env var must be defined")
# The short X.Y version.
version = os.environ['ENVOY_DOCS_VERSION_STRING']
# The full version, including alpha/beta/rc tags.
release = os.environ['ENVOY_DOCS_VERSION_STRING']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = [
'_build',
'_venv',
'Thumbs.db',
'.DS_Store',
'api-v2/api/v2/endpoint/load_report.proto.rst',
'api-v2/service/discovery/v2/hds.proto.rst',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'envoy v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'css/envoy.css'
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch |
saurabh6790/erpnext | erpnext/accounts/report/gross_profit/gross_profit.py | Python | gpl-3.0 | 14,572 | 0.024774 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, scrub
from erpnext.stock.utils import get_incoming_rate
from erpnext.controllers.queries import get_match_cond
from frappe.utils import flt, cint
def execute(filters=None):
	"""Report entry point: build the Gross Profit rows for the given filters.

	Returns a (columns, data) pair in the shape Frappe query reports expect.
	"""
	if not filters: filters = frappe._dict()
	filters.currency = frappe.get_cached_value('Company', filters.company, "default_currency")

	gross_profit_data = GrossProfitGenerator(filters)

	data = []

	# Columns to emit for each supported "group by" mode.
	# Repaired: the "customer" and "project" entries were corrupted in the source.
	group_wise_columns = frappe._dict({
		"invoice": ["parent", "customer", "customer_group", "posting_date", "item_code", "item_name", "item_group", "brand", "description",
			"warehouse", "qty", "base_rate", "buying_rate", "base_amount",
			"buying_amount", "gross_profit", "gross_profit_percent", "project"],
		"item_code": ["item_code", "item_name", "brand", "description", "qty", "base_rate",
			"buying_rate", "base_amount", "buying_amount", "gross_profit", "gross_profit_percent"],
		"warehouse": ["warehouse", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
			"gross_profit", "gross_profit_percent"],
		"brand": ["brand", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
			"gross_profit", "gross_profit_percent"],
		"item_group": ["item_group", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
			"gross_profit", "gross_profit_percent"],
		"customer": ["customer", "customer_group", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
			"gross_profit", "gross_profit_percent"],
		"customer_group": ["customer_group", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
			"gross_profit", "gross_profit_percent"],
		"sales_person": ["sales_person", "allocated_amount", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
			"gross_profit", "gross_profit_percent"],
		"project": ["project", "base_amount", "buying_amount", "gross_profit", "gross_profit_percent"],
		"territory": ["territory", "base_amount", "buying_amount", "gross_profit", "gross_profit_percent"]
	})

	columns = get_columns(group_wise_columns, filters)

	for src in gross_profit_data.grouped_data:
		row = []
		for col in group_wise_columns.get(scrub(filters.group_by)):
			row.append(src.get(col))

		# Trailing hidden currency value consumed by the Currency columns.
		row.append(filters.currency)
		data.append(row)

	return columns, data
def get_columns(group_wise_columns, filters):
	"""Resolve the report column definition strings for the active group-by mode."""
	# Map of logical column name -> Frappe "Label:Fieldtype:Width" definition.
	column_map = frappe._dict({
		"parent": _("Sales Invoice") + ":Link/Sales Invoice:120",
		"posting_date": _("Posting Date") + ":Date:100",
		"posting_time": _("Posting Time") + ":Data:100",
		"item_code": _("Item Code") + ":Link/Item:100",
		"item_name": _("Item Name") + ":Data:100",
		"item_group": _("Item Group") + ":Link/Item Group:100",
		"brand": _("Brand") + ":Link/Brand:100",
		"description": _("Description") + ":Data:100",
		"warehouse": _("Warehouse") + ":Link/Warehouse:100",
		"qty": _("Qty") + ":Float:80",
		"base_rate": _("Avg. Selling Rate") + ":Currency/currency:100",
		"buying_rate": _("Valuation Rate") + ":Currency/currency:100",
		"base_amount": _("Selling Amount") + ":Currency/currency:100",
		"buying_amount": _("Buying Amount") + ":Currency/currency:100",
		"gross_profit": _("Gross Profit") + ":Currency/currency:100",
		"gross_profit_percent": _("Gross Profit %") + ":Percent:100",
		"project": _("Project") + ":Link/Project:100",
		"sales_person": _("Sales person"),
		"allocated_amount": _("Allocated Amount") + ":Currency/currency:100",
		"customer": _("Customer") + ":Link/Customer:100",
		"customer_group": _("Customer Group") + ":Link/Customer Group:100",
		"territory": _("Territory") + ":Link/Territory:100"
	})

	columns = [column_map.get(col) for col in group_wise_columns.get(scrub(filters.group_by))]

	# Hidden currency column backing the Currency-typed columns above.
	columns.append({
		"fieldname": "currency",
		"label": _("Currency"),
		"fieldtype": "Link",
		"options": "Currency",
		"hidden": 1
	})

	return columns
class GrossProfitGenerator(object):
	def __init__(self, filters=None):
		"""Load all source data for the report and compute the grouped rows."""
		self.data = []
		# Cache of buying rates (populated by methods outside this view —
		# presumably keyed by item; verify against the full class).
		self.average_buying_rate = {}
		self.filters = frappe._dict(filters)
		# Pull everything the computation needs up front, then process.
		self.load_invoice_items()
		self.load_stock_ledger_entries()
		self.load_product_bundle()
		self.load_non_stock_items()
		self.get_returned_invoice_items()
		self.process()
	def process(self):
		"""Compute buying amount, per-unit rates and gross profit for every
		invoice item row, bucketing rows by the selected group-by key."""
		self.grouped = {}
		self.grouped_data = []

		self.currency_precision = cint(frappe.db.get_default("currency_precision")) or 3
		self.float_precision = cint(frappe.db.get_default("float_precision")) or 2

		for row in self.si_list:
			if self.skip_row(row, self.product_bundles):
				continue

			row.base_amount = flt(row.base_net_amount, self.currency_precision)

			# Product bundles come either from the invoice itself (when it
			# updates stock) or from the linked Delivery Note row.
			product_bundles = []
			if row.update_stock:
				product_bundles = self.product_bundles.get(row.parenttype, {}).get(row.parent, frappe._dict())
			elif row.dn_detail:
				product_bundles = self.product_bundles.get("Delivery Note", {})\
					.get(row.delivery_note, frappe._dict())
				row.item_row = row.dn_detail

			# get buying amount
			if row.item_code in product_bundles:
				row.buying_amount = flt(self.get_buying_amount_from_product_bundle(row,
					product_bundles[row.item_code]), self.currency_precision)
			else:
				row.buying_amount = flt(self.get_buying_amount(row, row.item_code),
					self.currency_precision)

			# get buying rate (guard against zero-qty rows)
			if row.qty:
				row.buying_rate = flt(row.buying_amount / row.qty, self.float_precision)
				row.base_rate = flt(row.base_amount / row.qty, self.float_precision)
			else:
				row.buying_rate, row.base_rate = 0.0, 0.0

			# calculate gross profit
			row.gross_profit = flt(row.base_amount - row.buying_amount, self.currency_precision)
			if row.base_amount:
				row.gross_profit_percent = flt((row.gross_profit / row.base_amount) * 100.0, self.currency_precision)
			else:
				row.gross_profit_percent = 0.0

			# add to grouped
			self.grouped.setdefault(row.get(scrub(self.filters.group_by)), []).append(row)

		if self.grouped:
			self.get_average_rate_based_on_group_by()
	def get_average_rate_based_on_group_by(self):
		"""Collapse each group of rows into aggregate figures in grouped_data."""
		# sum buying / selling totals for group
		for key in list(self.grouped):
			if self.filters.get("group_by") != "Invoice":
				# Accumulate qty/amount totals into the group's first row
				# (note: new_row aliases that row), then derive averages.
				for i, row in enumerate(self.grouped[key]):
					if i==0:
						new_row = row
					else:
						new_row.qty += row.qty
						new_row.buying_amount += flt(row.buying_amount, self.currency_precision)
						new_row.base_amount += flt(row.base_amount, self.currency_precision)
				new_row = self.set_average_rate(new_row)
				self.grouped_data.append(new_row)
			else:
				# Invoice-wise view: net off credit-note (return) quantities
				# and amounts against the original invoice item first.
				for i, row in enumerate(self.grouped[key]):
					if row.parent in self.returned_invoices \
						and row.item_code in self.returned_invoices[row.parent]:
						returned_item_rows = self.returned_invoices[row.parent][row.item_code]
						for returned_item_row in returned_item_rows:
							row.qty += returned_item_row.qty
							row.base_amount += flt(returned_item_row.base_amount, self.currency_precision)
						row.buying_amount = flt(row.qty * row.buying_rate, self.currency_precision)
					# Fully-returned rows (zero qty and amount) are dropped.
					if row.qty or row.base_amount:
						row = self.set_average_rate(row)
						self.grouped_data.append(row)
def set_average_rate(self, new_row):
new_row.gross_profit = flt(new_row.base_amount - new_row.buying_amount, self.currency_precision)
new_row.gross_profit_percent = flt(((new_row.gross_profit / new_row.base_amount) * 100.0), self.currency_precision) \
if new_row.base_amount else 0
new_row.buying_rate = flt(new_row.buying_amount / new_row.qty, self.float_precision) if new_row.qty else 0
new_row.base_rate = flt(new_row.base_amount / new_row.qty, self.float_precision) if new_row.qty else 0
return new_row
def get_returned_invoice_items(self):
returned_invoices = frappe.db.sql("""
select
si.name, si_item.item_code, si_item.stock_qty as qty, si_item.base_net_amount as base_amount, si.return_against
from
`tabSales Invoice` si, `tabSales Invoice Item` si_item
where
si.name = si_item.parent
and si.docstatus = 1
and si.is_return = 1
""", as_dict=1)
self.returned_invoices = frappe._dict()
for inv in returned_invoices:
sel |
Sorsly/subtle | google-cloud-sdk/platform/ext-runtime/go/test/runtime_test.py | Python | mit | 4,874 | 0.000616 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import yaml
from gae_ext_runtime import testutil
RUNTIME_DEF_ROOT = os.path.dirname(os.path.dirname(__file__))
class RuntimeTests(testutil.TestBase):
    """Tests for the Go external runtime's config generation."""

    def setUp(self):
        self.runtime_def_root = RUNTIME_DEF_ROOT
        super(RuntimeTests, self).setUp()

    def assert_no_file(self, filename):
        """Asserts that the relative path 'filename' does not exist."""
        self.assertFalse(os.path.exists(os.path.join(self.temp_path, filename)))

    def test_go_files_no_go(self):
        # A non-.go file must not be detected as the Go runtime.
        self.write_file('foo.notgo', 'package main\nfunc main')
        self.assertFalse(self.generate_configs())
        self.assertEqual(os.listdir(self.temp_path), ['foo.notgo'])

    def test_go_files_with_go(self):
        self.write_file('foo.go', 'package main\nfunc main')
        self.generate_configs()
        with open(self.full_path('app.yaml')) as f:
            contents = yaml.load(f)
        self.assertEqual(contents,
                         {'runtime': 'go', 'api_version': 'go1', 'env': 'flex'})
        # Docker artifacts appear only on deploy.
        self.assert_no_file('Dockerfile')
        self.assert_no_file('.dockerignore')

        self.generate_configs(deploy=True)
        self.assert_file_exists_with_contents(
            'Dockerfile',
            self.read_runtime_def_file('data', 'Dockerfile'))
        self.assert_file_exists_with_contents(
            '.dockerignore',
            self.read_runtime_def_file('data', 'dockerignore'))

    def test_go_genfiles_with_go(self):
        """Test generate_config_data with single .go file."""
        self.write_file('foo.go', 'package main\nfunc main')
        self.generate_configs()
        with open(self.full_path('app.yaml')) as f:
            contents = yaml.load(f)
        self.assertEqual(contents,
                         {'runtime': 'go', 'api_version': 'go1', 'env': 'flex'})
        self.assert_no_file('Dockerfile')
        self.assert_no_file('.dockerignore')

        cfg_files = self.generate_config_data(deploy=True)
        self.assert_genfile_exists_with_contents(
            cfg_files,
            'Dockerfile',
            self.read_runtime_def_file('data', 'Dockerfile'))
        self.assert_genfile_exists_with_contents(
            cfg_files,
            '.dockerignore',
            self.read_runtime_def_file('data', 'dockerignore'))

    def test_go_custom_runtime(self):
        self.write_file('foo.go', 'package main\nfunc main')
        self.generate_configs(custom=True)
        self.assert_file_exists_with_contents(
            'app.yaml',
            'api_version: go1\nenv: flex\nruntime: go\n')
        self.assert_file_exists_with_contents(
            'Dockerfile',
            self.read_runtime_def_file('data', 'Dockerfile'))
        self.assert_file_exists_with_contents(
            '.dockerignore',
            self.read_runtime_def_file('data', 'dockerignore'))

    def test_go_custom_runtime_no_write(self):
        """Test generate_config_data with custom runtime."""
        self.write_file('foo.go', 'package main\nfunc main')
        cfg_files = self.generate_config_data(custom=True)
        self.assert_file_exists_with_contents(
            'app.yaml',
            'api_version: go1\nenv: flex\nruntime: go\n')
        self.assert_genfile_exists_with_contents(
            cfg_files,
            'Dockerfile',
            self.read_runtime_def_file('data', 'Dockerfile'))
        self.assert_genfile_exists_with_contents(
            cfg_files,
            '.dockerignore',
            self.read_runtime_def_file('data', 'dockerignore'))

    def test_go_runtime_field(self):
        self.write_file('foo.go', 'package main\nfunc main')
        config = testutil.AppInfoFake(
            runtime="go",
            env=2,
            api_version=1)
        self.assertTrue(self.generate_configs(appinfo=config,
                                              deploy=True))

    def test_go_custom_runtime_field(self):
        self.write_file('foo.go', 'package main\nfunc main')
        config = testutil.AppInfoFake(
            runtime="custom",
            env=2,
            api_version=1)
        # Repaired: this call was corrupted mid-identifier in the source.
        self.assertTrue(self.generate_configs(appinfo=config,
                                              deploy=True))
if __name__ == '__main__':
unittest. | main()
|
M4rtinK/modrana | core/fix.py | Python | gpl-3.0 | 2,174 | 0.00368 | # -*- coding: utf-8 -*-
# A fix encapsulating class, based on the AGTL Fix class
from datetime import datetime
class Fix():
BEARING_HOLD_EPD = 90 # arbitrary, yet non-random value
last_bearing = 0
# tracking the minimum difference between a received fix time and
# our current internal time.
min_timediff = datetime.utcnow() - datetime.utcfromtimestamp(0)
def __init__(self,
position=None,
altitude=None,
bearing=None,
speed=None,
climb=None,
magnetic_variation=None,
sats=None,
sats_in_use=None,
dgps=False,
| mode=0,
error=0,
error_bearing=0,
horizontal_accuracy=None,
vertical_accuracy=None, # in meters
speed_accuracy=None, # in meters/sec
climb_accuracy=None, # in meters/sec
bearing_accuracy=None, # in degrees
| time_accuracy=None, # in seconds
gps_time=None,
timestamp=None):
self.position = position
# debug - Brno
# self.position = 49.2, 16.616667
self.altitude = altitude
self.bearing = bearing
self.speed = speed
self.climb = climb
self.magnetic_variation = magnetic_variation
self.sats = sats
self.sats_in_use = sats_in_use
self.dgps = dgps
self.mode = mode
self.error = error
self.error_bearing = error_bearing
self.horizontal_accuracy = horizontal_accuracy
self.vertical_accuracy = vertical_accuracy
self.speed_accuracy = speed_accuracy
self.climb_accuracy = climb_accuracy
self.bearing_accuracy = bearing_accuracy
self.time_accuracy = time_accuracy
self.gps_time = gps_time
if timestamp is None:
self.timestamp = datetime.utcnow()
else:
self.timestamp = timestamp
def __str__(self):
return 'mode:' + str(self.mode) + 'lat,lon:' + self.position + 'elev:' + str(self.altitude) |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/nltk/draw/dispersion.py | Python | gpl-2.0 | 1,744 | 0.005161 | # Natural Language Toolkit: Dispersion Plots
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license inf | ormation, see LICENSE.TXT
"""
A utility for displaying lexical dispersion.
"""
def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"):
"""
Generate a lexical dispersion plot.
:param text: The source text
:type text: list(str) or enum(str)
:param words: The target words
:type words: list of str
:param ignore_case: flag to set if case should be ignored when searching text
:type ignore_case: bool
"""
try:
from matplotlib | import pylab
except ImportError:
raise ValueError('The plot function requires matplotlib to be installed.'
'See http://matplotlib.org/')
text = list(text)
words.reverse()
if ignore_case:
words_to_comp = list(map(str.lower, words))
text_to_comp = list(map(str.lower, text))
else:
words_to_comp = words
text_to_comp = text
points = [(x,y) for x in range(len(text_to_comp))
for y in range(len(words_to_comp))
if text_to_comp[x] == words_to_comp[y]]
if points:
x, y = list(zip(*points))
else:
x = y = ()
pylab.plot(x, y, "b|", scalex=.1)
pylab.yticks(list(range(len(words))), words, color="b")
pylab.ylim(-1, len(words))
pylab.title(title)
pylab.xlabel("Word Offset")
pylab.show()
if __name__ == '__main__':
import nltk.compat
from nltk.corpus import gutenberg
words = ['Elinor', 'Marianne', 'Edward', 'Willoughby']
dispersion_plot(gutenberg.words('austen-sense.txt'), words)
|
ZucchiniZe/nightcrawler | nightcrawler/urls.py | Python | mit | 992 | 0 | """nightcrawler URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en | /1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL | to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'', include('listing.urls', namespace='listing')),
url(r'', include('extras.urls', namespace='extras')),
url(r'^accounts/', include('allauth.urls')),
url(r'^queue/', include('django_rq.urls')),
url(r'^admin/', admin.site.urls),
]
|
Yarichi/Proyecto-DASI | Malmo/Python_Examples/animation_test.py | Python | gpl-2.0 | 8,932 | 0.005374 | # ------------------------------------------------------------------------------------------------
# Copyright (c) 2016 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
import MalmoPython
import os
import random
import sys
import time
import json
import random
import errno
def GetMissionXML():
return '''<?xml version="1.0" encoding="UTF-8" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Moving Times</Summary>
</About>
<ServerSection>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;7,220*1,5*3,2;3;,biome_1" />
<DrawingDecorator>
<DrawCuboid x1="-21" y1="226" z1="-21" x2="21" y2="236" z2="21" type="air"/>
<DrawCuboid x1="-21" y1="226" z1="-21" x2="21" y2="226" z2="21" type="lava"/>
<DrawCuboid x1="-20" y1="226" z1="-20" x2="20" y2="16" z2="20" type="gold_block" />
</DrawingDecorator>''' + getAnimation() + '''
<ServerQuitFromTimeUp timeLimitMs="150000" description="out_of_time"/>
<ServerQuitWhenAnyAgentFinishes />
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>Walt</Name>
<AgentStart>
<Placement x="0.5" y="227.0" z="0.5"/>
<Inventory>
</Inventory>
</AgentStart>
<AgentHandlers>
<RewardForMissionEnd rewardForDeath="-1000.0">
<Reward description="out_of_time" reward="-900.0"/>
<Reward description="found_goal" reward="100000.0"/>
</RewardForMissionEnd>
<RewardForTouchingBlockType>
<Block type="slime" reward="-100.0"/>
</RewardForTouchingBlockType>
<AgentQuitFromTouchingBlockType>
<Block type="skull" description="found_goal"/>
</AgentQuitFromTouchingBlockType>
<ContinuousMovementCommands turnSpeedDegs="240"/>
</AgentHandlers>
</AgentSection>
</Mission>'''
def getAnimation():
# Silly test of animations: add a few slime constructions and send them moving linearly around the "playing field"...
# Create a slowly descending roof...
# And an orbiting pumpkin with its own skull sattelite.
xml=""
for x in xrange(4):
xml+='''
<AnimationDecorator ticksPerUpdate="10">
<Linear>
<CanvasBounds>
<min x="-20" y="226" z="-20"/>
<max x="20" y="230" z="20"/>
</CanvasBounds>
<InitialPos x="''' + str(random.randint(-16,16)) + '''" y="228" z="''' + str(random.randint(-16,16)) + '''"/>
<InitialVelocity x="'''+str(random.random()-0.5)+'''" y="0" z="'''+str(random.random()-0.5)+'''"/>
</Linear>
<DrawingDecorator>
<DrawBlock x="0" y="1" z="0" type="slime"/>
<DrawBlock x="0" y="-1" z="0" type="slime"/>
<DrawBlock x="1" y="0" z="0" type="slime"/>
<DrawBlock x="-1" y="0" z="0" type="slime"/>
<DrawBlock x="0" y="0" z="1" type="slime"/>
<DrawBlock x="0" y="0" z="-1" type="slime"/>
</DrawingDecorator>
</AnimationDecorator>'''
return xml+'''
<AnimationDecorator>
<Linear>
<CanvasBounds>
<min x="-21" y="225" z="-21"/>
<max x="21" y="247" z="21"/>
</CanvasBounds>
<InitialPos x="0" y="246" z="0"/>
<InitialVelocity x="0" y="-0.025" z="0"/>
</Linear>
<DrawingDecorator>
<DrawCuboid x1="-20" y1="0" z1="-20" | x2="20" y2="1" z2="20" | type="obsidian"/>
</DrawingDecorator>
</AnimationDecorator>
<AnimationDecorator>
<Parametric seed="random">
<x>15*sin(t/20.0)</x>
<y>227-(t/120.0)</y>
<z>15*cos(t/20.0)</z>
</Parametric>
<DrawingDecorator>
<DrawBlock x="0" y="2" z="0" type="fence"/>
<DrawBlock x="0" y="3" z="0" type="fence"/>
<DrawBlock x="0" y="4" z="0" type="pumpkin"/>
</DrawingDecorator>
</AnimationDecorator>
<AnimationDecorator>
<Parametric seed="random">
<x>(15*sin(t/20.0))+(2*sin(t/2.0))</x>
<y>227-(t/120.0)+2*cos(t/1.5)</y>
<z>(15*cos(t/20.0))+(2*cos(t/2.0))</z>
</Parametric>
<DrawingDecorator>
<DrawBlock x="0" y="2" z="0" type="skull"/>
</DrawingDecorator>
</AnimationDecorator>'''
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately
recordingsDirectory="AnimationRecordings"
try:
os.makedirs(recordingsDirectory)
except OSError as exception:
if exception.errno != errno.EEXIST: # ignore error if already existed
raise
validate = True
my_mission = MalmoPython.MissionSpec(GetMissionXML(),validate)
agent_host = MalmoPython.AgentHost()
try:
agent_host.parse( sys.argv )
except RuntimeError as e:
print 'ERROR:',e
print agent_host.getUsage()
exit(1)
if agent_host.receivedArgument("help"):
print agent_host.getUsage()
exit(0)
my_client_pool = MalmoPython.ClientPool()
my_client_pool.add(MalmoPython.ClientInfo("127.0.0.1", 10000))
if agent_host.receivedArgument("test"):
num_reps = 1
else:
num_reps = 30000
for iRepeat in range(num_reps):
# Set up a recording
my_mission_record = MalmoPython.MissionRecordSpec(recordingsDirectory + "//" + "Mission_" + str(iRepeat) + ".tgz")
my_mission_record.recordRewards()
my_mission_record.recordMP4(24,400000)
max_retries = 3
for retry in range(max_retries):
try:
# Attempt to start the mission:
agent_host.startMission( my_mission, my_client_pool, my_mission_record, 0, "missionEndTestExperiment" )
break
except RuntimeError as e:
if retry == max_retries - 1:
print "Error starting mission",e
print "Is the game running?"
exit(1)
else:
time.sleep(2)
world_state = agent_host.getWorldState()
while not world_state.has_mission_begun:
time.sleep(0.1)
world_state = agent_host.getWorldState()
reward = 0.0 # ke |
JonathonReinhart/pykvm | pykvm/kvmstructs.py | Python | mit | 9,642 | 0.002593 | # pykvm
# https://github.com/JonathonReinhart/pykvm
# (C) 2015 Jonathon Reinhart
import ctypes
from ctypes import Structure, Union, c_uint8, c_uint16, c_uint32, c_uint64
class kvm_regs(Structure):
_fields_ = [
('rax', c_uint64),
('rbx', c_uint64),
('rcx', c_uint64),
('rdx', c_uint64),
('rsi', c_uint64),
('rdi', c_uint64),
('rsp', c_uint64),
('rbp', c_uint64),
('r8', c_uint64),
('r9', c_uint64),
('r10', c_uint64),
('r11', c_uint64),
('r12', c_uint64),
('r13', c_uint64),
('r14', c_uint64),
('r15', c_uint64),
('rip', c_uint64),
('rflags', c_uint64),
]
def __str__(self):
return '\n'.join((
' RAX: 0x{:016X}'.format(self.rax),
' RBX: 0x{:016X}'.format(self.rbx),
' RCX: 0x{:016X}'.format(self.rcx),
' RDX: 0x{:016X}'.format(self.rdx),
| ' RSI: 0x{:016X}'.format(self.rsi),
' RDI: 0x{:016X}'.format(self.rdi),
| ' RSP: 0x{:016X}'.format(self.rsp),
' RBP: 0x{:016X}'.format(self.rbp),
' R8: 0x{:016X}'.format(self.r8),
' R9: 0x{:016X}'.format(self.r9),
' R10: 0x{:016X}'.format(self.r10),
' R11: 0x{:016X}'.format(self.r11),
' R12: 0x{:016X}'.format(self.r12),
' R13: 0x{:016X}'.format(self.r13),
' R14: 0x{:016X}'.format(self.r14),
' R15: 0x{:016X}'.format(self.r15),
' RIP: 0x{:016X}'.format(self.rip),
' RFLAGS: 0x{:016X}'.format(self.rflags),
))
class kvm_segment(Structure):
_fields_ = [
('base', c_uint64),
('limit', c_uint32),
('selector', c_uint16),
('type', c_uint8),
('present', c_uint8),
('dpl', c_uint8),
('db', c_uint8),
('s', c_uint8),
('l', c_uint8),
('g', c_uint8),
('avl', c_uint8),
('unusable', c_uint8),
('padding', c_uint8),
]
def __str__(self):
return '\n'.join((
' Base: 0x{:016X} Limit: 0x{:08X} Selector: 0x{:04X} Type: 0x{:02X}'.format(
self.base, self.limit, self.selector, self.type),
' Present: {} DPL: {} DB: {} S: {} L: {} G: {} AVL: {} Unusable: {}'.format(
self.present, self.dpl, self.db, self.s, self.l, self.g, self.avl, self.unusable),
))
class kvm_dtable(Structure):
_fields_ = [
('base', c_uint64),
('limit', c_uint16),
('padding', c_uint16 * 3),
]
def __str__(self):
return ' Base: 0x{:016X} Limit: 0x{:04X}'.format(self.base, self.limit)
KVM_NR_INTERRUPTS = 256
class kvm_sregs(Structure):
_fields_ = [
('cs', kvm_segment),
('ds', kvm_segment),
('es', kvm_segment),
('fs', kvm_segment),
('gs', kvm_segment),
('ss', kvm_segment),
('tr', kvm_segment),
('ldt', kvm_segment),
('gdt', kvm_dtable),
('idt', kvm_dtable),
('cr0', c_uint64),
('cr2', c_uint64),
('cr3', c_uint64),
('cr4', c_uint64),
('cr8', c_uint64),
('efer', c_uint64),
('apic_base', c_uint64),
('interrupt_bitmap', c_uint64 * ((KVM_NR_INTERRUPTS + 63) / 64) ),
]
def __str__(self):
return '\n'.join((
' CS:', str(self.cs),
' DS:', str(self.ds),
' ES:', str(self.es),
' FS:', str(self.fs),
' GS:', str(self.gs),
' SS:', str(self.ss),
' TR:', str(self.tr),
' LDT:', str(self.ldt),
' CR0: 0x{:016X}'.format(self.cr0),
' CR2: 0x{:016X}'.format(self.cr2),
' CR3: 0x{:016X}'.format(self.cr3),
' CR4: 0x{:016X}'.format(self.cr4),
' CR8: 0x{:016X}'.format(self.cr8),
' EFER: 0x{:016X}'.format(self.efer),
' APIC Base: 0x{:016X}'.format(self.apic_base),
))
class kvm_debugregs(Structure):
_fields_ = [
('db', c_uint64 * 4),
('dr6', c_uint64),
('dr7', c_uint64),
('flags', c_uint64),
('reserved', c_uint64 * 9),
]
def __str__(self):
return '\n'.join((
' DB[0]: 0x{:016X}'.format(self.db[0]),
' DB[1]: 0x{:016X}'.format(self.db[1]),
' DB[2]: 0x{:016X}'.format(self.db[2]),
' DB[3]: 0x{:016X}'.format(self.db[3]),
' DR6: 0x{:016X}'.format(self.dr6),
' DR7: 0x{:016X}'.format(self.dr7),
' FLAGS: 0x{:016X}'.format(self.flags),
))
def mkstruct(*fields):
# http://stackoverflow.com/questions/357997
return type('', (Structure,), {"_fields_": fields})
class kvm_debug_exit_arch__x86(Structure):
_fields_ = [
('exception', c_uint32),
('pad', c_uint32),
('pc', c_uint64),
('dr6', c_uint64),
('dr7', c_uint64),
]
kvm_debug_exit_arch = kvm_debug_exit_arch__x86
class kvm_run_exit_info_union(Union):
_fields_ = [
# KVM_EXIT_UNKNOWN
('hw', mkstruct(
('hardware_exit_reason', c_uint64),
)),
# KVM_EXIT_FAIL_ENTRY
('fail_entry', mkstruct(
('hardware_entry_failure_reason', c_uint64),
)),
# KVM_EXIT_EXCEPTION
('ex', mkstruct(
('exception', c_uint32),
('error_code', c_uint32),
)),
# KVM_EXIT_IO
('io', mkstruct(
('direction', c_uint8),
('size', c_uint8), # bytes
('port', c_uint16),
('count', c_uint32),
('data_offset', c_uint64), # relative to kvm_run start
)),
('debug', mkstruct(
('arch', kvm_debug_exit_arch),
)),
# KVM_EXIT_MMIO
('mmio', mkstruct(
('phys_addr', c_uint64),
('data', c_uint8 * 8),
('len', c_uint32),
('is_write', c_uint8),
)),
# KVM_EXIT_HYPERCALL
('hypercall', mkstruct(
('nr', c_uint64),
('args', c_uint64 * 6),
('ret', c_uint64),
('longmode', c_uint32),
('pad', c_uint32),
)),
# KVM_EXIT_TPR_ACCESS
('tpr_access', mkstruct(
('rip', c_uint64),
('is_write', c_uint32),
('pad', c_uint32),
)),
# KVM_EXIT_INTERNAL_ERROR
('internal', mkstruct(
('suberror', c_uint32),
('ndata', c_uint32),
('data', c_uint64 * 16),
)),
# KVM_EXIT_SYSTEM_EVENT
('system_event', mkstruct(
('type', c_uint32),
('flags', c_uint64),
)),
# Fix the size of the union.
('padding', c_uint8 * 256),
]
class kvm_sync_regs__x86(Structure):
_fields_ = [ ]
kvm_sync_regs = kvm_sync_regs__x86
class kvm_shared_regs_union(Union):
_fields_ = [
('regs', kvm_sync_regs),
('padding', c_uint8 * 1024),
]
class kvm_run(Structure):
_anonymous_ = ['_exit_info']
_fields_ = [
# in
('request_interrupt_window', c_uint8),
('padding1', c_uint8 * 7),
# out
|
mark-in/securedrop-app-code | tests/functional/source_navigation_steps.py | Python | agpl-3.0 | 2,626 | 0.000762 | import tempfile
class SourceNavigationSteps():
def _source_visits_source_homepage(self):
self.driver.get(self.source_location)
self.assertEqual("SecureDrop | Protecting Journalists and Sources",
self.driver.title)
def _source_chooses_to_submit_documents(self):
self.driver.find_element_by_id('submit-documents-button').click()
codename = self.driver.find_element_by_css_selector('#codename')
self.assertTrue(len(codename.text) > 0)
self.source_name = codename.text
def _source_continues_to_submit_page(self):
continue_button = self.driver.find_element_by_id('continue-button')
continue_button.click()
headline = self.driver.find_element_by_class_name('headline')
self.assertEqual('Submit documents and messages', headline.text)
def _source_submits_a_file(self):
with tempfile.NamedTemporaryFile() as file:
file.write(self.secret_message)
file.seek(0)
filename = file.name
filebasename = filename.split('/')[-1]
file_upload_box = self.driver.find_element_by_css_selector(
'[name=fh]')
file_upload_box.send_keys(filename)
submit_button = self.driver.find_element_by_css_selector(
'button[type=submit]')
submit_button.click()
notification = self.driver.find_element_by_css_selector(
'p.notification')
expected_notification = ('Thanks for submitting something '
'to SecureDrop! Please check back '
'later for replies.')
self.assertIn(expected_notification, notification.text)
def _source_submits_a_message(self):
text_box = self.driver.find_element_by_css_selector('[name=msg]')
text_box.send_keys(self.secret_message) # send_keys=type into text box
submit_button = self.driver.find_element_by_css_selector(
'button[type=submit]')
submit_button.click()
notification = self.driver.find_element_by_css_selector(
'p.notification')
self.assertIn('Thanks for submitting something to SecureDrop!'
' Please check back later for replies.',
notification.text)
def _source_logs_out(self):
logout_button = self.driver.find_element_by_id( | 'logout').click()
notification = self.driver.find_element_by_css_selector('p.no | tification')
self.assertIn('Thank you for logging out.', notification.text)
|
ronkitay/Rons-Tutorials | Python/BasicTutorials/command_line_arguments.py | Python | mit | 1,520 | 0.003947 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import io
def get_file_length(file_to_check):
return get_file_length_with_os_stats(file_to_check)
# return get_file_length_with_seek_and_tell(file_to_check)
def get_file_length_with_seek_and_tell(file_to_check):
fi | le_to_check.seek(0, io.SEEK_END)
file_size = file_to_check.tell()
file_to_check.seek(0, io.SEEK_SET)
return file_size
def get_file_length_with_os_stats(file_to_check):
return os.stat(file_to_check.name).st_size
def validate_is_file(file_name):
if os.path.isdir( | file_name):
print ("Path {} is a directory, expected a file!".format(file_name))
exit(2)
if __name__ == '__main__':
if len(sys.argv) != 3:
print ("Expected <source path> <targer path> but got {} arguments instead.".format(len(sys.argv) - 1))
exit(1)
print (sys.platform)
source_file_name = sys.argv[1]
target_file_name = sys.argv[2]
validate_is_file(source_file_name)
validate_is_file(target_file_name)
with open(source_file_name, 'r') as source_file:
source_file_size = get_file_length(source_file)
with open(target_file_name, 'w') as target_file:
current_index = source_file_size
while current_index >= 0:
source_file.seek(current_index)
target_file.write(source_file.read(1))
current_index -= 1
with open(target_file_name, 'r') as target_file:
print (target_file.read()) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.